Final Undergraduate Competition Vlog - The Full Preparation Process for the 2024 Smart Car Competition, Smart Medical Group - Part 13: Using the ResNet .bin
Following the previous posts, we can now obtain the ResNet .bin model.
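Before wiring the .bin into any code, it is worth a quick sanity check that the converted model really expects a 224x224 NV12 input. Below is a minimal sketch using pyeasy_dnn; the property attribute names follow the usual Horizon examples, so treat them as an assumption and adjust if your SDK version differs.

from hobot_dnn import pyeasy_dnn as dnn

# Load the converted model (same path as used in the demos below)
models = dnn.load('/root/model/resnet18_224x224_nv12.bin')

# Print the input/output tensor properties to confirm the expected
# layout, data type, and the 224x224 input size.
# Note: attribute names follow common Horizon pyeasy_dnn examples;
# verify them against your own SDK version.
for i, inp in enumerate(models[0].inputs):
    print("input", i,
          "shape:", inp.properties.shape,
          "layout:", inp.properties.layout,
          "type:", inp.properties.tensor_type)
for i, outp in enumerate(models[0].outputs):
    print("output", i, "shape:", outp.properties.shape)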
1. Testing the ResNet .bin
Here is a demo that detects the black line in a test video; you can use it to check the accuracy of your own black-line recognition.
import cv2
import numpy as np
from hobot_dnn import pyeasy_dnn as dnn

def convert_bgr_to_nv12(cv_image):
    yuv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2YUV)
    y_channel = yuv_image[:, :, 0]
    u_channel = yuv_image[::2, ::2, 1]
    v_channel = yuv_image[::2, ::2, 2]
    uv_channel = np.empty((u_channel.shape[0], u_channel.shape[1] * 2), dtype=u_channel.dtype)
    uv_channel[:, ::2] = u_channel
    uv_channel[:, 1::2] = v_channel
    nv12_image = np.concatenate((y_channel.flatten(), uv_channel.flatten()))
    return nv12_image

def process_frame(cv_image, models, original_width, original_height):
    # Resize the frame to the input size the model expects
    cv_image_resized = cv2.resize(cv_image, (224, 224), interpolation=cv2.INTER_LINEAR)
    nv12_image = convert_bgr_to_nv12(cv_image_resized)
    # Run inference with the model
    outputs = models[0].forward(np.frombuffer(nv12_image, dtype=np.uint8))
    outputs = outputs[0].buffer
    # The model output is assumed to be ratio coordinates on the 224x224 image
    x_ratio, y_ratio = outputs[0][0][0][0], outputs[0][1][0][0]
    # Convert the ratio coordinates to pixel coordinates on the original frame
    x_pixel = int(x_ratio * original_width)
    y_pixel = int(y_ratio * original_height)
    return x_pixel, y_pixel

def main():
    models = dnn.load('/root/model/resnet18_224x224_nv12.bin')
    cap = cv2.VideoCapture("/root/model/03.avi")
    # Pick a codec and create the VideoWriter object
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        x, y = process_frame(frame, models, 640, 480)
        cv2.circle(frame, (x, y), radius=5, color=(0, 0, 255), thickness=-1)
        # Write the annotated frame to the output file
        out.write(frame)
        # cv2.imshow('Frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    out.release()  # Release the VideoWriter object
    cv2.destroyAllWindows()

if __name__ == "__main__":
    main()
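If you have hand-labeled line positions for a few frames of the test video, you can turn the demo into a quick accuracy check by comparing the predicted point against the labels. The sketch below assumes it is appended to the demo script above (so process_frame and the dnn import are available), and assumes a hypothetical labels.csv file with one "frame_index,x,y" row per labeled frame that you prepare yourself.

import csv
import math

# Hypothetical ground-truth file: one "frame_index,x,y" row per labeled frame
def load_labels(path="labels.csv"):
    labels = {}
    with open(path, newline="") as f:
        for row in csv.reader(f):
            labels[int(row[0])] = (float(row[1]), float(row[2]))
    return labels

def evaluate(video_path="/root/model/03.avi", label_path="labels.csv"):
    labels = load_labels(label_path)
    models = dnn.load('/root/model/resnet18_224x224_nv12.bin')
    cap = cv2.VideoCapture(video_path)
    errors = []
    idx = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        if idx in labels:
            # Reuse process_frame() from the demo above
            x, y = process_frame(frame, models, 640, 480)
            gx, gy = labels[idx]
            errors.append(math.hypot(x - gx, y - gy))
        idx += 1
    cap.release()
    if errors:
        print(f"mean pixel error over {len(errors)} labeled frames: {np.mean(errors):.1f}")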
2. Using the ResNet .bin
If you followed the ResNet training and conversion process from the earlier posts, the only things to watch here are resizing the input image to 224x224 before feeding it to the model, and de-normalizing the model output back to pixel coordinates.
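Restated in isolation, those two steps look like the snippet below; the output indexing assumes the same layout as the demo in section 1, and 640x480 is the camera resolution used on my car.

# Resize to the 224x224 input the .bin expects, then convert to NV12
cv_image_resized = cv2.resize(image_np, (224, 224), interpolation=cv2.INTER_LINEAR)
nv12_image = convert_bgr_to_nv12(cv_image_resized)

# The model outputs ratio coordinates in [0, 1]; de-normalize them
# back to the original camera resolution (640x480 here)
outputs = models[0].forward(np.frombuffer(nv12_image, dtype=np.uint8))
outputs = outputs[0].buffer
x = int(640 * outputs[0][0][0][0])
y = int(480 * outputs[0][1][0][0])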
The full ROS 2 node is below; feel free to use it if you need it.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import rclpy
from rclpy.node import Node
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Image
import cv2
import numpy as np
from hobot_dnn import pyeasy_dnn as dnn
def convert_bgr_to_nv12(cv_image):
    # First convert the BGR image to YUV
    yuv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2YUV)
    # Extract the Y channel
    y_channel = yuv_image[:, :, 0]
    # Downsample the U and V channels
    u_channel = yuv_image[::2, ::2, 1]
    v_channel = yuv_image[::2, ::2, 2]
    # Interleave U and V to form the UV plane
    uv_channel = np.empty((u_channel.shape[0], u_channel.shape[1] * 2), dtype=u_channel.dtype)
    uv_channel[:, ::2] = u_channel
    uv_channel[:, 1::2] = v_channel
    # Concatenate the Y plane and the UV plane into NV12 format
    nv12_image = np.concatenate((y_channel.flatten(), uv_channel.flatten()))
    return nv12_image
class ResNetControlNode(Node):
    def __init__(self, name):
        super().__init__(name)
        self.cmd_vel_pub = self.create_publisher(Twist, "/cmd_vel", 10)
        self.subscription = self.create_subscription(Image, "/image", self.image_callback, 10)
        self.models = dnn.load('/root/model/resnet18_224x224_nv12.bin')
        # Tuned PID parameters
        self.Kp = 0.8  # Higher proportional gain for a faster reaction to the error
        self.Kd = 0.5  # Lower derivative gain to avoid overshoot
        self.Ki = 0.1  # Integral gain to help remove steady-state error (if not used before)
        self.Target_value = 320.0
        self.last_Err = 0.0
        self.total_Err = 0.0
        self.output = 0.0
        self.twist = Twist()

    def image_callback(self, msg):
        # Read the ROS Image message data into a NumPy byte buffer
        np_arr = np.frombuffer(msg.data, np.uint8)
        # Decode the MJPG data with OpenCV
        image_np = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
        # The model expects a 224x224 image
        cv_image_resized = cv2.resize(image_np, (224, 224), interpolation=cv2.INTER_LINEAR)
        nv12_image = convert_bgr_to_nv12(cv_image_resized)
        # Model inference (the NV12 buffer already matches the model input format)
        outputs = self.models[0].forward(np.frombuffer(nv12_image, dtype=np.uint8))
        outputs = outputs[0].buffer
        # De-normalize the ratio outputs back to the original 640x480 frame
        x, y = int(640 * outputs[0][0][0][0]), int(480 * outputs[0][1][0][0])
        # print(x, y)
        # PID control logic
        self.Error = self.Target_value - x
        self.total_Err += self.Error
        self.output = self.Kp * self.Error + self.Kd * (self.Error - self.last_Err)
        self.last_Err = self.Error
        self.twist.linear.x = 0.4
        self.twist.angular.z = self.output / 100
        self.cmd_vel_pub.publish(self.twist)
        print(f"( {x}, {y} ) output = {int(self.output)}")

def main(args=None):
    rclpy.init(args=args)
    node = ResNetControlNode("resnet_control_node")
    rclpy.spin(node)
    node.destroy_node()
    rclpy.shutdown()

if __name__ == "__main__":
    main()
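One thing to note about the node above: Ki and total_Err are defined, but the published output only uses the proportional and derivative terms. If you want to experiment with the integral term as well, the update inside image_callback would look roughly like the untested sketch below, including a simple clamp on total_Err to limit integral wind-up (the bound is chosen arbitrarily).

# Sketch of a full PID update inside image_callback (not what the node above does)
self.Error = self.Target_value - x
self.total_Err += self.Error
# Clamp the accumulated error to limit integral wind-up (bound chosen arbitrarily)
self.total_Err = max(min(self.total_Err, 1000.0), -1000.0)
self.output = (self.Kp * self.Error
               + self.Ki * self.total_Err
               + self.Kd * (self.Error - self.last_Err))
self.last_Err = self.Error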
3. Summary
At this point, all of the individual sub-tasks for the smart car (Horizon group) are finished; what remains is writing the master control node that makes the high-level decisions.
In the next post I will share the ideas I picked up along the way, both from experienced competitors' write-ups and from discussions with others.