Supports both YOLOv5 and YOLOv8 models.
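The script consumes an ONNX export of the detector. As a minimal sketch (assuming the ultralytics package is installed), yolov8n.onnx can be produced as shown below; YOLOv5 ships its own export.py for the same purpose:

# Sketch: exporting yolov8n.onnx with the ultralytics package (assumes
# ultralytics is installed; YOLO.export is its standard export API).
from ultralytics import YOLO

model = YOLO("yolov8n.pt")   # downloads the pretrained weights if missing
model.export(format="onnx")  # writes yolov8n.onnx next to the weights file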
The code of the ChtDeploy class is as follows:
import onnxruntime
import numpy as np
import cv2


class ChtDeploy():
    def __init__(self, img_path, onnx_path, iou_threshold=0.45, conf_threshold=0.3, detect_w=640, detect_h=640):
        self.img = cv2.imread(img_path)  # h, w, c
        self.img_h = self.img.shape[0]
        self.img_w = self.img.shape[1]
        self.iou_threshold = iou_threshold
        self.conf_threshold = conf_threshold
        self.detect_w = detect_w
        self.detect_h = detect_h
        self.onnx = onnx_path
        self.max_wh = max(self.detect_h, self.detect_w)

    def letterbox(self):
        if self.img_h == self.detect_h and self.img_w == self.detect_w:
            return self.img
        scale = min(self.detect_w / self.img_w, self.detect_h / self.img_h)  # scale factor
        nw, nh = int(self.img_w * scale), int(self.img_h * scale)
        image = cv2.resize(self.img, (nw, nh), interpolation=cv2.INTER_LINEAR)
        image_back = np.ones((self.detect_h, self.detect_w, 3), dtype=np.uint8) * 128
        # place the resized image at the center of the canvas (letterbox)
        image_back[(self.detect_h - nh) // 2: (self.detect_h - nh) // 2 + nh,
                   (self.detect_w - nw) // 2: (self.detect_w - nw) // 2 + nw, :] = image
        return image_back

    def img2input(self, img):
        img = np.transpose(img, (2, 0, 1))
        img = img / 255
        return np.expand_dims(img, axis=0).astype(np.float32)  # (1, 3, 640, 640)

    def infer(self, onnx, img):
        session = onnxruntime.InferenceSession(onnx)
        input_name = session.get_inputs()[0].name
        label_name = session.get_outputs()[0].name
        pred = session.run([label_name], {input_name: img})[0]
        return pred  # yolov8: 1 * 84 * 8400, yolov5: 1 * 25200 * 85

    def xywh_to_x1y1x2y2(self, boxes):
        # split out center coordinates and width/height
        x_center, y_center, width, height = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
        # compute top-left and bottom-right corners
        x1 = x_center - width / 2
        y1 = y_center - height / 2
        x2 = x_center + width / 2
        y2 = y_center + height / 2
        # stack the results into a new array
        xyxy_boxes = np.stack((x1, y1, x2, y2), axis=1)
        return xyxy_boxes

    def normalpred(self, pred):  # normalize the v8 output layout to the v5 style
        if pred.shape[1] < pred.shape[2]:  # v8
            pred = np.squeeze(pred).T  # 1 * 84 * 8400 -> 8400 * 84
            scores = np.max(pred[:, 4:], axis=1)
            classes = np.argmax(pred[:, 4:], axis=1)
            mask = scores > self.conf_threshold  # confidence filtering
            boxes = self.xywh_to_x1y1x2y2(pred[mask])
            scores = scores[mask]
            classes = classes[mask]
            return boxes, scores, classes
        pred = np.squeeze(pred)
        scores = pred[:, 4]
        classes = np.argmax(pred[:, 5:], axis=1)
        mask = scores > self.conf_threshold  # confidence filtering
        boxes = self.xywh_to_x1y1x2y2(pred[mask])
        scores = scores[mask]
        classes = classes[mask]
        return boxes, scores, classes

    def box_area(self, boxes):
        return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])

    def box_iou(self, box1, box2):
        area1 = self.box_area(box1)  # N
        area2 = self.box_area(box2)  # M
        # broadcasting: trailing dimensions must either match or be 1
        lt = np.maximum(box1[:, np.newaxis, :2], box2[:, :2])
        rb = np.minimum(box1[:, np.newaxis, 2:], box2[:, 2:])
        wh = rb - lt
        wh = np.maximum(0, wh)  # [N, M, 2]
        inter = wh[:, :, 0] * wh[:, :, 1]
        iou = inter / (area1[:, np.newaxis] + area2 - inter)
        return iou

    def numpy_nms(self, boxes, scores, iou_threshold):
        idxs = scores.argsort()  # indices sorted by score in ascending order [N]; best box is last
        keep = []
        while idxs.size > 0:
            max_score_index = idxs[-1]
            max_score_box = boxes[max_score_index][None, :]
            keep.append(max_score_index)
            if idxs.size == 1:
                break
            idxs = idxs[:-1]  # drop the top-scoring box, then compare the remaining boxes against it
            other_boxes = boxes[idxs]  # [?, 4]
            ious = self.box_iou(max_score_box, other_boxes)  # one box against the rest, 1 x M
            idxs = idxs[ious[0] <= iou_threshold]
        return keep

    def draw_res(self, boxes, img, color=(255, 255, 0), thickness=2):
        for box in boxes:
            x1, y1, x2, y2 = int(box[0]), int(box[1]), int(box[2]), int(box[3])
            cv2.rectangle(img, (x1, y1), (x2, y2), color, thickness)
        cv2.imshow('detect', img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    def deploy(self):
        img = self.letterbox()
        img1 = self.img2input(img)
        pred = self.infer(self.onnx, img1)
        boxes, scores, classes = self.normalpred(pred)
        c = classes * self.max_wh  # per-class offset so NMS never mixes classes
        nb = boxes + c[:, np.newaxis]
        keep = self.numpy_nms(nb, scores, self.iou_threshold)
        self.draw_res(boxes[keep], img)


d = ChtDeploy(img_path="gg.webp", onnx_path="yolov8n.onnx")
d.deploy()
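One detail worth calling out in deploy: c = classes * self.max_wh shifts every box by its class index times the largest detection dimension before NMS, so boxes of different classes can never overlap and therefore never suppress each other (the same trick YOLOv5's own NMS uses). A small sketch with made-up values illustrates the effect:

# Sketch: the class-offset trick that makes numpy_nms class-aware.
# The box values below are made up for illustration.
import numpy as np

boxes = np.array([[10., 10., 50., 50.],   # class 0
                  [10., 10., 50., 50.]])  # class 1, identical location
classes = np.array([0, 1])
max_wh = 640

shifted = boxes + (classes * max_wh)[:, np.newaxis]
# shifted[0] spans x in [10, 50]; shifted[1] now spans x in [650, 690].
# Their IoU is 0, so plain NMS keeps both detections even though the
# original boxes overlap completely.

Without the offset, the lower-scoring box would be suppressed despite belonging to a different class; note that draw_res still receives the unshifted boxes.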
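Two practical caveats. First, infer builds a new onnxruntime.InferenceSession on every call; for repeated inference it is cheaper to create the session once in __init__ and reuse it. Second, draw_res renders onto the 640 x 640 letterboxed canvas, not the original image. If boxes are needed in original-image coordinates, a hypothetical helper (mirroring the letterbox math above, not part of ChtDeploy) could undo the padding and scale:

# Sketch: mapping boxes from the letterboxed canvas back to the original
# image. boxes_to_original is a hypothetical helper; it reverses the scale
# and padding computed in letterbox().
import numpy as np

def boxes_to_original(boxes, img_w, img_h, detect_w=640, detect_h=640):
    scale = min(detect_w / img_w, detect_h / img_h)
    pad_x = (detect_w - int(img_w * scale)) // 2  # left padding
    pad_y = (detect_h - int(img_h * scale)) // 2  # top padding
    out = boxes.copy().astype(np.float32)
    out[:, [0, 2]] = (out[:, [0, 2]] - pad_x) / scale  # x1, x2
    out[:, [1, 3]] = (out[:, [1, 3]] - pad_y) / scale  # y1, y2
    return out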