YOLOv7 confusion matrix

Test code

The script below is YOLOv7's test.py with two modifications: the per-class prediction-to-target matching and mAP statistics accumulation are commented out along with the batch-image plotting, and the confusion matrix is plotted right after the evaluation loop. Running it therefore produces essentially just confusion_matrix.png in the run directory.

import argparse
import json
import os
from pathlib import Path
from threading import Thread

import numpy as np
import torch
import yaml
from tqdm import tqdm

from models.experimental import attempt_load
from utils.datasets import create_dataloader
from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, check_requirements, \
    box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, colorstr
from utils.metrics import ap_per_class, ConfusionMatrix
from utils.plots import plot_images, output_to_target, plot_study_txt
from utils.torch_utils import select_device, time_synchronized, TracedModel


def test(data,
         weights=None,
         batch_size=32,
         imgsz=640,
         conf_thres=0.001,
         iou_thres=0.6,  # for NMS
         save_json=False,
         single_cls=False,
         augment=False,
         verbose=False,
         model=None,
         dataloader=None,
         save_dir=Path(''),  # for saving images
         save_txt=False,  # for auto-labelling
         save_hybrid=False,  # for hybrid auto-labelling
         save_conf=False,  # save auto-label confidences
         plots=True,
         wandb_logger=None,
         compute_loss=None,
         half_precision=True,
         trace=False,
         is_coco=False,
         v5_metric=False):
    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py
        device = next(model.parameters()).device  # get model device
    else:  # called directly
        set_logging()
        device = select_device(opt.device, batch_size=batch_size)

        # Directories
        save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))  # increment run
        (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

        # Load model
        model = attempt_load(weights, map_location=device)  # load FP32 model
        gs = max(int(model.stride.max()), 32)  # grid size (max stride)
        imgsz = check_img_size(imgsz, s=gs)  # check img_size

        if trace:
            model = TracedModel(model, device, imgsz)

    # Half
    half = device.type != 'cpu' and half_precision  # half precision only supported on CUDA
    if half:
        model.half()

    # Configure
    model.eval()
    if isinstance(data, str):
        is_coco = data.endswith('coco.yaml')
        with open(data) as f:
            data = yaml.load(f, Loader=yaml.SafeLoader)
    check_dataset(data)  # check
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95, 10).to(device)  # iou vector for mAP@0.5:0.95
    niou = iouv.numel()

    # Logging
    log_imgs = 0
    if wandb_logger and wandb_logger.wandb:
        log_imgs = min(wandb_logger.log_imgs, 100)

    # Dataloader
    if not training:
        if device.type != 'cpu':
            model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))  # run once
        task = opt.task if opt.task in ('train', 'val', 'test') else 'val'  # path to train/val/test images
        dataloader = create_dataloader(data[task], imgsz, batch_size, gs, opt, pad=0.5, rect=True,
                                       prefix=colorstr(f'{task}: '))[0]

    if v5_metric:
        print("Testing with YOLOv5 AP metric...")

    seen = 0
    confusion_matrix = ConfusionMatrix(nc=nc)
    names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
    coco91class = coco80_to_coco91_class()
    s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
    p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
    loss = torch.zeros(3, device=device)
    jdict, stats, ap, ap_class, wandb_images = [], [], [], [], []
    for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
        img = img.to(device, non_blocking=True)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        targets = targets.to(device)
        nb, _, height, width = img.shape  # batch size, channels, height, width

        with torch.no_grad():
            # Run model
            t = time_synchronized()
            out, train_out = model(img, augment=augment)  # inference and training outputs
            t0 += time_synchronized() - t

            # Compute loss
            if compute_loss:
                loss += compute_loss([x.float() for x in train_out], targets)[1][:3]  # box, obj, cls

            # Run NMS
            targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device)  # to pixels
            lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling
            t = time_synchronized()
            out = non_max_suppression(out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb, multi_label=True)
            t1 += time_synchronized() - t

        # Statistics per image
        for si, pred in enumerate(out):
            labels = targets[targets[:, 0] == si, 1:]
            nl = len(labels)
            tcls = labels[:, 0].tolist() if nl else []  # target class
            path = Path(paths[si])
            seen += 1

            if len(pred) == 0:
                if nl:
                    stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
                continue

            # Predictions
            predn = pred.clone()
            scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1])  # native-space pred

            # Append to text file
            if save_txt:
                gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]]  # normalization gain whwh
                for *xyxy, conf, cls in predn.tolist():
                    xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                    line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
                    with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f:
                        f.write(('%g ' * len(line)).rstrip() % line + '\n')

            # W&B logging - Media Panel Plots
            if len(wandb_images) < log_imgs and wandb_logger.current_epoch > 0:  # Check for test operation
                if wandb_logger.current_epoch % wandb_logger.bbox_interval == 0:
                    box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
                                 "class_id": int(cls),
                                 "box_caption": "%s %.3f" % (names[cls], conf),
                                 "scores": {"class_score": conf},
                                 "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
                    boxes = {"predictions": {"box_data": box_data, "class_labels": names}}  # inference-space
                    wandb_images.append(wandb_logger.wandb.Image(img[si], boxes=boxes, caption=path.name))
            wandb_logger.log_training_progress(predn, path, names) if wandb_logger and wandb_logger.wandb_run else None

            # Append to pycocotools JSON dictionary
            if save_json:
                # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
                image_id = int(path.stem) if path.stem.isnumeric() else path.stem
                box = xyxy2xywh(predn[:, :4])  # xywh
                box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
                for p, b in zip(pred.tolist(), box.tolist()):
                    jdict.append({'image_id': image_id,
                                  'category_id': coco91class[int(p[5])] if is_coco else int(p[5]),
                                  'bbox': [round(x, 3) for x in b],
                                  'score': round(p[4], 5)})

            # Assign all predictions as incorrect
            correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)
            if nl:
                """
                predn: {x1, y1, x2, y2, conf, cls}
                tensor([[319.32043,  43.82351, 426.80283, 110.60916,   0.90186,  17.00000],
                        [ 84.76453,  36.17760, 120.57571,  67.90933,   0.65967,  19.00000],
                        [  3.41989,  57.74736,  94.48680, 110.90230,   0.51758,   8.00000]], device='cuda:0')
                labels: {cls, x, y, w, h}
                tensor([[ 19.00000, 148.48001,  75.52252,  44.80000,  46.05405],
                        [ 17.00000, 492.80002, 106.86487, 139.51996,  88.27026],
                        [ 17.00000,  79.36000, 124.77477, 124.16000,  52.45045]], device='cuda:0')
                tbox: {x1, y1, x2, y2}
                tensor([[ 86.04846,  34.00000, 121.06818,  70.00000],
                        [318.17920,  42.00001, 427.24057, 111.00000],
                        [  1.00056,  70.00000,  98.05522, 111.00000]], device='cuda:0')
                tcls_tensor: {cls}
                tensor([19., 17., 17.], device='cuda:0')
                """
                detected = []  # target indices
                tcls_tensor = labels[:, 0]

                # target boxes
                tbox = xywh2xyxy(labels[:, 1:5])
                scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1])  # native-space labels
                if plots:
                    confusion_matrix.process_batch(predn, torch.cat((labels[:, 0:1], tbox), 1))

                # Per target class -- commented out: only the confusion matrix is needed here
                # for cls in torch.unique(tcls_tensor):
                #     ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1)  # prediction indices
                #     pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1)  # target indices
                #
                #     # Search for detections
                #     if pi.shape[0]:
                #         # Prediction to target ious
                #         ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1)  # best ious, indices
                #         # Append detections
                #         detected_set = set()
                #         for j in (ious > iouv[0]).nonzero(as_tuple=False):
                #             d = ti[i[j]]  # detected target
                #             if d.item() not in detected_set:
                #                 detected_set.add(d.item())
                #                 detected.append(d)
                #                 correct[pi[j]] = ious[j] > iouv  # iou_thres is 1xn
                #                 if len(detected) == nl:  # all targets already located in image
                #                     break

            # # Append statistics (correct, conf, pcls, tcls)
            # stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))

        # # Plot images
        # if plots and batch_i < 3:
        #     f = save_dir / f'test_batch{batch_i}_labels.jpg'  # labels
        #     Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start()
        #     f = save_dir / f'test_batch{batch_i}_pred.jpg'  # predictions
        #     Thread(target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True).start()

    confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))

    # Compute statistics
    stats = [np.concatenate(x, 0) for x in zip(*stats)]  # to numpy
    if len(stats) and stats[0].any():
        p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, v5_metric=v5_metric, save_dir=save_dir, names=names)
        ap50, ap = ap[:, 0], ap.mean(1)  # AP@0.5, AP@0.5:0.95
        mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
        nt = np.bincount(stats[3].astype(np.int64), minlength=nc)  # number of targets per class
    else:
        nt = torch.zeros(1)

    # Print results
    pf = '%20s' + '%12i' * 2 + '%12.3g' * 4  # print format
    print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))

    # Print results per class
    if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
        for i, c in enumerate(ap_class):
            print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))

    # Print speeds
    t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size)  # tuple
    if not training:
        print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t)

    # Plots
    if plots:
        confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
        if wandb_logger and wandb_logger.wandb:
            val_batches = [wandb_logger.wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))]
            wandb_logger.log({"Validation": val_batches})
    if wandb_images:
        wandb_logger.log({"Bounding Box Debugger/Images": wandb_images})

    # Save JSON
    if save_json and len(jdict):
        w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else ''  # weights
        anno_json = './coco/annotations/instances_val2017.json'  # annotations json
        pred_json = str(save_dir / f"{w}_predictions.json")  # predictions json
        print('\nEvaluating pycocotools mAP... saving %s...' % pred_json)
        with open(pred_json, 'w') as f:
            json.dump(jdict, f)

        try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
            from pycocotools.coco import COCO
            from pycocotools.cocoeval import COCOeval

            anno = COCO(anno_json)  # init annotations api
            pred = anno.loadRes(pred_json)  # init predictions api
            eval = COCOeval(anno, pred, 'bbox')
            if is_coco:
                eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files]  # image IDs to evaluate
            eval.evaluate()
            eval.accumulate()
            eval.summarize()
            map, map50 = eval.stats[:2]  # update results (mAP@0.5:0.95, mAP@0.5)
        except Exception as e:
            print(f'pycocotools unable to run: {e}')

    # Return results
    model.float()  # for training
    if not training:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        print(f"Results saved to {save_dir}{s}")
    maps = np.zeros(nc) + map
    for i, c in enumerate(ap_class):
        maps[c] = ap[i]
    return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t


if __name__ == '__main__':
    parser = argparse.ArgumentParser(prog='test.py')
    parser.add_argument('--weights', nargs='+', type=str, default='runs/train/exp10/weights/best.pt', help='model.pt path(s)')
    parser.add_argument('--data', type=str, default='data/voc.yaml', help='*.data path')
    parser.add_argument('--batch-size', type=int, default=1, help='size of each image batch')
    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.5, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.65, help='IOU threshold for NMS')
    parser.add_argument('--task', default='test', help='train, val, test, speed or study')
    parser.add_argument('--device', default='0', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--verbose', action='store_true', help='report mAP by class')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file')
    parser.add_argument('--project', default='runs/test', help='save to project/name')
    parser.add_argument('--name', default='yolov7_640_val', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--no-trace', action='store_true', help="don't trace model")
    parser.add_argument('--v5-metric', action='store_true', help='assume maximum recall as 1.0 in AP calculation')
    opt = parser.parse_args()
    opt.save_json |= opt.data.endswith('coco.yaml')
    opt.data = check_file(opt.data)  # check file
    print(opt)
    # check_requirements()

    if opt.task in ('train', 'val', 'test'):  # run normally
        test(opt.data,
             opt.weights,
             opt.batch_size,
             opt.img_size,
             opt.conf_thres,
             opt.iou_thres,
             opt.save_json,
             opt.single_cls,
             opt.augment,
             opt.verbose,
             save_txt=opt.save_txt | opt.save_hybrid,
             save_hybrid=opt.save_hybrid,
             save_conf=opt.save_conf,
             trace=not opt.no_trace,
             v5_metric=opt.v5_metric)

    elif opt.task == 'speed':  # speed benchmarks
        for w in opt.weights:
            test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False, v5_metric=opt.v5_metric)

    elif opt.task == 'study':  # run over a range of settings and save/plot
        # python test.py --task study --data coco.yaml --iou 0.65 --weights yolov7.pt
        x = list(range(256, 1536 + 128, 128))  # x axis (image sizes)
        for w in opt.weights:
            f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt'  # filename to save to
            y = []  # y axis
            for i in x:  # img-size
                print(f'\nRunning {f} point {i}...')
                r, _, t = test(opt.data, w, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json,
                               plots=False, v5_metric=opt.v5_metric)
                y.append(r + t)  # results and times
            np.savetxt(f, y, fmt='%10.4g')  # save
        os.system('zip -r study.zip study_*.txt')
        plot_study_txt(x=x)  # plot
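
With the argparse defaults above, a run like the following writes confusion_matrix.png into runs/test/yolov7_640_val (the paths are simply the defaults baked into the script; point --weights and --data at your own checkpoint and dataset yaml):

python test.py --weights runs/train/exp10/weights/best.pt --data data/voc.yaml --task test

Note that --conf-thres defaults to 0.5 here instead of the usual 0.001 used for mAP: NMS then discards low-confidence boxes before they ever reach ConfusionMatrix.process_batch, so the matrix only reflects fairly confident predictions.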

Confusion matrix code:

This is the ConfusionMatrix class from YOLOv7's utils/metrics.py (itself an updated version of kaanakan/object_detection_confusion_matrix), with a debug print of the matrix shape added in plot().

# Imports as in utils/metrics.py
from pathlib import Path

import matplotlib.pyplot as plt
import numpy as np
import torch

from . import general  # utils/general.py; use `from utils import general` when running outside the package


class ConfusionMatrix:
    # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix
    def __init__(self, nc, conf=0.25, iou_thres=0.45):
        self.matrix = np.zeros((nc + 1, nc + 1))
        self.nc = nc  # number of classes
        self.conf = conf
        self.iou_thres = iou_thres

    def process_batch(self, detections, labels):
        """
        Return intersection-over-union (Jaccard index) of boxes.
        Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
        Arguments:
            detections (Array[N, 6]), x1, y1, x2, y2, conf, class
            labels (Array[M, 5]), class, x1, y1, x2, y2
        Returns:
            None, updates confusion matrix accordingly
        """
        detections = detections[detections[:, 4] > self.conf]
        gt_classes = labels[:, 0].int()
        detection_classes = detections[:, 5].int()
        iou = general.box_iou(labels[:, 1:], detections[:, :4])

        x = torch.where(iou > self.iou_thres)
        if x[0].shape[0]:
            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()
            if x[0].shape[0] > 1:
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
        else:
            matches = np.zeros((0, 3))

        n = matches.shape[0] > 0
        m0, m1, _ = matches.transpose().astype(np.int16)
        for i, gc in enumerate(gt_classes):
            j = m0 == i
            if n and sum(j) == 1:
                self.matrix[gc, detection_classes[m1[j]]] += 1  # correct
            else:
                self.matrix[self.nc, gc] += 1  # background FP

        if n:
            for i, dc in enumerate(detection_classes):
                if not any(m1 == i):
                    self.matrix[dc, self.nc] += 1  # background FN

    def matrix(self):
        return self.matrix

    def plot(self, save_dir='', names=()):
        try:
            import seaborn as sn

            print("self.matrix:------------>", self.matrix.shape)  # e.g. (21, 21) for 20 VOC classes + background
            array = self.matrix / (self.matrix.sum(0).reshape(1, self.nc + 1) + 1E-6)  # normalize
            array[array < 0.005] = np.nan  # don't annotate (would appear as 0.00)

            fig = plt.figure(figsize=(12, 9), tight_layout=True)
            sn.set(font_scale=1.0 if self.nc < 50 else 0.8)  # for label size
            labels = (0 < len(names) < 99) and len(names) == self.nc  # apply names to ticklabels
            sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True,
                       xticklabels=names + ['background FP'] if labels else "auto",
                       yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1))
            fig.axes[0].set_xlabel('True')
            fig.axes[0].set_ylabel('Predicted')
            fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)
        except Exception as e:
            pass

    def print(self):
        for i in range(self.nc + 1):
            print(' '.join(map(str, self.matrix[i])))
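
process_batch expects detections as (x1, y1, x2, y2, conf, cls) and labels as (cls, x1, y1, x2, y2), both in the same pixel space; plot() then divides each column by its column sum, so a heatmap cell reads as a fraction of the ground truth in that column (the extra row/column at index nc is background). Note that this older version updates matched pairs as matrix[gt, pred] while the background row/column follow the opposite convention; later Ultralytics releases transposed the matched update to matrix[pred, gt] to agree with the 'Predicted' row label. A minimal standalone sketch of feeding the class (all boxes, classes and scores below are hypothetical; it assumes the class is importable, e.g. running from the YOLOv7 repo root):

# Minimal usage sketch with hand-made tensors (hypothetical values)
import torch

cm = ConfusionMatrix(nc=2)  # 2 classes -> a 3x3 matrix, index 2 is background

# detections: (x1, y1, x2, y2, conf, cls) in pixels, post-NMS
detections = torch.tensor([[10., 10., 50., 50., 0.90, 0.],   # overlaps the label below -> counted as correct
                           [60., 60., 90., 90., 0.80, 1.]])  # matches no label -> background FN cell for class 1
# labels: (cls, x1, y1, x2, y2) in the same pixel space
labels = torch.tensor([[0., 12., 12., 48., 48.]])

cm.process_batch(detections, labels)
cm.print()  # raw counts; plot() writes the column-normalized heatmap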

Dataset split code:

The script below converts VOC 2007 XML annotations into YOLO-format labels and randomly assigns each image to train or val (TRAIN_RATIO = 70 means roughly a 70/30 split), copying images and labels into VOCdevkit/images/{train,val} and VOCdevkit/labels/{train,val} and writing the image-path lists yolov7_train.txt and yolov7_val.txt.

import xml.etree.ElementTree as ET
import pickle
import os
from os import listdir, getcwd
from os.path import join
import random
from shutil import copyfile

classes = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow",
           "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa",
           "train", "tvmonitor"]

TRAIN_RATIO = 70  # percentage of images assigned to the training set


def clear_hidden_files(path):
    dir_list = os.listdir(path)
    for i in dir_list:
        abspath = os.path.join(os.path.abspath(path), i)
        if os.path.isfile(abspath):
            if i.startswith("._"):
                os.remove(abspath)
        else:
            clear_hidden_files(abspath)


def convert(size, box):
    # Convert a VOC box (xmin, xmax, ymin, ymax) to normalized YOLO (x, y, w, h)
    dw = 1. / size[0]
    dh = 1. / size[1]
    x = (box[0] + box[1]) / 2.0
    y = (box[2] + box[3]) / 2.0
    w = box[1] - box[0]
    h = box[3] - box[2]
    x = x * dw
    w = w * dw
    y = y * dh
    h = h * dh
    return (x, y, w, h)


def convert_annotation(image_id):
    in_file = open('VOCdevkit/VOC2007/Annotations/%s.xml' % image_id)
    out_file = open('VOCdevkit/VOC2007/YOLOLabels/%s.txt' % image_id, 'w')
    tree = ET.parse(in_file)
    root = tree.getroot()
    size = root.find('size')
    w = int(size.find('width').text)
    h = int(size.find('height').text)

    for obj in root.iter('object'):
        # difficult = obj.find('difficult').text
        cls = obj.find('name').text
        if cls not in classes:
            continue
        cls_id = classes.index(cls)
        xmlbox = obj.find('bndbox')
        b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text),
             float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))
        bb = convert((w, h), b)
        out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
    in_file.close()
    out_file.close()

wd = os.getcwd()
data_base_dir = os.path.join(wd, "VOCdevkit/")
if not os.path.isdir(data_base_dir):
    os.mkdir(data_base_dir)
work_space_dir = os.path.join(data_base_dir, "VOC2007/")
if not os.path.isdir(work_space_dir):
    os.mkdir(work_space_dir)
annotation_dir = os.path.join(work_space_dir, "Annotations/")
if not os.path.isdir(annotation_dir):
    os.mkdir(annotation_dir)
clear_hidden_files(annotation_dir)
image_dir = os.path.join(work_space_dir, "JPEGImages/")
if not os.path.isdir(image_dir):
    os.mkdir(image_dir)
clear_hidden_files(image_dir)
yolo_labels_dir = os.path.join(work_space_dir, "YOLOLabels/")
if not os.path.isdir(yolo_labels_dir):
    os.mkdir(yolo_labels_dir)
clear_hidden_files(yolo_labels_dir)
yolov5_images_dir = os.path.join(data_base_dir, "images/")
if not os.path.isdir(yolov5_images_dir):
    os.mkdir(yolov5_images_dir)
clear_hidden_files(yolov5_images_dir)
yolov5_labels_dir = os.path.join(data_base_dir, "labels/")
if not os.path.isdir(yolov5_labels_dir):
    os.mkdir(yolov5_labels_dir)
clear_hidden_files(yolov5_labels_dir)
yolov5_images_train_dir = os.path.join(yolov5_images_dir, "train/")
if not os.path.isdir(yolov5_images_train_dir):
    os.mkdir(yolov5_images_train_dir)
clear_hidden_files(yolov5_images_train_dir)
yolov5_images_test_dir = os.path.join(yolov5_images_dir, "val/")
if not os.path.isdir(yolov5_images_test_dir):
    os.mkdir(yolov5_images_test_dir)
clear_hidden_files(yolov5_images_test_dir)
yolov5_labels_train_dir = os.path.join(yolov5_labels_dir, "train/")
if not os.path.isdir(yolov5_labels_train_dir):
    os.mkdir(yolov5_labels_train_dir)
clear_hidden_files(yolov5_labels_train_dir)
yolov5_labels_test_dir = os.path.join(yolov5_labels_dir, "val/")
if not os.path.isdir(yolov5_labels_test_dir):
    os.mkdir(yolov5_labels_test_dir)
clear_hidden_files(yolov5_labels_test_dir)

train_file = open(os.path.join(wd, "yolov7_train.txt"), 'w')
test_file = open(os.path.join(wd, "yolov7_val.txt"), 'w')
train_file.close()
test_file.close()
train_file = open(os.path.join(wd, "yolov7_train.txt"), 'a')
test_file = open(os.path.join(wd, "yolov7_val.txt"), 'a')
list_imgs = os.listdir(image_dir)  # list image files
prob = random.randint(1, 100)
print("Probability: %d" % prob)
for i in range(0, len(list_imgs)):
    path = os.path.join(image_dir, list_imgs[i])
    if os.path.isfile(path):
        image_path = image_dir + list_imgs[i]
        voc_path = list_imgs[i]
        (nameWithoutExtention, extention) = os.path.splitext(os.path.basename(image_path))
        (voc_nameWithoutExtention, voc_extention) = os.path.splitext(os.path.basename(voc_path))
        annotation_name = nameWithoutExtention + '.xml'
        annotation_path = os.path.join(annotation_dir, annotation_name)
        label_name = nameWithoutExtention + '.txt'
        label_path = os.path.join(yolo_labels_dir, label_name)
        prob = random.randint(1, 100)
        print("Probability: %d" % prob)
        if prob < TRAIN_RATIO:  # train dataset
            if os.path.exists(annotation_path):
                train_file.write(image_path + '\n')
                convert_annotation(nameWithoutExtention)  # convert label
                copyfile(image_path, yolov5_images_train_dir + voc_path)
                copyfile(label_path, yolov5_labels_train_dir + label_name)
        else:  # test dataset
            if os.path.exists(annotation_path):
                test_file.write(image_path + '\n')
                convert_annotation(nameWithoutExtention)  # convert label
                copyfile(image_path, yolov5_images_test_dir + voc_path)
                copyfile(label_path, yolov5_labels_test_dir + label_name)
train_file.close()
test_file.close()
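
test.py above reads its dataset description from data/voc.yaml. A minimal sketch consistent with the directories this script populates is shown below; the paths are assumptions (relative to the YOLOv7 repo root) and should be adjusted to your layout. Pointing at the images/ directories lets the loader find labels by the usual images -> labels path substitution:

# data/voc.yaml -- minimal sketch, paths assumed, adjust to your setup
train: ./VOCdevkit/images/train
val: ./VOCdevkit/images/val
test: ./VOCdevkit/images/val   # --task test in test.py reads the 'test' key

nc: 20
names: ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
        'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa',
        'train', 'tvmonitor']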
