1. YOLOv8 Environment Setup
1.1 Download and install the latest YOLOv8 code
Repository: https://github.com/ultralytics/ultralytics
1.2 Configure the environment
pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
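Optionally, a quick sanity check that the install worked (a minimal sketch; it assumes the ultralytics package and PyTorch were installed by the step above, and a CUDA build of PyTorch if you plan to train on GPU):

# Quick environment sanity check
import torch
import ultralytics

print(ultralytics.__version__)      # installed Ultralytics version
print(torch.cuda.is_available())    # True if a CUDA-capable GPU is usable
ultralytics.checks()                # Ultralytics' built-in environment report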
2. Data Preparation
2.1 Install the labelme annotation tool
pip install labelme
2.1.2 Launch labelme
Start the labelme annotation tool from the Anaconda Prompt (after installation, running the labelme command opens the GUI).
2.2 Annotate your own data
2.3 Data conversion
2.3.1 Run the code below to convert the JSON labels to TXT format
# -*- coding: utf-8 -*-
import json
import os
import argparse
from tqdm import tqdm


def convert_label_json(json_dir, save_dir, classes):
    json_paths = os.listdir(json_dir)
    classes = classes.split(',')

    for json_path in tqdm(json_paths):
        path = os.path.join(json_dir, json_path)
        with open(path, 'r') as load_f:
            json_dict = json.load(load_f)
        h, w = json_dict['imageHeight'], json_dict['imageWidth']

        # save txt path
        txt_path = os.path.join(save_dir, json_path.replace('json', 'txt'))
        txt_file = open(txt_path, 'w')

        for shape_dict in json_dict['shapes']:
            label = shape_dict['label']
            label_index = classes.index(label)
            points = shape_dict['points']

            # normalize every polygon point by the image width/height
            points_nor_list = []
            for point in points:
                points_nor_list.append(point[0] / w)
                points_nor_list.append(point[1] / h)

            points_nor_list = list(map(lambda x: str(x), points_nor_list))
            points_nor_str = ' '.join(points_nor_list)

            label_str = str(label_index) + ' ' + points_nor_str + '\n'
            txt_file.writelines(label_str)


if __name__ == "__main__":
    """
    python json2txt_nomalize.py --json-dir my_datasets/color_rings/jsons --save-dir my_datasets/color_rings/txts --classes "cat,dogs"
    """
    parser = argparse.ArgumentParser(description='json convert to txt params')
    parser.add_argument('--json-dir', type=str, default='D:/study/cnn/yolo/ultralytics/data/json', help='json path dir')
    parser.add_argument('--save-dir', type=str, default='D:/study/cnn/yolo/ultralytics/data/txt', help='txt save dir')
    parser.add_argument('--classes', type=str, default='slot,lane,zerba_lane,STR_lane,double_arrow,jianshudai', help='classes')
    args = parser.parse_args()

    json_dir = args.json_dir
    save_dir = args.save_dir
    classes = args.classes
    convert_label_json(json_dir, save_dir, classes)
The converted TXT-format labels:
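Each line of a converted file describes one polygon: the class index followed by the normalized x y coordinates of its points. A purely hypothetical line for class 1 with a three-point polygon would look like:

1 0.4052 0.2314 0.4870 0.2301 0.4663 0.3145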
2.3.2 Run the code below to split the dataset
I only annotated a single image here, so to increase the sample count I copied the PNG image and its converted TXT label 32 times.
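If you would rather script those copies than do them by hand, here is a minimal sketch; the file names 1.png / 1.txt and the two directories are assumptions matching the paths used elsewhere in this post:

# Duplicate one image/label pair to pad out a tiny dataset (illustrative only)
import shutil

image_dir = 'D:/study/cnn/yolo/ultralytics/data/image'
txt_dir = 'D:/study/cnn/yolo/ultralytics/data/txt'

for i in range(2, 34):  # keep 1.png/1.txt, write copies named 2..33
    shutil.copyfile(f'{image_dir}/1.png', f'{image_dir}/{i}.png')
    shutil.copyfile(f'{txt_dir}/1.txt', f'{txt_dir}/{i}.txt')

The split script itself follows: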
# Split the images and label files into train / val / test sets by ratio
import shutil
import random
import os
import argparse


# Create the directory if it does not exist
def mkdir(path):
    if not os.path.exists(path):
        os.makedirs(path)


def main(image_dir, txt_dir, save_dir):
    # Create the output directory tree
    mkdir(save_dir)
    images_dir = os.path.join(save_dir, 'images')
    labels_dir = os.path.join(save_dir, 'labels')

    img_train_path = os.path.join(images_dir, 'train')
    img_test_path = os.path.join(images_dir, 'test')
    img_val_path = os.path.join(images_dir, 'val')

    label_train_path = os.path.join(labels_dir, 'train')
    label_test_path = os.path.join(labels_dir, 'test')
    label_val_path = os.path.join(labels_dir, 'val')

    mkdir(images_dir)
    mkdir(labels_dir)
    mkdir(img_train_path)
    mkdir(img_test_path)
    mkdir(img_val_path)
    mkdir(label_train_path)
    mkdir(label_test_path)
    mkdir(label_val_path)

    # Split ratios: 80% train, 10% val, 10% test; adjust as needed
    train_percent = 0.8
    val_percent = 0.1
    test_percent = 0.1

    total_txt = os.listdir(txt_dir)
    num_txt = len(total_txt)
    list_all_txt = range(num_txt)

    num_train = int(num_txt * train_percent)
    num_val = int(num_txt * val_percent)
    num_test = num_txt - num_train - num_val

    # Sample the train indices, then split the remainder into val and test
    train = random.sample(list_all_txt, num_train)
    val_test = [i for i in list_all_txt if i not in train]
    val = random.sample(val_test, num_val)
    print("train: {}, val: {}, test: {}".format(len(train), len(val), len(val_test) - len(val)))

    for i in list_all_txt:
        name = total_txt[i][:-4]

        srcImage = os.path.join(image_dir, name + '.png')
        srcLabel = os.path.join(txt_dir, name + '.txt')

        if i in train:
            dst_train_Image = os.path.join(img_train_path, name + '.png')
            dst_train_Label = os.path.join(label_train_path, name + '.txt')
            shutil.copyfile(srcImage, dst_train_Image)
            shutil.copyfile(srcLabel, dst_train_Label)
        elif i in val:
            dst_val_Image = os.path.join(img_val_path, name + '.png')
            dst_val_Label = os.path.join(label_val_path, name + '.txt')
            shutil.copyfile(srcImage, dst_val_Image)
            shutil.copyfile(srcLabel, dst_val_Label)
        else:
            dst_test_Image = os.path.join(img_test_path, name + '.png')
            dst_test_Label = os.path.join(label_test_path, name + '.txt')
            shutil.copyfile(srcImage, dst_test_Image)
            shutil.copyfile(srcLabel, dst_test_Label)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='split datasets to train,val,test params')
    parser.add_argument('--image-dir', type=str, default='D:/study/cnn/yolo/ultralytics/data/image', help='image path dir')
    parser.add_argument('--txt-dir', type=str, default='D:/study/cnn/yolo/ultralytics/data/txt', help='txt path dir')
    parser.add_argument('--save-dir', type=str, default='D:/study/cnn/yolo/ultralytics/data/split', help='save dir')
    args = parser.parse_args()

    image_dir = args.image_dir
    txt_dir = args.txt_dir
    save_dir = args.save_dir
    main(image_dir, txt_dir, save_dir)
The directory structure after the split:
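Based on the split script above, the save directory should end up looking roughly like this:

data/split
├── images
│   ├── train
│   ├── val
│   └── test
└── labels
    ├── train
    ├── val
    └── test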
Since I have very few annotated images, the train, val, and test folders all ended up using the same 32 images; decide for yourself based on your own dataset.
3. Configuration Files
3.1 Modify coco8-seg.yaml
Edit the ultralytics\ultralytics\cfg\datasets\coco8-seg.yaml configuration file as follows (absolute paths are recommended):
train: D:/study/cnn/yolo/ultralytics/data/split/images/train
val: D:/study/cnn/yolo/ultralytics/data/split/images/val
test: D:/study/cnn/yolo/ultralytics/data/split/images/test

names:
  0: slot
  1: lane
  2: zerba_lane
  3: STR_lane
  4: double_arrow
  5: jianshudai
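Optionally, a quick check that the YAML parses and that the three split directories exist; this is only a sketch, assuming PyYAML (already a dependency of ultralytics) and the paths shown above:

# Sanity-check the dataset YAML and its split directories
import os
import yaml

cfg_path = 'D:/study/cnn/yolo/ultralytics/ultralytics/cfg/datasets/coco8-seg.yaml'
with open(cfg_path, 'r', encoding='utf-8') as f:
    cfg = yaml.safe_load(f)

print(cfg['names'])  # class index -> name mapping
for split in ('train', 'val', 'test'):
    print(split, cfg.get(split), os.path.isdir(str(cfg.get(split))))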
4. Training
4.1 Download the pretrained weights
Download the pretrained weights yolov8n-seg.pt from the YOLOv8 GitHub repository, create a weight folder under ultralytics\ultralytics\, and place the pretrained weights inside it.
4.2 Training
Step 1: Modify the training parameters in ultralytics\ultralytics\cfg\default.yaml according to your own setup.
Step 2: Run the code below (absolute paths are recommended):
# YOLOv8 instance segmentation training code
from ultralytics import YOLO

# Load a model
model = YOLO('D:/study/cnn/yolo/ultralytics/weight/yolov8n-seg.pt')

# Train the model
results = model.train(data='D:/study/cnn/yolo/ultralytics/ultralytics/cfg/datasets/coco8-seg.yaml', epochs=700, imgsz=640)
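Instead of editing default.yaml, common settings can also be passed directly to model.train(); a sketch with a few frequently adjusted arguments (the batch, device, and workers values here are placeholders, not recommendations):

# Training with overrides passed in code rather than via default.yaml
from ultralytics import YOLO

model = YOLO('D:/study/cnn/yolo/ultralytics/weight/yolov8n-seg.pt')
results = model.train(
    data='D:/study/cnn/yolo/ultralytics/ultralytics/cfg/datasets/coco8-seg.yaml',
    epochs=700,
    imgsz=640,
    batch=16,    # images per batch
    device=0,    # GPU index, or 'cpu'
    workers=4,   # dataloader worker processes
)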
5. Validation
# YOLOv8 instance segmentation validation code
from ultralytics import YOLO

# Load the trained model
model = YOLO('D:/study/cnn/yolo/ultralytics/runs/segment/train4/weights/best.pt')

# Validate the model
results = model.val(data='D:/study/cnn/yolo/ultralytics/ultralytics/cfg/datasets/coco8-seg.yaml', imgsz=640)
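The object returned by model.val() carries the usual metrics. Continuing from the results variable above, a brief sketch of reading the box and mask mAP values (attribute names as in the Ultralytics validation docs):

print(results.box.map)     # box mAP50-95
print(results.box.map50)   # box mAP50
print(results.seg.map)     # mask mAP50-95
print(results.seg.map50)   # mask mAP50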
One of the images from the validation results:
6. Inference
Modify the paths to match your own setup (absolute paths are recommended).
# YOLOv8 instance segmentation inference code
from ultralytics import YOLO

# Load a model
model = YOLO('D:/study/cnn/yolo/ultralytics/runs/segment/train4/weights/best.pt')

# Predict with the model
results = model('D:/study/cnn/yolo/ultralytics/data/image/1.png', save=True)  # predict on an image

# Process the list of results
for result in results:
    boxes = result.boxes  # Boxes object for bounding-box outputs
    masks = result.masks  # Masks object for segmentation mask outputs
    probs = result.probs  # Probs object for classification outputs
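To get at the numbers themselves, the boxes and masks expose tensors and polygons; a sketch continuing from the results list above (note that probs is None for segmentation models, it is only populated for classification):

for result in results:
    print(result.boxes.xyxy)   # (N, 4) tensor of box corners
    print(result.boxes.cls)    # (N,) tensor of class indices
    print(result.boxes.conf)   # (N,) tensor of confidences
    if result.masks is not None and len(result.masks.xy) > 0:
        print(result.masks.xy[0])  # polygon of the first mask, in pixel coordinates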
The inference result on the image:
7. Export an ONNX Model
Modify the paths to match your own setup (absolute paths are recommended).
# YOLOv8-seg ONNX export code
from ultralytics import YOLO

# Load a model
model = YOLO('D:/study/cnn/yolo/ultralytics/weight/yolov8n-seg.pt')

# Export the model
model.export(format='onnx')
The exported ONNX model:
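To confirm the export worked, the ONNX file can be opened with onnxruntime; a sketch, assuming pip install onnxruntime and that the resulting yolov8n-seg.onnx was written next to the .pt file:

# Inspect the exported ONNX model's inputs and outputs
import onnxruntime as ort

sess = ort.InferenceSession('D:/study/cnn/yolo/ultralytics/weight/yolov8n-seg.onnx')
print([(i.name, i.shape) for i in sess.get_inputs()])    # input tensor name and shape
print([(o.name, o.shape) for o in sess.get_outputs()])   # detection and mask-prototype outputs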