文章目录
- 模型配置文件
- 骨干网络 CSPBepBackbone
- 颈部网络 CSPRepBiFPANNeck
- 检测头 EffiDeHead
- 构建模型Model
模型配置文件
# YOLOv6-L model configuration.
# Three top-level dicts (model / solver / data_aug) plus the training mode flag;
# consumed by build_network() via attribute-style config access.
model = dict(
    type='YOLOv6l',
    pretrained='weights/yolov6l.pt',
    depth_multiple=1.0,   # depth scaling factor for num_repeats
    width_multiple=1.0,   # width scaling factor for channel counts
    backbone=dict(
        type='CSPBepBackbone',
        num_repeats=[1, 6, 12, 18, 6],
        out_channels=[64, 128, 256, 512, 1024],
        csp_e=float(1)/2,
        fuse_P2=True,     # also output the P2 (1/4) feature map
    ),
    neck=dict(
        type='CSPRepBiFPANNeck',
        num_repeats=[12, 12, 12, 12],
        out_channels=[256, 128, 128, 256, 256, 512],
        csp_e=float(1)/2,
    ),
    head=dict(
        type='EffiDeHead',
        in_channels=[128, 256, 512],
        num_layers=3,
        begin_indices=24,
        anchors=3,
        anchors_init=[[10, 13, 19, 19, 33, 23],
                      [30, 61, 59, 59, 59, 119],
                      [116, 90, 185, 185, 373, 326]],
        out_indices=[17, 20, 23],
        strides=[8, 16, 32],
        atss_warmup_epoch=0,
        iou_type='giou',
        use_dfl=True,
        reg_max=16,  # if use_dfl is False, please set reg_max to 0
        # NOTE: in the original collapsed text this entry was accidentally
        # swallowed by the comment above; restored here.
        distill_weight={
            'class': 2.0,
            'dfl': 1.0,
        },
    )
)

solver = dict(
    optim='SGD',
    lr_scheduler='Cosine',
    lr0=0.0032,
    lrf=0.12,
    momentum=0.843,
    weight_decay=0.00036,
    warmup_epochs=2.0,
    warmup_momentum=0.5,
    warmup_bias_lr=0.05,
)

data_aug = dict(
    hsv_h=0.0138,
    hsv_s=0.664,
    hsv_v=0.464,
    degrees=0.373,
    translate=0.245,
    scale=0.898,
    shear=0.602,
    flipud=0.00856,
    fliplr=0.5,
    mosaic=1.0,
    mixup=0.243,
)

training_mode = "conv_silu"
# use normal conv to speed up training and further improve accuracy.
骨干网络 CSPBepBackbone
stage_block中包含block这个模块。
class CSPBepBackbone(nn.Module):
    """CSPBepBackbone module.

    EfficientRep-style backbone built from a stem plus four ERBlock stages.
    Each stage is a stride-2 downsampling block followed by a CSP stage block
    (BepC3 or MBLABlock); the last stage appends an SPPF-style channel-merge
    layer. Returns the C3/C4/C5 feature maps (plus C2 when fuse_P2 is set).
    """

    def __init__(self, in_channels=3, channels_list=None, num_repeats=None,
                 block=RepVGGBlock, csp_e=float(1)/2, fuse_P2=False,
                 cspsppf=False, stage_block_type="BepC3"):
        super().__init__()
        assert channels_list is not None
        assert num_repeats is not None

        # Resolve the CSP stage block class from its config name.
        if stage_block_type == "BepC3":
            stage_block = BepC3
        elif stage_block_type == "MBLABlock":
            stage_block = MBLABlock
        else:
            raise NotImplementedError

        self.fuse_P2 = fuse_P2

        self.stem = block(in_channels=in_channels,
                          out_channels=channels_list[0],
                          kernel_size=3, stride=2)

        def _make_stage(c_in, c_out, n):
            # stride-2 downsample conv followed by the repeated CSP stage block
            return nn.Sequential(
                block(in_channels=c_in, out_channels=c_out,
                      kernel_size=3, stride=2),
                stage_block(in_channels=c_out, out_channels=c_out,
                            n=n, e=csp_e, block=block),
            )

        self.ERBlock_2 = _make_stage(channels_list[0], channels_list[1], num_repeats[1])
        self.ERBlock_3 = _make_stage(channels_list[1], channels_list[2], num_repeats[2])
        self.ERBlock_4 = _make_stage(channels_list[2], channels_list[3], num_repeats[3])

        # Choose the SPPF variant: SiLU blocks pair with SPPF/CSPSPPF,
        # everything else with the "Sim" (ReLU) variants.
        if cspsppf:
            channel_merge_layer = CSPSPPF if block == ConvBNSiLU else SimCSPSPPF
        else:
            channel_merge_layer = SPPF if block == ConvBNSiLU else SimSPPF

        self.ERBlock_5 = nn.Sequential(
            block(in_channels=channels_list[3], out_channels=channels_list[4],
                  kernel_size=3, stride=2),
            stage_block(in_channels=channels_list[4], out_channels=channels_list[4],
                        n=num_repeats[4], e=csp_e, block=block),
            channel_merge_layer(in_channels=channels_list[4],
                                out_channels=channels_list[4], kernel_size=5),
        )

    def forward(self, x):
        """Run the backbone; return a tuple of pyramid features.

        For a 640x640 input (YOLOv6-L channels): C3 80x80, C4 40x40, C5 20x20,
        with C2 160x160 prepended when fuse_P2 is enabled.
        """
        feats = []
        x = self.stem(x)          # 1/2 resolution
        x = self.ERBlock_2(x)     # C2: 1/4 resolution
        if self.fuse_P2:
            feats.append(x)
        # C3 (1/8), C4 (1/16), C5 (1/32, with SPPF merge)
        for stage in (self.ERBlock_3, self.ERBlock_4, self.ERBlock_5):
            x = stage(x)
            feats.append(x)
        return tuple(feats)
颈部网络 CSPRepBiFPANNeck
class CSPRepBiFPANNeck(nn.Module):
    """CSPRepBiFPANNeck module.

    CSP-style RepBi-PAN neck: a top-down FPN path built from BiFusion blocks
    and CSP stage blocks, followed by a bottom-up PAN path. Takes the four
    backbone features (C2..C5) and returns [P3, P4, P5].
    Comments below give shapes for the YOLOv6-L config
    (channels_list = [64, 128, 256, 512, 1024, 256, 128, 128, 256, 256, 512]).
    """
    def __init__(self, channels_list=None, num_repeats=None, block=BottleRep, csp_e=float(1)/2, stage_block_type="BepC3"):
        super().__init__()
        # channels_list [64, 128, 256, 512, 1024, 256, 128, 128, 256, 256, 512]
        assert channels_list is not None
        # num_repeat [1, 6, 12, 18, 6, 12, 12, 12, 12]
        assert num_repeats is not None
        # Resolve the CSP stage block class from its config name.
        if stage_block_type == "BepC3":
            stage_block = BepC3
        elif stage_block_type == "MBLABlock":
            stage_block = MBLABlock
        else:
            raise NotImplementedError
        # 1x1 conv: compress C5 channels before the first fusion.
        self.reduce_layer0 = ConvBNReLU(
            in_channels=channels_list[4],   # 1024
            out_channels=channels_list[5],  # 256
            kernel_size=1,
            stride=1)
        # Fuse upsampled P5 with C4 and downsampled C3.
        self.Bifusion0 = BiFusion(
            in_channels=[channels_list[3], channels_list[2]],  # 512, 256
            out_channels=channels_list[5],  # 256
        )
        self.Rep_p4 = stage_block(
            in_channels=channels_list[5],   # 256
            out_channels=channels_list[5],  # 256
            n=num_repeats[5],
            e=csp_e,
            block=block)
        # 1x1 conv: compress the P4 path before the second fusion.
        self.reduce_layer1 = ConvBNReLU(
            in_channels=channels_list[5],   # 256
            out_channels=channels_list[6],  # 128
            kernel_size=1,
            stride=1)
        # Fuse upsampled P4 with C3 and downsampled C2.
        self.Bifusion1 = BiFusion(
            in_channels=[channels_list[2], channels_list[1]],  # 256, 128
            out_channels=channels_list[6],  # 128
        )
        self.Rep_p3 = stage_block(
            in_channels=channels_list[6],   # 128
            out_channels=channels_list[6],  # 128
            n=num_repeats[6],
            e=csp_e,
            block=block)
        # Bottom-up PAN path: stride-2 convs and concat-then-merge stage blocks.
        self.downsample2 = ConvBNReLU(
            in_channels=channels_list[6],   # 128
            out_channels=channels_list[7],  # 128
            kernel_size=3,
            stride=2)
        self.Rep_n3 = stage_block(
            in_channels=channels_list[6] + channels_list[7],  # 128 + 128
            out_channels=channels_list[8],  # 256
            n=num_repeats[7],
            e=csp_e,
            block=block)
        self.downsample1 = ConvBNReLU(
            in_channels=channels_list[8],   # 256
            out_channels=channels_list[9],  # 256
            kernel_size=3,
            stride=2)
        self.Rep_n4 = stage_block(
            in_channels=channels_list[5] + channels_list[9],  # 256 + 256
            out_channels=channels_list[10],  # 512
            n=num_repeats[8],
            e=csp_e,
            block=block)

    def forward(self, input):
        """FPN (top-down) then PAN (bottom-up); returns [P3, P4, P5]."""
        # x3: C2 = 160x160x128
        # x2: C3 = 80x80x256
        # x1: C4 = 40x40x512
        # x0: C5 = 20x20x1024
        (x3, x2, x1, x0) = input
        # 20x20x1024 --> 20x20x256
        fpn_out0 = self.reduce_layer0(x0)
        # BiFusion0 inputs:
        #   x[0]: 20x20x256 --> 40x40x256 (upsample)
        #   x[1]: 40x40x512 --> 40x40x256
        #   x[2]: 80x80x256 --> 40x40x256 (downsample)
        # cat --> 40x40x768 --> 40x40x256
        f_concat_layer0 = self.Bifusion0([fpn_out0, x1, x2])
        # feature-merge conv stack
        f_out0 = self.Rep_p4(f_concat_layer0)
        # 40x40x256 --> 40x40x128
        fpn_out1 = self.reduce_layer1(f_out0)
        # BiFusion1 inputs:
        #   x[0]: 40x40x128 --> 80x80x128 (upsample)
        #   x[1]: 80x80x256 --> 80x80x128
        #   x[2]: 160x160x128 --> 80x80x128 (downsample)
        # cat --> 80x80x384 --> 80x80x128
        f_concat_layer1 = self.Bifusion1([fpn_out1, x2, x3])
        # P3: 1/8 feature map, 80x80x128
        pan_out2 = self.Rep_p3(f_concat_layer1)
        # 80x80x128 --> 40x40x128
        down_feat1 = self.downsample2(pan_out2)
        # cat: 40x40x256
        p_concat_layer1 = torch.cat([down_feat1, fpn_out1], 1)
        # P4: 1/16 feature map, 40x40x256
        pan_out1 = self.Rep_n3(p_concat_layer1)
        # 40x40x256 --> 20x20x256
        down_feat0 = self.downsample1(pan_out1)
        # cat: 20x20x512
        p_concat_layer2 = torch.cat([down_feat0, fpn_out0], 1)
        # P5: 1/32 feature map, 20x20x512
        pan_out0 = self.Rep_n4(p_concat_layer2)
        # [P3 (1/8), P4 (1/16), P5 (1/32)]
        outputs = [pan_out2, pan_out1, pan_out0]
        return outputs
检测头 EffiDeHead
def build_effidehead_layer(channels_list, num_anchors, num_classes, reg_max=16, num_layers=3):chx = [6, 8, 10] if num_layers == 3 else [8, 9, 10, 11]head_layers = nn.Sequential(# stem0ConvBNSiLU(in_channels=channels_list[chx[0]],out_channels=channels_list[chx[0]],kernel_size=1,stride=1),# cls_conv0ConvBNSiLU(in_channels=channels_list[chx[0]],out_channels=channels_list[chx[0]],kernel_size=3,stride=1),# reg_conv0ConvBNSiLU(in_channels=channels_list[chx[0]],out_channels=channels_list[chx[0]],kernel_size=3,stride=1),# cls_pred0nn.Conv2d(in_channels=channels_list[chx[0]],out_channels=num_classes * num_anchors,kernel_size=1),# reg_pred0nn.Conv2d(in_channels=channels_list[chx[0]],out_channels=4 * (reg_max + num_anchors),kernel_size=1),# stem1ConvBNSiLU(in_channels=channels_list[chx[1]],out_channels=channels_list[chx[1]],kernel_size=1,stride=1),# cls_conv1ConvBNSiLU(in_channels=channels_list[chx[1]],out_channels=channels_list[chx[1]],kernel_size=3,stride=1),# reg_conv1ConvBNSiLU(in_channels=channels_list[chx[1]],out_channels=channels_list[chx[1]],kernel_size=3,stride=1),# cls_pred1nn.Conv2d(in_channels=channels_list[chx[1]],out_channels=num_classes * num_anchors,kernel_size=1),# reg_pred1nn.Conv2d(in_channels=channels_list[chx[1]],out_channels=4 * (reg_max + num_anchors),kernel_size=1),# stem2ConvBNSiLU(in_channels=channels_list[chx[2]],out_channels=channels_list[chx[2]],kernel_size=1,stride=1),# cls_conv2ConvBNSiLU(in_channels=channels_list[chx[2]],out_channels=channels_list[chx[2]],kernel_size=3,stride=1),# reg_conv2ConvBNSiLU(in_channels=channels_list[chx[2]],out_channels=channels_list[chx[2]],kernel_size=3,stride=1),# cls_pred2nn.Conv2d(in_channels=channels_list[chx[2]],out_channels=num_classes * num_anchors,kernel_size=1),# reg_pred2nn.Conv2d(in_channels=channels_list[chx[2]],out_channels=4 * (reg_max + num_anchors),kernel_size=1))if num_layers == 4:# stem3head_layers.add_module('stem3', 
ConvBNSiLU(in_channels=channels_list[chx[3]],out_channels=channels_list[chx[3]],kernel_size=1,stride=1))# cls_conv3head_layers.add_module('cls_conv3',ConvBNSiLU(in_channels=channels_list[chx[3]],out_channels=channels_list[chx[3]],kernel_size=3,stride=1))# reg_conv3head_layers.add_module('reg_conv3', ConvBNSiLU(in_channels=channels_list[chx[3]],out_channels=channels_list[chx[3]],kernel_size=3,stride=1))# cls_pred3head_layers.add_module('cls_pred3',nn.Conv2d(in_channels=channels_list[chx[3]],out_channels=num_classes * num_anchors,kernel_size=1))# reg_pred3head_layers.add_module('reg_pred3',nn.Conv2d(in_channels=channels_list[chx[3]],out_channels=4 * (reg_max + num_anchors),kernel_size=1))return head_layersclass Detect(nn.Module):export = False'''Efficient Decoupled HeadWith hardware-aware degisn, the decoupled head is optimized with hybridchannels methods.通过硬件感知设计,解耦头通过混合通道方法进行优化。'''def __init__(self, num_classes=80, num_layers=3, inplace=True, head_layers=None, use_dfl=True, reg_max=16): super().__init__()assert head_layers is not None# 类别数目self.nc = num_classes # 每个anchor输出维度, nc类别+1是否有目标+4放缩偏移量self.no = num_classes + 5 # 检测层的输出数量(不同尺度个数)self.nl = num_layers # 网格初始化self.grid = [torch.zeros(1)] * num_layersself.prior_prob = 1e-2self.inplace = inplace# stride在build期间进行计算stride = [8, 16, 32] if num_layers == 3 else [8, 16, 32, 64] self.stride = torch.tensor(stride)self.use_dfl = use_dflself.reg_max = reg_maxself.proj_conv = nn.Conv2d(self.reg_max+1, 1, 1, bias=False)self.grid_cell_offset = 0.5self.grid_cell_size = 5.0# 初始化解耦头decouple_headself.stems = nn.ModuleList()self.cls_convs = nn.ModuleList()self.reg_convs = nn.ModuleList()self.cls_preds = nn.ModuleList()self.reg_preds = nn.ModuleList()# Efficient decoupled head layersfor i in range(num_layers):idx = i*5self.stems.append(head_layers[idx])self.cls_convs.append(head_layers[idx+1])self.reg_convs.append(head_layers[idx+2])self.cls_preds.append(head_layers[idx+3])self.reg_preds.append(head_layers[idx+4])def 
initialize_biases(self):for conv in self.cls_preds:b = conv.bias.view(-1, )b.data.fill_(-math.log((1-self.prior_prob) / self.prior_prob))conv.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)w = conv.weightw.data.fill_(0.)conv.weight = torch.nn.Parameter(w, requires_grad=True)for conv in self.reg_preds:b = conv.bias.view(-1, )b.data.fill_(1.0)conv.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)w = conv.weightw.data.fill_(0.)conv.weight = torch.nn.Parameter(w, requires_grad=True)self.proj = nn.Parameter(torch.linspace(0, self.reg_max, self.reg_max+1), requires_grad=False)self.proj_conv.weight = nn.Parameter(self.proj.view([1, self.reg_max+1, 1, 1]).clone().detach(), requires_grad=False)def forward(self, x):# P3:80x80x128; P4:40x40x256; P5:20x20x512 # 如果是训练if self.training:cls_score_list = []reg_distri_list = []# 对于每一个尺度的特征图来说# [bs, num_classes*num_anchors, 80, 80], [bs, 4*(reg_max+num_anchors), 80, 80]# [bs, num_classes*num_anchors, 40, 40], [bs, 4*(reg_max+num_anchors), 40, 40]# [bs, num_classes*num_anchors, 20, 20], [bs, 4*(reg_max+num_anchors), 20, 20]for i in range(self.nl):# [bs,128,80,80]; [bs,256,40,40]; [bs,512,20,20] x[i] = self.stems[i](x[i])cls_x = x[i]reg_x = x[i]# 分类卷积cls_feat = self.cls_convs[i](cls_x)cls_output = self.cls_preds[i](cls_feat)# 损失函数采用nn.BCEWithLogitsLoss()cls_output = torch.sigmoid(cls_output)# 回归卷积reg_feat = self.reg_convs[i](reg_x)reg_output = self.reg_preds[i](reg_feat)cls_score_list.append(cls_output.flatten(2).permute((0, 2, 1)))reg_distri_list.append(reg_output.flatten(2).permute((0, 2, 1)))# 分类:[bs, 8400, num_classes*num_anchors]cls_score_list = torch.cat(cls_score_list, axis=1)# 回归:[bs, 8400, 4*(reg_max+num_anchors)]reg_distri_list = torch.cat(reg_distri_list, axis=1)return x, cls_score_list, reg_distri_list# 如果是验证或者推断else:cls_score_list = []reg_dist_list = []# 对于每一个尺度的特征图来说,anchor_free模式下# [bs, num_classes, 80, 80], [bs, 4*(reg_max+1), 80, 80]# [bs, num_classes, 40, 40], [bs, 4*(reg_max+1), 40, 40]# [bs, 
num_classes, 20, 20], [bs, 4*(reg_max+1), 20, 20] for i in range(self.nl):# [bs,128,80,80]; [bs,256,40,40]; [bs,512,20,20] b, _, h, w = x[i].shapel = h * wx[i] = self.stems[i](x[i])cls_x = x[i]reg_x = x[i]# 分类卷积cls_feat = self.cls_convs[i](cls_x)cls_output = self.cls_preds[i](cls_feat)cls_output = torch.sigmoid(cls_output)# 回归卷积reg_feat = self.reg_convs[i](reg_x)reg_output = self.reg_preds[i](reg_feat)if self.use_dfl:reg_output = reg_output.reshape([-1, 4, self.reg_max+1, l]).permute(0, 2, 1, 3)reg_output = self.proj_conv(F.softmax(reg_output, dim=1))if self.export:cls_score_list.append(cls_output)reg_dist_list.append(reg_output)else:cls_score_list.append(cls_output.reshape([b, self.nc, l]))reg_dist_list.append(reg_output.reshape([b, 4, l]))if self.export:return tuple(torch.cat([cls, reg], 1) for cls, reg in zip(cls_score_list, reg_dist_list))cls_score_list = torch.cat(cls_score_list, axis=-1).permute(0, 2, 1)reg_dist_list = torch.cat(reg_dist_list, axis=-1).permute(0, 2, 1)anchor_points, stride_tensor = generate_anchors(x, self.stride, self.grid_cell_size, self.grid_cell_offset, device=x[0].device, is_eval=True, mode='af')pred_bboxes = dist2bbox(reg_dist_list, anchor_points, box_format='xywh')pred_bboxes *= stride_tensorreturn torch.cat([pred_bboxes,torch.ones((b, pred_bboxes.shape[1], 1), device=pred_bboxes.device, dtype=pred_bboxes.dtype),cls_score_list],axis=-1)
构建模型Model
def make_divisible(x, divisor):
    """Round x up to the nearest multiple of `divisor`."""
    return math.ceil(x / divisor) * divisor


def build_network(config, channels, num_classes, num_layers, fuse_ab=False, distill_ns=False):
    """Build backbone, neck and detection head from a config object.

    Args:
        config: attribute-style config (see the model dict above).
        channels: input image channels.
        num_classes: number of object classes.
        num_layers: 3 for a P5 head, 4 for a P6 head.
        fuse_ab: use the anchor-based + anchor-free fused head.
        distill_ns: use the distillation head for n/s models (P5 only).
    Returns:
        (backbone, neck, head) modules.
    """
    # model depth multiple
    depth_mul = config.model.depth_multiple
    # layer channel multiple
    width_mul = config.model.width_multiple
    # backbone settings
    num_repeat_backbone = config.model.backbone.num_repeats
    channels_list_backbone = config.model.backbone.out_channels
    # fusing P2 improves small-object detection
    fuse_P2 = config.model.backbone.get('fuse_P2')
    cspsppf = config.model.backbone.get('cspsppf')
    # neck settings
    num_repeat_neck = config.model.neck.num_repeats
    channels_list_neck = config.model.neck.out_channels
    # head settings
    use_dfl = config.model.head.use_dfl
    reg_max = config.model.head.reg_max

    # Scale repeats by the depth multiple (entries <= 1 kept as-is);
    # e.g. YOLOv6-L: [1, 6, 12, 18, 6, 12, 12, 12, 12].
    num_repeat = [(max(round(i * depth_mul), 1) if i > 1 else i)
                  for i in (num_repeat_backbone + num_repeat_neck)]
    # Scale channels by the width multiple, rounded up to a multiple of 8;
    # e.g. YOLOv6-L: [64, 128, 256, 512, 1024, 256, 128, 128, 256, 256, 512].
    channels_list = [make_divisible(i * width_mul, 8)
                     for i in (channels_list_backbone + channels_list_neck)]

    # basic conv block (RepVGG / ConvBNSiLU / ...) chosen by training mode
    block = get_block(config.training_mode)
    # NOTE(review): eval() resolves class names from config strings — only
    # safe for trusted config files, never for untrusted input.
    BACKBONE = eval(config.model.backbone.type)
    NECK = eval(config.model.neck.type)

    if 'CSP' in config.model.backbone.type:
        if "stage_block_type" in config.model.backbone:
            stage_block_type = config.model.backbone.stage_block_type
        else:
            stage_block_type = "BepC3"  # default
        backbone = BACKBONE(
            in_channels=channels,
            channels_list=channels_list,
            num_repeats=num_repeat,
            block=block,
            csp_e=config.model.backbone.csp_e,
            fuse_P2=fuse_P2,
            cspsppf=cspsppf,
            stage_block_type=stage_block_type
        )
        neck = NECK(
            channels_list=channels_list,
            num_repeats=num_repeat,
            block=block,
            csp_e=config.model.neck.csp_e,
            stage_block_type=stage_block_type
        )
    else:
        backbone = BACKBONE(
            in_channels=channels,
            channels_list=channels_list,
            num_repeats=num_repeat,
            block=block,
            fuse_P2=fuse_P2,
            cspsppf=cspsppf
        )
        neck = NECK(
            channels_list=channels_list,
            num_repeats=num_repeat,
            block=block
        )

    if distill_ns:
        from yolov6.models.heads.effidehead_distill_ns import Detect, build_effidehead_layer
        if num_layers != 3:
            # distillation head only supports the 3-scale (P5) layout
            LOGGER.error('ERROR in: Distill mode not fit on n/s models with P6 head.\n')
            exit()
        head_layers = build_effidehead_layer(channels_list, 1, num_classes, reg_max=reg_max)
        head = Detect(num_classes, num_layers, head_layers=head_layers, use_dfl=use_dfl)
    elif fuse_ab:
        from yolov6.models.heads.effidehead_fuseab import Detect, build_effidehead_layer
        anchors_init = config.model.head.anchors_init
        head_layers = build_effidehead_layer(channels_list, 3, num_classes, reg_max=reg_max, num_layers=num_layers)
        head = Detect(num_classes, anchors_init, num_layers, head_layers=head_layers, use_dfl=use_dfl)
    else:
        from yolov6.models.effidehead import Detect, build_effidehead_layer
        head_layers = build_effidehead_layer(channels_list, 1, num_classes, reg_max=reg_max, num_layers=num_layers)
        head = Detect(num_classes, num_layers, head_layers=head_layers, use_dfl=use_dfl)

    return backbone, neck, head


class Model(nn.Module):
    export = False
    '''YOLOv6 model with backbone, neck and head.
    The default parts are EfficientRep Backbone, Rep-PAN and
    Efficient Decoupled Head.
    '''
    def __init__(self, config, channels=3, num_classes=None, fuse_ab=False, distill_ns=False):
        super().__init__()
        # Build the network; num_layers selects a P5 (3-scale) or P6 (4-scale) head.
        num_layers = config.model.head.num_layers
        self.backbone, self.neck, self.detect = build_network(
            config, channels, num_classes, num_layers,
            fuse_ab=fuse_ab, distill_ns=distill_ns)
        # Init Detect head
        self.stride = self.detect.stride
        self.detect.initialize_biases()
        # Init weights
        initialize_weights(self)

    def forward(self, x):
        # export mode is off by default
        export_mode = torch.onnx.is_in_onnx_export() or self.export
        x = self.backbone(x)
        x = self.neck(x)
        # keep the neck feature maps for the training loss
        # (fix: was `if export_mode == False:` / `if export_mode is True`,
        # which breaks if self.export is set to a truthy non-bool)
        if not export_mode:
            featmaps = list(x)
        x = self.detect(x)
        return x if export_mode else [x, featmaps]

    def _apply(self, fn):
        # keep stride and grid tensors on the same device/dtype as the model
        self = super()._apply(fn)
        self.detect.stride = fn(self.detect.stride)
        self.detect.grid = list(map(fn, self.detect.grid))
        return self


def build_model(cfg, num_classes, device, fuse_ab=False, distill_ns=False):
    """Construct a Model from cfg and move it to `device`."""
    model = Model(cfg, channels=3, num_classes=num_classes,
                  fuse_ab=fuse_ab, distill_ns=distill_ns).to(device)
    return model