YOLOv6代码解读[02] configs/hub/yolov6l_finetune.py文件解读

文章目录

    • 模型配置文件
    • 骨干网络 CSPBepBackbone
    • 颈部网络 CSPRepBiFPANNeck
    • 检测头 EffiDeHead
    • 构建模型Model

模型配置文件

# YOLOv6l model configuration (finetune).
model = dict(
    type='YOLOv6l',
    pretrained='weights/yolov6l.pt',
    depth_multiple=1.0,   # model depth scaling factor
    width_multiple=1.0,   # layer channel scaling factor
    backbone=dict(
        type='CSPBepBackbone',
        num_repeats=[1, 6, 12, 18, 6],
        out_channels=[64, 128, 256, 512, 1024],
        csp_e=float(1)/2,
        fuse_P2=True,
    ),
    neck=dict(
        type='CSPRepBiFPANNeck',
        num_repeats=[12, 12, 12, 12],
        out_channels=[256, 128, 128, 256, 256, 512],
        csp_e=float(1)/2,
    ),
    head=dict(
        type='EffiDeHead',
        in_channels=[128, 256, 512],
        num_layers=3,
        begin_indices=24,
        anchors=3,
        anchors_init=[[10, 13, 19, 19, 33, 23],
                      [30, 61, 59, 59, 59, 119],
                      [116, 90, 185, 185, 373, 326]],
        out_indices=[17, 20, 23],
        strides=[8, 16, 32],
        atss_warmup_epoch=0,
        iou_type='giou',
        use_dfl=True,
        reg_max=16,  # if use_dfl is False, please set reg_max to 0
        distill_weight={
            'class': 2.0,
            'dfl': 1.0,
        },
    )
)

# Optimizer / learning-rate schedule settings.
solver = dict(
    optim='SGD',
    lr_scheduler='Cosine',
    lr0=0.0032,
    lrf=0.12,
    momentum=0.843,
    weight_decay=0.00036,
    warmup_epochs=2.0,
    warmup_momentum=0.5,
    warmup_bias_lr=0.05
)

# Data augmentation settings.
data_aug = dict(
    hsv_h=0.0138,
    hsv_s=0.664,
    hsv_v=0.464,
    degrees=0.373,
    translate=0.245,
    scale=0.898,
    shear=0.602,
    flipud=0.00856,
    fliplr=0.5,
    mosaic=1.0,
    mixup=0.243,
)

# Use normal conv to speed up training and further improve accuracy.
training_mode = "conv_silu"

骨干网络 CSPBepBackbone

stage_block中包含block这个模块。

class CSPBepBackbone(nn.Module):
    """CSPBepBackbone module.

    Stem conv followed by four ER blocks. Each ER block is a stride-2
    downsampling conv (`block`) plus a CSP-style stage block (BepC3 or
    MBLABlock); the last ER block additionally appends an SPPF-style
    channel-merge layer.
    """

    def __init__(self, in_channels=3, channels_list=None, num_repeats=None,
                 block=RepVGGBlock, csp_e=float(1)/2, fuse_P2=False,
                 cspsppf=False, stage_block_type="BepC3"):
        super().__init__()
        assert channels_list is not None
        assert num_repeats is not None

        # Select the stage-block implementation used inside every ER block.
        if stage_block_type == "BepC3":
            stage_block = BepC3
        elif stage_block_type == "MBLABlock":
            stage_block = MBLABlock
        else:
            raise NotImplementedError

        # When True, forward() also emits the P2 (C2, 1/4-scale) feature map,
        # which helps small-object detection.
        self.fuse_P2 = fuse_P2

        self.stem = block(
            in_channels=in_channels,
            out_channels=channels_list[0],
            kernel_size=3,
            stride=2
        )
        self.ERBlock_2 = nn.Sequential(
            block(
                in_channels=channels_list[0],
                out_channels=channels_list[1],
                kernel_size=3,
                stride=2
            ),
            stage_block(
                in_channels=channels_list[1],
                out_channels=channels_list[1],
                n=num_repeats[1],
                e=csp_e,
                block=block
            )
        )
        self.ERBlock_3 = nn.Sequential(
            block(
                in_channels=channels_list[1],
                out_channels=channels_list[2],
                kernel_size=3,
                stride=2
            ),
            stage_block(
                in_channels=channels_list[2],
                out_channels=channels_list[2],
                n=num_repeats[2],
                e=csp_e,
                block=block
            )
        )
        self.ERBlock_4 = nn.Sequential(
            block(
                in_channels=channels_list[2],
                out_channels=channels_list[3],
                kernel_size=3,
                stride=2
            ),
            stage_block(
                in_channels=channels_list[3],
                out_channels=channels_list[3],
                n=num_repeats[3],
                e=csp_e,
                block=block
            )
        )

        # SPPF-style merge: SiLU blocks get SPPF/CSPSPPF, otherwise the
        # "Sim" (ReLU) variants.
        channel_merge_layer = SPPF if block == ConvBNSiLU else SimSPPF
        if cspsppf:
            channel_merge_layer = CSPSPPF if block == ConvBNSiLU else SimCSPSPPF

        self.ERBlock_5 = nn.Sequential(
            block(
                in_channels=channels_list[3],
                out_channels=channels_list[4],
                kernel_size=3,
                stride=2
            ),
            stage_block(
                in_channels=channels_list[4],
                out_channels=channels_list[4],
                n=num_repeats[4],
                e=csp_e,
                block=block
            ),
            channel_merge_layer(
                in_channels=channels_list[4],
                out_channels=channels_list[4],
                kernel_size=5
            )
        )

    def forward(self, x):
        """Return multi-scale features (C3, C4, C5), prefixed by C2 when fuse_P2.

        Shape comments below assume a 640x640x3 input.
        """
        outputs = []
        # x: 640x640x3 --> 320x320x64
        x = self.stem(x)
        # C2: 320x320x64 --> 160x160x128
        x = self.ERBlock_2(x)
        if self.fuse_P2:
            outputs.append(x)
        # C3: 160x160x128 --> 80x80x256
        x = self.ERBlock_3(x)
        outputs.append(x)
        # C4: 80x80x256 --> 40x40x512
        x = self.ERBlock_4(x)
        outputs.append(x)
        # C5: 40x40x512 --> 20x20x1024
        x = self.ERBlock_5(x)
        outputs.append(x)
        return tuple(outputs)

颈部网络 CSPRepBiFPANNeck

class CSPRepBiFPANNeck(nn.Module):
    """CSPRepBiFPANNeck module.

    Bi-directional FPN/PAN neck: top-down BiFusion path producing P3/P4,
    then bottom-up downsampling path producing P4/P5, with CSP stage
    blocks (BepC3 or MBLABlock) fusing features at every step.
    """

    def __init__(self, channels_list=None, num_repeats=None, block=BottleRep,
                 csp_e=float(1)/2, stage_block_type="BepC3"):
        super().__init__()
        # channels_list: [64, 128, 256, 512, 1024, 256, 128, 128, 256, 256, 512]
        assert channels_list is not None
        # num_repeats: [1, 6, 12, 18, 6, 12, 12, 12, 12]
        assert num_repeats is not None

        # Select the stage-block implementation.
        if stage_block_type == "BepC3":
            stage_block = BepC3
        elif stage_block_type == "MBLABlock":
            stage_block = MBLABlock
        else:
            raise NotImplementedError

        self.reduce_layer0 = ConvBNReLU(
            in_channels=channels_list[4],   # 1024
            out_channels=channels_list[5],  # 256
            kernel_size=1,
            stride=1
        )
        self.Bifusion0 = BiFusion(
            in_channels=[channels_list[3], channels_list[2]],  # 512, 256
            out_channels=channels_list[5],                     # 256
        )
        self.Rep_p4 = stage_block(
            in_channels=channels_list[5],   # 256
            out_channels=channels_list[5],  # 256
            n=num_repeats[5],
            e=csp_e,
            block=block
        )
        self.reduce_layer1 = ConvBNReLU(
            in_channels=channels_list[5],   # 256
            out_channels=channels_list[6],  # 128
            kernel_size=1,
            stride=1
        )
        self.Bifusion1 = BiFusion(
            in_channels=[channels_list[2], channels_list[1]],  # 256, 128
            out_channels=channels_list[6],                     # 128
        )
        self.Rep_p3 = stage_block(
            in_channels=channels_list[6],   # 128
            out_channels=channels_list[6],  # 128
            n=num_repeats[6],
            e=csp_e,
            block=block
        )
        self.downsample2 = ConvBNReLU(
            in_channels=channels_list[6],   # 128
            out_channels=channels_list[7],  # 128
            kernel_size=3,
            stride=2
        )
        self.Rep_n3 = stage_block(
            in_channels=channels_list[6] + channels_list[7],  # 128 + 128
            out_channels=channels_list[8],                    # 256
            n=num_repeats[7],
            e=csp_e,
            block=block
        )
        self.downsample1 = ConvBNReLU(
            in_channels=channels_list[8],   # 256
            out_channels=channels_list[9],  # 256
            kernel_size=3,
            stride=2
        )
        self.Rep_n4 = stage_block(
            in_channels=channels_list[5] + channels_list[9],  # 256 + 256
            out_channels=channels_list[10],                   # 512
            n=num_repeats[8],
            e=csp_e,
            block=block
        )

    def forward(self, input):
        """Fuse backbone features into [P3, P4, P5] pyramid outputs.

        Shape comments assume a 640x640 input image.
        """
        # x3: C2 = 160x160x128
        # x2: C3 = 80x80x256
        # x1: C4 = 40x40x512
        # x0: C5 = 20x20x1024
        (x3, x2, x1, x0) = input

        # 20x20x1024 ---> 20x20x256
        fpn_out0 = self.reduce_layer0(x0)
        # BiFusion0 inputs:
        #   x[0]: 20x20x256  ---> 40x40x256 (upsample)
        #   x[1]: 40x40x512  ---> 40x40x256
        #   x[2]: 80x80x256  ---> 80x80x256 ---> 40x40x256 (downsample)
        # cat(x[0], x[1], x[2]) ---> 40x40x768 ---> 40x40x256
        f_concat_layer0 = self.Bifusion0([fpn_out0, x1, x2])
        # Feature-fusion conv stack.
        f_out0 = self.Rep_p4(f_concat_layer0)

        # 40x40x256 ---> 40x40x128
        fpn_out1 = self.reduce_layer1(f_out0)
        # BiFusion1 inputs:
        #   x[0]: 40x40x128   ---> 80x80x128 (upsample)
        #   x[1]: 80x80x256   ---> 80x80x128
        #   x[2]: 160x160x128 ---> 160x160x128 ---> 80x80x128 (downsample)
        # cat(x[0], x[1], x[2]) ---> 80x80x384 ---> 80x80x128
        f_concat_layer1 = self.Bifusion1([fpn_out1, x2, x3])
        # P3: 1/8-scale feature map, 80x80x128.
        pan_out2 = self.Rep_p3(f_concat_layer1)

        # 80x80x128 ---> 40x40x128
        down_feat1 = self.downsample2(pan_out2)
        # cat: 40x40x256
        p_concat_layer1 = torch.cat([down_feat1, fpn_out1], 1)
        # P4: 1/16-scale feature map, 40x40x256.
        pan_out1 = self.Rep_n3(p_concat_layer1)

        # 40x40x256 ---> 20x20x256
        down_feat0 = self.downsample1(pan_out1)
        # cat: 20x20x512
        p_concat_layer2 = torch.cat([down_feat0, fpn_out0], 1)
        # P5: 1/32-scale feature map, 20x20x512.
        pan_out0 = self.Rep_n4(p_concat_layer2)

        # [P3 80x80x128, P4 40x40x256, P5 20x20x512]
        outputs = [pan_out2, pan_out1, pan_out0]
        return outputs

检测头 EffiDeHead

def build_effidehead_layer(channels_list, num_anchors, num_classes, reg_max=16, num_layers=3):
    """Build the layers of the efficient decoupled head.

    Returns an nn.Sequential holding, per detection scale, five modules in
    order: stem, cls_conv, reg_conv, cls_pred, reg_pred (Detect indexes
    them as head_layers[i*5 + k]).
    """
    # Indices into channels_list for each scale's input channels.
    chx = [6, 8, 10] if num_layers == 3 else [8, 9, 10, 11]

    head_layers = nn.Sequential(
        # stem0
        ConvBNSiLU(in_channels=channels_list[chx[0]], out_channels=channels_list[chx[0]], kernel_size=1, stride=1),
        # cls_conv0
        ConvBNSiLU(in_channels=channels_list[chx[0]], out_channels=channels_list[chx[0]], kernel_size=3, stride=1),
        # reg_conv0
        ConvBNSiLU(in_channels=channels_list[chx[0]], out_channels=channels_list[chx[0]], kernel_size=3, stride=1),
        # cls_pred0
        nn.Conv2d(in_channels=channels_list[chx[0]], out_channels=num_classes * num_anchors, kernel_size=1),
        # reg_pred0
        nn.Conv2d(in_channels=channels_list[chx[0]], out_channels=4 * (reg_max + num_anchors), kernel_size=1),
        # stem1
        ConvBNSiLU(in_channels=channels_list[chx[1]], out_channels=channels_list[chx[1]], kernel_size=1, stride=1),
        # cls_conv1
        ConvBNSiLU(in_channels=channels_list[chx[1]], out_channels=channels_list[chx[1]], kernel_size=3, stride=1),
        # reg_conv1
        ConvBNSiLU(in_channels=channels_list[chx[1]], out_channels=channels_list[chx[1]], kernel_size=3, stride=1),
        # cls_pred1
        nn.Conv2d(in_channels=channels_list[chx[1]], out_channels=num_classes * num_anchors, kernel_size=1),
        # reg_pred1
        nn.Conv2d(in_channels=channels_list[chx[1]], out_channels=4 * (reg_max + num_anchors), kernel_size=1),
        # stem2
        ConvBNSiLU(in_channels=channels_list[chx[2]], out_channels=channels_list[chx[2]], kernel_size=1, stride=1),
        # cls_conv2
        ConvBNSiLU(in_channels=channels_list[chx[2]], out_channels=channels_list[chx[2]], kernel_size=3, stride=1),
        # reg_conv2
        ConvBNSiLU(in_channels=channels_list[chx[2]], out_channels=channels_list[chx[2]], kernel_size=3, stride=1),
        # cls_pred2
        nn.Conv2d(in_channels=channels_list[chx[2]], out_channels=num_classes * num_anchors, kernel_size=1),
        # reg_pred2
        nn.Conv2d(in_channels=channels_list[chx[2]], out_channels=4 * (reg_max + num_anchors), kernel_size=1)
    )

    # P6 models get a fourth scale appended.
    if num_layers == 4:
        head_layers.add_module('stem3',
            ConvBNSiLU(in_channels=channels_list[chx[3]], out_channels=channels_list[chx[3]], kernel_size=1, stride=1))
        head_layers.add_module('cls_conv3',
            ConvBNSiLU(in_channels=channels_list[chx[3]], out_channels=channels_list[chx[3]], kernel_size=3, stride=1))
        head_layers.add_module('reg_conv3',
            ConvBNSiLU(in_channels=channels_list[chx[3]], out_channels=channels_list[chx[3]], kernel_size=3, stride=1))
        head_layers.add_module('cls_pred3',
            nn.Conv2d(in_channels=channels_list[chx[3]], out_channels=num_classes * num_anchors, kernel_size=1))
        head_layers.add_module('reg_pred3',
            nn.Conv2d(in_channels=channels_list[chx[3]], out_channels=4 * (reg_max + num_anchors), kernel_size=1))

    return head_layers


class Detect(nn.Module):
    export = False
    '''Efficient Decoupled Head.

    With hardware-aware design, the decoupled head is optimized with
    hybrid-channel methods.
    '''

    def __init__(self, num_classes=80, num_layers=3, inplace=True,
                 head_layers=None, use_dfl=True, reg_max=16):
        super().__init__()
        assert head_layers is not None
        # Number of classes.
        self.nc = num_classes
        # Outputs per anchor: nc classes + 1 objectness + 4 box offsets.
        self.no = num_classes + 5
        # Number of detection layers (scales).
        self.nl = num_layers
        # Grid placeholders, one per scale.
        self.grid = [torch.zeros(1)] * num_layers
        self.prior_prob = 1e-2
        self.inplace = inplace
        # Strides are fixed here; recomputed during build if needed.
        stride = [8, 16, 32] if num_layers == 3 else [8, 16, 32, 64]
        self.stride = torch.tensor(stride)
        self.use_dfl = use_dfl
        self.reg_max = reg_max
        # 1x1 conv projecting the DFL distribution to its expected value.
        self.proj_conv = nn.Conv2d(self.reg_max + 1, 1, 1, bias=False)
        self.grid_cell_offset = 0.5
        self.grid_cell_size = 5.0

        # Decoupled-head branches, one entry per scale.
        self.stems = nn.ModuleList()
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        self.cls_preds = nn.ModuleList()
        self.reg_preds = nn.ModuleList()
        for i in range(num_layers):
            idx = i * 5  # head_layers holds 5 modules per scale
            self.stems.append(head_layers[idx])
            self.cls_convs.append(head_layers[idx + 1])
            self.reg_convs.append(head_layers[idx + 2])
            self.cls_preds.append(head_layers[idx + 3])
            self.reg_preds.append(head_layers[idx + 4])

    def initialize_biases(self):
        """Init prediction biases/weights and the fixed DFL projection."""
        # Classification heads: bias set so initial P(foreground) ~ prior_prob.
        for conv in self.cls_preds:
            b = conv.bias.view(-1, )
            b.data.fill_(-math.log((1 - self.prior_prob) / self.prior_prob))
            conv.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
            w = conv.weight
            w.data.fill_(0.)
            conv.weight = torch.nn.Parameter(w, requires_grad=True)
        # Regression heads: bias 1.0, zero weights.
        for conv in self.reg_preds:
            b = conv.bias.view(-1, )
            b.data.fill_(1.0)
            conv.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
            w = conv.weight
            w.data.fill_(0.)
            conv.weight = torch.nn.Parameter(w, requires_grad=True)
        # Frozen projection [0, 1, ..., reg_max] used by proj_conv to turn
        # the DFL distribution into an expected distance.
        self.proj = nn.Parameter(torch.linspace(0, self.reg_max, self.reg_max + 1),
                                 requires_grad=False)
        self.proj_conv.weight = nn.Parameter(
            self.proj.view([1, self.reg_max + 1, 1, 1]).clone().detach(),
            requires_grad=False)

    def forward(self, x):
        """Run the head on neck outputs (P3: 80x80x128, P4: 40x40x256, P5: 20x20x512)."""
        if self.training:
            # Training: return raw per-anchor class scores and box distributions.
            cls_score_list = []
            reg_distri_list = []
            # Per scale the predictions are:
            # [bs, num_classes*num_anchors, 80, 80], [bs, 4*(reg_max+num_anchors), 80, 80]
            # [bs, num_classes*num_anchors, 40, 40], [bs, 4*(reg_max+num_anchors), 40, 40]
            # [bs, num_classes*num_anchors, 20, 20], [bs, 4*(reg_max+num_anchors), 20, 20]
            for i in range(self.nl):
                # [bs,128,80,80]; [bs,256,40,40]; [bs,512,20,20]
                x[i] = self.stems[i](x[i])
                cls_x = x[i]
                reg_x = x[i]
                # Classification branch.
                cls_feat = self.cls_convs[i](cls_x)
                cls_output = self.cls_preds[i](cls_feat)
                # Loss uses nn.BCEWithLogitsLoss-style targets; sigmoid here.
                cls_output = torch.sigmoid(cls_output)
                # Regression branch.
                reg_feat = self.reg_convs[i](reg_x)
                reg_output = self.reg_preds[i](reg_feat)
                cls_score_list.append(cls_output.flatten(2).permute((0, 2, 1)))
                reg_distri_list.append(reg_output.flatten(2).permute((0, 2, 1)))
            # Classification: [bs, 8400, num_classes*num_anchors]
            cls_score_list = torch.cat(cls_score_list, axis=1)
            # Regression: [bs, 8400, 4*(reg_max+num_anchors)]
            reg_distri_list = torch.cat(reg_distri_list, axis=1)
            return x, cls_score_list, reg_distri_list
        else:
            # Eval / inference path (anchor-free):
            # [bs, num_classes, 80, 80], [bs, 4*(reg_max+1), 80, 80]
            # [bs, num_classes, 40, 40], [bs, 4*(reg_max+1), 40, 40]
            # [bs, num_classes, 20, 20], [bs, 4*(reg_max+1), 20, 20]
            cls_score_list = []
            reg_dist_list = []
            for i in range(self.nl):
                # [bs,128,80,80]; [bs,256,40,40]; [bs,512,20,20]
                b, _, h, w = x[i].shape
                hw = h * w
                x[i] = self.stems[i](x[i])
                cls_x = x[i]
                reg_x = x[i]
                # Classification branch.
                cls_feat = self.cls_convs[i](cls_x)
                cls_output = self.cls_preds[i](cls_feat)
                cls_output = torch.sigmoid(cls_output)
                # Regression branch.
                reg_feat = self.reg_convs[i](reg_x)
                reg_output = self.reg_preds[i](reg_feat)
                if self.use_dfl:
                    # Collapse the DFL distribution to expected distances.
                    reg_output = reg_output.reshape([-1, 4, self.reg_max + 1, hw]).permute(0, 2, 1, 3)
                    reg_output = self.proj_conv(F.softmax(reg_output, dim=1))
                if self.export:
                    cls_score_list.append(cls_output)
                    reg_dist_list.append(reg_output)
                else:
                    cls_score_list.append(cls_output.reshape([b, self.nc, hw]))
                    reg_dist_list.append(reg_output.reshape([b, 4, hw]))

            if self.export:
                # Export mode: raw per-scale (cls, reg) concatenations.
                return tuple(torch.cat([cls, reg], 1)
                             for cls, reg in zip(cls_score_list, reg_dist_list))

            cls_score_list = torch.cat(cls_score_list, axis=-1).permute(0, 2, 1)
            reg_dist_list = torch.cat(reg_dist_list, axis=-1).permute(0, 2, 1)

            anchor_points, stride_tensor = generate_anchors(
                x, self.stride, self.grid_cell_size, self.grid_cell_offset,
                device=x[0].device, is_eval=True, mode='af')

            # Decode distances to boxes and scale back to input resolution.
            pred_bboxes = dist2bbox(reg_dist_list, anchor_points, box_format='xywh')
            pred_bboxes *= stride_tensor
            # [boxes(4) | objectness placeholder(1) | class scores(nc)]
            return torch.cat(
                [
                    pred_bboxes,
                    torch.ones((b, pred_bboxes.shape[1], 1),
                               device=pred_bboxes.device,
                               dtype=pred_bboxes.dtype),
                    cls_score_list
                ],
                axis=-1)

构建模型Model

# 向上修正值x,使其能被除数整除。
def make_divisible(x, divisor):
    """Round x UP to the nearest multiple of divisor."""
    return math.ceil(x / divisor) * divisor


def build_network(config, channels, num_classes, num_layers, fuse_ab=False, distill_ns=False):
    """Build (backbone, neck, head) from the parsed config.

    num_layers selects P5 (3 scales) vs P6 (4 scales); fuse_ab / distill_ns
    pick alternative head implementations.
    """
    # Model depth multiple (scales num_repeats).
    depth_mul = config.model.depth_multiple
    # Layer channel multiple (scales out_channels).
    width_mul = config.model.width_multiple

    # Backbone settings.
    num_repeat_backbone = config.model.backbone.num_repeats
    channels_list_backbone = config.model.backbone.out_channels
    # fuse_P2 improves small-object detection.
    fuse_P2 = config.model.backbone.get('fuse_P2')
    cspsppf = config.model.backbone.get('cspsppf')

    # Neck settings.
    num_repeat_neck = config.model.neck.num_repeats
    channels_list_neck = config.model.neck.out_channels

    # Head settings.
    use_dfl = config.model.head.use_dfl
    reg_max = config.model.head.reg_max

    # For yolov6l: num_repeat = [1, 6, 12, 18, 6, 12, 12, 12, 12]
    num_repeat = [(max(round(i * depth_mul), 1) if i > 1 else i)
                  for i in (num_repeat_backbone + num_repeat_neck)]
    # For yolov6l: channels_list = [64, 128, 256, 512, 1024, 256, 128, 128, 256, 256, 512]
    channels_list = [make_divisible(i * width_mul, 8)
                     for i in (channels_list_backbone + channels_list_neck)]

    # Basic conv block type (e.g. RepVGGBlock vs ConvBNSiLU).
    block = get_block(config.training_mode)

    # NOTE(review): eval() on config strings executes arbitrary code if the
    # config is untrusted — acceptable only for trusted local config files.
    BACKBONE = eval(config.model.backbone.type)
    NECK = eval(config.model.neck.type)

    if 'CSP' in config.model.backbone.type:
        if "stage_block_type" in config.model.backbone:
            stage_block_type = config.model.backbone.stage_block_type
        else:
            stage_block_type = "BepC3"  # default
        backbone = BACKBONE(
            in_channels=channels,
            channels_list=channels_list,
            num_repeats=num_repeat,
            block=block,
            csp_e=config.model.backbone.csp_e,
            fuse_P2=fuse_P2,
            cspsppf=cspsppf,
            stage_block_type=stage_block_type
        )
        neck = NECK(
            channels_list=channels_list,
            num_repeats=num_repeat,
            block=block,
            csp_e=config.model.neck.csp_e,
            stage_block_type=stage_block_type
        )
    else:
        backbone = BACKBONE(
            in_channels=channels,
            channels_list=channels_list,
            num_repeats=num_repeat,
            block=block,
            fuse_P2=fuse_P2,
            cspsppf=cspsppf
        )
        neck = NECK(
            channels_list=channels_list,
            num_repeats=num_repeat,
            block=block
        )

    if distill_ns:
        from yolov6.models.heads.effidehead_distill_ns import Detect, build_effidehead_layer
        if num_layers != 3:
            LOGGER.error('ERROR in: Distill mode not fit on n/s models with P6 head.\n')
            exit()
        head_layers = build_effidehead_layer(channels_list, 1, num_classes, reg_max=reg_max)
        head = Detect(num_classes, num_layers, head_layers=head_layers, use_dfl=use_dfl)
    elif fuse_ab:
        from yolov6.models.heads.effidehead_fuseab import Detect, build_effidehead_layer
        anchors_init = config.model.head.anchors_init
        head_layers = build_effidehead_layer(channels_list, 3, num_classes,
                                             reg_max=reg_max, num_layers=num_layers)
        head = Detect(num_classes, anchors_init, num_layers,
                      head_layers=head_layers, use_dfl=use_dfl)
    else:
        from yolov6.models.effidehead import Detect, build_effidehead_layer
        head_layers = build_effidehead_layer(channels_list, 1, num_classes,
                                             reg_max=reg_max, num_layers=num_layers)
        head = Detect(num_classes, num_layers, head_layers=head_layers, use_dfl=use_dfl)

    return backbone, neck, head


class Model(nn.Module):
    export = False
    '''YOLOv6 model with backbone, neck and head.

    The default parts are EfficientRep Backbone, Rep-PAN and Efficient
    Decoupled Head.
    '''

    def __init__(self, config, channels=3, num_classes=None, fuse_ab=False, distill_ns=False):
        super().__init__()
        # num_layers selects the P5 (3-scale) or P6 (4-scale) model.
        num_layers = config.model.head.num_layers
        self.backbone, self.neck, self.detect = build_network(
            config, channels, num_classes, num_layers,
            fuse_ab=fuse_ab, distill_ns=distill_ns)

        # Init Detect head.
        self.stride = self.detect.stride
        self.detect.initialize_biases()

        # Init weights.
        initialize_weights(self)

    def forward(self, x):
        # export_mode defaults to False outside ONNX export.
        export_mode = torch.onnx.is_in_onnx_export() or self.export
        x = self.backbone(x)
        x = self.neck(x)
        if export_mode == False:
            featmaps = []
            featmaps.extend(x)
        x = self.detect(x)
        return x if export_mode is True else [x, featmaps]

    def _apply(self, fn):
        # Keep stride/grid tensors on the same device/dtype as the module.
        self = super()._apply(fn)
        self.detect.stride = fn(self.detect.stride)
        self.detect.grid = list(map(fn, self.detect.grid))
        return self


def build_model(cfg, num_classes, device, fuse_ab=False, distill_ns=False):
    """Construct a Model from cfg and move it to device."""
    model = Model(cfg, channels=3, num_classes=num_classes,
                  fuse_ab=fuse_ab, distill_ns=distill_ns).to(device)
    return model

本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若转载,请注明出处:http://www.mzph.cn/news/703644.shtml

如若内容造成侵权/违法违规/事实不符,请联系多彩编程网进行投诉反馈email:809451989@qq.com,一经查实,立即删除!

相关文章

多线程基础说明【基础篇】

目录 🌭1.相关概念 🍿2.创建和启动线程 🥞3.线程安全 🧈4.死锁 🥓5.线程通信的方法 1.相关概念 1.1程序 为完成特定任务,用某种语言编写的一组指令的集合。即指一段静态的代码,静态对象…

都2024年了,软件测试面试都问什么?

1、最熟悉的 selenium 操作? 基本上 selenium 提供的一下几大类操作都能够灵活使用,比如说:八大元素定位方式、三大等待方式、用户点击、输入等常见操作、 还有窗口切换、iframe 切换操作,比如说 actionchains 文件上传、JS操作 等…

网站开发--详解Servlet

💕"Echo"💕 作者:Mylvzi 文章主要内容:网站开发–详解Servlet 一.基本介绍 tomcat是Java中开发服务器的重要的一个工具,任何开发的服务器都要部署在tomcat之上,可以说tomcat是所有服务器的底座,为了更好的操作http,to…

Pom文件中的scope到是什么作用

在 Maven 的 pom.xml 文件中，<scope> 标签用于定义依赖项的作用域。作用域决定了依赖项的生命周期、可见性和使用范围。<scope> 标签的常用值包括： 1. **compile**（默认值）：编译依赖在所有 classpath 中都…

golang学习3,golang 项目中配置gin的web框架

1.go 初始化 mod文件 go mod init gin-ranking 2.gin的crm框架 go get -u github.com/gin-gonic/gin 3.go.mod爆红解决

【Redis】redis通用命令

redis连接命令 要在 redis 服务上执行命令需要一个 redis 客户端。Redis 客户端在我们之前安装redis 的src目录下&#xff0c;具体为/usr/local/redis/src。注意此redis实例没有设置密码&#xff0c;如果设置了密码需要先使用命令AUTH执行验证或者开始在命令行中通过-a指定。 …

前后端分离Vue+node.js在线学习考试系统gqw7o

与其它应用程序相比&#xff0c;在线学习平台的设计主要面向于学校&#xff0c;旨在为管理员和学生、教师、院系提供一个在线学习平台。学生、教师、院系可以通过系统及时查看公告信息等。 在线学习平台是在Windows操作系统下的应用平台。为防止出现兼容性及稳定性问题&#xf…

B站项目-基于Pytorch的ResNet垃圾图片分类

基于Pytorch的ResNet垃圾图片分类 数据集预处理 画图片的宽高分布散点图 import osimport matplotlib.pyplot as plt import PIL.Image as Imagedef plot_resolution(dataset_root_path):image_size_list []#存放图片尺寸for root, dirs, files in os.walk(dataset_root_pa…

装配制造业的MES系统种的物料齐套技术

装配是制造企业涉及产品生产加工最为普遍的一种模式&#xff0c;包括汽车、电子、电器、电气等行业。经研究表明&#xff0c;装配在整个产品制造总成本中所占比例超过了50%&#xff0c;所占用的总生产时间比例在40%-60%&#xff0c;直接影响着产品质量和成本。装配制造非常强调…

树-王道-复试

树 1.度&#xff1a; 树中孩子节点个数&#xff0c;所有结点的度最大值为 树的度 2.有序树&#xff1a; 逻辑上看&#xff0c;树中结点的各子树从左至右是有次序的&#xff0c;不能互换。 **3.**树的根节点没有前驱&#xff0c;其他节点只有一个前驱 **4.**所有节点可有零个或…

Leetcoder Day23| 回溯part03:组合+分割

语言&#xff1a;Java/Go 39. 组合总和 给你一个 无重复元素 的整数数组 candidates 和一个目标整数 target &#xff0c;找出 candidates 中可以使数字和为目标数 target 的所有不同组合 &#xff0c;并以列表形式返回。你可以按任意顺序返回这些组合。 candidates 中的同一个…

机器学习是什么

机器学习是什么 引言 机器学习&#xff08;Machine Learning&#xff0c;简称ML&#xff09;是人工智能&#xff08;AI&#xff09;领域中的重要分支&#xff0c;旨在通过让计算机系统自动学习和适应&#xff0c;不需要明确的编程指导。机器学习的发展为我们提供了一种新的方…

环形光源让图像质量瞬间提升,一探究竟吧!

光源对机器视觉检测系统的性能起着重要作用,精确的光学结构设计可以提高捕获图像的质量,准确地分离目标和背景信息,不充足的光线会使捕捉到的图像无法满足需求&#xff0c;针对不同的检测对象,不同的形状光源应运而生。我们来看看最常用的LED光源之一—环形光源。 环形光源&…

代码随想录算法训练营day41 | 01背包问题 二维、01背包问题 一维、416. 分割等和子集

背包问题的理论基础重中之重是01背包 01背包问题 二维 二维dp的01背包 确定dp数组以及下标的含义&#xff1a;dp[i][j]表示从下标为[0-i]的物品里任意取&#xff0c;放进容量为j的背包&#xff0c;价值总和最大是多少确定递推公式&#xff1a;dp[i][j] max(dp[i - 1][j], d…

vue3 使用pina

一、Vue 3 项目中集成Pina 状态管理库 要在 Vue 3 项目中使用 Pina&#xff08;Vue 3 状态管理库&#xff09;&#xff0c;您可以按照以下步骤操作&#xff1a; 1. 安装 Pina 库相应的插件&#xff1a; yarn add pinia # 或者使用 npm npm install pinia 2. 在您的 Vue 3 项…

电脑休眠之后唤不醒

现象&#xff1a;午休时间电脑休眠了&#xff0c;醒来之后发现在密码输入界面&#xff0c;但鼠标键盘没反应。按重启键或电源机重新开机&#xff0c;结果开不了机。 原因&#xff1a;1、内存条脏了&#xff0c;导致内存条读取失败 2、休眠的时候硬盘休眠了&#xff0c;导致按…

如何在 CentOS 中配置 SSH 服务的 TCP 端口转发

在 CentOS 系统中&#xff0c;SSH&#xff08;Secure Shell&#xff09;服务提供了强大的功能&#xff0c;其中之一就是 TCP 端口转发。通过此功能&#xff0c;我们可以安全地将本地或远程服务器上的端口流量转发到其他主机上。本文将引导您如何在 CentOS 中启用或禁用 SSH 的 …

AngularJS安装版本问题

一、安装 Angular CLI 脚手架安装命令&#xff1a; npm install -g angular/cli 在安装前请确保自己安装NodeJS环境版本为V18及以上&#xff0c;否则会因node版本问题导致项目无法正常运行。 脚手架安装后&#xff0c;已提示了当前node版本必须为18.13.0或大于20.9.0版本&…

git之分支管理

一.理解分支 我们看下面这张图片： 在版本回退里，你已经知道，每次提交，Git都把它们串成一条时间线，这条时间线就可以理解为是一个分支。截止到目前，只有一条时间线，在Git里，这个分支…

javaScript打印n以内的素数——试除法及优化

素数&#xff1a;只能被1和它本身整除 试除法 试除法的时间复杂度为O(n*sqrt(n))&#xff0c;空间复杂度为O(1)&#xff0c;这已经是一种比较高效的解决方案了。 n如果不是质数&#xff0c;那么能整除的数一定与根号n。基于此缩小循环比较范围&#xff0c;并且一旦找到可以整除…