🍨 This post is a learning-record blog from the [🔗365-Day Deep Learning Training Camp]
🍖 Original author: [K同学啊 | tutoring and custom projects available]
I. Basic Configuration
- Language environment: Python 3.8
- IDE: PyCharm
- Deep learning environment:
  - torch==1.12.1+cu113
  - torchvision==0.13.1+cu113
II. Preparation
1. Set Up the GPU
import torch
import torch.nn as nn
from torchvision import transforms,datasets
import pathlib, warnings

warnings.filterwarnings("ignore")
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
2. Import the Data
The dataset used in this project is not part of any public collection, so you need to place it in your own file directory and set the corresponding path for use in the rest of the walkthrough.
Run the following code:
data_dir = "./data/J3-data"
data_dir = pathlib.Path(data_dir)
data_path = list(data_dir.glob('*'))
classNames = [str(path).split('\\')[2] for path in data_path]  # note: splitting on '\\' assumes Windows-style paths
print(classNames)
The output:
['0', '1']
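Splitting on '\\' ties the code to Windows path separators and to this exact directory depth. A more portable sketch (an alternative of mine, assuming the same folder-per-class layout) reads the class names directly from pathlib:
# hypothetical platform-independent way to collect class names
classNames = sorted(p.name for p in data_dir.glob('*') if p.is_dir())
print(classNames)  # expected: ['0', '1']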
Next, we preprocess the whole dataset with transforms.Compose:
train_transforms = transforms.Compose([
    transforms.Resize([224, 224]),        # resize every input image to a uniform size
    # transforms.RandomHorizontalFlip(),  # optional random horizontal flip
    transforms.ToTensor(),                # convert PIL Image / numpy.ndarray to a tensor scaled to [0, 1]
    transforms.Normalize(                 # standardize -> approximately standard normal (Gaussian) distribution, which helps the model converge
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225])        # mean/std computed from a random sample of the dataset (the standard ImageNet statistics)
])

total_dataset = datasets.ImageFolder(data_dir, transform=train_transforms)  # note: pass train_transforms, not the transforms module
print(total_dataset.class_to_idx)
The output:
{'0': 0, '1': 1}
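When decoding predictions later, the inverse mapping is handy (a small convenience sketch, not part of the original code):
# hypothetical helper: map predicted indices back to class names
idx_to_class = {v: k for k, v in total_dataset.class_to_idx.items()}
print(idx_to_class)  # {0: '0', 1: '1'}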
3. Split the Dataset
Here the dataset is split into training and test sets by ratio:
train_size = int(0.8*len(total_dataset))
test_size = len(total_dataset) - train_size
train_dataset,test_dataset = torch.utils.data.random_split(total_dataset,[train_size,test_size])
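Note that random_split draws a fresh permutation on every run. If a reproducible split is wanted, a seeded generator can be passed in (a sketch; the generator argument exists in PyTorch >= 1.6, so it is available in the 1.12.1 environment above):
# hypothetical reproducible variant of the split
generator = torch.Generator().manual_seed(42)
train_dataset, test_dataset = torch.utils.data.random_split(
    total_dataset, [train_size, test_size], generator=generator)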
Next, wrap the resulting training and test sets in DataLoaders:
batch_size = 32

train_dl = torch.utils.data.DataLoader(train_dataset,
                                       batch_size=batch_size,
                                       shuffle=True,
                                       num_workers=0)
test_dl = torch.utils.data.DataLoader(test_dataset,
                                      batch_size=batch_size,
                                      shuffle=True,
                                      num_workers=0)
Then run:
for X, y in test_dl:
    print('Shape of X:', X.shape)
    print('shape of y:', y.shape, y.dtype)
    break
to inspect the shape of one batch from the test set:
Shape of X: torch.Size([32, 3, 224, 224])
shape of y: torch.Size([32]) torch.int64
4. Build the Model
First, import the libraries the model implementation depends on:
import torch.nn.functional as F
from collections import OrderedDict
1. DenseLayer
class DenseLayer(nn.Sequential):
    def __init__(self, in_channel, growth_rate, bn_size, drop_rate):
        super(DenseLayer, self).__init__()
        # bottleneck: BN -> ReLU -> 1x1 conv reduces input to bn_size * growth_rate channels
        self.add_module('norm1', nn.BatchNorm2d(in_channel))
        self.add_module('relu1', nn.ReLU(inplace=True))
        self.add_module('conv1', nn.Conv2d(in_channel, bn_size * growth_rate,
                                           kernel_size=1, stride=1))
        # BN -> ReLU -> 3x3 conv produces growth_rate new feature maps
        self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate))
        self.add_module('relu2', nn.ReLU(inplace=True))
        self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate,
                                           kernel_size=3, stride=1, padding=1))
        self.drop_rate = drop_rate

    def forward(self, x):
        new_feature = super(DenseLayer, self).forward(x)
        if self.drop_rate > 0:
            new_feature = F.dropout(new_feature, p=self.drop_rate, training=self.training)
        # dense connectivity: concatenate the input with the new features along the channel dim
        return torch.cat([x, new_feature], 1)
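A quick smoke test (made-up sizes, not part of the original post) confirms that each DenseLayer appends growth_rate channels to its input:
# hypothetical smoke test for DenseLayer
layer = DenseLayer(in_channel=64, growth_rate=32, bn_size=4, drop_rate=0)
out = layer(torch.randn(1, 64, 56, 56))
print(out.shape)  # torch.Size([1, 96, 56, 56]): 64 input + 32 new channels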
2. DenseBlock
class DenseBlock(nn.Sequential):
    def __init__(self, num_layers, in_channel, bn_size, growth_rate, drop_rate):
        super(DenseBlock, self).__init__()
        # each layer sees the original input plus everything produced by earlier layers
        for i in range(num_layers):
            layer = DenseLayer(in_channel + i * growth_rate, growth_rate, bn_size, drop_rate)
            self.add_module('denselayer%d' % (i + 1,), layer)
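Similarly, a block of num_layers layers grows the channel count by num_layers * growth_rate (again a sketch with example sizes):
# hypothetical smoke test for DenseBlock
block = DenseBlock(num_layers=6, in_channel=64, bn_size=4, growth_rate=32, drop_rate=0)
out = block(torch.randn(1, 64, 56, 56))
print(out.shape)  # torch.Size([1, 256, 56, 56]): 64 + 6 * 32 channels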
3. Transition
class Transition(nn.Sequential):
    def __init__(self, in_channel, out_channel):
        super(Transition, self).__init__()
        # compress channels with a 1x1 conv, then halve the spatial size with average pooling
        self.add_module('norm', nn.BatchNorm2d(in_channel))
        self.add_module('relu', nn.ReLU(inplace=True))
        self.add_module('conv', nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=1))
        self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
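With compression_rate=0.5, each transition halves both the channel count and the spatial resolution, for example (sketch):
# hypothetical smoke test for Transition
trans = Transition(in_channel=256, out_channel=128)
out = trans(torch.randn(1, 256, 56, 56))
print(out.shape)  # torch.Size([1, 128, 28, 28])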
4. Build the full DenseNet
class DenseNet(nn.Module):
    def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16), init_channel=64,
                 bn_size=4, compression_rate=0.5, drop_rate=0, num_classes=1000):
        super(DenseNet, self).__init__()
        # stem: 7x7 conv + 3x3 max pool, as in the original DenseNet paper
        self.features = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv2d(3, init_channel, kernel_size=7, stride=2, padding=3)),
            ('norm0', nn.BatchNorm2d(init_channel)),
            ('relu0', nn.ReLU(inplace=True)),
            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
        ]))

        # stack dense blocks, with a transition layer between consecutive blocks
        num_features = init_channel
        for i, num_layers in enumerate(block_config):
            block = DenseBlock(num_layers, num_features, bn_size=bn_size,
                               growth_rate=growth_rate, drop_rate=drop_rate)
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features += num_layers * growth_rate
            if i != len(block_config) - 1:
                transition = Transition(num_features, int(num_features * compression_rate))
                self.features.add_module('transition%d' % (i + 1), transition)
                num_features = int(num_features * compression_rate)

        self.features.add_module('norm5', nn.BatchNorm2d(num_features))
        self.features.add_module('relu5', nn.ReLU(inplace=True))
        self.classifier = nn.Linear(num_features, num_classes)

        # parameter initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.bias, 0)
                nn.init.constant_(m.weight, 1)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = F.avg_pool2d(x, 7, stride=1).view(x.size(0), -1)
        x = self.classifier(x)
        return x
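Note that the fixed kernel size of 7 in F.avg_pool2d assumes 224x224 inputs, which reach this point as 7x7 feature maps. If other input sizes are needed, a common substitution (my suggestion, not part of the original code) is adaptive pooling:
# hypothetical variant of the final pooling step that works for any input size;
# it would replace the F.avg_pool2d line in forward:
x = F.adaptive_avg_pool2d(x, (1, 1)).view(x.size(0), -1)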
5. Build DenseNet121 from DenseNet
densenet121 = DenseNet(init_channel=64,
                       growth_rate=32,
                       block_config=(6, 12, 24, 16),
                       num_classes=len(classNames))
model = densenet121.to(device)
5. View Model Information
import torchsummary as summary
summary.summary(model, (3, 224, 224))
The output:
----------------------------------------------------------------
        Layer (type)               Output Shape         Param #
================================================================
            Conv2d-1         [-1, 64, 112, 112]           9,472
       BatchNorm2d-2         [-1, 64, 112, 112]             128
              ReLU-3         [-1, 64, 112, 112]               0
         MaxPool2d-4           [-1, 64, 56, 56]               0
               ...                        ...                ...
   (dense blocks 1-4 and transition layers omitted for brevity)
               ...                        ...                ...
     BatchNorm2d-365           [-1, 1024, 7, 7]           2,048
            ReLU-366           [-1, 1024, 7, 7]               0
          Linear-367                    [-1, 2]           2,050
================================================================
Total params: 6,966,146
Trainable params: 6,966,146
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 0.57
Forward/backward pass size (MB): 294.57
Params size (MB): 26.57
Estimated Total Size (MB): 321.72
----------------------------------------------------------------
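As a cross-check, torchvision ships a reference DenseNet-121. It is not identical to the class above (for instance, its convolutions are created with bias=False, so the parameter counts differ slightly), but it can be instantiated for the same two-class task (a sketch assuming torchvision 0.13's densenet121 forwards num_classes to its constructor):
# optional cross-check against torchvision's reference implementation
from torchvision import models
tv_densenet121 = models.densenet121(num_classes=len(classNames)).to(device)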
III. Training the Model
1. The Training Function
def train(dataloader, model, optimizer, loss_fn):
    size = len(dataloader.dataset)   # size of the training set
    num_batches = len(dataloader)    # number of batches
    train_acc, train_loss = 0, 0

    for X, y in dataloader:
        X, y = X.to(device), y.to(device)

        # forward pass and loss
        pred = model(X)
        loss = loss_fn(pred, y)

        # backpropagation and parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        train_loss += loss.item()
        train_acc += (pred.argmax(1) == y).type(torch.float).sum().item()

    train_loss /= num_batches
    train_acc /= size
    return train_acc, train_loss
2. The Test Function
The test function is largely the same as the training function, but since no gradient descent is performed to update the weights, no optimizer needs to be passed in:
def test(dataloader, model, loss_fn):
    size = len(dataloader.dataset)   # size of the test set
    num_batches = len(dataloader)    # number of batches (ceil(size / batch_size))
    test_loss, test_acc = 0, 0

    # no training here, so disable gradient tracking to save memory
    with torch.no_grad():
        for imgs, target in dataloader:
            imgs, target = imgs.to(device), target.to(device)

            # compute the loss
            target_pred = model(imgs)
            loss = loss_fn(target_pred, target)

            test_loss += loss.item()
            test_acc += (target_pred.argmax(1) == target).type(torch.float).sum().item()

    test_acc /= size
    test_loss /= num_batches
    return test_acc, test_loss
3. Training
import copy

optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
loss_fn = nn.CrossEntropyLoss()  # loss function

epochs = 10
train_loss = []
train_acc = []
test_loss = []
test_acc = []
best_acc = 0

for epoch in range(epochs):
    model.train()
    epoch_train_acc, epoch_train_loss = train(train_dl, model, optimizer, loss_fn)

    model.eval()
    epoch_test_acc, epoch_test_loss = test(test_dl, model, loss_fn)

    # keep a copy of the best model seen so far
    if epoch_test_acc > best_acc:
        best_acc = epoch_test_acc
        best_model = copy.deepcopy(model)

    train_acc.append(epoch_train_acc)
    train_loss.append(epoch_train_loss)
    test_acc.append(epoch_test_acc)
    test_loss.append(epoch_test_loss)

    lr = optimizer.state_dict()['param_groups'][0]['lr']
    template = ('Epoch:{:2d}, Train_acc:{:.1f}%, Train_loss:{:.3f}, '
                'Test_acc:{:.1f}%, Test_loss:{:.3f}, Lr:{:.2E}')
    print(template.format(epoch + 1, epoch_train_acc * 100, epoch_train_loss,
                          epoch_test_acc * 100, epoch_test_loss, lr))

# save the best model to a file
PATH = './best_model.pth'  # file name for the saved weights
torch.save(best_model.state_dict(), PATH)
print('Done')
The output:
Epoch: 1, Train_acc:84.3%, Train_loss:0.359, Test_acc:86.7%, Test_loss:0.317, Lr:1.00E-04
Epoch: 2, Train_acc:87.6%, Train_loss:0.292, Test_acc:89.0%, Test_loss:0.270, Lr:1.00E-04
Epoch: 3, Train_acc:89.2%, Train_loss:0.260, Test_acc:89.8%, Test_loss:0.264, Lr:1.00E-04
Epoch: 4, Train_acc:90.2%, Train_loss:0.239, Test_acc:89.7%, Test_loss:0.259, Lr:1.00E-04
Epoch: 5, Train_acc:91.0%, Train_loss:0.222, Test_acc:90.3%, Test_loss:0.228, Lr:1.00E-04
Epoch: 6, Train_acc:91.1%, Train_loss:0.218, Test_acc:90.9%, Test_loss:0.236, Lr:1.00E-04
Epoch: 7, Train_acc:91.7%, Train_loss:0.201, Test_acc:82.4%, Test_loss:0.462, Lr:1.00E-04
Epoch: 8, Train_acc:92.5%, Train_loss:0.184, Test_acc:90.2%, Test_loss:0.264, Lr:1.00E-04
Epoch: 9, Train_acc:93.3%, Train_loss:0.172, Test_acc:90.2%, Test_loss:0.272, Lr:1.00E-04
Epoch:10, Train_acc:93.2%, Train_loss:0.171, Test_acc:90.7%, Test_loss:0.229, Lr:1.00E-04
Done

Process finished with exit code 0
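To reuse the saved weights later, e.g. for inference, they can be loaded back into a freshly constructed model (a minimal sketch using the DenseNet class and PATH defined above):
# minimal sketch: restore the best weights for inference
model = DenseNet(init_channel=64, growth_rate=32,
                 block_config=(6, 12, 24, 16),
                 num_classes=len(classNames)).to(device)
model.load_state_dict(torch.load(PATH, map_location=device))
model.eval()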
IV. Visualizing the Results
1. Loss & Accuracy
import matplotlib.pyplot as plt
# suppress warnings
import warnings
warnings.filterwarnings("ignore")             # ignore warning messages
plt.rcParams['font.sans-serif'] = ['SimHei']  # display Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False    # display minus signs correctly
plt.rcParams['figure.dpi'] = 100              # figure resolution

epochs_range = range(epochs)

plt.figure(figsize=(12, 3))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, train_acc, label='Training Accuracy')
plt.plot(epochs_range, test_acc, label='Test Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, train_loss, label='Training Loss')
plt.plot(epochs_range, test_loss, label='Test Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
The resulting visualization (accuracy and loss curves):
V. Personal Notes
This is a hands-on post; the code details and the intuition behind the network were covered in earlier articles, so they are not elaborated again here.