PyTorch Learning Notes -- Training on an Apple M1 (MPS)

import torch
# Check whether a GPU (the MPS backend) is available
torch.backends.mps.is_available()
True
if torch.backends.mps.is_available():
    mps_device = torch.device("mps")
    x = torch.ones(1, device=mps_device)
    print(x)
else:
    print("MPS device not found.")
tensor([1.], device='mps:0')
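To make the same code run on a machine without an Apple GPU, the usual fallback is a one-line device selection (a small sketch I'm adding, not part of the original run):

device = torch.device("mps") if torch.backends.mps.is_available() else torch.device("cpu")
print(f"Using device: {device}")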

Loading Data

from torch.utils.data import Dataset
import os
from PIL import Image
class MyData(Dataset):
    def __init__(self, root_dir, label_dir):
        self.root_dir = root_dir
        self.label_dir = label_dir
        self.path = os.path.join(self.root_dir, self.label_dir)
        self.img_path = os.listdir(self.path)

    def __getitem__(self, idx):
        img_name = self.img_path[idx]
        img_item_path = os.path.join(self.root_dir, self.label_dir, img_name)
        img = Image.open(img_item_path)
        label = self.label_dir
        return img, label

    def __len__(self):
        return len(self.img_path)
ants_dataset = MyData("./hymenoptera_data/train/","ants")
ants_dataset.__len__()
124
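As a quick sanity check, a single sample can be indexed directly, and two label folders can be merged into one dataset (a sketch; the bees sub-folder is assumed to follow the standard hymenoptera_data layout):

img, label = ants_dataset[0]
print(label, img.size)   # 'ants' plus the PIL image size

bees_dataset = MyData("./hymenoptera_data/train/", "bees")
train_dataset = ants_dataset + bees_dataset   # becomes a torch.utils.data.ConcatDataset
print(len(train_dataset))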

Using TensorBoard

from torch.utils.tensorboard import SummaryWriter
import numpy as np
img = Image.open("./hymenoptera_data/train/ants/0013035.jpg")
img_array = np.array(img)
# Create a SummaryWriter object and point it at the log directory
writer = SummaryWriter("logs")
writer.add_image("test", img_array, 1, dataformats="HWC")
for i in range(150):
    writer.add_scalar('y=x**2', i**2, i)
# To view the results in TensorBoard:
# step 1: activate the environment with `conda activate <env_name>`
# step 2: cd into the parent directory of the `logs` folder
# step 3: run `tensorboard --logdir=logs --port=<port>` in the terminal
#writer.close()

transforms

from torchvision import transforms
#ToTensor: convert a PIL image into a torch.Tensor
trans_totensor = transforms.ToTensor()
img_tensor = trans_totensor(img)
writer.add_image("img_tensor", img_tensor, 2)
type(img_tensor),img_tensor.shape
(torch.Tensor, torch.Size([3, 512, 768]))
#Normalize: standardize each channel with the given mean and std
mean = [0.4,0.5,0.4]
std = [0.2,0.3,0.2]
trans_toNormalize = transforms.Normalize(mean,std)
img_normalize = trans_toNormalize(img_tensor)
writer.add_image("img_normalize", img_normalize, 3)
img_normalize[0,0,0]
tensor(-0.4314)
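Normalize works per channel as output[c] = (input[c] - mean[c]) / std[c]. A rough check against the value above (a sketch, assuming the first pixel of channel 0 is about 0.3137 in img_tensor):

# (0.3137 - 0.4) / 0.2 ≈ -0.4314, which matches img_normalize[0, 0, 0]
print((img_tensor[0, 0, 0] - 0.4) / 0.2)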
#Resize: scale the whole image to the target size
trans_resize = transforms.Resize((512,512))
img_resize = trans_resize(img)
img_resize = trans_totensor(img_resize)
writer.add_image("img_resize", img_resize, 4)
img_resize.shape
torch.Size([3, 512, 512])
#RandomCrop: randomly crop a region of the given size
trans_random = transforms.RandomCrop(512)
trans_compose = transforms.Compose([trans_random, trans_totensor])
img_random = trans_compose(img)
writer.add_image("img_random", img_random, 5)
img_random.shape
torch.Size([3, 512, 512])

Using the Built-in Datasets in torchvision

import torchvision
dataset_transform = transforms.Compose([transforms.ToTensor()])
train_set = torchvision.datasets.CIFAR10("./dataset", train=True, transform=dataset_transform, download=True)
test_set = torchvision.datasets.CIFAR10("./dataset", train=False, transform=dataset_transform, download=True)
Files already downloaded and verified
Files already downloaded and verified
test_set[0]
img, labels = test_set[0]
display(img,labels)
tensor([[[0.6196, 0.6235, 0.6471,  ..., 0.5373, 0.4941, 0.4549],[0.5961, 0.5922, 0.6235,  ..., 0.5333, 0.4902, 0.4667],[0.5922, 0.5922, 0.6196,  ..., 0.5451, 0.5098, 0.4706],...,[0.2667, 0.1647, 0.1216,  ..., 0.1490, 0.0510, 0.1569],[0.2392, 0.1922, 0.1373,  ..., 0.1020, 0.1137, 0.0784],[0.2118, 0.2196, 0.1765,  ..., 0.0941, 0.1333, 0.0824]],[[0.4392, 0.4353, 0.4549,  ..., 0.3725, 0.3569, 0.3333],[0.4392, 0.4314, 0.4471,  ..., 0.3725, 0.3569, 0.3451],[0.4314, 0.4275, 0.4353,  ..., 0.3843, 0.3725, 0.3490],...,[0.4863, 0.3922, 0.3451,  ..., 0.3804, 0.2510, 0.3333],[0.4549, 0.4000, 0.3333,  ..., 0.3216, 0.3216, 0.2510],[0.4196, 0.4118, 0.3490,  ..., 0.3020, 0.3294, 0.2627]],[[0.1922, 0.1843, 0.2000,  ..., 0.1412, 0.1412, 0.1294],[0.2000, 0.1569, 0.1765,  ..., 0.1216, 0.1255, 0.1333],[0.1843, 0.1294, 0.1412,  ..., 0.1333, 0.1333, 0.1294],...,[0.6941, 0.5804, 0.5373,  ..., 0.5725, 0.4235, 0.4980],[0.6588, 0.5804, 0.5176,  ..., 0.5098, 0.4941, 0.4196],[0.6275, 0.5843, 0.5176,  ..., 0.4863, 0.5059, 0.4314]]])3
test_set.classes[labels]
'cat'
type(img)
torch.Tensor

DataLoader: reading data from a Dataset and feeding it to the network

from torch.utils.data import DataLoader
train_data = DataLoader(dataset=train_set, batch_size=4, shuffle=True, num_workers=0, drop_last=False)
test_data = DataLoader(dataset=test_set, batch_size=4, shuffle=True, num_workers=0, drop_last=False)
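With batch_size=4, each iteration of the DataLoader yields a stacked batch. A small sketch of what one batch looks like (shapes follow from CIFAR-10's 3×32×32 images):

imgs, labels = next(iter(test_data))
print(imgs.shape)    # torch.Size([4, 3, 32, 32])
print(labels.shape)  # torch.Size([4])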

Basic Network Skeleton – Using nn.Module

from torch import nn

class MyNet(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, input):
        output = input + 1
        return output
#ceshi(data) and ceshi.forward(data) are equivalent,
#because nn.Module defines __call__, and __call__ invokes forward
ceshi = MyNet()
x = torch.tensor(2.0)
y = ceshi(x)
y
tensor(3.)

Hands-On: Building a Network

class MyNet2(nn.Module):
    def __init__(self):
        super().__init__()
        self.cov1 = nn.Conv2d(3, 32, 5, padding=2)
        self.maxpool1 = nn.MaxPool2d(2)
        self.cov2 = nn.Conv2d(32, 32, 5, padding=2)
        self.maxpool2 = nn.MaxPool2d(2)
        self.cov3 = nn.Conv2d(32, 64, 5, padding=2)
        self.maxpool3 = nn.MaxPool2d(2)
        self.flatten = nn.Flatten()
        self.liner1 = nn.Linear(1024, 64)
        self.liner2 = nn.Linear(64, 10)

    def forward(self, x):
        x = self.cov1(x)
        x = self.maxpool1(x)
        x = self.cov2(x)
        x = self.maxpool2(x)
        x = self.cov3(x)
        x = self.maxpool3(x)
        x = self.flatten(x)
        x = self.liner1(x)
        x = self.liner2(x)
        return x
type(train_data)
torch.utils.data.dataloader.DataLoader
for i, data in enumerate(train_data):
    print(data)
    break
ceshi_net = MyNet2()
ceshi_net
MyNet2(
  (cov1): Conv2d(3, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
  (maxpool1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (cov2): Conv2d(32, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
  (maxpool2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (cov3): Conv2d(32, 64, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
  (maxpool3): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (flatten): Flatten(start_dim=1, end_dim=-1)
  (liner1): Linear(in_features=1024, out_features=64, bias=True)
  (liner2): Linear(in_features=64, out_features=10, bias=True)
)
inputs = torch.ones((64,3,32,32),dtype=torch.float32)
inputs.shape
torch.Size([64, 3, 32, 32])
out_put = ceshi_net(inputs)
out_put.shape
torch.Size([64, 10])
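Why liner1 takes in_features=1024: each of the three MaxPool2d(2) layers halves the 32×32 input (32 → 16 → 8 → 4) and the last conv outputs 64 channels, so the flattened vector has 64 × 4 × 4 = 1024 elements. A small sketch to confirm, reusing the layers defined above:

with torch.no_grad():
    x = torch.ones((1, 3, 32, 32))
    x = ceshi_net.maxpool1(ceshi_net.cov1(x))
    x = ceshi_net.maxpool2(ceshi_net.cov2(x))
    x = ceshi_net.maxpool3(ceshi_net.cov3(x))
    print(x.shape)                      # torch.Size([1, 64, 4, 4])
    print(ceshi_net.flatten(x).shape)   # torch.Size([1, 1024])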

Loss Functions and Backpropagation

inputs = torch.tensor([1,2,3] ,dtype = torch.float32)
targets = torch.tensor([1,2,5] ,dtype = torch.float32)
inputs = torch.reshape(inputs,(1,1,1,3))
targets = torch.reshape(targets,(1,1,1,3))
inputs,targets
(tensor([[[[1., 2., 3.]]]]), tensor([[[[1., 2., 5.]]]]))
loss = nn.L1Loss(reduction="sum")
result = loss(inputs, targets)
result
tensor(2.)
loss_mse = nn.MSELoss()
result_mse = loss_mse(inputs, targets)
result_mse
tensor(1.3333)
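Both results line up with the definitions: L1Loss with reduction="sum" gives |1-1| + |2-2| + |3-5| = 2, while MSELoss (mean reduction by default) gives (0² + 0² + 2²) / 3 ≈ 1.3333.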
Feed the dataset through the network to try out the loss function
lose_cro = nn.CrossEntropyLoss()
for data in train_data:
    imgs, labels = data
    outputs = ceshi_net(imgs)
    loss_values = lose_cro(outputs, labels)
    # backpropagation is driven by the loss value
    loss_values.backward()

Optimizers

#Create the optimizer from the torch.optim module
#The first argument is the network's parameters (weights, biases, ... of every layer), obtained with ceshi_net.parameters() --> ceshi_net is the network you defined
optimizer = torch.optim.SGD(ceshi_net.parameters(),lr=0.01)
for epoch in range(30):
    epoch_loss = 0.0
    for inputs, target in train_data:
        # reset the gradients from the previous step to zero
        optimizer.zero_grad()
        output = ceshi_net(inputs)
        loss = lose_cro(output, target)
        # backpropagation
        loss.backward()
        # update the parameters
        optimizer.step()
        epoch_loss += loss
    print(f'Epoch {epoch} loss: {epoch_loss}')
#Print the weight parameters of every layer
for param in ceshi_net.parameters():
    print(param)
#Experiment: can a new layer be added to our own network model?
ceshi_net.add_module("update_linear",nn.Linear(10,20))
ceshi_net
MyNet2(
  (cov1): Conv2d(3, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
  (maxpool1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (cov2): Conv2d(32, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
  (maxpool2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (cov3): Conv2d(32, 64, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
  (maxpool3): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (flatten): Flatten(start_dim=1, end_dim=-1)
  (liner1): Linear(in_features=1024, out_features=64, bias=True)
  (liner2): Linear(in_features=64, out_features=10, bias=True)
  (update_linear): Linear(in_features=10, out_features=20, bias=True)
)
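One caveat (my own note, not from the original): add_module only registers update_linear as a sub-module, so it appears in the printout and in parameters(), but MyNet2.forward() never calls it. To actually use the added layer it has to be invoked explicitly, as in this minimal sketch:

x = torch.ones((64, 3, 32, 32))
out = ceshi_net(x)                    # forward() still returns the original 10-dim output
out = ceshi_net.update_linear(out)    # the added layer must be called by hand
print(out.shape)                      # torch.Size([64, 20])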

Using and Modifying Existing Models

# Use an existing model from torchvision.models
# Note: neither call below loads ImageNet-pretrained weights; progress only toggles
# the download progress bar (pass weights=... / pretrained=True for pretrained weights)
vgg16_false = torchvision.models.vgg16(progress=False)
vgg16_true = torchvision.models.vgg16(progress=True)
vgg16_false
VGG((features): Sequential((0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(1): ReLU(inplace=True)(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(3): ReLU(inplace=True)(4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)(5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(6): ReLU(inplace=True)(7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(8): ReLU(inplace=True)(9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)(10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(11): ReLU(inplace=True)(12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(13): ReLU(inplace=True)(14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(15): ReLU(inplace=True)(16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)(17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(18): ReLU(inplace=True)(19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(20): ReLU(inplace=True)(21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(22): ReLU(inplace=True)(23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)(24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(25): ReLU(inplace=True)(26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(27): ReLU(inplace=True)(28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(29): ReLU(inplace=True)(30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False))(avgpool): AdaptiveAvgPool2d(output_size=(7, 7))(classifier): Sequential((0): Linear(in_features=25088, out_features=4096, bias=True)(1): ReLU(inplace=True)(2): Dropout(p=0.5, inplace=False)(3): Linear(in_features=4096, out_features=4096, bias=True)(4): ReLU(inplace=True)(5): Dropout(p=0.5, inplace=False)(6): Linear(in_features=4096, out_features=1000, bias=True))
)
# Load the dataset
dataset_transform = transforms.Compose([transforms.ToTensor()])
train_set = torchvision.datasets.CIFAR10("./dataset", train=True, transform=dataset_transform, download=True)
test_set = torchvision.datasets.CIFAR10("./dataset", train=False, transform=dataset_transform, download=True)
Files already downloaded and verified
Files already downloaded and verified
# The dataset has 10 classes, while the last layer of vgg16 outputs 1000 (a 1000-class classifier), so it cannot be used directly and needs adjusting
len(train_set.classes)
10
# Method 1: append a new layer after the last one
# (added at the top level like this, the layer is registered but VGG's forward() never calls it;
#  adding it inside vgg16_true.classifier would make it actually run, see the add_module note above)
vgg16_true.add_module("add_linear",nn.Linear(1000,10))
vgg16_true
VGG((features): Sequential((0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(1): ReLU(inplace=True)(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(3): ReLU(inplace=True)(4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)(5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(6): ReLU(inplace=True)(7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(8): ReLU(inplace=True)(9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)(10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(11): ReLU(inplace=True)(12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(13): ReLU(inplace=True)(14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(15): ReLU(inplace=True)(16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)(17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(18): ReLU(inplace=True)(19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(20): ReLU(inplace=True)(21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(22): ReLU(inplace=True)(23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)(24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(25): ReLU(inplace=True)(26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(27): ReLU(inplace=True)(28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(29): ReLU(inplace=True)(30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False))(avgpool): AdaptiveAvgPool2d(output_size=(7, 7))(classifier): Sequential((0): Linear(in_features=25088, out_features=4096, bias=True)(1): ReLU(inplace=True)(2): Dropout(p=0.5, inplace=False)(3): Linear(in_features=4096, out_features=4096, bias=True)(4): ReLU(inplace=True)(5): Dropout(p=0.5, inplace=False)(6): Linear(in_features=4096, out_features=1000, bias=True))(add_linear): Linear(in_features=1000, out_features=10, bias=True)
)
# Method 2: replace the last layer of the network
vgg16_false.classifier[6] = nn.Linear(4096,10)
vgg16_false
VGG((features): Sequential((0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(1): ReLU(inplace=True)(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(3): ReLU(inplace=True)(4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)(5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(6): ReLU(inplace=True)(7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(8): ReLU(inplace=True)(9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)(10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(11): ReLU(inplace=True)(12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(13): ReLU(inplace=True)(14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(15): ReLU(inplace=True)(16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)(17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(18): ReLU(inplace=True)(19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(20): ReLU(inplace=True)(21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(22): ReLU(inplace=True)(23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)(24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(25): ReLU(inplace=True)(26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(27): ReLU(inplace=True)(28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(29): ReLU(inplace=True)(30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False))(avgpool): AdaptiveAvgPool2d(output_size=(7, 7))(classifier): Sequential((0): Linear(in_features=25088, out_features=4096, bias=True)(1): ReLU(inplace=True)(2): Dropout(p=0.5, inplace=False)(3): Linear(in_features=4096, out_features=4096, bias=True)(4): ReLU(inplace=True)(5): Dropout(p=0.5, inplace=False)(6): Linear(in_features=4096, out_features=10, bias=True))
)

Saving and Loading Models

# Saving a model, method 1 ==> saves both the model structure and its parameters
torch.save(vgg16_false, "./model_save/vgg16_false_method1.pth")
# Loading a model, method 1
vgg16 = torch.load("./model_save/vgg16_false_method1.pth")
vgg16
VGG((features): Sequential((0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(1): ReLU(inplace=True)(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(3): ReLU(inplace=True)(4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)(5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(6): ReLU(inplace=True)(7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(8): ReLU(inplace=True)(9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)(10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(11): ReLU(inplace=True)(12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(13): ReLU(inplace=True)(14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(15): ReLU(inplace=True)(16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)(17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(18): ReLU(inplace=True)(19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(20): ReLU(inplace=True)(21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(22): ReLU(inplace=True)(23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)(24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(25): ReLU(inplace=True)(26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(27): ReLU(inplace=True)(28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(29): ReLU(inplace=True)(30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False))(avgpool): AdaptiveAvgPool2d(output_size=(7, 7))(classifier): Sequential((0): Linear(in_features=25088, out_features=4096, bias=True)(1): ReLU(inplace=True)(2): Dropout(p=0.5, inplace=False)(3): Linear(in_features=4096, out_features=4096, bias=True)(4): ReLU(inplace=True)(5): Dropout(p=0.5, inplace=False)(6): Linear(in_features=4096, out_features=10, bias=True))
)
# Saving a model, method 2 ==> saves only the model parameters (the state_dict)
torch.save(vgg16_false.state_dict(), "./model_save/vgg16_false_method2.pth")
# Loading a model, method 2 ==> loading the file directly just shows the model parameters
vgg16_2 = torch.load("./model_save/vgg16_false_method2.pth")
vgg16_2
OrderedDict([('features.0.weight',tensor([[[[-0.0168, -0.0325, -0.0205],[-0.0935, -0.0323,  0.0423],[-0.0430,  0.0772, -0.0303]],[[-0.0483,  0.0410, -0.0169],[-0.0571, -0.0028,  0.0750],[ 0.0327, -0.0050,  0.0498]],[[ 0.0227,  0.0167,  0.0986],[-0.0186,  0.0407, -0.0542],[-0.0003, -0.1219, -0.0128]]],[[[ 0.0837,  0.0252,  0.0755],[-0.0458, -0.0566,  0.0238],[-0.0009, -0.0628,  0.0475]],[[-0.0773, -0.0765, -0.0098],[ 0.0113,  0.0285,  0.0100],[ 0.0049,  0.0168, -0.0904]],[[ 0.0113, -0.0757,  0.1109],[ 0.0532, -0.0509,  0.0531],[-0.0180,  0.0739,  0.0259]]],[[[-0.0281,  0.0229,  0.0258],[ 0.0484,  0.1161, -0.0560],[ 0.0428, -0.0409,  0.1285]],[[-0.0033, -0.0482,  0.1041],[ 0.0417,  0.1011, -0.0453],[-0.0313, -0.0538,  0.0357]],[[-0.0417,  0.0482,  0.0200],[-0.0216,  0.1091, -0.0519],[ 0.0002,  0.0265, -0.0066]]],...,[-0.0115,  0.0140,  0.0149,  ...,  0.0089,  0.0155, -0.0049],[ 0.0082,  0.0035,  0.0130,  ...,  0.0147, -0.0052, -0.0023],[ 0.0055,  0.0116,  0.0001,  ...,  0.0019, -0.0021, -0.0098]])),('classifier.6.bias',tensor([-0.0146,  0.0148, -0.0090,  0.0127, -0.0008,  0.0108, -0.0094,  0.0004,0.0076, -0.0084]))])
#The matching network structure must be built first; the saved parameters are then loaded into it
vgg16_false_2 = torchvision.models.vgg16(progress=False)
vgg16_false_2.classifier[6] = nn.Linear(4096,10)
vgg16_false_2.load_state_dict(vgg16_2)
vgg16_false_2
VGG((features): Sequential((0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(1): ReLU(inplace=True)(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(3): ReLU(inplace=True)(4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)(5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(6): ReLU(inplace=True)(7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(8): ReLU(inplace=True)(9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)(10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(11): ReLU(inplace=True)(12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(13): ReLU(inplace=True)(14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(15): ReLU(inplace=True)(16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)(17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(18): ReLU(inplace=True)(19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(20): ReLU(inplace=True)(21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(22): ReLU(inplace=True)(23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)(24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(25): ReLU(inplace=True)(26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(27): ReLU(inplace=True)(28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(29): ReLU(inplace=True)(30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False))(avgpool): AdaptiveAvgPool2d(output_size=(7, 7))(classifier): Sequential((0): Linear(in_features=25088, out_features=4096, bias=True)(1): ReLU(inplace=True)(2): Dropout(p=0.5, inplace=False)(3): Linear(in_features=4096, out_features=4096, bias=True)(4): ReLU(inplace=True)(5): Dropout(p=0.5, inplace=False)(6): Linear(in_features=4096, out_features=10, bias=True))
)
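One practical detail for the M1 workflow (my addition, not in the original notes): torch.load accepts map_location, which controls the device the saved tensors are placed on, so a checkpoint can be loaded on CPU first and then moved to MPS if it is available:

state = torch.load("./model_save/vgg16_false_method2.pth", map_location="cpu")
vgg16_false_2.load_state_dict(state)
vgg16_false_2.to(torch.device("mps") if torch.backends.mps.is_available() else "cpu")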

Model Training Routine - the Complete Training Process

from torch.utils.data import DataLoader
# Load the dataset
dataset_transform = transforms.Compose([transforms.ToTensor()])
train_set = torchvision.datasets.CIFAR10("./dataset", train=True, transform=dataset_transform, download=True)
test_set = torchvision.datasets.CIFAR10("./dataset", train=False, transform=dataset_transform, download=True)
train_data = DataLoader(dataset=train_set, batch_size=64, shuffle=True, num_workers=0, drop_last=False)
test_data = DataLoader(dataset=test_set, batch_size=64, shuffle=True, num_workers=0, drop_last=False)
#learning rate 1e-2 = 0.01
learn_rate = 1e-2
Files already downloaded and verified
Files already downloaded and verified
from torchvision.models import vgg16
#Define the network
#ceshi_vgg16 = vgg16(progress=False)
#ceshi_vgg16.add_module("add_linear",nn.Linear(1000,10))
ceshi_vgg16 = MyNet2()
print(ceshi_vgg16)
MyNet2(
  (cov1): Conv2d(3, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
  (maxpool1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (cov2): Conv2d(32, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
  (maxpool2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (cov3): Conv2d(32, 64, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
  (maxpool3): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (flatten): Flatten(start_dim=1, end_dim=-1)
  (liner1): Linear(in_features=1024, out_features=64, bias=True)
  (liner2): Linear(in_features=64, out_features=10, bias=True)
)
import time
start = time.time()
#Define the optimizer
optimizer = torch.optim.SGD(ceshi_vgg16.parameters(),lr=learn_rate)
#Define the loss function
loss = nn.CrossEntropyLoss()
#Number of epochs
epochs = 5
#Start the training loop
for epoch in range(epochs):
    print(f"Epoch {epoch + 1} starting....")
    epoch_train_loss = 0
    epoch_test_loss = 0
    # If the network contains layers such as dropout, call ceshi_vgg16.train() first ==> ceshi_vgg16 is your model
    ceshi_vgg16.train()
    for data in train_data:
        inputs, targets = data
        outputs = ceshi_vgg16(inputs)
        train_loss = loss(outputs, targets)
        # zero the gradients
        optimizer.zero_grad()
        # backpropagation
        train_loss.backward()
        # update the parameters
        optimizer.step()
        epoch_train_loss += train_loss
    # If the network contains layers such as dropout, call ceshi_vgg16.eval() before evaluating ==> ceshi_vgg16 is your model
    ceshi_vgg16.eval()
    with torch.no_grad():
        for data in test_data:
            test_inputs, test_targets = data
            test_outputs = ceshi_vgg16(test_inputs)
            test_loss = loss(test_outputs, test_targets)
            epoch_test_loss += test_loss
    print(f"Epoch {epoch + 1}: training loss {epoch_train_loss}, test loss {epoch_test_loss}")
end = time.time()
print(f"CPU training time: {end - start}")
Epoch 1 starting....
Epoch 1: training loss 1688.6795654296875, test loss 303.96173095703125
Epoch 2 starting....
Epoch 2: training loss 1426.980712890625, test loss 268.0997314453125
Epoch 3 starting....
Epoch 3: training loss 1285.7080078125, test loss 254.33773803710938
Epoch 4 starting....
Epoch 4: training loss 1196.3980712890625, test loss 241.3218231201172
Epoch 5 starting....
Epoch 5: training loss 1128.6549072265625, test loss 231.64186096191406
CPU training time: 326.99496698379517
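The loop above only tracks the loss. A common extension (a sketch I'm adding, not part of the recorded run) is to also report test-set accuracy by taking the argmax over the 10 class scores:

total_correct = 0
ceshi_vgg16.eval()
with torch.no_grad():
    for test_inputs, test_targets in test_data:
        test_outputs = ceshi_vgg16(test_inputs)
        # predicted class = index of the largest score in each row
        total_correct += (test_outputs.argmax(dim=1) == test_targets).sum().item()
print(f"Test accuracy: {total_correct / len(test_set)}")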

GPU (MPS) Training

from torchvision.models import vgg16
#Define the network
#ceshi_vgg16 = vgg16(progress=False)
#ceshi_vgg16.add_module("add_linear",nn.Linear(1000,10))
ceshi_vgg16 = MyNet2()
print(ceshi_vgg16)
MyNet2(
  (cov1): Conv2d(3, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
  (maxpool1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (cov2): Conv2d(32, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
  (maxpool2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (cov3): Conv2d(32, 64, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
  (maxpool3): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (flatten): Flatten(start_dim=1, end_dim=-1)
  (liner1): Linear(in_features=1024, out_features=64, bias=True)
  (liner2): Linear(in_features=64, out_features=10, bias=True)
)
# Get the Mac M1 chip (the GPU) as the target device
device=torch.device("mps")
import time
start = time.time()
#Define the optimizer
optimizer = torch.optim.SGD(ceshi_vgg16.parameters(),lr=learn_rate)
#Define the loss function
loss = nn.CrossEntropyLoss()
#Move the loss function to the GPU
loss = loss.to(device)
#Number of epochs
epochs = 20
#Move the network model to the GPU
ceshi_vgg16 = ceshi_vgg16.to(device)
#Start the training loop
for epoch in range(epochs):
    print(f"Epoch {epoch + 1} starting....")
    epoch_train_loss = 0
    epoch_test_loss = 0
    # If the network contains layers such as dropout, call ceshi_vgg16.train() first ==> ceshi_vgg16 is your model
    ceshi_vgg16.train()
    for data in train_data:
        inputs, targets = data
        # move the data to the GPU
        inputs = inputs.to(device)
        targets = targets.to(device)
        outputs = ceshi_vgg16(inputs)
        train_loss = loss(outputs, targets)
        # zero the gradients
        optimizer.zero_grad()
        # backpropagation
        train_loss.backward()
        # update the parameters
        optimizer.step()
        epoch_train_loss += train_loss
    # If the network contains layers such as dropout, call ceshi_vgg16.eval() before evaluating ==> ceshi_vgg16 is your model
    ceshi_vgg16.eval()
    with torch.no_grad():
        for data in test_data:
            test_inputs, test_targets = data
            # move the data to the GPU
            test_inputs = test_inputs.to(device)
            test_targets = test_targets.to(device)
            test_outputs = ceshi_vgg16(test_inputs)
            test_loss = loss(test_outputs, test_targets)
            epoch_test_loss += test_loss
    print(f"Epoch {epoch + 1}: training loss {epoch_train_loss}, test loss {epoch_test_loss}")
end = time.time()
print(f"GPU training time: {end - start}")
Epoch 1 starting....
Epoch 1: training loss 820.5022583007812, test loss 189.2407684326172
Epoch 2 starting....
Epoch 2: training loss 790.6588134765625, test loss 175.98353576660156
Epoch 3 starting....
Epoch 3: training loss 762.98876953125, test loss 179.2063751220703
Epoch 4 starting....
Epoch 4: training loss 739.4859008789062, test loss 169.62254333496094
Epoch 5 starting....
Epoch 5: training loss 716.714599609375, test loss 198.77293395996094
Epoch 6 starting....
Epoch 6: training loss 695.8759155273438, test loss 165.21372985839844
Epoch 7 starting....
Epoch 7: training loss 678.6303100585938, test loss 169.66461181640625
Epoch 8 starting....
Epoch 8: training loss 657.269775390625, test loss 179.4607391357422
Epoch 9 starting....
Epoch 9: training loss 639.9208984375, test loss 164.27601623535156
Epoch 10 starting....
Epoch 10: training loss 623.4369506835938, test loss 166.43560791015625
Epoch 11 starting....
Epoch 11: training loss 606.757568359375, test loss 189.4309539794922
Epoch 12 starting....
Epoch 12: training loss 587.788330078125, test loss 181.8220672607422
Epoch 13 starting....
Epoch 13: training loss 572.0140380859375, test loss 151.99435424804688
Epoch 14 starting....
Epoch 14: training loss 556.7083129882812, test loss 174.7952880859375
Epoch 15 starting....
Epoch 15: training loss 544.226318359375, test loss 173.84246826171875
Epoch 16 starting....
Epoch 16: training loss 527.2423095703125, test loss 165.7640380859375
Epoch 17 starting....
Epoch 17: training loss 512.9745483398438, test loss 173.00523376464844
Epoch 18 starting....
Epoch 18: training loss 501.0533142089844, test loss 196.33889770507812
Epoch 19 starting....
Epoch 19: training loss 487.0603332519531, test loss 167.21218872070312
Epoch 20 starting....
Epoch 20: training loss 473.097412109375, test loss 173.1332550048828
GPU training time: 124.73064494132996
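For this small network the speedup is clear: roughly 65 s per epoch on CPU (326.99 s / 5) versus about 6 s per epoch on MPS (124.73 s / 20). To keep the trained weights, the state_dict approach from the saving/loading section applies unchanged (a sketch; the ./model_save folder is assumed to exist):

torch.save(ceshi_vgg16.state_dict(), "./model_save/mynet2_mps.pth")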
