正常方法 (Normal method: baseline training on the standard MNIST train/test split)
#---------------------------------Torch Modules --------------------------------------------------------
from __future__ import print_function
import numpy as np
import pandas as pd
import torch.nn as nn
import math
import torch.nn.functional as F
import torch
import torchvision
from torch.nn import init
import torch.optim as optim
from torchvision import datasets, transforms
from torchvision import models
import torch.nn.functional as F
from torch.utils import data
import matplotlib.pyplot as plt
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
###-----------------------------------variables-----------------------------------------------
# Per-channel mean/std for input normalization (MNIST is single-channel).
mean = [0.5]
std = [0.5]
# Number of samples per mini-batch.
batch_size =128
epoch = 1 # number of training epochs
lr = 0.01  # learning rate for SGD
##-----------------------------------Commands to download and prepare the MNIST dataset ------------------------------------
# Transforms: convert PIL images to tensors, then normalize with the mean/std above.
train_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
test_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
# Train-set loader (downloads MNIST on first use); each batch is (128, 1, 28, 28).
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./mnist', train=True, download=True, transform=train_transform),
    batch_size=batch_size, shuffle=True)
# Test-set loader; no shuffling so evaluation order is stable.
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./mnist', train=False, transform=test_transform),
    batch_size=batch_size, shuffle=False)
#visualization
def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):
    """Plot a list of images on a num_rows x num_cols grid and return the axes.

    imgs may contain torch tensors or PIL images; `scale` sets per-cell figure size.
    """
    figsize = (num_cols * scale, num_rows * scale)
    _, axes = plt.subplots(num_rows, num_cols, figsize=figsize)
    axes = axes.flatten()
    for i, (ax, img) in enumerate(zip(axes, imgs)):
        if torch.is_tensor(img):
            # Tensor image: convert to numpy for imshow.
            ax.imshow(img.numpy())
        else:
            # PIL image can be passed to imshow directly.
            ax.imshow(img)
        # Hide axis ticks — these are image thumbnails, not charts.
        ax.axes.get_xaxis().set_visible(False)
        ax.axes.get_yaxis().set_visible(False)
        if titles:
            ax.set_title(titles[i])
    return axes
mnist_train = torchvision.datasets.MNIST(root="../data", train=True,# load the MNIST training split for visualization
transform=train_transform,
download=True)
# Take one batch of 18 images and display them in a 2x9 grid.
X, y = next(iter(data.DataLoader(mnist_train, batch_size=18)))
show_images(X.reshape(18, 28, 28), 2, 9)
model = nn.Sequential(nn.Flatten(), nn.Linear(784, 100), nn.ReLU(),nn.Linear(100, 10))# 28*28 images are flattened to 784 features; hidden width 100 is an arbitrary choice; 10 output classes
def init_weights(m):
    """Weight initializer for model.apply(): draw Linear weights from N(0, 0.01).

    Non-Linear modules (Flatten, ReLU, ...) are left untouched.
    """
    # isinstance instead of `type(m) ==` so Linear subclasses are covered too.
    if isinstance(m, nn.Linear):
        nn.init.normal_(m.weight, std=0.01)
model.apply(init_weights);# apply the weight initializer to every submodule
criterion = torch.nn.CrossEntropyLoss() # cross-entropy is the standard loss for multi-class classification
# Optimizer: plain stochastic gradient descent over all model parameters.
optimizer = torch.optim.SGD(model.parameters(),lr)
# defining the training function
# Train baseline classifier on clean data
def train(model, optimizer, criterion, epoch, data_loader=None):
    """Run one epoch of training, printing the loss every 100 batches.

    data_loader defaults to the module-level train_loader for backward
    compatibility; pass a loader explicitly to train on other data.
    """
    loader = train_loader if data_loader is None else data_loader
    model.train()  # training mode (affects dropout/batchnorm if present)
    for batch_idx, (data, target) in enumerate(loader):
        # Flatten images: (batch, 1, 28, 28) -> (batch, 784) for the MLP.
        # (The original also called .requires_grad_() on the input; input
        # gradients are never used, so that has been dropped.)
        data = data.view(-1, 28 * 28)
        optimizer.zero_grad()              # clear gradients from the previous step
        output = model(data)               # forward pass
        loss = criterion(output, target)   # loss computation
        loss.backward()                    # backpropagation
        optimizer.step()                   # parameter update
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(loader.dataset),
                100. * batch_idx / len(loader), loss.item()))
# to evaluate the model
## validation of test accuracy
def test(model, criterion, val_loader, epoch, train=False):
    """Evaluate `model` on `val_loader`; print loss/accuracy and return accuracy in %.

    `train=True` only changes the printed label ("Train set" vs "Test set").
    `epoch` is accepted for call-site symmetry with train() but unused here.
    """
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():  # no gradients needed for evaluation
        for batch_idx, (data, target) in enumerate(val_loader):
            data = data.view(-1, 28 * 28)  # flatten images for the MLP
            output = model(data)
            test_loss += criterion(output, target).item()  # sum up batch loss
            pred = output.argmax(1, keepdim=True)  # index of max logit = predicted class
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(val_loader.dataset)  # average loss per sample
    n = len(val_loader.sampler)  # number of evaluated samples
    label = 'Train' if train else 'Test'
    print('\n{} set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)\n'.format(
        label, test_loss, correct, n, 100. * correct / n))
    return 100. * correct / n
test_acc = torch.zeros([epoch])   # per-epoch accuracy on the test set
train_acc = torch.zeros([epoch])  # per-epoch accuracy on the training set
## training the logistic model
for i in range(epoch):
    train(model, optimizer, criterion, i)
    train_acc[i] = test(model, criterion, train_loader, i, train=True)  # accuracy on the training data
    test_acc[i] = test(model, criterion, test_loader, i)                # accuracy on the held-out test data
# Persist the trained weights; exist_ok avoids the explicit existence check.
os.makedirs('./saved_model', exist_ok=True)
torch.save(model.state_dict(), './saved_model/model_normal.bin')
Train Epoch: 0 [0/60000 (0%)] Loss: 0.314632
Train Epoch: 0 [12800/60000 (21%)] Loss: 0.283743
Train Epoch: 0 [25600/60000 (43%)] Loss: 0.229258
Train Epoch: 0 [38400/60000 (64%)] Loss: 0.219923
Train Epoch: 0 [51200/60000 (85%)] Loss: 0.187836
Train set: Average loss: 0.0020, Accuracy: 55673/60000 (92.7883%)
Test set: Average loss: 0.0020, Accuracy: 9284/10000 (92.8400%)
K折交叉验证 (K-fold cross-validation)
#!pip install sklearn -i https://pypi.mirrors.ustc.edu.cn/simple
from sklearn.model_selection import KFold
train_init = datasets.MNIST('./mnist', train=True, transform=train_transform)
test_init = datasets.MNIST('./mnist', train=False, transform=test_transform)
# Pool the original train and test splits so K-fold CV can resample across both.
dataFold = torch.utils.data.ConcatDataset([train_init, test_init])

def train_flod_Mnist(k_split_value):
    """Run k-fold cross-validation over the pooled MNIST dataset.

    For each of the k folds, trains for `epoch` epochs and records the fold's
    test accuracy; returns a list with one accuracy array per fold.
    (The name keeps the original 'flod' typo because callers use it.)
    """
    different_k_mse = []
    kf = KFold(n_splits=k_split_value, shuffle=True, random_state=2024)  # fixed seed for reproducible splits
    for train_index, test_index in kf.split(dataFold):
        # Fold-specific subsets and loaders.
        train_fold = torch.utils.data.dataset.Subset(dataFold, train_index)
        test_fold = torch.utils.data.dataset.Subset(dataFold, test_index)
        train_loader = torch.utils.data.DataLoader(dataset=train_fold, batch_size=batch_size, shuffle=True)
        test_loader = torch.utils.data.DataLoader(dataset=test_fold, batch_size=batch_size, shuffle=True)
        # NOTE(review): train() iterates the module-level train_loader, so this
        # local train_loader is never used for training (the run logs show
        # [0/60000 ...], the original train-set size, not the fold size 63000);
        # only evaluation below uses the fold loaders. Also, `model` is not
        # re-initialized between folds, so accuracy accumulates across folds.
        # Confirm whether this is intended.
        test_acc = torch.zeros([epoch])   # record per-epoch accuracies for this fold
        train_acc = torch.zeros([epoch])
        for i in range(epoch):
            train(model, optimizer, criterion, i)
            train_acc[i] = test(model, criterion, train_loader, i, train=True)
            test_acc[i] = test(model, criterion, test_loader, i)
        different_k_mse.append(np.array(test_acc))
    return different_k_mse
testAcc_compare_map = {}  # maps k -> list of per-fold test-accuracy arrays
# Only k = 10 is evaluated here; widen the range to compare several k values.
for k_split_value in range(10, 10 + 1):
    print('now k_split_value is:', k_split_value)
    testAcc_compare_map[k_split_value] = train_flod_Mnist(k_split_value)
now k_split_value is: 10
tensor([0.])
Train Epoch: 0 [0/60000 (0%)] Loss: 0.187658
Train Epoch: 0 [12800/60000 (21%)] Loss: 0.191175
Train Epoch: 0 [25600/60000 (43%)] Loss: 0.166395
Train Epoch: 0 [38400/60000 (64%)] Loss: 0.216365
Train Epoch: 0 [51200/60000 (85%)] Loss: 0.156119
Train set: Average loss: 0.0015, Accuracy: 59648/63000 (94.6794%)
Test set: Average loss: 0.0018, Accuracy: 6550/7000 (93.5714%)
tensor([0.])
Train Epoch: 0 [0/60000 (0%)] Loss: 0.220123
Train Epoch: 0 [12800/60000 (21%)] Loss: 0.135370
Train Epoch: 0 [25600/60000 (43%)] Loss: 0.132626
Train Epoch: 0 [38400/60000 (64%)] Loss: 0.258545
Train Epoch: 0 [51200/60000 (85%)] Loss: 0.181543
Train set: Average loss: 0.0015, Accuracy: 59698/63000 (94.7587%)
Test set: Average loss: 0.0015, Accuracy: 6622/7000 (94.6000%)
tensor([0.])
Train Epoch: 0 [0/60000 (0%)] Loss: 0.263651
Train Epoch: 0 [12800/60000 (21%)] Loss: 0.204342
Train Epoch: 0 [25600/60000 (43%)] Loss: 0.141438
Train Epoch: 0 [38400/60000 (64%)] Loss: 0.137843
Train Epoch: 0 [51200/60000 (85%)] Loss: 0.217006
Train set: Average loss: 0.0014, Accuracy: 59751/63000 (94.8429%)
Test set: Average loss: 0.0014, Accuracy: 6639/7000 (94.8429%)
tensor([0.])
Train Epoch: 0 [0/60000 (0%)] Loss: 0.166842
Train Epoch: 0 [12800/60000 (21%)] Loss: 0.096612
Train Epoch: 0 [25600/60000 (43%)] Loss: 0.230569
Train Epoch: 0 [38400/60000 (64%)] Loss: 0.109163
Train Epoch: 0 [51200/60000 (85%)] Loss: 0.101741
Train set: Average loss: 0.0014, Accuracy: 59757/63000 (94.8524%)
Test set: Average loss: 0.0013, Accuracy: 6669/7000 (95.2714%)
tensor([0.])
Train Epoch: 0 [0/60000 (0%)] Loss: 0.113768
Train Epoch: 0 [12800/60000 (21%)] Loss: 0.202454
Train Epoch: 0 [25600/60000 (43%)] Loss: 0.119112
Train Epoch: 0 [38400/60000 (64%)] Loss: 0.116779
Train Epoch: 0 [51200/60000 (85%)] Loss: 0.376868
Train set: Average loss: 0.0014, Accuracy: 59879/63000 (95.0460%)
Test set: Average loss: 0.0013, Accuracy: 6682/7000 (95.4571%)
tensor([0.])
Train Epoch: 0 [0/60000 (0%)] Loss: 0.100557
Train Epoch: 0 [12800/60000 (21%)] Loss: 0.189366
Train Epoch: 0 [25600/60000 (43%)] Loss: 0.174508
Train Epoch: 0 [38400/60000 (64%)] Loss: 0.104910
Train Epoch: 0 [51200/60000 (85%)] Loss: 0.146227
Train set: Average loss: 0.0013, Accuracy: 59914/63000 (95.1016%)
Test set: Average loss: 0.0013, Accuracy: 6652/7000 (95.0286%)
tensor([0.])
Train Epoch: 0 [0/60000 (0%)] Loss: 0.103640
Train Epoch: 0 [12800/60000 (21%)] Loss: 0.179051
Train Epoch: 0 [25600/60000 (43%)] Loss: 0.138919
Train Epoch: 0 [38400/60000 (64%)] Loss: 0.214437
Train Epoch: 0 [51200/60000 (85%)] Loss: 0.127463
Train set: Average loss: 0.0013, Accuracy: 59986/63000 (95.2159%)
Test set: Average loss: 0.0013, Accuracy: 6674/7000 (95.3429%)
tensor([0.])
Train Epoch: 0 [0/60000 (0%)] Loss: 0.154551
Train Epoch: 0 [12800/60000 (21%)] Loss: 0.157627
Train Epoch: 0 [25600/60000 (43%)] Loss: 0.163700
Train Epoch: 0 [38400/60000 (64%)] Loss: 0.148417
Train Epoch: 0 [51200/60000 (85%)] Loss: 0.130215
Train set: Average loss: 0.0013, Accuracy: 60056/63000 (95.3270%)
Test set: Average loss: 0.0013, Accuracy: 6685/7000 (95.5000%)
tensor([0.])
Train Epoch: 0 [0/60000 (0%)] Loss: 0.146108
Train Epoch: 0 [12800/60000 (21%)] Loss: 0.205999
Train Epoch: 0 [25600/60000 (43%)] Loss: 0.115849
Train Epoch: 0 [38400/60000 (64%)] Loss: 0.222786
Train Epoch: 0 [51200/60000 (85%)] Loss: 0.178309
Train set: Average loss: 0.0012, Accuracy: 60162/63000 (95.4952%)
Test set: Average loss: 0.0013, Accuracy: 6683/7000 (95.4714%)
tensor([0.])
Train Epoch: 0 [0/60000 (0%)] Loss: 0.240678
Train Epoch: 0 [12800/60000 (21%)] Loss: 0.234599
Train Epoch: 0 [25600/60000 (43%)] Loss: 0.183265
Train Epoch: 0 [38400/60000 (64%)] Loss: 0.148125
Train Epoch: 0 [51200/60000 (85%)] Loss: 0.168119
Train set: Average loss: 0.0012, Accuracy: 60174/63000 (95.5143%)
Test set: Average loss: 0.0012, Accuracy: 6716/7000 (95.9429%)