1. Initialize model parameters
import torch
from IPython import display
from d2l import torch as d2l
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
num_inputs = 784
num_outputs = 10
W = torch.normal(0, 0.01, size=(num_inputs, num_outputs), requires_grad=True)
b = torch.zeros(num_outputs, requires_grad=True)
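A quick sanity check (not in the original, shown as an assumed sketch): each Fashion-MNIST image is 28 x 28 = 784 pixels flattened into a vector, and there are 10 classes, so the shapes should line up as follows.

X, y = next(iter(train_iter))
print(X.shape, y.shape)  # torch.Size([256, 1, 28, 28]) torch.Size([256])
print(W.shape, b.shape)  # torch.Size([784, 10]) torch.Size([10])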
2. Define the model
def softmax(X):
    X_exp = torch.exp(X)
    partition = X_exp.sum(1, keepdim=True)  # sum over each row
    return X_exp / partition  # broadcasting divides each row by its sum
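A quick check with toy values: every row of the output is nonnegative and sums to 1, i.e. each row is a valid probability distribution.

Z = torch.normal(0, 1, (2, 5))
Z_prob = softmax(Z)
print(Z_prob)         # two rows of probabilities
print(Z_prob.sum(1))  # tensor([1., 1.]): each row sums to 1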
Note that although this looks mathematically correct, the implementation is a bit careless: very large or very small entries in the matrix can cause numerical overflow or underflow, and we take no measures here to guard against that.
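A common remedy, not used in this note and shown only as a sketch, is to subtract each row's maximum before exponentiating; this leaves the result mathematically unchanged but keeps torch.exp from overflowing on large logits.

def stable_softmax(X):
    # exp(x - c) / sum(exp(x - c)) == exp(x) / sum(exp(x)) for any constant c,
    # so subtracting the row-wise max changes nothing mathematically
    X = X - X.max(1, keepdim=True).values
    X_exp = torch.exp(X)
    return X_exp / X_exp.sum(1, keepdim=True)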
def net(X):
    # Flatten each image into a row vector of length W.shape[0] = 784,
    # apply the affine transformation, then map the scores through softmax
    return softmax(torch.matmul(X.reshape((-1, W.shape[0])), W) + b)
The linear model first produces raw scores, and softmax then maps them into (0, 1) so that each row forms a probability distribution over the classes. This is the forward pass.
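A minimal forward-pass check (assumed sketch, not in the original): one class distribution per image in the batch.

X, y = next(iter(train_iter))
print(net(X).shape)  # torch.Size([256, 10])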
3. Define the loss function
def cross_entropy(y_hat, y):
    # Fancy indexing: for row i, pick the predicted probability of the
    # true class y[i], then take the negative log
    return - torch.log(y_hat[range(len(y_hat)), y])
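A small worked example of the fancy indexing (toy values): y_hat[range(len(y_hat)), y] selects, for each row, the probability assigned to the true class.

y = torch.tensor([0, 2])
y_hat = torch.tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])
print(y_hat[[0, 1], y])         # tensor([0.1000, 0.5000])
print(cross_entropy(y_hat, y))  # tensor([2.3026, 0.6931]), i.e. -log of the above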
4. Gradient descent
The core job of updater is to update the model parameters.
lr = 0.1
def updater(batch_size):
    return d2l.sgd([W, b], lr, batch_size)
def sgd(params, lr, batch_size): #@save
    """Minibatch stochastic gradient descent"""
    with torch.no_grad():
        for param in params:
            param -= lr * param.grad / batch_size
            param.grad.zero_()
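A minimal check of the update rule with hypothetical toy values (the gradient is set by hand instead of by backward(), purely for illustration):

p = torch.tensor([1.0, 2.0], requires_grad=True)
p.grad = torch.tensor([4.0, 8.0])  # pretend backward() filled this in
sgd([p], lr=0.1, batch_size=4)
print(p)  # tensor([0.9000, 1.8000], requires_grad=True): p -= 0.1 * grad / 4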
5. Training functions
Single-epoch version:
def train_epoch_ch3(net, train_iter, loss, updater): #@save
    """Train the model for one epoch (defined in Chapter 3)"""
    # Set the model to training mode
    if isinstance(net, torch.nn.Module):
        net.train()
    # Sum of training loss, sum of training accuracy, number of examples
    metric = Accumulator(3)
    for X, y in train_iter:
        # Compute gradients and update the parameters
        y_hat = net(X)
        l = loss(y_hat, y)
        if isinstance(updater, torch.optim.Optimizer):
            # Using PyTorch's built-in optimizer and loss function:
            # no batch_size here, we backpropagate the mean
            updater.zero_grad()
            l.mean().backward()
            updater.step()
        else:
            # Using our custom optimizer and loss function: just pass
            # batch_size, sgd divides by it, so backpropagating the sum is enough
            l.sum().backward()
            updater(X.shape[0])
        metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())
    # Return the training loss and training accuracy
    return metric[0] / metric[2], metric[1] / metric[2]
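The two branches produce the same parameter update: backpropagating l.mean() equals backpropagating l.sum() and letting sgd divide the gradient by batch_size. A minimal check with assumed toy values:

l = torch.tensor([2.0, 4.0, 6.0])      # pretend per-example losses
print(l.mean(), l.sum() / l.shape[0])  # tensor(4.) tensor(4.): identical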
Multi-epoch version:
def train_ch3(net, train_iter, test_iter, loss, num_epochs, updater): #@save
    """Train a model (defined in Chapter 3)"""
    # d2l.Animator plots the three curves live during training
    animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], ylim=[0.3, 0.9],
                            legend=['train loss', 'train acc', 'test acc'])
    for epoch in range(num_epochs):
        train_metrics = train_epoch_ch3(net, train_iter, loss, updater)
        test_acc = evaluate_accuracy(net, test_iter)
        # Extend the original two-element metrics tuple to a three-element one
        animator.add(epoch + 1, train_metrics + (test_acc,))
    train_loss, train_acc = train_metrics
    assert train_loss < 0.5, train_loss
    assert train_acc <= 1 and train_acc > 0.7, train_acc
    assert test_acc <= 1 and test_acc > 0.7, test_acc
6. Training
num_epochs = 10
train_ch3(net, train_iter, test_iter, cross_entropy, num_epochs, updater)
7. Prediction
def predict_ch3(net, test_iter, n=6): #@save
    """Predict labels (defined in Chapter 3)"""
    for X, y in test_iter:
        break  # take only the first test batch
    trues = d2l.get_fashion_mnist_labels(y)
    preds = d2l.get_fashion_mnist_labels(net(X).argmax(axis=1))
    titles = [true + '\n' + pred for true, pred in zip(trues, preds)]
    d2l.show_images(X[0:n].reshape((n, 28, 28)), 1, n, titles=titles[0:n])
predict_ch3(net, test_iter)
Appendix: computing classification accuracy
If y_hat is a matrix, we assume its second dimension stores the prediction scores for each class. argmax then gives the index of the largest entry in each row as the predicted class, and comparing against y yields the number of correct predictions.
def accuracy(y_hat, y): #@save
    """Count the number of correct predictions"""
    if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:
        y_hat = y_hat.argmax(axis=1)
    cmp = y_hat.type(y.dtype) == y
    return float(cmp.type(y.dtype).sum())
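A worked example with the toy tensors from above: row 0 predicts class 2 but the true label is 0 (wrong), row 1 predicts class 2 and the true label is 2 (right), so the accuracy rate is 0.5.

y = torch.tensor([0, 2])
y_hat = torch.tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])
print(accuracy(y_hat, y) / len(y))  # 0.5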
The utility class Accumulator accumulates sums over multiple variables. In evaluate_accuracy below, the Accumulator instance holds two variables: the number of correct predictions and the total number of predictions. Both accumulate as we iterate over the dataset.
class Accumulator: #@save
    """Accumulate sums over n variables"""
    def __init__(self, n):
        self.data = [0.0] * n

    def add(self, *args):
        # zip walks the two sequences in parallel: a is the running total,
        # b is the value to add, e.g. [1, 1] + [1, 2] -> [2, 3]
        self.data = [a + float(b) for a, b in zip(self.data, args)]

    def reset(self):
        self.data = [0.0] * len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]
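A short usage example (assumed values, for illustration):

metric = Accumulator(2)
metric.add(1, 2)             # running totals: [1.0, 2.0]
metric.add(3, 4)             # running totals: [4.0, 6.0]
print(metric[0], metric[1])  # 4.0 6.0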
Evaluating the accuracy of any model net:
def evaluate_accuracy(net, data_iter): #@save
    """Compute the model's accuracy on the given dataset"""
    if isinstance(net, torch.nn.Module):
        net.eval()  # set the model to evaluation mode
    metric = Accumulator(2)  # number of correct predictions, total predictions
    with torch.no_grad():
        for X, y in data_iter:
            metric.add(accuracy(net(X), y), y.numel())
    return metric[0] / metric[1]
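Before training, a quick check (assumed sketch): with randomly initialized weights the model guesses essentially at random, so the accuracy should be close to 1/10 for 10 classes (the exact value varies with the seed).

print(evaluate_accuracy(net, test_iter))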
Concise implementation with high-level APIs
1. Initialize model parameters
from torch import nn

net = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))

def init_weights(m):
    if type(m) == nn.Linear:
        nn.init.normal_(m.weight, std=0.01)
net.apply(init_weights);

2. Loss function
loss = nn.CrossEntropyLoss(reduction='none')
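With reduction='none' the criterion returns one loss per example instead of a scalar, which is what train_epoch_ch3 expects before calling l.mean(). A minimal shape check with assumed toy logits:

logits = torch.tensor([[0.0, 1.0], [2.0, 0.0]])
targets = torch.tensor([1, 0])
print(loss(logits, targets).shape)  # torch.Size([2]): one loss value per example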
3. Optimization algorithm

trainer = torch.optim.SGD(net.parameters(), lr=0.1)

4. Training
num_epochs = 10
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)