本文目标
- 分享李沐老师关于深度学习的观点:1⃣️从实践的角度入手深度学习可能比单纯地研究算法更好;2⃣️如果想学习深度学习,要只用简单的数据结构,譬如numpy、NDArray,从0实现一个深度学习算法,这样才能碰到进而解决深度学习中的许多核心问题,也可以更好地理解现在流行的框架;3⃣️从应用的角度,那就直接上现成的框架,结合真实数据不断练习,调得一手好参;
- 结合李航《统计学习方法》中的观点,总结出机器学习(深度学习)的一般代码框架,具体看代码。
机器学习的一般框架
从0实现版
import d2lzh as d2l
from mxnet import nd
from mxnet import autograd
batch_size = 256
# d2l helper: returns mini-batch iterators over Fashion-MNIST train/test sets.
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)

# Network dimensions: 784-d flattened 28x28 images, two hidden layers of
# 256 units each, 10 output classes.
num_inputs, num_hiddens1, num_hiddens2, num_outputs = 784, 256, 256, 10

# Weights drawn from N(0, 0.01^2); biases start at zero.
W1 = nd.random.normal(scale=0.01, shape=(num_inputs, num_hiddens1))
b1 = nd.zeros(num_hiddens1)
W2 = nd.random.normal(scale=0.01, shape=(num_hiddens1, num_hiddens2))
b2 = nd.zeros(num_hiddens2)
W3 = nd.random.normal(scale=0.01, shape=(num_hiddens2, num_outputs))
b3 = nd.zeros(num_outputs)

params = [W1, b1, W2, b2, W3, b3]
for param in params:
    # Allocate gradient storage so autograd.record()/backward() can fill it.
    param.attach_grad()


def relu(X):
    """Element-wise rectified linear unit: max(X, 0)."""
    return nd.maximum(X, 0)


def softmax(X):
    """Row-wise softmax over the last axis.

    NOTE(review): plain exp() without subtracting the row max can overflow
    for large logits; tolerable here given the tiny initial weights.
    """
    X_exp = X.exp()
    partition = X_exp.sum(axis=1, keepdims=True)
    return X_exp / partition


def net(X):
    """Forward pass of the 3-layer MLP: flatten -> relu -> relu -> softmax.

    BUG FIX: the original returned softmax(H2), never applying the output
    layer (W3, b3) — predictions had num_hiddens2 (256) columns instead of
    num_outputs (10), even though W3/b3 were in `params` receiving updates.
    """
    X = X.reshape((-1, num_inputs))
    H1 = relu(nd.dot(X, W1) + b1)
    H2 = relu(nd.dot(H1, W2) + b2)
    return softmax(nd.dot(H2, W3) + b3)
def cross_entropy(y_hat, y):
    """Per-example cross-entropy loss.

    Selects the predicted probability of each true label `y` from the
    rows of `y_hat` and returns its negative log.
    """
    picked = nd.pick(y_hat, y)
    return -picked.log()


# The training loop refers to the loss under the generic name `loss`.
loss = cross_entropy
def sgd(params, lr, batch_size):
    """Mini-batch stochastic gradient descent, updating in place.

    Each parameter p is updated as p <- p - lr * p.grad / batch_size;
    gradients are assumed to hold the *sum* over the mini-batch, so
    dividing by batch_size averages them.
    """
    for p in params:
        p[:] = p - lr * p.grad / batch_size
def evaluate_accuracy(data_iter, net):
    """Fraction of correctly classified examples over `data_iter`."""
    num_correct, num_seen = 0.0, 0
    for X, y in data_iter:
        y = y.astype('float32')
        num_correct += (net(X).argmax(axis=1) == y).sum().asscalar()
        num_seen += y.size
    return num_correct / num_seen


def train(net, train_iter, test_iter, loss, num_epochs, batch_size, params, lr):
    """Train `net` with plain SGD, printing loss/accuracy each epoch."""
    for epoch in range(num_epochs):
        loss_total, correct_total, seen = 0.0, 0.0, 0
        for X, y in train_iter:
            # Record the forward pass so backward() can compute gradients.
            with autograd.record():
                y_hat = net(X)
                l = loss(y_hat, y).sum()
            l.backward()
            sgd(params, lr, batch_size)
            y = y.astype('float32')
            loss_total += l.asscalar()
            correct_total += (y_hat.argmax(axis=1) == y).sum().asscalar()
            seen += y.size
        test_acc = evaluate_accuracy(test_iter, net)
        print('epoch: %d, loss %.4f, train_acc %.3f, test_acc %.3f'
              % (epoch + 1, loss_total / seen, correct_total / seen, test_acc))


num_epochs, lr = 10, 0.3
# Run the full training loop with the hyper-parameters defined above.
train(net, train_iter, test_iter, loss, num_epochs, batch_size,
      params, lr)

if __name__ == '__main__':
    print('------ok-------')
- 说明:代码中还是使用了d2l.load_data_fashion_mnist来加载图片数据,有时间把这个也替换掉,用NDArray实现;
mxnet框架版
import d2lzh as d2l
from mxnet import gluon, init
from mxnet.gluon import loss as gloss, nn
batch_size = 256
# d2l helper: returns mini-batch iterators over Fashion-MNIST train/test sets.
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)

# Same 784-256-256-10 MLP as the from-scratch version, via Gluon layers.
net = nn.Sequential()
net.add(
    nn.Dense(256, activation='relu'),
    nn.Dense(256, activation='relu'),
    nn.Dense(10),
)
net.initialize(init.Normal(sigma=0.01))

# Fused softmax + cross-entropy loss (numerically stable).
loss = gloss.SoftmaxCrossEntropyLoss()

lr = 0.3
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})

num_epochs = 10
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,
              None, None, trainer)

if __name__ == '__main__':
    print('-----ok------')
参考资料