- Contents: 1. Layer shape calculations; 2. Overall training flow; 3. Plotting with TensorBoard; 4. Saving/loading the model; 5. Complete code (with detailed comments); 6. CNN version of MNIST
1. Layer shape calculations
The MNIST training set provides 28 × 28 × 1 grayscale images of handwritten digits. Since we don't know in advance how many samples will be fed per training step, the input has shape [None, 784]. Each label is a one-hot vector over the 10 digit classes, so the output has shape [None, 10]. In between sits a single simple fully connected layer as the hidden layer, with a weight matrix of shape [784, 10].
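To sanity-check these shapes, here is a minimal NumPy sketch for an illustrative batch of 3 images (the array names are placeholders, not from the original code):

import numpy as np

batch = np.zeros((3, 784))       # 3 flattened 28*28 images
weight = np.zeros((784, 10))     # fully connected weights
bias = np.zeros(10)              # one bias per class

logits = batch @ weight + bias   # [3, 784] x [784, 10] + [10]
print(logits.shape)              # (3, 10)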
2. Overall training flow
- 1. First, define the placeholders:
# Training-set images
x = tf.placeholder(tf.float32, [None, 784])
# Training-set labels (one-hot, so float32 to match the logits in the loss below)
y_true = tf.placeholder(tf.float32, [None, 10])
- 2. Build the model
# Randomly initialize the weight matrix and the bias
# Weights
weight = tf.Variable(tf.random_normal([784, 10], mean=0.0, stddev=1.0), name="weight")
# Bias
bias = tf.Variable(tf.constant(0.0, shape=[10]))
# Prediction (logits)
y_predict = tf.matmul(x, weight) + bias
- 3. Compute the mean loss
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_true, logits=y_predict))
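Note: later TensorFlow 1.x releases deprecate this op in favor of tf.nn.softmax_cross_entropy_with_logits_v2, which is a drop-in replacement here because the labels come from a placeholder rather than a trainable tensor:
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_true, logits=y_predict))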
- 4. Optimization (gradient descent)
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
- 5. Compute the accuracy
equal_list = tf.equal(tf.argmax(y_true, 1), tf.argmax(y_predict, 1))
accuracy = tf.reduce_mean(tf.cast(equal_list, tf.float32))
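As a concrete illustration of this accuracy calculation, here is a standalone NumPy sketch with made-up values (not part of the training code):

import numpy as np

# Two samples: one-hot labels and raw logits
y_true = np.array([[0, 0, 1], [1, 0, 0]])
y_pred = np.array([[0.1, 0.2, 0.7], [0.2, 0.5, 0.3]])

equal_list = np.argmax(y_true, 1) == np.argmax(y_pred, 1)  # [True, False]
accuracy = equal_list.astype(np.float32).mean()            # 0.5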
3. Using TensorBoard
# Name variables and ops by scope
with tf.variable_scope("data"):
    pass
with tf.variable_scope("full_layer"):
    pass

# Collect scalar (single-value) variables
tf.summary.scalar("losses", loss)
tf.summary.scalar("acc", accuracy)

# Collect multi-dimensional variables
tf.summary.histogram("weightes", weight)
tf.summary.histogram("biases", bias)

# Merge all collected summaries into one op
merged = tf.summary.merge_all()

# Write the value of every training step
with tf.Session() as sess:
    # Create the events file, then write to it
    filewriter = tf.summary.FileWriter("./tmp/", graph=sess.graph)
    for i in range(5000):
        # Write this step's values (mnist_x, mnist_y are the current training batch)
        summary = sess.run(merged, feed_dict={x: mnist_x, y_true: mnist_y})
        filewriter.add_summary(summary, i)
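After a training run, the dashboard can be opened by pointing TensorBoard at the events directory:

tensorboard --logdir=./tmp/

and browsing to the printed URL (by default http://localhost:6006).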
4. Saving/loading the model
# Create the saver (usually placed above the Session)
saver = tf.train.Saver()

# Save/restore within the Session, which holds the resources
with tf.Session() as sess:
    # Save the model
    saver.save(sess, "./tmp/ckpt/fc_model")
    # Load the model
    saver.restore(sess, "./tmp/ckpt/fc_model")
    # Predict
    for i in range(100):
        x_test, y_test = mnist.test.next_batch(1)
        predict = tf.argmax(sess.run(y_predict, feed_dict={x: x_test, y_true: y_test}), 1).eval()
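In practice it is worth guarding the restore against a missing checkpoint. A minimal sketch using tf.train.latest_checkpoint, with the directory matching the save call above:

ckpt = tf.train.latest_checkpoint("./tmp/ckpt/")
if ckpt:  # None when no checkpoint has been written yet
    saver.restore(sess, ckpt)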
5. Complete code
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer("is_train", 1, "0: predict, 1: train")

"""
Handwritten digit recognition with a single (fully connected) layer
Features [None, 784], targets [None, 10]
1. Define the data placeholders: features [None, 784], targets [None, 10]
2. Build the model: randomly initialize weights w [784, 10] and bias b
   y_predict = tf.matmul(x, w) + b
3. Compute the loss: mean loss over the samples
4. Optimize with gradient descent
5. Compute the accuracy:
   equal_list = tf.equal(tf.argmax(y, 1), tf.argmax(y_label, 1))
   accuracy = tf.reduce_mean(tf.cast(equal_list, tf.float32))
"""


def ful_connected():
    # Read the data
    mnist = input_data.read_data_sets("./data/mnist/input_data/", one_hot=True)

    # 1. Create the data placeholders: x [None, 784], y_true [None, 10]
    with tf.variable_scope("data"):
        x = tf.placeholder(tf.float32, [None, 784])
        y_true = tf.placeholder(tf.float32, [None, 10])

    # 2. Build a fully connected layer: w [784, 10], b [10]
    with tf.variable_scope("full_layer"):
        # Randomly initialize weights and bias
        weight = tf.Variable(tf.random_normal([784, 10], mean=0.0, stddev=1.0), name="weight")
        bias = tf.Variable(tf.constant(0.0, shape=[10]))
        # Predict the outputs of None samples: [None, 784] * [784, 10] + [10] = [None, 10]
        y_predict = tf.matmul(x, weight) + bias

    # 3. Compute the loss over all samples, then average it
    with tf.variable_scope("softmax"):
        # Mean cross-entropy loss
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_true, logits=y_predict))

    # 4. Minimize the loss with gradient descent
    with tf.variable_scope("optimizer"):
        train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

    # 5. Compute the accuracy
    with tf.variable_scope("count_acc"):
        equal_list = tf.equal(tf.argmax(y_true, 1), tf.argmax(y_predict, 1))
        # equal_list has one entry per sample: [1, 0, 1, 0, 1, 1, ...]
        accuracy = tf.reduce_mean(tf.cast(equal_list, tf.float32))

    # Collect scalar (single-value) variables
    tf.summary.scalar("losses", loss)
    tf.summary.scalar("acc", accuracy)
    # Collect high-dimensional variables
    tf.summary.histogram("weightes", weight)
    tf.summary.histogram("biases", bias)

    # Define an op that initializes the variables
    init_op = tf.global_variables_initializer()

    # Merge all collected summaries
    merged = tf.summary.merge_all()

    # Saver for the model
    saver = tf.train.Saver()

    # Open a session and train
    with tf.Session() as sess:
        # Initialize the variables
        sess.run(init_op)

        # Create the events file, then write to it
        filewriter = tf.summary.FileWriter("./tmp/", graph=sess.graph)

        if FLAGS.is_train == 1:
            # Iterate the training steps, updating the parameters
            for i in range(5000):
                # Fetch real features and targets
                mnist_x, mnist_y = mnist.train.next_batch(50)
                # Run train_op to train
                sess.run(train_op, feed_dict={x: mnist_x, y_true: mnist_y})
                # Write this step's summary values
                summary = sess.run(merged, feed_dict={x: mnist_x, y_true: mnist_y})
                filewriter.add_summary(summary, i)
                # Print the accuracy
                print("Step %d, accuracy: %f" % (i, sess.run(accuracy, feed_dict={x: mnist_x, y_true: mnist_y})))
            # Save the model
            saver.save(sess, "./tmp/ckpt/fc_model")
        else:
            # Load the model
            saver.restore(sess, "./tmp/ckpt/fc_model")
            # Predict
            for i in range(100):
                # Test one image at a time
                x_test, y_test = mnist.test.next_batch(1)
                print("Image %d is: %d, predicted: %d" % (
                    i,
                    tf.argmax(y_test, 1).eval(),
                    tf.argmax(sess.run(y_predict, feed_dict={x: x_test, y_true: y_test}), 1).eval()))

    return None


if __name__ == "__main__":
    ful_connected()
6. CNN version of MNIST
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

"""
Handwritten digit recognition on the MNIST dataset with a convolutional neural network
"""

"""
input: [None, 784]
output: [None, 10]
Before the convolutions, the images must be reshaped: [None, 784] --> [None, 28, 28, 1]
Network design:
- First conv layer: 32 filters of 5*5, strides(1), padding="SAME"
  - Output size: [None, 28, 28, 32]
  - Activation
  - Pooling: 2*2, strides(2), padding="SAME"
  - Output size: [None, 14, 14, 32]
- Second conv layer: 64 filters of 5*5, strides(1), padding="SAME"
  - Output size: [None, 14, 14, 64]
  - Activation
  - Pooling: 2*2, strides(2), padding="SAME"
  - Output size: [None, 7, 7, 64]
- Fully connected layer: [None, 7*7*64] * [7*7*64, 10] + bias = [None, 10]
"""


# Helper that initializes weights
def weight_variable(shape):
    w = tf.Variable(tf.random_normal(shape=shape, mean=0.0, stddev=1.0))
    return w


# Helper that initializes biases
def bias_variables(shape):
    b = tf.Variable(tf.constant(0.0, shape=shape))
    return b


def model():
    """Custom convolutional model
    :return: x, y_true, y_predict
    """
    # 1. Data placeholders: x [None, 784], y_true [None, 10]
    with tf.variable_scope("data"):
        x = tf.placeholder(tf.float32, [None, 784])
        y_true = tf.placeholder(tf.float32, [None, 10])

    # 2. First conv layer: conv 5*5*1, 32 filters, strides=1, then activation and pooling
    with tf.variable_scope("conv1"):
        # Randomly initialize the weights and the bias [32]
        w_conv1 = weight_variable([5, 5, 1, 32])
        b_conv1 = bias_variables([32])

        # Reshape x: [None, 784] -> [None, 28, 28, 1]
        x_reshape = tf.reshape(x, [-1, 28, 28, 1])

        # [None, 28, 28, 1] -> [None, 28, 28, 32]
        x_relu1 = tf.nn.relu(tf.nn.conv2d(x_reshape, w_conv1, strides=[1, 1, 1, 1], padding="SAME") + b_conv1)

        # Pooling 2*2, strides 2: [None, 28, 28, 32] -> [None, 14, 14, 32]
        x_pool1 = tf.nn.max_pool(x_relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")

    # 3. Second conv layer: 5*5*32, 64 filters, strides=1
    with tf.variable_scope("conv2"):
        w_conv2 = weight_variable([5, 5, 32, 64])
        b_conv2 = bias_variables([64])

        # Convolution, activation, pooling
        # [None, 14, 14, 32] -> [None, 14, 14, 64]
        x_relu2 = tf.nn.relu(tf.nn.conv2d(x_pool1, w_conv2, strides=[1, 1, 1, 1], padding="SAME") + b_conv2)

        # Pooling 2*2, strides 2: [None, 14, 14, 64] -> [None, 7, 7, 64]
        x_pool2 = tf.nn.max_pool(x_relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")

    # 4. Fully connected layer: [None, 7, 7, 64] --> [None, 7*7*64] * [7*7*64, 10] + [10] = [None, 10]
    # Randomly initialize weights and bias
    w_fc = weight_variable([7 * 7 * 64, 10])
    b_fc = bias_variables([10])

    # Reshape: [None, 7, 7, 64] -> [None, 7*7*64]
    x_fc_reshape = tf.reshape(x_pool2, [-1, 7 * 7 * 64])

    # Matrix multiply to get the 10 outputs per sample
    y_predict = tf.matmul(x_fc_reshape, w_fc) + b_fc

    return x, y_true, y_predict


def conf_fc():
    # 1. Read the data
    mnist = input_data.read_data_sets("./data/mnist/input_data/", one_hot=True)

    # 2. Define the model and get its output
    x, y_true, y_predict = model()

    # 3. Compute the loss over all samples, then average it
    with tf.variable_scope("soft_cross"):
        # Mean cross-entropy loss
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_true, logits=y_predict))

    # 4. Minimize the loss with gradient descent
    with tf.variable_scope("optimizer"):
        train_op = tf.train.GradientDescentOptimizer(0.00005).minimize(loss)

    # 5. Compute the accuracy
    with tf.variable_scope("acc"):
        equal_list = tf.equal(tf.argmax(y_true, 1), tf.argmax(y_predict, 1))
        accuracy = tf.reduce_mean(tf.cast(equal_list, tf.float32))

    # Define an op that initializes the variables
    init_op = tf.global_variables_initializer()

    # Open a session and run
    with tf.Session() as sess:
        sess.run(init_op)

        # Training loop
        for i in range(1000):
            # Fetch real features and targets
            mnist_x, mnist_y = mnist.train.next_batch(50)
            # Run train_op to train
            sess.run(train_op, feed_dict={x: mnist_x, y_true: mnist_y})
            # Print the accuracy
            print("Step %d, accuracy: %f" % (i, sess.run(accuracy, feed_dict={x: mnist_x, y_true: mnist_y})))

    return None


if __name__ == "__main__":
    conf_fc()
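The intermediate sizes quoted in the design notes follow from the SAME-padding rule, where the output spatial size is ceil(input / stride). A quick sketch verifying the 28 -> 14 -> 7 chain:

import math

def same_out(size, stride):
    # Output spatial size for padding="SAME"
    return math.ceil(size / stride)

assert same_out(28, 1) == 28   # conv1, stride 1
assert same_out(28, 2) == 14   # pool1, stride 2
assert same_out(14, 2) == 7    # pool2, stride 2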