I. What Is a Variable
1. A TensorFlow variable is the best way to represent shared, persistent state manipulated by a program. Variables are created and manipulated through the tf.Variable class.
Unlike the everyday notion of a variable that merely stores or returns a value, a TensorFlow variable is a component of the TensorFlow framework itself.
2. Characteristics of variables
(1) Persistent storage
Data defined in the program can be saved persistently in the form of a variable.
(2) Mutable value
Sometimes we store data and want to be able to update it iteratively.
(3) Can be marked as trainable
A trainable variable can be updated repeatedly during training.
3. Variables are well suited to storing model parameters
The goal of deep learning is to train a good model to solve a problem, and the model's parameters are exactly this kind of persistent, updatable state; a short sketch combining these three properties follows.
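The sketch below is a minimal illustration of those three properties (the parameter name w, the learning rate, and the toy data are made up for illustration): a tf.Variable stores a model parameter persistently, tf.GradientTape watches it because it is trainable, and assign_sub modifies its value in place.

import tensorflow as tf

# A model parameter stored as a variable: persistent, mutable and trainable
w = tf.Variable(initial_value=5.0, trainable=True, name="w")

x, y_true = 2.0, 4.0        # toy training data
learning_rate = 0.1

for step in range(3):
    with tf.GradientTape() as tape:      # trainable variables are watched automatically
        y_pred = w * x
        loss = tf.square(y_pred - y_true)
    grad = tape.gradient(loss, w)        # gradient of the loss w.r.t. the variable
    w.assign_sub(learning_rate * grad)   # update the variable in place
    print("step", step, "w =", w.numpy(), "loss =", loss.numpy())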
II. Creating Variables
1. tf.Variable(initial_value=None, trainable=True, collections=None, name=None)
Parameters:
(1) initial_value: the initial value of the variable
(2) trainable: whether the variable can be trained (updated during optimization)
(3) collections: the graph collections the new variable is added to (this mechanism belongs to the TF1 graph workflow).
Defaults to [GraphKeys.GLOBAL_VARIABLES]; if trainable is True, the variable is also added to the graph collection [GraphKeys.TRAINABLE_VARIABLES].
2. A variable must be explicitly initialized before its value can be evaluated (not needed in TensorFlow 2.0, where eager execution initializes it on creation):
tf.global_variables_initializer() (available in TensorFlow 2.x as tf.compat.v1.global_variables_initializer()); a minimal sketch of this workflow is given below.
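For reference, here is a minimal sketch of the TF1-style explicit initialization, assuming the compat.v1 graph/session workflow in TensorFlow 2.x; it is separate from the full example that follows.

import tensorflow as tf

tf.compat.v1.disable_eager_execution()   # switch to graph mode so variables are not auto-initialized

var = tf.Variable(initial_value=10)
init_op = tf.compat.v1.global_variables_initializer()   # op that initializes all global variables

with tf.compat.v1.Session() as sess:
    sess.run(init_op)            # without this, evaluating var would raise an error
    print(sess.run(var))         # 10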
3. Example
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf


def tensorflow_demo():
    """Basic structure of TensorFlow"""
    # Addition in TensorFlow
    a_t = tf.constant(2)
    b_t = tf.constant(3)
    c_t = a_t + b_t
    print("TensorFlow addition result:\n", c_t)
    print(c_t.numpy())
    # TensorFlow 2.0 no longer needs (or has) a session module
    return None


def graph_demo():
    """Graph demo"""
    # Addition in TensorFlow
    a_t = tf.constant(2)
    b_t = tf.constant(3)
    c_t = a_t + b_t
    print("TensorFlow addition result:\n", c_t)
    print(c_t.numpy())

    # Inspect the default graph
    # Method 1: call the function
    default_g = tf.compat.v1.get_default_graph()
    print("default_g:\n", default_g)
    # Method 2: check the attribute
    # print("graph attribute of a_t:\n", a_t.graph)
    # print("graph attribute of c_t:\n", c_t.graph)

    # Custom graph
    new_g = tf.Graph()
    # Define data and operations inside the custom graph
    with new_g.as_default():
        a_new = tf.constant(20)
        b_new = tf.constant(30)
        c_new = a_new + b_new
        print("c_new:\n", c_new)
        print("graph attribute of a_new:\n", a_new.graph)
        print("graph attribute of b_new:\n", b_new.graph)

    # Open a session on new_g
    with tf.compat.v1.Session(graph=new_g) as sess:
        c_new_value = sess.run(c_new)
        print("c_new_value:\n", c_new_value)
        print("the graph we created ourselves:\n", sess.graph)

    # Visualize the custom graph
    # 1) Create a writer
    writer = tf.summary.create_file_writer("./tmp/summary")
    # 2) Write the graph
    with writer.as_default():
        tf.summary.graph(new_g)
    return None


def session_run_demo():
    """feed operation"""
    tf.compat.v1.disable_eager_execution()
    # Define placeholders
    a = tf.compat.v1.placeholder(tf.float32)
    b = tf.compat.v1.placeholder(tf.float32)
    sum_ab = tf.add(a, b)
    print("a:\n", a)
    print("b:\n", b)
    print("sum_ab:\n", sum_ab)
    # Open a session
    with tf.compat.v1.Session() as sess:
        print("placeholder result:\n", sess.run(sum_ab, feed_dict={a: 1.1, b: 2.2}))
    return None


def tensor_demo():
    """Tensor demo"""
    tensor1 = tf.constant(4.0)
    tensor2 = tf.constant([1, 2, 3, 4])
    linear_squares = tf.constant([[4], [9], [16], [25]], dtype=tf.int32)
    print("tensor1:\n", tensor1)
    print("tensor2:\n", tensor2)
    print("linear_squares:\n", linear_squares)
    # Change the tensor's dtype
    l_cast = tf.cast(linear_squares, dtype=tf.float32)
    print("before:\n", linear_squares)
    print("l_cast:\n", l_cast)
    return None


def variable_demo():
    """Variable demo"""
    a = tf.Variable(initial_value=50)
    b = tf.Variable(initial_value=40)
    c = tf.add(a, b)
    print("a:\n", a)
    print("b:\n", b)
    print("c:\n", c)
    return None


if __name__ == "__main__":
    # Code 1: basic structure of TensorFlow
    # tensorflow_demo()
    # Code 2: graph demo
    # graph_demo()
    # Code 3: feed operation
    # session_run_demo()
    # Code 4: tensor demo
    # tensor_demo()
    # Code 5: variable demo
    variable_demo()
Output:
a:<tf.Variable 'Variable:0' shape=() dtype=int32, numpy=50>
b:<tf.Variable 'Variable:0' shape=() dtype=int32, numpy=40>
c:tf.Tensor(90, shape=(), dtype=int32)
a and b are tf.Variable objects; c is an ordinary Tensor.
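A short sketch of that distinction (the values are illustrative): tf.add returns a plain Tensor computed from the variables' current values, while the variables themselves can still be modified in place afterwards.

import tensorflow as tf

a = tf.Variable(initial_value=50)
b = tf.Variable(initial_value=40)
c = tf.add(a, b)      # c is a plain Tensor holding 90

a.assign(100)         # overwrite the variable's value in place
a.assign_add(1)       # a is now 101
print(a.numpy())      # 101
print(c.numpy())      # still 90: c was computed eagerly and does not follow a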
III. Modifying a Variable's Namespace
1. Use tf.variable_scope() to modify a variable's namespace
The specified scope name is prepended to the names of the OPs created inside it.
In TensorFlow 2.0, use tf.compat.v1.variable_scope().
2. Example
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf

# tensorflow_demo(), graph_demo(), session_run_demo() and tensor_demo() are
# unchanged from the previous example and are omitted here.


def variable_demo():
    """Variable demo"""
    a = tf.Variable(initial_value=50)
    b = tf.Variable(initial_value=40)
    c = tf.add(a, b)
    print("a:\n", a)
    print("b:\n", b)
    print("c:\n", c)

    # Modify the namespace with variable_scope
    with tf.compat.v1.variable_scope("my_scope"):
        d = tf.Variable(initial_value=30)
        e = tf.Variable(initial_value=20)
        f = tf.add(d, e)
        print("d:\n", d)
        print("e:\n", e)
        print("f:\n", f)
    return None


if __name__ == "__main__":
    # Code 5: variable demo
    variable_demo()
Output:
a:<tf.Variable 'Variable:0' shape=() dtype=int32, numpy=50>
b:<tf.Variable 'Variable:0' shape=() dtype=int32, numpy=40>
c:tf.Tensor(90, shape=(), dtype=int32)
d:<tf.Variable 'my_scope/Variable:0' shape=() dtype=int32, numpy=30>
e:<tf.Variable 'my_scope/Variable:0' shape=() dtype=int32, numpy=20>
f:tf.Tensor(50, shape=(), dtype=int32)
2. The benefit of namespaces is that they make the structure clearer: related variables and OPs are grouped under a common prefix. A small sketch of nested scopes follows.
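A hedged sketch of nested scopes (the scope names "model"/"layer1" and the variable names are made up): nested tf.compat.v1.variable_scope blocks compose their prefixes, and passing name= to tf.Variable replaces the generic "Variable" part with a readable name.

import tensorflow as tf

with tf.compat.v1.variable_scope("model"):
    with tf.compat.v1.variable_scope("layer1"):
        w = tf.Variable(initial_value=0.5, name="weight")
        b = tf.Variable(initial_value=0.0, name="bias")

print(w.name)   # expected: model/layer1/weight:0
print(b.name)   # expected: model/layer1/bias:0

Grouping parameters this way makes the names that show up in checkpoints and TensorBoard much easier to read.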