In TensorFlow, to save on variable storage we often share variables by sharing a variable scope (variable_scope).
The most common (and rather clumsy) way is to re-enter the shared variable_scope with reuse=True on every repeated use (i.e. every use after the first), but doing this by hand quickly becomes tedious.
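For reference, the manual pattern looks roughly like this (a minimal sketch; the scope name "conv" and the 2x2 convolution are only illustrative, not taken from a particular script):

# -*- coding: utf-8 -*-
import tensorflow as tf

def conv_layer(x):
    # Whether "weights" is created or reused depends entirely on the reuse flag
    # of the enclosing variable scope.
    w = tf.get_variable("weights", shape=[2, 2, 1, 1])
    return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding="SAME")

x = tf.placeholder(tf.float32, shape=[1, 4, 4, 1])
with tf.variable_scope("conv"):              # first use: creates conv/weights
    y1 = conv_layer(x)
with tf.variable_scope("conv", reuse=True):  # every later use: must remember reuse=True
    y2 = conv_layer(x)

This bookkeeping of "first call vs. later calls" is exactly what the two methods below get rid of.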
There are two more concise ways to enable reuse for a variable_scope once and for all. The code templates are summarized below.
Method 1: use tf.variable_scope(..., reuse=tf.AUTO_REUSE).
# -*- coding: utf-8 -*-
import tensorflow as tf

def func(...):
    # reuse=tf.AUTO_REUSE: create the variables on the first call, reuse them afterwards
    with tf.variable_scope(name_or_scope='', reuse=tf.AUTO_REUSE):  ### key change ###
        pass

def main():
    with tf.Graph().as_default():
        pass
        for _ in range(5):
            output = func(...)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            pass
            _output = sess.run(output, feed_dict=...)
            pass

if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf

def func(in_put, in_channel, out_channel):
    # reuse=tf.AUTO_REUSE: the "weights" variable is created on the first call
    # and silently reused on every later call.
    with tf.variable_scope(name_or_scope='', reuse=tf.AUTO_REUSE):  ### key change ###
        weights = tf.get_variable(name="weights", shape=[2, 2, in_channel, out_channel],
                                  initializer=tf.contrib.layers.xavier_initializer_conv2d())
        convolution = tf.nn.conv2d(input=in_put, filter=weights, strides=[1, 1, 1, 1], padding="SAME")
    return convolution

def main():
    with tf.Graph().as_default():
        input_x = tf.placeholder(dtype=tf.float32, shape=[1, 4, 4, 1])
        for _ in range(5):
            output = func(input_x, 1, 1)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            _output = sess.run([output], feed_dict={input_x: np.random.uniform(low=0, high=255, size=[1, 4, 4, 1])})
            print(_output)

if __name__ == "__main__":
    main()
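To convince yourself that the five calls really share one set of weights, a quick check (my own addition, not part of the original script) is to list the graph's variables right after the loop in main():

        for _ in range(5):
            output = func(input_x, 1, 1)
        # Despite five calls to func(), the graph should hold a single "weights" variable.
        print([v.name for v in tf.global_variables()])  # expected: ['weights:0']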
Method 2: import the variable_scope module via from tensorflow.python.ops import variable_scope as vs, and call vs.get_variable_scope().reuse_variables() whenever the variables should be reused.
# -*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow.python.ops import variable_scope as vs  ### key change ###

def func(..., reuse=False):  ### key change ###
    # On every call after the first, switch the current scope into reuse mode
    if reuse:  ### key change ###
        vs.get_variable_scope().reuse_variables()  ### key change ###
    pass
    return output

def main():
    with tf.Graph().as_default():
        pass
        for _ in range(5):
            output = func(..., reuse=(_ != 0))  ### key change ###
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            pass
            _output = sess.run(output, feed_dict=...)
            pass

if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import variable_scope as vs  ### key change ###

def func(in_put, in_channel, out_channel, reuse=False):  ### key change ###
    # On every call after the first, switch the current scope into reuse mode
    # so that tf.get_variable returns the existing "weights" instead of raising an error.
    if reuse:  ### key change ###
        vs.get_variable_scope().reuse_variables()  ### key change ###
    weights = tf.get_variable(name="weights", shape=[2, 2, in_channel, out_channel],
                              initializer=tf.contrib.layers.xavier_initializer_conv2d())
    output = tf.nn.conv2d(input=in_put, filter=weights, strides=[1, 1, 1, 1], padding="SAME")
    return output

def main():
    with tf.Graph().as_default():
        input_x = tf.placeholder(dtype=tf.float32, shape=[1, 4, 4, 1])
        for _ in range(5):
            output = func(input_x, 1, 1, reuse=(_ != 0))  ### key change ###
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            _output = sess.run(output, feed_dict={input_x: np.random.uniform(low=0, high=255, size=[1, 4, 4, 1])})
            print(_output)

if __name__ == "__main__":
    main()
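One caveat with this second method: vs.get_variable_scope().reuse_variables() switches the current scope, here the graph's root scope, into reuse mode, and in TF 1.x that flag cannot be switched back within the scope, so any later tf.get_variable call at the root level will also expect an existing variable. If that is a concern, the same trick can be confined to a named scope. The sketch below is my own variant with an illustrative scope name ("conv"), not code from the original examples:

# -*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow.python.ops import variable_scope as vs

def func_scoped(in_put, in_channel, out_channel, reuse=False):
    # Re-entering the named scope on every call keeps the reuse flag local to "conv"
    # instead of flipping the root scope of the whole graph.
    with tf.variable_scope("conv"):
        if reuse:
            vs.get_variable_scope().reuse_variables()
        weights = tf.get_variable(name="weights", shape=[2, 2, in_channel, out_channel])
        return tf.nn.conv2d(input=in_put, filter=weights, strides=[1, 1, 1, 1], padding="SAME")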