# Use a small three-layer network to approximate the function y = x^3 + b.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Training data: 30 points in [-6, 6], column vector (30, 1) to match the
# placeholder shape [None, 1].
x_data = np.linspace(-6.0, 6.0, 30)[:, np.newaxis]
y_data = np.power(x_data, 3) + 0.7

# Validation data: deliberately wider range [-20, 20] so the plot shows how
# the network extrapolates outside the training interval.
t_data = np.linspace(-20.0, 20.0, 40)[:, np.newaxis]
ty_data = np.power(t_data, 3) + 0.7

# Placeholders for inputs/targets.
# NOTE: this uses the TF 1.x graph API (tf.placeholder / tf.Session); on
# TF 2.x run it via tf.compat.v1 with eager execution disabled.
x = tf.placeholder(tf.float32, [None, 1])
y = tf.placeholder(tf.float32, [None, 1])

# Network
# -- layer one: 1 -> 10, ReLU --
l_w_1 = tf.Variable(tf.random_normal([1, 10]))
l_b_1 = tf.Variable(tf.zeros([1, 10]))
l_fcn_1 = tf.matmul(x, l_w_1) + l_b_1
relu_1 = tf.nn.relu(l_fcn_1)
# -- layer two: 10 -> 20, ReLU --
l_w_2 = tf.Variable(tf.random_normal([10, 20]))
l_b_2 = tf.Variable(tf.zeros([1, 20]))
l_fcn_2 = tf.matmul(relu_1, l_w_2) + l_b_2
relu_2 = tf.nn.relu(l_fcn_2)
# -- output layer: 20 -> 1, linear (regression output, no activation) --
l_w_3 = tf.Variable(tf.random_normal([20, 1]))
l_b_3 = tf.Variable(tf.zeros([1, 1]))
l_fcn_3 = tf.matmul(relu_2, l_w_3) + l_b_3

# Mean-squared-error loss between targets and predictions.
loss = tf.reduce_mean(tf.square(y - l_fcn_3))
learn_rate = 0.001
train_step = tf.train.GradientDescentOptimizer(learn_rate).minimize(loss)

# Build the initializer AFTER the optimizer, so any slot variables an
# optimizer might create are also initialized (plain GD has none, but this
# ordering is the safe convention).
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    # 20 epochs x 5000 full-batch gradient steps on the training set.
    for epoch in range(20):
        for step in range(5000):
            sess.run(train_step, feed_dict={x: x_data, y: y_data})
    # Predictions on the (wider) validation grid after training.
    y_pred = sess.run(l_fcn_3, feed_dict={x: t_data})
    # Spot-check a single extrapolation point (true value: 1000.7).
    print(sess.run(l_fcn_3, feed_dict={x: [[10.]]}))
    plt.figure()
    plt.scatter(t_data, ty_data)
    plt.plot(t_data, y_pred, 'r-')
    plt.show()
# Sample output of the print above (prediction at x = 10): [[ 533.45062256]]