TensorFlow Nonlinear Regression

A hands-on example of nonlinear regression with TensorFlow: fit y = x² from noisy samples using a small fully connected network. The code below uses the TensorFlow 1.x graph API (tf.placeholder / tf.Session).

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt


# Generate 200 sample points
x_data = np.linspace(-0.5, 0.5, 200)[:, np.newaxis]  # 200 evenly spaced points in [-0.5, 0.5]; np.newaxis adds a dimension -> shape (200, 1)
noise = np.random.normal(0, 0.02, x_data.shape)      # Gaussian noise, mean 0, std 0.02 (np.random.randn(size) is the standard-normal case, i.e. np.random.normal(loc=0, scale=1, size))
y_data = np.square(x_data) + noise                   # targets: y = x^2 plus noise

# Define two placeholders
x = tf.placeholder(tf.float32, [None, 1])       # one column; None leaves the number of rows (batch size) unspecified
y = tf.placeholder(tf.float32, [None, 1])

# Define the hidden layer of the network
Weights_L1 = tf.Variable(tf.random_normal([1,10]))      # weight matrix of shape (1, 10): 1 input feature, 10 hidden neurons
biases_L1 = tf.Variable(tf.zeros([1,10]))               # 10 bias values, one per hidden neuron
Wx_plus_b_L1 = tf.matmul(x, Weights_L1) + biases_L1     # weighted sum of the inputs
L1 = tf.nn.tanh(Wx_plus_b_L1)                           # tanh activation

# Define the output layer of the network
Weights_L2 = tf.Variable(tf.random_normal([10,1]))      # weight matrix of shape (10, 1): 10 hidden neurons, 1 output
biases_L2 = tf.Variable(tf.zeros([1,1]))                # single output bias
Wx_plus_b_L2 = tf.matmul(L1, Weights_L2) + biases_L2
prediction = tf.nn.tanh(Wx_plus_b_L2)                   # predicted value; tanh keeps outputs in (-1, 1), which covers the x^2 targets here

# Quadratic cost function (mean squared error)
loss = tf.reduce_mean(tf.square(y - prediction))
# Minimize the loss with gradient descent, learning rate 0.1
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
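# The optimizer applies the plain gradient-descent update to every trainable variable theta:
#     theta <- theta - 0.1 * d(loss)/d(theta)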

with tf.Session() as sess:
    # Initialize the variables
    sess.run(tf.global_variables_initializer())
    # Train for 2000 iterations
    for _ in range(2000):
        sess.run(train_step, feed_dict={x: x_data, y: y_data})
    # Get the network's predictions for the training inputs
    prediction_value = sess.run(prediction, feed_dict={x:x_data})
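    # Optional check (an addition, not in the original post): report the final training loss
    final_loss = sess.run(loss, feed_dict={x: x_data, y: y_data})
    print('Final MSE: %.6f' % final_loss)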
    # Plot the results
    plt.figure()
    plt.scatter(x_data,y_data)
    plt.plot(x_data,prediction_value, 'r-', lw=5)
    plt.show()

Program output

(Figure: the noisy sample points as a scatter plot, with the network's prediction drawn as a thick red curve.)
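
For readers on TensorFlow 2.x, where tf.placeholder and tf.Session are no longer available by default, the same network can be expressed with tf.keras. The sketch below is an illustrative equivalent rather than part of the original post; it assumes TensorFlow 2.x and mirrors the original setup (a 1-10-1 tanh network, SGD with learning rate 0.1, 2000 full-batch steps).

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Same data generation as above
x_data = np.linspace(-0.5, 0.5, 200)[:, np.newaxis].astype(np.float32)
noise = np.random.normal(0, 0.02, x_data.shape).astype(np.float32)
y_data = np.square(x_data) + noise

# 1 input -> 10 tanh hidden units -> 1 tanh output, matching the graph-API version
model = tf.keras.Sequential([
    tf.keras.layers.Dense(10, activation='tanh', input_shape=(1,)),
    tf.keras.layers.Dense(1, activation='tanh'),
])
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.1), loss='mse')

# batch_size=200 makes every epoch a single full-batch gradient step, like the original training loop
model.fit(x_data, y_data, epochs=2000, batch_size=200, verbose=0)

prediction_value = model.predict(x_data)
plt.figure()
plt.scatter(x_data, y_data)
plt.plot(x_data, prediction_value, 'r-', lw=5)
plt.show()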
