TensorFlow Handwritten Digit Recognition

Handwritten digit recognition with TensorFlow, including some parameter tuning and optimization.

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data


# Load the data
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

# Size of each batch
batch_size = 100    # decreasing this value improves accuracy somewhat

# Total number of batches
n_batch = mnist.train.num_examples // batch_size

# Define two placeholders
x = tf.placeholder(tf.float32, [None, 784])     # 28*28 = 784, images flattened to 1-D vectors
y = tf.placeholder(tf.float32, [None, 10])      # 10 labels, the digits 0-9

# Build a simple neural network
# Initializing with zeros() works better here; random_normal() performs worse

# version 1.0
Weight = tf.Variable(tf.zeros([784,10]))     # weights: 784 inputs, 10 output labels
biase = tf.Variable(tf.zeros([10]))
prediction = tf.nn.softmax(tf.matmul(x,Weight) + biase)     # prediction: softmax converts scores to probabilities

"""
# version 1.1: accuracy drops (all-zero initialization leaves the hidden units symmetric)
Weight_L1 = tf.Variable(tf.zeros([784,100]))
biases_L1 = tf.Variable(tf.zeros([100]))
L1 = tf.nn.softmax(tf.matmul(x ,Weight_L1) + biases_L1)

Weight_L2 = tf.Variable(tf.zeros([100,10]))
biases_L2 = tf.Variable(tf.zeros([10]))
prediction = tf.nn.softmax(tf.matmul(L1 ,Weight_L2) + biases_L2)
"""

# Quadratic cost function
loss = tf.reduce_mean(tf.square(y - prediction))
# Gradient descent; a larger learning rate improves accuracy somewhat here
train_step = tf.train.GradientDescentOptimizer(0.4).minimize(loss)      # learning rate 0.4


# Initialize the variables
init = tf.global_variables_initializer()

# Store the results in a list of booleans
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(prediction, 1))     # argmax() returns the index of the largest value in a 1-D tensor; equal() returns booleans

# Compute the accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # cast(): True=1, False=0


with tf.Session() as sess:
    sess.run(init)
    # Train for 21 epochs
    for epoch in range(21):     # more epochs improve accuracy
        for batch in range(n_batch):    # one pass over all the images
            batch_xs,batch_ys = mnist.train.next_batch(batch_size) # fetch a batch of 100 images: data in batch_xs, labels in batch_ys
            sess.run(train_step, feed_dict={x:batch_xs, y:batch_ys})
        acc = sess.run(accuracy, feed_dict={x:mnist.test.images, y:mnist.test.labels})  # evaluate on the test set
        print("Iter:"+str(epoch)+", Testing Accuracy:"+str(acc))

Run result:

(screenshot of the per-epoch test accuracy omitted)


Optimization

Cross-entropy:

Replace the quadratic cost with the cross-entropy cost function:

loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=prediction))
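Note that softmax_cross_entropy_with_logits expects raw (pre-softmax) logits; feeding it the already-softmaxed prediction, as above, still trains but effectively applies softmax twice. A sketch of the numerically stable pattern, reusing the version 1.0 variable names:

logits = tf.matmul(x, Weight) + biase       # raw scores, no softmax applied
prediction = tf.nn.softmax(logits)          # probabilities, used only for accuracy
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))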

Preventing overfitting:

The telltale sign of overfitting is a large gap between training-set and test-set accuracy. Common countermeasures:

  • Get more training data
  • Regularization
  • Dropout (see the sketch below)
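A minimal dropout sketch (hypothetical layer names; tf.nn.dropout zeroes a random (1 - keep_prob) fraction of activations and rescales the rest by 1/keep_prob):

keep_prob = tf.placeholder(tf.float32)      # fraction of neurons kept active
h = tf.nn.tanh(tf.matmul(x, Weight1) + biase1)
h_drop = tf.nn.dropout(h, keep_prob)
# Training: drop 30% of the units
sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 0.7})
# Evaluation: use the full network
sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0})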

Weight initialization:

The truncated_normal() function draws from a truncated normal distribution: samples more than two standard deviations from the mean are discarded and re-drawn, so no extreme initial weights appear.

# Truncated-normal initialization, standard deviation 0.1
Weight = tf.Variable(tf.truncated_normal([784,10], stddev=0.1))     # weights: 784 inputs, 10 output labels
biase = tf.Variable(tf.zeros([10])+0.1)
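For comparison, a quick sketch of the two initializers (the names here are illustrative):

w_trunc = tf.truncated_normal([784, 10], stddev=0.1)  # all samples in (-0.2, 0.2)
w_norm = tf.random_normal([784, 10], stddev=0.1)      # unbounded tails; occasional large initial weights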

Adding hidden layers and neurons:

#version 1.1
Weight1 = tf.Variable(tf.truncated_normal([784,1500], stddev=0.1))     # layer 1: 784 inputs, 1500 hidden units; truncated-normal init, stddev 0.1
biase1 = tf.Variable(tf.zeros([1500])+0.1)
L1 = tf.nn.tanh(tf.matmul(x, Weight1) + biase1)
L1_drop = tf.nn.dropout(L1, keep_prob)     # keep_prob sets the fraction of neurons kept active, to reduce overfitting

Weight2 = tf.Variable(tf.truncated_normal([1500,100], stddev=0.1))    
biase2 = tf.Variable(tf.zeros([100])+0.1)
L2 = tf.nn.tanh(tf.matmul(L1_drop, Weight2) + biase2)
L2_drop = tf.nn.dropout(L2, keep_prob)

Weight3 = tf.Variable(tf.truncated_normal([100,10], stddev=0.1))    
biase3 = tf.Variable(tf.zeros([10])+0.1)

prediction = tf.nn.softmax(tf.matmul(L2_drop,Weight3) + biase3)     # prediction: softmax converts scores to probabilities

Optimizers:

(table of TensorFlow optimizers omitted)
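Switching optimizers is a one-line change; a few of the optimizers shipped with TensorFlow 1.x (the learning rates below are illustrative):

# Plain gradient descent
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)
# Gradient descent with momentum
#train_step = tf.train.MomentumOptimizer(0.01, momentum=0.9).minimize(loss)
# Adadelta, adapts per-parameter step sizes
#train_step = tf.train.AdadeltaOptimizer(1.0).minimize(loss)
# Adam, usually a strong default
#train_step = tf.train.AdamOptimizer(0.001).minimize(loss)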

Final optimized program:

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data


# Load the data
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

# Size of each batch
batch_size = 100    # decreasing this value improves accuracy somewhat

# Total number of batches
n_batch = mnist.train.num_examples // batch_size

# Define the placeholders
x = tf.placeholder(tf.float32, [None, 784])     # 28*28 = 784, images flattened to 1-D vectors
y = tf.placeholder(tf.float32, [None, 10])      # 10 labels, the digits 0-9
keep_prob = tf.placeholder(tf.float32)          # fraction of neurons kept active
learn_rate = tf.Variable(0.001, dtype=tf.float32)    # learning rate


# Build a simple neural network
# Initializing with truncated_normal() works better here; random_normal() performs worse

#version 1.2
Weight1 = tf.Variable(tf.truncated_normal([784,500], stddev=0.1))     # layer 1: 784 inputs, 500 hidden units; truncated-normal init, stddev 0.1
biase1 = tf.Variable(tf.zeros([500])+0.1)
L1 = tf.nn.tanh(tf.matmul(x, Weight1) + biase1)
L1_drop = tf.nn.dropout(L1, keep_prob)     # keep_prob sets the fraction of neurons kept active, to reduce overfitting

Weight2 = tf.Variable(tf.truncated_normal([500,300], stddev=0.1))    
biase2 = tf.Variable(tf.zeros([300])+0.1)
L2 = tf.nn.tanh(tf.matmul(L1_drop, Weight2) + biase2)
L2_drop = tf.nn.dropout(L2, keep_prob)

Weight3 = tf.Variable(tf.truncated_normal([300,10], stddev=0.1))    
biase3 = tf.Variable(tf.zeros([10])+0.1)

prediction = tf.nn.softmax(tf.matmul(L2_drop,Weight3) + biase3)     # prediction: softmax converts scores to probabilities


# Quadratic cost function
#loss = tf.reduce_mean(tf.square(y - prediction))
# Cross-entropy cost function
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=prediction))
# Training step; a larger learning rate improves accuracy somewhat
#train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)      # learning rate 0.2
train_step = tf.train.AdamOptimizer(learn_rate).minimize(loss)

# Initialize the variables
init = tf.global_variables_initializer()

# Store the results in a list of booleans
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(prediction, 1))     # argmax() returns the index of the largest value in a 1-D tensor; equal() returns booleans

# Compute the accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # cast(): True=1, False=0


with tf.Session() as sess:
    sess.run(init)
    # Train for 51 epochs
    for epoch in range(51):     # more epochs improve accuracy
        sess.run(tf.assign(learn_rate, 0.001 * (0.95 ** epoch)))     # decay the learning rate each epoch: converge quickly at first, then take smaller steps near the minimum to avoid oscillating around it
        for batch in range(n_batch):    # one pass over all the images
            batch_xs,batch_ys = mnist.train.next_batch(batch_size) # fetch a batch of 100 images: data in batch_xs, labels in batch_ys
            sess.run(train_step, feed_dict={x:batch_xs, y:batch_ys, keep_prob:1.0})   # keep_prob=1.0 disables dropout here; feed e.g. 0.7 to actually apply it

        lr = sess.run(learn_rate)
        test_acc = sess.run(accuracy, feed_dict={x:mnist.test.images, y:mnist.test.labels, keep_prob:1.0})  # evaluate on the test set
        #train_acc = sess.run(accuracy, feed_dict={x:mnist.train.images, y:mnist.train.labels, keep_prob:1.0})
        print("Iter:"+str(epoch)+", Testing Accuracy:"+str(test_acc)+", Learning Rate:"+str(lr))

Result:

(screenshot of the optimized training run omitted)

Accuracy now reaches 98%…

CNN (Convolutional Neural Network)

# -*- coding:utf-8 -*-

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data


mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

# Size of each batch
batch_size = 100

# Total number of batches
n_batch = mnist.train.num_examples // batch_size

# Initialize weights
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)    # draw from a truncated normal distribution
    return tf.Variable(initial)

# Initialize biases
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

# Convolutional layer
def conv2d(x, W):
    # x: tensor of shape `[batch, in_height, in_width, in_channels]`
    # W: filter, a tensor of shape `[filter_height, filter_width, in_channels, out_channels]`
    # strides[0]=strides[3]=1; strides[1] is the stride in x, strides[2] the stride in y
    # padding: 'SAME' (output keeps the input's spatial size at stride 1) or 'VALID' (no padding)
    return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')

# Pooling layer
def max_pool_2x2(x):
    # ksize [1,x,y,1]: a 2x2 window with stride 2 halves the spatial size
    return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')


# Define two placeholders
x = tf.placeholder(tf.float32, [None,784]) #28*28
y = tf.placeholder(tf.float32, [None,10])


# Reshape x into a 4-D tensor [batch, in_height, in_width, in_channels] (1 color channel)
x_image = tf.reshape(x, [-1,28,28,1])


# Initialize the weights and biases of the first convolutional layer
W_conv1 = weight_variable([5,5,1,32]) # 5*5 window; 32 kernels extract features from 1 input plane, giving 32 feature maps
b_conv1 = bias_variable([32]) # one bias per kernel

# Convolve x_image with the weights, add the bias, then apply the ReLU activation
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1) # max-pooling

# Initialize the weights and biases of the second convolutional layer
W_conv2 = weight_variable([5,5,32,64]) # 5*5 window; 64 kernels extract features from 32 input planes, giving 64 feature maps
b_conv2 = bias_variable([64]) # one bias per kernel

# Convolve h_pool1 with the weights, add the bias, then apply the ReLU activation
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2) # max-pooling


"""
原始:28*28图片 第一次卷积后 28*28 第一池化后 14*14
第二次卷积后 14*14 ,第二次池化后 7*7
经过上面操作后得到7*7的平面
"""

# Initialize the weights of the first fully connected layer
W_fc1 = weight_variable([7*7*64,1024]) # the previous layer has 7*7*64 neurons; this fully connected layer has 1024
b_fc1 = bias_variable([1024]) # 1024 nodes

# 把池化层2的输出扁平化为1维
h_pool2_flat = tf.reshape(h_pool2, [-1,7*7*64])
#求第一个全连接层的输出
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

# keep_prob sets the fraction of neurons kept active
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

# Initialize the second fully connected layer
W_fc2 = weight_variable([1024,10]) 
b_fc2 = bias_variable([10])

# Compute the output
prediction = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)


# Cross-entropy cost function
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=prediction))
# Optimize with AdamOptimizer
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# Store the results in a list of booleans
correct_prediction = tf.equal(tf.argmax(prediction,1), tf.argmax(y,1))
# Compute the accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(21):
        for batch in range(n_batch):
            batch_xs,batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x:batch_xs, y:batch_ys, keep_prob:0.7})

        acc = sess.run(accuracy, feed_dict={x:mnist.test.images, y:mnist.test.labels, keep_prob:1.0})  # evaluate on the test set with dropout disabled
        print("Iter:"+str(epoch)+", Testing Accuracy:"+str(acc))

    """
    # 保存模型
    saver.save(sess, 'cnn_sx.ckpt')

    # 载入模型
    saver = tf.train.Saver()
    saver.restore(sess, 'cnn_sx.skpt')
    test_acc = sess.run(accuracy, feed_dict={x:mnist.test.images, y:mnist.test.labels, keep_prob:1.0})
    """

After 21 epochs, accuracy reaches 99% on the training set and 99% on the test set.
