import tensorflow.examples.tutorials.mnist.input_data as inputData
import tensorflow as tf

mnist = inputData.read_data_sets('MNIST_data', one_hot=True)

def compute_accuracy(t_xs, t_ys):
    global y
    prediction = tf.nn.softmax(y)
    correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(t_ys, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    result = sess.run(accuracy, feed_dict={xs: t_xs, ys: t_ys, keep_pro: 1})
    return result

# define the weight variable
def weight_Variables(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

# define the bias variable
def bias_Variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

# convolution function
def cov2d(x, W):
    # stride [1, x_movement, y_movement, 1]
    # strides[0] and strides[3] must both be 1
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

# define the pooling layer
def max_pool_2X2(x):
    # 2x2 max pooling
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

# 1 define the placeholders for the training data
xs = tf.placeholder(tf.float32, [None, 784])  # 28*28
ys = tf.placeholder(tf.float32, [None, 10])   # labels
keep_pro = tf.placeholder(tf.float32)
x_image = tf.reshape(xs, [-1, 28, 28, 1])     # 1 channel means grayscale
print(x_image.shape)                          # [n_samples, 28, 28, 1]

# 2 === Conv1 layer ===
W_cov1 = weight_Variables([5, 5, 1, 32])  # kernel 5x5, stride 1, 32 feature maps
b_cov1 = bias_Variable([32])
# build the first convolutional layer with a ReLU activation
h_conv1 = tf.nn.relu(cov2d(x_image, W_cov1) + b_cov1)
# first pooling layer; output size [14,14]
h_pool1 = max_pool_2X2(h_conv1)

# 3 === Conv2 layer ===
W_cov2 = weight_Variables([5, 5, 32, 64])  # kernel 5x5, stride 1, 64 feature maps
b_cov2 = bias_Variable([64])
# build the second convolutional layer with a ReLU activation
h_conv2 = tf.nn.relu(cov2d(h_pool1, W_cov2) + b_cov2)
# second pooling layer; output size [7,7]
h_pool2 = max_pool_2X2(h_conv2)

# 4 === fully connected layer 1 ===
W_fc1 = weight_Variables([7*7*64, 1024])  # 1024 features
b_fc1 = bias_Variable([1024])
# flatten h_pool2: [n_samples, 7, 7, 64] -> [n_samples, 7*7*64]
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# dropout improves the model's generalization
h_fc1_drop = tf.nn.dropout(h_fc1, keep_pro)

# 5 === fully connected layer 2 ===
W_fc2 = weight_Variables([1024, 10])  # 10 classes
b_fc2 = bias_Variable([10])
# classification logits
y = tf.matmul(h_fc1_drop, W_fc2) + b_fc2

# 6 loss and optimizer
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=ys, logits=y))
optimizer = tf.train.AdamOptimizer(1e-4).minimize(loss)

# 7 start the session
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())

# 8 training and test
for i in range(1000):
    # 100 data points are randomly selected from the training set
    batch_x, batch_y = mnist.train.next_batch(100)
    # training step
    sess.run(optimizer, feed_dict={xs: batch_x, ys: batch_y, keep_pro: 0.5})
    if i % 50 == 0:
        print(compute_accuracy(mnist.test.images, mnist.test.labels))
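Note that the script above targets TensorFlow 1.x; the tensorflow.examples.tutorials.mnist module it loads data from was removed in TensorFlow 2.x. Below is a minimal sketch, assuming a TensorFlow 2.x install, of how one might keep the graph code above unchanged by importing the compat.v1 API and swapping in tf.keras.datasets.mnist plus a hypothetical next_batch helper (the helper name and normalization are my assumptions, not part of the original post):

import numpy as np
# run the original TF1-style graph under TF2 via the compat.v1 API
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restores placeholders, sessions, etc.

# replace the removed tutorials loader with the Keras MNIST dataset
(train_x, train_y), (test_x, test_y) = tf.keras.datasets.mnist.load_data()
train_x = train_x.reshape(-1, 784).astype('float32') / 255.0  # flatten + scale to [0,1]
test_x = test_x.reshape(-1, 784).astype('float32') / 255.0
train_y = np.eye(10, dtype='float32')[train_y]  # one-hot labels, as read_data_sets did
test_y = np.eye(10, dtype='float32')[test_y]

def next_batch(batch_size=100):
    # hypothetical stand-in for mnist.train.next_batch: sample a random mini-batch
    idx = np.random.choice(len(train_x), batch_size, replace=False)
    return train_x[idx], train_y[idx]

With these definitions in place, the training loop would call next_batch(100) instead of mnist.train.next_batch(100), and compute_accuracy would be fed test_x and test_y instead of mnist.test.images and mnist.test.labels.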
Reprinted from: http://kqjti.baihongyu.com/