python - Testing an image on a TensorFlow MNIST model using checkpoints


I am new to TensorFlow. I trained the MNIST sample and now want to test my own image using the generated checkpoints. Following the TensorFlow documentation, I generated the checkpoints and tried to test a sample image by accessing the softmax layer. But for a given image of the number 9, softmax gives me an invalid one-hot-encoded array, array([[ 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.]], dtype=float32), when I access the softmax tensor using

softmax = graph.get_tensor_by_name('softmax:0')

I tried testing with different images, and it didn't give the proper result for any of them.

1. I assumed softmax would give me an array of probabilities. Am I right?

2. Am I saving the model properly?

3. Am I accessing the correct layer for testing the input?

4. Is there anything further that should be added to the testing/training code?

Sorry for posting so much code here.

This is the training code:

from __future__ import division, print_function, unicode_literals

import tensorflow as tf
from time import time
import numpy as np
import os
import scipy.ndimage as ndimage
from scipy import misc
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("mnist_data/", one_hot=True)

logs_train_dir = '/home/test/logs'

def weight_variable(shape, name):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial, name=name + '_weight')

def bias_variable(shape, name):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial, name=name + '_bias')

def conv2d(x, w):
    return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x, name):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding='SAME', name=name + '_max_pool')

# correct labels
y_ = tf.placeholder(tf.float32, [None, 10])

# reshape input data to image dimensions
x = tf.placeholder(tf.float32, [None, 784], name='x')  # input tensor
x_image = tf.reshape(x, [-1, 28, 28, 1], name='x_image')

# build the network
w_conv1 = weight_variable([5, 5, 1, 32], 'w_conv1')
b_conv1 = bias_variable([32], 'b_conv1')
h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1, name='h_conv1')
h_pool1 = max_pool_2x2(h_conv1, 'h_pool1')

w_conv2 = weight_variable([5, 5, 32, 64], 'w_conv2')
b_conv2 = bias_variable([64], 'b_conv2')
h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2, name='h_conv2')
h_pool2 = max_pool_2x2(h_conv2, 'w_conv2')

w_fc1 = weight_variable([7 * 7 * 64, 1024], name='wc1')
b_fc1 = bias_variable([1024], name='b_fc1')
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)

keep_prob = tf.placeholder(tf.float32, name='keep_prob')
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

w_fc2 = weight_variable([1024, 10], name='w_fc2')
b_fc2 = bias_variable([10], name='b_fc2')
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, w_fc2) + b_fc2, name='softmax')  # softmax tensor

# define the loss function
cross_entropy = tf.reduce_mean(
    -tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]),
    name='cross_entropy')
loss_summary = tf.summary.scalar('loss_sc', cross_entropy)

# define training step and accuracy
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1), name='correct_pred')
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')
accuracy_summary = tf.summary.scalar('accuracy_sc', accuracy)

# create a saver
saver = tf.train.Saver()

# initialize the graph
init = tf.global_variables_initializer()
summary_op = tf.summary.merge_all()

sess = tf.Session()
train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
sess.run(init)

# train
print("starting burn-in...")
for i in range(500):
    input_images, correct_predictions = mnist.train.next_batch(50)
    if i % 100 == 0:
        train_accuracy = sess.run(accuracy, feed_dict={x: input_images, y_: correct_predictions, keep_prob: 1.0})
        print("step %d, training accuracy_a %g" % (i, train_accuracy))
    sess.run(train_step, feed_dict={x: input_images, y_: correct_predictions, keep_prob: 0.5})

print("starting training...")
start_time = time()
for i in range(20000):
    input_images, correct_predictions = mnist.train.next_batch(50)
    if i % 100 == 0:
        train_accuracy = sess.run(accuracy, feed_dict={x: input_images, y_: correct_predictions, keep_prob: 1.0})
        print("step %d, training accuracy_b %g" % (i, train_accuracy))
    sess.run(train_step, feed_dict={x: input_images, y_: correct_predictions, keep_prob: 0.5})

    summary_str = sess.run(summary_op, feed_dict={x: input_images, y_: correct_predictions, keep_prob: 0.5})
    train_writer.add_summary(summary_str, i)

    print('saving checkpoints......i ', i)

    if i % 1000 == 0 or (i + 1) == 20000:
        checkpoint_path = os.path.join(logs_train_dir, 'cnn_new_model.ckpt')
        print('checkpoint_path ', checkpoint_path)
        saver.save(sess, checkpoint_path, global_step=i)

print("the training took %.4f seconds." % (time() - start_time))

# validate
print("test accuracy %g" % sess.run(accuracy, feed_dict={
    x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))

The test accuracy is 0.97.

This is the test code:

import numpy as np
import tensorflow as tf
import scipy.ndimage as ndimage
from scipy import misc
import cv2 as cv

def get_test_image():
    image = cv.imread('/home/test/downloads/9.png', 0)
    resized = cv.resize(image, (28, 28), interpolation=cv.INTER_AREA)
    image = np.array(resized)
    flat = np.ndarray.flatten(image)
    reshaped_image = np.reshape(flat, (1, 784))
    return reshaped_image

def evaluate_one_image():
    image_array = get_test_image()
    image_array = image_array.astype(np.float32)
    logs_train_dir = '/home/test/logs'
    model_path = logs_train_dir + "/cnn_new_model.ckpt-19999"
    detection_graph = tf.Graph()

    with tf.Session(graph=detection_graph) as sess:
        # load the graph and the trained states
        loader = tf.train.import_meta_graph(model_path + '.meta')
        loader.restore(sess, model_path)

        # get the tensors by variable name
        image_tensor = detection_graph.get_tensor_by_name('x:0')
        softmax = detection_graph.get_tensor_by_name('softmax:0')
        keep_prob = detection_graph.get_tensor_by_name('keep_prob:0')

        # make the prediction
        softmax_val = sess.run(softmax, feed_dict={image_tensor: image_array, keep_prob: 0.75})

        print('softmax ', softmax_val, '\n\n')
        print('softmax maximum val ', np.argmax(softmax_val))

evaluate_one_image()

So when I tested it with an image of the number 9, it gave me the following output:

softmax [[ 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.]]

softmax maximum val 1

I have no idea where I am going wrong. Any useful pointers would be appreciated.

  1. Dropout isn't used during evaluation/prediction, so you need to set keep_prob=1 (see the combined sketch after the normalization snippet below).

  2. Inspect the pixel values of your input image image_array; the pixel values should be in the range [0, 1]. If they aren't, you need to normalize them by subtracting the image mean and dividing by the image std.

To normalize, you can add the following lines to the function that loads the image:

def get_test_image():
    ...
    image = np.array(resized)
    mean = image.mean()
    std = image.std()
    image = np.subtract(image, mean)
    image = np.divide(image, std)
    image = np.clip(image, 0, 1.000001)
    ...
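Putting both fixes together, a corrected evaluate_one_image() would look roughly like the sketch below. It is a minimal sketch, not a drop-in guarantee: it assumes the same tensor names ('x:0', 'softmax:0', 'keep_prob:0'), image path, and checkpoint path from your question; the only changes are the normalization in get_test_image() and keep_prob: 1.0 at prediction time.

import numpy as np
import tensorflow as tf
import cv2 as cv

def get_test_image():
    image = cv.imread('/home/test/downloads/9.png', 0)
    resized = cv.resize(image, (28, 28), interpolation=cv.INTER_AREA)
    image = np.array(resized)
    # normalize to roughly [0, 1], matching the MNIST training data
    image = np.subtract(image, image.mean())
    image = np.divide(image, image.std())
    image = np.clip(image, 0, 1.000001)
    return np.reshape(image.flatten(), (1, 784))

def evaluate_one_image():
    image_array = get_test_image().astype(np.float32)
    model_path = '/home/test/logs/cnn_new_model.ckpt-19999'
    detection_graph = tf.Graph()

    with tf.Session(graph=detection_graph) as sess:
        # restore the trained graph and weights
        loader = tf.train.import_meta_graph(model_path + '.meta')
        loader.restore(sess, model_path)

        image_tensor = detection_graph.get_tensor_by_name('x:0')
        softmax = detection_graph.get_tensor_by_name('softmax:0')
        keep_prob = detection_graph.get_tensor_by_name('keep_prob:0')

        # dropout is a training-only regularizer: at prediction time
        # keep_prob must be 1.0, otherwise activations are randomly
        # zeroed and the softmax output is distorted
        softmax_val = sess.run(softmax,
                               feed_dict={image_tensor: image_array,
                                          keep_prob: 1.0})
        print('softmax ', softmax_val)
        print('predicted digit ', np.argmax(softmax_val))

evaluate_one_image()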
