Before
import tensorflow as tf
import numpy as np
import os

# Placeholders for flattened 28x28 MNIST images and their one-hot labels
images = tf.placeholder(tf.float32, [None, 784])
labels = tf.placeholder(tf.float32, [None, 10])

# Parameters of a single linear (softmax regression) layer
weight = tf.Variable(tf.random_normal([784, 10], stddev=0.01), name='weight')
bias = tf.Variable(tf.random_normal([10], stddev=0.01), name='bias')

# Unnormalized class scores and the predicted class for each example
logits = tf.add(tf.matmul(images, weight), bias)
output = tf.argmax(logits, axis=1)

After
import tensorflow as tf
import numpy as np
import os

# Placeholders for flattened 28x28 MNIST images and their one-hot labels
images = tf.placeholder(tf.float32, [None, 784])
labels = tf.placeholder(tf.float32, [None, 10])

# Parameters of a single linear (softmax regression) layer
weight = tf.Variable(tf.random_normal([784, 10], stddev=0.01), name='weight')
bias = tf.Variable(tf.random_normal([10], stddev=0.01), name='bias')

# Unnormalized class scores and the predicted class for each example
logits = tf.add(tf.matmul(images, weight), bias)
output = tf.argmax(logits, axis=1)

# Average cross-entropy loss over the batch and classification accuracy
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))
accuracy = tf.reduce_mean(tf.cast(tf.equal(output, tf.argmax(labels, axis=1)), tf.float32))

# Adam optimizer with a fixed learning rate
learning_rate = 1e-3
optimize = tf.train.AdamOptimizer(learning_rate).minimize(loss)

# Op to initialize all variables and a Saver for writing checkpoints
init = tf.global_variables_initializer()
saver = tf.train.Saver()

# Directory the checkpoint files are written to
savedir = 'savedir/'
if not os.path.exists(savedir):
    os.makedirs(savedir)

# Download (if necessary) and load MNIST with one-hot labels
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)

sess = tf.Session()
sess.run(init)

batch_size = 1000
max_iteration = 100
batch_size = np.minimum(batch_size, mnist.train.images.shape[0])  # never exceed the training-set size

for epoch in range(max_iteration):
    batch_input, batch_labels = mnist.train.next_batch(batch_size)

    # One optimization step; also fetch the current loss and accuracy
    _, training_loss, training_accuracy = sess.run(
        [optimize, loss, accuracy],
        feed_dict={images: batch_input, labels: batch_labels})

    print(epoch + 1, training_loss, training_accuracy)

    # Saves (and overwrites) the same checkpoint file on every iteration
    save_path = saver.save(sess, savedir + 'model.ckpt')
    print(save_path)
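
To check that the checkpoint is usable, the saved variables can be loaded back with saver.restore. The following is a minimal sketch, not part of the original script; it assumes the graph built above (images, labels, accuracy, saver, savedir, mnist) is still defined in the same process.

# Sketch (assumption: the graph above has already been built in this process):
# restore 'weight' and 'bias' into a fresh session and evaluate on the test split.
with tf.Session() as restored_sess:
    saver.restore(restored_sess, savedir + 'model.ckpt')
    test_accuracy = restored_sess.run(accuracy,
                                      feed_dict={images: mnist.test.images,
                                                 labels: mnist.test.labels})
    print('test accuracy after restore:', test_accuracy)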