|
15 | 15 | tf.set_random_seed(1) |
16 | 16 | np.random.seed(1) |
17 | 17 |
18 | | -EPOCH = 1 # train the training data n times, to save time, we just train 1 epoch |
19 | 18 | BATCH_SIZE = 50 |
20 | 19 | LR = 0.001 # learning rate |
21 | 20 |
|
24 | 23 | test_y = mnist.test.labels[:2000] |
25 | 24 |
26 | 25 | tf_x = tf.placeholder(tf.float32, [None, 28*28])/255. # normalize to range (0, 1) |
27 | | -image = tf.reshape(tf_x, [-1, 28, 28, 1]) # (batch, height, width, channel) |
| 26 | +image = tf.reshape(tf_x, [-1, 28, 28, 1]) # (batch, height, width, channel) |
28 | 27 | tf_y = tf.placeholder(tf.int32, [None, 10]) # input y |
29 | 28 |
30 | 29 | # CNN |
|
35 | 34 |     strides=1, |
36 | 35 |     padding='same', |
37 | 36 |     activation=tf.nn.relu |
38 | | -) # -> (28, 28, 16) |
| 37 | +) # -> (28, 28, 16) |
39 | 38 | pool1 = tf.layers.max_pooling2d( |
40 | 39 |     conv1, |
41 | 40 |     pool_size=2, |
42 | 41 |     strides=2, |
43 | | -) # -> (14, 14, 16) |
| 42 | +) # -> (14, 14, 16) |
44 | 43 | conv2 = tf.layers.conv2d(pool1, 32, 5, 1, 'same', activation=tf.nn.relu) # -> (14, 14, 32) |
45 | 44 | pool2 = tf.layers.max_pooling2d(conv2, 2, 2) # -> (7, 7, 32) |
46 | | -flat = tf.reshape(pool2, [-1, 7*7*32]) # -> (7*7*32, ) |
47 | | -output = tf.layers.dense(flat, 10) # output layer |
| 45 | +flat = tf.reshape(pool2, [-1, 7*7*32]) # -> (7*7*32, ) |
| 46 | +output = tf.layers.dense(flat, 10) # output layer |
48 | 47 |
49 | 48 | loss = tf.losses.softmax_cross_entropy(onehot_labels=tf_y, logits=output) # compute cost |
50 | 49 | train_op = tf.train.AdamOptimizer(LR).minimize(loss) |
51 | 50 |
52 | 51 | accuracy = tf.metrics.accuracy( # return (acc, update_op), and create 2 local variables |
53 | 52 |     labels=tf.argmax(tf_y, axis=1), predictions=tf.argmax(output, axis=1),)[1] |
54 | 53 |
55 | | -sess = tf.Session() # control training and others |
| 54 | +sess = tf.Session() |
56 | 55 | init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()) # the local var is for accuracy_op |
57 | 56 | sess.run(init_op) # initialize var in graph |
58 | 57 |
59 | | - |
60 | 58 | for step in range(600): |
61 | 59 |     b_x, b_y = mnist.train.next_batch(BATCH_SIZE) |
62 | 60 |     _, loss_ = sess.run([train_op, loss], {tf_x: b_x, tf_y: b_y}) |
|
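The diff ends inside the training loop, which so far only runs train_op and loss. For completeness, a minimal sketch of the periodic evaluation step that usually follows in this kind of script; the 50-step cadence, the print format, and the test_x tensor (presumably defined next to test_y in the collapsed context above) are assumptions, not part of this commit:

    for step in range(600):
        b_x, b_y = mnist.train.next_batch(BATCH_SIZE)
        _, loss_ = sess.run([train_op, loss], {tf_x: b_x, tf_y: b_y})
        if step % 50 == 0:  # evaluation cadence is an assumption
            # 'accuracy' holds the update op returned by tf.metrics.accuracy,
            # so running it both updates the streaming counters (the two local
            # variables initialized via tf.local_variables_initializer) and
            # returns the running accuracy over all evaluations so far
            accuracy_ = sess.run(accuracy, {tf_x: test_x, tf_y: test_y})
            print('Step:', step, '| train loss: %.4f' % loss_, '| test accuracy: %.2f' % accuracy_)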