Commit 6af70cb
README update

1 parent ef32e64 commit 6af70cb

3 files changed, +206 -20 lines changed

README.rst

Lines changed: 9 additions & 1 deletion
@@ -1,6 +1,6 @@
 
 ********************
-`TensorFlow World`_
+`TensorFlow Course`_
 ********************
 .. image:: https://travis-ci.org/astorfi/TensorFlow-World.svg?branch=master
     :target: https://travis-ci.org/astorfi/TensorFlow-World
@@ -184,6 +184,9 @@ Neural Networks
 +----+-----------------------------------+-----------------------------------------------------------------------------------------------+----------------------------------------------+
 | 10 | *Autoencoders* | `Undercomplete Autoencoder <udercompleteautoencodercode_>`_ | `Documentation <Documentationauto_>`_ |
 +----+-----------------------------------+-----------------------------------------------------------------------------------------------+----------------------------------------------+
+| 11 | *RNN* | `RNN`_ / `IPython <RNNIpython_>`_ | |
++----+-----------------------------------+-----------------------------------------------------------------------------------------------+----------------------------------------------+
+
 
 .. ~~~~~~~~~~~~
 .. **Welcome**
@@ -256,6 +259,7 @@ Neural Networks
 .. | 3 | `Linear SVM`_ | |
 .. +---+---------------------------------------------+----------------------------------------+
 .. | 4 | `MultiClass Kernel SVM`_ | |
+.. +---+---------------------------------------------+----------------------------------------+
 
 .. ~~~~~~~~~~~~~~~~~~~
 .. **Neural Networks**
@@ -272,6 +276,10 @@ Neural Networks
 .. _udercompleteautoencodercode: codes/3-neural_networks/undercomplete-autoencoder
 .. _Documentationauto: docs/tutorials/3-neural_network/autoencoder
 
+.. _RNN: codes/codes/3-neural_networks/recurrent-neural-networks/code/rnn.py
+.. _RNNIpython: codes/codes/3-neural_networks/recurrent-neural-networks/code/rnn.py
+
+
 .. +---+---------------------------------------------+----------------------------------------+
 .. | # | Source Code | |
 .. +===+=============================================+========================================+
Lines changed: 189 additions & 0 deletions
@@ -0,0 +1,189 @@
{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "Untitled0.ipynb",
      "version": "0.3.2",
      "provenance": [],
      "collapsed_sections": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "metadata": {
        "id": "UBpbr4JZKYTz",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "# MNIST Digit Classification Using Recurrent Neural Networks"
      ]
    },
    {
      "metadata": {
        "id": "CxeZAiQkLMNR",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 704
        },
        "outputId": "77ad5785-4e41-40d0-896e-ed9e6873b4ae"
      },
      "cell_type": "code",
      "source": [
        "import tensorflow as tf\n",
        "import numpy as np\n",
        "import matplotlib.pyplot as plt\n",
        "import argparse\n",
        "\n",
        "######################\n",
        "# Optimization Flags #\n",
        "######################\n",
        "\n",
        "learning_rate = 0.001 # initial learning rate\n",
        "seed = 111\n",
        "\n",
        "##################\n",
        "# Training Flags #\n",
        "##################\n",
        "batch_size = 128 # Batch size for training\n",
        "num_epoch = 10 # Number of training iterations\n",
        "\n",
        "###############\n",
        "# Model Flags #\n",
        "###############\n",
        "hidden_size = 128 # Number of neurons for RNN hidden layer\n",
        "\n",
        "# Reset the graph and set the random numbers to be the same using \"seed\"\n",
        "tf.reset_default_graph()\n",
        "tf.set_random_seed(seed)\n",
        "np.random.seed(seed)\n",
        "\n",
        "# Divide 28x28 images into rows of data to feed to RNN as sequential information\n",
        "step_size = 28\n",
        "input_size = 28\n",
        "output_size = 10\n",
        "\n",
        "# Input tensors\n",
        "X = tf.placeholder(tf.float32, [None, step_size, input_size])\n",
        "y = tf.placeholder(tf.int32, [None])\n",
        "\n",
        "# RNN\n",
        "cell = tf.nn.rnn_cell.BasicRNNCell(num_units=hidden_size)\n",
        "output, state = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)\n",
        "\n",
        "# Forward pass and loss calculation\n",
        "logits = tf.layers.dense(state, output_size)\n",
        "cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)\n",
        "loss = tf.reduce_mean(cross_entropy)\n",
        "\n",
        "# Optimizer\n",
        "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)\n",
        "\n",
        "# Prediction\n",
        "prediction = tf.nn.in_top_k(logits, y, 1)\n",
        "accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))\n",
        "\n",
        "# Input data\n",
        "from tensorflow.examples.tutorials.mnist import input_data\n",
        "mnist = input_data.read_data_sets(\"MNIST_data/\")\n",
        "\n",
        "# Process MNIST\n",
        "X_test = mnist.test.images # X_test shape: [num_test, 28*28]\n",
        "X_test = X_test.reshape([-1, step_size, input_size])\n",
        "y_test = mnist.test.labels\n",
        "\n",
        "# Initialize the variables\n",
        "init = tf.global_variables_initializer()\n",
        "\n",
        "# Empty lists for tracking\n",
        "loss_train_list = []\n",
        "acc_train_list = []\n",
        "\n",
        "# Train the model\n",
        "with tf.Session() as sess:\n",
        "    sess.run(init)\n",
        "    n_batches = mnist.train.num_examples // batch_size\n",
        "    for epoch in range(num_epoch):\n",
        "        for batch in range(n_batches):\n",
        "            X_train, y_train = mnist.train.next_batch(batch_size)\n",
        "            X_train = X_train.reshape([-1, step_size, input_size])\n",
        "            sess.run(optimizer, feed_dict={X: X_train, y: y_train})\n",
        "        loss_train, acc_train = sess.run(\n",
        "            [loss, accuracy], feed_dict={X: X_train, y: y_train})\n",
        "        loss_train_list.append(loss_train)\n",
        "        acc_train_list.append(acc_train)\n",
        "        print('Epoch: {}, Train Loss: {:.3f}, Train Acc: {:.3f}'.format(\n",
        "            epoch + 1, loss_train, acc_train))\n",
        "    loss_test, acc_test = sess.run(\n",
        "        [loss, accuracy], feed_dict={X: X_test, y: y_test})\n",
        "    print('Test Loss: {:.3f}, Test Acc: {:.3f}'.format(loss_test, acc_test))\n"
      ],
      "execution_count": 2,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "WARNING:tensorflow:From <ipython-input-2-dc6c3a05d58e>:56: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n",
            "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "Please write your own downloading logic.\n",
            "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/contrib/learn/python/learn/datasets/base.py:252: _internal_retry.<locals>.wrap.<locals>.wrapped_fn (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "Please use urllib or similar directly.\n",
            "Successfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.\n",
            "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "Please use tf.data to implement this functionality.\n",
            "Extracting MNIST_data/train-images-idx3-ubyte.gz\n",
            "Successfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.\n",
            "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "Please use tf.data to implement this functionality.\n",
            "Extracting MNIST_data/train-labels-idx1-ubyte.gz\n",
            "Successfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.\n",
            "Extracting MNIST_data/t10k-images-idx3-ubyte.gz\n",
            "Successfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.\n",
            "Extracting MNIST_data/t10k-labels-idx1-ubyte.gz\n",
            "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n",
            "Epoch: 1, Train Loss: 0.279, Train Acc: 0.898\n",
            "Epoch: 2, Train Loss: 0.124, Train Acc: 0.969\n",
            "Epoch: 3, Train Loss: 0.145, Train Acc: 0.977\n",
            "Epoch: 4, Train Loss: 0.231, Train Acc: 0.914\n",
            "Epoch: 5, Train Loss: 0.088, Train Acc: 0.961\n",
            "Epoch: 6, Train Loss: 0.104, Train Acc: 0.961\n",
            "Epoch: 7, Train Loss: 0.174, Train Acc: 0.961\n",
            "Epoch: 8, Train Loss: 0.099, Train Acc: 0.961\n",
            "Epoch: 9, Train Loss: 0.075, Train Acc: 0.961\n",
            "Epoch: 10, Train Loss: 0.081, Train Acc: 0.969\n",
            "Test Loss: 0.124, Test Acc: 0.965\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "metadata": {
        "id": "nkPppIILLN5Z",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        ""
      ],
      "execution_count": 0,
      "outputs": []
    }
  ]
}
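
The notebook above treats each 28x28 MNIST image as a sequence of 28 rows, so dynamic_rnn produces one hidden vector per row plus a final state, and only that final state is fed to the dense layer that computes the logits. The following minimal sketch is not part of the commit; assuming TensorFlow 1.x and using an illustrative dummy batch, it checks the shapes involved::

    import numpy as np
    import tensorflow as tf

    tf.reset_default_graph()

    # Same graph fragment as the notebook: 28 time steps of 28 features each.
    X = tf.placeholder(tf.float32, [None, 28, 28])
    cell = tf.nn.rnn_cell.BasicRNNCell(num_units=128)
    output, state = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        dummy = np.zeros((4, 28, 28), dtype=np.float32)  # four placeholder "images"
        out_val, state_val = sess.run([output, state], feed_dict={X: dummy})
        print(out_val.shape)    # (4, 28, 128): one hidden vector per image row
        print(state_val.shape)  # (4, 128): the final state used for the logits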

codes/3-neural_networks/recurrent-neural-networks/code/rnn.py

Lines changed: 8 additions & 19 deletions
@@ -1,14 +1,3 @@
-'''
-MNIST using Recurrent Neural Network to predict handwritten digits
-In this tutorial, I am going to demonstrate how to use recurrent neural
-network to predict the famous handwritten digits "MNIST".
-The MNIST dataset consists:
-mnist.train: 55000 training images
-mnist.validation: 5000 validation images
-mnist.test: 10000 test images
-Each image is 28 pixels (rows) by 28 pixels (cols).
-'''
-
 import tensorflow as tf
 import numpy as np
 import matplotlib.pyplot as plt
@@ -25,23 +14,23 @@ def str2bool(v):
 # Optimization Flags #
 ######################
 
-parser.add_argument('--learning_rate', default=0.001, type=float, help='initial learning rate')
-parser.add_argument('--seed', default=111, type=int, help='seed')
+tf.app.flags.DEFINE_float('learning_rate', default=0.001, help='initial learning rate')
+tf.app.flags.DEFINE_integer('seed', default=111, help='seed')
 
 ##################
 # Training Flags #
 ##################
-parser.add_argument('--batch_size', default=128, type=int, help='Batch size for training')
-parser.add_argument('--num_epoch', default=10, type=int, help='Number of training iterations')
-parser.add_argument('--batch_per_log', default=10, type=int, help='Print the log at what number of batches?')
+tf.app.flags.DEFINE_integer('batch_size', default=128, help='Batch size for training')
+tf.app.flags.DEFINE_integer('num_epoch', default=10, help='Number of training iterations')
+tf.app.flags.DEFINE_integer('batch_per_log', default=10, help='Print the log at what number of batches?')
 
 ###############
 # Model Flags #
 ###############
-parser.add_argument('--hidden_size', default=128, type=int, help='Number of neurons for RNN hodden layer')
+tf.app.flags.DEFINE_integer('hidden_size', default=128, help='Number of neurons for RNN hidden layer')
 
-# Add all arguments to parser
-args = parser.parse_args()
+# Store all elements in the FLAGS structure
+args = tf.app.flags.FLAGS
 
 
 # Reset the graph and set the random numbers to be the same using "seed"
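
The diff above replaces the argparse-based options with TensorFlow's flag mechanism: each tf.app.flags.DEFINE_* call registers a flag with a name, a default value, and a help string, and the parsed values are then read as attributes of tf.app.flags.FLAGS. A minimal usage sketch, not part of the commit (it assumes TensorFlow 1.x; the main function and the example override are illustrative only)::

    import tensorflow as tf

    # Register a flag: name, default value, help string.
    tf.app.flags.DEFINE_integer('batch_size', 128, 'Batch size for training')
    FLAGS = tf.app.flags.FLAGS

    def main(_):
        # Flag values are read as attributes once the command line has been parsed.
        print('Training with batch size:', FLAGS.batch_size)

    if __name__ == '__main__':
        # tf.app.run() parses the command line (e.g. `python rnn.py --batch_size=256`)
        # and then calls main().
        tf.app.run()

In absl-backed TF 1.x releases the keyword form used in the diff (default=..., help=...) is accepted as well; the positional form shown in the sketch is the one documented across TF 1.x versions.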
