Commit

update consistency of all jupyter notebook files
graykode committed Apr 4, 2019
1 parent d7aa35e commit cf9f407
Showing 20 changed files with 2,958 additions and 2,956 deletions.
220 changes: 110 additions & 110 deletions 1-1.NNLM/NNLM_Tensor.ipynb
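For reference, the notebook's single cell implements the classic NNLM scoring function. In terms of the code's variables (H, d, U, b), with x denoting the reshaped one-hot context (the code's `input` tensor), the computation sketched below matches the cell:

\text{logits} = \tanh(xH + d)\,U + b, \qquad \hat{y} = \arg\max_j \text{logits}_j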
@@ -1,115 +1,115 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"id": "4dTc8F-IObVT",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 125.0
},
"outputId": "20bfe275-33af-4655-b2ee-cecdea69cce7"
},
"outputs": [
{
"metadata": {
"id": "4dTc8F-IObVT",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 125
},
"outputId": "20bfe275-33af-4655-b2ee-cecdea69cce7"
},
"cell_type": "code",
"source": [
"# code by Tae Hwan Jung @graykode\n",
"import tensorflow as tf\n",
"import numpy as np\n",
"\n",
"tf.reset_default_graph()\n",
"\n",
"sentences = [ \"i like dog\", \"i love coffee\", \"i hate milk\"]\n",
"\n",
"word_list = \" \".join(sentences).split()\n",
"word_list = list(set(word_list))\n",
"word_dict = {w: i for i, w in enumerate(word_list)}\n",
"number_dict = {i: w for i, w in enumerate(word_list)}\n",
"n_class = len(word_dict) # number of Vocabulary\n",
"\n",
"# NNLM Parameter\n",
"n_step = 2 # number of steps ['i like', 'i love', 'i hate']\n",
"n_hidden = 2 # number of hidden units\n",
"\n",
"def make_batch(sentences):\n",
" input_batch = []\n",
" target_batch = []\n",
"\n",
" for sen in sentences:\n",
" word = sen.split()\n",
" input = [word_dict[n] for n in word[:-1]]\n",
" target = word_dict[word[-1]]\n",
"\n",
" input_batch.append(np.eye(n_class)[input])\n",
" target_batch.append(np.eye(n_class)[target])\n",
"\n",
" return input_batch, target_batch\n",
"\n",
"# Model\n",
"X = tf.placeholder(tf.float32, [None, n_step, n_class]) # [batch_size, number of steps, number of Vocabulary]\n",
"Y = tf.placeholder(tf.float32, [None, n_class])\n",
"\n",
"input = tf.reshape(X, shape=[-1, n_step * n_class]) # [batch_size, n_step * n_class]\n",
"H = tf.Variable(tf.random_normal([n_step * n_class, n_hidden]))\n",
"d = tf.Variable(tf.random_normal([n_hidden]))\n",
"U = tf.Variable(tf.random_normal([n_hidden, n_class]))\n",
"b = tf.Variable(tf.random_normal([n_class]))\n",
"\n",
"tanh = tf.nn.tanh(d + tf.matmul(input, H)) # [batch_size, n_hidden]\n",
"model = tf.matmul(tanh, U) + b # [batch_size, n_class]\n",
"\n",
"cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=model, labels=Y))\n",
"optimizer = tf.train.AdamOptimizer(0.001).minimize(cost)\n",
"prediction =tf.argmax(model, 1)\n",
"\n",
"# Training\n",
"init = tf.global_variables_initializer()\n",
"sess = tf.Session()\n",
"sess.run(init)\n",
"\n",
"input_batch, target_batch = make_batch(sentences)\n",
"\n",
"for epoch in range(5000):\n",
" _, loss = sess.run([optimizer, cost], feed_dict={X: input_batch, Y: target_batch})\n",
" if (epoch + 1)%1000 == 0:\n",
" print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss))\n",
"\n",
"# Predict\n",
"predict = sess.run([prediction], feed_dict={X: input_batch})\n",
"\n",
"# Test\n",
"input = [sen.split()[:2] for sen in sentences]\n",
"print([sen.split()[:2] for sen in sentences], '->', [number_dict[n] for n in predict[0]])"
],
"execution_count": 1,
"outputs": [
{
"output_type": "stream",
"text": [
"Epoch: 1000 cost = 0.468996\n",
"Epoch: 2000 cost = 0.125292\n",
"Epoch: 3000 cost = 0.045533\n",
"Epoch: 4000 cost = 0.020888\n",
"Epoch: 5000 cost = 0.010938\n",
"[['i', 'like'], ['i', 'love'], ['i', 'hate']] -> ['dog', 'coffee', 'milk']\n"
],
"name": "stdout"
}
]
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch: 1000 cost = 0.468996\n",
"Epoch: 2000 cost = 0.125292\n",
"Epoch: 3000 cost = 0.045533\n",
"Epoch: 4000 cost = 0.020888\n",
"Epoch: 5000 cost = 0.010938\n",
"[['i', 'like'], ['i', 'love'], ['i', 'hate']] -> ['dog', 'coffee', 'milk']\n"
]
}
],
"source": [
"# code by Tae Hwan Jung @graykode\n",
"import tensorflow as tf\n",
"import numpy as np\n",
"\n",
"tf.reset_default_graph()\n",
"\n",
"sentences = [ \"i like dog\", \"i love coffee\", \"i hate milk\"]\n",
"\n",
"word_list = \" \".join(sentences).split()\n",
"word_list = list(set(word_list))\n",
"word_dict = {w: i for i, w in enumerate(word_list)}\n",
"number_dict = {i: w for i, w in enumerate(word_list)}\n",
"n_class = len(word_dict) # number of Vocabulary\n",
"\n",
"# NNLM Parameter\n",
"n_step = 2 # number of steps ['i like', 'i love', 'i hate']\n",
"n_hidden = 2 # number of hidden units\n",
"\n",
"def make_batch(sentences):\n",
" input_batch = []\n",
" target_batch = []\n",
"\n",
" for sen in sentences:\n",
" word = sen.split()\n",
" input = [word_dict[n] for n in word[:-1]]\n",
" target = word_dict[word[-1]]\n",
"\n",
" input_batch.append(np.eye(n_class)[input])\n",
" target_batch.append(np.eye(n_class)[target])\n",
"\n",
" return input_batch, target_batch\n",
"\n",
"# Model\n",
"X = tf.placeholder(tf.float32, [None, n_step, n_class]) # [batch_size, number of steps, number of Vocabulary]\n",
"Y = tf.placeholder(tf.float32, [None, n_class])\n",
"\n",
"input = tf.reshape(X, shape=[-1, n_step * n_class]) # [batch_size, n_step * n_class]\n",
"H = tf.Variable(tf.random_normal([n_step * n_class, n_hidden]))\n",
"d = tf.Variable(tf.random_normal([n_hidden]))\n",
"U = tf.Variable(tf.random_normal([n_hidden, n_class]))\n",
"b = tf.Variable(tf.random_normal([n_class]))\n",
"\n",
"tanh = tf.nn.tanh(d + tf.matmul(input, H)) # [batch_size, n_hidden]\n",
"model = tf.matmul(tanh, U) + b # [batch_size, n_class]\n",
"\n",
"cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=model, labels=Y))\n",
"optimizer = tf.train.AdamOptimizer(0.001).minimize(cost)\n",
"prediction =tf.argmax(model, 1)\n",
"\n",
"# Training\n",
"init = tf.global_variables_initializer()\n",
"sess = tf.Session()\n",
"sess.run(init)\n",
"\n",
"input_batch, target_batch = make_batch(sentences)\n",
"\n",
"for epoch in range(5000):\n",
" _, loss = sess.run([optimizer, cost], feed_dict={X: input_batch, Y: target_batch})\n",
" if (epoch + 1)%1000 == 0:\n",
" print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss))\n",
"\n",
"# Predict\n",
"predict = sess.run([prediction], feed_dict={X: input_batch})\n",
"\n",
"# Test\n",
"input = [sen.split()[:2] for sen in sentences]\n",
"print([sen.split()[:2] for sen in sentences], '->', [number_dict[n] for n in predict[0]])"
]
}
],
"metadata": {
"colab": {
"name": "NNLM-Tensor.ipynb",
"version": "0.3.2",
"provenance": [],
"collapsed_sections": []
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"accelerator": "GPU"
},
"nbformat": 4,
"nbformat_minor": 0
}
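A usage note: the cell above targets the TensorFlow 1.x graph API (tf.placeholder, tf.Session, tf.random_normal), which TensorFlow 2.x removed from the top-level namespace. A minimal sketch for running it on a 2.x install, assuming only that the installed tensorflow package is version 2.x, is the compat shim below; the rest of the cell then runs as written.

# Sketch: run the TF1-style cell on TensorFlow 2.x via the compat layer.
import tensorflow.compat.v1 as tf  # exposes the 1.x API under a 2.x install
tf.disable_v2_behavior()           # restore graph mode, placeholders, sessions
# ...then execute the notebook cell's code unchanged.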
