forked from knazeri/coursera

Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Showing 19 changed files with 1,431 additions and 0 deletions.
427 changes: 427 additions & 0 deletions
...s/2-deep-convolutional-models-case-studies/KerasTutorial/Keras-Tutorial-Happy House.ipynb
Large diffs are not rendered by default.
Binary file added (+1.76 MB)
...al-networks/2-deep-convolutional-models-case-studies/KerasTutorial/datasets/test_happy.h5
Binary file added (+7.04 MB)
...l-networks/2-deep-convolutional-models-case-studies/KerasTutorial/datasets/train_happy.h5
Binary file added (+3.78 MB)
...s/2-deep-convolutional-models-case-studies/KerasTutorial/images/happy-house.jpg
Binary file added (+547 KB)
...2-deep-convolutional-models-case-studies/KerasTutorial/images/house-members.png
Binary file added (+1.91 MB)
...orks/2-deep-convolutional-models-case-studies/KerasTutorial/images/my_image.jpg
26 changes: 26 additions & 0 deletions
...tional-neural-networks/2-deep-convolutional-models-case-studies/KerasTutorial/kt_utils.py
@@ -0,0 +1,26 @@
import keras.backend as K
import math
import numpy as np
import h5py
import matplotlib.pyplot as plt


def mean_pred(y_true, y_pred):
    return K.mean(y_pred)


def load_dataset():
    train_dataset = h5py.File('datasets/train_happy.h5', "r")
    train_set_x_orig = np.array(train_dataset["train_set_x"][:])  # your train set features
    train_set_y_orig = np.array(train_dataset["train_set_y"][:])  # your train set labels

    test_dataset = h5py.File('datasets/test_happy.h5', "r")
    test_set_x_orig = np.array(test_dataset["test_set_x"][:])  # your test set features
    test_set_y_orig = np.array(test_dataset["test_set_y"][:])  # your test set labels

    classes = np.array(test_dataset["list_classes"][:])  # the list of classes

    # Reshape labels from (m,) to (1, m) row vectors.
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))

    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
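For context (not part of the commit): a minimal sketch of how these helpers are typically wired into the Happy House notebook. The Sequential model below is a hypothetical stand-in for illustration; only load_dataset and mean_pred come from kt_utils.py above.

import numpy as np
from keras.models import Sequential   # hypothetical stand-in model
from keras.layers import Flatten, Dense
from kt_utils import load_dataset, mean_pred

# Load the Happy House data and scale pixel values to [0, 1].
X_train, Y_train, X_test, Y_test, classes = load_dataset()
X_train, X_test = X_train / 255., X_test / 255.
Y_train, Y_test = Y_train.T, Y_test.T  # (1, m) -> (m, 1) for Keras

# Hypothetical model: flatten each input image into one logistic unit.
model = Sequential([Flatten(input_shape=X_train.shape[1:]),
                    Dense(1, activation='sigmoid')])
model.compile(optimizer='adam', loss='binary_crossentropy',
              metrics=['accuracy', mean_pred])
model.fit(X_train, Y_train, epochs=5, batch_size=16)

Passing mean_pred as a metric reports the mean predicted probability per batch, which can serve as a quick sanity check on the classifier's output distribution.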
855 changes: 855 additions & 0 deletions
...-neural-networks/2-deep-convolutional-models-case-studies/ResNets/Residual Networks.ipynb
Large diffs are not rendered by default.
Binary file added (+1.41 MB)
...al-neural-networks/2-deep-convolutional-models-case-studies/ResNets/dataset/test_signs.h5
Binary file added (+12.7 MB)
...l-neural-networks/2-deep-convolutional-models-case-studies/ResNets/dataset/train_signs.h5
Binary file added (+43.6 KB)
...rks/2-deep-convolutional-models-case-studies/ResNets/images/convblock_kiank.png
Binary file added (+33 KB)
...orks/2-deep-convolutional-models-case-studies/ResNets/images/idblock2_kiank.png
Binary file added (+42.8 KB)
...orks/2-deep-convolutional-models-case-studies/ResNets/images/idblock3_kiank.png
Binary file added (+934 KB)
...l-networks/2-deep-convolutional-models-case-studies/ResNets/images/my_image.jpg
Binary file added (+49 KB)
...tworks/2-deep-convolutional-models-case-studies/ResNets/images/resnet_kiank.png
Binary file added (+465 KB)
...ks/2-deep-convolutional-models-case-studies/ResNets/images/signs_data_kiank.png
Binary file added (+68.2 KB)
...deep-convolutional-models-case-studies/ResNets/images/skip_connection_kiank.png
Binary file added (+75.7 KB)
...-deep-convolutional-models-case-studies/ResNets/images/vanishing_grad_kiank.png
123 changes: 123 additions & 0 deletions
...utional-neural-networks/2-deep-convolutional-models-case-studies/ResNets/resnets_utils.py
@@ -0,0 +1,123 @@
import os
import numpy as np
import tensorflow as tf
import h5py
import math


def load_dataset():
    train_dataset = h5py.File('datasets/train_signs.h5', "r")
    train_set_x_orig = np.array(train_dataset["train_set_x"][:])  # your train set features
    train_set_y_orig = np.array(train_dataset["train_set_y"][:])  # your train set labels

    test_dataset = h5py.File('datasets/test_signs.h5', "r")
    test_set_x_orig = np.array(test_dataset["test_set_x"][:])  # your test set features
    test_set_y_orig = np.array(test_dataset["test_set_y"][:])  # your test set labels

    classes = np.array(test_dataset["list_classes"][:])  # the list of classes

    # Reshape labels from (m,) to (1, m) row vectors.
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))

    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes


def random_mini_batches(X, Y, mini_batch_size=64, seed=0):
    """
    Creates a list of random minibatches from (X, Y)

    Arguments:
    X -- input data, of shape (m, Hi, Wi, Ci)
    Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (m, n_y)
    mini_batch_size -- size of the mini-batches, integer
    seed -- this is only for the purpose of grading, so that your "random" minibatches are the same as ours.

    Returns:
    mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
    """

    m = X.shape[0]  # number of training examples
    mini_batches = []
    np.random.seed(seed)

    # Step 1: Shuffle (X, Y)
    permutation = list(np.random.permutation(m))
    shuffled_X = X[permutation, :, :, :]
    shuffled_Y = Y[permutation, :]

    # Step 2: Partition (shuffled_X, shuffled_Y), minus the end case.
    num_complete_minibatches = math.floor(m / mini_batch_size)  # number of mini-batches of size mini_batch_size in your partitioning
    for k in range(0, num_complete_minibatches):
        mini_batch_X = shuffled_X[k * mini_batch_size : k * mini_batch_size + mini_batch_size, :, :, :]
        mini_batch_Y = shuffled_Y[k * mini_batch_size : k * mini_batch_size + mini_batch_size, :]
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)

    # Handling the end case (last mini-batch < mini_batch_size)
    if m % mini_batch_size != 0:
        mini_batch_X = shuffled_X[num_complete_minibatches * mini_batch_size : m, :, :, :]
        mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size : m, :]
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)

    return mini_batches


def convert_to_one_hot(Y, C):
    # Index the C x C identity matrix by the labels, then transpose to (C, m).
    Y = np.eye(C)[Y.reshape(-1)].T
    return Y


def forward_propagation_for_predict(X, parameters):
    """
    Implements the forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX

    Arguments:
    X -- input dataset placeholder, of shape (input size, number of examples)
    parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3",
                  the shapes are given in initialize_parameters

    Returns:
    Z3 -- the output of the last LINEAR unit
    """

    # Retrieve the parameters from the dictionary "parameters"
    W1 = parameters['W1']
    b1 = parameters['b1']
    W2 = parameters['W2']
    b2 = parameters['b2']
    W3 = parameters['W3']
    b3 = parameters['b3']

    # Numpy equivalents:
    Z1 = tf.add(tf.matmul(W1, X), b1)   # Z1 = np.dot(W1, X) + b1
    A1 = tf.nn.relu(Z1)                 # A1 = relu(Z1)
    Z2 = tf.add(tf.matmul(W2, A1), b2)  # Z2 = np.dot(W2, A1) + b2
    A2 = tf.nn.relu(Z2)                 # A2 = relu(Z2)
    Z3 = tf.add(tf.matmul(W3, A2), b3)  # Z3 = np.dot(W3, A2) + b3

    return Z3


def predict(X, parameters):
    W1 = tf.convert_to_tensor(parameters["W1"])
    b1 = tf.convert_to_tensor(parameters["b1"])
    W2 = tf.convert_to_tensor(parameters["W2"])
    b2 = tf.convert_to_tensor(parameters["b2"])
    W3 = tf.convert_to_tensor(parameters["W3"])
    b3 = tf.convert_to_tensor(parameters["b3"])

    params = {"W1": W1,
              "b1": b1,
              "W2": W2,
              "b2": b2,
              "W3": W3,
              "b3": b3}

    # A single flattened 64x64x3 image: 12288 features, one example (TF1-style placeholder).
    x = tf.placeholder("float", [12288, 1])

    z3 = forward_propagation_for_predict(x, params)
    p = tf.argmax(z3)

    sess = tf.Session()
    prediction = sess.run(p, feed_dict={x: X})
    sess.close()

    return prediction
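For context (not part of the commit): a minimal sketch of how the data utilities above are typically combined in the Residual Networks notebook. The epoch loop is an assumption for illustration; the 6-class count matches the SIGNS dataset these files load.

import numpy as np
from resnets_utils import load_dataset, convert_to_one_hot, random_mini_batches

# Load the SIGNS data and scale pixel values to [0, 1].
X_train, Y_train_orig, X_test, Y_test_orig, classes = load_dataset()
X_train, X_test = X_train / 255., X_test / 255.

# One-hot encode labels: (1, m) -> (6, m), then transpose to (m, 6).
Y_train = convert_to_one_hot(Y_train_orig, 6).T
Y_test = convert_to_one_hot(Y_test_orig, 6).T

# Reseed each epoch so every epoch sees a fresh shuffle.
for epoch in range(10):
    for mb_X, mb_Y in random_mini_batches(X_train, Y_train,
                                          mini_batch_size=32, seed=epoch):
        ...  # one optimization step on (mb_X, mb_Y) would go here

Varying the seed per epoch keeps the grading-oriented determinism of random_mini_batches while still changing the shuffle between epochs.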