Deep Learning Laboratory
…………………………                    …………………………
Internal Examiner                  External Examiner
JNN INSTITUTE OF ENGINEERING DEPT. OF AI & DS
TABLE OF CONTENTS
- PROGRAM OUTCOMES
- SYLLABUS
PO2. Problem Analysis: Identify, formulate, review research literature, and analyze
complex engineering problems reaching substantiated conclusions
using first principles of mathematics, natural sciences, and engineering sciences.
PO5. Modern Tool Usage: Create, select, and apply appropriate techniques,
resources, and modern engineering and IT tools including prediction and modeling
of complex engineering activities with an understanding of the limitations.
PO6. The Engineer and Society: Apply reasoning informed by the contextual
knowledge to assess societal, health, safety, legal and cultural issues and the
consequent responsibilities relevant to the professional engineering practice.
PO8. Ethics: Apply ethical principles and commit to professional ethics and
responsibilities and norms of the engineering practice.
PO12. Life-long Learning: Recognize the need for, and have the preparation and
ability to engage in independent and life-long learning in the broadest context
of technological change.
SYLLABUS
LIST OF EXPERIMENTS:
1. Solving XOR problem using DNN
2. Character recognition using CNN
3. Face recognition using CNN
4. Language modeling using RNN
5. Sentiment analysis using LSTM
6. Parts of speech tagging using Sequence to Sequence architecture
7. Machine translation using Encoder-Decoder model
8. Image augmentation using GANs
9. Helmet and number plate detection and recognition
COURSE OUTCOMES:
At the end of this course, the students will be able to:
• CO1: Apply deep neural network for simple problems (K3)
• CO2: Apply Convolution Neural Network for image processing (K3)
• CO3: Apply Recurrent Neural Network and its variants for text analysis (K3)
• CO4: Apply generative models for data augmentation (K3)
• CO5: Develop real-world solutions using suitable deep neural networks (K4)
TABLE OF CONTENTS

S. NO.   TITLE OF THE EXPERIMENTS
1        Solving XOR problem using DNN
2        Character recognition using CNN
3        Face recognition using CNN
4        Language modeling using RNN
5        Sentiment analysis using LSTM
6        Parts of speech tagging using Sequence to Sequence architecture
7        Machine translation using Encoder-Decoder model
8        Image augmentation using GANs
9        Helmet and number plate detection and recognition
Ex. No: 1
SOLVING XOR PROBLEM USING DNN
Date :
Aim:
To write a Python program to solve the XOR problem using a deep neural network (DNN).
Algorithm:
Program:
import numpy as np                # For matrix math
import matplotlib.pyplot as plt  # For plotting
import sys                        # For printing progress
# The training data.
X = np.array([
    [0, 1],
    [1, 0],
    [1, 1],
    [0, 0]
])
y = np.array([[1], [1], [0], [0]])  # XOR targets for the rows of X
m = 4                               # number of training examples

num_i_units = 2  # input units
num_h_units = 2  # hidden units
num_o_units = 1  # output unit
np.random.seed(1)
W1 = np.random.normal(0, 1, (num_h_units, num_i_units)) # 2x2
W2 = np.random.normal(0, 1, (num_o_units, num_h_units)) # 1x2
B1 = np.random.random((num_h_units, 1)) # 2x1
B2 = np.random.random((num_o_units, 1))#1x1
def sigmoid(z, derv=False):
if derv: return z * (1 - z)
return 1 / (1 + np.exp(-z))
# (reconstructed) the outer training loop, the gradient buffers, and the weight
# updates are missing in the listing; the learning rate and epoch count below
# are assumptions
learning_rate = 0.1
epochs = 10000
_W1, _W2 = W1.copy(), W2.copy()
_B1, _B2 = B1.copy(), B2.copy()

for i in range(epochs):
    dW1 = np.zeros_like(_W1); dW2 = np.zeros_like(_W2)
    dB1 = np.zeros_like(_B1); dB2 = np.zeros_like(_B2)
    for j in range(m):
        sys.stdout.write("\rIteration: {} and {}".format(i + 1, j + 1))
        # Forward prop.
        a0 = X[j].reshape(X[j].shape[0], 1)  # 2x1
        z1 = _W1.dot(a0) + _B1               # 2x2 * 2x1 + 2x1 = 2x1
        a1 = sigmoid(z1)                     # 2x1
        z2 = _W2.dot(a1) + _B2               # 1x2 * 2x1 + 1x1 = 1x1
        a2 = sigmoid(z2)                     # 1x1
        # Back prop.
        dz2 = a2 - y[j]                      # 1x1
        dW2 += dz2 * a1.T                    # 1x1 * 1x2 = 1x2
        dz1 = np.multiply((_W2.T * dz2), sigmoid(a1, derv=True))  # (2x1 * 1x1) .* 2x1 = 2x1
        dW1 += dz1.dot(a0.T)                 # 2x1 * 1x2 = 2x2
        dB1 += dz1                           # 2x1
        dB2 += dz2                           # 1x1
    # Gradient-descent step, averaged over the m examples
    _W1 -= learning_rate * dW1 / m
    _W2 -= learning_rate * dW2 / m
    _B1 -= learning_rate * dB1 / m
    _B2 -= learning_rate * dB2 / m
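A quick way to verify the trained network is to run a forward pass over the four inputs (a minimal sketch; it assumes the trained weights _W1, _W2, _B1, _B2 produced by the loop above):

def predict(x):
    a1 = sigmoid(_W1.dot(x.reshape(2, 1)) + _B1)
    a2 = sigmoid(_W2.dot(a1) + _B2)
    return a2[0][0]

for row in X:
    print(row, "->", round(predict(row), 3))   # values near 1 for XOR-true inputs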
Output:
Result:
Thus a Python program to solve the XOR problem using a DNN was written and executed successfully.
Ex. No: 2
CHARACTER RECOGNITION USING CNN
Date :
Aim:
To write a Python program to perform handwritten character recognition using a convolutional neural network (CNN).
Algorithm:
Program:
pip install opencv-python
pip install keras
pip install tensorflow
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from keras.models import Sequential
from keras.layers import Dense, Flatten, Conv2D, MaxPool2D, Dropout
from keras.optimizers import SGD, Adam
from keras.callbacks import ReduceLROnPlateau, EarlyStopping
from keras.utils import to_categorical
data = pd.read_csv(r"A_Z Handwritten Data.csv").astype('float32')
X = data.drop('0',axis = 1)
y = data['0']
train_x, test_x, train_y, test_y = train_test_split(X, y, test_size = 0.2)
train_x = np.reshape(train_x.values, (train_x.shape[0], 28, 28))
test_x = np.reshape(test_x.values, (test_x.shape[0], 28, 28))
word_dict = {0:'A',1:'B',2:'C',3:'D',4:'E',5:'F',6:'G',7:'H',8:'I',9:'J',10:'K',11:'L',12:'M',
             13:'N',14:'O',15:'P',16:'Q',17:'R',18:'S',19:'T',20:'U',21:'V',22:'W',23:'X',24:'Y',25:'Z'}
y_int = y.astype(int)   # np.int0 was removed in newer NumPy versions
count = np.zeros(26, dtype='int')
for i in y_int:
count[i] +=1
alphabets = []
for i in word_dict.values():
alphabets.append(i)
shuff = shuffle(train_x[:100])
fig, ax = plt.subplots(3,3, figsize = (10,10))
axes = ax.flatten()
for i in range(9):
_, shu = cv2.threshold(shuff[i], 30, 200, cv2.THRESH_BINARY)
axes[i].imshow(np.reshape(shuff[i], (28,28)), cmap=plt.get_cmap('gray'))
plt.show()
# Reshape data for model creation
train_X = train_x.reshape(train_x.shape[0],train_x.shape[1],train_x.shape[2],1)
print("The new shape of train data: ", train_X.shape)
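The cells that reshape the test set, one-hot encode the labels, and build the convolutional base do not appear in the listing, although test_yOHE and a convolutional stack are clearly used later. A plausible reconstruction (the layer sizes are assumptions, chosen to be consistent with the 137,178-parameter summary shown in the output):

test_X = test_x.reshape(test_x.shape[0], test_x.shape[1], test_x.shape[2], 1)

train_yOHE = to_categorical(train_y, num_classes=26, dtype='int')
test_yOHE = to_categorical(test_y, num_classes=26, dtype='int')

model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation="relu", input_shape=(28, 28, 1)))
model.add(MaxPool2D(pool_size=(2, 2), strides=2))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation="relu", padding="same"))
model.add(MaxPool2D(pool_size=(2, 2), strides=2))
model.add(Conv2D(filters=128, kernel_size=(3, 3), activation="relu", padding="valid"))
model.add(MaxPool2D(pool_size=(2, 2), strides=2))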
model.add(Flatten())
model.add(Dense(64,activation ="relu"))
model.add(Dense(128,activation ="relu"))
model.add(Dense(26,activation ="softmax"))
model.compile(optimizer = Adam(learning_rate=0.001),
loss='categorical_crossentropy', metrics=['accuracy'])
# (fragment) inside a loop over validation subplots: label each image with its class
pred = word_dict[np.argmax(test_yOHE[i])]
ax.set_title("Prediction: " + pred)

# Prediction on an external image
img = cv2.imread(r'test_image.jpg')
img_copy = img.copy()
# img_final: the image after grayscale conversion, thresholding, and reshaping
# to 1x28x28x1 (those preprocessing steps are not shown in the manual)
img_pred = word_dict[np.argmax(model.predict(img_final))]
while (1):
k = cv2.waitKey(1) & 0xFF
if k == 27:
break
cv2.destroyAllWindows()
Output:
model: "sequential"
Dept of AI & DS 10
J.N.N Institute of Engineering AD3511 Deep Learning Lab
2D)
=================================================================
Total params: 137,178
Trainable params: 137,178
Non-trainable params: 0
Result:
Thus a Python program for character recognition using CNN was written and executed successfully.
Ex. No: 3
FACE RECOGNITION USING CNN
Date :
Aim:
To write a Python program to perform face recognition using a convolutional neural network (CNN).
Algorithm:
Program:
import numpy as np
import pandas as pd
from sklearn.datasets import fetch_lfw_people

# (reconstructed) load the Labeled Faces in the Wild dataset; the exact keyword
# arguments that produce the 128x128 colour crops in the output are not shown
faces = fetch_lfw_people(min_faces_per_person=100, color=True)
print(faces.target_names)
print(faces.images.shape)
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
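The dictionary names plotted in the next two lines is never built in the listing; a small reconstruction consistent with that use:

from collections import Counter

# count how many images the dataset holds for each person
counts = Counter(faces.target)
names = {faces.target_names[key]: counts[key] for key in counts.keys()}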
df = pd.DataFrame.from_dict(names, orient='index')
df.plot(kind='bar')
# keep at most 100 images per person (the mask-building loop is reconstructed;
# note that np.bool is removed in newer NumPy, so plain bool is used)
mask = np.zeros(faces.target.shape, dtype=bool)
for target in np.unique(faces.target):
    mask[np.where(faces.target == target)[0][:100]] = 1

x_faces = faces.data[mask]
y_faces = faces.target[mask]
x_faces = np.reshape(x_faces, (x_faces.shape[0], faces.images.shape[1],
                               faces.images.shape[2], faces.images.shape[3]))
x_faces.shape
from tensorflow.keras.utils import to_categorical
from sklearn.model_selection import train_test_split
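Nothing in the listing defines face_images, class_count, or the train/test arrays used from here on; a sketch consistent with those names (the split ratio is an assumption):

face_images = x_faces / 255.0           # scale pixel values to [0, 1]
face_labels = to_categorical(y_faces)   # one-hot encode the person ids
class_count = len(faces.target_names)

x_train, x_test, y_train, y_test = train_test_split(
    face_images, face_labels, train_size=0.8, random_state=0)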
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu',
                 input_shape=(face_images.shape[1:])))
model.add(MaxPooling2D(2, 2))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(2, 2))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(2, 2))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(class_count, activation='softmax'))
model.compile(optimizer='adam',
loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
hist = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=20,
batch_size=25)
acc = hist.history['accuracy']
val_acc = hist.history['val_accuracy']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, '-', label='Training Accuracy')
plt.plot(epochs, val_acc, ':', label='Validation Accuracy')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.plot()
from sklearn.metrics import confusion_matrix

y_predicted = model.predict(x_test)
mat = confusion_matrix(y_test.argmax(axis=1), y_predicted.argmax(axis=1))
# (reconstructed) render the confusion matrix; the heatmap call is missing in the manual
sns.heatmap(mat, square=True, annot=True, fmt='d', cbar=False, cmap='Blues',
            xticklabels=faces.target_names, yticklabels=faces.target_names)
plt.xlabel('Predicted label')
plt.ylabel('Actual label')
import keras.utils as image
x = image.load_img('george.jpg',target_size=(face_images.shape[1:]))
plt.xticks([])
plt.yticks([])
plt.imshow(x)
x = image.img_to_array(x) / 255
x = np.expand_dims(x, axis=0)
y = model.predict(x)[0]
for i in range(len(y)):
print(faces.target_names[i] + ': ' + str(y[i]))
Output:
['Colin Powell' 'Donald Rumsfeld' 'George W Bush' 'Gerhard Schroeder' 'Tony
Blair']
(1140, 128, 128, 3)
Model: "sequential"
Epoch 1/20
16/16 [==============================] - 2s 123ms/step - loss: 1.6558 - accuracy: 0.1925 - val_loss: 1.6038 - val_accuracy: 0.2000
Epoch 2/20
16/16 [==============================] - 2s 110ms/step - loss: 1.5860 - accuracy: 0.3175 - val_loss: 1.5416 - val_accuracy: 0.3200
Epoch 3/20
16/16 [==============================] - 2s 112ms/step - loss: 1.4851 - accuracy: 0.3675 - val_loss: 1.3706 - val_accuracy: 0.4500
Epoch 4/20
16/16 [==============================] - 2s 110ms/step - loss: 1.1602 - accuracy: 0.5775 - val_loss: 1.0931 - val_accuracy: 0.5900
Epoch 5/20
16/16 [==============================] - 2s 112ms/step - loss: 0.8385 - accuracy: 0.7000 - val_loss: 0.8494 - val_accuracy: 0.6700
Epoch 6/20
16/16 [==============================] - 2s 111ms/step - loss: 0.5011 - accuracy: 0.8275 - val_loss: 0.8085 - val_accuracy: 0.6900
Epoch 7/20
16/16 [==============================] - 2s 111ms/step - loss: 0.3819 - accuracy: 0.8550 - val_loss: 0.7241 - val_accuracy: 0.7200
Epoch 8/20
16/16 [==============================] - 2s 110ms/step - loss: 0.3558 - accuracy: 0.8950 - val_loss: 0.5499 - val_accuracy: 0.7800
Epoch 9/20
16/16 [==============================] - 2s 114ms/step - loss: 0.1407 - accuracy: 0.9575 - val_loss: 0.7090 - val_accuracy: 0.8000
Epoch 10/20
16/16 [==============================] - 2s 115ms/step - loss: 0.0869 - accuracy: 0.9875 - val_loss: 0.6296 - val_accuracy: 0.8400
Epoch 11/20
16/16 [==============================] - 2s 111ms/step - loss: 0.0413 - accuracy: 0.9950 - val_loss: 0.5816 - val_accuracy: 0.8300
Epoch 12/20
16/16 [==============================] - 2s 110ms/step - loss: 0.0325 - accuracy: 0.9950 - val_loss: 0.5888 - val_accuracy: 0.8300
Epoch 13/20
16/16 [==============================] - 2s 110ms/step - loss: 0.0359 - accuracy: 0.9900 - val_loss: 0.6945 - val_accuracy: 0.8100
Epoch 14/20
16/16 [==============================] - 2s 110ms/step - loss: 0.0085 - accuracy: 1.0000 - val_loss: 0.5278 - val_accuracy: 0.8600
Epoch 15/20
16/16 [==============================] - 2s 111ms/step - loss: 0.0048 - accuracy: 1.0000 - val_loss: 0.5697 - val_accuracy: 0.8500
Epoch 16/20
16/16 [==============================] - 2s 111ms/step - loss: 0.0032 - accuracy: 1.0000 - val_loss: 0.6065 - val_accuracy: 0.8500
Epoch 17/20
16/16 [==============================] - 2s 110ms/step - loss: 0.0022 - accuracy: 1.0000 - val_loss: 0.6007 - val_accuracy: 0.8500
Epoch 18/20
16/16 [==============================] - 2s 112ms/step - loss: 0.0017 - accuracy: 1.0000 - val_loss: 0.6242 - val_accuracy: 0.8500
Epoch 19/20
16/16 [==============================] - 2s 118ms/step - loss: 0.0013 - accuracy: 1.0000 - val_loss: 0.6333 - val_accuracy: 0.8500
Epoch 20/20
16/16 [==============================] - 2s 111ms/step - loss: 0.0011 - accuracy: 1.0000 - val_loss: 0.6541 - val_accuracy: 0.8500

[accuracy plot and test image shown here in the original]
Result:
Thus a Python program for face recognition using CNN was written and executed successfully.
Ex. No: 4
LANGUAGE MODELING USING RNN
Date :
Aim:
To write a Python program to build a character-level language model using a recurrent neural network (RNN).

Algorithm:
Program:
from __future__ import unicode_literals, print_function, division
from io import open
import glob
import os
import unicodedata
import string
all_letters = string.ascii_letters + " .,;'-"
n_letters = len(all_letters) + 1
# Plus EOS marker
def findFiles(path): return glob.glob(path)
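The cell that reads the name files is missing here; a reconstruction following the PyTorch "Generating Names with a Character-Level RNN" tutorial that this program is based on (see the data.zip link mentioned in the error message below):

def unicodeToAscii(s):
    # strip accents and keep only characters in all_letters
    return ''.join(c for c in unicodedata.normalize('NFD', s)
                   if unicodedata.category(c) != 'Mn' and c in all_letters)

def readLines(filename):
    with open(filename, encoding='utf-8') as f:
        return [unicodeToAscii(line.strip()) for line in f]

# Build the category_lines dictionary, a list of names per language
category_lines = {}
all_categories = []
for filename in findFiles('data/names/*.txt'):
    category = os.path.splitext(os.path.basename(filename))[0]
    all_categories.append(category)
    category_lines[category] = readLines(filename)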
n_categories = len(all_categories)
if n_categories == 0:
raise RuntimeError('Data not found. Make sure that you downloaded data '
'from https://download.pytorch.org/tutorial/data.zip and extract it to '
'the current directory.')
import torch
import torch.nn as nn
class RNN(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size

    def initHidden(self):
        return torch.zeros(1, self.hidden_size)
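The constructor above has lost its layer definitions, and the forward pass is missing altogether. For reference, the complete class from the same PyTorch tutorial (a reconstruction; the 0.1 dropout rate is the tutorial's):

class RNN(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.i2h = nn.Linear(n_categories + input_size + hidden_size, hidden_size)
        self.i2o = nn.Linear(n_categories + input_size + hidden_size, output_size)
        self.o2o = nn.Linear(hidden_size + output_size, output_size)
        self.dropout = nn.Dropout(0.1)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, category, input, hidden):
        input_combined = torch.cat((category, input, hidden), 1)
        hidden = self.i2h(input_combined)
        output = self.i2o(input_combined)
        output_combined = torch.cat((hidden, output), 1)
        output = self.o2o(output_combined)
        output = self.dropout(output)
        output = self.softmax(output)
        return output, hidden

    def initHidden(self):
        return torch.zeros(1, self.hidden_size)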
import random
def categoryTensor(category):
li = all_categories.index(category)
tensor = torch.zeros(1, n_categories)
tensor[0][li] = 1
return tensor
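Between categoryTensor and the training loop the tutorial defines the tensor helpers and the random sampling of training pairs; they are omitted in the manual, so a reconstruction follows (train() itself is only partially shown further down):

# One-hot matrix of first-to-last letters (not including EOS) for input
def inputTensor(line):
    tensor = torch.zeros(len(line), 1, n_letters)
    for li in range(len(line)):
        tensor[li][0][all_letters.find(line[li])] = 1
    return tensor

# LongTensor of second-letter-to-end (including EOS) for the target
def targetTensor(line):
    letter_indexes = [all_letters.find(line[li]) for li in range(1, len(line))]
    letter_indexes.append(n_letters - 1)   # EOS
    return torch.LongTensor(letter_indexes)

# Draw a random (category, line) pair and convert it to tensors
def randomTrainingExample():
    category = random.choice(all_categories)
    line = random.choice(category_lines[category])
    return categoryTensor(category), inputTensor(line), targetTensor(line)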
# (these lines are the tail of the tutorial's train() function; its opening,
#  along with criterion = nn.NLLLoss() and learning_rate = 0.0005, is not shown)
loss.backward()

for p in rnn.parameters():
    p.data.add_(p.grad.data, alpha=-learning_rate)
import time
import math
def timeSince(since):
now = time.time()
s = now - since
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
n_iters = 100000
print_every = 5000
plot_every = 500
all_losses = []
total_loss = 0 # Reset every ``plot_every`` ``iters``
start = time.time()
rnn = RNN(n_letters, 128, n_letters)   # (reconstructed; hidden size 128 as in the tutorial)

for iter in range(1, n_iters + 1):
    # train() is the tutorial's training step; only its closing lines appear above
    output, loss = train(*randomTrainingExample())
    total_loss += loss

    if iter % print_every == 0:
        print('%s (%d %d%%) %.4f' % (timeSince(start), iter, iter / n_iters * 100, loss))
if iter % plot_every == 0:
all_losses.append(total_loss / plot_every)
total_loss = 0
import matplotlib.pyplot as plt
plt.figure()
plt.plot(all_losses)
max_length = 20

# Sample from a category and starting letter (the function header and the
# tensor set-up were lost to a page break; reconstructed from the tutorial)
def sample(category, start_letter='A'):
    with torch.no_grad():   # no need to track history when sampling
        category_tensor = categoryTensor(category)
        input = inputTensor(start_letter)
        hidden = rnn.initHidden()
        output_name = start_letter
        for i in range(max_length):
            output, hidden = rnn(category_tensor, input[0], hidden)
            topv, topi = output.topk(1)
            topi = topi[0][0]
            if topi == n_letters - 1:   # EOS predicted
                break
            else:
                letter = all_letters[topi]
                output_name += letter
            input = inputTensor(letter)
        return output_name
# Get multiple samples from one category and multiple starting letters
def samples(category, start_letters='ABC'):
for start_letter in start_letters:
print(sample(category, start_letter))
samples('Russian','RUS')
samples('German','GER')
samples('Spanish','SPA')
samples('Chinese','CHI')
Output:
# categories: 18 ['Arabic', 'Chinese', 'Czech', 'Dutch', 'English', 'French', 'German', 'Greek', 'Irish', 'Italian', 'Japanese', 'Korean', 'Polish', 'Portuguese', 'Russian', 'Scottish', 'Spanish', 'Vietnamese']
O'Neal
0m 5s (5000 5%) 2.6595
0m 11s (10000 10%) 2.9644
0m 16s (15000 15%) 3.3754
0m 22s (20000 20%) 2.0799
0m 27s (25000 25%) 2.6884
[loss curve plotted here in the original]
Rovonov
Uarakov
Shavanov
Gerre
Eeren
Roure
Salla
Para
Allana
Cha
Han
Iun
Result:
Thus a Python program for language modeling using RNN was written and executed successfully.
Ex. No: 5
SENTIMENT ANALYSIS USING LSTM
Date :
Aim:
To write a Python program to perform sentiment analysis on movie reviews using an LSTM network.
Algorithm:
Program:
pip install Keras-Preprocessing
import re
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from keras.preprocessing.text import Tokenizer
from keras_preprocessing.sequence import pad_sequences
import keras
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
import math
import nltk
data = pd.read_csv('IMDB Dataset.csv')
data
def remove_tags(string):
    removelist = ""
    result = re.sub('<.*?>', '', string)                   # remove HTML tags (pattern restored; it was lost in extraction)
    result = re.sub('https://.*', '', result)              # remove URLs
    result = re.sub(r'[^\w'+removelist+']', ' ', result)   # remove non-alphanumeric characters
    result = result.lower()
    return result
data['review']=data['review'].apply(lambda cw : remove_tags(cw))
nltk.download('stopwords')
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
data['review'] = data['review'].apply(lambda x: ' '.join([word for word in x.split() if
word not in (stop_words)]))
import nltk
nltk.download('wordnet')   # the lemmatizer below needs 'wordnet' and 'omw-1.4'
nltk.download('omw-1.4')
w_tokenizer = nltk.tokenize.WhitespaceTokenizer()
lemmatizer = nltk.stem.WordNetLemmatizer()
def lemmatize_text(text):
st = ""
for w in w_tokenizer.tokenize(text):
st = st + lemmatizer.lemmatize(w) + " "
return st
data['review'] = data.review.apply(lemmatize_text)
data
s = 0.0
for i in data['review']:
word_list = i.split()
s = s + len(word_list)
reviews = data['review'].values
labels = data['sentiment'].values
encoder = LabelEncoder()
encoded_labels = encoder.fit_transform(labels)
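The train/test split and the tokenization that produce train_padded, test_padded, vocab_size, embedding_dim, and max_length are not shown; a sketch with assumed hyperparameter values:

# (reconstruction; the hyperparameter values are assumptions)
vocab_size = 3000
embedding_dim = 100
max_length = 200

train_sentences, test_sentences, train_labels, test_labels = train_test_split(
    reviews, encoded_labels, test_size=0.2, random_state=0)

tokenizer = Tokenizer(num_words=vocab_size, oov_token='<OOV>')
tokenizer.fit_on_texts(train_sentences)

train_padded = pad_sequences(tokenizer.texts_to_sequences(train_sentences),
                             maxlen=max_length, padding='post', truncating='post')
test_padded = pad_sequences(tokenizer.texts_to_sequences(test_sentences),
                            maxlen=max_length, padding='post', truncating='post')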
# model initialization
model=keras.Sequential([
keras.layers.Embedding(vocab_size, embedding_dim,
input_length=max_length),
keras.layers.Bidirectional(keras.layers.LSTM(64)),
keras.layers.Dense(24, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
])
# compile model
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
# model summary
model.summary()
num_epochs = 5
history = model.fit(train_padded, train_labels,
epochs=num_epochs, verbose=1,
validation_split=0.1)
prediction = model.predict(test_padded)
# Get labels based on probability 1 if p>= 0.5 else 0
pred_labels = []
for i in prediction:
if i >= 0.5:
pred_labels.append(1)
else:
pred_labels.append(0)
print("Accuracy of prediction on test set : ",
accuracy_score(test_labels,pred_labels))
# 'sentence' is a small list of sample reviews; its definition is not shown in the manual
for i in range(len(sentence)):
    print(sentence[i])
    if pred_labels[i] == 1:
        s = 'Positive'
    else:
        s = 'Negative'
    print("Predicted sentiment:", s)
Output:
review sentiment
0 One of the other reviewers has mentioned that ... positive
1 A wonderful little production. <br /><br />The... positive
2 I thought this was a wonderful way to spend ti... positive
3 Basically there's a family where a little boy ... negative
4 Petter Mattei's "Love in the Time of Money" is... positive
... ... ...
49995 I thought this movie did a down right good job... positive
49996 Bad plot, bad dialogue, bad acting, idiotic di... negative
49997 I am a Catholic taught in parochial elementary... negative
49998 I'm going to have to disagree with the previou... negative
49999 No one expects the Star Trek movies to be high... negative
review sentiment (after cleaning)
0      w w w w w w w w w w w w w w w w w w w w w w w ...   positive
1      wwwwwwwwwwwwwww                                     positive
2      wwwwwwwwwwwwwwwwwww                                 positive
3      wwwwwwwwwww                                         negative
4      wwwwwwwwwwwwwwwwww                                  positive
...    ...                                                 ...
49995  wwwwwwwwwwwwwwwwwww                                 positive
49996  wwwwwwww                                            negative
49997  wwwwwwwwwwww                                        negative
49998  wwwwwwwwwwwwwww                                     negative
49999  wwwwwwwwwwww                                        negative
Model: "sequential"
===========================================================
======
Total params: 387,601
Trainable params: 387,601
Non-trainable params: 0
Epoch 1/5
1055/1055 [==============================] - 60s 55ms/step - loss: 0.6932 - accuracy: 0.5021 - val_loss: 0.6925 - val_accuracy: 0.5205
Epoch 2/5
1055/1055 [==============================] - 58s 55ms/step - loss: 0.6926 - accuracy: 0.5094 - val_loss: 0.6925 - val_accuracy: 0.5173
Epoch 3/5
1055/1055 [==============================] - 59s 56ms/step - loss: 0.6926 - accuracy: 0.5129 - val_loss: 0.6924 - val_accuracy: 0.5171
Epoch 4/5
1055/1055 [==============================] - 59s 56ms/step - loss: 0.6923 - accuracy: 0.5166 - val_loss: 0.6927 - val_accuracy: 0.4965
Epoch 5/5
1055/1055 [==============================] - 58s 55ms/step - loss: 0.6925 - accuracy: 0.5141 - val_loss: 0.6924 - val_accuracy: 0.5173
Result:
Thus a Python program for sentiment analysis using LSTM was written and executed successfully.
Ex. No: 6
PARTS OF SPEECH TAGGING USING
SEQUENCE TO SEQUENCE ARCHITECTURE
Date :
Aim:
To write a Python program to perform parts of speech tagging using a sequence-to-sequence architecture.
Algorithm:
Program:
import numpy as np
import pandas as pd
import json
import functools as fc
from sklearn.metrics import accuracy_score
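The cell that reads the training file and initializes the vocabulary counter is not shown; a minimal reconstruction (the path and the tab-separated "index word pos" layout are assumptions):

# read the training data into three parallel lists
index, word, pos = [], [], []
with open('data/train', 'r') as f:           # path assumed
    for line in f:
        fields = line.strip('\n').split('\t')
        if len(fields) == 3:
            index.append(int(fields[0]))
            word.append(fields[1])
            pos.append(fields[2])

vocab = {}   # word -> number of occurrences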
for i in range(len(word)):
if word[i] in vocab:
vocab[word[i]] += 1
else:
vocab[word[i]] = 1
# replace rare words with <unk> (threshold = 3)
vocab2 = {}
num_unk = 0
for w in vocab:
if vocab[w] >= 3:
vocab2[w] = vocab[w]
else:
num_unk += vocab[w]
# sort the vocabulary by occurrences of words
vocab_sorted = sorted(vocab.items(), key=lambda item: item[1], reverse=True)
# write the sorted vocabulary to vocab file
#with open('recap/vocab.txt', 'w') as vocab_file:
with open('output/vocab_frequent', 'w') as vocab_file:
# the format of the vocab is word index occurrence
# we add <unk> to the top of the vocabulary manually
vocab_file.write('<unk>' + '\t' + str(0) + '\t' + str(num_unk) + '\n')
for i in range(len(vocab_sorted)):
vocab_file.write(vocab_sorted[i][0] + '\t' + str(i+1) + '\t' + str(vocab_sorted[i][1]) +
'\n')
print(f'The total size of my vocabulary is {len(vocab_sorted)}\n')
print(f'The total occurrences of <unk> is {num_unk}\n')
# build a vocabulary list with only frequent words (i.e. occur no less than 3 times)
vocab_ls = list(vocab2.keys())
vocab_frequent = set(vocab_ls)   # (assumed) fast membership test used by the taggers below
# for ss, we need to count the times that a pos tag occurs at the beginning
# of a sequence (i.e. (s|<s>))
ss = {}
for i in range(len(word)):
    if index[i] == 1:
        if str(pos[i]) + '|' + '<s>' in ss:
            ss[str(pos[i]) + '|' + '<s>'] += 1
        else:
            ss[str(pos[i]) + '|' + '<s>'] = 1
count_pos = {}
for p in pos:
    if p in count_pos:
        count_pos[p] += 1
    else:
        count_pos[p] = 1
# (reconstructed) collect the distinct pos tags seen in the training data; the
# original cell was lost to a page break
pos_distinct = []
for p in count_pos.keys():
    pos_distinct.append(p)
# (the body of greedy() spans a missing page; the first-position block is
#  reconstructed to mirror the surviving loop that follows)
def greedy(sentence):
    # initialize a list to keep track of the pos for each position
    pos = []
    # make sure the first word is in the vocabulary; if not, replace with <unk>
    if sentence[0] not in vocab_frequent:
        sentence[0] = '<unk>'
    # predict the first pos based on the product of the emission and transition
    max_prob = 0
    p0 = 'UNK'
    for p in pos_distinct:
        try:
            temp = emission[sentence[0] + '|' + p] * transition[p + '|' + '<s>']
            if temp > max_prob:
                max_prob = temp
                p0 = p
        except:
            pass
    pos.append(p0)
    # predict each remaining position from the previous predicted tag
    for i in range(1, len(sentence)):
        if sentence[i] not in vocab_frequent:
            sentence[i] = '<unk>'
        max_prob = 0
        pi = 'UNK'
        for p in pos_distinct:
            try:
                temp = emission[sentence[i] + '|' + p] * transition[p + '|' + pos[-1]]
                if temp > max_prob:
                    max_prob = temp
                    pi = p
            except:
                pass
        pos.append(pi)
    return pos
pos_greedy = [greedy(s) for s in word_dev2]
# concatenate the list of sublists into one single list
pos_greedy = fc.reduce(lambda a, b: a + b, pos_greedy)
pos_dev = fc.reduce(lambda a, b: a + b, pos_dev2)
# load json file hmm
#with open('recap/hmm.json', 'r') as hmm:
with open('output/hmm.json', 'r') as hmm:
json_data = json.load(hmm)
# split dev lists (index, word and pos) to individual samples (list --> list of sublists)
# (index_dev, word_dev and pos_dev are read from the dev file in a cell not shown)
word_dev2 = []
pos_dev2 = []
word_sample = []
pos_sample = []
for i in range(len(dev)-1):
if index_dev[i] < index_dev[i+1]:
word_sample.append(word_dev[i])
pos_sample.append(pos_dev[i])
else:
word_sample.append(word_dev[i])
word_dev2.append(word_sample)
word_sample = []
pos_sample.append(pos_dev[i])
pos_dev2.append(pos_sample)
pos_sample = []
# for the first position, the highest cumulative probability of each possible pos would be
# emission[sentence[0]|pos] * transition[pos|<s>]
# (the following steps run inside a viterbi(sentence) function; seq and pre_pos
#  are lists with one dict per position, holding the best probability and the
#  back-pointer for each pos; their initialization is not shown in the manual)
# check if the first word is in the vocabulary. If not, replace with '<unk>'
if sentence[0] not in vocab_frequent:
    sentence[0] = '<unk>'
for p in pos_distinct:
if p + '|' + '<s>' in transition:
try:
seq[0][p] = transition[p + '|' + '<s>'] * \
emission[sentence[0] + '|' + p]
except:
seq[0][p] = 0
# set <s> as the previous pos of each possible pos at the first position
for p in seq[0].keys():
pre_pos[0][p] = '<s>'
# for position i > 0, the highest cumulative probability of each possible pos would be
# emission[sentence[i]|pos[i]] * transition[pos[i]|pos[i-1]] * seq[i-1][pos]
for i in range(1, len(sentence)):
# still, check if the word is in the vocabulary
if sentence[i] not in vocab_frequent:
sentence[i] = '<unk>'
for p in seq[i-1].keys():
    for p_prime in pos_distinct:
        if p_prime + '|' + p in transition:
            if p_prime in seq[i]:
                try:
                    temp = seq[i-1][p] * \
                        transition[p_prime + '|' + p] * \
                        emission[sentence[i] + '|' + p_prime]
                    if temp > seq[i][p_prime]:
                        seq[i][p_prime] = temp
                        pre_pos[i][p_prime] = p
                except:
                    pass
            else:
                try:
                    seq[i][p_prime] = seq[i-1][p] * \
                        transition[p_prime + '|' + p] * \
                        emission[sentence[i] + '|' + p_prime]
                    pre_pos[i][p_prime] = p
                except:
                    seq[i][p_prime] = 0
# after we get the maximum probability for every possible pos at every position
# of a sentence, we can trace backward to find out our prediction on the pos
# for the sentence
seq_predict = []
# The pos of the last word in the sentence is the one with the highest probability.
# After predicting the pos of the last word, we can iterate through pre_pos to
# predict the pos of the remaining words in the input sentence in reverse order.
prob_max = max(seq[len(sentence)-1].values())
# the index of the highest probability
index_max = list(seq[len(sentence)-1].values()).index(prob_max)
# the pos of the highest probability
pos_max = list(seq[len(sentence)-1].keys())[index_max]
seq_predict.append(pos_max)
Output:
0 1 Pierre NNP
1 2 Vinken NNP
2 3 , ,
3 4 61 CD
4 5 years NNS
   index  word          POS
0  1      The           DT
1  2      Arizona       NNP
2  3      Corporations  NNP
3  4      Commission    NNP
4  5      authorized    VBD
The total size of my vocabulary is 43193
The total occurrences of <unk> is 32537
Result:
Thus a Python program for parts of speech tagging using a sequence-to-sequence architecture was written and executed successfully.
Ex. No: 7
MACHINE TRANSLATION USING ENCODER-DECODER MODEL
Date :
Aim:
To write a Python program to perform English-to-French machine translation using an encoder-decoder model.
Algorithm:
Program:
import numpy as np   # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# You can write up to 5GB to the current directory (/kaggle/working/) that gets
# preserved as output when you create a version using "Save & Run All".
# You can also write temporary files to /kaggle/temp/, but they won't be saved
# outside of the current session.
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input,LSTM,Dense
batch_size=64
epochs=100
latent_dim=256 # here latent dim represent hidden state or cell state
num_samples=10000
data_path='fra.txt'
# Vectorize the data.
input_texts = []
target_texts = []
input_characters = set()
target_characters = set()
with open(data_path, 'r', encoding='utf-8') as f:
lines = f.read().split('\n')
for line in lines[: min(num_samples, len(lines) - 1)]:
input_text, target_text, _ = line.split('\t')
# We use "tab" as the "start sequence" character
# for the targets, and "\n" as "end sequence" character.
target_text = '\t' + target_text + '\n'
input_texts.append(input_text)
target_texts.append(target_text)
for char in input_text:
if char not in input_characters:
input_characters.add(char)
for char in target_text:
if char not in target_characters:
target_characters.add(char)
input_characters=sorted(list(input_characters))
target_characters=sorted(list(target_characters))
num_encoder_tokens=len(input_characters)
num_decoder_tokens=len(target_characters)
input_token_index=dict(
[(char,i) for i, char in enumerate(input_characters)])
target_token_index=dict(
[(char,i) for i, char in enumerate(target_characters)])
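Nothing above computes the two maximum sequence lengths used to size the arrays below; following the standard Keras character-level seq2seq example this program is based on:

max_encoder_seq_length = max([len(txt) for txt in input_texts])
max_decoder_seq_length = max([len(txt) for txt in target_texts])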
encoder_input_data = np.zeros(
(len(input_texts), max_encoder_seq_length, num_encoder_tokens),
dtype='float32')
decoder_input_data = np.zeros(
(len(input_texts), max_decoder_seq_length, num_decoder_tokens),
dtype='float32')
decoder_target_data = np.zeros(
(len(input_texts), max_decoder_seq_length, num_decoder_tokens),
dtype='float32')
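The loop that fills these one-hot arrays is also missing; from the same Keras example:

for i, (input_text, target_text) in enumerate(zip(input_texts, target_texts)):
    for t, char in enumerate(input_text):
        encoder_input_data[i, t, input_token_index[char]] = 1.
    for t, char in enumerate(target_text):
        # decoder_target_data is ahead of decoder_input_data by one timestep
        decoder_input_data[i, t, target_token_index[char]] = 1.
        if t > 0:
            decoder_target_data[i, t - 1, target_token_index[char]] = 1.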
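The encoder-decoder model that model.fit() trains below is never defined in the listing (the page is missing); a reconstruction following the same Keras example, which also supplies the encoder_model and the reverse character index used at inference time:

encoder_inputs = Input(shape=(None, num_encoder_tokens))
encoder = LSTM(latent_dim, return_state=True)
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
encoder_states = [state_h, state_c]   # keep only the states

decoder_inputs = Input(shape=(None, num_decoder_tokens))
decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_inputs, initial_state=encoder_states)
decoder_dense = Dense(num_decoder_tokens, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)

model = Model([encoder_inputs, decoder_inputs], decoder_outputs)

# inference-time encoder: maps an input sequence to its state vectors
encoder_model = Model(encoder_inputs, encoder_states)

# look up characters from their indices when decoding
reverse_target_char_index = dict((i, char) for char, i in target_token_index.items())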
# Run training
model.compile(optimizer='rmsprop', loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit([encoder_input_data, decoder_input_data], decoder_target_data,
batch_size=batch_size,
epochs=epochs,
validation_split=0.2)
model.save('eng2french.h5')
decoder_state_input_h = Input(shape=(latent_dim,))
decoder_state_input_c = Input(shape=(latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_outputs, state_h, state_c = decoder_lstm(
decoder_inputs, initial_state=decoder_states_inputs)
decoder_states = [state_h, state_c]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model(
[decoder_inputs] + decoder_states_inputs,
[decoder_outputs] + decoder_states)
def decode_sequence(input_seq):
    # (reconstructed) the opening of this function is missing in the manual:
    # encode the input and start the target sequence with the "\t" character
    states_value = encoder_model.predict(input_seq)
    target_seq = np.zeros((1, 1, num_decoder_tokens))
    target_seq[0, 0, target_token_index['\t']] = 1.
    stop_condition = False
    decoded_sentence = ''
    while not stop_condition:
        output_tokens, h, c = decoder_model.predict([target_seq] + states_value)
        # Sample a token
        sampled_token_index = np.argmax(output_tokens[0, -1, :])
        sampled_char = reverse_target_char_index[sampled_token_index]
        decoded_sentence += sampled_char
        # Stop on the "\n" end character or when the sentence gets too long
        if sampled_char == '\n' or len(decoded_sentence) > max_decoder_seq_length:
            stop_condition = True
        # Update the target sequence (length 1) and states
        target_seq = np.zeros((1, 1, num_decoder_tokens))
        target_seq[0, 0, sampled_token_index] = 1.
        states_value = [h, c]
    return decoded_sentence
Output:

…
Epoch 98/100
125/125 [==============================] - 13s 107ms/step - loss: 0.1532 - accuracy: 0.9531 - val_loss: 0.5529 - val_accuracy: 0.8705
Epoch 99/100
125/125 [==============================] - 13s 108ms/step - loss: 0.1517 - accuracy: 0.9533 - val_loss: 0.5561 - val_accuracy: 0.8697
Epoch 100/100
125/125 [==============================] - 13s 108ms/step - loss: 0.1497 - accuracy: 0.9543 - val_loss: 0.5522 - val_accuracy: 0.8706

Result:
Thus a Python program for machine translation using the Encoder-Decoder model was written and executed successfully.
Ex. No: 8
IMAGE AUGMENTATION USING GANS
Date :
Aim:
To write a Python program to perform image augmentation using GANs.
Algorithm:
Program:
import os
import numpy as np
import keras.utils as image
import matplotlib.pyplot as plt
%matplotlib inline
# (reconstructed) only one line of the image-loading helper survives in the
# manual; a plausible version, with the folder layout assumed:
def load_images_from_path(path, label):
    images, labels = [], []
    for file in os.listdir(path):
        img = image.load_img(os.path.join(path, file), target_size=(224, 224))
        images.append(image.img_to_array(img))
        labels.append((label))
    return images, labels
def show_images(images):
fig, axes = plt.subplots(1, 8, figsize=(20, 20), subplot_kw={'xticks': [], 'yticks':
[]})
for i, ax in enumerate(axes.flat):
ax.imshow(images[i] / 255)
x_train = []
y_train = []
x_test = []
y_test = []
# (hist comes from a model.fit(...) call on a page not reproduced here)
acc = hist.history['accuracy']
val_acc = hist.history['val_accuracy']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, '-', label='Training Accuracy')
plt.plot(epochs, val_acc, ':', label='Validation Accuracy')
import seaborn as sns
from sklearn.metrics import confusion_matrix

sns.set()
y_predicted = model.predict(x_test)
mat = confusion_matrix(y_test_encoded.argmax(axis=1),
                       y_predicted.argmax(axis=1))
class_labels = ['arctic fox', 'polar bear', 'walrus']
sns.heatmap(mat, square=True, annot=True, fmt='d', cbar=False, cmap='Blues',
xticklabels=class_labels,
yticklabels=class_labels)
plt.xlabel('Predicted label')
plt.ylabel('Actual label')
x = image.load_img('arctic-wildlife/samples/arctic_fox/arctic_fox_140.jpeg',
                   target_size=(224, 224))
plt.xticks([])
plt.yticks([])
plt.imshow(x)
x = image.img_to_array(x)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)   # preprocess_input belongs to the pretrained base network used earlier (import not shown)
predictions = model.predict(x)
for i, label in enumerate(class_labels):
print(f'{label}: {predictions[0][i]}')
x = image.load_img('arctic-wildlife/samples/walrus/walrus_143.png',
target_size=(224, 224))
plt.xticks([])
plt.yticks([])
plt.imshow(x)
x = image.img_to_array(x)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
predictions = model.predict(x)
for i, label in enumerate(class_labels):
    print(f'{label}: {predictions[0][i]}')
Output:

Train:
Test:
[sample image displayed here in the original]
Result:
Thus a Python program for image augmentation using GANs was written and executed successfully.
Ex. No: 9
Helmet and number plate Detection and Recognition
Date :
Aim:
To write a Python program to detect helmets and to detect and recognize number plates in a traffic video.
Algorithm:
Program:
import cv2
import numpy as np
import random
import os
from PIL import Image
import time
import imutils
from tensorflow.keras.models import load_model
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
model = load_model('helmet-nonhelmet_cnn.h5')
print('model loaded!!!')
fourcc = cv2.VideoWriter_fourcc(*"XVID")
writer = cv2.VideoWriter('output.avi', fourcc, 5, (888, 500))
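The video capture, the YOLO network, and the box colours used below are never created in the listing; a plausible setup (all file names are assumptions):

cap = cv2.VideoCapture('video.mp4')                    # input traffic video
net = cv2.dnn.readNet('yolov3-custom_7000.weights',    # detector weights/config for the
                      'yolov3-custom.cfg')             # bike and number-plate classes
COLORS = [(0, 255, 0), (0, 0, 255)]                    # green = bike, red = number plate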
def helmet_or_nohelmet(helmet_roi):
try:
helmet_roi = cv2.resize(helmet_roi, (224, 224))
helmet_roi = np.array(helmet_roi,dtype='float32')
helmet_roi = helmet_roi.reshape(1, 224, 224, 3)
helmet_roi = helmet_roi/255.0
return int(model.predict(helmet_roi)[0][0])
except:
pass
layer_names = net.getLayerNames()
output_layers = [layer_names[i - 1] for i in net.getUnconnectedOutLayers()]   # OpenCV 4.x returns a flat array
ret = True
while ret:
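    # (reconstructed) the frame-reading and blob-preparation steps of this loop
    # are missing from the manual; a plausible version follows
    ret, img = cap.read()
    if not ret:
        break
    img = imutils.resize(img, height=500)
    height, width = img.shape[:2]
    blob = cv2.dnn.blobFromImage(img, 1 / 255.0, (416, 416), swapRB=True, crop=False)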
net.setInput(blob)
outs = net.forward(output_layers)
confidences = []
boxes = []
classIds = []
# (reconstructed) loop over the detector outputs; only its inner lines survive in the manual
for out in outs:
    for detection in out:
        scores = detection[5:]
        class_id = np.argmax(scores)
        confidence = scores[class_id]
        if confidence > 0.3:
            center_x = int(detection[0] * width)
            center_y = int(detection[1] * height)
            w = int(detection[2] * width)
            h = int(detection[3] * height)
            x = int(center_x - w / 2)
            y = int(center_y - h / 2)
            boxes.append([x, y, w, h])
            confidences.append(float(confidence))
            classIds.append(class_id)
indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)   # (assumed thresholds)
for i in range(len(boxes)):
if i in indexes:
x,y,w,h = boxes[i]
color = [int(c) for c in COLORS[classIds[i]]]
# green --> bike
# red --> number plate
if classIds[i]==0: #bike
helmet_roi = img[max(0,y):max(0,y)+max(0,h)//4,max(0,x):max(0,x)+max(0,w)]
else: #number plate
x_h = x-60
y_h = y-350
w_h = w+100
h_h = h+100
cv2.rectangle(img, (x, y), (x + w, y + h), color, 7)
# h_r = img[max(0,(y-330)):max(0,(y-330 + h+100)) , max(0,(x-80)):max(0,(x-80 +
w+130))]
if y_h>0 and x_h>0:
h_r = img[y_h:y_h+h_h , x_h:x_h +w_h]
c = helmet_or_nohelmet(h_r)
cv2.putText(img, ['helmet', 'no-helmet'][c], (x, y - 100),
            cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
writer.write(img)
cv2.imshow("Image", img)
if cv2.waitKey(1) == 27:
break
writer.release()
cap.release()
cv2.waitKey(0)
cv2.destroyAllWindows()
Output:
Result:
Thus a Python program for helmet and number plate detection and recognition was written and executed successfully.