TensorFlow and Keras Sample Programs
https://drive.google.com/drive/folders/1YrF9bKyERtN_iNnkPryOtWrwhFyey9mW
Code Explanation:
1. Import Libraries:
o import pandas as pd: Imports the pandas library for data manipulation (creating
DataFrames).
o import numpy as np: Imports the NumPy library for numerical operations.
o from sklearn.model_selection import train_test_split: Imports the train_test_split
function from scikit-learn to split data into training and testing sets.
o from sklearn.tree import DecisionTreeClassifier: Imports the
DecisionTreeClassifier class from scikit-learn for building decision tree
models.
o from sklearn import tree: Imports the tree module from scikit-learn for
plotting the fitted decision tree.
o import seaborn as sns: Imports the seaborn library for creating heatmap
plots.
2. Load the Dataset:
o data = load_iris(): Loads the built-in Iris dataset, which contains four
features (sepal length, sepal width, petal length, petal width) and target
labels (species of Iris flower).
3. Create DataFrame:
o df = pd.DataFrame(data.data, columns=data.feature_names): Converts the data
array into a DataFrame with the feature names as column labels.
4. Map Target Values to Species Names:
o df['Species'] = df['Species'].replace(targets): Replaces the numeric codes in
the "Species" column with the corresponding species names using the targets
dictionary.
5. Extract Features and Target:
o x = df.drop(columns="Species"): Creates a new DataFrame x containing all
feature columns; y = df["Species"] holds the target labels.
6. Split the Dataset:
o train_test_split(x, y, test_size=0.4, random_state=42): Splits the data into
training and testing sets using train_test_split.
o test_size=0.4: Specifies that 40% of the data will be used for testing.
o random_state=42: Fixes the random seed so the split is reproducible.
Code:
import pandas as pd
import numpy as np
from sklearn.datasets import load_iris
data = load_iris()
#convert to a dataframe
df = pd.DataFrame(data.data, columns = data.feature_names)
#create the species column
df['Species'] = data.target
#replace this with the actual names
target = np.unique(data.target)
target_names = np.unique(data.target_names)
targets = dict(zip(target, target_names))
df['Species'] = df['Species'].replace(targets)
#extract datasets
x = df.drop(columns="Species")
y = df["Species"]
feature_names = x.columns
labels = y.unique()
#split the dataset
from sklearn.model_selection import train_test_split
X_train, test_x, y_train, test_lab = train_test_split(x, y, test_size=0.4, random_state=42)
# Importing Decision Tree Classifier
from sklearn.tree import DecisionTreeClassifier
clf = DecisionTreeClassifier(max_depth=3, random_state=42)
clf.fit(X_train, y_train)
# Tree Diagram
from sklearn import tree
import matplotlib.pyplot as plt
plt.figure(figsize=(30,10), facecolor='k')
a = tree.plot_tree(clf, feature_names=feature_names, class_names=labels,
                   rounded=True, filled=True, fontsize=14)
plt.show()
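For a quick check without the figure, the same fitted tree can be printed as plain text; a minimal sketch using scikit-learn's export_text (converting the pandas Index of feature names to a list is the only addition here):

from sklearn.tree import export_text
# print the decision rules of the fitted tree in plain text
print(export_text(clf, feature_names=list(feature_names)))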
# Predict Class From Test Values
test_pred_decision_tree = clf.predict(test_x)
from sklearn import metrics
import seaborn as sns
import matplotlib.pyplot as plt
confusion_matrix = metrics.confusion_matrix(test_lab, test_pred_decision_tree)
matrix_df = pd.DataFrame(confusion_matrix)
plt.figure(figsize=(10,7))
sns.set(font_scale=1.3)
ax = plt.axes()
# draw the confusion matrix as an annotated heatmap
sns.heatmap(matrix_df, annot=True, fmt="g", ax=ax, cmap="magma")
ax.set_xlabel("Predicted label", fontsize=15)
ax.set_ylabel("True label", fontsize=15)
plt.show()
2. Keras
Code Breakdown:
Python
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
1. Import Libraries:
o import keras: Imports the Keras library for building the neural network
model.
o from keras.datasets import mnist: Imports the MNIST dataset specifically.
o from keras.models import Sequential: Imports the Sequential class, a linear
stack of layers that forms the neural network model.
o from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D: Imports
the layer types used to build the convolutional neural network.
o from keras import backend as K: Imports the Keras backend module.
2. Load the Data:
o (x_train, y_train), (x_test, y_test) = mnist.load_data(): Loads the train and
test splits of MNIST.
3. Inspect the Shapes:
o print(x_train.shape, y_train.shape): The typical output is (60000, 28, 28) for
images and (60000,) for labels (60,000 samples).
4. Reshape Data for CNN:
o x_train = x_train.reshape(x_train.shape[0], 28, 28, 1): Reshapes the training data
(and likewise the test data) to add a single grayscale channel, as expected by
convolutional layers.
5. Define the Input Shape:
o input_shape = (28, 28, 1): Stores the input shape in one place for reuse in the
model. This helps make the code more readable and maintainable.
6. Set Batch Size (Optional):
o batch_size = 128: Sets the batch size for training the model.
Code:
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print(x_train.shape, y_train.shape)
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
input_shape = (28, 28, 1)
batch_size = 128
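The listing stops after setting the batch size. Below is a hedged continuation: the normalization, one-hot encoding, layer sizes, dropout rates, and training settings are illustrative assumptions in the spirit of the classic Keras MNIST example, not part of the original listing.

from keras.utils import to_categorical
# scale pixel values to [0, 1] and one-hot encode the 10 digit classes
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)
# small CNN: conv/pool feature extraction, then dense classification
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation="relu", input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(10, activation="softmax"))
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(x_train, y_train, batch_size=batch_size, epochs=2, validation_data=(x_test, y_test))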
3. SciPy
2. Inspecting Image Data and Loading Raw Files:
Python
# Print image data type (e.g., uint8 for unsigned 8-bit integers)
print(img.dtype)
import numpy as np
NumPy's np.fromfile() loads the raw data into a NumPy array, specifying the data
type as uint8 for unsigned 8-bit integers (common for image data).
Similar to before, img.shape shows the image dimensions.
img.max(), img.min(), and img.mean() provide insights into the intensity range of the
image's pixels.
3. Grayscale Conversion, Cropping, and Flipping:
Python
# Load the image and convert to grayscale
img = misc.face(gray=True)
Slicing is used to crop a rectangular area from the center of the image:
[start_row:end_row, start_column:end_column].
o x // 3 and y // 3 calculate one-third of the image height and width,
respectively.
o Negative indexing (-x // 8) is used to exclude a portion from the end (right
and bottom).
np.flipud(img) flips the image vertically (upside down).
Code:
from scipy import misc
import imageio
import matplotlib.pyplot as plt
# reads a raccoon face
face = misc.face()
# save the image
imageio.imsave('raccoon.png', face)
plt.imshow(face)
plt.show()
img = imageio.imread('raccoon.png')
print(img.shape)
print(img.dtype)
plt.imshow(img)
plt.show()
# reads a raccoon face
face = misc.face()
face.tofile("raccoon.raw")
import numpy as np
img = np.fromfile('raccoon.raw', dtype=np.uint8)
print(img.shape)
img = misc.face()
print(img.max())
print(img.min())
print(img.mean())
# for grayscaling the image
img = misc.face(gray = True)
x, y = img.shape
# Cropping the image
crop = img[x//3: - x//8, y//3: - y//8]
plt.imshow(crop)
plt.show()
img = misc.face()
flip = np.flipud(img)
plt.imshow(flip)
plt.show()
from scipy import misc,ndimage
import matplotlib.pyplot as plt
img = misc.face()
rotate = ndimage.rotate(img, 30)
plt.imshow(rotate)
plt.show()
img = misc.face()
blur_G = ndimage.gaussian_filter(img,sigma=7)
plt.imshow(blur_G)
plt.show()
img = misc.face(gray=True).astype(float)
blur = ndimage.gaussian_filter(img, 5)
# Showing Blur Image
plt.imshow(blur)
plt.show()
blur_G = ndimage.gaussian_filter(blur, 1)
alpha = 30
sharp = blur+alpha*(blur-blur_G)
# showing sharp images
plt.imshow(sharp)
plt.show()
img=misc.face(gray=True).astype(float)
img=img[40:100,30:100]
noise_img=img+0.9*img.std()*np.random.random(img.shape)
plt.imshow(noise_img)
plt.show()
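The noisy crop can be smoothed again; a minimal denoising sketch using the ndimage module imported above (the median filter and its size=3 window are illustrative choices):

# median filtering suppresses the added noise while preserving edges
denoised = ndimage.median_filter(noise_img, size=3)
plt.imshow(denoised, cmap='gray')
plt.show()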
4. Keras
2. Data Preparation (Part 1):
o Generates random RGB samples and labels each one from its red color value:
if the red value is greater than 0.5 (threshold), it is labeled as red (1),
otherwise white (0).
3. Data Preparation (Part 2):
Image Loading and Preprocessing:
o Reads image files from a directory structure containing subdirectories for
each plant disease class.
o Converts loaded images into NumPy arrays.
o Resizes images to a specific size (256x256 pixels in this case).
Label Assignment: Assigns labels (0, 1, 2) based on the subdirectory the image
belongs to (e.g., 0 for Corn disease).
Data Splitting: Splits the data into training, testing, and validation sets (common
for model training and evaluation).
Normalization: Pixel values are normalized by dividing each value by 255 (since
they typically range from 0 to 255).
Reshaping: The data is reshaped to a format suitable for CNNs (4D
tensors with dimensions for the number of samples, image height, width, and
channels (RGB)).
4. Model Building (Both Parts):
Sequential Model:
o model = Sequential(): Creates a sequential model, where layers are added
one after another. This is a common structure for many neural networks.
5. Model Layers (Part 1):
Dense Layers (Fully Connected):
o These layers are the core building blocks of artificial neural networks.
They perform linear transformations on the input data and introduce
non-linearity with activation functions.
o model.add(Dense(64, activation='relu', input_shape=(3,))):
Adds the first Dense layer with 64 neurons and the ReLU (Rectified
Linear Unit) activation function. ReLU helps introduce non-
linearity, allowing the model to learn more complex patterns.
input_shape=(3,) specifies the input as a vector of 3 values (the
RGB components).
6. Model Layers (Part 2):
Convolutional Layers:
o model.add(Conv2D(32, (3,3), padding="same", input_shape=(256,256,3),
activation="relu")):
Adds the first convolutional layer with 32 filters of size 3x3. Filters
are like kernels that slide across the image, extracting features.
padding="same" ensures the output remains the same size as the
input, and the ReLU activation again introduces non-linearity.
o Additional convolutional layers with different filter sizes and pooling
layers (MaxPooling2D) are used for further feature extraction and
reducing dimensionality.
Flatten Layer:
o model.add(Flatten()): Flattens the multi-dimensional output of the
convolutional layers into a 1D vector that can be fed into Dense layers.
Code:
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
# Generate an imaginary dataset with random RGB values
np.random.seed(42) # For reproducibility
num_samples = 1000
colors = np.random.rand(num_samples, 3) # RGB values between 0 and 1
# Labels: 1 if the red channel exceeds 0.5 ("red"), otherwise 0 ("white")
labels = (colors[:, 0] > 0.5).astype(int)
# Split the dataset into training and testing sets
split_ratio = 0.8
split_index = int(num_samples * split_ratio)
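The listing stops after computing the split index. A hedged continuation is sketched below: the split reuses split_index from above, the first Dense layer follows item 5 of the explanation, and the compile call mirrors the model.compile(optimizer="adam", ...) line shown earlier in this handout; the 2-unit softmax output, epoch count, and batch size are illustrative assumptions.

# split using the 80/20 index computed above
x_train, x_test = colors[:split_index], colors[split_index:]
y_train, y_test = labels[:split_index], labels[split_index:]
# small fully connected classifier (the 2-unit softmax output is an
# assumption matching the two color labels)
model = Sequential()
model.add(Dense(64, activation='relu', input_shape=(3,)))
model.add(Dense(2, activation='softmax'))
# compile your model
model.compile(optimizer="adam", loss=tf.keras.losses.SparseCategoricalCrossentropy(),
              metrics="acc")
# train and evaluate (epochs/batch size are illustrative)
model.fit(x_train, y_train, epochs=10, batch_size=32)
print(model.evaluate(x_test, y_test))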
5. TensorFlow
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.image import imread
import cv2
import random
import os
from os import listdir
from PIL import Image
from sklearn.preprocessing import label_binarize, LabelBinarizer
from keras.preprocessing import image
from tensorflow.keras.utils import img_to_array, array_to_img
from tensorflow.keras.optimizers import Adam
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Flatten, Dropout, Dense
from sklearn.model_selection import train_test_split
from keras.models import model_from_json
from tensorflow.keras.utils import to_categorical
!ls "/content/drive/MyDrive/Plant_images"
plt.figure(figsize=(12,12))
path = "/content/drive/MyDrive/Plant_images/Potato___Early_blight"
for i in range(1,17):
    plt.subplot(4,4,i)
    plt.tight_layout()
    rand_img = imread(path +'/'+ random.choice(sorted(os.listdir(path))))
    plt.imshow(rand_img)
    plt.xlabel(rand_img.shape[1], fontsize = 10)  # width of image
    plt.ylabel(rand_img.shape[0], fontsize = 10)  # height of image
#Converting Images to array
def convert_image_to_array(image_dir):
    try:
        image = cv2.imread(image_dir)
        if image is not None:
            image = cv2.resize(image, (256,256))
            #image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            return img_to_array(image)
        else:
            return np.array([])
    except Exception as e:
        print(f"Error : {e}")
        return None
dir = "/content/drive/MyDrive/Plant_images"
root_dir = listdir(dir)
image_list, label_list = [], []
all_labels = ['Corn-Common_rust', 'Potato-Early_blight', 'Tomato-Bacterial_spot']
binary_labels = [0,1,2]
temp = -1
# Reading and converting image to numpy array
#Now we will convert all the images into numpy array.
for directory in root_dir:
    plant_image_list = listdir(f"{dir}/{directory}")
    temp += 1
    for files in plant_image_list:
        image_path = f"{dir}/{directory}/{files}"
        image_list.append(convert_image_to_array(image_path))
        label_list.append(binary_labels[temp])
# Visualize the number of classes count
label_counts = pd.DataFrame(label_list).value_counts()
label_counts.head()
#Next we will observe the shape of the image.
image_list[0].shape
label_list = np.array(label_list)
label_list.shape
x_train, x_test, y_train, y_test = train_test_split(image_list, label_list,
                                                    test_size=0.2, random_state=10)
# Now we will normalize the image dataset. As pixel values range from 0 to 255,
# we divide each pixel value by 255.
x_train = np.array(x_train, dtype=np.float16) / 255.0
x_test = np.array(x_test, dtype=np.float16) / 255.0
x_train = x_train.reshape(-1,256,256,3)
x_test = x_test.reshape(-1,256,256,3)
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
model = Sequential()
model.add(Conv2D(32, (3,3), padding="same",input_shape=(256,256,3),
activation="relu"))
model.add(MaxPooling2D(pool_size=(3,3)))
model.add(Conv2D(16, (3,3), padding="same", activation="relu"))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Flatten())
model.add(Dense(8, activation="relu"))
model.add(Dense(3, activation="softmax"))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer=Adam(0.0001),
              metrics=['accuracy'])
#Next we will split the dataset into validation and training data.
# Splitting the training data set into training and validation data sets
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size =0.2)
# Training the model
epochs = 50
batch_size = 128
history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
                    validation_data=(x_val, y_val))
#Plot the training history
plt.figure(figsize=(12,5))
plt.plot(history.history['accuracy'], color='r')
plt.plot(history.history['val_accuracy'], color='b')
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epochs')
plt.legend(['train','val'])
plt.show()
y_pred = model.predict(x_test)
print("[INFO] Calculating model accuracy")
scores = model.evaluate(x_test, y_test)
print(f"Test Accuracy:{scores[1]*100}")
# convert one normalized test sample back to a displayable image
img = array_to_img(x_test[10])
img
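Finally, the displayed test image can be classified and compared with its true label; a minimal sketch (np.argmax maps the one-hot and softmax outputs back into all_labels):

# predict the class of the displayed test sample and compare with ground truth
pred = model.predict(x_test[10:11])
print("Predicted:", all_labels[np.argmax(pred)])
print("Actual:", all_labels[np.argmax(y_test[10])])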