LINEAR REGRESSION

LINEAR REGRESSION UNIVARIATE:

import numpy as np
import copy

def compute_cost(x, y, w, b):
    # Mean squared error cost for univariate linear regression
    m = x.shape[0]
    cost_sum = 0
    for i in range(m):
        f_wb = w * x[i] + b
        cost = (f_wb - y[i]) ** 2
        cost_sum = cost_sum + cost
    total_cost = (1 / (2 * m)) * cost_sum
    return total_cost

def compute_gradient(x, y, w, b):
    # Gradients of the cost with respect to w and b
    m = x.shape[0]
    dj_dw = 0
    dj_db = 0
    for i in range(m):
        f_wb = w * x[i] + b
        dj_dw_i = (f_wb - y[i]) * x[i]
        dj_db_i = f_wb - y[i]
        dj_db += dj_db_i
        dj_dw += dj_dw_i
    dj_dw = dj_dw / m
    dj_db = dj_db / m
    return dj_dw, dj_db

def gradient_descent(x, y, w_in, b_in, alpha, num_iters):
    # Batch gradient descent: repeatedly step w and b against the gradient
    w = copy.deepcopy(w_in)
    b = b_in
    for i in range(num_iters):
        dj_dw, dj_db = compute_gradient(x, y, w, b)
        w = w - alpha * dj_dw
        b = b - alpha * dj_db
    return w, b
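A minimal usage sketch (the arrays and hyperparameter values below are illustrative, not from the original notes):

x_train = np.array([1.0, 2.0, 3.0, 4.0])            # hypothetical feature values
y_train = np.array([300.0, 500.0, 700.0, 900.0])    # hypothetical targets
w_final, b_final = gradient_descent(x_train, y_train, w_in=0.0, b_in=0.0,
                                    alpha=1.0e-2, num_iters=10000)
print(f"w = {w_final:.2f}, b = {b_final:.2f}")
print(f"cost = {compute_cost(x_train, y_train, w_final, b_final):.4f}")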
LINEAR REGRESSION WITH MULTIPLE FEATURES:

import numpy as np
import copy

def compute_cost(X, y, w, b):
    m = X.shape[0]
    cost = 0.0
    for i in range(m):
        f_wb_i = np.dot(X[i], w) + b
        cost = cost + (f_wb_i - y[i])**2
    cost = cost / (2 * m)
    return cost

def compute_gradient(X, y, w, b):
    m, n = X.shape
    dj_dw = np.zeros((n,))
    dj_db = 0.
    for i in range(m):
        err = (np.dot(X[i], w) + b) - y[i]
        for j in range(n):
            dj_dw[j] = dj_dw[j] + err * X[i, j]
        dj_db = dj_db + err
    dj_dw = dj_dw / m
    dj_db = dj_db / m
    return dj_db, dj_dw

def gradient_descent(X, y, w_in, b_in, alpha, num_iters):
    w = copy.deepcopy(w_in)
    b = b_in
    for i in range(num_iters):
        dj_db, dj_dw = compute_gradient(X, y, w, b)
        w = w - alpha * dj_dw
        b = b - alpha * dj_db
    return w, b

def zscore_normalize_features(X):
    # Scale each feature to zero mean and unit standard deviation
    mu = np.mean(X, axis=0)
    sigma = np.std(X, axis=0)
    X_norm = (X - mu) / sigma
    return X_norm

-------------------------------------------------------------------
X_norm = zscore_normalize_features(X_train)
w_norm, b_norm = gradient_descent(X_norm, y_train, np.zeros(X_norm.shape[1]), 0.,
                                  1.0e-1, 1000)
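A new example must be scaled with the same training-set statistics before prediction. A minimal sketch (x_house and its values are hypothetical; mu and sigma are recomputed here because zscore_normalize_features above does not return them):

mu = np.mean(X_train, axis=0)
sigma = np.std(X_train, axis=0)
x_house = np.array([1200, 3, 1, 40])            # hypothetical new example
x_house_norm = (x_house - mu) / sigma           # apply the training-set scaling
price = np.dot(x_house_norm, w_norm) + b_norm   # prediction with the learned parameters
print(f"predicted value: {price:0.2f}")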
Linear Regression using Scikit-Learn (1):
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDRegressor
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
X_norm = scaler.fit_transform(X_train)

sgdr = SGDRegressor(max_iter=1000)
sgdr.fit(X_norm, y_train)
b_norm = sgdr.intercept_
w_norm = sgdr.coef_

y_pred_sgd = sgdr.predict(X_norm)
y_pred = np.dot(X_norm, w_norm) + b_norm

Linear Regression using Scikit-Learn (2):


import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression

linear_model = LinearRegression()
linear_model.fit(X_train, y_train)
b = linear_model.intercept_
w = linear_model.coef_
y_pred = linear_model.predict(X_train)

Logistic Regression
import numpy as np
import copy, math

def sigmoid(z):
    g = 1/(1+np.exp(-z))
    return g

def compute_cost_logistic(X, y, w, b):
    m = X.shape[0]
    cost = 0.0
    for i in range(m):
        z_i = np.dot(X[i], w) + b
        f_wb_i = sigmoid(z_i)
        cost += -y[i]*np.log(f_wb_i) - (1-y[i])*np.log(1-f_wb_i)
    cost = cost / m
    return cost

def compute_gradient_logistic(X, y, w, b):
    m, n = X.shape
    dj_dw = np.zeros((n,))
    dj_db = 0.
    for i in range(m):
        f_wb_i = sigmoid(np.dot(X[i], w) + b)
        err_i = f_wb_i - y[i]
        for j in range(n):
            dj_dw[j] = dj_dw[j] + err_i * X[i, j]
        dj_db = dj_db + err_i
    dj_dw = dj_dw/m
    dj_db = dj_db/m
    return dj_db, dj_dw

def gradient_descent(X, y, w_in, b_in, alpha, num_iters):
    w = copy.deepcopy(w_in)
    b = b_in
    for i in range(num_iters):
        dj_db, dj_dw = compute_gradient_logistic(X, y, w, b)
        w = w - alpha * dj_dw
        b = b - alpha * dj_db
    return w, b
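A minimal usage sketch on a toy two-feature dataset (arrays and hyperparameters are illustrative only):

X_train = np.array([[0.5, 1.5], [1, 1], [1.5, 0.5], [3, 0.5], [2, 2], [1, 2.5]])
y_train = np.array([0, 0, 0, 1, 1, 1])
w_out, b_out = gradient_descent(X_train, y_train, np.zeros(2), 0., alpha=0.1, num_iters=10000)
print(f"updated parameters: w:{w_out}, b:{b_out}")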

Logistic Regression using Scikit-Learn:

from sklearn.linear_model import LogisticRegression


lr_model = LogisticRegression()
lr_model.fit(X, y)
y_pred = lr_model.predict(X)
print("Prediction on training set:", y_pred)
print("Accuracy on training set:", lr_model.score(X, y))
Regularized Cost and Gradient
def compute_cost_linear_reg(X, y, w, b, lambda_=1):
    m = X.shape[0]
    n = len(w)
    cost = 0.
    for i in range(m):
        f_wb_i = np.dot(X[i], w) + b
        cost = cost + (f_wb_i - y[i])**2
    cost = cost / (2 * m)
    reg_cost = 0
    for j in range(n):
        reg_cost += (w[j]**2)
    reg_cost = (lambda_/(2*m)) * reg_cost
    total_cost = cost + reg_cost
    return total_cost

def compute_cost_logistic_reg(X, y, w, b, lambda_=1):
    m, n = X.shape
    cost = 0.
    for i in range(m):
        z_i = np.dot(X[i], w) + b
        f_wb_i = sigmoid(z_i)
        cost += -y[i]*np.log(f_wb_i) - (1-y[i])*np.log(1-f_wb_i)
    cost = cost/m
    reg_cost = 0
    for j in range(n):
        reg_cost += (w[j]**2)
    reg_cost = (lambda_/(2*m)) * reg_cost
    total_cost = cost + reg_cost
    return total_cost

def compute_gradient_linear_reg(X, y, w, b, lambda_):
    m, n = X.shape
    dj_dw = np.zeros((n,))
    dj_db = 0.
    for i in range(m):
        err = (np.dot(X[i], w) + b) - y[i]
        for j in range(n):
            dj_dw[j] = dj_dw[j] + err * X[i, j]
        dj_db = dj_db + err
    dj_dw = dj_dw / m
    dj_db = dj_db / m
    for j in range(n):
        dj_dw[j] = dj_dw[j] + (lambda_/m) * w[j]
    return dj_db, dj_dw

def compute_gradient_logistic_reg(X, y, w, b, lambda_):
    m, n = X.shape
    dj_dw = np.zeros((n,))
    dj_db = 0.0
    for i in range(m):
        f_wb_i = sigmoid(np.dot(X[i], w) + b)
        err_i = f_wb_i - y[i]
        for j in range(n):
            dj_dw[j] = dj_dw[j] + err_i * X[i, j]
        dj_db = dj_db + err_i
    dj_dw = dj_dw/m
    dj_db = dj_db/m
    for j in range(n):
        dj_dw[j] = dj_dw[j] + (lambda_/m) * w[j]
    return dj_db, dj_dw
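A minimal sketch to check the regularized routines on random data (the arrays and lambda value below are illustrative only):

np.random.seed(1)
X_tmp = np.random.rand(5, 3)            # hypothetical 5 examples, 3 features
y_tmp = np.array([0, 1, 0, 1, 0])
w_tmp = np.random.rand(3) - 0.5
b_tmp = 0.5
lambda_tmp = 0.7
print(compute_cost_logistic_reg(X_tmp, y_tmp, w_tmp, b_tmp, lambda_tmp))
print(compute_gradient_linear_reg(X_tmp, y_tmp, w_tmp, b_tmp, lambda_tmp))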

Simple Neural Network (1)


import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

model = Sequential([
    tf.keras.Input(shape=(2,)),
    Dense(3, activation='sigmoid', name='layer1'),
    Dense(1, activation='sigmoid', name='layer2')])

model.compile(
    loss=tf.keras.losses.BinaryCrossentropy(),
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),)
model.fit(X, Y, epochs=10)

predictions = model.predict(X_testn)
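Inputs are usually normalized before fitting, and X_testn above is the test data passed through the same scaling. A minimal sketch with a Keras Normalization layer (norm_l and the array names are assumptions, not from the original notes):

norm_l = tf.keras.layers.Normalization(axis=-1)
norm_l.adapt(X)            # learn mean and variance from the training features
Xn = norm_l(X)             # normalized training inputs for model.fit
X_testn = norm_l(X_test)   # apply the same scaling to the test inputs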
Simple Neural Network (2)
import numpy as np
import tensorflow as tf

def my_dense(a_in, W, b, g):
    units = W.shape[1]
    a_out = np.zeros(units)
    for j in range(units):
        w = W[:, j]
        z = np.dot(w, a_in) + b[j]
        a_out[j] = g(z)
    return(a_out)

def my_sequential(x, W1, b1, W2, b2):
    a1 = my_dense(x, W1, b1, sigmoid)
    a2 = my_dense(a1, W2, b2, sigmoid)
    return(a2)

def my_predict(X, W1, b1, W2, b2):
    m = X.shape[0]
    p = np.zeros((m, 1))
    for i in range(m):
        p[i, 0] = my_sequential(X[i], W1, b1, W2, b2)
    return(p)

predictions = my_predict(X_tstn, W1_tmp, b1_tmp, W2_tmp, b2_tmp)
yhat = np.zeros_like(predictions)
for i in range(len(predictions)):
    if predictions[i] >= 0.5:
        yhat[i] = 1
    else:
        yhat[i] = 0
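The weight matrices follow an (inputs, units) layout, matching W.shape[1] = units in my_dense. A small hypothetical example of compatible shapes (the numeric values are illustrative, not trained parameters from the original notes):

# Hypothetical 2-feature input, 3-unit hidden layer, 1-unit output layer
W1_tmp = np.array([[-8.93,  0.29, 12.9], [-0.1, -7.32, 10.81]])   # shape (2, 3)
b1_tmp = np.array([-9.82, -9.28,  0.96])
W2_tmp = np.array([[-31.18], [-27.59], [-32.56]])                 # shape (3, 1)
b2_tmp = np.array([15.41])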

K-means Clustering
import numpy as np

def find_closest_centroids(X, centroids):
    K = centroids.shape[0]
    idx = np.zeros(X.shape[0], dtype=int)
    for i in range(X.shape[0]):
        distance = []
        for j in range(centroids.shape[0]):
            norm_ij = np.linalg.norm(X[i] - centroids[j])
            distance.append(norm_ij)
        idx[i] = np.argmin(distance)
    return idx

def compute_centroids(X, idx, K):
    m, n = X.shape
    centroids = np.zeros((K, n))
    for k in range(K):
        points = X[idx == k]
        centroids[k] = np.mean(points, axis=0)
    return centroids

def run_kMeans(X, initial_centroids, max_iters=10, plot_progress=False):
    m, n = X.shape
    K = initial_centroids.shape[0]
    centroids = initial_centroids
    previous_centroids = centroids
    idx = np.zeros(m)
    for i in range(max_iters):
        print("K-Means iteration %d/%d" % (i, max_iters-1))
        idx = find_closest_centroids(X, centroids)
        centroids = compute_centroids(X, idx, K)
    return centroids, idx

X = load_data()
initial_centroids = np.array([[3, 3], [6, 2], [8, 5]])
K = 3
max_iters = 10
centroids, idx = run_kMeans(X, initial_centroids, max_iters, plot_progress=True)
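Instead of hand-picked initial centroids, K-means is commonly seeded from random training examples. A minimal sketch under that assumption (kMeans_init_centroids is not part of the original notes):

def kMeans_init_centroids(X, K):
    # Pick K distinct training examples at random as the starting centroids
    randidx = np.random.permutation(X.shape[0])
    centroids = X[randidx[:K]]
    return centroids

initial_centroids = kMeans_init_centroids(X, K=3)
centroids, idx = run_kMeans(X, initial_centroids, max_iters=10)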
Anomaly Detection
import numpy as np

def estimate_gaussian(X):
    m, n = X.shape
    mu = 1 / m * np.sum(X, axis=0)
    var = 1 / m * np.sum((X - mu) ** 2, axis=0)
    return mu, var

def multivariate_gaussian(X, mu, var):
    k = len(mu)
    if var.ndim == 1:
        var = np.diag(var)
    X = X - mu
    p = (2 * np.pi)**(-k/2) * np.linalg.det(var)**(-0.5) * \
        np.exp(-0.5 * np.sum(np.matmul(X, np.linalg.pinv(var)) * X, axis=1))
    return p

mu, var = estimate_gaussian(X_train)
p = multivariate_gaussian(X, mu, var)
print('# Anomalies found: %d' % sum(p < epsilon))
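epsilon above is a probability threshold that still has to be chosen. A common approach is to sweep candidate thresholds and keep the one with the best F1 score on a labeled validation set; a minimal sketch, assuming X_val and y_val (1 = anomaly) exist (this helper is not in the original notes):

def select_threshold(y_val, p_val):
    best_epsilon, best_F1 = 0, 0
    step = (max(p_val) - min(p_val)) / 1000
    for epsilon in np.arange(min(p_val), max(p_val), step):
        preds = (p_val < epsilon)                 # flag low-probability points
        tp = np.sum((preds == 1) & (y_val == 1))
        fp = np.sum((preds == 1) & (y_val == 0))
        fn = np.sum((preds == 0) & (y_val == 1))
        prec = tp / (tp + fp) if (tp + fp) > 0 else 0
        rec = tp / (tp + fn) if (tp + fn) > 0 else 0
        F1 = 2 * prec * rec / (prec + rec) if (prec + rec) > 0 else 0
        if F1 > best_F1:
            best_F1, best_epsilon = F1, epsilon
    return best_epsilon, best_F1

p_val = multivariate_gaussian(X_val, mu, var)
epsilon, F1 = select_threshold(y_val, p_val)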

Collaborative Filtering Recommender Systems


import numpy as np
import tensorflow as tf
from tensorflow import keras

def cofi_cost_func(X, W, b, Y, R, lambda_):
    nm, nu = Y.shape
    J = 0
    for j in range(nu):
        w = W[j, :]
        b_j = b[0, j]
        for i in range(nm):
            x = X[i, :]
            y = Y[i, j]
            r = R[i, j]
            J += r * np.square((np.dot(w, x) + b_j - y))
    J += (lambda_) * (np.sum(np.square(W)) + np.sum(np.square(X)))
    J = J/2
    return J

iterations = 200
lambda_ = 1
optimizer = keras.optimizers.Adam(learning_rate=1e-1)   # optimizer for the custom loop (assumed Adam)
for iter in range(iterations):
    with tf.GradientTape() as tape:
        cost_value = cofi_cost_func_v(X, W, b, Y, R, lambda_)
    grads = tape.gradient(cost_value, [X, W, b])
    optimizer.apply_gradients(zip(grads, [X, W, b]))

p = np.matmul(X.numpy(), np.transpose(W.numpy())) + b.numpy()
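The training loop above calls cofi_cost_func_v, a vectorized TensorFlow version of the same cost that is not reproduced in these notes. A minimal sketch of what it typically looks like (treat this as an assumed implementation, not the original):

def cofi_cost_func_v(X, W, b, Y, R, lambda_):
    # Same cost as cofi_cost_func, but vectorized so tf.GradientTape can differentiate it
    j = (tf.linalg.matmul(X, tf.transpose(W)) + b - Y) * R
    J = 0.5 * tf.reduce_sum(j**2) + (lambda_/2) * (tf.reduce_sum(X**2) + tf.reduce_sum(W**2))
    return J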

Deep Learning for Content-Based Filtering


import numpy as np
import numpy.ma as ma
from numpy import genfromtxt
from collections import defaultdict
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import Model
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.model_selection import train_test_split
import tabulate

num_outputs = 32
tf.random.set_seed(1)
user_NN = tf.keras.models.Sequential([
    tf.keras.layers.Dense(256, activation='relu'),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(num_outputs, activation='linear'),
])
item_NN = tf.keras.models.Sequential([
    tf.keras.layers.Dense(256, activation='relu'),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(num_outputs, activation='linear'),
])
input_user = tf.keras.layers.Input(shape=(num_user_features,))
vu = user_NN(input_user)
vu = tf.linalg.l2_normalize(vu, axis=1)
input_item = tf.keras.layers.Input(shape=(num_item_features,))
vm = item_NN(input_item)
vm = tf.linalg.l2_normalize(vm, axis=1)
output = tf.keras.layers.Dot(axes=1)([vu, vm])
model = Model([input_user, input_item], output)
model.summary()

cost_fn = tf.keras.losses.MeanSquaredError()
opt = keras.optimizers.Adam(learning_rate=0.01)
model.compile(optimizer=opt, loss=cost_fn)

user_vecs = gen_user_vecs(user_vec, len(item_vecs))
sorted_index, sorted_ypu, sorted_items, sorted_user = predict_uservec(
    user_vecs, item_vecs, model, u_s, i_s, scaler, scalerUser, scalerItem, scaledata=scaledata)
print_pred_movies(sorted_ypu, sorted_user, sorted_items, movie_dict, maxcount=10)
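To train the two-tower model, the compiled model is fit on paired user/item feature matrices against the rating targets. A minimal sketch, assuming user_train, item_train, y_train, and the u_s / i_s column offsets used above (all names here are assumptions):

# Fit on the user and item feature columns (dropping the id columns before u_s / i_s),
# then evaluate on a held-out split.
model.fit([user_train[:, u_s:], item_train[:, i_s:]], y_train, epochs=30)
model.evaluate([user_test[:, u_s:], item_test[:, i_s:]], y_test)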

Deep Q-Learning
import tensorflow as tf
from tensorflow.keras.losses import MSE   # mean squared error used in compute_loss below

def compute_loss(experiences, gamma, q_network, target_q_network):
    states, actions, rewards, next_states, done_vals = experiences
    # Bellman target: reward plus discounted max Q of the next state (zeroed at episode end)
    max_qsa = tf.reduce_max(target_q_network(next_states), axis=-1)
    y_targets = rewards + (gamma * max_qsa * (1 - done_vals))
    # Q-values of the actions actually taken
    q_values = q_network(states)
    q_values = tf.gather_nd(q_values, tf.stack([tf.range(q_values.shape[0]),
                                                tf.cast(actions, tf.int32)], axis=1))
    loss = MSE(y_targets, q_values)
    return loss

def agent_learn(experiences, gamma):
    with tf.GradientTape() as tape:
        loss = compute_loss(experiences, gamma, q_network, target_q_network)
    gradients = tape.gradient(loss, q_network.trainable_variables)
    optimizer.apply_gradients(zip(gradients, q_network.trainable_variables))
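In a full DQN loop the target network is also moved slowly toward the online network after each learning step. A minimal sketch of such a soft update (TAU and this helper are assumptions, not shown in the original notes):

TAU = 1e-3  # soft-update rate (assumed value)

def update_target_network(q_network, target_q_network):
    # target_weights <- TAU * online_weights + (1 - TAU) * target_weights
    for target_w, q_w in zip(target_q_network.weights, q_network.weights):
        target_w.assign(TAU * q_w + (1.0 - TAU) * target_w)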
