
Commit

docs added
Mohamed Gamal committed Jan 25, 2021
1 parent a01e45c commit 7491030
Showing 10 changed files with 105 additions and 33 deletions.
4 changes: 4 additions & 0 deletions nn_recipe/NN/ActivationFunctions/__activation.py
@@ -8,8 +8,12 @@ class ActivationFunction(Function):
functionality
:cvar ID: unique id for each activation function class
:type ID: int
"""
ID = -1

def save(self):
"""
Returns the activation function ID to be saved in the save phase
"""
return self.ID
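
The ID returned by save() only becomes useful together with a loader that maps IDs back to classes when a saved network is rebuilt. Below is a minimal sketch of that pattern with a hypothetical registry; the real mechanism in nn_recipe (ActivationFunctionFactory) may be organized differently.

import numpy as np

# Hypothetical registry mapping a saved ID back to the activation class.
_ACTIVATION_REGISTRY = {}

def register_activation(cls):
    """Class decorator that records an activation class under its ID."""
    _ACTIVATION_REGISTRY[cls.ID] = cls
    return cls

def activation_from_id(saved_id):
    """Rebuild an activation object from the ID stored by save()."""
    return _ACTIVATION_REGISTRY[saved_id]()

@register_activation
class _FakeReLU:
    ID = 2  # illustrative value only, not an ID used by nn_recipe
    def __call__(self, x):
        return np.maximum(0, x)

f = activation_from_id(2)   # -> _FakeReLU instance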
11 changes: 7 additions & 4 deletions nn_recipe/NN/ActivationFunctions/hardTanh.py
@@ -8,10 +8,13 @@ class HardTanh(ActivationFunction):
"""
Class represents the hardtanh activation function
Examples:
---------
>>> x = np.array([1, 2, 3]) # input vector
>>> f = HardTanh() # creating HardTanh object
>>> print(f(x)) # calculating HardTanh of the input
>>> print(f.local_grad) # get local_grad of the HardTanh at the input
"""

def __init__(self):
@@ -24,7 +27,7 @@ def _forward(self, x):
1 x>1
-1 x<-1
}
- visit ////////// for more info on HardTanh func
:param x: input that is wanted to calculate the HardTanh at
:return: HardTanh value at input x
:rtype: np.ndarray
@@ -41,15 +44,15 @@ def _calc_local_grad(self, x):
0 x > 1 or x < -1
1 -1 <= x <= 1
}
- visit //////////////////////
to get more info about HardTanh
:param x: input that is wanted to calculate the HardTanh at
:return: HardTanh gradient at input x
:rtype: np.ndarray
"""
X = np.copy(x)
X[(x < -1) | (x > 1)] = 0   # zero gradient outside [-1, 1]; `and`/chained comparisons don't work on numpy arrays
X[(x >= -1) & (x <= 1)] = 1  # unit gradient inside [-1, 1]
return X
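
For reference, the same forward pass and local gradient can be written with plain numpy. This is only an illustrative sketch of the hardtanh definition above, not the library's code:

import numpy as np

def hardtanh(x):
    # clip to [-1, 1]: x for -1 <= x <= 1, -1 below, 1 above
    return np.clip(x, -1.0, 1.0)

def hardtanh_grad(x):
    # gradient is 1 inside [-1, 1] and 0 outside
    return ((x >= -1.0) & (x <= 1.0)).astype(x.dtype)

x = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
print(hardtanh(x))        # [-1.  -0.5  0.   0.5  1. ]
print(hardtanh_grad(x))   # [0. 1. 1. 1. 0.]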


4 changes: 2 additions & 2 deletions nn_recipe/NN/ActivationFunctions/identity.py
@@ -4,7 +4,7 @@

class Identity(ActivationFunction):
"""
Class represents the sigmoid activation function
Class represents the Identity activation function
>>> x = np.array([1, 2, 3]) # input vector
>>> f = Identity() # creating Identity object
@@ -17,7 +17,7 @@ def __init__(self):

def _forward(self, x):
"""
- Forward pass of the sigmoid function
- Forward pass of the Identity function
- Identity(x) = x
:param x: input that is wanted to calculate the Identity at
:return: Identity value at input x which is x
1 change: 1 addition & 0 deletions nn_recipe/NN/ActivationFunctions/leakyRelu.py
@@ -1,6 +1,7 @@
from .__activation import ActivationFunction
import numpy as np


class LeakyReLU(ActivationFunction):
"""
Class represents the leaky relu activation function
2 changes: 1 addition & 1 deletion nn_recipe/NN/ActivationFunctions/relu.py
@@ -34,7 +34,7 @@ def _calc_local_grad(self, x):
- Backward pass of the relu function
- ∇ relu(x) = 1 for x > 0, 0 otherwise
- visit https://en.wikipedia.org/wiki/Sigmoid_function to get more info about sigmoid
- visit https://en.wikipedia.org/wiki/Rectifier_(neural_networks) to get more info about relu
:param x: input that is wanted to calculate the relu at
:return: relu gradient at input x
:rtype: np.ndarray
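
An equivalent standalone numpy sketch of the relu forward pass and its gradient (illustrative only, not the library implementation):

import numpy as np

def relu(x):
    # forward: max(0, x) element-wise
    return np.maximum(0, x)

def relu_grad(x):
    # gradient: 1 where x > 0, 0 elsewhere
    return (x > 0).astype(x.dtype)

x = np.array([-1.0, 0.0, 2.0])
print(relu(x))       # [0. 0. 2.]
print(relu_grad(x))  # [0. 0. 1.]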
7 changes: 2 additions & 5 deletions nn_recipe/NN/ActivationFunctions/tanh.py
@@ -15,12 +15,10 @@ class Tanh(ActivationFunction):
def __init__(self):
super(Tanh, self).__init__()


def _forward(self, x):
"""
- Forward pass of the Tanh function
- tanh(x)
- visit ////////// for more info on tanh func
:param x: input that is wanted to calculate the tanh at
:return: tanh value at input x
:rtype: np.ndarray
@@ -32,8 +30,7 @@ def _calc_local_grad(self, x):
"""
- Backward pass of the tanh function
- ∇ Tanh = 1-tanh**2
- visit //////////////////////
to get more info about Tanh
:param x: input that is wanted to calculate the Tanh at
:return: Tanh gradient at input x
:rtype: np.ndarray
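
A standalone numpy sketch of the tanh gradient formula above (illustrative only):

import numpy as np

def tanh_grad(x):
    # ∇ tanh(x) = 1 - tanh(x)**2
    return 1.0 - np.tanh(x) ** 2

x = np.array([-1.0, 0.0, 1.0])
print(np.tanh(x))    # forward values
print(tanh_grad(x))  # [0.41997434 1.         0.41997434]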
16 changes: 12 additions & 4 deletions nn_recipe/NN/Layers/__layer.py
@@ -9,20 +9,20 @@
from enum import Enum, auto


class Layer(Function): # TODO add default value to activation type
class Layer(Function):
"""
This class represents a Layer in our Neural Network; the layer has _out_dim neurons and is connected to another
layer with in_dim neurons
Layer is responsible for:
- Calculating the forward path
- Calculating gradients that will be used to calculate backward path
"""
ID = -1

def __init__(self, in_dim, out_dim, **kwargs):
""" Initializes variables that will be used later by the Layer object"""
# TODO add type checking for constructor input
super(Layer, self).__init__() # calling base class (Function) constructor
self._weights: np.ndarray = None # weights matrix
self._bias: np.ndarray = None # bias matrix
@@ -73,7 +73,6 @@ def weights(self):
@weights.setter
def weights(self, value):
""" Layer's weights setter"""
# TODO add type checking for weights setter
assert self._weights.shape == value.shape
self._weights = value

@@ -85,28 +84,37 @@ def bias(self):
@bias.setter
def bias(self, value):
""" Layer's weights setter"""
# TODO add type checking for bias setter
assert self._bias.shape == value.shape
self._bias = value

@property
def size(self):
""" Gets the size of the current layer (number of neurons)"""
return self._out_dim

@property
def input_size(self):
""" Gets input size expected by the layer (number of neurons in the previous layer)"""
return self._in_dim

@abstractmethod
def _save(self):
""" Abstract methode used to get the data that will be saved in the save phase"""
pass

def save(self):
""" Get a dictionary represents the Layer contents
Dictionary Construction:
- ID: unique identifier for the layer class
- Other data specific for each layer specialization
"""
out = self._save()
out["ID"] = self.ID
return out

@staticmethod
@abstractmethod
def load(data):
""" Abstract class used to build a Layer from the Dict descriptor"""
pass
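
Taken together, save() and load(data) define a simple round-trip protocol: each concrete layer serializes itself to a dict tagged with its ID, and a loader dispatches on that ID to rebuild the layer. A minimal sketch of that dispatch, using a hypothetical registry rather than the actual LayerFactory in nn_recipe:

# Hypothetical ID-to-class table; the real LayerFactory may be organized differently.
_LAYER_CLASSES = {}   # e.g. {0: Linear}

def layer_from_descriptor(descriptor):
    """Rebuild a single layer from the dict produced by Layer.save()."""
    data = dict(descriptor)          # copy so the caller's dict is untouched
    layer_cls = _LAYER_CLASSES[data.pop("ID")]
    return layer_cls.load(data)      # delegate to the subclass's load()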
31 changes: 20 additions & 11 deletions nn_recipe/NN/Layers/linear.py
@@ -19,10 +19,17 @@ class Linear(Layer):
1. dW: ∂Y/∂Z * ∂Z/∂W = activation gradient * X
2. dX: ∂Y/∂Z * ∂Z/∂X = activation gradient * W
3. dB: ∂Y/∂Z * ∂Z/∂B = activation gradient * 1
:cvar ID: unique id for the layer class, used by the layer loader
"""

@staticmethod
def load(data):
"""
This function is used to create a new layer based on the descriptor
:rtype: Linear
"""
act = ActivationFunctionFactory(data.pop("activation"))
return Linear(in_dim=data.pop("in_dim"), out_dim=data.pop("out_dim"), activation=act, **data)

@@ -48,21 +55,13 @@ def __init__(self, in_dim, out_dim, activation, **kwargs):
self.__activation = activation
super(Linear, self).__init__(in_dim, out_dim, **kwargs)


def _init_params(self):
"""
Initializes layer parameters (weights and bias)
Many different initialize schemes could be used: # TODO add different init_factors that can be used (mar)
-
-
-
"""
# factor = np.tanh(1/self._in_dim) # factor that will be used to normalize params
factor = np.sqrt(1/self._in_dim)
self._weights = np.random.normal(0, factor, (self._out_dim, self._in_dim)) # init weights
# TODO make initializing bias and weights with a pre defined values a feature
factor = np.sqrt(1 / self._in_dim)
self._weights = np.random.normal(0, factor, (self._out_dim, self._in_dim)) # init weights
self._bias = np.random.normal(0, factor, (self._out_dim, 1))
# self._bias = np.ones((self._out_dim, self.__batch_size)) # init bias
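
The sqrt(1/in_dim) factor scales the normal distribution so that the variance of each neuron's pre-activation stays roughly constant no matter how many inputs feed into it (a LeCun-style initialization). A quick standalone check of that effect (illustrative only, not part of the library):

import numpy as np

rng = np.random.default_rng(0)
in_dim, out_dim, n_samples = 256, 64, 1000

w = rng.normal(0, np.sqrt(1 / in_dim), (out_dim, in_dim))
x = rng.normal(0, 1, (n_samples, in_dim))
z = x @ w.T                       # pre-activations

print(z.var())                    # close to 1, independently of in_dim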

@@ -75,7 +74,6 @@ def _forward(self, x):
"""
return self.__activation(np.dot(self._weights, x.T) + self._bias).T
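
Shape-wise, the forward pass takes x of shape (batch, in_dim), multiplies by weights of shape (out_dim, in_dim), adds the (out_dim, 1) bias with broadcasting, and transposes back to (batch, out_dim). A small standalone check of those shapes (illustrative sketch, not the library's code):

import numpy as np

batch, in_dim, out_dim = 4, 3, 2
x = np.ones((batch, in_dim))
weights = np.ones((out_dim, in_dim))
bias = np.ones((out_dim, 1))
identity = lambda z: z            # stand-in for the activation

out = identity(np.dot(weights, x.T) + bias).T
print(out.shape)                  # (4, 2)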


def _calc_local_grad(self, x):
"""
Local gradient calculations
@@ -97,6 +95,17 @@ def _calc_local_grad(self, x):
}

def _save(self):
"""
Method used to get the data that will be saved in the save phase
Expected Descriptor Structure:
- ID: unique id for each layer (0 in case of Linear Layer)
- in_dim: number of inputs (number of neurons in the previous layer)
- out_dim: number of neurons in the current layer
- activation: Activation function descriptor
- bias: numpy array represents the bias used by the layer
- weights: numpy array represents the weights used by the layer
"""
return {
"in_dim": self._in_dim,
"out_dim": self._out_dim,
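
Under the structure documented above, a saved Linear descriptor might look roughly like the dict below (values are made up for illustration); a loader would dispatch on ID, and Linear.load() pops in_dim, out_dim and activation, forwarding the rest (weights, bias) as keyword arguments:

import numpy as np

# Illustrative descriptor only; the exact keys mirror the _save() docstring above.
descriptor = {
    "ID": 0,                                   # Linear layer id
    "in_dim": 3,
    "out_dim": 2,
    "activation": 2,                           # activation function ID (example value)
    "weights": np.zeros((2, 3)),               # (out_dim, in_dim)
    "bias": np.zeros((2, 1)),                  # (out_dim, 1)
}
# Linear.load(descriptor_without_ID) would rebuild the layer from this dict.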
42 changes: 38 additions & 4 deletions nn_recipe/NN/LossFunctions/__loss_function.py
@@ -4,14 +4,42 @@


class LossFunction(Function):
"""
This class is used as a base class for all loss functions; every loss function must inherit from this class
Examples
--------
>>> x = np.array([[4, 5, 6]])
>>> act = LossFunction() # you can't create an instance of LossFunction directly as it's an abstract class
>>> a = act(x)
>>> grad = a.local_grad
:cvar ID: unique id used by the loss function factory to build loss functions from the network descriptor
:type ID: int
:ivar __sum: flag used to sum the output of loss function (may be used in case of multiclass loss function)
:type __sum: bool
:ivar __axis: axis index at which the numpy sum will happen
:type __axis: int
"""
ID = -1

def __init__(self, sum=False, axis=0):
"""
:param sum: flag used to sum the output of loss function (may be used in case of multiclass loss function)
:type sum: bool
:param axis: axis index at which the numpy sum will happen
:type axis: int
"""
super(LossFunction, self).__init__()
self.__sum = sum
self.__axis = axis

def _forward(self, Y, Y_hat):
"""
This function is called when the object is called; it invokes the subclass's _compute_loss and then checks
the sum flag to sum the loss values along the specified axis
"""
loss = self._compute_loss(Y, Y_hat)
if self.__sum:
loss = np.sum(loss, axis=self.__axis)
@@ -21,21 +49,27 @@ def _forward(self, Y, Y_hat):

@abstractmethod
def _compute_loss(self, Y, Y_hat):
""" Abstract method must be implemented by the user to compute the loss (forward path) """
pass

def _calc_local_grad(self, Y, Y_hat):
grad = self._compute_local_grad(Y, Y_hat)
# if self.__sum:
# grad = np.sum(grad, axis=self.__axis)
# if self.__axis == 0: grad = grad.reshape((1, -1))
# else: grad = grad.reshape((-1, 1))
return grad

@abstractmethod
def _compute_local_grad(self, Y, Y_hat):
""" Abstract method must be implemented by the user to compute the loss gradient (forward path) """
pass
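
As a concrete illustration of this contract, a hypothetical squared-error loss could subclass LossFunction and implement only the two abstract hooks. The sketch below is not part of nn_recipe and assumes numpy is imported as np, as at the top of this module:

class SquaredErrorLoss(LossFunction):
    """Hypothetical example loss: 0.5 * (Y - Y_hat)**2, element-wise."""
    ID = 99                                   # made-up id for illustration

    def _compute_loss(self, Y, Y_hat):
        return 0.5 * (Y - Y_hat) ** 2         # forward path

    def _compute_local_grad(self, Y, Y_hat):
        return Y_hat - Y                      # d(loss)/d(Y_hat)

# Usage might look like this, depending on how Function.__call__ is defined:
# loss = SquaredErrorLoss(sum=True, axis=0)
# value = loss(Y, Y_hat)
# grad = loss.local_grad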

def save(self):
"""
Method used to get the data that will be saved in the save phase
Expected Descriptor Structure:
- ID: unique id for each loss function class
- sum: flag to indicate whether loss sum is needed or not
- axis: axis at which the losses will be summed
"""
return {
"ID": self.ID,
"sum": self.__sum,
20 changes: 18 additions & 2 deletions nn_recipe/NN/network.py
@@ -28,7 +28,7 @@ class Network:
... ],
... optimizer=GD(learning_rate=0.5),
... )
>>> net.train()
>>> net.train(X, Y)
>>> net.__feed_forward([1, 0.1, 0.5, 1.1])
>>> net._save()
@@ -140,8 +140,10 @@ def train(self, X, Y, batch_size=None, epsilon=0.1, max_itr=100, notify_func=Non
:type epsilon: float
:param max_itr: maximum number of iteration to be executed
:type max_itr: int
:notify_func: callback function used to report loss after training an epoch
:param notify_func: callback function used to report loss after training an epoch
:type notify_func: Function[int]
:param verify_func: function hook used to get the accuracy of the validation data set
:type verify_func: Function
:return: loss value and number of iterations executed
:rtype: Tuple[int, int]
"""
@@ -232,6 +234,13 @@ def __feed_forward(self, X):
return input_val

def evaluate(self, X):
"""
Function used to get the predicted class for an input X
:param X: input features
:type X: np.ndarray
:rtype: np.ndarray
"""
feed = self.__feed_forward(X)
if feed.shape[1] == 1:
return feed
@@ -254,6 +263,13 @@ def save(self, path:str):

@staticmethod
def load(path:str):
"""
Load network descriptor from the disk
:param path: path at which the descriptor is saved
:type path: str
:rtype: Network
"""
with open(path, 'rb') as handle:
data = pickle.load(handle)
layers = LayerFactory(data["layers"])
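
End to end, the save/load pair gives a plain pickle round-trip for a trained network. A hedged usage sketch follows; the layers keyword, the Linear/Sigmoid/GD names and the import paths are assumed from the library's own docstring example and may differ from the actual API:

import numpy as np
# Import paths are assumptions about the package layout:
# from nn_recipe.NN.network import Network
# from nn_recipe.NN.Layers.linear import Linear
# from nn_recipe.NN.ActivationFunctions.sigmoid import Sigmoid
# from nn_recipe.Opt.gd import GD

# toy data: 4 samples, 3 features, binary labels (made up for illustration)
X = np.array([[0.1, 0.2, 0.3],
              [0.9, 0.8, 0.7],
              [0.2, 0.1, 0.4],
              [0.8, 0.9, 0.6]])
Y = np.array([[0], [1], [0], [1]])

net = Network(
    layers=[Linear(in_dim=3, out_dim=4, activation=Sigmoid()),
            Linear(in_dim=4, out_dim=1, activation=Sigmoid())],
    optimizer=GD(learning_rate=0.5),
)
loss, iterations = net.train(X, Y, max_itr=100,
                             notify_func=lambda e: print("epoch loss:", e))
net.save("model.net")              # pickles the descriptor dict to disk

restored = Network.load("model.net")
print(restored.evaluate(X))        # predictions from the reloaded network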
