
@yvki
Created April 14, 2024 05:25
Simple implementation of Activation, MSE Loss, Propagation Functions ⚙️
# 1. Algorithm to create depiction of Sigmoid class activation function
import numpy as np

# sample_data is not defined in the original gist; a small assumed example input is used here
sample_data = np.array([-2.0, -1.0, 0.0, 1.0, 2.0])

class Sigmoid:
    def forward(self, x):
        # Element-wise logistic function: 1 / (1 + e^(-x))
        sigmoid_x = 1 / (1 + np.exp(-x))
        return sigmoid_x

    def derivative(self, z):
        # d/dz sigmoid(z) = sigmoid(z) * (1 - sigmoid(z))
        sigmoid_z = self.forward(z)
        sigmoid_derivative_z = sigmoid_z * (1 - sigmoid_z)
        return sigmoid_derivative_z

sigmoid = Sigmoid()
sample_data_sigmoid_forward = sigmoid.forward(sample_data)
print(sample_data_sigmoid_forward)
sample_data_sigmoid_derivative = sigmoid.derivative(sample_data)
print(sample_data_sigmoid_derivative)
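# 1.1 Optional depiction sketch (added for illustration, not part of the original gist;
# assumes matplotlib is installed): plots the Sigmoid forward pass and its derivative
# over a small input range.
import matplotlib.pyplot as plt
xs = np.linspace(-6, 6, 200)
plt.plot(xs, sigmoid.forward(xs), label="sigmoid(x)")
plt.plot(xs, sigmoid.derivative(xs), label="sigmoid'(x)")
plt.legend()
plt.show()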
# 2. Algorithm to create depiction of Tanh class activation function
class Tanh:
    def forward(self, x):
        tanh_x = np.tanh(x)
        return tanh_x

    def derivative(self, z):
        # d/dz tanh(z) = 1 - tanh(z)^2
        tanh_z = self.forward(z)
        tanh_derivative_z = 1 - tanh_z**2
        return tanh_derivative_z

tanh = Tanh()
sample_data_tanh_forward = tanh.forward(sample_data)
print(sample_data_tanh_forward)
sample_data_tanh_derivative = tanh.derivative(sample_data)
print(sample_data_tanh_derivative)
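# 2.1 Sanity-check sketch (added for illustration, not part of the original gist):
# tanh(x) can be rewritten in terms of the Sigmoid class above as 2*sigmoid(2x) - 1,
# so the two implementations should agree numerically.
xs = np.linspace(-3, 3, 7)
print(np.allclose(tanh.forward(xs), 2 * sigmoid.forward(2 * xs) - 1))  # expected: True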
# 3. Algorithm to create depiction of ReLU class activation function
class ReLU:
    def forward(self, x):
        relu_x = np.maximum(0, x)
        return relu_x

    def derivative(self, z):
        # ReLU is not differentiable at 0; the convention here assigns a derivative of 1 at z = 0
        relu_derivative_z = np.where(z >= 0, 1, 0)
        return relu_derivative_z

relu = ReLU()
sample_data_relu_forward = relu.forward(sample_data)
print(sample_data_relu_forward)
sample_data_relu_derivative = relu.derivative(sample_data)
print(sample_data_relu_derivative)
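# 3.1 Numerical gradient-check sketch (added for illustration, not in the original gist;
# finite_difference is a small helper defined here only for the check): compares each
# derivative() against a central finite difference. The test points avoid 0, where the
# ReLU subgradient convention above would otherwise disagree with the numerical estimate.
def finite_difference(fn, x, eps=1e-5):
    return (fn(x + eps) - fn(x - eps)) / (2 * eps)

xs = np.array([-2.0, -0.5, 0.7, 3.0])
for act in (Sigmoid(), Tanh(), ReLU()):
    ok = np.allclose(act.derivative(xs), finite_difference(act.forward, xs), atol=1e-4)
    print(type(act).__name__, ok)  # expected: True for all three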
# 4. Algorithm to create Mean Squared Error (MSE) Loss function
# 4.1 Assume that A is an N x C matrix where:
# A = output from the network,
# N = dimension 0 of A,
# C = dimension 1 of A, and
# Y = ground truth
class MSELoss:
    def forward(self, A, Y):
        self.A = A
        self.Y = Y
        self.N = A.shape[0]
        self.C = A.shape[1]
        # MSE = (1 / (2 * N * C)) * sum of squared errors
        sse = np.sum((A - Y)**2)
        mse = (1 / (2 * self.N * self.C)) * sse
        return mse

    def backward(self):
        # Gradient of the loss above with respect to A: (A - Y) / (N * C)
        dLdA = (self.A - self.Y) / (self.N * self.C)
        return dLdA
# Toy data: Y is random ground truth, A is Y plus small noise
from numpy import random
N = 5
C = 4
random.seed(5)
Y = random.random(size=(N, C))
print(Y)
random.seed(8)
A = Y + random.random(size=(N, C)) / 10
print(A)
mse_loss = MSELoss()
print(mse_loss.forward(A, Y))
print(mse_loss.backward())
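# 4.2 Consistency-check sketch (added for illustration, not part of the original gist):
# the forward pass above equals half of np.mean((A - Y)**2), and backward() matches a
# one-entry central finite difference of the loss.
print(np.isclose(mse_loss.forward(A, Y), 0.5 * np.mean((A - Y)**2)))  # expected: True
eps = 1e-6
A_plus = A.copy()
A_plus[0, 0] += eps
A_minus = A.copy()
A_minus[0, 0] -= eps
numeric = (MSELoss().forward(A_plus, Y) - MSELoss().forward(A_minus, Y)) / (2 * eps)
print(np.isclose(mse_loss.backward()[0, 0], numeric))  # expected: True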
# 5. Algorithm to create propagation functions
class Linear:
    def __init__(self, in_features, out_features):
        # W is (out_features x in_features), b is (out_features x 1)
        self.W = np.ones((out_features, in_features), dtype="f")
        self.b = np.zeros((out_features, 1), dtype="f")
        self.dLdW = np.ones((out_features, in_features), dtype="f")
        self.dLdb = np.zeros((out_features, 1), dtype="f")

    def forward(self, A):
        self.A = A
        self.N = A.shape[0]
        self.Ones = np.ones((self.N, 1), dtype="f")
        # Affine map: Z = A W^T + 1 b^T, where 1 is a column of N ones
        Z = np.dot(A, self.W.T) + np.dot(self.Ones, self.b.T)
        return Z

    def backward(self, dLdZ):
        # Chain rule through Z = A W^T + 1 b^T
        dZdA = self.W       # (out_features x in_features)
        dZdW = self.A       # (N x in_features)
        dZdb = self.Ones    # (N x 1)
        dLdA = np.dot(dLdZ, dZdA)         # (N x in_features)
        self.dLdW = np.dot(dLdZ.T, dZdW)  # (out_features x in_features)
        self.dLdb = np.dot(dLdZ.T, dZdb)  # (out_features x 1)
        return dLdA
# 5.1 Forward
C_in = C
C_out = 1
linear_layer = Linear(C_in, C_out)
Z = linear_layer.forward(A)
print(Z)
# New ground truth for the linear layer: its own output plus small noise
Y = Z + random.random(size=(N, C_out)) / 10
print(Y)
mse_loss = MSELoss()
print(mse_loss.forward(Z, Y))
# 5.2 Backward
dLdZ = mse_loss.backward()
dLdA = linear_layer.backward(dLdZ)
print(dLdA)
print(linear_layer.dLdW)
print(linear_layer.dLdb)
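# 5.3 End-to-end sketch (a hypothetical mini training loop added for illustration, not
# part of the original gist; the names layer, loss_fn, lr and the step count are
# arbitrary choices): runs forward, MSE, backward, and a plain gradient-descent update
# on W and b using the toy A and Y above. The printed loss should decrease.
layer = Linear(C_in, C_out)
loss_fn = MSELoss()
lr = 0.1
for step in range(5):
    Z = layer.forward(A)
    loss = loss_fn.forward(Z, Y)
    dLdZ = loss_fn.backward()
    layer.backward(dLdZ)          # stores layer.dLdW and layer.dLdb
    layer.W -= lr * layer.dLdW
    layer.b -= lr * layer.dLdb
    print(step, loss)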