Created
June 7, 2021 10:24
-
-
Save sameerg07/0e533cb0b562db7ebdcd2855b2f46b60 to your computer and use it in GitHub Desktop.
Gradient descent for more than one independent variable.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import numpy as np
import pandas as pd
# Theta is the coefficient vector (intercept, area, bedrooms).
# NOTE(review): np.matrix is deprecated in NumPy; a plain 1-D float array
# is the supported form and matches the (X.shape[1],) shape the solver uses.
theta = np.zeros(3)
alpha = 0.01        # learning rate (step size for each gradient update)
iterations = 1000   # number of gradient-descent steps
# define cost function
# takes in theta (current values of coefficients b0, b1, b2), X and y
# returns total cost at current b0, b1, b2
def compute_cost(X, y, theta):
    """Return the mean-squared-error cost J(theta) = sum((X@theta - y)^2) / (2m).

    Parameters
    ----------
    X : array_like, shape (m, n)
        Design matrix (first column should be ones for the intercept).
    y : array_like, shape (m,)
        Target values.
    theta : array_like, shape (n,)
        Current coefficient vector.

    Returns
    -------
    float
        The half-mean-squared-error at ``theta``.
    """
    residual = np.matmul(X, theta) - y
    # The conventional 1/(2m) factor keeps the gradient formula free of a 2.
    return float(np.sum(np.square(residual)) / (2 * len(y)))
# gradient descent
# takes in current X, y, starting theta, learning rate alpha, num_iters
# returns per-iteration coefficients and cost
def gradient_descent_multi(X, y, theta, alpha, iterations):
    """Run batch gradient descent and record the trajectory.

    Parameters
    ----------
    X : array_like, shape (m, n)
        Design matrix (include a column of ones for the intercept).
    y : array_like, shape (m,)
        Target values.
    theta : array_like, shape (n,)
        Starting coefficient vector.  (BUGFIX: the original discarded this
        argument and always restarted from zeros.)
    alpha : float
        Learning rate.
    iterations : int
        Number of gradient-descent steps.

    Returns
    -------
    pandas.DataFrame
        One row per iteration with columns ``'Betas'`` (coefficient vector
        after the update; was misspelled ``'Bets'``) and ``'cost'``.
    """
    X = np.asarray(X, dtype=float)
    y = np.asarray(y, dtype=float).ravel()
    # Honor the caller's starting point instead of resetting to zeros.
    theta = np.asarray(theta, dtype=float).ravel()
    m = len(X)
    rows = []
    for _ in range(iterations):
        # Gradient of J(theta) = (1/m) * X^T (X theta - y).
        gradient = np.matmul(X.T, np.matmul(X, theta) - y) / m
        theta = theta - alpha * gradient
        # Cost at the freshly updated theta (matches the original ordering).
        cost = float(np.sum(np.square(np.matmul(X, theta) - y)) / (2 * m))
        rows.append([theta.copy(), cost])
    # Build the DataFrame once — appending via .loc inside the loop is O(n^2).
    return pd.DataFrame(rows, columns=['Betas', 'cost'])
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment