I'm currently trying to run a very basic linear regression on some test data points in a Jupyter notebook. My code is below, and as you can see if you run it, the prediction line certainly moves towards where it should go, but then it stops for some reason and I'm not really sure why. Can anyone help me?
(Plots from the notebook: starting weights, ending weights, loss.)
import matplotlib.pyplot as plt
import numpy as np
%matplotlib notebook

plt.style.use("ggplot")

# test data
y = np.array([30, 70, 90, 120, 150, 160, 190, 220])
x = np.arange(2, len(y) + 2)
N = len(y)

# starting weights: [intercept, slope]
weights = np.array([0.2, 0.2])

# predictions of the starting model over a grid of x values
x_ticks = np.array([[1, t * 0.1] for t in range(100)])
y_hat = []
for j in range(len(x_ticks)):
    y_hat.append(np.dot(weights, x_ticks[j]))

# plot the data and the initial prediction line
plt.figure()
plt.scatter(x, y, color="red")
plt.plot(y_hat)
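# (side note: I think building y_hat with np.dot row by row is equivalent to a
#  single vectorized product, e.g. y_hat = x_ticks @ weights, if I'm reading
#  numpy right)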
def plot_model(x, y, weights, loss):
    # prediction line for the current weights over a grid of x values
    x_ticks = np.array([[1, t * 0.1] for t in range(100)])
    y_hat = []
    for j in range(len(x_ticks)):
        y_hat.append(np.dot(weights, x_ticks[j]))

    # data and fitted line
    plt.figure()
    plt.scatter(x, y, color="red")
    plt.plot(y_hat)

    # loss per epoch
    plt.figure()
    plt.plot(loss)
def calculate_grad(weights, N, x_proc, y, loss):
    # residuals y - X.w, one per data point
    residuals = np.sum(y.reshape(N, 1) - weights * x_proc, 1)
    loss.append(sum(residuals ** 2) / 2)
    # print(residuals, x_proc)
    return -np.dot(residuals, x_proc)

def adjust_weights(weights, grad, learning_rate):
    # gradient descent step
    weights -= learning_rate * grad
    return weights
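# (side note: if my maths is right, calculate_grad + adjust_weights together
#  should amount to this vectorized gradient step for the loss
#  0.5 * sum((y - X.w)**2):
#      grad = -x_proc.T @ (y - x_proc @ weights)
#      weights = weights - learning_rate * grad
#  which is what I was aiming for)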
learning_rate = 0.006
epochs = 2000

loss = []
# design matrix: each row is [1, x_i], so weights[0] is the intercept
x_processed = np.array([[1, i] for i in x])

for j in range(epochs):
    grad = calculate_grad(weights, N, x_processed, y, loss)
    weights = adjust_weights(weights, grad, learning_rate)
    if j % 200 == 0:
        print(weights, grad)

plot_model(x, y, weights, loss)
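For reference, I think the exact least-squares fit for these points can be computed directly with numpy (I'm assuming np.linalg.lstsq is the right tool here), which should at least show roughly where the weights ought to end up:

A = np.array([[1, i] for i in x])              # same design matrix as x_processed
best_w, *_ = np.linalg.lstsq(A, y, rcond=None)
print("closed-form weights:", best_w)          # [intercept, slope] of the best-fit line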