import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
url = "sample.csv"
data = pd.read_csv(url)
x = data["x-axis"]
y = data["y-axis"]
def plot(x, y, w, b):
    # draw the prediction line w * x + b over the raw data points
    line = w * x + b
    plt.plot(x, line, color="red", label="prediction")
    plt.scatter(x, y, color="blue", label="data", marker="x")
    plt.title("Title")
    plt.xlabel("x Axis")
    plt.ylabel("y Axis")
    plt.xlim([0, 12])
    plt.ylim([20, 140])
    plt.legend()
    plt.show()
plot(x, y, 10, 20)
def cost_function(x, y, w, b):
    # mean squared error between the data y and the prediction w * x + b
    y2 = w * x + b
    cost = (y - y2) ** 2
    return cost.mean()

cost_function(x, y, 10, 20)
Assuming b = 20, find the w that minimizes the cost:
w_arr = []
costs = []
for w in range(-100, 101):
    # sweep w from 9.0 to 11.0 in steps of 0.01, holding b fixed at 20
    w2 = 10 + w / 100
    cost = cost_function(x, y, w2, 20)
    w_arr.append(w2)
    costs.append(cost)
plt.title("cost function - when b = 20")
plt.xlabel("w")
plt.ylabel("cost function")
plt.plot(w_arr, costs)
plt.show()
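To read the approximate minimizer off the scan (a small addition, using only the w_arr and costs lists built above):

best = costs.index(min(costs))
print(f"cost is lowest near w = {w_arr[best]} (with b fixed at 20)")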
import numpy as np

ws = np.arange(-100, 101)
bs = np.arange(-100, 101)
costs = np.zeros((201, 201))
# evaluate the cost at every (w, b) pair on the grid
for i, w in enumerate(ws):
    for j, b in enumerate(bs):
        costs[i, j] = cost_function(x, y, w, b)
print(costs)
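Since costs now holds the cost at every grid point, a contour plot (a sketch built only from the ws, bs, and costs arrays above) shows the bowl shape of the surface and roughly where its minimum sits:

plt.title("cost over the (w, b) grid")
plt.xlabel("w")
plt.ylabel("b")
# costs is indexed [w, b]; contourf expects rows to vary with y, so transpose
plt.contourf(ws, bs, costs.T, levels=50)
plt.colorbar(label="cost")
plt.show()

The loop below calls compute_gradient, which is never defined in this section. A minimal sketch consistent with cost_function above, obtained by differentiating the mean squared error with respect to w and b, would be:

def compute_gradient(x, y, w, b):
    # d/dw of mean((y - (w*x + b))**2) is mean(2 * x * (pred - y))
    # d/db of mean((y - (w*x + b))**2) is mean(2 * (pred - y))
    pred = w * x + b
    w_gradient = (2 * x * (pred - y)).mean()
    b_gradient = (2 * (pred - y)).mean()
    return w_gradient, b_gradient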
# start from an explicit initial guess (assumed here; the original reused
# whatever w and b were left over from the grid scan above)
w = 0
b = 0
learning_rate = 0.001
for i in range(10):
    w_gradient, b_gradient = compute_gradient(x, y, w, b)
    w = w - w_gradient * learning_rate
    b = b - b_gradient * learning_rate
    cost = cost_function(x, y, w, b)
    print(f"Iteration {i} : Cost {cost}, w: {w}, b: {b}")
def gradient_descent(x, y, w_init, b_init, learning_rate, cost_function, gradient_function, run_iteration):
    c_hist = []
    w_hist = []
    b_hist = []
    w = w_init
    b = b_init
    for i in range(run_iteration):
        w_gradient, b_gradient = gradient_function(x, y, w, b)
        w = w - w_gradient * learning_rate
        b = b - b_gradient * learning_rate
        cost = cost_function(x, y, w, b)
        w_hist.append(w)
        b_hist.append(b)
        c_hist.append(cost)
    return w, b, w_hist, b_hist, c_hist
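A usage sketch for gradient_descent (the starting point, learning rate, and iteration count here are arbitrary choices, not values from the original):

w_final, b_final, w_hist, b_hist, c_hist = gradient_descent(
    x, y, w_init=0, b_init=0, learning_rate=0.001,
    cost_function=cost_function, gradient_function=compute_gradient,
    run_iteration=10000)
print(f"final w: {w_final}, final b: {b_final}, final cost: {c_hist[-1]}")
# plotting the cost history is a quick check that the run actually converged
plt.plot(range(len(c_hist)), c_hist)
plt.title("cost history")
plt.xlabel("iteration")
plt.ylabel("cost")
plt.show()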