# A class whose purpose is to solve for the slope and intercept of a line
import numpy as np

class Linear_model(object):
    def __init__(self):
        # start from a randomly generated slope w and intercept b
        self.w = np.random.randn(1)[0]
        self.b = np.random.randn(1)[0]
        print('---------------------- initial random slope and intercept:', self.w, self.b)

    # model is the equation f(x) = wx + b
    def model(self, x):
        return self.w * x + self.b
    # Linear problem; the underlying principle is least squares.
    # Note: despite its name, this method returns the gradients of the
    # squared error for one sample, not the loss value itself.
    def loss(self, x, y):
        # How many unknowns are in the equation? Two: w and b.
        cost = (y - self.model(x)) ** 2
        # Take partial derivatives: treat everything else as a constant and
        # differentiate with respect to one unknown at a time
        # (an ordinary derivative is a special case of a partial derivative).
        # By the chain rule on cost = (y - (wx + b))**2:
        #   d(cost)/dw = 2 * (y - (wx + b)) * (-x)
        #   d(cost)/db = 2 * (y - (wx + b)) * (-1)
        g_w = 2 * (y - self.model(x)) * (-x)
        g_b = 2 * (y - self.model(x)) * (-1)
        return g_w, g_b
    # One step of gradient descent
    def gradient_descend(self, g_w, g_b, step=0.01):
        # update the slope and intercept by moving against the gradient
        self.w = self.w - g_w * step
        self.b = self.b - g_b * step
        print('----------------------', self.w, self.b)
    def fit(self, X, y):
        # Sentinel initialization: start w_last/b_last exactly 1 away from
        # the current parameters so the first convergence check cannot pass
        # and the loop performs at least one gradient step.
        w_last = self.w + 1
        b_last = self.b + 1
        precision = 0.00001
        max_count = 3000
        count = 0
        while True:
            # stop once neither parameter moved more than `precision`
            if (np.abs(self.w - w_last) < precision) and (np.abs(self.b - b_last) < precision):
                break
            # safety stop: cap the number of iterations
            if count > max_count:
                break
            # remember the current parameters so the next convergence check
            # measures how much this iteration actually changed them
            w_last = self.w
            b_last = self.b
            # average the per-sample gradients over the whole data set
            g_w = 0
            g_b = 0
            size = X.shape[0]
            for xi, yi in zip(X, y):
                gw_i, gb_i = self.loss(xi, yi)
                g_w += gw_i / size
                g_b += gb_i / size
            # update the slope and intercept
            self.gradient_descend(g_w, g_b)
            count += 1
    def coef_(self):
        return self.w

    def intercept_(self):
        return self.b
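
For reference, a minimal usage sketch: fit the class on hypothetical synthetic data drawn from y = 3x + 5 plus noise (the data, noise level, and expected result below are assumptions for illustration, not part of the original post).

import numpy as np

# hypothetical synthetic data: a noisy line y = 3x + 5 (assumed for illustration)
X = np.linspace(0, 2, 50)
y = 3 * X + 5 + np.random.randn(50) * 0.1

lm = Linear_model()
lm.fit(X, y)
# the recovered parameters should land near the true slope 3 and intercept 5
print('slope:', lm.coef_(), 'intercept:', lm.intercept_())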
Question: in

def fit(self, X, y):
    w_last = self.w + 1
    b_last = self.b + 1

why are w_last and b_last initialized to self.w + 1 and self.b + 1 here? What does adding one mean?
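
A minimal sketch of what that + 1 does to the very first convergence check in fit() (the initial value of w below is an assumption for illustration):

import numpy as np

precision = 0.00001
w = 0.3          # assumed initial slope, just for illustration
w_last = w + 1   # sentinel: exactly 1 away from w

# first check in fit(): |w - w_last| = 1, far above precision,
# so the while loop cannot break before taking any gradient step
print(np.abs(w - w_last) < precision)   # False

# if w_last started equal to w instead, the loop would exit immediately
w_last = w
print(np.abs(w - w_last) < precision)   # True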