# Gradient descent: find a suitable value for the weight w.
# Import the required APIs.
import numpy as np
import matplotlib as plt
# 数据集
x_data=[1.0,2.0,3.0,4.0]
y_data=[2.0,4.0,6.0,8.0]
# Prediction model: f(x) = w * x
def forward(x, weight=None):
    """Forward pass: predict y for input x with the linear model w * x.

    Args:
        x: input value.
        weight: optional explicit weight. When None (the default), the
            module-level global ``w`` is used, preserving the original
            behavior.

    Returns:
        The prediction ``weight * x``.
    """
    return (w if weight is None else weight) * x
# Cost function: mean squared error over the data set
def cost(xs, ys):
    """Mean squared error of forward() over the paired data set.

    Args:
        xs: input values.
        ys: target values, paired element-wise with xs.

    Returns:
        Mean of (y - forward(x)) ** 2 over the pairs.

    Note:
        Divides by len(xs); raises ZeroDivisionError on an empty data
        set, matching the original behavior.
    """
    # Accumulate into 'total' rather than a local named 'cost', which
    # shadowed this function's own name in the original code.
    total = 0.0
    for x, y in zip(xs, ys):
        y_pred = forward(x)
        total += (y - y_pred) ** 2
    return total / len(xs)
# Gradient of the MSE cost with respect to w
def gradient(xs, ys, weight=None):
    """Average gradient d(cost)/dw over the data set.

    For the model f(x) = w * x with squared error (y - w*x)**2, the
    per-sample gradient w.r.t. w is 2 * (w*x - y) * x.

    Args:
        xs: input values.
        ys: target values, paired element-wise with xs.
        weight: optional explicit weight; defaults to the module-level
            global ``w`` (original behavior).

    Returns:
        The gradient averaged over len(xs) samples.
    """
    wv = w if weight is None else weight
    total = sum(2 * (x * wv - y) * x for x, y in zip(xs, ys))
    return total / len(xs)
# Choose a starting value for w and a learning rate a.
# (NOTE: the size of a affects stability and learning speed, so it must
# be chosen with care.)
w=0.1
a=0.01
# Training loop: run 100 epochs of gradient descent.
def main():
    """Run 100 epochs of gradient descent, updating the global w.

    Prints the epoch number, current w, and the cost before the update
    on every iteration.
    """
    # BUGFIX: 'global' is required here. Without it, 'w -= ...' makes
    # w a function-local variable and the first read raises
    # UnboundLocalError.
    global w
    for epoch in range(100):
        cost_val = cost(x_data, y_data)
        w -= a * gradient(x_data, y_data)
        print("epoch=", epoch, "w=", w, cost_val)

if __name__ == "__main__":
    # The original script defined main() but never called it; invoke it
    # when run as a script.
    main()