
Predicting house prices with linear regression

Input: the area of a house, $ x $

Output: the price of the house, $ y $

  1. Hypothesis
$$ h_\theta(x) = \theta_0 + \theta_1*x $$
  2. Loss function
$$ J(\theta_0, \theta_1) = \frac{1}{2m} \sum_{i=1}^{m}(h_\theta(x^{(i)}) - y^{(i)})^2 $$
  3. Optimization objective
$$ \mathop{minimize}\limits_{\theta_0, \theta_1} J(\theta_0, \theta_1) $$
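These three pieces map directly onto code. As a minimal NumPy sketch (the names `hypothesis` and `cost` are illustrative, not cells from this notebook):

import numpy as np

def hypothesis(theta_0, theta_1, x):
    # h_theta(x) = theta_0 + theta_1 * x
    return theta_0 + theta_1 * x

def cost(theta_0, theta_1, x, y):
    # J(theta_0, theta_1) = 1/(2m) * sum((h_theta(x_i) - y_i)^2)
    m = len(x)
    return np.sum((hypothesis(theta_0, theta_1, x) - y) ** 2) / (2 * m)

# the optimization objective is to find theta_0, theta_1 that minimize cost(theta_0, theta_1, x, y)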
In [60]:
import random
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D

plt.rcParams["figure.figsize"] = (8, 6)

Data analysis

  • Training data
  • Data distribution
In [61]:
# Generate training data
x = np.array([random.randint(80, 140) for i in range(10)])
print("House area", x)


def random_price(x):
    # price is roughly 2.8-3.1 per unit of area, plus a small random offset
    y = x*random.uniform(2.8, 3.1) + random.randint(20, 23)
    return round(y, 1)

y = np.array([random_price(i) for i in x])
print("House price", y)
House area [ 83 128  98  94 114  93 137 136 129 115]
House price [262.5 397.9 324.  290.8 359.  304.  431.8 431.5 418.6 371.3]
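Note that the training data is random, so each run of this cell produces a different dataset. If reproducible numbers are wanted, the generator can be seeded first (a small, assumed addition; the original cell does not set a seed):

import random

random.seed(42)  # makes the random.randint / random.uniform calls above reproducible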
In [62]:
# Visualize the data distribution
plt.scatter(x, y)
plt.show()

Modeling

  1. Start from the simplest hypothesis: a univariate linear regression model
  2. Define the MSE loss function
In [63]:
# Define the linear regression model

class LinearRegression(object):
    def __init__(self, theta_0=-50, theta_1=-5):
        # initial parameters for the line h(x) = theta_0 + theta_1 * x;
        # the defaults start far from a good fit on purpose
        self.theta_0 = theta_0
        self.theta_1 = theta_1
    
    def __call__(self, x):
        # predict a price from an area
        return self.theta_0 + self.theta_1 * x
    
    def update(self, theta_0, theta_1):
        self.theta_0 = theta_0
        self.theta_1 = theta_1

        
linear_regression_model = LinearRegression()
In [64]:
plt.scatter(x, y)
plt.plot([80, 140], [linear_regression_model(i) for i in [80, 140]])
plt.show()
In [65]:
# Define the loss function (mean squared error)
def loss_func(y, y_pred):
    loss = np.square(y - y_pred).mean()
    return loss

y_pred = [linear_regression_model(i) for i in x]
print("真实值", y)
print("预测值", y_pred)

loss = loss_func(y, y_pred)
print("mse loss", loss)
Ground truth [262.5 397.9 324.  290.8 359.  304.  431.8 431.5 418.6 371.3]
Predictions [-465, -690, -540, -520, -620, -515, -735, -730, -695, -625]
mse loss 968910.044
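One detail worth noting: the formula for $ J(\theta_0, \theta_1) $ above carries a factor of $ \frac{1}{2m} $, while loss_func computes the plain mean squared error ($ \frac{1}{m} $). The two differ only by a constant factor of 2, so they have the same minimizer; a variant matching the formula exactly would look like this (a sketch, not one of the notebook's cells):

import numpy as np

def half_mse_loss(y, y_pred):
    # J = 1/(2m) * sum((y_pred_i - y_i)^2), matching the cost function defined earlier
    return np.square(np.asarray(y) - np.asarray(y_pred)).mean() / 2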

Gradient descent

The gradient descent update (computed here over the full training set, i.e. batch gradient descent):

Loss function: $$ \begin{align*} J(\theta_0, \theta_1) &= \frac{1}{2m} \sum_{i=1}^{m}[(\theta_0 + \theta_1*x_i) - y_i]^2 \end{align*} $$

Taking the partial derivatives with respect to $\theta_0$ and $\theta_1$: $$ \begin{align*} \frac{\partial}{\partial \theta_0} J(\theta_0, \theta_1) &= \frac{1}{m} \sum_{i=1}^{m}(\theta_0 + \theta_1*x_i - y_i) \\ \frac{\partial}{\partial \theta_1} J(\theta_0, \theta_1) &= \frac{1}{m} \sum_{i=1}^{m}x_i * (\theta_0 + \theta_1*x_i - y_i) \\ \end{align*} $$

Parameter update, where $\mathrm{d}\theta_j$ denotes the corresponding partial derivative above ($j \in \{0, 1\}$): $$ \begin{align*} \theta_j &= \theta_j - lr*\mathrm{d}\theta_j \end{align*} $$
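The loop-based implementation below follows these formulas directly; for reference, the same gradients can also be written in vectorized NumPy form (a sketch under the same definitions, not part of the original notebook):

import numpy as np

def gradients(theta_0, theta_1, x, y):
    # residual r_i = theta_0 + theta_1 * x_i - y_i, for NumPy arrays x, y
    r = theta_0 + theta_1 * x - y
    dtheta_0 = r.mean()        # (1/m) * sum(r_i)
    dtheta_1 = (x * r).mean()  # (1/m) * sum(x_i * r_i)
    return dtheta_0, dtheta_1

# one update step: theta_j <- theta_j - lr * dtheta_j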

In [66]:
# Visualize the loss surface
def show_grade(dots=None):
    theta_0 = np.linspace(-100, 100, 100)
    theta_1 = np.linspace(-10, 10, 100)

    z = np.zeros([len(theta_0), len(theta_1)])

    for i in range(len(theta_0)):
        for j in range(len(theta_1)):
            theta_0_ = theta_0[i]
            theta_1_ = theta_1[j]
            model = LinearRegression(theta_0=theta_0_, theta_1=theta_1_)
            y_ = [model(x_) for x_ in x]
            loss_z_ = loss_func(y, y_)

            z[i, j] = loss_z_

    theta_0, theta_1 = np.meshgrid(theta_0, theta_1)
    fig = plt.figure(figsize=(8, 6))
    # add_subplot(projection='3d') replaces the deprecated Axes3D(fig) constructor
    ax = fig.add_subplot(projection='3d')
    # z[i, j] is indexed (theta_0, theta_1) while meshgrid uses (theta_1, theta_0) ordering,
    # so transpose z to line the surface up with the axes
    ax.plot_surface(theta_0, theta_1, z.T, cmap='rainbow', alpha=0.5)


    if dots:
        list_x = dots[0]
        list_y = dots[1]

        # mark the given (theta_0, theta_1) points on the surface
        for i in range(len(list_x)):
            a = list_x[i]
            b = list_y[i]

            model = LinearRegression(theta_0=a, theta_1=b)
            y_ = [model(x_) for x_ in x]
            loss_dot = loss_func(y, y_)
            ax.scatter(a, b, loss_dot, c='Black', s=10, alpha=1, marker='o')

    # ax.view_init(elev=30, azim=175)
    ax.set_xlabel('theta_0', fontsize=16)
    ax.set_ylabel('theta_1', fontsize=16)
    plt.show()

show_grade()
In [67]:
# Plot the model's current (theta_0, theta_1) on the loss surface
theta_0 = linear_regression_model.theta_0
theta_1 = linear_regression_model.theta_1
    
show_grade(dots=([theta_0], [theta_1]))
In [77]:
# Compute the gradients
def cal_dtheta0(model, x, y):
    theta_0 = model.theta_0
    theta_1 = model.theta_1
    
    dtheta_0 = []
    for i in range(len(x)):
        # theta_0 + theta_1 * x_i - y_i
        d = theta_0 + theta_1 * x[i] - y[i]
        dtheta_0.append(d)
    return np.mean(dtheta_0)

def cal_dtheta1(model, x, y):
    theta_0 = model.theta_0
    theta_1 = model.theta_1
    
    dtheta_1_ = []
    for i in range(len(x)):
        # x_i * (theta_0 + theta_1 * x_i - y_i)
        d = x[i] * (theta_0 + theta_1 * x[i] - y[i])
        dtheta_1_.append(d)
    return np.mean(dtheta_1_)


# Number of training steps
train_steps = 50
# Learning rate
lr = 0.00001

history_theta_0 = []
history_theta_1 = []
losses = []
linear_regression_model = LinearRegression()
for i in range(train_steps):
    # Compute the loss
    y_pred = [linear_regression_model(x_) for x_ in x]
    loss = loss_func(y, y_pred)
    losses.append(loss)
    
    # Compute the gradients
    dtheta_0 = cal_dtheta0(linear_regression_model, x, y)
    dtheta_1 = cal_dtheta1(linear_regression_model, x, y)
    theta_0 = linear_regression_model.theta_0
    theta_1 = linear_regression_model.theta_1
    history_theta_0.append(theta_0)
    history_theta_1.append(theta_1)
    
    # Update the parameters along the negative gradient
    theta_0 = theta_0 - lr*dtheta_0
    theta_1 = theta_1 - lr*dtheta_1
    linear_regression_model.update(theta_0, theta_1)

plt.plot(losses)
plt.show()
In [78]:
show_grade(dots=(history_theta_0, history_theta_1))
In [79]:
plt.scatter(x, y)
plt.plot([80, 140], [linear_regression_model(i) for i in [80, 140]])
plt.show()
In [80]:
print(linear_regression_model.theta_1)
3.607776372216733
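As a sanity check (not part of the original notebook), the result can be compared against the closed-form least-squares fit, e.g. via np.polyfit; the closed-form slope should land near the 2.8-3.1 range used when generating the prices:

# ordinary least-squares fit for comparison; np.polyfit returns [slope, intercept] for degree 1
slope, intercept = np.polyfit(x, y, 1)
print("closed-form slope", slope)
print("closed-form intercept", intercept)
print("gradient-descent slope", linear_regression_model.theta_1)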