# Simple univariate linear regression (y ≈ w1*x + w0) trained with
# batch gradient descent.  NOTE: the original paste had the loop body
# fused into a trailing comment, which made the script a SyntaxError;
# reconstructed here with the same statements in the same order.
import numpy as np

# Data
X = np.array([1, 2, 3])
y = np.array([3, 5, 7])

# Parameter initialization
w0, w1 = 0, 0
alpha = 0.1      # learning rate
n = len(X)       # number of samples

# 10 iterations of batch gradient descent
for epoch in range(10):
    # Predictions with the parameters at the start of this epoch
    y_pred = w1 * X + w0
    # Gradients of J(w) = 1/(2n) Σ (y_pred - y)²
    grad_w0 = (1/n) * np.sum(y_pred - y)
    grad_w1 = (1/n) * np.sum((y_pred - y) * X)
    # Parameter update: w := w - α ∂J/∂w
    w0 = w0 - alpha * grad_w0
    w1 = w1 - alpha * grad_w1
    # Loss is computed from the PRE-update predictions (epoch-start loss)
    loss = (1/(2*n)) * np.sum((y_pred - y)**2)
    print(f'Epoch {epoch+1}: w0={w0:.3f}, w1={w1:.3f}, Loss={loss:.3f}')
六、代码实现(Python示例)
# Simple univariate linear regression (y ≈ w1*x + w0) trained with
# batch gradient descent.  NOTE: the original paste had the loop body
# fused into a trailing comment, which made the script a SyntaxError;
# reconstructed here with the same statements in the same order.
import numpy as np

# Data
X = np.array([1, 2, 3])
y = np.array([3, 5, 7])

# Parameter initialization
w0, w1 = 0, 0
alpha = 0.1      # learning rate
n = len(X)       # number of samples

# 10 iterations of batch gradient descent
for epoch in range(10):
    # Predictions with the parameters at the start of this epoch
    y_pred = w1 * X + w0
    # Gradients of J(w) = 1/(2n) Σ (y_pred - y)²
    grad_w0 = (1/n) * np.sum(y_pred - y)
    grad_w1 = (1/n) * np.sum((y_pred - y) * X)
    # Parameter update: w := w - α ∂J/∂w
    w0 = w0 - alpha * grad_w0
    w1 = w1 - alpha * grad_w1
    # Loss is computed from the PRE-update predictions (epoch-start loss)
    loss = (1/(2*n)) * np.sum((y_pred - y)**2)
    print(f'Epoch {epoch+1}: w0={w0:.3f}, w1={w1:.3f}, Loss={loss:.3f}')
七、总结
- 线性回归:通过线性模型拟合数据,核心是最小化预测误差。
- 梯度下降:通过计算损失函数的梯度,逐步调整参数逼近最优解。
- 核心公式:参数更新规则 $w_j := w_j - \alpha \dfrac{\partial J}{\partial w_j}$。
- 实际应用:需注意学习率选择、特征缩放和收敛判断。
"""
2.1线性回归模型与梯度下降\
"""import numpy as np
import matplotlib.pyplot as plt# 设置随机种子(保证可重复性)
np.random.seed(42)# 生成特征 X(单变量)和标签 y
m = 100 # 样本数量
X = 2 * np.random.rand(m, 1) # 生成 [0, 2) 之间的均匀分布数据
y = 4 + 3 * X + np.random.randn(m, 1) # 真实关系: y = 4 + 3X + 高斯噪声# 可视化数据
plt.scatter(X, y, alpha=0.7)
plt.xlabel("X")
plt.ylabel("y")
plt.title("Simulated Linear Data")
plt.show()


def linear_model(X, theta):
    """Linear regression prediction h_θ(X) = Xθ.

    X is the (m, n+1) design matrix including the bias column of ones;
    theta has shape (n+1, 1).  Returns the (m, 1) prediction vector.
    """
    return X.dot(theta)


def compute_cost(X, y, theta):
    """Mean-squared-error cost J(θ) = 1/(2m) · Σ (h_θ(xⁱ) − yⁱ)².

    X: (m, n+1) design matrix; y: (m, 1) targets; theta: (n+1, 1) parameters.
    Returns the scalar cost.
    """
    m = len(y)
    predictions = linear_model(X, theta)
    error = predictions - y
    cost = (1 / (2 * m)) * np.sum(error ** 2)
    return cost
def gradient_descent(X, y, theta, alpha, num_iters):
    """Batch gradient descent for linear regression.

    Parameters
    ----------
    X : (m, n+1) design matrix including the bias column of ones.
    y : (m, 1) target vector.
    theta : (n+1, 1) initial parameters.  BUG FIX: the original updated
        `theta` in place (`theta -= ...`), silently mutating the caller's
        array; we now work on an explicit float copy.
    alpha : learning rate.
    num_iters : number of iterations.

    Returns
    -------
    (theta, cost_history) — the optimized parameters and the cost J(θ)
    recorded AFTER each update (same values as the original loop).
    """
    m = len(y)
    theta = theta.astype(float, copy=True)   # never mutate the caller's array
    cost_history = []
    for _ in range(num_iters):
        error = X.dot(theta) - y                      # (m, 1) residuals Xθ − y
        gradients = (1 / m) * X.T.dot(error)          # ∇J(θ) = 1/m · Xᵀ(Xθ − y)
        theta -= alpha * gradients                    # θ := θ − α∇J(θ)
        post_error = X.dot(theta) - y                 # cost uses the updated θ
        cost_history.append((1 / (2 * m)) * np.sum(post_error ** 2))
    return theta, cost_history


# Add the bias term (x₀ = 1) to the feature matrix
# Design matrix with the bias column x₀ = 1
X_b = np.c_[np.ones((m, 1)), X]  # shape: (m, 2)

# Initialize parameters θ = (θ₀, θ₁)
theta_initial = np.random.randn(2, 1)

# Hyperparameters
alpha = 0.1        # learning rate
num_iters = 1000   # number of iterations

# Run gradient descent
theta_optimized, cost_history = gradient_descent(X_b, y, theta_initial, alpha, num_iters)

# Print the optimal parameters.
# BUG FIX: the original formatted the whole (2, 1) ndarray with ':.3f',
# which raises TypeError; index the individual scalar parameters instead.
print(f"最优参数: θ₀ = {theta_optimized[0, 0]:.3f}, θ₁ = {theta_optimized[1, 0]:.3f}")

# Plot the convergence of the cost function
plt.plot(range(num_iters), cost_history)
plt.xlabel("Iteration")
plt.ylabel("Cost (MSE)")
plt.title("Cost Function Convergence")
plt.show()

# Plot the fitted line against the data points
plt.scatter(X, y, alpha=0.7, label="Data")
plt.plot(X, X_b.dot(theta_optimized), color='red', linewidth=2, label="Linear Regression Fit")
plt.xlabel("X")
plt.ylabel("y")
plt.legend()
plt.title("Linear Regression with Gradient Descent")
plt.show()