A small linear regression demo with the PyTorch framework

Linear regression demo

A small demo that trains a linear regression model using the PyTorch framework.

  • Runs quickly on a CPU alone
  • No extra input files are required
import torch
import numpy as np
import torch.nn as nn


x_values = [i for i in range(11)] # [0,1,2,3,4,5,6,7,8,9,10]
x_train = np.array(x_values, dtype=np.float32)
x_train = x_train.reshape(-1, 1) # reshape x_train into an 11x1 matrix (a column vector)

y_values = [2.5*i+3.5 for i in x_values] # y=2.5x+3.5
y_train = np.array(y_values, dtype=np.float32)
y_train = y_train.reshape(-1, 1)
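# At this point x_train and y_train are both (11, 1) float32 arrays, e.g.
# x_train[:3] == [[0.], [1.], [2.]] and y_train[:3] == [[3.5], [6.], [8.5]]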

class LinearRegressionModel(nn.Module): # inherits from nn.Module
    
    def __init__(self, input_dim, output_dim):
        super(LinearRegressionModel, self).__init__() # call the parent class constructor
        self.linear = nn.Linear(input_dim, output_dim)
        # nn.Linear(input dimension, output dimension): a fully connected layer

    def forward(self, x):
        out = self.linear(x)
        return out

input_dim = 1
output_dim = 1
model = LinearRegressionModel(input_dim, output_dim)
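# print(model) shows the module structure; list(model.parameters()) holds the
# (1x1) weight and (1,) bias tensors that will be trained below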

epochs = 1000 # number of training iterations
learning_rate = 0.01 # learning rate
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# optimizer: plain SGD, given the parameters to update (all of the model's parameters) and the learning rate
criterion = nn.MSELoss() # MSE is a common choice of loss for regression tasks
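# nn.MSELoss averages the squared error over all elements, e.g.
# nn.MSELoss()(torch.tensor([1., 2.]), torch.tensor([3., 2.])) evaluates to 2.0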

for epoch in range(epochs):
    epoch += 1
    # x_train and y_train are numpy.ndarray objects; they must be converted to tensors before being fed to the framework
    inputs = torch.from_numpy(x_train)
    labels = torch.from_numpy(y_train)

    # gradients must be cleared at the start of each iteration
    optimizer.zero_grad()

    # forward pass
    outputs = model(inputs) # calling the model invokes forward()

    # compute the loss
    loss = criterion(outputs, labels)

    # backward pass
    loss.backward()

    # update the weight parameters
    optimizer.step()

    # print every 50 epochs to show training progress
    if epoch % 50 == 0:
        print('epoch {}, loss {}'.format(epoch, loss.item()))

y_predicted = model(torch.from_numpy(x_train)).detach().numpy()
# forward pass: feed the training inputs x through the model to get the predictions y for a quick check
# .detach().numpy() converts the result back to a numpy array
print(y_predicted)
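
As a quick sanity check (a minimal sketch, not part of the original demo), the learned weight and bias can be read directly from the linear layer and compared with the true values 2.5 and 3.5, and the trained model can be evaluated on an unseen input under torch.no_grad():

# Sanity check (sketch): the learned parameters should be close to the true ones (w=2.5, b=3.5)
w = model.linear.weight.item()  # single weight of the 1x1 linear layer
b = model.linear.bias.item()    # single bias term
print('learned w = {:.4f}, b = {:.4f}'.format(w, b))

# Predict on an unseen input, e.g. x = 20, where the true value is 2.5*20 + 3.5 = 53.5
with torch.no_grad():  # no gradients are needed for inference
    x_new = torch.tensor([[20.0]])
    print('prediction for x=20: {:.4f}'.format(model(x_new).item()))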

