PyTorch实现线性回归

Linear Regression

# Training data for the linear-regression demo: eleven paired measurements.
# NOTE(review): t_x looks like raw sensor readings and t_y the corresponding
# target values (e.g. unknown-unit vs. Celsius thermometer readings) — confirm
# against the original text; SOURCE itself does not name the units.
t_y = [0.5, 14.0, 15.0, 28.0, 11.0, 8.0, 3.0, -4.0, 6.0, 13.0, 21.0]
t_x = [35.7, 55.9, 58.2, 81.9, 56.3, 48.9, 33.9, 21.8, 48.4, 60.4, 68.4]


import torch

# Convert the plain Python lists defined above into 1-D torch tensors
# so the model and loss can use tensor broadcasting and .mean().
t_y = torch.tensor(t_y)
t_x = torch.tensor(t_x)


def model(t_x, w, b):
    """Linear model: return the prediction w * t_x + b.

    Broadcasts elementwise when t_x is a tensor, so one (w, b) pair
    produces one prediction per input value.
    """
    scaled = t_x * w
    return scaled + b

def loss_fn(t_y, t_p):
    """Mean squared error between targets t_y and predictions t_p."""
    return ((t_y - t_p) ** 2).mean()


# Random initial weight and zero bias; evaluate the untrained model once.
w = torch.randn(1)
b = torch.zeros(1)
t_p = model(t_x, w, b)
# Bug fix: the original line ended with `//tensor(2478.7595)` — in Python
# `//` is floor division, not a comment, and `tensor` is undefined, so the
# line raised NameError. It was meant as a sample-output annotation.
loss = loss_fn(t_y, t_p)  # e.g. tensor(2478.7595) for one random init


def dloss_fn(t_y, t_p):
    """Derivative of the MSE loss with respect to the predictions t_p.

    loss = mean((t_y - t_p)**2), so dloss/dt_p = 2 * (t_p - t_y) / n.

    Bug fix: the original returned 2*(t_y - t_p), which is the NEGATIVE of
    the gradient (and omitted the 1/n from the mean); plugging that into a
    `w = w - lr * grad` update would ascend the loss instead of descending.
    """
    return 2 * (t_p - t_y) / t_y.size(0)

def dmodel_dw(t_x, w, b):
    """Partial derivative of model(t_x, w, b) = w*t_x + b with respect to w.

    d(w*t_x + b)/dw = t_x; the unused w and b keep the signature parallel
    with the other derivative helpers.
    """
    return t_x

def dmodel_db(t_x, w, b):
    """Partial derivative of model(t_x, w, b) = w*t_x + b with respect to b.

    d(w*t_x + b)/db = 1; a plain float is returned and broadcasts fine.
    """
    return 1.0

# Chain rule: dL/dw = (dL/dp) * (dp/dw) and dL/db = (dL/dp) * (dp/db).
# dloss_fn is pure, so evaluating it once is trace-identical to calling
# it separately for each parameter.
dloss = dloss_fn(t_y, t_p)
dw = dloss * dmodel_dw(t_x, w, b)
db = dloss * dmodel_db(t_x, w, b)


def train(learning_rate, w, b, x, y):
    """Run one manual gradient-descent step and report the loss before/after.

    Args:
        learning_rate: step size for the parameter update.
        w, b: current weight and bias tensors.
        x, y: input and target tensors.

    Returns:
        (w, b) after a single update step.

    Bug fixes vs. the original:
    * the gradient was computed from the globals t_x/t_y instead of the
      x/y arguments, so training on normalized inputs silently used the
      raw data;
    * grad_fn was called twice — the second call saw the already-updated w
      and a stale t_p, so b was updated from an inconsistent gradient.
    """
    t_p = model(x, w, b)
    loss = loss_fn(y, t_p)
    print("loss: ", loss)
    grad = grad_fn(x, w, b, y, t_p)  # evaluate once; update w and b together
    w = w - learning_rate * grad[0]
    b = b - learning_rate * grad[1]
    t_p = model(x, w, b)
    loss = loss_fn(y, t_p)
    print("loss: ", loss)
    return (w, b)


# With the raw inputs, lr=1e-2 overshoots: the loss below explodes after
# one step (see the printed sample output).
train(learning_rate=1e-2, w=torch.tensor(1.0), b=torch.tensor(0.0), x=t_x, y=t_y)

#----------------------------
loss:  1763.8846435546875
loss:  5802484.5


# A 100x smaller learning rate keeps the step stable on the raw inputs:
# the loss decreases after one update.
train(learning_rate=1e-4, w=torch.tensor(1.0), b=torch.tensor(0.0), x=t_x, y=t_y)
#----------------------------
loss:  1763.8846435546875
loss:  323.0905456542969


# Rescale the inputs by 0.1 so that lr=1e-2 becomes usable again.
t_xn = 0.1 * t_x
train(learning_rate=1e-2, w=torch.tensor(1.0), b=torch.tensor(0.0), x=t_xn, y=t_y)
#----------------------------
loss:  80.36434173583984
loss:  37.57491683959961


def train_loop(epochs, learning_rate, params, x, y):
    """Run `epochs` manual gradient-descent steps on params = [w, b].

    Args:
        epochs: number of update steps.
        learning_rate: step size.
        params: tensor holding [w, b].
        x, y: input and target tensors.

    Returns:
        The final params tensor.

    Bug fix: the original used a variable `grad` that was never assigned
    inside the function (NameError at runtime); the gradient must be
    computed from the current params every epoch.
    """
    for epoch in range(1, epochs + 1):
        w, b = params
        t_p = model(x, w, b)
        loss = loss_fn(y, t_p)
        grad = grad_fn(x, w, b, y, t_p)  # was missing in the original
        params = params - learning_rate * grad
        print(f'Epoch: {epoch}, Loss: {float(loss)}')
    return params

# Train on the NORMALIZED inputs. Bug fix: the original passed x=t_x, but
# the sample output below (Epoch 1 loss 80.36...) matches the t_xn run, and
# lr=1e-2 on raw t_x was already shown to diverge.
param = train_loop(epochs=5000,
                   learning_rate=1e-2,
                   params=torch.tensor([1.0, 0.0]),
                   x=t_xn,
                   y=t_y)
print("w,b", float(param[0]), float(param[1]))
#----------------------------
Epoch: 1, Loss:  80.36434173583984
Epoch: 2, Loss:  37.57491683959961
...
Epoch: 4999, Loss: 2.927647352218628
Epoch: 5000, Loss: 2.927647590637207
#----------------------------
w,b 5.367083549499512 -17.301189422607422


# Autograd demo: build a tiny computation graph and inspect the gradients
# that backward() accumulates on the leaf tensors.
#
# NOTE(review): in the original snippet b and c were never defined with
# requires_grad (only `a` was), and the REPL lines were garbled by
# extraction. The printed gradients tensor(3.), tensor(24.), tensor(15.)
# imply b = 5.0 and c = 8.0 — confirm against the original article.
a = torch.tensor(3.0, requires_grad=True)
b = torch.tensor(5.0, requires_grad=True)
c = torch.tensor(8.0, requires_grad=True)

u = b * c      # u = 40
v = a + u      # v = 43
j = 3 * v      # j = 129

j.backward()
# dj/da = 3, dj/db = 3*c = 24, dj/dc = 3*b = 15
print(a.grad)  # tensor(3.)
print(b.grad)  # tensor(24.)
print(c.grad)  # tensor(15.)


# use autograd
def train_loop(epochs, learning_rate, params, x, y):
    """Gradient-descent loop using autograd instead of manual derivatives.

    Args:
        epochs: number of update steps.
        learning_rate: step size.
        params: tensor [w, b] created with requires_grad=True.
        x, y: input and target tensors.

    Returns:
        The optimized params tensor.

    Bug fix: the original body never called loss.backward() and never
    updated `params`, so it just printed the same loss `epochs` times.
    """
    for epoch in range(1, epochs + 1):
        if params.grad is not None:
            params.grad.zero_()  # gradients accumulate; clear the previous step's
        w, b = params
        t_p = model(x, w, b)
        loss = loss_fn(y, t_p)
        loss.backward()
        with torch.no_grad():  # update must not be recorded in the graph
            params -= learning_rate * params.grad
        print(f'Epoch: {epoch}, Loss: {float(loss)}')
    return params

# Bug fix: the original passed `params = params` before any `params`
# variable existed (it is only defined further down, for the optimizer
# section). The autograd loop needs a requires_grad leaf tensor.
param = train_loop(epochs=5000,
                   learning_rate=1e-2,
                   params=torch.tensor([1.0, 0.0], requires_grad=True),
                   x=t_xn,
                   y=t_y)
print("w,b", float(param[0]), float(param[1]))


Optimizers

import torch.optim as optim

# List the optimizers torch.optim ships with.
# Bug fix: the original `dir(optim))` had an unbalanced ')' (SyntaxError),
# and the bare REPL expression printed nothing in a script.
print(dir(optim))
#----------------------------
'LBFGS', 'Optimizer', 'RMSprop', 'Rprop', 'SGD', 'SparseAdam',
'__name__', '__package__', '__path__', '__spec__', 'lr_scheduler']


Optimizer通常和autograd配合使用，因为在训练的时候它需要修改tensor的梯度值，因此Optimizer内部会retain传入的tensor。使用Optimizer的方式也很简单，它提供两个API：一个是zero_grad()，用于清空tensor上保存的导数值；另一个是step()，用来执行一次具体的优化操作。接下来我们为上面的demo引入一个optimizer。

# Fresh parameter tensor [w, b] tracked by autograd, plus an SGD optimizer
# that will update it in place on each step().
params = torch.tensor([1.0, 0.0], requires_grad=True)
learning_rate = 1e-2
optimizer = optim.SGD([params], lr=learning_rate)


def train_loop(epochs, learning_rate, params, x, y):
for epoch in range(1, epochs + 1):
w,b = params
t_p = model(x, w, b)
loss = loss_fn(y, t_p)
loss.backward()
optimizer.step() #update params