Tuesday, October 15, 2019

PyTorch (from Sung Kim's lectures)



In [23]:
import torch
torch.__version__
Out[23]:
'1.2.0'
In [0]:
import numpy as np
import matplotlib.pyplot as plt
In [0]:
x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]
In [0]:
def forward(x):
    return x * w
In [0]:
def loss(x, y):
    y_pred = forward(x)
    return (y_pred - y) * (y_pred - y)
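Here forward implements the linear model y_pred = x * w with no bias term, and loss is the squared error for a single sample.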
In [29]:
w_list = []
mse_list = []
for w in np.arange(0.0, 4.1, 0.1):
    print(f"W={w}")
    l_sum = 0
    for x_val, y_val in zip(x_data, y_data):
        l = loss(x_val, y_val)
        l_sum += l
    print(f"MSE = {l_sum/3}")
    print("*" * 10)
    w_list.append(w)
    mse_list.append(l_sum / 3)
W=0.0
MSE = 18.666666666666668
**********
W=0.1
MSE = 16.846666666666668
**********
W=0.2
MSE = 15.120000000000003
**********
W=0.30000000000000004
MSE = 13.486666666666665
**********
W=0.4
MSE = 11.946666666666667
**********
W=0.5
MSE = 10.5
**********
W=0.6000000000000001
MSE = 9.146666666666663
**********
W=0.7000000000000001
MSE = 7.886666666666666
**********
W=0.8
MSE = 6.719999999999999
**********
W=0.9
MSE = 5.646666666666666
**********
W=1.0
MSE = 4.666666666666667
**********
W=1.1
MSE = 3.779999999999999
**********
W=1.2000000000000002
MSE = 2.986666666666665
**********
W=1.3
MSE = 2.2866666666666657
**********
W=1.4000000000000001
MSE = 1.6799999999999995
**********
W=1.5
MSE = 1.1666666666666667
**********
W=1.6
MSE = 0.746666666666666
**********
W=1.7000000000000002
MSE = 0.4199999999999995
**********
W=1.8
MSE = 0.1866666666666665
**********
W=1.9000000000000001
MSE = 0.046666666666666586
**********
W=2.0
MSE = 0.0
**********
W=2.1
MSE = 0.046666666666666835
**********
W=2.2
MSE = 0.18666666666666698
**********
W=2.3000000000000003
MSE = 0.42000000000000054
**********
W=2.4000000000000004
MSE = 0.7466666666666679
**********
W=2.5
MSE = 1.1666666666666667
**********
W=2.6
MSE = 1.6800000000000008
**********
W=2.7
MSE = 2.2866666666666693
**********
W=2.8000000000000003
MSE = 2.986666666666668
**********
W=2.9000000000000004
MSE = 3.780000000000003
**********
W=3.0
MSE = 4.666666666666667
**********
W=3.1
MSE = 5.646666666666668
**********
W=3.2
MSE = 6.720000000000003
**********
W=3.3000000000000003
MSE = 7.886666666666668
**********
W=3.4000000000000004
MSE = 9.14666666666667
**********
W=3.5
MSE = 10.5
**********
W=3.6
MSE = 11.94666666666667
**********
W=3.7
MSE = 13.486666666666673
**********
W=3.8000000000000003
MSE = 15.120000000000005
**********
W=3.9000000000000004
MSE = 16.84666666666667
**********
W=4.0
MSE = 18.666666666666668
**********
In [30]:
plt.plot(w_list, mse_list)
Out[30]:
[<matplotlib.lines.Line2D at 0x7f81fd485588>]
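
The curve is a parabola with its minimum at w = 2.0, the true slope of the data. For readability, the same plot can be drawn with axis labels (a minimal sketch reusing the matplotlib import above):

plt.plot(w_list, mse_list)
plt.xlabel("w")
plt.ylabel("MSE")
plt.show()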

Gradient Descent

In [0]:
x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]
w = 1.0
In [0]:
def forward(x):
    return x * w
In [0]:
def loss(x, y):
    y_pred = forward(x)
    return (y_pred - y)**2
In [0]:
def gradient(x, y):
    return 2 * x * (x * w - y)
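This is the analytic derivative of the per-sample loss: with loss = (x*w - y)**2, the chain rule gives d(loss)/dw = 2 * x * (x*w - y), exactly the expression returned above.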
In [40]:
print("Predict (before training)", forward(4.0))
Predict (before training) 4.0
In [41]:
for epoch in range(10):
    for x, y in zip(x_data, y_data):
        l = loss(x, y)
        w = w - 0.01 * gradient(x, y)
    # note: l here is only the loss of the last sample in the epoch
    print(f"After Epoch: {epoch}, Loss: {l/3}, w:{w}")
After Epoch: 0, Loss: 2.4386476799999994, w:1.260688
After Epoch: 1, Loss: 1.3329214952735635, w:1.453417766656
After Epoch: 2, Loss: 0.7285512077588492, w:1.5959051959019805
After Epoch: 3, Loss: 0.3982131462423004, w:1.701247862192685
After Epoch: 4, Loss: 0.21765623082005736, w:1.7791289594933983
After Epoch: 5, Loss: 0.11896702874286423, w:1.836707389300983
After Epoch: 6, Loss: 0.06502526426457467, w:1.8792758133988885
After Epoch: 7, Loss: 0.03554165416551504, w:1.910747160155559
After Epoch: 8, Loss: 0.019426436710527316, w:1.9340143044689266
After Epoch: 9, Loss: 0.010618145163155871, w:1.9512159834655312
In [42]:
print("Predict (After training", forward(4.0))
Predict (After training 7.804863933862125
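After only ten epochs the prediction for x = 4.0 is already close to the true value 2 * 4 = 8.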

Linear Regression with PyTorch Autograd

In [0]:
x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]
w = torch.tensor(1.0, requires_grad=True)
In [0]:
def forward(x):
    return x * w
In [0]:
def loss(x,y):
    y_pred = forward(x)
    return (y_pred - y)**2
In [64]:
forward(4.0).item()
Out[64]:
4.0
In [65]:
for epoch in range(10):
    for x, y in zip(x_data, y_data):
        l = loss(x, y)
        l.backward()
        with torch.no_grad():  # update the weight without tracking gradients
            w -= 0.01 * w.grad
            w.grad.zero_()
    print(w)
tensor(1.2607, requires_grad=True)
tensor(1.4534, requires_grad=True)
tensor(1.5959, requires_grad=True)
tensor(1.7012, requires_grad=True)
tensor(1.7791, requires_grad=True)
tensor(1.8367, requires_grad=True)
tensor(1.8793, requires_grad=True)
tensor(1.9107, requires_grad=True)
tensor(1.9340, requires_grad=True)
tensor(1.9512, requires_grad=True)
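
The printed weights match the manual gradient-descent run above (1.2607 vs 1.260688, and so on), since autograd computes the same derivative 2x(xw - y). The same update can also be written with an optimizer acting on the raw tensor (a minimal sketch, equivalent to the loop above):

w = torch.tensor(1.0, requires_grad=True)
optimizer = torch.optim.SGD([w], lr=0.01)
for epoch in range(10):
    for x, y in zip(x_data, y_data):
        l = loss(x, y)
        l.backward()
        optimizer.step()       # w <- w - lr * w.grad
        optimizer.zero_grad()  # clear the accumulated gradient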

Linear Regression with torch.nn

In [0]:
import torch
import torch.nn as nn
In [0]:
x_data = torch.tensor([[1.], [2.], [3.]])
y_data = torch.tensor([[2.], [4.], [6.]])
In [0]:
class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(1, 1)
    def forward(self, x):
        y_pred = self.linear(x)
        return y_pred
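nn.Linear(1, 1) holds one weight and one bias, so the model computes y = w*x + b; both parameters appear in model.parameters() below.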
In [0]:
model = Model()
In [117]:
model(torch.tensor([4.]))
Out[117]:
tensor([-0.5874], grad_fn=<AddBackward0>)
In [118]:
list(model.parameters())
Out[118]:
[Parameter containing:
 tensor([[0.0638]], requires_grad=True), Parameter containing:
 tensor([-0.8427], requires_grad=True)]
In [0]:
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
In [120]:
for epoch in range(10):
    y_pred = model(x_data)
    loss = criterion(y_pred, y_data)
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    print(loss)
tensor(24.7310, grad_fn=<MseLossBackward>)
tensor(0.2954, grad_fn=<MseLossBackward>)
tensor(0.0035, grad_fn=<MseLossBackward>)
tensor(4.9161e-05, grad_fn=<MseLossBackward>)
tensor(7.1796e-06, grad_fn=<MseLossBackward>)
tensor(6.3653e-06, grad_fn=<MseLossBackward>)
tensor(6.0568e-06, grad_fn=<MseLossBackward>)
tensor(5.7686e-06, grad_fn=<MseLossBackward>)
tensor(5.4954e-06, grad_fn=<MseLossBackward>)
tensor(5.2341e-06, grad_fn=<MseLossBackward>)
In [121]:
list(model.parameters())
Out[121]:
[Parameter containing:
 tensor([[1.9974]], requires_grad=True), Parameter containing:
 tensor([0.0059], requires_grad=True)]
In [122]:
model(torch.tensor([4.]))
Out[122]:
tensor([7.9955], grad_fn=<AddBackward0>)
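With w ≈ 1.9974 and b ≈ 0.0059 the model has essentially recovered y = 2x, which is why the prediction for x = 4 lands at 7.9955, close to 8.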

Logistic Regression

In [0]:
import torch
import torch.nn as nn
In [0]:
x_data = torch.tensor([[1.], [2.], [3.], [4.0], [5.0], [6.0]])
y_data = torch.tensor([[0.], [0.], [0.], [1.], [1.], [1.]])
In [0]:
class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(1, 1)
    def forward(self, x):
        y_pred = torch.sigmoid(self.linear(x))
        return y_pred
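The only change from the linear model is torch.sigmoid, which squashes the linear output into (0, 1) so it can be read as P(y = 1 | x).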
In [0]:
model = Model()
In [174]:
model(torch.tensor([3.]))
Out[174]:
tensor([0.2660], grad_fn=<SigmoidBackward>)
In [175]:
list(model.parameters())
Out[175]:
[Parameter containing:
 tensor([[-0.3846]], requires_grad=True), Parameter containing:
 tensor([0.1387], requires_grad=True)]
In [176]:
criterion = nn.BCELoss(reduction='mean')
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
In [177]:
for epoch in range(50):
    y_pred = model(x_data)
    loss = criterion(y_pred, y_data)
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    print(loss)
tensor(1.1917, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.8951, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.7467, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.6876, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.6640, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.6530, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.6466, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.6422, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.6385, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.6351, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.6319, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.6289, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.6258, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.6228, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.6198, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.6169, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.6140, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.6111, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.6082, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.6054, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.6026, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.5998, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.5970, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.5943, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.5915, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.5888, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.5861, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.5835, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.5809, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.5782, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.5757, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.5731, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.5705, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.5680, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.5655, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.5630, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.5606, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.5581, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.5557, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.5533, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.5509, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.5486, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.5462, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.5439, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.5416, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.5393, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.5371, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.5348, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.5326, grad_fn=<BinaryCrossEntropyBackward>)
tensor(0.5304, grad_fn=<BinaryCrossEntropyBackward>)
In [178]:
list(model.parameters())
Out[178]:
[Parameter containing:
 tensor([[0.3344]], requires_grad=True), Parameter containing:
 tensor([-0.5356], requires_grad=True)]
In [179]:
model(torch.tensor([9.]))
Out[179]:
tensor([0.9223], grad_fn=<SigmoidBackward>)
In [180]:
model(torch.tensor([2.]))
Out[180]:
tensor([0.5332], grad_fn=<SigmoidBackward>)
In [181]:
model(torch.tensor([1.]))
Out[181]:
tensor([0.4499], grad_fn=<SigmoidBackward>)
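
The learned parameters put the decision boundary where sigmoid(w*x + b) = 0.5, i.e. at x = -b/w (a quick check using the trained model above):

w = model.linear.weight.item()
b = model.linear.bias.item()
print(-b / w)  # about 1.60: 0.5356 / 0.3344

That matches the predictions: x = 2 scores 0.5332 (just above 0.5) while x = 1 scores 0.4499 (below it).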