In [0]:
import numpy as np
import matplotlib.pyplot as plt
In [0]:
x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]
In [0]:
def forward(x):
    return x * w
In [0]:
def loss(x, y):
    y_pred = forward(x)
    return (y_pred - y) * (y_pred - y)
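Each call to loss returns the squared error for a single sample; the sweep below averages it over the three data points to get the mean squared error for a given w:

$$\mathrm{MSE}(w) = \frac{1}{N} \sum_{i=1}^{N} (x_i w - y_i)^2$$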
In [29]:
w_list = []
mse_list = []
for w in np.arange(0.0, 4.1, 0.1):
    print(f"w = {w}")
    l_sum = 0
    for x_val, y_val in zip(x_data, y_data):
        l = loss(x_val, y_val)
        l_sum += l
    print(f"MSE = {l_sum / 3}")
    print("*" * 10)
    w_list.append(w)
    mse_list.append(l_sum / 3)
In [30]:
plt.plot(w_list, mse_list)
Out[30]:
(plot: MSE as a function of w)
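Since the data follows y = 2x exactly, the MSE curve bottoms out at w = 2.0 with zero loss. A minimal sketch for labeling the plot (the axis labels are my own addition, not from the original):

plt.plot(w_list, mse_list)
plt.xlabel("w")
plt.ylabel("MSE")
plt.show()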
Gradient Descent
In [0]:
x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]
w = 1.0
In [0]:
def forward(x):
    return x * w
In [0]:
def loss(x, y):
    y_pred = forward(x)
    return (y_pred - y)**2
In [0]:
def gradient(x, y):
    return 2 * x * (x * w - y)
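The hand-coded gradient is just the derivative of the per-sample loss with respect to w:

$$\frac{\partial}{\partial w}(xw - y)^2 = 2x\,(xw - y)$$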
In [40]:
print("Predict (before training)", forward(4.0))
In [41]:
for epoch in range(10):
    for x, y in zip(x_data, y_data):
        l = loss(x, y)
        w = w - 0.01 * gradient(x, y)
    # l is the loss of the last sample in the epoch, not a dataset average
    print(f"After epoch {epoch}: loss = {l}, w = {w}")
In [42]:
print("Predict (After training", forward(4.0))
Linear Regression with PyTorch Autograd
In [0]:
import torch

x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]
w = torch.tensor(1.0, requires_grad=True)
In [0]:
def forward(x):
    return x * w
In [0]:
def loss(x, y):
    y_pred = forward(x)
    return (y_pred - y)**2
In [64]:
forward(4.0).item()
Out[64]:
4.0
In [65]:
for epoch in range(10):
    for x, y in zip(x_data, y_data):
        l = loss(x, y)
        l.backward()
        # update w without tracking the update itself in the autograd graph
        with torch.no_grad():
            w -= 0.01 * w.grad
        # gradients accumulate, so reset before the next backward()
        w.grad.zero_()
print(w)
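The w.grad.zero_() call matters because PyTorch accumulates gradients across backward() calls. A quick illustration with a throwaway tensor v (my own example):

v = torch.tensor(1.0, requires_grad=True)
(2 * v).backward()
print(v.grad)  # tensor(2.)
(2 * v).backward()
print(v.grad)  # tensor(4.) -- accumulated, which is why the training loop resets it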
Linear Regression with torch.nn
In [0]:
import torch
import torch.nn as nn
In [0]:
x_data = torch.tensor([[1.], [2.], [3.]])
y_data = torch.tensor([[2.], [4.], [6.]])
In [0]:
class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(1, 1)

    def forward(self, x):
        y_pred = self.linear(x)
        return y_pred
In [0]:
model = Model()
In [117]:
model(torch.tensor([4.]))
In [118]:
list(model.parameters())
In [0]:
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
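For plain SGD, optimizer.step() applies the same rule as the manual update earlier: each parameter moves against its gradient, scaled by the learning rate. Roughly, as a sketch of the idea rather than PyTorch's actual implementation:

with torch.no_grad():
    for p in model.parameters():
        p -= 0.1 * p.grad  # new value = old value - lr * gradient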
In [120]:
for epoch in range(10):
    y_pred = model(x_data)
    loss = criterion(y_pred, y_data)
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    print(loss)
In [121]:
list(model.parameters())
In [122]:
model(torch.tensor([4.]))
Logistic Regression
In [0]:
import torch
import torch.nn as nn
In [0]:
x_data = torch.tensor([[1.], [2.], [3.], [4.0], [5.0], [6.0]])
y_data = torch.tensor([[0.], [0.], [0.], [1.], [1.], [1.]])
In [0]:
class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(1, 1)

    def forward(self, x):
        y_pred = torch.sigmoid(self.linear(x))
        return y_pred
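The only change from the linear model is torch.sigmoid, which squashes the linear output z = wx + b into (0, 1), so the prediction can be read as the probability of class 1:

$$\sigma(z) = \frac{1}{1 + e^{-z}}$$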
In [0]:
model = Model()
In [174]:
model(torch.tensor([3.]))
In [175]:
list(model.parameters())
In [176]:
criterion = nn.BCELoss()  # size_average is deprecated; mean reduction is already the default
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
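nn.BCELoss is the binary cross-entropy, averaged over the N samples by default:

$$\ell = -\frac{1}{N} \sum_{i=1}^{N} \left[ y_i \log \hat{y}_i + (1 - y_i) \log(1 - \hat{y}_i) \right]$$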
In [177]:
for epoch in range(50):
    y_pred = model(x_data)
    loss = criterion(y_pred, y_data)
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    print(loss)
In [178]:
list(model.parameters())
In [179]:
model(torch.tensor([9.]))
In [180]:
model(torch.tensor([2.]))
In [181]:
model(torch.tensor([1.]))
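The model outputs probabilities, so for this data (labels flip from 0 to 1 between x = 3 and x = 4) an input like 9.0 should score near 1 and an input like 1.0 near 0. A minimal sketch for turning probabilities into hard labels (my own addition):

probs = model(torch.tensor([[1.], [2.], [9.]]))
labels = (probs > 0.5).float()  # threshold at 0.5
print(labels)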