Tuesday, November 12, 2019

Optimisation



In [0]:
import torch
import numpy as np
import matplotlib.pyplot as plt
In [76]:
x = torch.arange(10.0, 20.0)  # the floats 10.0 through 19.0
x
Out[76]:
tensor([10., 11., 12., 13., 14., 15., 16., 17., 18., 19.])
In [0]:
y = x * 1.609  # 1 mile ≈ 1.609 km: treat x as miles, y as kilometres
In [78]:
plt.scatter(x,y)
Out[78]:
[scatter plot of y against x: the points fall on a straight line with slope 1.609]
In [0]:
import torch.nn as nn
In [0]:
class LinearRegression(nn.Module):
    def __init__(self):
        super().__init__()
        # a single learnable weight and no bias: the model computes y = w * x
        self.linear = nn.Linear(1, 1, bias=False)
    def forward(self, x):
        return self.linear(x)
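With bias=False the module holds exactly one learnable parameter, so a perfect fit should recover the 1.609 factor that generated y.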
In [92]:
model = LinearRegression()
w1 = model.parameters()
print(list(w1))
[Parameter containing:
tensor([[0.4194]], requires_grad=True)]
In [0]:
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)  # plain full-batch gradient descent
criterion = nn.MSELoss()                                   # mean squared error
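Each optimizer.step() moves the weight downhill along the gradient of the loss: w ← w − lr · ∂L/∂w. With a single weight and the whole batch fed at once, this is ordinary gradient descent; the "stochastic" in SGD only matters when the data arrives in mini-batches.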
In [94]:
loss_list = []
for epoch in range(20):
    y_pred = model(x.reshape(-1, 1))            # forward pass; calling the module invokes forward()
    loss = criterion(y_pred, y.reshape(-1, 1))  # both sides shaped [10, 1]
    loss.backward()                             # backpropagate to compute the gradient
    print(f"Epoch={epoch} --> Loss = {loss.item()}")
    loss_list.append(loss.item())
    optimizer.step()                            # gradient-descent update
    optimizer.zero_grad()                       # clear the gradient for the next epoch
Epoch=0 --> Loss = 309.2274169921875
Epoch=1 --> Loss = 98.01548767089844
Epoch=2 --> Loss = 31.06787109375
Epoch=3 --> Loss = 9.847551345825195
Epoch=4 --> Loss = 3.1213696002960205
Epoch=5 --> Loss = 0.9893785119056702
Epoch=6 --> Loss = 0.3136022984981537
Epoch=7 --> Loss = 0.09940174967050552
Epoch=8 --> Loss = 0.03150755539536476
Epoch=9 --> Loss = 0.009987023659050465
Epoch=10 --> Loss = 0.0031654895283281803
Epoch=11 --> Loss = 0.0010033544385805726
Epoch=12 --> Loss = 0.00031806365586817265
Epoch=13 --> Loss = 0.00010081244545290247
Epoch=14 --> Loss = 3.195372846676037e-05
Epoch=15 --> Loss = 1.0126556844625156e-05
Epoch=16 --> Loss = 3.2112154713104246e-06
Epoch=17 --> Loss = 1.019165210891515e-06
Epoch=18 --> Loss = 3.2389544912803103e-07
Epoch=19 --> Loss = 1.0280818685259874e-07
In [95]:
list(model.parameters())
Out[95]:
[Parameter containing:
 tensor([[1.6090]], requires_grad=True)]
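The learned weight lands on 1.6090, exactly the factor used to generate y. A quick sanity check (the input 50.0 below is an arbitrary illustrative value):

with torch.no_grad():                     # no gradients needed for inference
    print(model(torch.tensor([[50.0]])))  # ≈ tensor([[80.45]]), i.e. 50 * 1.609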
In [104]:
model = LinearRegression()
w1 = model.parameters()
print(list(w1))
[Parameter containing:
tensor([[-0.0252]], requires_grad=True)]
In [0]:
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)  # same model, Adam instead of SGD
criterion = nn.MSELoss()
In [106]:
loss_list = []
for epoch in range(20):
    y_pred = model(x.reshape(-1, 1))            # forward pass
    loss = criterion(y_pred, y.reshape(-1, 1))  # mean squared error
    loss.backward()
    print(f"Epoch={epoch} --> Loss = {loss.item()}")
    loss_list.append(loss.item())
    optimizer.step()                            # Adam update
    optimizer.zero_grad()
Epoch=0 --> Loss = 583.5047607421875
Epoch=1 --> Loss = 576.3853149414062
Epoch=2 --> Loss = 569.3107299804688
Epoch=3 --> Loss = 562.28173828125
Epoch=4 --> Loss = 555.2991333007812
Epoch=5 --> Loss = 548.3635864257812
Epoch=6 --> Loss = 541.4757690429688
Epoch=7 --> Loss = 534.636474609375
Epoch=8 --> Loss = 527.8462524414062
Epoch=9 --> Loss = 521.1057739257812
Epoch=10 --> Loss = 514.4155883789062
Epoch=11 --> Loss = 507.7763671875
Epoch=12 --> Loss = 501.1886291503906
Epoch=13 --> Loss = 494.65283203125
Epoch=14 --> Loss = 488.1695556640625
Epoch=15 --> Loss = 481.73919677734375
Epoch=16 --> Loss = 475.3621826171875
Epoch=17 --> Loss = 469.0389404296875
Epoch=18 --> Loss = 462.76983642578125
Epoch=19 --> Loss = 456.55517578125
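Adam makes far less progress here than SGD did, and the reason is its update rule: the gradient is normalised by its running second moment, so each step changes the weight by roughly lr regardless of how large the gradient is. Starting from −0.0252 with lr = 0.01, the weight needs on the order of 160+ steps to reach 1.609, so 20 epochs barely dent the loss. A minimal sketch of a fix (the epoch count and learning rate below are illustrative, not tuned):

model = LinearRegression()
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)  # 10x larger step size
criterion = nn.MSELoss()
for epoch in range(200):                                  # train much longer
    loss = criterion(model(x.reshape(-1, 1)), y.reshape(-1, 1))
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
print(list(model.parameters()))                           # should now approach 1.609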
In [107]:
import os
os.listdir()
Out[107]:
['.config', 'diabetes.csv.gz', 'sample_data']
In [0]:
import pandas as pd
In [0]:
data = pd.read_csv('diabetes.csv.gz', compression='gzip', header=None, dtype=np.float32)
In [110]:
data.head()
Out[110]:
0 1 2 3 4 5 6 7 8
0 -0.294118 0.487437 0.180328 -0.292929 0.000000 0.001490 -0.531170 -0.033333 0.0
1 -0.882353 -0.145729 0.081967 -0.414141 0.000000 -0.207153 -0.766866 -0.666667 1.0
2 -0.058823 0.839196 0.049180 0.000000 0.000000 -0.305514 -0.492741 -0.633333 0.0
3 -0.882353 -0.105528 0.081967 -0.535354 -0.777778 -0.162444 -0.923997 0.000000 1.0
4 0.000000 0.376884 -0.344262 -0.292929 -0.602837 0.284650 0.887276 -0.600000 0.0
In [111]:
data.shape
Out[111]:
(759, 9)
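So the file holds 759 samples: 8 feature columns (already scaled to roughly [−1, 1], judging by the preview above) and a 0/1 label in the last column.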
In [0]:
X = data.iloc[:,:-1].values
y = data.iloc[:,-1].values
In [0]:
X = torch.from_numpy(X)
y = torch.from_numpy(y)
In [0]:
class LinearRegression(nn.Module):
    def __init__(self):
        super().__init__()
        self.l1 = nn.Linear(8, 1)  # 8 features in, 1 logit out
    def forward(self, x):
        # sigmoid squashes the logit into a probability in (0, 1)
        x = torch.sigmoid(self.l1(x))
        return x
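Despite the class name, the sigmoid output makes this logistic regression: the model predicts a probability, which is exactly what nn.BCELoss expects to compare against the 0/1 labels.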
In [130]:
model = LinearRegression()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
criterion = nn.BCELoss()

for epoch in range(150):
    y_pred = model(X)            # shape [759, 1]
    loss = criterion(y_pred, y)  # y is shape [759] -- this triggers the warning below
    loss.backward()
    print(loss.item())
    optimizer.step()
    optimizer.zero_grad()
0.6467465162277222
0.6463687419891357
0.6459944844245911
0.6456233859062195
0.6452562212944031
...
0.6139726042747498
0.6138426065444946
0.6137142181396484
/usr/local/lib/python3.6/dist-packages/torch/nn/modules/loss.py:498: UserWarning: Using a target size (torch.Size([759])) that is different to the input size (torch.Size([759, 1])) is deprecated. Please ensure they have the same size.
  return F.binary_cross_entropy(input, target, weight=self.weight, reduction=self.reduction)
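The warning points at the shape mismatch noted in the loop above: y_pred is [759, 1] while y is [759], so BCELoss broadcasts one against the other instead of comparing them element by element. The fix is to give the target the same shape as the prediction (a minimal change; re-running with matching shapes will print different loss values):

loss = criterion(y_pred, y.view(-1, 1))  # target reshaped to [759, 1] to match y_pred

Once trained, a quick accuracy check (a sketch, not part of the original run) thresholds the predicted probabilities at 0.5:

with torch.no_grad():
    preds = (model(X) > 0.5).float().view(-1)  # probability > 0.5 -> class 1
    print((preds == y).float().mean())         # fraction of labels predicted correctly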