In [2]:
# Load the diabetes CSV; the file ships without a header row.
data = pd.read_csv("diabetes.csv", header=None)
In [3]:
data.head()  # preview the first rows to sanity-check the load
Out[3]:
In [4]:
torch.__version__  # record the torch version for reproducibility
Out[4]:
In [10]:
class DiabetesDataSet(Dataset):
    """Diabetes CSV as a torch Dataset: 8 float features per row, 1 float label.

    Reads "diabetes.csv" (no header) from the working directory; the last
    column is the binary label, reshaped to (N, 1) to match BCELoss.
    """

    def __init__(self):
        super().__init__()
        self.data = pd.read_csv("diabetes.csv", header=None)
        self.len = len(self.data)
        # Build tensors from self.data — the original indexed the *global*
        # `data` frame, and wrapped torch.from_numpy in torch.tensor, which
        # makes an extra copy and raises a UserWarning.
        self.X = torch.tensor(self.data.iloc[:, 0:-1].values, dtype=torch.float)
        self.y = torch.tensor(self.data.iloc[:, -1].values, dtype=torch.float).reshape(-1, 1)

    def __len__(self):
        # Number of rows in the CSV.
        return self.len

    def __getitem__(self, index):
        # Return one (features, label) pair as tensors.
        return self.X[index], self.y[index]
In [11]:
dataset = DiabetesDataSet()  # full dataset (features X, labels y) as tensors
In [12]:
# Number of samples, taken from the Dataset itself (the original chained two
# statements with a semicolon and measured the raw `data` frame instead).
data_len = len(dataset)
data_len
Out[12]:
In [13]:
val_size = 100  # number of rows held out for validation
In [14]:
indices = np.arange(data_len)  # candidate row indices 0..data_len-1
In [15]:
# Sample val_size unique row indices for the validation set.
# Seeded generator so the train/validation split is reproducible across runs
# (the original used the unseeded global RNG).
valid_index = np.random.default_rng(42).choice(indices, val_size, replace=False)
valid_index
Out[15]:
In [16]:
# Remaining indices form the training set. np.setdiff1d returns a sorted,
# deterministic order, unlike `set(indices) - set(valid_index)` whose
# iteration order is implementation-defined.
train_index = np.setdiff1d(indices, valid_index).tolist()
In [17]:
# SubsetRandomSampler draws the *values* in the given index lists.
# The original RandomSampler(train_index) yields positions 0..len(train_index)-1
# — not the chosen indices — so the train/validation split was silently ignored
# and validation rows leaked into training.
train_sampler = torch.utils.data.SubsetRandomSampler(train_index)
valid_sampler = torch.utils.data.SubsetRandomSampler(valid_index)
In [18]:
# Batched loaders over the same dataset; the samplers restrict which rows each
# loader sees. batch_size=1 for validation so the per-sample .item() calls in
# the evaluation cells below work.
train_loader = torch.utils.data.DataLoader(dataset, batch_size=64, sampler=train_sampler )
valid_loader = torch.utils.data.DataLoader(dataset, batch_size=1, sampler=valid_sampler )
In [19]:
# Peek at 5 validation batches without materializing the entire loader
# (the original built a list of all val_size batches just to slice 5).
[batch for batch, _ in zip(valid_loader, range(5))]
Out[19]:
Model building
In [87]:
class LogisticRegression(torch.nn.Module):
    """One-hidden-layer classifier: 8 features -> 6 (tanh) -> 1 (sigmoid).

    Note: despite the name this is a small MLP, not plain logistic
    regression; the class name is kept so existing references still work.
    """

    def __init__(self):
        super().__init__()
        self.l1 = torch.nn.Linear(8, 6)
        self.l2 = torch.nn.Linear(6, 1)
        self.tanh = torch.nn.Tanh()
        self.sigmoid = torch.nn.Sigmoid()
        # (removed the unused self.relu attribute; it held no parameters)

    def forward(self, x):
        """Map a (batch, 8) float tensor to (batch, 1) probabilities in (0, 1)."""
        hidden = self.tanh(self.l1(x))
        # Feed the raw logit into sigmoid. The original applied tanh first,
        # which clamps every output to (sigmoid(-1), sigmoid(1)) ~ (0.27, 0.73)
        # and prevents the model from producing confident probabilities.
        return self.sigmoid(self.l2(hidden))
In [96]:
# Model, binary cross-entropy loss (expects probabilities), and Adam optimizer.
model = LogisticRegression()
criterion = torch.nn.BCELoss()
optimizer = torch.optim.Adam(model.parameters(), lr = 0.01)
In [97]:
list(model.parameters())  # inspect initial weights/biases of both layers
Out[97]:
In [98]:
# Train for 100 epochs, recording mean train/validation BCE loss per epoch.
tr_loss = []
vr_loss = []
for epoch in range(100):
    # --- training pass ---
    model.train()
    train_loss = []
    for inputs, labels in train_loader:
        optimizer.zero_grad()
        pred = model(inputs)
        loss = criterion(pred, labels)
        loss.backward()
        optimizer.step()
        train_loss.append(loss.item())
    tr_loss.append(np.mean(train_loss))

    # --- validation pass: eval mode, no gradient bookkeeping ---
    # (the original built graphs for validation batches and never used them)
    model.eval()
    val_loss = []
    with torch.no_grad():
        for inputs, labels in valid_loader:
            pred = model(inputs)
            val_loss.append(criterion(pred, labels).item())
    vr_loss.append(np.mean(val_loss))
In [99]:
# Label the curves and axes so the figure stands alone when skimmed.
plt.plot(tr_loss, label="train")
plt.plot(vr_loss, label="validation")
plt.xlabel("epoch")
plt.ylabel("mean BCE loss")
plt.legend();
Out[99]:
In [100]:
# Collect per-sample predicted probabilities and true labels over the
# validation set (valid_loader uses batch_size=1, so .item() is valid).
# Runs in eval mode with no_grad; the original also computed a loss per
# sample and discarded it.
model.eval()
pred_all = []
labels_all = []
with torch.no_grad():
    for inputs, labels in valid_loader:
        pred_all.append(model(inputs).item())
        labels_all.append(labels.item())
In [101]:
pred_all = np.array(pred_all) > 0.5  # threshold probabilities into boolean class predictions
In [102]:
pred_all  # boolean predictions for the validation set
Out[102]:
In [103]:
# Validation accuracy: fraction of thresholded predictions matching the labels
# (True compares equal to 1.0, False to 0.0).
(np.asarray(pred_all) == np.asarray(labels_all)).mean()
Out[103]:
No comments :
Post a Comment