Wednesday, December 26, 2018

Neural Network on the Fashion-MNIST dataset using PyTorch



In [1]:
import torch
import torchvision
In [2]:
from torchvision import datasets, transforms

1 Prepare data

In [3]:
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5,), (0.5,))])  # Fashion-MNIST is single-channel; map [0, 1] to [-1, 1]
In [4]:
trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', train = True, transform=transform, download=True)
In [5]:
trainloader = torch.utils.data.DataLoader(trainset, shuffle=True, batch_size=64)
In [6]:
testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', train = False, transform=transform, download = True)
In [7]:
testloader = torch.utils.data.DataLoader(testset, shuffle=False, batch_size=64)
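
A quick sanity check (a minimal sketch, not part of the original notebook, assuming the loaders above): each batch yielded by the DataLoader holds 64 single-channel 28x28 images and their labels.

images, labels = next(iter(trainloader))
print(images.shape)   # torch.Size([64, 1, 28, 28])
print(labels.shape)   # torch.Size([64])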

2 Design Model

In [8]:
from torch import nn, optim
import torch.nn.functional as F 
In [9]:
class Model(nn.Module):
    def __init__(self):
        super().__init__()
        # Fully connected layers: 784 -> 256 -> 128 -> 64 -> 10
        self.l1 = nn.Linear(784, 256)
        self.l2 = nn.Linear(256, 128)
        self.l3 = nn.Linear(128, 64)
        self.l4 = nn.Linear(64, 10)

    def forward(self, x):
        # Flatten each 1x28x28 image into a 784-dimensional vector
        x = x.view(x.shape[0], -1)
        x = F.relu(self.l1(x))
        x = F.relu(self.l2(x))
        x = F.relu(self.l3(x))
        # Log-probabilities over the 10 classes (paired with NLLLoss below)
        x = F.log_softmax(self.l4(x), dim=1)
        return x
In [10]:
model = Model()
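
As a rough size check (a sketch, not in the original notebook), the four linear layers hold about 243k trainable parameters:

n_params = sum(p.numel() for p in model.parameters())
print(n_params)   # 242762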
In [11]:
criterion = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(),lr=0.005)
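
Since the model already applies log_softmax, NLLLoss is the matching criterion; the combination is equivalent to nn.CrossEntropyLoss applied to raw logits. A minimal check (a sketch, not in the original notebook):

logits = torch.randn(4, 10)
targets = torch.tensor([1, 0, 3, 9])
loss_a = nn.NLLLoss()(F.log_softmax(logits, dim=1), targets)
loss_b = nn.CrossEntropyLoss()(logits, targets)
print(torch.allclose(loss_a, loss_b))   # True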

3 Train

In [12]:
epoch = 30

train_losses, test_losses = [], []
for e in range(epoch):
    train_loss = 0
    test_loss = 0
    accuracy = 0
    # Training pass over the full training set
    for images, labels in trainloader:
        optimizer.zero_grad()
        op = model(images)
        loss = criterion(op, labels)
        train_loss += loss.item()
        loss.backward()
        optimizer.step()
    else:
        # The for/else clause runs once the training pass finishes:
        # evaluate on the test set with gradients disabled
        with torch.no_grad():
            model.eval()
            for images, labels in testloader:
                log_ps = model(images)
                prob = torch.exp(log_ps)
                # Pick the most likely class for each image and compare with the label
                top_probs, top_classes = prob.topk(1, dim=1)
                equals = labels == top_classes.view(labels.shape)
                accuracy += equals.type(torch.FloatTensor).mean()
                test_loss += criterion(log_ps, labels).item()
        model.train()
    print("Epoch: {}/{}.. ".format(e+1, epoch),
          "Training Loss: {:.3f}.. ".format(train_loss/len(trainloader)),
          "Test Loss: {:.3f}.. ".format(test_loss/len(testloader)),
          "Test Accuracy: {:.3f}".format(accuracy/len(testloader)))
    train_losses.append(train_loss/len(trainloader))
    test_losses.append(test_loss/len(testloader))
        
        
Epoch: 1/30..  Training Loss: 1.820..  Test Loss: 1.064..  Test Accuracy: 0.644
Epoch: 2/30..  Training Loss: 0.822..  Test Loss: 0.706..  Test Accuracy: 0.743
Epoch: 3/30..  Training Loss: 0.638..  Test Loss: 0.611..  Test Accuracy: 0.771
Epoch: 4/30..  Training Loss: 0.568..  Test Loss: 0.566..  Test Accuracy: 0.791
Epoch: 5/30..  Training Loss: 0.525..  Test Loss: 0.535..  Test Accuracy: 0.803
Epoch: 6/30..  Training Loss: 0.494..  Test Loss: 0.515..  Test Accuracy: 0.809
Epoch: 7/30..  Training Loss: 0.472..  Test Loss: 0.488..  Test Accuracy: 0.821
Epoch: 8/30..  Training Loss: 0.454..  Test Loss: 0.479..  Test Accuracy: 0.825
Epoch: 9/30..  Training Loss: 0.441..  Test Loss: 0.475..  Test Accuracy: 0.825
Epoch: 10/30..  Training Loss: 0.429..  Test Loss: 0.462..  Test Accuracy: 0.829
Epoch: 11/30..  Training Loss: 0.419..  Test Loss: 0.461..  Test Accuracy: 0.832
Epoch: 12/30..  Training Loss: 0.410..  Test Loss: 0.439..  Test Accuracy: 0.842
Epoch: 13/30..  Training Loss: 0.401..  Test Loss: 0.440..  Test Accuracy: 0.840
Epoch: 14/30..  Training Loss: 0.394..  Test Loss: 0.430..  Test Accuracy: 0.844
Epoch: 15/30..  Training Loss: 0.387..  Test Loss: 0.427..  Test Accuracy: 0.846
Epoch: 16/30..  Training Loss: 0.380..  Test Loss: 0.417..  Test Accuracy: 0.849
Epoch: 17/30..  Training Loss: 0.374..  Test Loss: 0.418..  Test Accuracy: 0.850
Epoch: 18/30..  Training Loss: 0.367..  Test Loss: 0.407..  Test Accuracy: 0.854
Epoch: 19/30..  Training Loss: 0.362..  Test Loss: 0.405..  Test Accuracy: 0.853
Epoch: 20/30..  Training Loss: 0.356..  Test Loss: 0.401..  Test Accuracy: 0.855
Epoch: 21/30..  Training Loss: 0.351..  Test Loss: 0.404..  Test Accuracy: 0.853
Epoch: 22/30..  Training Loss: 0.346..  Test Loss: 0.388..  Test Accuracy: 0.859
Epoch: 23/30..  Training Loss: 0.341..  Test Loss: 0.392..  Test Accuracy: 0.859
Epoch: 24/30..  Training Loss: 0.337..  Test Loss: 0.379..  Test Accuracy: 0.862
Epoch: 25/30..  Training Loss: 0.332..  Test Loss: 0.391..  Test Accuracy: 0.860
Epoch: 26/30..  Training Loss: 0.327..  Test Loss: 0.384..  Test Accuracy: 0.862
Epoch: 27/30..  Training Loss: 0.323..  Test Loss: 0.390..  Test Accuracy: 0.861
Epoch: 28/30..  Training Loss: 0.318..  Test Loss: 0.421..  Test Accuracy: 0.845
Epoch: 29/30..  Training Loss: 0.315..  Test Loss: 0.374..  Test Accuracy: 0.867
Epoch: 30/30..  Training Loss: 0.311..  Test Loss: 0.368..  Test Accuracy: 0.869
In [13]:
import matplotlib.pyplot as plt
%matplotlib inline
In [14]:
plt.plot(train_losses,label = "Train losses")
plt.plot(test_losses, label = "Test losses")
plt.legend()
Out[14]:
<matplotlib.legend.Legend at 0x7f2b81a64048>

4 Model with dropout

In [15]:
class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.l1 = nn.Linear(784, 256)
        self.l2 = nn.Linear(256, 128)
        self.l3 = nn.Linear(128, 64)
        self.l4 = nn.Linear(64, 10)

        # Randomly zero 20% of activations during training to regularize the network
        self.dropout = nn.Dropout(p=0.2)

    def forward(self, x):
        x = x.view(x.shape[0], -1)
        x = self.dropout(F.relu(self.l1(x)))
        x = self.dropout(F.relu(self.l2(x)))
        x = self.dropout(F.relu(self.l3(x)))
        # No dropout on the output layer
        x = F.log_softmax(self.l4(x), dim=1)
        return x
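
nn.Dropout is only active in training mode; model.eval() turns it off, which is why the evaluation block in the training loop switches modes. A small illustration (a sketch, not in the original notebook, using p=0.5 for visibility):

drop = nn.Dropout(p=0.5)
x = torch.ones(1, 8)
drop.train()
print(drop(x))   # roughly half the entries zeroed, survivors scaled by 1/(1-p) = 2
drop.eval()
print(drop(x))   # identity: all ones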
In [16]:
model = Model()
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(),lr=0.005)
In [17]:
epoch = 30

train_losses, test_losses = [],[]
for e in range(epoch):
    train_loss = 0
    test_loss = 0
    accuracy = 0
    for images, labels in trainloader:
        optimizer.zero_grad()
        op = model(images)
        loss = criterion(op, labels)
        train_loss += loss.item()
        loss.backward()
        optimizer.step()
    else:
        with torch.no_grad():
            model.eval()
            for images,labels in testloader:
                log_ps = model(images)
                prob = torch.exp(log_ps)
                top_probs, top_classes = prob.topk(1, dim=1)
                equals = labels == top_classes.view(labels.shape)
                accuracy += equals.type(torch.FloatTensor).mean()
                test_loss += criterion(log_ps, labels).item()
        model.train()
    print("Epoch: {}/{}.. ".format(e+1, epoch),
              "Training Loss: {:.3f}.. ".format(train_loss/len(trainloader)),
              "Test Loss: {:.3f}.. ".format(test_loss/len(testloader)),
              "Test Accuracy: {:.3f}".format(accuracy/len(testloader)))
    train_losses.append(train_loss/len(trainloader))
    test_losses.append(test_loss/len(testloader))
        
        
Epoch: 1/30..  Training Loss: 0.653..  Test Loss: 0.488..  Test Accuracy: 0.826
Epoch: 2/30..  Training Loss: 0.556..  Test Loss: 0.495..  Test Accuracy: 0.826
Epoch: 3/30..  Training Loss: 0.525..  Test Loss: 0.459..  Test Accuracy: 0.839
Epoch: 4/30..  Training Loss: 0.511..  Test Loss: 0.468..  Test Accuracy: 0.840
Epoch: 5/30..  Training Loss: 0.498..  Test Loss: 0.499..  Test Accuracy: 0.829
Epoch: 6/30..  Training Loss: 0.497..  Test Loss: 0.446..  Test Accuracy: 0.845
Epoch: 7/30..  Training Loss: 0.484..  Test Loss: 0.450..  Test Accuracy: 0.845
Epoch: 8/30..  Training Loss: 0.486..  Test Loss: 0.466..  Test Accuracy: 0.844
Epoch: 9/30..  Training Loss: 0.488..  Test Loss: 0.452..  Test Accuracy: 0.853
Epoch: 10/30..  Training Loss: 0.486..  Test Loss: 0.422..  Test Accuracy: 0.857
Epoch: 11/30..  Training Loss: 0.463..  Test Loss: 0.427..  Test Accuracy: 0.852
Epoch: 12/30..  Training Loss: 0.483..  Test Loss: 0.428..  Test Accuracy: 0.853
Epoch: 13/30..  Training Loss: 0.473..  Test Loss: 0.428..  Test Accuracy: 0.850
Epoch: 14/30..  Training Loss: 0.472..  Test Loss: 0.438..  Test Accuracy: 0.846
Epoch: 15/30..  Training Loss: 0.474..  Test Loss: 0.437..  Test Accuracy: 0.856
Epoch: 16/30..  Training Loss: 0.477..  Test Loss: 0.462..  Test Accuracy: 0.858
Epoch: 17/30..  Training Loss: 0.462..  Test Loss: 0.429..  Test Accuracy: 0.844
Epoch: 18/30..  Training Loss: 0.472..  Test Loss: 0.450..  Test Accuracy: 0.848
Epoch: 19/30..  Training Loss: 0.479..  Test Loss: 0.440..  Test Accuracy: 0.849
Epoch: 20/30..  Training Loss: 0.460..  Test Loss: 0.436..  Test Accuracy: 0.856
Epoch: 21/30..  Training Loss: 0.455..  Test Loss: 0.467..  Test Accuracy: 0.845
Epoch: 22/30..  Training Loss: 0.461..  Test Loss: 0.450..  Test Accuracy: 0.850
Epoch: 23/30..  Training Loss: 0.454..  Test Loss: 0.462..  Test Accuracy: 0.849
Epoch: 24/30..  Training Loss: 0.452..  Test Loss: 0.431..  Test Accuracy: 0.854
Epoch: 25/30..  Training Loss: 0.457..  Test Loss: 0.450..  Test Accuracy: 0.844
Epoch: 26/30..  Training Loss: 0.456..  Test Loss: 0.425..  Test Accuracy: 0.858
Epoch: 27/30..  Training Loss: 0.471..  Test Loss: 0.455..  Test Accuracy: 0.852
Epoch: 28/30..  Training Loss: 0.448..  Test Loss: 0.433..  Test Accuracy: 0.860
Epoch: 29/30..  Training Loss: 0.451..  Test Loss: 0.456..  Test Accuracy: 0.854
Epoch: 30/30..  Training Loss: 0.452..  Test Loss: 0.451..  Test Accuracy: 0.854
In [18]:
plt.plot(train_losses,label = "Train losses")
plt.plot(test_losses, label = "Test losses")
plt.legend()
Out[18]:
<matplotlib.legend.Legend at 0x7f2b81992ef0>
In [19]:
import numpy as np

5 Inference

In [20]:
def imshow(image, ax=None, title=None, normalize=True):
    """Imshow for Tensor."""
    if ax is None:
        fig, ax = plt.subplots()
    image = image.numpy().transpose((1, 2, 0))

    if normalize:
        mean = np.array([0.5, 0.5, 0.5])
        std = np.array([0.5, 0.5, 0.5])
        image = std * image + mean
        image = np.clip(image, 0, 1)

    ax.imshow(image)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['left'].set_visible(False)
    ax.spines['bottom'].set_visible(False)
    ax.tick_params(axis='both', length=0)
    ax.set_xticklabels('')
    ax.set_yticklabels('')
In [21]:
image,labels = next(iter(testloader))
In [22]:
labels
Out[22]:
tensor([9, 2, 1, 1, 6, 1, 4, 6, 5, 7, 4, 5, 7, 3, 4, 1, 2, 4, 8, 0, 2, 5, 7, 9,
        1, 4, 6, 0, 9, 3, 8, 8, 3, 3, 8, 0, 7, 5, 7, 9, 6, 1, 3, 7, 6, 7, 2, 1,
        2, 2, 4, 4, 5, 8, 2, 2, 8, 4, 8, 0, 7, 7, 8, 5])
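
These integer labels map to the ten Fashion-MNIST class names listed later in view_classify's title_order; a quick lookup sketch (not in the original notebook):

classes = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
           'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle Boot']
print(classes[labels[0].item()])   # label 9 -> 'Ankle Boot'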
In [23]:
imshow(image[0])
In [24]:
def view_classify(img, ps, version="MNIST", title = 0):
    ''' Function for viewing an image and its predicted classes. '''
    title_order = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
                   'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle Boot']
    ps = ps.data.numpy().squeeze()

    fig, (ax1, ax2) = plt.subplots(figsize=(6,9), ncols=2)
    ax1.imshow(img.resize_(1, 28, 28).numpy().squeeze())
    ax1.axis('off')
    ax2.barh(np.arange(10), ps)
    ax2.set_aspect(0.1)
    ax2.set_yticks(np.arange(10))
    if version == "MNIST":
        ax2.set_yticklabels(np.arange(10))
    elif version == "Fashion":
        ax2.set_yticklabels(title_order, size='small');
    ax2.set_title('Class Probability')
    ax2.set_xlim(0, 1.1)

    plt.tight_layout()
    plt.title("True: " + title_order[title])
In [25]:
def display_test(i):
    model.eval()
    dataiter = iter(testloader)
    # Use the built-in next(); the old dataiter.next() method was removed in newer PyTorch
    images, labels = next(dataiter)
    img = images[i]
    lbl = labels[i]
    with torch.no_grad():
        output = model(img)   # call the model directly instead of model.forward()
    ps = torch.exp(output)
    view_classify(img.view(1, 28, 28), ps, version='Fashion', title=lbl)
    
In [27]:
for i in range(10):
    r = np.random.randint(0,63)
    display_test(r)
