CS194 Project 4

Mandi Zhao

SID 3032880866

Note: I worked in Google Colab; this is the downloaded .ipynb.

In [0]:
import torch
import torchvision
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter # TensorBoard support
import torchvision.transforms as transforms
import pandas as pd
import json
from IPython.display import clear_output

transform = transforms.Compose([transforms.ToTensor()])
trainset = torchvision.datasets.FashionMNIST(root="FashionMNIST", train=True, 
                                             transform=transform, target_transform=None, download=True)
train_set, val_set = torch.utils.data.random_split(trainset, [50000, 10000])
trainloader = torch.utils.data.DataLoader(train_set, batch_size=4,
                                          shuffle=True, num_workers=2)
valloader = torch.utils.data.DataLoader(val_set, batch_size=4,
                                          shuffle=True, num_workers=2)
testset = torchvision.datasets.FashionMNIST(root="FashionMNIST", train=False, 
                                             transform=transform, target_transform=None, download=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
                                         shuffle=False, num_workers=2)

Part 1: Classification

In [0]:
from google.colab import drive
drive.mount('/content/gdrive')
Drive already mounted at /content/gdrive; to attempt to forcibly remount, call drive.mount("/content/gdrive", force_remount=True).

Below we show some images from the dataset:

In [0]:
import matplotlib.pyplot as plt
import numpy as np

def imshow(img):
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    plt.show()
# get some random training images
dataiter = iter(trainloader)
images, labels = next(dataiter)
# show images
imshow(torchvision.utils.make_grid(images))
In [0]:
def get_acc(net, loader):
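  # evaluate classification accuracy (%) of net over every batch in loader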
  correct, total = 0, 0
  with torch.no_grad():
    for data in loader:
        images, labels = data
        outputs = net(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
  print('Accuracy: %d %%' % (100 * correct / total))
  return 100 * correct / total
In [0]:
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(32, 32, 5)
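        # input 1x28x28 -> conv1 (5x5): 32x24x24 -> pool: 32x12x12
        # -> conv2 (5x5): 32x8x8 -> pool: 32x4x4, so 32 * 4 * 4 = 512 flat features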
        self.fc1 = nn.Linear(512, 200)
        self.fc2 = nn.Linear(200, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
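        # flatten all non-batch dimensions (equivalently: x = x.view(x.size(0), -1))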
        x = x.reshape(-1, self.num_flat_features(x))
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x
    def num_flat_features(self, x):
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features

net = Net()
loss_fn = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.001)
In [0]:
epochs = 10
# accumulate histories across all epochs (initializing these inside the epoch
# loop would discard everything but the final epoch when plotting later)
loss_hist, losses = [], []
train_acc_hist, val_acc_hist = [], []
for epoch in range(epochs):  # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward + backward + optimize
        outputs = net(inputs)
        loss = loss_fn(outputs, labels)
        loss.backward()
        optimizer.step()
        # print statistics
        running_loss += loss.item()
        loss_hist.append(loss.item())
        if i % 2000 == 1999:    # print every 2000 mini-batches
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 2000))
            losses.append(running_loss)
            running_loss = 0.0
            ##### get train acc ##########
            train_acc_hist.append(get_acc(net, trainloader))
            ##############################
            ##### get validation acc #####
            val_acc_hist.append(get_acc(net, valloader))

print('Finished Training')
[1,  2000] loss: 0.724
Accuracy: 79 %
Accuracy: 79 %
[1,  4000] loss: 0.476
Accuracy: 85 %
Accuracy: 84 %
[1,  6000] loss: 0.409
Accuracy: 85 %
Accuracy: 85 %
[1,  8000] loss: 0.389
Accuracy: 85 %
Accuracy: 84 %
[1, 10000] loss: 0.375
Accuracy: 87 %
Accuracy: 87 %
[1, 12000] loss: 0.352
Accuracy: 88 %
Accuracy: 87 %
[2,  2000] loss: 0.326
Accuracy: 88 %
Accuracy: 87 %
[2,  4000] loss: 0.327
Accuracy: 88 %
Accuracy: 87 %
[2,  6000] loss: 0.311
Accuracy: 89 %
Accuracy: 88 %
[2,  8000] loss: 0.309
Accuracy: 88 %
Accuracy: 87 %
[2, 10000] loss: 0.310
Accuracy: 89 %
Accuracy: 88 %
[2, 12000] loss: 0.293
Accuracy: 89 %
Accuracy: 88 %
[3,  2000] loss: 0.267
Accuracy: 89 %
Accuracy: 88 %
[3,  4000] loss: 0.285
Accuracy: 90 %
Accuracy: 89 %
[3,  6000] loss: 0.290
Accuracy: 90 %
Accuracy: 88 %
[3,  8000] loss: 0.284
Accuracy: 89 %
Accuracy: 88 %
[3, 10000] loss: 0.282
Accuracy: 88 %
Accuracy: 86 %
[3, 12000] loss: 0.270
Accuracy: 90 %
Accuracy: 89 %
[4,  2000] loss: 0.259
Accuracy: 90 %
Accuracy: 89 %
[4,  4000] loss: 0.250
Accuracy: 91 %
Accuracy: 89 %
[4,  6000] loss: 0.259
Accuracy: 90 %
Accuracy: 89 %
[4,  8000] loss: 0.264
Accuracy: 91 %
Accuracy: 89 %
[4, 10000] loss: 0.253
Accuracy: 90 %
Accuracy: 88 %
[4, 12000] loss: 0.265
Accuracy: 91 %
Accuracy: 89 %
[5,  2000] loss: 0.220
Accuracy: 90 %
Accuracy: 88 %
[5,  4000] loss: 0.237
Accuracy: 92 %
Accuracy: 90 %
[5,  6000] loss: 0.244
Accuracy: 91 %
Accuracy: 89 %
[5,  8000] loss: 0.252
Accuracy: 91 %
Accuracy: 89 %
[5, 10000] loss: 0.248
Accuracy: 91 %
Accuracy: 88 %
[5, 12000] loss: 0.245
Accuracy: 92 %
Accuracy: 89 %
[6,  2000] loss: 0.218
Accuracy: 91 %
Accuracy: 88 %
[6,  4000] loss: 0.213
Accuracy: 91 %
Accuracy: 89 %
[6,  6000] loss: 0.237
Accuracy: 92 %
Accuracy: 89 %
[6,  8000] loss: 0.232
Accuracy: 92 %
Accuracy: 89 %
[6, 10000] loss: 0.235
Accuracy: 92 %
Accuracy: 89 %
[6, 12000] loss: 0.238
Accuracy: 92 %
Accuracy: 89 %
[7,  2000] loss: 0.209
Accuracy: 90 %
Accuracy: 87 %
[7,  4000] loss: 0.207
Accuracy: 92 %
Accuracy: 89 %
[7,  6000] loss: 0.200
Accuracy: 92 %
Accuracy: 89 %
[7,  8000] loss: 0.226
Accuracy: 92 %
Accuracy: 89 %
[7, 10000] loss: 0.235
Accuracy: 92 %
Accuracy: 89 %
[7, 12000] loss: 0.217
Accuracy: 93 %
Accuracy: 90 %
[8,  2000] loss: 0.200
Accuracy: 93 %
Accuracy: 90 %
[8,  4000] loss: 0.201
Accuracy: 91 %
Accuracy: 89 %
[8,  6000] loss: 0.225
Accuracy: 93 %
Accuracy: 89 %
[8,  8000] loss: 0.207
Accuracy: 93 %
Accuracy: 90 %
[8, 10000] loss: 0.195
Accuracy: 92 %
Accuracy: 90 %
[8, 12000] loss: 0.211
Accuracy: 92 %
Accuracy: 89 %
[9,  2000] loss: 0.183
Accuracy: 93 %
Accuracy: 89 %
[9,  4000] loss: 0.190
Accuracy: 93 %
Accuracy: 89 %
[9,  6000] loss: 0.193
Accuracy: 91 %
Accuracy: 89 %
[9,  8000] loss: 0.194
Accuracy: 92 %
Accuracy: 88 %
[9, 10000] loss: 0.204
Accuracy: 93 %
Accuracy: 90 %
[9, 12000] loss: 0.210
Accuracy: 93 %
Accuracy: 89 %
[10,  2000] loss: 0.185
Accuracy: 93 %
Accuracy: 89 %
[10,  4000] loss: 0.169
Accuracy: 94 %
Accuracy: 90 %
[10,  6000] loss: 0.178
Accuracy: 92 %
Accuracy: 89 %
[10,  8000] loss: 0.206
Accuracy: 93 %
Accuracy: 89 %
[10, 10000] loss: 0.195
Accuracy: 93 %
Accuracy: 89 %
[10, 12000] loss: 0.192
Accuracy: 93 %
Accuracy: 90 %
Finished Training
In [0]:
model_save_name = '1_classifier.pt'
path = F"/content/gdrive/My Drive/{model_save_name}" 
torch.save(net.state_dict(), path)
In [0]:
# net.load_state_dict(torch.load(path))  # uncomment to reload the saved weights
Out[0]:
<All keys matched successfully>
In [0]:
import matplotlib.pyplot as plt
plt.style.use("seaborn-darkgrid")
fig, axs = plt.subplots(1, 3, figsize=(15, 5))
axs[0].plot(train_acc_hist)
axs[0].set_title("Accuracy history on training set")
axs[1].plot(val_acc_hist)
axs[1].set_title("Accuracy history on validation set")
axs[2].plot([l / 2000 for l in losses])
axs[2].set_title("Loss history during training")
plotname = 'training.jpg'
path = F"/content/gdrive/My Drive/{plotname}" 
plt.savefig(path)
In [0]:
correct, total = 0, 0
with torch.no_grad():
    for data in testloader:
        images, labels = data
        outputs = net(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

print('Accuracy of the network on the test images: %d %%' % (
    100 * correct / total))
Accuracy of the network on the test images: 88 %

In [0]:
def class_acc(class_idx, net, testloader, b_size):
  corr, total = 0, 0
  with torch.no_grad():
    for data in testloader:
      images, labels = data
      outputs = net(images)
      _, predicted = torch.max(outputs.data, 1)
      for b in range(labels.size(0)):  # handles a final batch smaller than b_size
        if labels[b] == class_idx:
          total += 1
          if predicted[b] == class_idx:
            corr += 1
    print("Class accuracy on class " + str(class_idx) + ": ", (100 * corr / total))
    return 100 * corr / total

Below we compute the per-class accuracy of the network on the test set:

In [0]:
class_accs = [class_acc(k, net, testloader, 4) for k in range(10)]
plt.plot(class_accs)
Class accuracy on class 0:  85.6
Class accuracy on class 1:  97.7
Class accuracy on class 2:  77.3
Class accuracy on class 3:  89.9
Class accuracy on class 4:  85.9
Class accuracy on class 5:  97.4
Class accuracy on class 6:  64.9
Class accuracy on class 7:  96.4
Class accuracy on class 8:  98.5
Class accuracy on class 9:  95.9
Out[0]:
[<matplotlib.lines.Line2D at 0x7f5168a9d390>]

From the per-class results above, class 8 has the highest test accuracy (~98%), while class 6 reaches only ~65%. Below we collect some correctly and incorrectly classified test images:
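For reference, the FashionMNIST label names (as documented in torchvision) are:

    classes = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']

so class 8 is 'Bag' and class 6 is 'Shirt'.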

In [0]:
success, fail, false_pred = [], [], []
b_size = 4
with torch.no_grad():
    for data in testloader:
      images, labels = data
      outputs = net(images)
      _, predicted = torch.max(outputs.data, 1)
      for b in range(b_size):
        if labels[b] == 8 and predicted[b] == 8:
          success.append(images[b])
        if labels[b] == 6 and predicted[b] != 6:
          fail.append(images[b])
          false_pred.append(predicted[b])
      if len(success) > 4 and len(fail) > 4:
        break

Below are some images that the network correctly classifies as bags (class 8):

In [0]:
imshow(torchvision.utils.make_grid(success))

Below we can see that the network fails to recognize these images as class 6 (shirts), predicting classes 4, 3, 4, 2, and 0 instead:

In [0]:
imshow(torchvision.utils.make_grid(fail))
print([pred.numpy() for pred in false_pred])
[array(4), array(3), array(4), array(2), array(0)]
In [0]:
plt.plot(class_accs)  # re-draw first: calling savefig in a fresh cell would otherwise write an empty figure
plotname = 'class_acc.jpg'
path = F"/content/gdrive/My Drive/{plotname}"
plt.savefig(path)

Below we visualize the 32 filters in the first conv layer of the network:

In [0]:
filters = net.conv1.weight  # avoid shadowing the builtin `filter`
In [0]:
f = filters.detach().numpy()
In [0]:
f.shape
Out[0]:
(32, 1, 5, 5)
In [0]:
fig, axs = plt.subplots(1, 8, figsize=(24, 3))
for i in range(8):
  axs[i].imshow(f[i, 0,:])
plotname = 'filters8.jpg'
path = F"/content/gdrive/My Drive/{plotname}" 
plt.savefig(path)
In [0]:
fig, axs = plt.subplots(1, 8, figsize=(24, 3))
for i in range(8):
  axs[i].imshow(f[i+8,0,:])
plotname = 'filters16.jpg'
path = F"/content/gdrive/My Drive/{plotname}" 
plt.savefig(path)
In [0]:
fig, axs = plt.subplots(1, 8, figsize=(24, 3))
for i in range(8):
  axs[i].imshow(f[i+16,0,:])
plotname = 'filters24.jpg'
path = F"/content/gdrive/My Drive/{plotname}" 
plt.savefig(path)
In [0]:
fig, axs = plt.subplots(1, 8, figsize=(24, 3))
for i in range(8):
  axs[i].imshow(f[i+24,0,:])
plotname = 'filters32.jpg'
path = F"/content/gdrive/My Drive/{plotname}" 
plt.savefig(path)
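The four cells above could equivalently be written as one loop; a minimal sketch (the output filename is my choice):

    fig, axs = plt.subplots(4, 8, figsize=(24, 12))
    for i in range(32):
        axs[i // 8][i % 8].imshow(f[i, 0])
    plt.savefig(F"/content/gdrive/My Drive/filters_all.jpg")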

Part 2: Segmentation

My network architecture:

    self.layers = nn.Sequential(

        nn.Conv2d(3, 64, 3, padding=3),
        nn.ReLU(inplace=True),

        nn.Conv2d(64, 64, 3, padding=1),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(2),

        nn.ConvTranspose2d(64, 64, 2, stride=2, padding=0),
        nn.Conv2d(64, 128, 3, padding=1),
        nn.ReLU(inplace=True),

        nn.Conv2d(128, 128, 3, padding=1),
        nn.MaxPool2d(2),

        nn.ConvTranspose2d(128, 128, 2, stride=2, padding=0),
        nn.Conv2d(128, self.n_class, 5, padding=0),
    )
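Since only the Sequential is listed above, here is a minimal sketch of how it could be wrapped into a full module; the class name SegNet, the forward method, and n_class=5 (suggested by the five per-class AP values in the log below) are my assumptions, and the actual train.py may differ. Note the padding bookkeeping: the initial padding=3 conv grows each spatial dimension by 4, and the final unpadded 5x5 conv shrinks it back, so the output label map matches the input resolution (for even input sizes).

    import torch
    import torch.nn as nn

    class SegNet(nn.Module):  # hypothetical wrapper around the Sequential above
        def __init__(self, n_class=5):  # n_class=5 assumed from the five AP values below
            super(SegNet, self).__init__()
            self.n_class = n_class
            self.layers = nn.Sequential(
                nn.Conv2d(3, 64, 3, padding=3),   # H x W -> (H+4) x (W+4)
                nn.ReLU(inplace=True),
                nn.Conv2d(64, 64, 3, padding=1),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(2),                  # spatial dims halved
                nn.ConvTranspose2d(64, 64, 2, stride=2, padding=0),  # doubled back
                nn.Conv2d(64, 128, 3, padding=1),
                nn.ReLU(inplace=True),
                nn.Conv2d(128, 128, 3, padding=1),
                nn.MaxPool2d(2),
                nn.ConvTranspose2d(128, 128, 2, stride=2, padding=0),
                nn.Conv2d(128, self.n_class, 5, padding=0),  # unpadded 5x5: back to H x W
            )

        def forward(self, x):
            return self.layers(x)

    # sanity check: SegNet()(torch.zeros(1, 3, 64, 64)).shape == (1, 5, 64, 64)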
In [0]:
!unzip gdrive/'My Drive'/part2.zip
In [74]:
!ls  ### note: this is the Colab working directory
FashionMNIST  gdrive  __MACOSX	part2  sample_data
In [82]:
!python part2/train.py
load train dataset start
    from: part2/starter_set/
    range: [0, 800)
load dataset done
load test_dev dataset start
    from: part2/starter_set/
    range: [0, 113)
load dataset done
load test_dev dataset start
    from: part2/starter_set/
    range: [0, 113)
load dataset done

Start training
-----------------Epoch = 1-----------------
100% 50/50 [00:29<00:00,  1.69it/s]
[epoch 1] loss: 1.251 elapsed time 29.632
load train dataset start
    from: part2/starter_set/
    range: [800, 906)
load dataset done
100% 7/7 [00:02<00:00,  2.74it/s]
1.214173470224653
-----------------Epoch = 2-----------------
100% 50/50 [00:29<00:00,  1.71it/s]
[epoch 2] loss: 1.181 elapsed time 29.171
load train dataset start
    from: part2/starter_set/
    range: [800, 906)
load dataset done
100% 7/7 [00:02<00:00,  2.81it/s]
1.105357391493661
-----------------Epoch = 3-----------------
100% 50/50 [00:29<00:00,  1.72it/s]
[epoch 3] loss: 1.161 elapsed time 29.035
load train dataset start
    from: part2/starter_set/
    range: [800, 906)
load dataset done
100% 7/7 [00:02<00:00,  2.84it/s]
1.0402773363249642
-----------------Epoch = 4-----------------
100% 50/50 [00:29<00:00,  1.72it/s]
[epoch 4] loss: 1.131 elapsed time 29.109
load train dataset start
    from: part2/starter_set/
    range: [800, 906)
load dataset done
100% 7/7 [00:02<00:00,  2.85it/s]
1.0134815573692322
-----------------Epoch = 5-----------------
100% 50/50 [00:28<00:00,  1.73it/s]
[epoch 5] loss: 1.119 elapsed time 28.940
load train dataset start
    from: part2/starter_set/
    range: [800, 906)
load dataset done
100% 7/7 [00:02<00:00,  2.85it/s]
0.9960261327879769
-----------------Epoch = 6-----------------
100% 50/50 [00:28<00:00,  1.73it/s]
[epoch 6] loss: 1.107 elapsed time 28.944
load train dataset start
    from: part2/starter_set/
    range: [800, 906)
load dataset done
100% 7/7 [00:02<00:00,  2.87it/s]
0.9855861152921405
-----------------Epoch = 7-----------------
100% 50/50 [00:29<00:00,  1.72it/s]
[epoch 7] loss: 1.106 elapsed time 29.028
load train dataset start
    from: part2/starter_set/
    range: [800, 906)
load dataset done
100% 7/7 [00:02<00:00,  2.83it/s]
0.9797338843345642
-----------------Epoch = 8-----------------
100% 50/50 [00:29<00:00,  1.71it/s]
[epoch 8] loss: 1.095 elapsed time 29.193
load train dataset start
    from: part2/starter_set/
    range: [800, 906)
load dataset done
100% 7/7 [00:02<00:00,  2.76it/s]
0.965831824711391
-----------------Epoch = 9-----------------
100% 50/50 [00:29<00:00,  1.72it/s]
[epoch 9] loss: 1.067 elapsed time 29.143
load train dataset start
    from: part2/starter_set/
    range: [800, 906)
load dataset done
100% 7/7 [00:02<00:00,  2.79it/s]
0.9399185010365078
-----------------Epoch = 10-----------------
100% 50/50 [00:29<00:00,  1.71it/s]
[epoch 10] loss: 1.060 elapsed time 29.296
load train dataset start
    from: part2/starter_set/
    range: [800, 906)
load dataset done
100% 7/7 [00:02<00:00,  2.74it/s]
0.9179572207587106
-----------------Epoch = 11-----------------
100% 50/50 [00:29<00:00,  1.70it/s]
[epoch 11] loss: 1.032 elapsed time 29.334
load train dataset start
    from: part2/starter_set/
    range: [800, 906)
load dataset done
100% 7/7 [00:02<00:00,  2.77it/s]
0.9002114193780082
-----------------Epoch = 12-----------------
100% 50/50 [00:29<00:00,  1.69it/s]
[epoch 12] loss: 1.023 elapsed time 29.513
load train dataset start
    from: part2/starter_set/
    range: [800, 906)
load dataset done
100% 7/7 [00:02<00:00,  2.78it/s]
0.8827240296772548
-----------------Epoch = 13-----------------
100% 50/50 [00:29<00:00,  1.69it/s]
[epoch 13] loss: 1.005 elapsed time 29.530
load train dataset start
    from: part2/starter_set/
    range: [800, 906)
load dataset done
100% 7/7 [00:02<00:00,  2.75it/s]
0.8685245258467538
-----------------Epoch = 14-----------------
100% 50/50 [00:29<00:00,  1.70it/s]
[epoch 14] loss: 0.989 elapsed time 29.380
load train dataset start
    from: part2/starter_set/
    range: [800, 906)
load dataset done
100% 7/7 [00:02<00:00,  2.76it/s]
0.8638482008661542
-----------------Epoch = 15-----------------
100% 50/50 [00:29<00:00,  1.70it/s]
[epoch 15] loss: 0.981 elapsed time 29.485
load train dataset start
    from: part2/starter_set/
    range: [800, 906)
load dataset done
100% 7/7 [00:02<00:00,  2.74it/s]
0.8585308023861477
-----------------Epoch = 16-----------------
100% 50/50 [00:29<00:00,  1.69it/s]
[epoch 16] loss: 0.973 elapsed time 29.526
load train dataset start
    from: part2/starter_set/
    range: [800, 906)
load dataset done
100% 7/7 [00:02<00:00,  2.76it/s]
0.8507117458752224
-----------------Epoch = 17-----------------
100% 50/50 [00:29<00:00,  1.69it/s]
[epoch 17] loss: 0.967 elapsed time 29.552
load train dataset start
    from: part2/starter_set/
    range: [800, 906)
load dataset done
100% 7/7 [00:02<00:00,  2.69it/s]
0.8395993624414716
-----------------Epoch = 18-----------------
100% 50/50 [00:29<00:00,  1.70it/s]
[epoch 18] loss: 0.967 elapsed time 29.464
load train dataset start
    from: part2/starter_set/
    range: [800, 906)
load dataset done
100% 7/7 [00:02<00:00,  2.76it/s]
0.836676938193185
-----------------Epoch = 19-----------------
100% 50/50 [00:29<00:00,  1.70it/s]
[epoch 19] loss: 0.964 elapsed time 29.334
load train dataset start
    from: part2/starter_set/
    range: [800, 906)
load dataset done
100% 7/7 [00:02<00:00,  2.79it/s]
0.8319266949381147
-----------------Epoch = 20-----------------
100% 50/50 [00:29<00:00,  1.72it/s]
[epoch 20] loss: 0.958 elapsed time 29.141
load train dataset start
    from: part2/starter_set/
    range: [800, 906)
load dataset done
100% 7/7 [00:02<00:00,  2.75it/s]
0.8238045062337603
-----------------Epoch = 21-----------------
100% 50/50 [00:28<00:00,  1.73it/s]
[epoch 21] loss: 0.949 elapsed time 28.977
load train dataset start
    from: part2/starter_set/
    range: [800, 906)
load dataset done
100% 7/7 [00:02<00:00,  2.81it/s]
0.8217411381857735
-----------------Epoch = 22-----------------
100% 50/50 [00:28<00:00,  1.73it/s]
[epoch 22] loss: 0.945 elapsed time 28.987
load train dataset start
    from: part2/starter_set/
    range: [800, 906)
load dataset done
100% 7/7 [00:02<00:00,  2.83it/s]
0.8163694483893258

Finished Training, Testing on test set
100% 8/8 [00:02<00:00,  3.00it/s]
0.8803454414010048

Generating Unlabeled Result
100% 113/113 [00:10<00:00, 10.83it/s]
100% 8/8 [00:01<00:00,  5.49it/s]
AP = 0.6398084168547654
AP = 0.7598914526856534
AP = 0.12780097593809658
AP = 0.7694882625129862
AP = 0.3478574926158466
In [89]:
avg_AP = (0.6398084168547654+0.7598914526856534+0.12780097593809658
          +0.7694882625129862+0.3478574926158466)/5
print("Average AP is: ", avg_AP)
Average AP is:  0.5289693201214697
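Equivalently, with numpy (the AP values copied from the log above):

    import numpy as np
    aps = [0.6398084168547654, 0.7598914526856534, 0.12780097593809658,
           0.7694882625129862, 0.3478574926158466]
    print("Average AP is: ", np.mean(aps))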
In [0]:
train_loss = [1.251, 1.181, 1.161, 1.131, 1.119, 1.107, 1.106, 1.095, 1.067, 1.060, 1.032, 1.023, 1.005, 0.989, 0.981, 0.973, 0.967, 0.967, 0.964, 0.958, 0.949, 0.945]
val_loss = [1.214173470224653, 1.105357391493661, 1.0402773363249642, 1.0134815573692322, 0.9960261327879769, 0.9855861152921405, 0.9797338843345642, 0.965831824711391, 0.9399185010365078, 0.9179572207587106, 0.9002114193780082, 0.8827240296772548, 0.8685245258467538, 0.8638482008661542, 0.8585308023861477, 0.8507117458752224, 0.8395993624414716, 0.836676938193185, 0.8319266949381147, 0.8238045062337603, 0.8217411381857735, 0.8163694483893258]  # 22 epochs; the final 0.8803... in the log is the test-set loss, not a validation loss
In [91]:
plt.plot(train_loss)
plt.plot(val_loss)
plt.legend(["Loss on trainset", "Loss on validation set"])
Out[91]:
<matplotlib.legend.Legend at 0x7f5168d2b240>

Below we show the network's output on a test image, the ground-truth labels, and the original image:

In [121]:
out = plt.imread("part2/output_test/y15.png")
truth = plt.imread("part2/starter_set/test_dev/eecs442_0015.png")
ori = plt.imread("part2/output_test/x15.png")
fig, axs = plt.subplots(1, 3, figsize=(15, 3))
axs[0].imshow(out)
axs[1].imshow(truth)
axs[2].imshow(ori)
Out[121]:
<matplotlib.image.AxesImage at 0x7f51437417f0>

Below I added my own image to test_dev and ran the network to produce output labels:

In [110]:
!python part2/train.py
load test_dev dataset start
    from: part2/starter_set/
    range: [114, 115)
load dataset done
100% 1/1 [00:00<00:00, 10.91it/s]
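The single-image inference inside train.py presumably amounts to something like the following sketch; the filename, checkpoint path, colormap, and the SegNet class from the architecture sketch above are all my assumptions:

    import torch
    import matplotlib.pyplot as plt

    img = plt.imread("part2/starter_set/test_dev/my_image.png")[:, :, :3]  # assumed RGB PNG in [0, 1]
    x = torch.from_numpy(img).permute(2, 0, 1).unsqueeze(0).float()  # 1 x 3 x H x W; H, W should be even

    net = SegNet()  # hypothetical class from the sketch above
    # net.load_state_dict(torch.load("part2/checkpoint.pt"))  # assumed checkpoint path
    with torch.no_grad():
        pred = net(x).argmax(dim=1)[0]  # per-pixel class indices, H x W

    plt.imsave("part2/y0.png", pred.numpy(), cmap="tab10")  # colorize the label map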
In [122]:
out = plt.imread("part2/y0.png")
ori = plt.imread("part2/x0.png")  # the input image; no ground truth exists for my own photo
fig, axs = plt.subplots(1, 2)
axs[0].imshow(out)
axs[1].imshow(ori)
Out[122]:
<matplotlib.image.AxesImage at 0x7f5143689160>