completed deep learning section

2019-07-24 00:09:38 +01:00
parent bb46088e76
commit 46660262e0
18 changed files with 7161 additions and 0 deletions

View File

@@ -0,0 +1,104 @@
import torch
from torch import nn
import torch.nn.functional as F
class Network(nn.Module):
def __init__(self, input_size, output_size, hidden_layers, drop_p=0.5):
''' Builds a feedforward network with arbitrary hidden layers.
Arguments
---------
input_size: integer, size of the input layer
output_size: integer, size of the output layer
            hidden_layers: list of integers, the sizes of the hidden layers
            drop_p: float, dropout probability applied after each hidden layer
        '''
super().__init__()
# Input to a hidden layer
self.hidden_layers = nn.ModuleList(
[nn.Linear(input_size, hidden_layers[0])])
# Add a variable number of more hidden layers
layer_sizes = zip(hidden_layers[:-1], hidden_layers[1:])
self.hidden_layers.extend([nn.Linear(h1, h2)
for h1, h2 in layer_sizes])
self.output = nn.Linear(hidden_layers[-1], output_size)
self.dropout = nn.Dropout(p=drop_p)
def forward(self, x):
''' Forward pass through the network, returns the output logits '''
        for layer in self.hidden_layers:
            x = F.relu(layer(x))
x = self.dropout(x)
x = self.output(x)
return F.log_softmax(x, dim=1)
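
# A quick illustration of the class above (sizes here are illustrative, not
# prescribed by the lesson): a 784-input, 10-output network with three hidden
# layers would be built and called like so:
#   model = Network(784, 10, [512, 256, 128], drop_p=0.5)
#   log_ps = model(torch.randn(64, 784))  # log-probabilities, shape (64, 10)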
def validation(model, testloader, criterion):
accuracy = 0
test_loss = 0
for images, labels in testloader:
        # Flatten images into a 784-long vector (view avoids the deprecated
        # in-place resize_)
        images = images.view(images.shape[0], -1)
        output = model(images)
test_loss += criterion(output, labels).item()
# Calculating the accuracy
# Model's output is log-softmax, take exponential to get the probabilities
ps = torch.exp(output)
# Class with highest probability is our predicted class, compare with true label
equality = (labels.data == ps.max(1)[1])
# Accuracy is number of correct predictions divided by all predictions, just take the mean
        accuracy += equality.float().mean()
return test_loss, accuracy
def train(model, trainloader, testloader, criterion, optimizer, epochs=5, print_every=40):
steps = 0
running_loss = 0
for e in range(epochs):
# Model in training mode, dropout is on
model.train()
for images, labels in trainloader:
steps += 1
            # Flatten images into a 784-long vector
            images = images.view(images.shape[0], -1)
            optimizer.zero_grad()
            output = model(images)
loss = criterion(output, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if steps % print_every == 0:
# Model in inference mode, dropout is off
model.eval()
# Turn off gradients for validation, will speed up inference
with torch.no_grad():
test_loss, accuracy = validation(
model, testloader, criterion)
print("Epoch: {}/{}.. ".format(e + 1, epochs),
"Training Loss: {:.3f}.. ".format(
running_loss / print_every),
"Test Loss: {:.3f}.. ".format(
test_loss / len(testloader)),
"Test Accuracy: {:.3f}".format(accuracy / len(testloader)))
running_loss = 0
# Make sure dropout and grads are on for training
model.train()
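
# --- Usage sketch (not part of the original commit): a minimal example of
# wiring the pieces above together on MNIST. The dataset path, hidden sizes
# and hyperparameters below are illustrative assumptions. ---
if __name__ == '__main__':
    from torch import optim
    from torchvision import datasets, transforms

    transform = transforms.Compose([transforms.ToTensor(),
                                    transforms.Normalize((0.5,), (0.5,))])
    trainset = datasets.MNIST('.pytorch/MNIST_data/', download=True,
                              train=True, transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=64,
                                              shuffle=True)
    testset = datasets.MNIST('.pytorch/MNIST_data/', download=True,
                             train=False, transform=transform)
    testloader = torch.utils.data.DataLoader(testset, batch_size=64,
                                             shuffle=True)

    model = Network(784, 10, [512, 256, 128], drop_p=0.5)
    criterion = nn.NLLLoss()  # pairs with the log-softmax output
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    train(model, trainloader, testloader, criterion, optimizer, epochs=2)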

View File

@@ -0,0 +1,130 @@
import torch
from torchvision import datasets, transforms
from torch import nn, optim
import torch.nn.functional as F
import matplotlib.pyplot as plt
# Define a transform to normalize the data (Fashion-MNIST images are
# single-channel, so Normalize takes one mean and one std)
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5,), (0.5,))])
# Download and load the training data
trainset = datasets.FashionMNIST(
'.pytorch/F_MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=64, shuffle=True)
# Download and load the test data
testset = datasets.FashionMNIST(
'.pytorch/F_MNIST_data/', download=True, train=False,
transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)
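# Each batch from these loaders is a tensor of (up to) 64 single-channel
# 28x28 images, shape (64, 1, 28, 28), plus a (64,) tensor of integer labels.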
class Classifier(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(784, 256)
self.fc2 = nn.Linear(256, 128)
self.fc3 = nn.Linear(128, 64)
self.fc4 = nn.Linear(64, 10)
def forward(self, x):
# make sure input tensor is flattened
x = x.view(x.shape[0], -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = F.log_softmax(self.fc4(x), dim=1)
return x
model = Classifier()
images, labels = next(iter(testloader))
# Get the class probabilities
ps = torch.exp(model(images))
# Make sure the shape is appropriate, we should get 10 class probabilities for
# 64 examples
print(ps.shape)
top_p, top_class = ps.topk(1, dim=1)
# Look at the most likely classes for the first 10 examples
print(top_class[:10, :])
equals = top_class == labels.view(*top_class.shape)
accuracy = equals.float().mean()
print(f'Accuracy: {accuracy.item()*100:.2f}%')
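# With random initial weights the accuracy above should sit near chance
# (about 10% for 10 classes); the training loop below is what lifts it.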
# Train the model
model = Classifier()
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(), lr=0.003)
epochs = 30
steps = 0
trainLosses, testLosses = [], []
for e in range(epochs):
runningLoss = 0
for images, labels in trainloader:
optimizer.zero_grad()
log_ps = model(images)
loss = criterion(log_ps, labels)
loss.backward()
optimizer.step()
runningLoss += loss.item()
    else:
        # The for/else runs after the epoch's batches are exhausted:
        # one validation pass over the test set per epoch
testLoss = 0
accuracy = 0
# Turn off gradients for validation step
with torch.no_grad():
for images, labels in testloader:
# Get the output
log_ps = model(images)
                # Accumulate the loss as a Python float
                testLoss += criterion(log_ps, labels).item()
# Get the probabilities
ps = torch.exp(log_ps)
# Get the most likely class for each prediction
top_p, top_class = ps.topk(1, dim=1)
# Check if the predictions match the actual label
equals = top_class == labels.view(*top_class.shape)
                # Update accuracy with the fraction of correct predictions
                accuracy += equals.float().mean().item()
# Update train loss
trainLosses.append(runningLoss / len(trainloader))
# Update test loss
testLosses.append(testLoss / len(testloader))
# Print output
print(f'Epoch: {e+1} out of {epochs}')
print(f'Training Loss: {runningLoss/len(trainloader):.3f}')
print(f'Test Loss: {testLoss/len(testloader):.3f}')
print(f'Test Accuracy: {accuracy/len(testloader):.3f}')
print()
plt.plot(trainLosses, label='Training loss')
plt.plot(testLosses, label='Test loss')
plt.legend(frameon=False)
plt.show()
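
# --- Optional follow-on (a sketch, not in the original commit): persist the
# trained weights and restore them into a fresh model. The filename is an
# illustrative assumption. ---
torch.save(model.state_dict(), 'fashion_classifier.pth')

restored = Classifier()
restored.load_state_dict(torch.load('fashion_classifier.pth'))
restored.eval()  # no dropout here, but eval() is good inference hygiene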