# Import the libraries we need for this lab
import torch
import torch.nn as nn
from torch import sigmoid
import matplotlib.pylab as plt
import numpy as np
torch.manual_seed(0)
Simple One Hidden Layer Neural Network
Objective
- How to create a simple neural network in PyTorch.
Table of Contents
In this lab, you will use a one-hidden-layer neural network to classify data that is not linearly separable in 1-D.
- Neural Network Module and Training Function
- Make Some Data
- Define the Neural Network, Criterion Function, Optimizer, and Train the Model
Estimated Time Needed: 25 min
Preparation
We'll need the libraries imported above. The following helper function is used for plotting the model:
# The function for plotting the model
def PlotStuff(X, Y, model, epoch, leg=True):
    plt.plot(X.numpy(), model(X).detach().numpy(), label=('epoch ' + str(epoch)))
    plt.plot(X.numpy(), Y.numpy(), 'r')
    plt.xlabel('x')
    if leg == True:
        plt.legend()
    else:
        pass
Neural Network Module and Training Function
Define the activations and the outputs of the linear layers as attributes of the model so they can be inspected during training. Note that this is not good practice.
# Define the class Net
class Net(nn.Module):
    # Constructor
    def __init__(self, D_in, H, D_out):
        super(Net, self).__init__()
        # Hidden layer
        self.linear1 = nn.Linear(D_in, H)
        # Output layer
        self.linear2 = nn.Linear(H, D_out)
        # Store the intermediate values as attributes; this is not good practice
        self.a1 = None
        self.l1 = None
        self.l2 = None

    # Prediction
    def forward(self, x):
        self.l1 = self.linear1(x)
        self.a1 = sigmoid(self.l1)
        self.l2 = self.linear2(self.a1)
        yhat = sigmoid(self.l2)
        return yhat
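Because the intermediate values are stored as attributes, a single forward pass fills them in, and they can be inspected afterwards. A quick sanity check (a minimal sketch, not part of the original lab; the tensor values are arbitrary):

# One forward pass populates l1, a1, and l2 on the model instance
net = Net(1, 2, 1)
out = net(torch.tensor([[1.0]]))
print(net.a1.shape)  # torch.Size([1, 2]): one sample, two hidden activations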
Define the training function:
# Define the training function
def train(Y, X, model, optimizer, criterion, epochs=1000):
    cost = []
    for epoch in range(epochs):
        total = 0
        for y, x in zip(Y, X):
            yhat = model(x)
            loss = criterion(yhat, y)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            # Cumulative loss over the epoch
            total += loss.item()
        cost.append(total)
        # Every 300 epochs, plot the model and the hidden activations
        if epoch % 300 == 0:
            PlotStuff(X, Y, model, epoch, leg=True)
            plt.show()
            # Forward pass over the full dataset so model.a1 holds
            # the activations for every sample
            model(X)
            plt.scatter(model.a1.detach().numpy()[:, 0], model.a1.detach().numpy()[:, 1], c=Y.numpy().reshape(-1))
            plt.title('activations')
            plt.show()
    return cost
Make Some Data
# Make some data
X = torch.arange(-20, 20, 1).view(-1, 1).type(torch.FloatTensor)
Y = torch.zeros(X.shape[0])
Y[(X[:, 0] > -4) & (X[:, 0] < 4)] = 1.0
Define the Neural Network, Criterion Function, Optimizer and Train the Model
Create the Cross-Entropy loss function:
# The loss function
def criterion_cross(outputs, labels):
    out = -1 * torch.mean(labels * torch.log(outputs) + (1 - labels) * torch.log(1 - outputs))
    return out
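For reference, this hand-written criterion computes the same mean binary cross entropy as PyTorch's built-in nn.BCELoss with its default reduction (a comparison sketch; the example tensors are arbitrary):

# Sanity check: both criteria should return the same value
bce = nn.BCELoss()
p = torch.tensor([0.9, 0.2, 0.6])  # predicted probabilities
t = torch.tensor([1.0, 0.0, 1.0])  # target labels
print(criterion_cross(p, t).item(), bce(p, t).item())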
Define the Neural Network, Optimizer, and Train the Model:
# Train the model
# size of input
D_in = 1
# size of hidden layer
H = 2
# number of outputs
D_out = 1
# learning rate
learning_rate = 0.1
# create the model
model = Net(D_in, H, D_out)
# optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# train the model using the cross-entropy criterion
cost_cross = train(Y, X, model, optimizer, criterion_cross, epochs=1000)
# plot the loss
plt.plot(cost_cross)
plt.xlabel('epoch')
plt.title('cross entropy loss')
By examining the output of the activations, you can see that by the 600th epoch the data has been mapped to a linearly separable space.
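You can reproduce that check after training by reusing the activations stored on the model (a minimal sketch built from the lab's own plotting code):

# Refresh model.a1 with a forward pass over the full dataset, then plot
# the two hidden activations against each other, colored by class
model(X)
plt.scatter(model.a1.detach().numpy()[:, 0], model.a1.detach().numpy()[:, 1], c=Y.numpy().reshape(-1))
plt.title('activations after training')
plt.show()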
We can make a prediction for an arbitrary tensor:

x = torch.tensor([0.0])
yhat = model(x)
yhat
We can make predictions for several samples at once:

X_ = torch.tensor([[0.0], [2.0], [3.0]])
Yhat = model(X_)
Yhat
We can threshold the predictions:

Yhat = Yhat > 0.5
Yhat
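The thresholded predictions can also be compared against the labels to estimate the fit on the training set (a sketch added for illustration, not part of the original lab):

# Fraction of training points whose thresholded prediction matches its label
predictions = (model(X) > 0.5).type(torch.FloatTensor).view(-1)
accuracy = (predictions == Y).type(torch.FloatTensor).mean()
print('training accuracy:', accuracy.item())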
Practice
Repeat the previous steps using the MSE cost as the total loss:
# Practice: Train the model with MSE Loss Function
# Type your code here
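One possible solution (a sketch under the same setup, swapping in torch.nn.MSELoss as the criterion; the course's official solution may differ):

# Practice solution sketch: same network and training loop, MSE criterion
criterion_mse = nn.MSELoss()
model_mse = Net(D_in, H, D_out)
optimizer_mse = torch.optim.SGD(model_mse.parameters(), lr=learning_rate)
cost_mse = train(Y, X, model_mse, optimizer_mse, criterion_mse, epochs=1000)
# plot the loss
plt.plot(cost_mse)
plt.xlabel('epoch')
plt.title('MSE loss')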