# Import the libraries we need for this lab
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from torch.utils.data import Dataset, DataLoader
Practice: Neural Networks with One Hidden Layer: Noisy XOR
Objective
- How to create a neural network model with multiple neurons.
Table of Contents
In this lab, you will see how many neurons it takes to classify noisy XOR data with a one-hidden-layer neural network.
- Preparation
- Neural Network Module and Training Function
- Make Some Data
- One Neuron
- Two Neurons
- Three Neurons
Estimated Time Needed: 25 min
Preparation
We'll need the libraries imported at the top of this lab.
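Training involves random noise and random weight initialization, so results vary from run to run. If you want reproducible runs, you can optionally seed the generator (this line is an addition, not part of the original lab):

# Optional: fix the random seed so results are reproducible
torch.manual_seed(0)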
Use the following function to plot the data:
# Plot the data
def plot_decision_regions_2class(model, data_set):
    cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#00AAFF'])
    cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#00AAFF'])
    X = data_set.x.numpy()
    y = data_set.y.numpy()
    h = .02
    x_min, x_max = X[:, 0].min() - 0.1, X[:, 0].max() + 0.1
    y_min, y_max = X[:, 1].min() - 0.1, X[:, 1].max() + 0.1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    XX = torch.Tensor(np.c_[xx.ravel(), yy.ravel()])
    yhat = np.logical_not((model(XX)[:, 0] > 0.5).numpy()).reshape(xx.shape)
    plt.pcolormesh(xx, yy, yhat, cmap=cmap_light)
    plt.plot(X[y[:, 0] == 0, 0], X[y[:, 0] == 0, 1], 'o', label='y=0')
    plt.plot(X[y[:, 0] == 1, 0], X[y[:, 0] == 1, 1], 'ro', label='y=1')
    plt.title("decision region")
    plt.legend()
Use the following function to calculate accuracy:
# Calculate the accuracy
def accuracy(model, data_set):
    return np.mean(data_set.y.view(-1).numpy() == (model(data_set.x)[:, 0] > 0.5).numpy())
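The model's final sigmoid produces a probability, so a prediction is obtained by thresholding at 0.5 and comparing with the target. A minimal standalone illustration of that comparison (toy numbers, not from the lab):

# Toy illustration of the thresholding used in accuracy()
probs = np.array([0.2, 0.7, 0.51])        # hypothetical model outputs
targets = np.array([0.0, 1.0, 0.0])       # hypothetical labels
predictions = probs > 0.5                 # -> [False, True, True]
print(np.mean(targets == predictions))    # 2 of 3 match -> ~0.67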
Neural Network Module and Training Function
Define the neural network module or class:
# Define the class Net with one hidden layer
class Net(nn.Module):
    # Constructor
    def __init__(self, D_in, H, D_out):
        super(Net, self).__init__()
        # Hidden layer
        self.linear1 = nn.Linear(D_in, H)
        # Output layer
        self.linear2 = nn.Linear(H, D_out)

    # Prediction
    def forward(self, x):
        x = torch.sigmoid(self.linear1(x))
        x = torch.sigmoid(self.linear2(x))
        return x
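As a quick sanity check (not part of the original lab), you can instantiate the class and confirm that a batch of 2-dimensional points is mapped to one sigmoid output per sample:

# Sanity check: 2 inputs, 3 hidden neurons, 1 output
toy_model = Net(D_in=2, H=3, D_out=1)
toy_x = torch.zeros((4, 2))        # dummy batch of four 2-dimensional points
toy_yhat = toy_model(toy_x)
print(toy_yhat.shape)              # torch.Size([4, 1]), values in (0, 1)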
Define a function to train the model:
# Define the train model
def train(data_set, model, criterion, train_loader, optimizer, epochs=5):
    COST = []
    ACC = []
    for epoch in range(epochs):
        total = 0
        for x, y in train_loader:
            optimizer.zero_grad()
            yhat = model(x)
            loss = criterion(yhat, y)
            loss.backward()
            optimizer.step()
            # Cumulative loss
            total += loss.item()
        ACC.append(accuracy(model, data_set))
        COST.append(total)

    fig, ax1 = plt.subplots()
    color = 'tab:red'
    ax1.plot(COST, color=color)
    ax1.set_xlabel('epoch', color=color)
    ax1.set_ylabel('total loss', color=color)
    ax1.tick_params(axis='y', color=color)

    ax2 = ax1.twinx()
    color = 'tab:blue'
    ax2.set_ylabel('accuracy', color=color)  # we already handled the x-label with ax1
    ax2.plot(ACC, color=color)
    ax2.tick_params(axis='y', color=color)
    fig.tight_layout()  # otherwise the right y-label is slightly clipped
    plt.show()
    return COST
Make Some Data
Dataset class:
# Define the class XOR_Data
class XOR_Data(Dataset):
    # Constructor
    def __init__(self, N_s=100):
        self.x = torch.zeros((N_s, 2))
        self.y = torch.zeros((N_s, 1))
        for i in range(N_s // 4):
            self.x[i, :] = torch.Tensor([0.0, 0.0])
            self.y[i, 0] = torch.Tensor([0.0])

            self.x[i + N_s // 4, :] = torch.Tensor([0.0, 1.0])
            self.y[i + N_s // 4, 0] = torch.Tensor([1.0])

            self.x[i + N_s // 2, :] = torch.Tensor([1.0, 0.0])
            self.y[i + N_s // 2, 0] = torch.Tensor([1.0])

            self.x[i + 3 * N_s // 4, :] = torch.Tensor([1.0, 1.0])
            self.y[i + 3 * N_s // 4, 0] = torch.Tensor([0.0])

        self.x = self.x + 0.01 * torch.randn((N_s, 2))
        self.len = N_s

    # Getter
    def __getitem__(self, index):
        return self.x[index], self.y[index]

    # Get Length
    def __len__(self):
        return self.len

    # Plot the data
    def plot_stuff(self):
        plt.plot(self.x[self.y[:, 0] == 0, 0].numpy(), self.x[self.y[:, 0] == 0, 1].numpy(), 'o', label="y=0")
        plt.plot(self.x[self.y[:, 0] == 1, 0].numpy(), self.x[self.y[:, 0] == 1, 1].numpy(), 'ro', label="y=1")
        plt.legend()
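As a quick check (not part of the original lab), a tiny dataset shows the four quarters sitting near the four XOR corners, with label 1 only when exactly one coordinate is 1:

# Inspect a tiny version of the dataset
toy_data = XOR_Data(N_s=8)
print(toy_data.x.round())    # approximately the corners (0,0), (0,1), (1,0), (1,1), two samples each
print(toy_data.y.view(-1))   # tensor([0., 0., 1., 1., 1., 1., 0., 0.])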
Dataset object:
# Create dataset object
data_set = XOR_Data()
data_set.plot_stuff()
One Neuron
Try
Create a neural network model with one neuron. Then, use the following code to train it:
# Practice: create a model with one neuron
# Type your code here
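One possible solution, shown as a sketch (it assumes the Net class defined above; a single hidden neuron means H = 1):

# One hidden neuron: 2 inputs -> 1 hidden unit -> 1 output
model = Net(2, 1, 1)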
# Train the model
learning_rate = 0.001
criterion = nn.BCELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
train_loader = DataLoader(dataset=data_set, batch_size=1)
LOSS12 = train(data_set, model, criterion, train_loader, optimizer, epochs=500)
plot_decision_regions_2class(model, data_set)
Two Neurons
Try
Create a neural network model with two neurons. Then, use the following code to train it:
# Practice: create a model with two neurons
# Type your code here
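One possible solution, again as a sketch using the Net class defined above, this time with two hidden neurons:

# Two hidden neurons: 2 inputs -> 2 hidden units -> 1 output
model = Net(2, 2, 1)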
# Train the model
learning_rate = 0.1
criterion = nn.BCELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
train_loader = DataLoader(dataset=data_set, batch_size=1)
LOSS12 = train(data_set, model, criterion, train_loader, optimizer, epochs=500)
plot_decision_regions_2class(model, data_set)
Three Neurons
Try
Create a neural network model with three neurons. Then, use the following code to train it:
# Practice: create a model with three neurons
# Type your code here
model = Net(2, 3, 1)
# Train the model
learning_rate = 0.1
criterion = nn.BCELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
train_loader = DataLoader(dataset=data_set, batch_size=1)
LOSS12 = train(data_set, model, criterion, train_loader, optimizer, epochs=500)
plot_decision_regions_2class(model, data_set)