# Import the libraries we need for this lab
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
Logistic Regression and Bad Initialization Value
Training Two Parameter Mini-Batch Gradient Descent
Objective
- How a bad initialization value can affect the accuracy of a model.
In this lab, you will see what happens when you use the mean squared error (MSE) cost or total loss function and select a bad initialization value for the parameter values.
Table of Contents
- Make Some Data
- Create the Model and Cost Function the PyTorch way
- Train the Model: Mini-Batch Gradient Descent
Estimated Time Needed: 30 min
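Why does a bad start hurt so much with the MSE loss? When the sigmoid saturates, the MSE gradient contains a factor p(1 - p) that is nearly zero, so gradient descent barely moves; the cross-entropy gradient has no such factor. The following is a minimal NumPy sketch (not part of the original lab) comparing the two gradients with respect to the linear output z for one saturated, mislabeled sample:
# Compare loss gradients at a saturated sigmoid (illustrative sketch)
z = -5.0                                # linear output w * x + b, deep in the saturated region
y = 1.0                                 # true label
p = 1 / (1 + np.exp(-z))                # sigmoid prediction, close to 0
grad_mse = 2 * (p - y) * p * (1 - p)    # d/dz of (p - y)^2: damped by the p(1 - p) factor
grad_bce = p - y                        # d/dz of cross-entropy: stays large
print(grad_mse)                         # roughly -0.013, almost no learning signal
print(grad_bce)                         # roughly -0.993, a strong learning signal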
Preparation
We'll need the libraries imported at the top of this notebook.
Helper functions
The class plot_error_surfaces is just to help you visualize the data space and the parameter space during training; it has nothing to do with PyTorch.
# Create class for plotting and the function for plotting
class plot_error_surfaces(object):

    # Constructor
    def __init__(self, w_range, b_range, X, Y, n_samples=30, go=True):
        W = np.linspace(-w_range, w_range, n_samples)
        B = np.linspace(-b_range, b_range, n_samples)
        w, b = np.meshgrid(W, B)
        Z = np.zeros((30, 30))
        count1 = 0
        self.y = Y.numpy()
        self.x = X.numpy()
        for w1, b1 in zip(w, b):
            count2 = 0
            for w2, b2 in zip(w1, b1):
                # MSE of the sigmoid prediction for this (w, b) pair
                Z[count1, count2] = np.mean((self.y - (1 / (1 + np.exp(-1 * w2 * self.x - b2)))) ** 2)
                count2 += 1
            count1 += 1
        self.Z = Z
        self.w = w
        self.b = b
        self.W = []
        self.B = []
        self.LOSS = []
        self.n = 0
        if go == True:
            plt.figure(figsize=(7.5, 5))
            plt.axes(projection='3d').plot_surface(self.w, self.b, self.Z, rstride=1, cstride=1, cmap='viridis', edgecolor='none')
            plt.title('Loss Surface')
            plt.xlabel('w')
            plt.ylabel('b')
            plt.show()
            plt.figure()
            plt.title('Loss Surface Contour')
            plt.xlabel('w')
            plt.ylabel('b')
            plt.contour(self.w, self.b, self.Z)
            plt.show()
    # Setter
    def set_para_loss(self, model, loss):
        self.n = self.n + 1
        self.W.append(list(model.parameters())[0].item())
        self.B.append(list(model.parameters())[1].item())
        self.LOSS.append(loss)
    # Plot diagram
    def final_plot(self):
        ax = plt.axes(projection='3d')
        ax.plot_wireframe(self.w, self.b, self.Z)
        ax.scatter(self.W, self.B, self.LOSS, c='r', marker='x', s=200, alpha=1)
        plt.figure()
        plt.contour(self.w, self.b, self.Z)
        plt.scatter(self.W, self.B, c='r', marker='x')
        plt.xlabel('w')
        plt.ylabel('b')
        plt.show()
    # Plot diagram
    def plot_ps(self):
        plt.subplot(121)
        plt.plot(self.x, self.y, 'ro', label="training points")
        plt.plot(self.x, self.W[-1] * self.x + self.B[-1], label="estimated line")
        plt.plot(self.x, 1 / (1 + np.exp(-1 * (self.W[-1] * self.x + self.B[-1]))), label='sigmoid')
        plt.xlabel('x')
        plt.ylabel('y')
        plt.ylim((-0.1, 2))
        plt.title('Data Space Iteration: ' + str(self.n))
        plt.show()
        plt.subplot(122)
        plt.contour(self.w, self.b, self.Z)
        plt.scatter(self.W, self.B, c='r', marker='x')
        plt.title('Loss Surface Contour Iteration' + str(self.n))
        plt.xlabel('w')
        plt.ylabel('b')
        plt.show()
# Plot the diagram
def PlotStuff(X, Y, model, epoch, leg=True):
    plt.plot(X.numpy(), model(X).detach().numpy(), label=('epoch ' + str(epoch)))
    plt.plot(X.numpy(), Y.numpy(), 'r')
    if leg == True:
        plt.legend()
Set the random seed:
# Set random seed
torch.manual_seed(0)
Get Some Data
Create the Data class:
# Create the data class
class Data(Dataset):

    # Constructor
    def __init__(self):
        self.x = torch.arange(-1, 1, 0.1).view(-1, 1)
        self.y = torch.zeros(self.x.shape[0], 1)
        self.y[self.x[:, 0] > 0.2] = 1
        self.len = self.x.shape[0]

    # Getter
    def __getitem__(self, index):
        return self.x[index], self.y[index]

    # Get Length
    def __len__(self):
        return self.len
Make a Data object:
# Create Data object
data_set = Data()
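The dataset has 20 points in [-1, 1) and assigns label 1 whenever x > 0.2. A quick check of the Dataset interface (illustrative, not part of the original lab):
# Quick check of the Dataset interface (illustrative)
print("Length: ", len(data_set))    # 20 samples
x0, y0 = data_set[0]
print("First sample: ", x0, y0)     # x = -1.0, so the label is 0
x19, y19 = data_set[19]
print("Last sample: ", x19, y19)    # x = 0.9 > 0.2, so the label is 1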
Create the Model and Total Loss Function (Cost)
Create a custom module for logistic regression:
# Create logistic_regression class
class logistic_regression(nn.Module):

    # Constructor
    def __init__(self, n_inputs):
        super(logistic_regression, self).__init__()
        self.linear = nn.Linear(n_inputs, 1)

    # Prediction
    def forward(self, x):
        yhat = torch.sigmoid(self.linear(x))
        return yhat
Create a logistic regression object or model:
# Create the logistic_regression model
model = logistic_regression(1)
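As a quick sanity check (not part of the original lab), the forward pass is simply the sigmoid applied to a linear function, so calling the model matches composing the two operations by hand:
# Sanity check: model(x) equals sigmoid(linear(x)) (illustrative)
x_sample = torch.tensor([[0.5]])
print(model(x_sample))                        # a probability in (0, 1)
print(torch.sigmoid(model.linear(x_sample)))  # same value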
Replace the randomly initialized parameter values with some predetermined values that will not converge:
# Set the weight and bias
model.state_dict()['linear.weight'].data[0] = torch.tensor([[-5]])
model.state_dict()['linear.bias'].data[0] = torch.tensor([[-10]])
print("The parameters: ", model.state_dict())
The parameters: OrderedDict({'linear.weight': tensor([[-5.]]), 'linear.bias': tensor([-10.])})
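To see why (-5, -10) is a bad starting point, note that for x in [-1, 1) the linear output -5x - 10 lies between about -14.5 and -5, so the sigmoid saturates near 0 for every sample. A small check (not part of the original lab):
# Show that every prediction starts out saturated near 0 (illustrative)
with torch.no_grad():
    z = model.linear(data_set.x)
    p = torch.sigmoid(z)
print("z range: ", z.min().item(), z.max().item())  # about -14.5 to -5
print("max prediction: ", p.max().item())           # about 0.0067, far below 0.5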
Create a plot_error_surfaces object to visualize the data space and the parameter space during training:
# Create the plot_error_surfaces object
get_surface = plot_error_surfaces(15, 13, data_set[:][0], data_set[:][1], 30)
Define the dataloader, the cost or criterion function, and the optimizer:
# Create dataloader object, criterion function and optimizer
trainloader = DataLoader(dataset=data_set, batch_size=3)
criterion_rms = nn.MSELoss()
learning_rate = 2
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
Train the Model via Mini-Batch Gradient Descent
Train the model
# Train the model
def train_model(epochs):
    for epoch in range(epochs):
        for x, y in trainloader:
            yhat = model(x)
            loss = criterion_rms(yhat, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            get_surface.set_para_loss(model, loss.tolist())
        if epoch % 20 == 0:
            get_surface.plot_ps()

train_model(100)
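After training, it is worth inspecting the learned parameters (not part of the original lab). Because the gradients are nearly flat in the saturated region, the parameters are expected to stay far from values that separate the classes, so the model keeps predicting class 0, as the accuracy check below confirms:
# Inspect the parameters after training with the bad start (illustrative)
print(model.state_dict())
print("Max prediction: ", model(data_set.x).max().item())  # expected to stay below 0.5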
Get the predicted class of each sample and calculate the accuracy on the training data:
# Make the prediction
yhat = model(data_set.x)
label = yhat > 0.5
print("The accuracy: ", torch.mean((label == data_set.y.type(torch.ByteTensor)).type(torch.float)))
The accuracy: tensor(0.6500)
Accuracy is 65%, compared to 100% in the last lab, which used a good initialization value. With the bad start, the model never escapes the flat region of the loss surface and predicts class 0 for every sample; since 13 of the 20 samples have label 0, that yields 13/20 = 65%.
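As a final comparison, here is a minimal sketch (not part of the original lab) that resets the parameters to zero, a starting point where the sigmoid is not saturated, and retrains with the same loop; the accuracy should recover toward the 100% seen in the last lab:
# Retrain from a reasonable starting point (illustrative sketch)
model.state_dict()['linear.weight'].data[0] = torch.tensor([[0.0]])
model.state_dict()['linear.bias'].data[0] = torch.tensor([[0.0]])
train_model(100)
yhat = model(data_set.x)
label = yhat > 0.5
print("The accuracy: ", torch.mean((label == data_set.y.type(torch.ByteTensor)).type(torch.float)))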