Skip to content
Snippets Groups Projects
Commit 6eccb6b2 authored by Michael Mutote's avatar Michael Mutote
Browse files

22202956 - optimised training supervised training

parent 6d774326
No related branches found
No related tags found
No related merge requests found
......@@ -2,10 +2,13 @@ import numpy as np
import Training_data
# Deterministic RNG so weight initialisation and sample shuffling are
# reproducible across runs.
rng = np.random.default_rng(123)

TEACHDATA = 9999  # training samples generated per digit class
TESTDATA = 999    # test samples generated per digit class
T_NUMBER = 6      # digit (0-6) the perceptrons are trained to detect

# Second argument is the Gaussian noise std-dev: the test set is noisier (0.3)
# than the training set (0.2).
test_data = Training_data.make_testset(TESTDATA, 0.3)
teach_data = Training_data.make_testset(TEACHDATA, 0.2)
def sigmoid(val):
......@@ -13,7 +16,6 @@ def sigmoid(val):
def linear_act(val):
    """Identity activation.

    Note: could alternatively be linear only inside the valid range and
    clamped to a constant outside it.
    """
    return val
......@@ -23,21 +25,16 @@ def threshold(val):
class Neuron:
    """Base perceptron unit: a weight vector (bias as weight 0) plus an
    activation function.

    The pre-refactor class-level ``test_data``/``teach_data`` attributes and
    the zero-argument ``test()`` (which was shadowed by the new two-argument
    version) are removed: they called ``make_testset`` with its old one-arg
    signature and referenced state that now lives at module level.
    """

    def __init__(self, input_count, activation):
        self.input_count = input_count
        self.activation = activation
        # input_count + 1 weights: index 0 is the bias weight.
        self.weights = rng.random(input_count + 1)

    def test(self, data):
        """Return the neuron's activation for one sample array *data*."""
        # Flatten the sample and prepend the constant bias input 1.
        ix = np.insert(data.ravel(), 0, 1)
        return self.activation(ix.dot(self.weights))
class ThresholdPerceptron(Neuron):
......@@ -47,9 +44,9 @@ class ThresholdPerceptron(Neuron):
def train(self, ETA):
for i in range(TEACHDATA):
old_weights = np.copy(self.weights)
for j in rng.permutation(len(Neuron.teach_data)):
for j in rng.permutation(len(teach_data)):
T = 1 if j == T_NUMBER else 0
ix = np.insert(Neuron.teach_data[j][i].ravel(), 0, 1)
ix = np.insert(teach_data[j][i].ravel(), 0, 1)
RI = self.activation(ix.dot(self.weights))
if RI != T:
delta = ETA * \
......@@ -65,17 +62,18 @@ class SGDPerceptron(Neuron):
super().__init__(input_count, activation)
def train(self, ETA):
    """Stochastic-gradient-descent training of the sigmoid perceptron.

    ETA: learning rate. Returns the final weight vector.

    The stale pre-refactor lines referencing the removed ``Neuron.teach_data``
    class attribute are dropped; only the module-level ``teach_data`` version
    is kept.
    """
    for i in range(TEACHDATA):
        old_weights = np.copy(self.weights)
        delta = [0 for _ in range(len(old_weights))]
        # Pick 3 digit classes at random for this training step.
        for j in rng.choice(rng.permutation(len(teach_data)), 3):
            T = (j == T_NUMBER)  # target is True only for the detected digit
            ix = np.insert(teach_data[j][i].ravel(), 0, 1)  # bias input prepended
            z = ix.dot(self.weights)
            RI = self.activation(z)
            # Sigmoid gradient step: ETA * error * sigmoid'(z) * input.
            delta = ETA * (T - RI) * RI * (1 - RI) * ix
            self.weights += delta
        # Early stop on converged weights was disabled in the original:
        # if np.linalg.norm(old_weights - self.weights) == 0.00:
        #     return self.weights
    return self.weights
......@@ -87,14 +85,11 @@ class LinearPerceptron(Neuron):
for i in range(TEACHDATA):
old_weights = np.copy(self.weights)
delta = [0 for _ in range(len(old_weights))]
for j in rng.permutation(len(Neuron.teach_data)):
for j in rng.permutation(len(teach_data)):
T = (j == T_NUMBER)
ix = np.insert(Neuron.teach_data[j][i].ravel(), 0, 1)
ix = np.insert(teach_data[j][i].ravel(), 0, 1)
delta += ETA * (T - self.activation(ix.dot(self.weights))) * ix
self.weights = self.weights + delta
if np.linalg.norm(old_weights - self.weights) == 0.00:
return self.weights
# if np.linalg.norm(old_weights - self.weights) == 0.00:
# return self.weights
return self.weights
import numpy as np
from prettytable import PrettyTable
import Perceptrons
# Define the table
table = PrettyTable()  # rebound per run inside run_test via `global table`
trial_data = Perceptrons.test_data  # noisy digit samples shared by every test below
REPETION = 1  # NOTE(review): appears unused in this file — confirm before removing
def run_test(neuron, learning_rate):
    """Train *neuron* once per rate in *learning_rate* and tabulate, for each
    ETA, the per-digit sum of rounded absolute activations over trial_data.

    Returns the (now trained) neuron.
    """
    global table
    table = PrettyTable()
    table.field_names = ["ETA", "0", "1", "2", "3", "4", "5", "6"]
    for eta in learning_rate:  # the list of values for ETA
        neuron.train(eta)
        scores = [
            sum(round(abs(neuron.test(sample))) for sample in digit_samples)
            for digit_samples in trial_data
        ]
        table.add_row([eta] + ["{:5d}".format(s) for s in scores])
    print(table)
    return neuron
# Sweep all seven target digits, retraining each perceptron variant per digit.
for i in range(7):
    # Rebind the module global that train() reads at call time.
    Perceptrons.T_NUMBER = i
    E = np.array([0.05, 0.1, 0.2, 0.4, 0.75, 1, 2, 5])  # base learning rates
    print("Threshold Perceptron is looking for: ", Perceptrons.T_NUMBER)
    # Rates are scaled per variant (E/4, E/160) — presumably tuned for
    # stability of each learning rule; confirm against experiment notes.
    run_test(Perceptrons.ThresholdPerceptron(20), E/4)
    print("Linear Perceptron is looking for: ", Perceptrons.T_NUMBER)
    run_test(Perceptrons.LinearPerceptron(20), E / 160)
    print("Sigmoid Gradient Descent Perceptron is looking for: ", Perceptrons.T_NUMBER)
    x = run_test(Perceptrons.SGDPerceptron(20), E)
def test_function(ETA, p):
    """Return [(ETA, rounded test output)] for perceptron *p*."""
    # NOTE(review): p.test() is called with no sample argument, but the
    # current Neuron.test(self, data) requires one — this looks like a stale
    # call to the removed zero-argument API. Confirm before running.
    results = []
    output = np.round(p.test())
    results.append((ETA, output))
    return results
# Single-sweep comparison of the three perceptron variants across ETA values.
for ETA in ([0.05, 0.1, 0.2, 0.4, 0.75, 1, 2, 5]):  # the list of values for ETA
    w = Perceptrons.ThresholdPerceptron(20)
    w.train(ETA)
    x = Perceptrons.LinearPerceptron(20)
    # ETA/200 — presumably scaled down to keep the linear rule stable; confirm.
    x.train(ETA/200)
    y = Perceptrons.SGDPerceptron(20)
    y.train(ETA)
    for i in range(1):
        res = test_function(ETA, w)
        print("Thres", res)  # print the results list
    for i in range(1):
        res = test_function(ETA/200, x)
        print("Lin", res)  # print the results list
    for i in range(1):
        res = test_function(ETA, y)
        print("sgd", res)  # print the results list
    print("\n\n")
import numpy as np
import copy
ideal = dict([])
ideal[0] = np.array([[0, 1, 1, 0],
......@@ -46,7 +45,7 @@ ideal[6] = np.array([[0, 1, 1, 0],
[0, 1, 1, 0]])
def make_testset(set_size):
def make_testset(set_size, NOISE):
data = [[] for _ in range(len(ideal))]
rng = np.random.default_rng(123)
for number, value in ideal.items():
......@@ -55,6 +54,6 @@ def make_testset(set_size):
for _ in range(set_size):
# scale is the standard deviation affecting the "spread"; it plays a role in the results
# new_digit = ideal[number] + rng.normal(loc=0, scale=0.3, size=(5, 4))
new_digit = ideal[number] + rng.normal(loc=0, scale=0.1, size=(5, 4))
new_digit = ideal[number] + rng.normal(loc=0, scale=NOISE, size=(5, 4))
data[number].append(new_digit)
return data
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment