From f5d96de04f1cc3a846ef93eec68a81484dfa3ee4 Mon Sep 17 00:00:00 2001
From: tnbeats <tnbeatsprod@gmail.com>
Date: Sun, 19 Nov 2023 14:03:26 +0100
Subject: [PATCH] 22211572

---
 Reinforcement_Learning/Perceptrons.py    | 10 ++++-
 Reinforcement_Learning/PerceptronsSGD.py | 47 ++++++++++++++++++++++++
 2 files changed, 55 insertions(+), 2 deletions(-)
 create mode 100644 Reinforcement_Learning/PerceptronsSGD.py

diff --git a/Reinforcement_Learning/Perceptrons.py b/Reinforcement_Learning/Perceptrons.py
index 07325c1..d3c37c2 100644
--- a/Reinforcement_Learning/Perceptrons.py
+++ b/Reinforcement_Learning/Perceptrons.py
@@ -8,6 +8,10 @@ TESTDATA = 1000
 T_NUMBER = 0
 
 
+def sigmoid(val):
+    return 1 / (1 + np.exp(-val))
+
+
 def linear_act(val):
     return val
 
@@ -31,7 +35,8 @@ class Perceptron:
                 ix = np.insert(teach_data[j][i].ravel(), 0, 1)
                 RI = self.activation(ix.dot(self.weights))
                 if RI != T:
-                    delta = ETA * (T - self.activation(ix.dot(self.weights))) * ix
+                    delta = ETA * \
+                        (T - self.activation(ix.dot(self.weights))) * ix
                     self.weights = self.weights + delta
                 # print(self.weights[0], self.weights[1], self.weights[2], self.weights[3], self.weights[4], self.weights[5], self.weights[6])
             if np.linalg.norm(old_weights - self.weights) == 0.00:
@@ -45,5 +50,6 @@ class Perceptron:
         for number in range(len(test_data)):
             for sample in test_data[number]:
                 ix = np.insert(sample.ravel(), 0, 1)
-                res[number] = res[number] + (self.activation(ix.dot(self.weights)))
+                res[number] = res[number] + \
+                    (self.activation(ix.dot(self.weights)))
         return res
diff --git a/Reinforcement_Learning/PerceptronsSGD.py b/Reinforcement_Learning/PerceptronsSGD.py
new file mode 100644
index 0000000..805f222
--- /dev/null
+++ b/Reinforcement_Learning/PerceptronsSGD.py
@@ -0,0 +1,57 @@
+import numpy as np
+import Training_data
+
+rng = np.random.default_rng(123)
+TEACHDATA = 10000
+TESTDATA = 1000
+# ETA = 0.5
+T_NUMBER = 0
+
+
+def sigmoid(val):
+    """Logistic activation: squashes any real value into (0, 1)."""
+    return 1 / (1 + np.exp(-val))
+
+
+def sigmoid_derivative(val):
+    """Derivative of the logistic function, used in the SGD weight update."""
+    return sigmoid(val) * (1 - sigmoid(val))
+
+
+class PerceptronSGD:
+    """Single perceptron trained with stochastic gradient descent."""
+
+    def __init__(self, input_count, activation=sigmoid, activation_derivative=sigmoid_derivative):
+        self.input_count = input_count
+        self.activation = activation
+        self.activation_derivative = activation_derivative
+        # One weight per input plus a bias weight at index 0.
+        self.weights = rng.random(input_count + 1)
+
+    def train(self, ETA, NUM_EPOCHS):
+        """Run NUM_EPOCHS of SGD with learning rate ETA; return the final weights."""
+        teach_data = Training_data.make_testset(TEACHDATA)
+        for epoch in range(NUM_EPOCHS):
+            for i in range(TEACHDATA):
+                # Visit the classes in a fresh random order for each sample index.
+                for j in rng.permutation(len(teach_data)):
+                    T = 1 if j == T_NUMBER else 0
+                    # Flatten the sample and prepend the constant 1 for the bias.
+                    ix = np.insert(teach_data[j][i].ravel(), 0, 1)
+                    z = ix.dot(self.weights)
+                    RI = self.activation(z)
+                    error = T - RI
+                    # Gradient step: eta * (T - y) * f'(z) * x.
+                    delta = ETA * error * self.activation_derivative(z) * ix
+                    self.weights += delta
+        return self.weights
+
+    def test(self):
+        """Sum the activation per class over a fresh test set; return one total per class."""
+        test_data = Training_data.make_testset(TESTDATA)
+        res = [0 for _ in range(len(test_data))]
+        for number in range(len(test_data)):
+            for sample in test_data[number]:
+                # NOTE(review): assumes make_testset returns per-class sample arrays — confirm.
+                ix = np.insert(sample.ravel(), 0, 1)
+                res[number] += (self.activation(ix.dot(self.weights)))
+        return res
-- 
GitLab