diff --git a/Reinforcement_Learning/NN_with_Backtracking.py b/Reinforcement_Learning/NN_with_Backtracking.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/Reinforcement_Learning/Perceptrons.py b/Reinforcement_Learning/Perceptrons.py
index 67671654e43ac57b4bbb3078682644d849de13d3..7028ba4caf03b6ed02c6d0f762e36e6bcb1c4420 100644
--- a/Reinforcement_Learning/Perceptrons.py
+++ b/Reinforcement_Learning/Perceptrons.py
@@ -1,8 +1,9 @@
 import numpy as np
 import Training_data
+import matplotlib.pyplot as plt
 
 rng = np.random.default_rng(123)
-TEACHDATA = 9999
+TEACHDATA = 99999
 TESTDATA = 999
 
 T_NUMBER = 6  # Number to be detected 0-6
@@ -31,6 +32,14 @@ class Neuron:
         self.input_count = input_count
         self.activation = activation
         self.weights = rng.random(input_count + 1)
+        self.errors = []
+
+    def plot_errors(self, title):
+        plt.plot(self.errors)
+        plt.xlabel('Iteration')
+        plt.ylabel('Error')
+        plt.title(title)
+        plt.show()
 
     def test(self, data):
         ix = np.insert(data.ravel(), 0, 1)
@@ -49,12 +58,13 @@ class ThresholdPerceptron(Neuron):
                 ix = np.insert(teach_data[j][i].ravel(), 0, 1)
                 RI = self.activation(ix.dot(self.weights))
                 if RI != T:
-                    delta = ETA * \
-                            (T - self.activation(ix.dot(self.weights))) * ix
+                    err = T - RI  # RI already holds activation(ix . weights); no need to recompute
+                    delta = ETA * err * ix
                     self.weights = self.weights + delta
-            if np.linalg.norm(old_weights - self.weights) == 0.00:
-                return self.weights
-        return self.weights
+                    self.errors.append(abs(err))
+            # if np.linalg.norm(old_weights - self.weights) == 0.00:
+            #     return
+        return
 
 
 class SGDPerceptron(Neuron):
@@ -65,16 +75,17 @@ class SGDPerceptron(Neuron):
         for i in range(TEACHDATA):
             old_weights = np.copy(self.weights)
             delta = [0 for _ in range(len(old_weights))]
-            for j in rng.choice(rng.permutation(len(teach_data)), 3):
+            for j in rng.choice(rng.permutation(len(teach_data)), 5):  # 5 random digits, with replacement
                 T = (j == T_NUMBER)
                 ix = np.insert(teach_data[j][i].ravel(), 0, 1)
-                z = ix.dot(self.weights)
-                RI = self.activation(z)
-                delta = ETA * (T - RI) * RI * (1 - RI) * ix
+                RI = self.activation(ix.dot(self.weights))
+                err = T - RI
+                delta = delta + ETA * err * RI * (1 - RI) * ix  # accumulate the mini-batch update
+                self.errors.append(abs(err))
             self.weights += delta
-            # if np.linalg.norm(old_weights - self.weights) == 0.00:
-            #     return self.weights
-        return self.weights
+            if np.linalg.norm(old_weights - self.weights) == 0.00:
+                return
+        return
 
 
 class LinearPerceptron(Neuron):
@@ -88,8 +99,10 @@ class LinearPerceptron(Neuron):
             for j in rng.permutation(len(teach_data)):
                 T = (j == T_NUMBER)
                 ix = np.insert(teach_data[j][i].ravel(), 0, 1)
-                delta += ETA * (T - self.activation(ix.dot(self.weights))) * ix
+                err = T - self.activation(ix.dot(self.weights))
+                delta += ETA * err * ix
+                self.errors.append(abs(err))
             self.weights = self.weights + delta
-            # if np.linalg.norm(old_weights - self.weights) == 0.00:
-            #     return self.weights
-        return self.weights
+            if np.linalg.norm(old_weights - self.weights) == 0.00:
+                return
+        return
diff --git a/Reinforcement_Learning/Solution_Testing_1.py b/Reinforcement_Learning/Solution_Testing_1.py
index 3e57838b5fa897a4f7726aaeb3e2339abe7daf12..eb5d8d30ef6ff4c5494a248d5470a99fa65b9e9d 100644
--- a/Reinforcement_Learning/Solution_Testing_1.py
+++ b/Reinforcement_Learning/Solution_Testing_1.py
@@ -1,3 +1,9 @@
+# README
+# The threshold perceptron's weights stagnate while its accuracy is still poor,
+# so the early-stopping check in its training loop was commented out.
+# Error plotting was added but is commented out: plotting this often causes an error.
+
+
 import numpy as np
 from prettytable import PrettyTable
 import Perceptrons
@@ -10,7 +16,7 @@ trial_data = Perceptrons.test_data
 REPETION = 1
 
 
-def run_test(neuron, learning_rate):
+def run_test(neuron, learning_rate, title):
     global table
     table = PrettyTable()
     table.field_names = ["ETA", "0", "1", "2", "3", "4", "5", "6"]
@@ -22,18 +28,19 @@ def run_test(neuron, learning_rate):
                 res[i] = res[i] + round(abs(neuron.test(trial_array)))
         res = ["{:5d}".format(r) for r in res]
         table.add_row([ETA] + res)
+        # neuron.plot_errors(title + f"{ETA}")
     print(table)
     return neuron
 
-for i in range(7):
+for i in range(6):
     Perceptrons.T_NUMBER = i
     E = np.array([0.05, 0.1, 0.2, 0.4, 0.75, 1, 2, 5])
     print("Threshold Perceptron is looking for: ", Perceptrons.T_NUMBER)
-    run_test(Perceptrons.ThresholdPerceptron(20), E/4)
+    run_test(Perceptrons.ThresholdPerceptron(20), E/4, f"Threshold Perceptron digit {i} ")
     print("Linear Perceptron is looking for: ", Perceptrons.T_NUMBER)
-    run_test(Perceptrons.LinearPerceptron(20), E / 160)
+    run_test(Perceptrons.LinearPerceptron(20), E / 160, f"Linear Perceptron digit {i} ")
     print("Sigmoid Gradient Descent Perceptron is looking for: ", Perceptrons.T_NUMBER)
-    x = run_test(Perceptrons.SGDPerceptron(20), E)
+    x = run_test(Perceptrons.SGDPerceptron(20), E, f"SGD Perceptron digit {i} ")