Split from monorepo
commit 4b12aa44f2
5 changed files with 305 additions and 0 deletions
.gitattributes (vendored, new file, +2)

@@ -0,0 +1,2 @@
# Auto detect text files and perform LF normalization
* text=auto
.gitignore (vendored, new file, +104)

@@ -0,0 +1,104 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.idea/inspectionProfiles/Project_Default.xml
.idea/misc.xml
.idea/modules.xml
.idea/workspace.xml
.idea/NeuralNetwork.iml
Matrix.py (new file, +84)

@@ -0,0 +1,84 @@
import random


class Matrix:
    def __init__(self, rows, cols):
        self.rows = rows
        self.cols = cols
        self.data = []

        for i in range(0, rows):
            self.data.append([0 for _ in range(0, cols)])

    @staticmethod
    def from_list(lst):
        """Create a column vector (len(lst) x 1 matrix) from a list"""
        m = Matrix(len(lst), 1)
        m.map(lambda x, i, j: lst[i])
        return m

    @staticmethod
    def product(a, b):
        """Matrix product; returns None if the dimensions are incompatible"""
        if a.cols != b.rows:
            return None

        result = Matrix(a.rows, b.cols)
        result.map(lambda x, i, j: sum(a.data[i][k] * b.data[k][j] for k in range(0, a.cols)))

        return result

    @staticmethod
    def transpose(m):
        """Transpose a matrix"""
        result = Matrix(m.cols, m.rows)
        result.map(lambda x, i, j: m.data[j][i])
        return result

    @staticmethod
    def subtract(a, b):
        """Entrywise difference a - b"""
        result = Matrix(a.rows, a.cols)
        result.map(lambda x, i, j: a.data[i][j] - b.data[i][j])
        return result

    @staticmethod
    def clone(m):
        """Return a copy of m"""
        result = Matrix(m.rows, m.cols)
        result.map(lambda x, i, j: m.data[i][j])
        return result

    def map(self, fun):
        """Apply fun(value, i, j) to every element and store the return value
        back into the matrix"""
        for i in range(0, self.rows):
            for j in range(0, self.cols):
                self.data[i][j] = fun(self.data[i][j], i, j)

    def randomize(self):
        """Fill the matrix with random integers from {-1, 0, 1}"""
        self.map(lambda x, i, j: random.randint(-1, 1))

    def multiply(self, n):
        """Hadamard (if n is a Matrix) or scalar product, in place"""
        if isinstance(n, Matrix):
            self.map(lambda x, i, j: x * n.data[i][j])
        else:
            self.map(lambda x, i, j: x * n)

    def add(self, n):
        """Entrywise (if n is a Matrix) or scalar addition, in place"""
        if isinstance(n, Matrix):
            self.map(lambda x, i, j: x + n.data[i][j])
        else:
            self.map(lambda x, i, j: x + n)

    def print(self):
        """Pretty print the matrix"""
        for row in self.data:
            for elem in row:
                print(elem, end='\t')
            print()

    def to_list(self):
        """Return the matrix entries as a flat, row-major list"""
        # Building the list through self.map would overwrite every entry with
        # None (map stores the callback's return value), so read directly.
        return [elem for row in self.data for elem in row]
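For orientation, a minimal usage sketch of the Matrix API; it is not part of the commit, the variable names are illustrative, and it assumes Matrix.py sits next to the script:

    from Matrix import Matrix

    a = Matrix(2, 3)
    a.randomize()                    # entries drawn from {-1, 0, 1}

    b = Matrix.from_list([1, 2, 3])  # 3x1 column vector

    c = Matrix.product(a, b)         # (2x3)(3x1) -> 2x1; None on a size mismatch
    c.print()

    t = Matrix.transpose(c)          # 1x2
    t.multiply(0.5)                  # scalar product, in place
    print(t.to_list())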
NeuralNetwork.py (new file, +83)

@@ -0,0 +1,83 @@
import math

from Matrix import Matrix


def sigmoid(x):
    """The sigmoid activation function"""
    return 1 / (1 + math.exp(-x))


def dsigmoid(x):
    """The derivative of the sigmoid function (unused here: train() applies
    the equivalent y * (1 - y) form to the already-activated values)"""
    return sigmoid(x) * (1 - sigmoid(x))


class NeuralNetwork:
    def __init__(self, num_i, num_h, num_o):
        self.learning_rate = 0.1
        self.input_nodes = num_i
        self.hidden_nodes = num_h
        self.output_nodes = num_o

        self.weights_ih = Matrix(self.hidden_nodes, self.input_nodes)
        self.weights_ho = Matrix(self.output_nodes, self.hidden_nodes)
        self.bias_h = Matrix(self.hidden_nodes, 1)
        self.bias_o = Matrix(self.output_nodes, 1)

        self.weights_ih.randomize()
        self.weights_ho.randomize()
        self.bias_h.randomize()
        self.bias_o.randomize()

    def feedforward(self, input_list):
        """Propagate the inputs through the hidden layer and return the
        output layer's activations as a list"""
        inputs = Matrix.from_list(input_list)
        hidden = Matrix.product(self.weights_ih, inputs)
        hidden.add(self.bias_h)
        hidden.map(lambda x, i, j: sigmoid(x))

        output = Matrix.product(self.weights_ho, hidden)
        output.add(self.bias_o)
        output.map(lambda x, i, j: sigmoid(x))

        return output.to_list()

    def train(self, input_list, answer):
        """Train the network on one known input/answer pair via backpropagation"""
        # Forward pass (same as feedforward, but the intermediates are kept)
        inputs = Matrix.from_list(input_list)
        hidden = Matrix.product(self.weights_ih, inputs)
        hidden.add(self.bias_h)
        hidden.map(lambda x, i, j: sigmoid(x))

        outputs = Matrix.product(self.weights_ho, hidden)
        outputs.add(self.bias_o)
        outputs.map(lambda x, i, j: sigmoid(x))

        targets = Matrix.from_list(answer)

        # Output layer error: targets - outputs
        output_errors = Matrix.subtract(targets, outputs)

        # Output gradient: sigmoid derivative y * (1 - y) of the activations,
        # scaled by the errors and the learning rate
        gradients = Matrix.clone(outputs)
        gradients.map(lambda x, i, j: x * (1 - x))
        gradients.multiply(output_errors)
        gradients.multiply(self.learning_rate)

        # Hidden->output weight deltas
        hidden_t = Matrix.transpose(hidden)
        weight_ho_deltas = Matrix.product(gradients, hidden_t)

        self.weights_ho.add(weight_ho_deltas)
        self.bias_o.add(gradients)

        # Hidden layer error: the output errors propagated backwards through
        # the transposed hidden->output weights
        who_t = Matrix.transpose(self.weights_ho)
        hidden_errors = Matrix.product(who_t, output_errors)

        # Hidden gradient and input->hidden weight deltas, same scheme
        hidden_gradient = Matrix.clone(hidden)
        hidden_gradient.map(lambda x, i, j: x * (1 - x))
        hidden_gradient.multiply(hidden_errors)
        hidden_gradient.multiply(self.learning_rate)

        inputs_t = Matrix.transpose(inputs)
        weight_ih_deltas = Matrix.product(hidden_gradient, inputs_t)

        self.weights_ih.add(weight_ih_deltas)
        self.bias_h.add(hidden_gradient)
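As a sanity check, a hedged training sketch on XOR; it is not part of the commit, and the hidden layer size and iteration count are arbitrary choices. Since the weights start as random integers from {-1, 0, 1} and the learning rate is fixed at 0.1, convergence is not guaranteed on every run:

    import random
    from NeuralNetwork import NeuralNetwork

    nn = NeuralNetwork(2, 4, 1)      # 2 inputs, 4 hidden nodes, 1 output
    data = [
        ([0, 0], [0]),
        ([0, 1], [1]),
        ([1, 0], [1]),
        ([1, 1], [0]),
    ]

    for _ in range(10000):
        inputs, target = random.choice(data)
        nn.train(inputs, target)

    for inputs, target in data:
        print(inputs, target, nn.feedforward(inputs))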
Perceptron.py (new file, +32)

@@ -0,0 +1,32 @@
import random


class Perceptron:
    def __init__(self, n):
        # n weights, randomly initialized to -1, 0 or 1
        self.weights = [random.randint(-1, 1) for _ in range(0, n)]
        self.lr = 0.1

    @staticmethod
    def sign(output):
        """Sign activation function: maps the weighted sum to 1 or -1"""
        if output >= 0:
            return 1
        else:
            return -1

    def guess(self, inputs):
        """Classify an input vector as 1 or -1"""
        s = 0
        for i, inp in enumerate(inputs):
            s += inp * self.weights[i]

        return Perceptron.sign(s)

    def guessY(self, x):
        """Return the y coordinate of the current decision boundary at x
        (assumes three weights for inputs [x, y, bias] with bias = 1)"""
        w0 = self.weights[0]
        w1 = self.weights[1]
        w2 = self.weights[2]

        return -(w2 / w1) - (w0 / w1) * x

    def train(self, inputs, target):
        """Perceptron learning rule: nudge each weight by error * input * lr"""
        error = target - self.guess(inputs)
        for i in range(len(self.weights)):
            self.weights[i] += error * inputs[i] * self.lr
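And a corresponding sketch for the perceptron, again not part of the commit: classify points as above or below the line y = x, feeding a constant 1 as the third (bias) input, which is the layout guessY assumes:

    import random
    from Perceptron import Perceptron

    p = Perceptron(3)                # weights for [x, y, bias]

    def make_point():
        x = random.uniform(-1, 1)
        y = random.uniform(-1, 1)
        label = 1 if y > x else -1   # above or below the line y = x
        return [x, y, 1], label      # third input is the constant bias

    for _ in range(2000):
        inputs, label = make_point()
        p.train(inputs, label)

    hits = sum(p.guess(inputs) == label
               for inputs, label in (make_point() for _ in range(1000)))
    print('accuracy:', hits / 1000)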