# neural-network/NeuralNetwork.py
# A simple feed-forward neural network (one hidden layer) built on a
# custom Matrix class and trained with stochastic gradient descent.
import math
from Matrix import Matrix
def sigmoid(x):
    """The Sigmoid (logistic) function, numerically stable for large |x|.

    For x >= 0 the standard form 1 / (1 + e^-x) is safe; for x < 0 the
    algebraically equivalent e^x / (1 + e^x) is used so that math.exp
    never receives a large positive argument (which would raise
    OverflowError around x < -709 in the naive form).
    """
    if x >= 0:
        return 1.0 / (1.0 + math.exp(-x))
    z = math.exp(x)
    return z / (1.0 + z)
def dsigmoid(x):
    """The derivative of the Sigmoid function: s(x) * (1 - s(x)).

    Evaluates sigmoid only once instead of twice, halving the number of
    math.exp calls for the identical result.
    """
    s = sigmoid(x)
    return s * (1 - s)
class NeuralNetwork:
    """A fully-connected neural network with a single hidden layer.

    Weights and biases are held as Matrix objects (randomly initialized)
    and trained one sample at a time with stochastic gradient descent,
    using a sigmoid activation on both the hidden and output layers.
    """

    def __init__(self, num_i, num_h, num_o):
        """Create a network with num_i input, num_h hidden and num_o output nodes."""
        self.learning_rate = 0.1
        self.input_nodes = num_i
        self.hidden_nodes = num_h
        self.output_nodes = num_o
        # Weight matrices: rows = destination layer, cols = source layer,
        # so Matrix.product(weights, activations) yields the next layer.
        self.weights_ih = Matrix(self.hidden_nodes, self.input_nodes)
        self.weights_ho = Matrix(self.output_nodes, self.hidden_nodes)
        self.bias_h = Matrix(self.hidden_nodes, 1)
        self.bias_o = Matrix(self.output_nodes, 1)
        self.weights_ih.randomize()
        self.weights_ho.randomize()
        self.bias_h.randomize()
        self.bias_o.randomize()

    def _forward(self, inputs):
        """Run the forward pass on an input column Matrix.

        Returns the (hidden, outputs) activation matrices. Shared by
        feedforward() and train() so the two passes cannot drift apart.
        """
        hidden = Matrix.product(self.weights_ih, inputs)
        hidden.add(self.bias_h)
        hidden.map(lambda x, i, j: sigmoid(x))
        outputs = Matrix.product(self.weights_ho, hidden)
        outputs.add(self.bias_o)
        outputs.map(lambda x, i, j: sigmoid(x))
        return hidden, outputs

    def feedforward(self, input_list):
        """Feedforward algorithm: map an input list to the output activations."""
        inputs = Matrix.from_list(input_list)
        _, outputs = self._forward(inputs)
        return outputs.to_list()

    def train(self, input_list, answer):
        """Train the NN on a known input/answer combination (one SGD step)."""
        inputs = Matrix.from_list(input_list)
        hidden, outputs = self._forward(inputs)

        # Output-layer error and gradient:
        # delta_o = lr * (target - out) * out * (1 - out).
        targets = Matrix.from_list(answer)
        output_errors = Matrix.subtract(targets, outputs)
        gradients = Matrix.clone(outputs)
        gradients.map(lambda x, i, j: x * (1 - x))
        gradients.multiply(output_errors)
        gradients.multiply(self.learning_rate)

        # Backpropagate the error through the PRE-update output weights.
        # (The original code added weight_ho_deltas to weights_ho before
        # transposing it, so the hidden-layer error was computed against
        # the already-updated weights — a subtle backprop bug.)
        who_t = Matrix.transpose(self.weights_ho)
        hidden_errors = Matrix.product(who_t, output_errors)

        # Apply the output-layer weight and bias updates.
        hidden_t = Matrix.transpose(hidden)
        weight_ho_deltas = Matrix.product(gradients, hidden_t)
        self.weights_ho.add(weight_ho_deltas)
        self.bias_o.add(gradients)

        # Hidden-layer gradient and updates, mirroring the output layer.
        hidden_gradient = Matrix.clone(hidden)
        hidden_gradient.map(lambda x, i, j: x * (1 - x))
        hidden_gradient.multiply(hidden_errors)
        hidden_gradient.multiply(self.learning_rate)
        inputs_t = Matrix.transpose(inputs)
        weight_ih_deltas = Matrix.product(hidden_gradient, inputs_t)
        self.weights_ih.add(weight_ih_deltas)
        self.bias_h.add(hidden_gradient)