This program implements the backpropagation algorithm for a small feed-forward neural network, using a worked example. Can we make it more efficient?
import math
from itertools import chain

import numpy as np
inputvalues = [0.1, 0.9, 0.5, 0.2]  # 4 input nodes
initialweights = [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6], [0.7, 0.8]]  # 4x2: input -> hidden 1
hidden_initial_weights = [
    [[0.2, 0.3, 0.4], [0.5, 0.6, 0.7]],                                  # 2x3: hidden 1 -> hidden 2
    [[0.8, 0.9, 0.1, 0.2], [0.3, 0.4, 0.5, 0.6], [0.7, 0.8, 0.9, 0.1]],  # 3x4: hidden 2 -> hidden 3
    [[0.2, 0.3, 0.4, 0.5, 0.6], [0.7, 0.8, 0.9, 0.1, 0.2],
     [0.3, 0.4, 0.5, 0.6, 0.7], [0.9, 0.8, 0.7, 0.6, 0.5]],              # 4x5: hidden 3 -> hidden 4
]
Hidden_Output_Connection_weights = [[0.5, 0.4, 0.3], [0.6, 0.7, 0.8], [0.1, 0.3, 0.5],
                                    [0.7, 0.8, 0.9], [0.45, 0.34, 0.32]]  # 5x3: hidden 4 -> output
Target_Value = 0.9
learning_rate = 0.4
# All weight matrices in forward order, grouped so the loops below can
# walk them group by group.
weight_matrix = [[initialweights], hidden_initial_weights, [Hidden_Output_Connection_weights]]
# Forward propagation
intermediate = [inputvalues]  # collects every layer's output, starting with the inputs
current = inputvalues
for group in weight_matrix:
    for weights in group:
        result = np.dot(current, weights)
        # Logistic sigmoid activation; its derivative f * (1 - f) is the
        # form every error term below assumes.
        current = [1 / (1 + math.exp(-k)) for k in result]
        intermediate.append(current)
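# The whole forward pass can be vectorized with NumPy, which is the main
# efficiency win here. A minimal sketch (the names `layer_ws` and `acts`
# are new, introduced only for this illustration); it reproduces the
# `intermediate` list computed above:
layer_ws = [np.array(w) for group in weight_matrix for w in group]
acts = [np.array(inputvalues)]
for w in layer_ws:
    acts.append(1.0 / (1.0 + np.exp(-(acts[-1] @ w))))  # sigmoid of the layer's net input
assert all(np.allclose(a, b) for a, b in zip(acts, intermediate))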
# Error term at each output node: (target - output) scaled by the
# sigmoid derivative f * (1 - f).
Final_error_from_output_layer_nodes = []
for i in intermediate[-1]:
    Error_in_final_output = (Target_Value - i) * i * (1 - i)
    Final_error_from_output_layer_nodes.append(Error_in_final_output)
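# Vectorized equivalent of the loop above (a sketch; `out` is introduced
# here only for the illustration):
out = np.asarray(intermediate[-1])
assert np.allclose(Final_error_from_output_layer_nodes, (Target_Value - out) * out * (1 - out))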
rr = []  # activation derivatives f * (1 - f) for every layer except the
         # output, ordered from the last hidden layer back to the inputs
for i in reversed(range(len(intermediate) - 1)):  # minus one: skip the output layer
    temp = []
    for j in range(len(intermediate[i])):
        temp.append(intermediate[i][j] * (1 - intermediate[i][j]))
    rr.append(temp)
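# The same derivatives in one NumPy expression per layer (sketch):
rr_vec = [np.asarray(layer) * (1 - np.asarray(layer)) for layer in reversed(intermediate[:-1])]
assert all(np.allclose(a, b) for a, b in zip(rr_vec, rr))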
# Transposes of all weight matrices, ordered from the output layer back
# to the input layer, ready for the backward sweep.
transposed_weight_matrix = []
for i in reversed(range(len(weight_matrix))):
    for j in reversed(range(len(weight_matrix[i]))):
        transposed_weight_matrix.append(list(map(list, zip(*weight_matrix[i][j]))))
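# NumPy's .T attribute yields the same transposes directly (sketch;
# assumes `layer_ws` from the forward-pass sketch above):
transposed_vec = [w.T for w in reversed(layer_ws)]
assert all(np.array_equal(t, np.asarray(o)) for t, o in zip(transposed_vec, transposed_weight_matrix))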
# Backward sweep: push the error through each transposed weight matrix
# and scale by the activation derivative of the receiving layer. The
# last transposed matrix is skipped because no error term is needed for
# the input layer itself.
Backward_propagation_output = []
final_errors = [Final_error_from_output_layer_nodes]
for i in range(len(transposed_weight_matrix) - 1):
    intermediate_error = np.dot(Final_error_from_output_layer_nodes, transposed_weight_matrix[i])
    temp = []
    for num1, num2 in zip(intermediate_error, rr[i]):
        temp.append(num1 * num2)
    Backward_propagation_output.append(temp)
    Final_error_from_output_layer_nodes = Backward_propagation_output[i]
    final_errors.append(Final_error_from_output_layer_nodes)
final_errors.reverse()  # now ordered from the first hidden layer to the output layer
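# The same backward sweep, fully vectorized (sketch; assumes `layer_ws`
# and `rr_vec` from the sketches above):
delta = np.asarray(final_errors[-1])  # start from the output-layer error terms
vec_errors = [delta]
for w, deriv in zip(reversed(layer_ws[1:]), rr_vec):
    delta = (delta @ w.T) * deriv     # propagate the error one layer back
    vec_errors.append(delta)
assert all(np.allclose(a, b) for a, b in zip(reversed(vec_errors), final_errors))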
# Weight deltas: learning_rate * (sending node's output) * (receiving
# node's error term), one table per weight matrix.
change_in_weights = []
for i in range(len(intermediate) - 1):
    temp1 = []
    for j in range(len(intermediate[i])):
        temp = []
        for k in range(len(final_errors[i])):
            temp.append(learning_rate * intermediate[i][j] * final_errors[i][k])
        temp1.append(temp)
    change_in_weights.append(temp1)
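# Each delta table is an outer product, so np.outer computes it in one
# call per layer (sketch):
vec_changes = [learning_rate * np.outer(intermediate[i], final_errors[i])
               for i in range(len(intermediate) - 1)]
assert all(np.allclose(a, b) for a, b in zip(vec_changes, change_in_weights))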
# Updated weights: flatten the nested weight structure into a flat list
# of matrices, then add the matching delta to every single weight.
updated_weights = []
r = list(chain.from_iterable(weight_matrix))
for i in range(len(r)):
    temp = []
    for j in range(len(r[i])):
        temp1 = []
        for k in range(len(r[i][j])):
            temp1.append(r[i][j][k] + change_in_weights[i][j][k])
        temp.append(temp1)
    updated_weights.append(temp)
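# Once everything is a NumPy array, the whole update collapses to one
# addition per layer (sketch; assumes `layer_ws` and `vec_changes` from
# the sketches above):
vec_updated = [w + dw for w, dw in zip(layer_ws, vec_changes)]
assert all(np.allclose(a, b) for a, b in zip(vec_updated, updated_weights))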