Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import numpy as np
- import matplotlib.pyplot as plt
- import math
- from matplotlib import cm
- from mpl_toolkits.mplot3d import Axes3D
def get_alphas(nr):
    """Return `nr` evenly spaced bump angles covering the full circle.

    Uses ``endpoint=False`` so the angles are 0, 2*pi/nr, ..., 2*pi*(nr-1)/nr.
    The original ``np.linspace(0.0, 2*np.pi, nr)`` included the endpoint,
    which made the first and last angles coincide (0 and 2*pi point the same
    way), duplicating one bump direction and spacing the rest unevenly.

    :param nr: number of angles (one per bump / hidden-neuron pair)
    :return: 1-D ndarray of length `nr` with angles in [0, 2*pi)
    """
    return np.linspace(0.0, 2 * np.pi, nr, endpoint=False)
def list_to_nparray(list_to_convert):
    """Wrap a Python list in a 1-D object-dtype ndarray, one element per item.

    Assigning into a pre-allocated ``dtype=object`` array stops NumPy from
    trying to broadcast/stack the (possibly differently shaped) elements into
    a single multi-dimensional array.
    https://stackoverflow.com/questions/62994636/numpy-stop-numpy-array-from-trying-to-reconcile-elements-create-ndarry-from

    :param list_to_convert: list of arbitrary objects (e.g. ndarrays of
        mismatched shapes)
    :return: object-dtype ndarray of shape ``(len(list_to_convert),)``
    """
    wrapper = np.empty(len(list_to_convert), dtype=object)
    wrapper[:] = list_to_convert
    return wrapper
def clipped_sig(z):
    """Numerically safe logistic sigmoid.

    The argument is clamped to [-700, 700] before exponentiation so that
    ``np.exp`` never overflows for float64 inputs; outside that range the
    sigmoid is already saturated at 0 or 1 to machine precision.

    :param z: scalar or ndarray
    :return: sigmoid(z), same shape as the input
    """
    safe_z = np.clip(z, -700, 700)
    return 1.0 / (1.0 + np.exp(-safe_z))
def generate_network_weights_and_biases_for_single_tower(number_of_neurons=20, width=0.1, x0=0.5, y0=0.5):
    """
    Build the weights and biases of a 2-input, one-hidden-layer network that
    approximates a single circular "tower" centred at (x0, y0).

    Each bump requires a *pair* of hidden neurons: the first produces a steep
    step facing angle alpha, the second an identical step shifted outward by
    `width`; subtracting the second from the first leaves a thin bump. The
    pairs are arranged around the circle via `get_alphas`.

    :param number_of_neurons: hidden-layer size; rounded up to the next even
        number, since bumps consume neurons in pairs
    :param width: radial thickness of each bump (smaller -> steeper steps)
    :param x0: x coordinate of the tower centre
    :param y0: y coordinate of the tower centre
    :return: tuple ``(weights, biases)`` of object arrays:
        ``weights[0]`` is (n, 2), ``weights[1]`` is (1, n),
        ``biases[0]`` is (n, 1), ``biases[1]`` is (1, 1) (zero output bias)
    """
    # Bumps come in neuron pairs, so force an even hidden-layer size.
    if number_of_neurons % 2:
        number_of_neurons += 1

    hidden_weights = np.zeros((number_of_neurons, 2))
    output_weights = np.zeros((1, number_of_neurons))
    hidden_biases = np.zeros((number_of_neurons, 1))

    for pair_index, alpha in enumerate(get_alphas(number_of_neurons // 2)):
        # Steepness grows as the bump narrows: thinner bumps need sharper steps.
        steepness = 10000 / width
        # The two neurons of a pair share the same facing direction, hence the
        # same input weights (only their biases differ).
        wx = math.cos(alpha) * steepness
        wy = math.sin(alpha) * steepness
        step_bias = (-math.cos(alpha) * x0 - math.sin(alpha) * y0) * steepness
        shifted_bias = step_bias - width * steepness

        first = pair_index * 2
        second = first + 1

        # First neuron of the pair: step through (x0, y0) facing alpha.
        hidden_weights[first][0] = wx
        hidden_weights[first][1] = wy
        hidden_biases[first][0] = step_bias
        output_weights[0][first] = 4.0 / number_of_neurons

        # Second neuron: same step shifted by `width`, subtracted at the output.
        hidden_weights[second][0] = wx
        hidden_weights[second][1] = wy
        hidden_biases[second][0] = shifted_bias
        output_weights[0][second] = -4.0 / number_of_neurons

    output_bias = np.array([float(number_of_neurons) * -0.0]).reshape(1, 1)
    return (list_to_nparray([hidden_weights, output_weights]),
            list_to_nparray([hidden_biases, output_bias]))
def compute_z_network(xs, ys, tower):
    """Evaluate the tower network on the cartesian grid ``xs`` x ``ys``.

    The result is shaped ``(len(ys), len(xs))``, matching the
    ``np.meshgrid(xs, ys)`` convention so it can be passed to
    ``plot_surface`` directly. (The original filled ``z[ix][iy]``, the
    transpose of that convention — invisible only because the grid used is
    square and the tower symmetric about its centre.)

    The Python double loop over grid points is replaced by a single
    vectorized matrix product over all ``len(xs) * len(ys)`` points.
    Note: as in the original, the output-layer bias (``biases[1]``, always
    -0.0 here) is not added.

    :param xs: 1-D array of x coordinates
    :param ys: 1-D array of y coordinates
    :param tower: kwargs dict forwarded to
        ``generate_network_weights_and_biases_for_single_tower``
    :return: ndarray of network outputs, shape ``(len(ys), len(xs))``
    """
    weights, biases = generate_network_weights_and_biases_for_single_tower(**tower)
    x_grid, y_grid = np.meshgrid(xs, ys)                      # each (len(ys), len(xs))
    points = np.stack([x_grid.ravel(), y_grid.ravel()])       # (2, P)
    hidden = clipped_sig(np.dot(weights[0], points) + biases[0])  # (n_neurons, P)
    z = np.dot(weights[1], hidden).reshape(x_grid.shape)
    return z
def main():
    """Render one extremely thin sigmoid 'tower' centred at (0.5, 0.5) in 3-D."""
    start = 0.4999995
    end = 0.5000005
    # Runtime grows with the square of the resolution; prefer tuning
    # start/end (zoom) over raising this.
    xy_resolution = 300
    xs = np.linspace(start, end, num=xy_resolution)
    ys = np.linspace(start, end, num=xy_resolution)
    tower = {"width": 0.0000001, "x0": 0.5, "y0": 0.5, "number_of_neurons": 1000}
    z_surface = compute_z_network(xs, ys, tower)
    x_grid, y_grid = np.meshgrid(xs, ys)
    fig = plt.figure()
    fig.suptitle("Step Width: %s (radius)\nNumber of Neurons: %s"
                 % (tower["width"], tower["number_of_neurons"]))
    # fig.gca(projection=...) was deprecated in Matplotlib 3.4 and removed in
    # 3.6; add_subplot(projection="3d") is the supported way to get 3-D axes.
    ax = fig.add_subplot(projection="3d")
    ax.plot_surface(x_grid, y_grid, z_surface, cmap=cm.coolwarm)
    plt.show()
# Script entry point: build the tower network and show the surface plot.
if __name__ == '__main__':
    main()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement