Advertisement
Not a member of Pastebin yet?
Sign up —
it unlocks many cool features!
import math
import random

import matplotlib.pyplot as plt
import numpy as np
# --- Synthetic two-ring dataset --------------------------------------------
# Each sample is drawn in polar coordinates (radius, angle).  Class 0 lives
# on a ring of mean radius 3, class 1 on a ring of mean radius 6 (std 0.5,
# radius clamped at zero); angles are uniform over [0, 2*pi).
dataset_size = 1000
data_0 = np.array([(max(0, np.random.normal(3, 0.5)),
                    np.random.uniform(0, math.pi * 2))
                   for _ in range(dataset_size)])
data_1 = np.array([(max(0, np.random.normal(6, 0.5)),
                    np.random.uniform(0, math.pi * 2))
                   for _ in range(dataset_size)])

# Labels: 0 for the inner ring, 1 for the outer ring.
train_y = np.concatenate([np.zeros(dataset_size), np.ones(dataset_size)])
train_x = np.concatenate([data_0, data_1], axis=0)

# Shuffle samples and labels together so each pair stays aligned.
together = list(zip(train_x, train_y))
print('?', together[0])
random.shuffle(together)

# Polar -> Cartesian: (r, theta) -> (r*cos(theta), r*sin(theta)).
together = [(np.array([math.cos(sample[1]) * sample[0],
                       math.sin(sample[1]) * sample[0]]), label)
            for sample, label in together]
# BUG FIX: np.array(together) on a list of (ndarray, float) pairs is a
# ragged/inhomogeneous construction and raises ValueError on NumPy >= 1.24;
# report the dataset size directly instead.
print('!', (len(together), 2))
print('?', together[0])

train_x = np.array([pair[0] for pair in together])
train_y = np.array([pair[1] for pair in together])
print(train_y.shape)
print(train_x.shape)

# Labels must be integers so they can index into the color string below.
train_y = train_y.astype(dtype=np.int32)
print(train_y)
print(max(train_y))
print(train_y.dtype)

# One color letter per sample: 'r' (red) for class 0, 'b' (blue) for class 1.
color_name = 'rb'
color_letter = [color_name[x] for x in train_y]
#plt.scatter(train_x[:, 0], train_x[:, 1], color=color_letter)
#plt.show()
# --- Network definition -----------------------------------------------------
# Fully-connected net: 2 inputs -> 20 hidden -> 20 hidden -> 1 output.
num_units = [2, 20, 20, 1]
num_layers = len(num_units) - 1

# Per-layer parameters keyed 'w_<i>' / 'b_<i>': small Gaussian weights
# (std 0.05) and zero biases.
var_dict = {}
for layer in range(num_layers):
    fan_in, fan_out = num_units[layer], num_units[layer + 1]
    var_dict['w_' + str(layer)] = np.random.normal(0, 0.05, (fan_in, fan_out))
    var_dict['b_' + str(layer)] = np.zeros((fan_out,))
def dense_forward(x, layer_id):
    """Affine (fully-connected) forward pass for layer `layer_id`.

    BUG FIX: the original referenced undefined names `w` and `b`, and
    returned a single value although the training loop unpacks two
    (`batch_x, mem = dense_forward(...)`).  Parameters are looked up in
    the module-level `var_dict` by layer id.

    Returns (output, mem) where mem caches the values a backward pass
    would need.
    """
    w = var_dict['w_' + str(layer_id)]
    b = var_dict['b_' + str(layer_id)]
    out = np.matmul(x, w) + b
    return out, (x, w, b)
def relu_forward(x, layer_id):
    """Element-wise ReLU: max(x, 0).

    BUG FIX: the original used `np.max(x, 0)`, which is a *reduction*
    along axis 0 (column-wise maximum), not an element-wise clamp —
    `np.maximum` is the element-wise op.  Also returns a backprop memory
    (the active mask) because the training loop unpacks two values.

    `layer_id` is unused but kept for a uniform layer-call signature.
    """
    out = np.maximum(x, 0)
    return out, (x > 0)
def sigmoid_forward(x):
    """Element-wise logistic sigmoid, 1 / (1 + exp(-x)).

    BUG FIX: the training loop unpacks two values
    (`batch_x, mem = sigmoid_forward(batch_x)`), so return the
    activation plus a backprop memory.  The memory is the output
    itself, since d(sigmoid)/dx = s * (1 - s).
    """
    out = 1.0 / (1.0 + np.exp(-x))
    return out, out
# --- Mini-batch forward pass ------------------------------------------------
# NOTE(review): only the forward pass is implemented here; the backprop
# caches are filled but never consumed (no loss/backward/update yet),
# and batch_y is sampled but unused.
batch_size = 32
for step in range(1000):
    # Sample a random mini-batch (with replacement).
    idx = np.random.randint(0, train_x.shape[0], batch_size)
    batch_x = train_x[idx]
    batch_y = train_y[idx]

    backprop_dense = []
    backprop_relu = []
    backprop_sigmoid = None  # BUG FIX: was a bare name -> NameError

    # BUG FIX: the inner loop reused `i`, clobbering the outer step counter.
    for layer in range(num_layers):
        batch_x, mem = dense_forward(batch_x, layer)
        backprop_dense.append(mem)
        if layer + 1 != num_layers:  # no ReLU after the final layer
            batch_x, mem = relu_forward(batch_x, layer)
            backprop_relu.append(mem)
    batch_x, mem = sigmoid_forward(batch_x)
    backprop_sigmoid = mem  # BUG FIX: typo `backpro_sigmoid`
print(data_0)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement