vector<float> forward(vector<float> input)
{
    unsigned in_size = input.size();
    unsigned matrix_size = matrix.size();
    // Load the input vector into the first layer.
    for (unsigned i = 0; i < in_size; i++)
        matrix[0][i].input = input[i];
    // Propagate layer by layer: add the bias, activate, then feed each
    // neuron's output forward along its outgoing bonds.
    for (unsigned i = 0; i < matrix_size; i++)
    {
        unsigned next_layer = i + 1;
        unsigned layer_size = matrix[i].size();
        for (unsigned j = 0; j < layer_size; j++)
        {
            matrix[i][j].input += matrix[i][j].bias;
            matrix[i][j].output = leaky_relu(matrix[i][j].input);
            unsigned bond_size = matrix[i][j].bond.size(); // empty on the last layer
            for (unsigned k = 0; k < bond_size; k++)
                matrix[next_layer][k].input += matrix[i][j].output * matrix[i][j].bond[k].weight;
        }
    }
    // Collect the activations of the last layer as the network's output.
    unsigned last_layer = matrix_size - 1;
    unsigned out_size = matrix[last_layer].size();
    vector<float> output(out_size);
    for (unsigned i = 0; i < out_size; i++)
        output[i] = matrix[last_layer][i].output;
    return output;
}
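
// The activation helpers leaky_relu() and leaky_relu_deriv() are called
// above but not included in the paste. A minimal sketch of what they
// presumably look like, assuming the conventional 0.01 negative slope
// (the slope actually used is not shown):
float leaky_relu(float x)
{
    return x > 0 ? x : 0.01f * x; // pass positives through, damp negatives
}
float leaky_relu_deriv(float x)
{
    // Since leaky ReLU preserves sign, the derivative can be read off the
    // activation's output just as well as its pre-activation input.
    return x > 0 ? 1.0f : 0.01f;
}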
float backward(vector<float> target)
{
    float error = 0;
    unsigned last_layer = matrix.size() - 1;
    unsigned target_size = target.size();
    // Output-layer gradients: signed error scaled by the activation derivative.
    for (unsigned i = 0; i < target_size; i++)
    {
        matrix[last_layer][i].gradient = (target[i] - matrix[last_layer][i].output) * leaky_relu_deriv(matrix[last_layer][i].output);
        error += fabs(matrix[last_layer][i].gradient);
    }
    error /= target_size + !target_size; // "+ !target_size" avoids dividing by zero when target is empty
    // Backpropagate: each hidden neuron accumulates the next layer's
    // gradients, weighted by its outgoing bonds.
    for (int i = (int)last_layer - 1; i >= 0; i--)
    {
        unsigned layer_size = matrix[i].size();
        unsigned next_layer = i + 1;
        for (unsigned j = 0; j < layer_size; j++)
        {
            float delta = 0;
            unsigned bond_size = matrix[i][j].bond.size();
            for (unsigned k = 0; k < bond_size; k++)
                delta += matrix[i][j].bond[k].weight * matrix[next_layer][k].gradient;
            matrix[i][j].gradient = delta * leaky_relu_deriv(matrix[i][j].output);
        }
    }
    // Gradient step on biases and weights; the gradient already carries the
    // sign of (target - output), so the updates are added.
    for (unsigned i = 0; i < last_layer; i++)
    {
        unsigned layer_size = matrix[i].size();
        unsigned next_layer = i + 1;
        for (unsigned j = 0; j < layer_size; j++)
        {
            matrix[i][j].bias += learning_rate * matrix[i][j].gradient;
            unsigned bond_size = matrix[i][j].bond.size();
            for (unsigned k = 0; k < bond_size; k++)
                matrix[i][j].bond[k].weight += learning_rate * matrix[next_layer][k].gradient * matrix[i][j].output;
        }
    }
    return error;
}
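
// forward() and backward() above are member functions of a NET class whose
// definition is not in the paste. A minimal sketch of the missing
// scaffolding, inferred from the member names used above; the NEURON/BOND
// layout, the weight initialization, and the 0.01 learning rate are
// assumptions, not the original author's code:
#include <cmath>    // fabs
#include <cstdlib>  // rand, RAND_MAX
#include <iostream> // cout
#include <vector>
using namespace std;

struct BOND { float weight; };

struct NEURON
{
    float input = 0, output = 0, bias = 0, gradient = 0;
    vector<BOND> bond; // one outgoing connection per neuron in the next layer
};

struct NET
{
    vector< vector<NEURON> > matrix; // matrix[layer][neuron]
    float learning_rate = 0.01f;     // assumed value; not shown in the paste

    NET(const vector<unsigned> &topology)
    {
        matrix.resize(topology.size());
        for (unsigned i = 0; i < topology.size(); i++)
        {
            matrix[i].resize(topology[i]);
            if (i + 1 == topology.size())
                continue; // the output layer has no outgoing bonds
            for (unsigned j = 0; j < topology[i]; j++)
            {
                matrix[i][j].bond.resize(topology[i + 1]);
                for (unsigned k = 0; k < topology[i + 1]; k++)
                    matrix[i][j].bond[k].weight = (float)rand() / RAND_MAX - 0.5f;
            }
        }
    }

    // forward() accumulates into .input, so the accumulators have to be
    // zeroed between training steps; this is what cleanup() is for.
    void cleanup()
    {
        for (unsigned i = 0; i < matrix.size(); i++)
            for (unsigned j = 0; j < matrix[i].size(); j++)
                matrix[i][j].input = 0;
    }

    // forward() and backward() from the paste belong here as members.
};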
// Train on an XOR-style task: the target is 1 when exactly one of the two
// inputs exceeds 0.5, and 0 otherwise.
vector<unsigned> topology(4);
topology[0] = 2; // input layer
topology[1] = 4; // hidden layer
topology[2] = 4; // hidden layer
topology[3] = 1; // output layer
NET network(topology);
unsigned count = 0;
while (true)
{
    vector<float> input(2);
    input[0] = (float)rand() / RAND_MAX;
    input[1] = (float)rand() / RAND_MAX;
    vector<float> target(1);
    target[0] = (input[0] > 0.5 && input[1] < 0.5) || (input[0] < 0.5 && input[1] > 0.5);
    vector<float> output = network.forward(input);
    network.backward(target);
    float error = 0;
    for (unsigned i = 0; i < output.size(); i++)
        error += fabs(target[i] - output[i]);
    if (count % 1000 == 0)
        cout << error << endl;
    network.cleanup(); // zero the accumulated neuron inputs before the next pass
    count++;
}
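
// With the sketched scaffolding above, the training loop runs as an
// ordinary main() body. Assuming the file is saved as xor_net.cpp (a
// hypothetical name), it builds with a plain C++11 compile, and the
// printed error should trend toward zero as the network learns the
// XOR-style rule:
//   g++ -std=c++11 xor_net.cpp -o xor_net && ./xor_net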