Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
#include <cmath>   // exp (used by transferFunction)
#include <cstdio>  // scanf (used by main)
#include <iostream>
#include "mat.h"
// Sigmoid steepness: f(x) = 1 / (1 + e^(-SLOPE * x)).
#define SLOPE 1
// Learning rate applied to every weight adjustment during training.
#define ETA 0.1
using namespace std;

// Forward declarations (definitions follow main()).
double transferFunction(double d);            // logistic sigmoid activation
double transferFunctionDerrivative(double d); // sigmoid derivative, given the OUTPUT value
double stepFunction(double d);                // hard 0/1 threshold at 0.5
Matrix addBias(Matrix m);                     // returns m with an extra all-ones column
- class NeuronLayer {
- public:
- NeuronLayer(int numNeurons, int numInputs) {
- matrix = new Matrix(numInputs, numNeurons);
- matrix->rand(0.0, 1.0);
- }
- Matrix *matrix;
- };
// Two-layer (one hidden layer) feed-forward network trained by
// backpropagation with a fixed learning rate ETA.
//
// NOTE(review): l1/l2 are raw owning pointers assigned in main() and never
// deleted -- acceptable for a run-once program, a leak in general.
// NOTE(review): several locals are written `Matrix x = new Matrix(...)`,
// i.e. a Matrix VALUE copy-initialised from a Matrix*. This only compiles
// if mat.h declares a converting Matrix(Matrix*) constructor; confirm
// whether that constructor takes ownership, otherwise each loop iteration
// leaks one heap Matrix.
class NeuralNetwork {
public:
    NeuronLayer *l1;  // input -> hidden weights ((numInputs+1) x numHiddenNodes)
    NeuronLayer *l2;  // hidden -> output weights ((numHiddenNodes+1) x numOutputs)

    // Trains by repeated full-batch forward/backward passes until the summed
    // squared output error drops to 0.01 or 100000 iterations have run.
    // `input` must already carry a bias column (see addBias); `target` holds
    // the desired outputs, one row per training sample.
    void train(Matrix input, Matrix target) {
        bool running = true;
        int count = 0;
        double sum = 0;
        while (running) {
            // Forward pass: hidden activations, then append a bias column so
            // the output layer sees a constant-1 input.
            Matrix l1Out = input.dot(l1->matrix);
            l1Out.map(transferFunction);
            l1Out = addBias(l1Out);
            Matrix l2Out = l1Out.dot(l2->matrix);
            l2Out.map(transferFunction);
            // Output-layer delta: (target - out) * f'(out).
            Matrix l2Error = new Matrix(target);
            l2Error.sub(l2Out);
            Matrix l2D = new Matrix(l2Out);
            l2D.map(transferFunctionDerrivative);
            Matrix l2Delta = l2Error.mul(l2D);
            // Backpropagate through the output weights to get the hidden
            // layer error/delta (dotT presumably multiplies by the transpose
            // of its argument -- confirm against mat.h).
            Matrix l1Error = l2Delta.dotT(l2->matrix);
            Matrix l1D = new Matrix(l1Out);
            l1D.map(transferFunctionDerrivative);
            Matrix l1Delta = l1Error.mul(l1D);
            // Weight gradients (Tdot presumably computes transpose(this).dot(arg)).
            Matrix l1Adjustment = input.Tdot(l1Delta);
            // Drop the last column: it corresponds to the hidden bias unit,
            // which has no incoming weights in l1.
            l1Adjustment = l1Adjustment.extract(0, 0, l1Adjustment.numRows(), l1Adjustment.numCols() - 1);
            Matrix l2Adjustment = l1Out.Tdot(l2Delta);
            // Gradient step: the error is (target - out), so ADDING the
            // scaled adjustments descends the squared-error surface.
            l1Adjustment.scalarMul(ETA);
            l2Adjustment.scalarMul(ETA);
            l1->matrix->add(l1Adjustment);
            l2->matrix->add(l2Adjustment);
            // sum error
            // Recompute (target - l2Out) from scratch: l2Error may have been
            // mutated by mul() above, so it is not reused here.
            sum = 0;
            Matrix error = new Matrix(target);
            error.sub(l2Out);
            for (int r = 0; r < error.numRows(); r++) {
                for (int c = 0; c < error.numCols(); c++) {
                    sum += error.get(r,c) * error.get(r, c);
                }
            }
            // Stop once the pre-update squared error is small enough or the
            // iteration budget is exhausted.
            running = sum > 0.01 && count++ < 100000;
        }
    }

    // Forward pass only: returns the raw sigmoid outputs (NOT thresholded;
    // callers apply stepFunction themselves). `input` must already carry a
    // bias column.
    Matrix think(Matrix input) {
        Matrix l1Out = input.dot(l1->matrix);
        l1Out.map(transferFunction);
        l1Out = addBias(l1Out);
        Matrix l2Out = l1Out.dot(l2->matrix);
        l2Out.map(transferFunction);
        return l2Out;
    }
};
// Reads the network shape and a training set from stdin, trains the
// network, then reads a test set, prints target vs. predicted values and a
// 2x2 confusion matrix.
//
// Expected stdin: "numInputs numHiddenNodes numClasses", then two matrices
// in whatever format Matrix::read() consumes -- each row holding numInputs
// feature columns followed by the target columns.
int main() {
    initRand();  // seed the RNG behind Matrix::rand (declared in mat.h)
    int numInputs, numHiddenNodes, numClasses, numOutputs;
    // NOTE(review): scanf return value unchecked; numClasses is read but
    // never used afterwards.
    scanf("%d %d %d", &numInputs, &numHiddenNodes, &numClasses);
    Matrix raw;
    raw.read();  // training data
    numOutputs = raw.numCols() - numInputs;
    Matrix input = raw.extract(0,0,raw.numRows(), numInputs);
    input.setName("input");
    input = addBias(input);  // append the constant-1 bias column
    // Presumably extract(r, c, 0, 0) means "through the last row/column" --
    // confirm against mat.h.
    Matrix target = raw.extract(0, numInputs, 0, 0);
    target.setName("target");
    // NOTE(review): nn, l1 and l2 are heap-allocated and never freed;
    // harmless here since the process exits right after.
    auto nn = new NeuralNetwork();
    nn->l1 = new NeuronLayer(numHiddenNodes, numInputs + 1);   // +1: input bias
    nn->l2 = new NeuronLayer(numOutputs, numHiddenNodes + 1);  // +1: hidden bias
    nn->train(input, target);
    raw.read();  // second matrix on stdin: the test set
    Matrix testInput = raw.extract(0, 0, raw.numRows(), numInputs);
    testInput = addBias(testInput);
    Matrix testTarget = raw.extract(0, numInputs, 0, 0);
    Matrix testOutput = nn->think(testInput);
    testOutput.map(stepFunction);  // squash sigmoid outputs to hard 0/1 labels
    // NOTE(review): this prints the TRAINING targets next to the TEST
    // predictions -- testTarget looks like the intended matrix; confirm.
    target.printfmt("Target", "%.4f ", false);
    testOutput.printfmt("Predicted", "%.4f ", false);
    // 2x2 confusion matrix: rows = actual (0/1), cols = predicted (0/1).
    // NOTE(review): same `= new Matrix(...)` pointer copy-init pattern as in
    // NeuralNetwork::train -- confirm mat.h supports it.
    Matrix confusionMatrix = new Matrix(2, 2);
    confusionMatrix.map([](double d) -> double {return 0;});  // zero it out
    for (int r = 0; r < testTarget.numRows(); r++) {
        for (int c = 0; c < testTarget.numCols(); c++) {
            double actual = testTarget.get(r, c);
            double predicted = testOutput.get(r, c);
            // (int) binds tighter than ?:, so `actual` is cast first and
            // then compared -- equivalent to (actual == 1 ? 1 : 0).
            int row = (int) actual == 1 ? 1 : 0;
            int col = (int) predicted == 1 ? 1 : 0;
            confusionMatrix.inc(row, col);
        }
    }
    confusionMatrix.printfmt("Confusion Matrix", "%.4f ", false);
}
- double transferFunction(double d) {
- return 1.0/(1.0 + exp(-SLOPE*d));
- }
- double transferFunctionDerrivative(double d) {
- return SLOPE * d * (1 - d);
- }
// Hard threshold at 0.5: squashes a sigmoid output to a 0/1 class label.
double stepFunction(double d) {
    if (d >= 0.5) {
        return 1.0;
    }
    return 0.0;
}
// Returns a copy of m with one extra column appended whose every entry is
// 1.0 -- the constant bias input for the next layer. The copy keeps m's name.
// NOTE(review): `Matrix matrix = new Matrix(...)` copy-initialises a value
// from a Matrix*; this relies on a converting constructor in mat.h and may
// leak the heap allocation -- confirm.
Matrix addBias(Matrix m) {
    Matrix matrix = new Matrix(m.numRows(), m.numCols() + 1);
    matrix.setName(m.getName());
    // Copy the original entries...
    for (int r = 0; r < m.numRows(); r++) {
        for (int c = 0; c < m.numCols(); c++) {
            matrix.set(r, c, m.get(r, c));
        }
    }
    // ...then set every entry of the new last column to 1.0.
    matrix.mapCol(m.numCols(), [](double d) -> double {return 1.0;});
    return matrix;
}
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement