Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
#include <cmath>    // exp (used by tf)
#include <cstdio>   // scanf
#include <cstdlib>  // srand
#include <ctime>    // time
#include <iostream>
#include <random>

#include "Eigen/Dense"

#define SLOPE 4
#define ETA 0.1
#define THRESHOLD 0.01
#define RANDOM true
using Eigen::MatrixXd;
using namespace std;
// Network weights, shared globals filled in by main():
//   v: (numInputs + 1)  x numHiddenNodes — bias row first, input -> hidden
//   w: (numHiddenNodes + 1) x numOutputs — bias row first, hidden -> output
MatrixXd v;
MatrixXd w;
// Uniformly random matrix in [min, max) — used to initialize v and w.
MatrixXd randomUniform(int rows, int cols, double min, double max);
// Sum of squared element-wise differences between two same-shaped matrices.
double sumSquaredError(const Eigen::Ref<const MatrixXd> target, const Eigen::Ref<const MatrixXd> predicted);
// Sigmoid transfer function and its derivative. NOTE: dtf takes the
// sigmoid's OUTPUT value (an activation), not the raw pre-activation input.
double tf(double d);
double dtf(double d);
- int main() {
- srand((unsigned int) time(0));
- int numInputs, numHiddenNodes, numClasses, numOutputs, numRows, numCols;
- long n; //Temp storage variable
- scanf("%d %d %d", &numInputs, &numHiddenNodes, &numClasses);
- scanf("%d %d", &numRows, &numCols);
- numOutputs = numCols - numInputs;
- MatrixXd raw(numRows, numCols);
- for (int r = 0; r < numRows; r++) {
- for (int c = 0; c < numCols; c++) {
- double d;
- scanf("%lf", &d);
- raw(r, c) = d;
- }
- }
- MatrixXd x = raw.block(0, 0, numRows, numInputs);
- MatrixXd xb(numRows, numInputs + 1);
- n = x.cols();
- xb << Eigen::VectorXd::Constant(x.rows(), -1.0), x.leftCols(n);
- MatrixXd t = raw.block(0, numInputs, numRows, numOutputs);
- v = randomUniform(numInputs + 1, numHiddenNodes, 0, 1.0);
- w = randomUniform(numHiddenNodes + 1, numOutputs, 0, 1.0);
- // Enter training loop
- int count = 0;
- double sum = 1;
- while (count++ < 100000 && sum > THRESHOLD) {
- MatrixXd h = xb * v;
- h = h.unaryExpr(&tf).eval();
- MatrixXd hb(h.rows(), h.cols() + 1);
- n = h.cols();
- hb << Eigen::VectorXd::Constant(h.rows(),-1.0), h.leftCols(n);
- MatrixXd y = hb * w;
- y = y.unaryExpr(&tf).eval();
- sum = sumSquaredError(t, y);
- if (count % 1000 == 0) cout << "Current Sum: " << sum << endl;
- MatrixXd dy = (t-y).cwiseProduct(y.unaryExpr(&dtf));
- MatrixXd dhb = hb.unaryExpr(&dtf).cwiseProduct(dy * w.transpose());
- w += (ETA * (hb.transpose() * dy).array()).matrix();
- MatrixXd dh(dhb.rows(), dhb.cols() - 1);
- dh << dhb.leftCols(dhb.cols() - 1);
- v += (ETA * (xb.transpose() * dh).array()).matrix();
- }
- int numTestRows, numTestCols;
- scanf("%d %d", &numTestRows, &numTestCols);
- MatrixXd testRaw(numTestRows, numTestCols);
- for (int r = 0; r < numTestRows; r++) {
- for (int c = 0; c < numTestCols; c++) {
- double d;
- scanf("%lf", &d);
- testRaw(r, c) = d;
- }
- }
- MatrixXd testX = testRaw.block(0, 0, numTestRows, numInputs);
- MatrixXd testXb(testX.rows(), testX.cols() + 1);
- testXb << Eigen::VectorXd::Constant(testX.rows(), -1.0), testX.leftCols(testX.cols());
- MatrixXd testT = raw.block(0, numInputs, numTestRows, numOutputs);
- MatrixXd testH = testXb * v;
- testH = testH.unaryExpr(&tf).eval();
- MatrixXd testHb(testH.rows(), testH.cols() + 1);
- testHb << Eigen::VectorXd::Constant(testH.rows(), -1.0), testH.leftCols(testH.cols());
- MatrixXd testY = testHb * w;
- testY = testY.unaryExpr(&tf).eval();
- cout << "Target" << endl;
- cout << testT << endl;
- cout << "Predicted" << endl;
- cout << testY << endl;
- cout << "Error on Test Data: " << sumSquaredError(testT, testY) << endl;
- return 0;
- }
- double tf(double d) {
- return 1.0/(1+exp(-SLOPE*d));
- }
// Derivative of the logistic sigmoid expressed through its output value:
// f'(x) = f(x) * (1 - f(x)). The argument d is an activation (i.e. tf(x)),
// not the raw pre-activation input.
double dtf(double d) {
    const double activation = d;
    return (1.0 - activation) * activation;
}
- double sumSquaredError(const Eigen::Ref<const MatrixXd> target, const Eigen::Ref<const MatrixXd> predicted) {
- MatrixXd error = target-predicted;
- error = error.cwiseProduct(error);
- return error.sum();
- }
- MatrixXd randomUniform(int rows, int cols, double min, double max) {
- MatrixXd out(rows, cols);
- unsigned int seed = 0;
- if (RANDOM) {
- std::random_device rd;
- seed = rd();
- cout << seed << endl;
- }
- std::mt19937 generator(seed);
- std::uniform_real_distribution<double> distribution(min, max);
- for (int r = 0; r < rows; r++) {
- for (int c = 0; c < cols; c++) {
- out(r, c) = distribution(generator);
- }
- }
- return out;
- }
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement