Unhandled exception at 0x7712A9F2 in eye_tracking.exe: Microsoft C++ exception: std::future_error at memory location 0x010FEA50.
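For context: a std::future_error with code std::future_errc::broken_promise is thrown by future::get() when the associated std::promise was destroyed before set_value() or set_exception() was called on it, which is exactly what the original [&] capture below causes. A minimal, self-contained sketch reproducing just the symptom (hypothetical example, not the project's code):

#include <future>
#include <iostream>

int main()
{
    std::future<int> f;
    {
        std::promise<int> p;
        f = p.get_future();
    }                        //p destroyed here with no value set -> shared state abandoned

    try {
        f.get();             //throws std::future_error (broken_promise)
    } catch (const std::future_error& e) {
        std::cout << e.code().message() << '\n';
    }
}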
//CONCURRENCE
std::vector<costGrad*> threadGrads;
std::vector<std::thread> threads;
std::vector<std::future<costGrad*>> ftr(maxThreads);

for (int i = 0; i < maxThreads; i++) //Creating threads
{
    int start = (int)floor(xValsB.rows() / (double)maxThreads * i);
    int end = (int)floor(xValsB.rows() / (double)maxThreads * (i + 1));
    int length = end - start;
    std::promise<costGrad*> prms;
    ftr[i] = prms.get_future();
    //FIX: the original [&] capture left prms (and start/length) dangling once the
    //loop iteration ended, so each promise was destroyed unfulfilled and every
    //ftr[i].get() threw std::future_error (broken_promise). Move the promise into
    //the lambda and copy the per-thread bounds instead.
    threads.push_back(std::thread(
        [prms = std::move(prms), start, length, &params, &xValsB, &yVals, lambda, m]() mutable {
            costThread(std::move(prms), params,
                       xValsB.block(start, 0, length, xValsB.cols()),
                       yVals.block(start, 0, length, yVals.cols()),
                       lambda, m);
        }));
}

for (int i = 0; i < maxThreads; i++) //Collecting futures
    threadGrads.push_back(ftr[i].get()); //<------- I THINK THIS IS WHERE I'M MESSING UP (this is where the broken_promise surfaced)

for (int i = 0; i < maxThreads; i++) //Joining threads
    threads[i].join();
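An equivalent fix, shown as a sketch replacing the threads.push_back(...) call inside the loop above, is to skip the lambda entirely and let std::thread's constructor move the promise (and copies of the data slices) into the new thread's own storage; std::cref forwards the shared, read-only parameter list by reference. The xChunk/yChunk locals are introduced here purely for illustration:

Eigen::MatrixXd xChunk = xValsB.block(start, 0, length, xValsB.cols());
Eigen::MatrixXd yChunk = yVals.block(start, 0, length, yVals.cols());
threads.push_back(std::thread(costThread, std::move(prms), std::cref(params),
                              std::move(xChunk), std::move(yChunk), lambda, m));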
void costThread(std::promise<costGrad*>&& pmrs, const std::vector<Eigen::MatrixXd>& params,
                const Eigen::MatrixXd& xValsB, const Eigen::MatrixXd& yVals,
                const double lambda, const int m)
{
    try
    {
        costGrad* temp = new costGrad; //"Cost / Gradient" struct to be returned at the end
                                       //(note: leaks if an exception fires below; a
                                       //std::unique_ptr would be safer)

        temp->forw = 0;
        temp->back = 0;

        std::vector<Eigen::MatrixXd> matA;       //Activation values including bias; first entry is xVals
        std::vector<Eigen::MatrixXd> matAb;      //Activation values excluding bias; first entry is xValsB
        std::vector<Eigen::MatrixXd> matZ;       //Pre-sigmoid activation values
        std::vector<Eigen::MatrixXd> paramTrunc; //Parameters excluding the bias terms

        clock_t t1, t2, t3;
        t1 = clock();

        //FORWARD PROPAGATION PREP

        Eigen::MatrixXd xVals = Eigen::MatrixXd::Constant(xValsB.rows(), xValsB.cols() + 1, 1); //Prepend a bias column onto xValsB
        xVals.block(0, 1, xValsB.rows(), xValsB.cols()) = xValsB;

        matA.push_back(xVals);
        matAb.push_back(xValsB);

        //FORWARD PROPAGATION

        for (int i = 0; i < params.size(); i++)
        {
            Eigen::MatrixXd paramTemp = params[i].block(0, 1, params[i].rows(), params[i].cols() - 1); //Setting up paramTrunc

            paramTrunc.push_back(paramTemp);

            matZ.push_back(matA.back() * params[i].transpose());
            matAb.push_back(sigmoid(matZ.back()));

            Eigen::MatrixXd tempA = Eigen::MatrixXd::Constant(matAb.back().rows(), matAb.back().cols() + 1, 1); //Add bias units
            tempA.block(0, 1, matAb.back().rows(), matAb.back().cols()) = matAb.back();

            matA.push_back(tempA);
        }

        t2 = clock();

        //COST CALCULATION (cross-entropy over the m examples)

        temp->J = (yVals.array() * (0 - log(matAb.back().array()))
                   - (1 - yVals.array()) * log(1 - matAb.back().array())).sum() / m;

        //BACK PROPAGATION

        std::vector<Eigen::MatrixXd> del;
        std::vector<Eigen::MatrixXd> grad;

        del.push_back(matAb.back() - yVals); //Output-layer error

        for (int i = 0; i < params.size() - 1; i++) //Propagate the error backwards through the hidden layers
        {
            del.push_back((del.back() * paramTrunc[paramTrunc.size() - 1 - i]).array()
                          * sigmoidGrad(matZ[matZ.size() - 2 - i]).array());
        }
        for (int i = 0; i < params.size(); i++) //del was built back-to-front, so pop from the back
        {
            grad.push_back(del.back().transpose() * matA[i] / m);
            del.pop_back();
        }
        for (int i = 0; i < params.size(); i++) //Regularize everything except the bias column
        {
            int rws = grad[i].rows();
            int cls = grad[i].cols() - 1;
            Eigen::MatrixXd tmp = grad[i].block(0, 1, rws, cls);
            grad[i].block(0, 1, rws, cls) = tmp.array() + lambda / m * paramTrunc[i].array();
        }

        temp->grad = grad;

        t3 = clock();

        temp->forw = ((float)t2 - (float)t1) / 1000;
        temp->back = ((float)t3 - (float)t2) / 1000;

        pmrs.set_value(temp); //Fulfil the promise; ftr[i].get() in the main thread unblocks here
    }
    catch (...)
    {
        pmrs.set_exception(std::current_exception()); //Hand the exception to ftr[i].get() instead
    }
}
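Since costThread exists only to produce a costGrad* or propagate an exception, the promise/thread machinery could also be dropped in favour of std::async, which wires the return value and any thrown exception into the future automatically. A minimal sketch, assuming a refactored costThreadFn (hypothetical name) that returns costGrad* instead of taking a promise:

std::vector<std::future<costGrad*>> ftr;
for (int i = 0; i < maxThreads; i++)
{
    int start = (int)floor(xValsB.rows() / (double)maxThreads * i);
    int length = (int)floor(xValsB.rows() / (double)maxThreads * (i + 1)) - start;
    ftr.push_back(std::async(std::launch::async, costThreadFn, std::cref(params),
                             Eigen::MatrixXd(xValsB.block(start, 0, length, xValsB.cols())),
                             Eigen::MatrixXd(yVals.block(start, 0, length, yVals.cols())),
                             lambda, m));
}
for (auto& f : ftr)
    threadGrads.push_back(f.get()); //blocks until done; rethrows anything costThreadFn threw
//No explicit join: futures from std::async wait for their task on destruction.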