#include <iostream>
#include <string>
#include <vector>

using namespace std;

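// Output helpers: tensors are printed flattened, channel-major (all of
// channel 0, then channel 1, and so on), followed by a single newline.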
void print2DMatrix(const vector<vector<double>> &Z) {
    for (size_t h = 0; h < Z.size(); ++h) {
        for (size_t w = 0; w < Z[0].size(); ++w) {
            cout << Z[h][w] << ' ';
        }
    }
    cout << endl;
}

void print3DMatrix(const vector<vector<vector<double>>> &Z) {
    for (size_t c = 0; c < Z[0][0].size(); ++c) {
        for (size_t h = 0; h < Z.size(); ++h) {
            for (size_t w = 0; w < Z[0].size(); ++w) {
                cout << Z[h][w][c] << ' ';
            }
        }
    }
    cout << endl;
}

void print4DMatrix(const vector<vector<vector<vector<double>>>> &W) {
    for (size_t c_new = 0; c_new < W[0][0][0].size(); ++c_new) {
        for (size_t c_prev = 0; c_prev < W[0][0].size(); ++c_prev) {
            for (size_t h = 0; h < W.size(); ++h) {
                for (size_t w = 0; w < W[0].size(); ++w) {
                    cout << W[h][w][c_prev][c_new] << ' ';
                }
            }
        }
    }
    cout << endl;
}

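// One node of the network; only the fields relevant to the layer's type are used.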
struct layerDesc {
    string type;    // "start", "relu", "pool", "bias", "cnvm", "cnve" or "cnvc"
    double alpha;   // leaky ReLU slope
    int S;          // stride ("cnv*") or window size ("pool")

    int H;          // number of output channels ("cnv*")
    int K;          // kernel size ("cnv*")
    int P;          // padding ("cnv*")

    vector<double> biases;
    vector<double> db;

    vector<vector<vector<vector<double>>>> W;   // filters, indexed [kh][kw][c_prev][c_new]
    vector<vector<vector<vector<double>>>> dW;

    vector<vector<vector<double>>> A;    // forward activation, indexed [h][w][c]
    vector<vector<vector<double>>> dZ;   // gradient w.r.t. this layer's output
};

int N, D, L;                           // input side length, input depth, number of layers
vector<vector<vector<double>>> input;  // N x N x D input tensor, indexed [h][w][c]
vector<layerDesc *> network;           // network[0] is a pseudo-layer holding the input

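// Forward pass of the "bias" layer: adds a per-channel bias to every spatial position.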
vector<vector<vector<double>>> bias(vector<vector<vector<double>>> &A, vector<double> &biases) {
    vector<vector<vector<double>>> res(A.size(),
                                       vector<vector<double>>(A[0].size(),
                                                              vector<double>(A[0][0].size())));
    for (size_t h = 0; h < A.size(); ++h) {
        for (size_t w = 0; w < A[0].size(); ++w) {
            for (size_t c = 0; c < A[0][0].size(); ++c) {
                res[h][w][c] = A[h][w][c] + biases[c];
            }
        }
    }
    return res;
}

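// Backward pass of the "bias" layer: db[c] is the sum of dZ over all spatial
// positions of channel c; the gradient w.r.t. the input is dZ itself, since
// the layer only adds a constant per channel.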
void biasBack(int layerNum) {
    auto dZ = network[layerNum]->dZ;
    auto biases = network[layerNum]->biases;

    int h_new = dZ.size();
    int w_new = dZ[0].size();
    int c_new = dZ[0][0].size();

    vector<double> db(biases.size(), 0.);

    for (int h = 0; h < h_new; ++h) {
        for (int w = 0; w < w_new; ++w) {
            for (int c = 0; c < c_new; ++c) {
                db[c] += dZ[h][w][c];
            }
        }
    }

    network[layerNum - 1]->dZ = dZ; // addition: the gradient passes through unchanged
    network[layerNum]->db = db;
}

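// Maximum of A over the window [vert_start, vert_end) x [horiz_start, horiz_end)
// of channel c.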
double matrixMax(vector<vector<vector<double>>> &A,
                 int vert_start, int vert_end, int horiz_start, int horiz_end, int c) {
    double ans = A[vert_start][horiz_start][c];

    for (int h = vert_start; h < vert_end; ++h) {
        for (int w = horiz_start; w < horiz_end; ++w) {
            ans = A[h][w][c] > ans ? A[h][w][c] : ans;
        }
    }

    return ans;
}

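// Max pooling with a square f x f window and stride f (non-overlapping windows),
// applied independently to each channel.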
vector<vector<vector<double>>> pool(vector<vector<vector<double>>> &A, int f) {
    int h_prev = A.size();
    int w_prev = A[0].size();
    int c_prev = A[0][0].size();

    int h_new = 1 + (h_prev - f) / f;
    int w_new = 1 + (w_prev - f) / f;
    int c_new = c_prev;

    vector<vector<vector<double>>> res(h_new,
                                       vector<vector<double>>(w_new, vector<double>(c_new, 0.)));

    for (int h = 0; h < h_new; ++h) {
        int vert_start = h * f;
        int vert_end = vert_start + f;

        for (int w = 0; w < w_new; ++w) {
            int horiz_start = w * f;
            int horiz_end = horiz_start + f;

            for (int c = 0; c < c_new; ++c) {
                res[h][w][c] = matrixMax(A, vert_start, vert_end, horiz_start, horiz_end, c);
            }
        }
    }
    return res;
}

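// Builds an f x f 0/1 mask for one pooling window of channel cFix: cells equal
// to the window maximum get 1. If several cells tie for the maximum, all of
// them get 1, so the upstream gradient is routed to each of them.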
vector<vector<double>> getPoolMaxMask(vector<vector<vector<double>>> &X,
                                      int vert_start, int horiz_start, int f, int cFix) {
    vector<vector<double>> res(f, vector<double>(f, 0.));

    double maxVal = X[vert_start][horiz_start][cFix];
    for (int h = vert_start; h < vert_start + f; ++h) {
        for (int w = horiz_start; w < horiz_start + f; ++w) {
            maxVal = X[h][w][cFix] > maxVal ? X[h][w][cFix] : maxVal;
        }
    }

    for (int h = 0; h < f; ++h) {
        for (int w = 0; w < f; ++w) {
            res[h][w] = X[vert_start + h][horiz_start + w][cFix] == maxVal ? 1. : 0.;
        }
    }

    return res;
}

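// Scatters dZhwc (the upstream gradient of one pooled cell) into dAprev,
// weighted by the 0/1 mask of the corresponding window.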
void mulMaskMatrix(vector<vector<vector<double>>> &dAprev,
                   double dZhwc, vector<vector<double>> &mask,
                   int vert_start, int horiz_start, int f, int cFix) {
    for (int h = vert_start; h < vert_start + f; ++h) {
        for (int w = horiz_start; w < horiz_start + f; ++w) {
            dAprev[h][w][cFix] += dZhwc * mask[h - vert_start][w - horiz_start];
        }
    }
}

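// Backward pass of max pooling: for every output cell, recompute which inputs
// held the window maximum and route the upstream gradient to them.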
void poolBack(int layerNum) {
    auto Aprev = network[layerNum - 1]->A;
    auto dZ = network[layerNum]->dZ;
    int f = network[layerNum]->S;

    int h_prev = Aprev.size();
    int w_prev = Aprev[0].size();
    int c_prev = Aprev[0][0].size();

    int h_new = dZ.size();
    int w_new = dZ[0].size();
    int c_new = dZ[0][0].size();

    vector<vector<vector<double>>> dAprev(h_prev,
                                          vector<vector<double>>(w_prev, vector<double>(c_prev, 0.)));

    for (int h = 0; h < h_new; ++h) {
        for (int w = 0; w < w_new; ++w) {
            for (int c = 0; c < c_new; ++c) {
                int vert_start = h * f;
                int horiz_start = w * f;

                auto mask = getPoolMaxMask(Aprev, vert_start, horiz_start, f, c);
                mulMaskMatrix(dAprev, dZ[h][w][c], mask, vert_start, horiz_start, f, c);
            }
        }
    }

    network[layerNum - 1]->dZ = dAprev;
}

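// Leaky ReLU forward pass: x for x >= 0, alpha * x otherwise.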
vector<vector<vector<double>>> relu(vector<vector<vector<double>>> &A, double alpha) {
    vector<vector<vector<double>>> res(A.size(),
                                       vector<vector<double>>(A[0].size(),
                                                              vector<double>(A[0][0].size())));
    for (size_t h = 0; h < A.size(); ++h) {
        for (size_t w = 0; w < A[0].size(); ++w) {
            for (size_t c = 0; c < A[0][0].size(); ++c) {
                res[h][w][c] = A[h][w][c] >= 0 ? A[h][w][c] : alpha * A[h][w][c];
            }
        }
    }
    return res;
}

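// Leaky ReLU backward pass: the upstream gradient is scaled by alpha wherever
// the layer's input was negative and passed through unchanged otherwise.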
void reluBack(int layerNum) {
    auto A = network[layerNum - 1]->A;
    auto dZ = network[layerNum]->dZ;
    double alpha = network[layerNum]->alpha;

    vector<vector<vector<double>>> dA(dZ.size(),
                                      vector<vector<double>>(dZ[0].size(), vector<double>(dZ[0][0].size())));

    for (size_t h = 0; h < dZ.size(); ++h) {
        for (size_t w = 0; w < dZ[0].size(); ++w) {
            for (size_t c = 0; c < dZ[0][0].size(); ++c) {
                dA[h][w][c] = A[h][w][c] < 0 ? alpha * dZ[h][w][c] : dZ[h][w][c];
            }
        }
    }

    network[layerNum - 1]->dZ = dA;
}

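// Pads A by `pad` cells on each side. The padding mode is encoded in the layer
// type: "cnvm" mirrors without repeating the border (reflect), "cnve" repeats
// the border value (edge), "cnvc" wraps around (cyclic). For pad = 2, a row
// [a b c d] becomes:
//   cnvm: c b | a b c d | c b
//   cnve: a a | a b c d | d d
//   cnvc: c d | a b c d | a b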
vector<vector<vector<double>>> fillPad(vector<vector<vector<double>>> &A, int pad, const string& type) {
    vector<vector<vector<double>>> res(A.size() + 2 * pad,
                                       vector<vector<double>>(A[0].size() + 2 * pad,
                                                              vector<double>(A[0][0].size())));

    // copy A into the center
    for (size_t c = 0; c < A[0][0].size(); ++c) {
        for (size_t i = 0; i < A.size(); ++i) {
            for (size_t j = 0; j < A[0].size(); ++j) {
                res[i + pad][j + pad][c] = A[i][j][c];
            }
        }
    }

    if (type == "cnvm") {
        // mirror padding: reflect across the border without repeating it
        for (size_t c = 0; c < res[0][0].size(); ++c) {
            for (size_t i = 0; i < res.size(); ++i) {
                for (size_t j = 0; j < res[0].size(); ++j) {
                    size_t ii = i < pad ? pad - i : i < res.size() - pad ? i - pad :
                                A.size() - 2 - (pad - (res.size() - i));
                    size_t jj = j < pad ? pad - j : j < res[0].size() - pad ? j - pad :
                                A[0].size() - 2 - (pad - (res[0].size() - j));
                    res[i][j][c] = A[ii][jj][c];
                }
            }
        }

    } else if (type == "cnve") {
        // edge padding: replicate the border values
        for (size_t c = 0; c < A[0][0].size(); ++c) {
            // top and bottom rows, corners excluded
            for (size_t i = 0; i < pad; ++i) {
                for (size_t j = pad; j < res[0].size() - pad; ++j) {
                    res[i][j][c] = A[0][j - pad][c];
                    res[res.size() - 1 - i][j][c] = A[A.size() - 1][j - pad][c];
                }
            }

            // left and right columns, corners included
            for (size_t i = 0; i < res.size(); ++i) {
                for (size_t j = 0; j < pad; ++j) {
                    res[i][j][c] = res[i][pad][c];
                    res[i][res[0].size() - 1 - j][c] = res[i][res[0].size() - 1 - pad][c];
                }
            }
        }

    } else if (type == "cnvc") {
        // cyclic padding: wrap around to the opposite side
        for (size_t c = 0; c < A[0][0].size(); ++c) {
            // top and bottom rows, corners excluded
            for (size_t i = 0; i < pad; ++i) {
                for (size_t j = pad; j < res[0].size() - pad; ++j) {
                    res[i][j][c] = A[A.size() - pad + i][j - pad][c];
                    res[res.size() - 1 - i][j][c] = A[pad - 1 - i][j - pad][c];
                }
            }

            // left and right columns, corners included
            for (size_t i = 0; i < res.size(); ++i) {
                for (size_t j = 0; j < pad; ++j) {
                    res[i][j][c] = res[i][res[0].size() - 2 * pad + j][c];
                    res[i][res[0].size() - 1 - j][c] = res[i][2 * pad - 1 - j][c];
                }
            }
        }
    }

    return res;
}

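// Inverse of fillPad, used in the backward pass: folds the gradient of a
// padded tensor back onto the unpadded one. Each padded cell held a copy of
// some original cell, so its gradient is accumulated (+=) into that cell via
// the same index mapping as the corresponding padding mode.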
vector<vector<vector<double>>> clearPad(vector<vector<vector<double>>> &A, int pad, const string& type) {
    vector<vector<vector<double>>> res(A.size() - 2 * pad,
                                       vector<vector<double>>(A[0].size() - 2 * pad,
                                                              vector<double>(A[0][0].size())));

    if (type == "cnvm") {
        for (size_t c = 0; c < A[0][0].size(); ++c) {
            for (size_t i = 0; i < A.size(); ++i) {
                for (size_t j = 0; j < A[0].size(); ++j) {
                    size_t ii = i < pad ? pad - i : i < A.size() - pad ? i - pad :
                                res.size() - 2 - (pad - (A.size() - i));
                    size_t jj = j < pad ? pad - j : j < A[0].size() - pad ? j - pad :
                                res[0].size() - 2 - (pad - (A[0].size() - j));
                    res[ii][jj][c] += A[i][j][c];
                }
            }
        }

    } else if (type == "cnve") {
        for (size_t c = 0; c < A[0][0].size(); ++c) {
            for (size_t i = 0; i < A.size(); ++i) {
                for (size_t j = 0; j < A[0].size(); ++j) {
                    size_t ii = i < pad ? 0 : i < A.size() - pad ? i - pad : res.size() - 1;
                    size_t jj = j < pad ? 0 : j < A[0].size() - pad ? j - pad : res[0].size() - 1;
                    res[ii][jj][c] += A[i][j][c];
                }
            }
        }

    } else if (type == "cnvc") {
        for (size_t c = 0; c < A[0][0].size(); ++c) {
            for (size_t i = 0; i < A.size(); ++i) {
                for (size_t j = 0; j < A[0].size(); ++j) {
                    size_t ii = i < pad ? res.size() - pad + i : i < A.size() - pad ? i - pad :
                                (pad - (A.size() - i));
                    size_t jj = j < pad ? res[0].size() - pad + j : j < A[0].size() - pad ? j - pad :
                                (pad - (A[0].size() - j));
                    res[ii][jj][c] += A[i][j][c];
                }
            }
        }
    }

    return res;
}

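// One output cell of a convolution: the dot product between the filter of
// output channel fixC and the window of A anchored at (vert_start, horiz_start).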
double convSingleStep(vector<vector<vector<double>>> &A,
                      vector<vector<vector<vector<double>>>> &W,
                      int vert_start, int horiz_start, int fixC) {

    double ans = 0.;

    for (size_t h = 0; h < W.size(); ++h) {
        for (size_t w = 0; w < W[0].size(); ++w) {
            for (size_t c = 0; c < A[0][0].size(); ++c) {
                ans += A[vert_start + h][horiz_start + w][c] * W[h][w][c][fixC];
            }
        }
    }

    return ans;
}

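// Convolution forward pass: pads A according to the layer type, then slides
// the f x f filters with stride S. The output size per spatial dimension is
// (h_prev - f + 2P) / S + 1.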
vector<vector<vector<double>>> conv(vector<vector<vector<double>>> &A,
                                    vector<vector<vector<vector<double>>>> &W, int S, int P, const string& type) {

    int f = W.size();

    int h_prev = A.size();
    int w_prev = A[0].size();

    int h_new = (h_prev - f + 2 * P) / S + 1;
    int w_new = (w_prev - f + 2 * P) / S + 1;
    int c_new = W[0][0][0].size();

    vector<vector<vector<double>>> res(h_new,
                                       vector<vector<double>>(w_new, vector<double>(c_new, 0.)));

    auto Apad = fillPad(A, P, type);

    for (int h = 0; h < h_new; ++h) {
        int vert_start = h * S;

        for (int w = 0; w < w_new; ++w) {
            int horiz_start = w * S;

            for (int c = 0; c < c_new; ++c) {
                res[h][w][c] = convSingleStep(Apad, W, vert_start, horiz_start, c);
            }
        }
    }

    return res;
}

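// Accumulates the gradients produced by a single output cell: dZhwc flows back
// into the padded input through the filter weights (dAprevPad) and into the
// filter itself through the padded input values (dW).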
void convBackStep(vector<vector<vector<double>>> &AprevPad,
                  vector<vector<vector<double>>> &dAprevPad,
                  vector<vector<vector<vector<double>>>> &W,
                  vector<vector<vector<vector<double>>>> &dW,
                  double dZhwc, int vert_start, int horiz_start, int fixC) {

    for (size_t h = 0; h < W.size(); ++h) {
        for (size_t w = 0; w < W[0].size(); ++w) {
            for (size_t c = 0; c < W[0][0].size(); ++c) {
                dAprevPad[vert_start + h][horiz_start + w][c] += W[h][w][c][fixC] * dZhwc;
                dW[h][w][c][fixC] += AprevPad[vert_start + h][horiz_start + w][c] * dZhwc;
            }
        }
    }
}

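// Backward pass of a convolution. Gradients are accumulated in padded
// coordinates first; clearPad then folds the padded cells' contributions back
// onto the original cells they were copied from. dAprev is all zeros when
// passed to fillPad, so only its (padded) shape matters there.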
void convBack(int layerNum, const string& type) {
    auto Aprev = network[layerNum - 1]->A;
    auto dZ = network[layerNum]->dZ;
    auto W = network[layerNum]->W;
    int S = network[layerNum]->S;
    int P = network[layerNum]->P;

    int h_prev = Aprev.size();
    int w_prev = Aprev[0].size();
    int c_prev = Aprev[0][0].size();

    int f = W.size();

    int h_new = dZ.size();
    int w_new = dZ[0].size();
    int c_new = dZ[0][0].size();

    vector<vector<vector<double>>> dAprev(h_prev,
                                          vector<vector<double>>(w_prev, vector<double>(c_prev, 0.)));

    vector<vector<vector<vector<double>>>> dW(f, vector<vector<vector<double>>>(f,
                                              vector<vector<double>>(c_prev, vector<double>(c_new, 0.))));

    auto AprevPad = fillPad(Aprev, P, type);
    auto dAprevPad = fillPad(dAprev, P, type);

    for (int h = 0; h < h_new; ++h) {
        for (int w = 0; w < w_new; ++w) {
            for (int c = 0; c < c_new; ++c) {
                int vert_start = h * S;
                int horiz_start = w * S;

                convBackStep(AprevPad, dAprevPad, W, dW, dZ[h][w][c], vert_start, horiz_start, c);
            }
        }
    }

    network[layerNum - 1]->dZ = clearPad(dAprevPad, P, type);
    network[layerNum]->dW = dW;
}

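// Reads one layer description from stdin and runs its forward pass on the
// previous layer's activation. "relu" reads an integer k and uses alpha = 1/k;
// "pool" reads the window size; "bias" reads one integer per channel; the
// "cnv*" layers read H (output channels), K (kernel size), S (stride) and
// P (padding), followed by H blocks of deep x K x K integer filter weights.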
layerDesc *forward(string &s, int layerNum) {
    auto layer = new layerDesc();
    layer->type = s;

    if (s == "relu") {
        int alpha;
        cin >> alpha;
        layer->alpha = 1. / (double) alpha;
        layer->A = relu(network[layerNum - 1]->A, layer->alpha);

    } else if (s == "pool") {
        cin >> layer->S;
        layer->A = pool(network[layerNum - 1]->A, layer->S);

    } else if (s == "bias") {
        int deep = network[layerNum - 1]->A[0][0].size();
        layer->biases.resize(deep);
        int x;
        for (int i = 0; i < deep; ++i) {
            cin >> x;
            layer->biases[i] = (double) x;
        }
        layer->A = bias(network[layerNum - 1]->A, layer->biases);

    } else if (s == "cnvm" || s == "cnve" || s == "cnvc") {
        int deep = network[layerNum - 1]->A[0][0].size();
        cin >> layer->H >> layer->K >> layer->S >> layer->P;

        // W is indexed [kh][kw][c_prev][c_new]; the input lists the weights
        // one output channel at a time
        layer->W.assign(layer->K, vector<vector<vector<double>>>(layer->K,
                        vector<vector<double>>(deep, vector<double>(layer->H))));

        int x;
        for (int h = 0; h < layer->H; ++h) {
            for (int d = 0; d < deep; ++d) {
                for (int kh = 0; kh < layer->K; ++kh) {
                    for (int kw = 0; kw < layer->K; ++kw) {
                        cin >> x;
                        layer->W[kh][kw][d][h] = (double) x;
                    }
                }
            }
        }
        layer->A = conv(network[layerNum - 1]->A, layer->W, layer->S, layer->P, s);
    }

    return layer;
}

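// Dispatches the backward pass for one layer; each *Back function stores the
// gradient w.r.t. its input in network[layerNum - 1]->dZ.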
void backward(int layerNum) {
    auto layer = network[layerNum];

    if (layer->type == "relu") {
        reluBack(layerNum);
    } else if (layer->type == "pool") {
        poolBack(layerNum);
    } else if (layer->type == "bias") {
        biasBack(layerNum);
    } else if (layer->type == "cnvm" || layer->type == "cnve" || layer->type == "cnvc") {
        convBack(layerNum, layer->type);
    }
}

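// Reads the derivative of the loss w.r.t. the network output (channel-major,
// the same order as print3DMatrix) to seed backpropagation.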
void readOutDerivative() {
    auto Z = network[L]->A;
    network[L]->dZ.assign(Z.size(),
                          vector<vector<double>>(Z[0].size(), vector<double>(Z[0][0].size())));
    int x;
    for (size_t c = 0; c < Z[0][0].size(); ++c) {
        for (size_t h = 0; h < Z.size(); ++h) {
            for (size_t w = 0; w < Z[0].size(); ++w) {
                cin >> x;
                network[L]->dZ[h][w][c] = (double) x;
            }
        }
    }
}

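// Prints the parameter gradients of one layer: dW for convolutions, db for
// bias layers; layers without parameters print nothing.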
void printDerivatives(int layerNum) {
    auto layer = network[layerNum];
    if (layer->type == "cnvm" || layer->type == "cnve" || layer->type == "cnvc") {
        print4DMatrix(layer->dW);
    } else if (layer->type == "bias") {
        for (double i : layer->db) {
            cout << i << ' ';
        }
        cout << endl;
    }
}

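// Input: N and D, the N x N x D input tensor (channel-major), the layer count
// L, the L layer descriptions, and finally dL/dA of the last layer's output.
// Output: the network's output, the gradient w.r.t. the input, and then the
// parameter gradients (dW or db) of each layer that has parameters.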
int main() {
    cin >> N >> D;

    input.assign(N, vector<vector<double>>(N, vector<double>(D)));

    int x;
    for (int c = 0; c < D; ++c) {
        for (int h = 0; h < N; ++h) {
            for (int w = 0; w < N; ++w) {
                cin >> x;
                input[h][w][c] = (double) x;
            }
        }
    }

    cin >> L;
    network.resize(L + 1);

    // pseudo-layer 0 holds the input so every real layer has a predecessor
    network[0] = new layerDesc();
    network[0]->type = "start";
    network[0]->A = input;

    for (int i = 1; i <= L; ++i) {
        string s;
        cin >> s;
        network[i] = forward(s, i);
    }

    readOutDerivative();
    print3DMatrix(network[L]->A);

    for (int i = L; i > 0; --i) {
        backward(i);
    }

    print3DMatrix(network[0]->dZ);

    for (int i = 1; i <= L; ++i) {
        printDerivatives(i);
    }

    return 0;
}