  1. {
  2. "cells": [
  3. {
  4. "cell_type": "code",
  5. "execution_count": 1,
  6. "metadata": {
  7. "collapsed": true
  8. },
  9. "outputs": [],
  10. "source": [
  11. "import torch"
  12. ]
  13. },
  14. {
  15. "cell_type": "code",
  16. "execution_count": 2,
  17. "metadata": {
  18. "collapsed": false
  19. },
  20. "outputs": [
  21. {
  22. "data": {
  23. "text/plain": [
  24. "'1.0.0'"
  25. ]
  26. },
  27. "execution_count": 2,
  28. "metadata": {},
  29. "output_type": "execute_result"
  30. }
  31. ],
  32. "source": [
  33. "torch.__version__"
  34. ]
  35. },
  36. {
  37. "cell_type": "code",
  38. "execution_count": 3,
  39. "metadata": {
  40. "collapsed": true
  41. },
  42. "outputs": [],
  43. "source": [
  44. "x = torch.tensor([[1, 2, 3], [4, 5, 6]])\n",
  45. "y = torch.tensor([[7, 8, 9], [10, 11, 12]])"
  46. ]
  47. },
  48. {
  49. "cell_type": "code",
  50. "execution_count": 4,
  51. "metadata": {
  52. "collapsed": false
  53. },
  54. "outputs": [
  55. {
  56. "data": {
  57. "text/plain": [
  58. "<torch._C.Generator at 0x1b55d9d1db0>"
  59. ]
  60. },
  61. "execution_count": 4,
  62. "metadata": {},
  63. "output_type": "execute_result"
  64. }
  65. ],
  66. "source": [
  67. "torch.manual_seed(42)"
  68. ]
  69. },
  70. {
  71. "cell_type": "code",
  72. "execution_count": 5,
  73. "metadata": {
  74. "collapsed": false
  75. },
  76. "outputs": [
  77. {
  78. "name": "stdout",
  79. "output_type": "stream",
  80. "text": [
  81. "tensor([[0.8823, 0.9150, 0.3829],\n",
  82. " [0.9593, 0.3904, 0.6009]])\n"
  83. ]
  84. }
  85. ],
  86. "source": [
  87. "print(torch.rand([2, 3]))"
  88. ]
  89. },
  90. {
  91. "cell_type": "code",
  92. "execution_count": 6,
  93. "metadata": {
  94. "collapsed": true
  95. },
  96. "outputs": [],
  97. "source": [
  98. "import numpy as np"
  99. ]
  100. },
  101. {
  102. "cell_type": "code",
  103. "execution_count": 7,
  104. "metadata": {
  105. "collapsed": false
  106. },
  107. "outputs": [
  108. {
  109. "ename": "TypeError",
  110. "evalue": "add(): argument 'other' (position 1) must be Tensor, not numpy.ndarray",
  111. "output_type": "error",
  112. "traceback": [
  113. "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
  114. "\u001b[1;31mTypeError\u001b[0m Traceback (most recent call last)",
  115. "\u001b[1;32m<ipython-input-7-2e651424c109>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[0mxnp\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0marray\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;36m2\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;36m3\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;36m4\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;36m5\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;36m6\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 2\u001b[1;33m \u001b[0mf2\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mxnp\u001b[0m \u001b[1;33m+\u001b[0m \u001b[0my\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
  116. "\u001b[1;31mTypeError\u001b[0m: add(): argument 'other' (position 1) must be Tensor, not numpy.ndarray"
  117. ]
  118. }
  119. ],
  120. "source": [
  121. "xnp = np.array([[1, 2, 3], [4, 5, 6]])\n",
  122. "f2 = xnp + y"
  123. ]
  124. },
  125. {
  126. "cell_type": "markdown",
  127. "metadata": {},
  128. "source": [
  129. "Huh??? \n",
  130. "Can you really do arithmetic between a numpy array and a Tensor?????\n",
  131. "\n",
  132. "-> Apparently torch version 1.0.0 raises an error here. \n",
  133. "Earlier versions seem to let it through."
  134. ]
  135. },
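{
"cell_type": "markdown",
"metadata": {},
"source": [
"Not from the book, just a sketch of a possible workaround: convert the numpy array to a tensor first (for example with `torch.from_numpy()`) and match dtypes, and the addition should work on torch 1.0.0 as well. The name `xt` below is our own."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Sketch of a workaround (our assumption, not from the book):\n",
"# convert the numpy array to a tensor and match y's dtype before adding.\n",
"xnp = np.array([[1, 2, 3], [4, 5, 6]])\n",
"xt = torch.from_numpy(xnp).to(y.dtype)\n",
"f2 = xt + y\n",
"print(f2)"
]
},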
  136. {
  137. "cell_type": "markdown",
  138. "metadata": {},
  139. "source": [
  140. "1. Up to p.10 (installing-pytorch)\n",
  141. "2. Through the end of p.13\n",
  142. "3. p.14-18\n",
  143. "4. p.19-22 (up to loading data) (Ito)\n",
  144. "5. p.22 (from loading data) to p.26 (up to Data Loader)\n",
  145. "6. p.26 onward (from DataLoader to the end)"
  146. ]
  147. },
  148. {
  149. "cell_type": "markdown",
  150. "metadata": {},
  151. "source": [
  152. "# Reading group: Chapter 1\n",
  153. "## Slicing and indexing and reshaping (p.19)"
  154. ]
  155. },
  156. {
  157. "cell_type": "code",
  158. "execution_count": 8,
  159. "metadata": {
  160. "collapsed": false
  161. },
  162. "outputs": [
  163. {
  164. "data": {
  165. "text/plain": [
  166. "tensor([[1, 2, 3],\n",
  167. " [4, 5, 6]])"
  168. ]
  169. },
  170. "execution_count": 8,
  171. "metadata": {},
  172. "output_type": "execute_result"
  173. }
  174. ],
  175. "source": [
  176. "x"
  177. ]
  178. },
  179. {
  180. "cell_type": "markdown",
  181. "metadata": {},
  182. "source": [
  183. "* Display the first element"
  184. ]
  185. },
  186. {
  187. "cell_type": "code",
  188. "execution_count": 12,
  189. "metadata": {
  190. "collapsed": false
  191. },
  192. "outputs": [
  193. {
  194. "name": "stdout",
  195. "output_type": "stream",
  196. "text": [
  197. "tensor([1, 2, 3])\n"
  198. ]
  199. }
  200. ],
  201. "source": [
  202. "print(x[0])"
  203. ]
  204. },
  205. {
  206. "cell_type": "markdown",
  207. "metadata": {},
  208. "source": [
  209. "* Display elements 0 to 1 of the second row \n",
  210. "Note that slicing is zero-based and the end index is not included "
  211. ]
  212. },
  213. {
  214. "cell_type": "code",
  215. "execution_count": 10,
  216. "metadata": {
  217. "collapsed": false
  218. },
  219. "outputs": [
  220. {
  221. "name": "stdout",
  222. "output_type": "stream",
  223. "text": [
  224. "tensor([4, 5])\n"
  225. ]
  226. }
  227. ],
  228. "source": [
  229. "print(x[1][0:2])"
  230. ]
  231. },
  232. {
  233. "cell_type": "markdown",
  234. "metadata": {},
  235. "source": [
  236. "With the `view()` function you can create a reshaped view of an existing tensor. \n",
  237. "Here are three examples. "
  238. ]
  239. },
  240. {
  241. "cell_type": "code",
  242. "execution_count": 11,
  243. "metadata": {
  244. "collapsed": false
  245. },
  246. "outputs": [
  247. {
  248. "name": "stdout",
  249. "output_type": "stream",
  250. "text": [
  251. "tensor([1, 2, 3, 4, 5, 6])\n"
  252. ]
  253. }
  254. ],
  255. "source": [
  256. "print(x.view(-1))"
  257. ]
  258. },
  259. {
  260. "cell_type": "code",
  261. "execution_count": 13,
  262. "metadata": {
  263. "collapsed": false
  264. },
  265. "outputs": [
  266. {
  267. "name": "stdout",
  268. "output_type": "stream",
  269. "text": [
  270. "tensor([[1, 2],\n",
  271. " [3, 4],\n",
  272. " [5, 6]])\n"
  273. ]
  274. }
  275. ],
  276. "source": [
  277. "print(x.view(3, 2))"
  278. ]
  279. },
  280. {
  281. "cell_type": "code",
  282. "execution_count": 14,
  283. "metadata": {
  284. "collapsed": false
  285. },
  286. "outputs": [
  287. {
  288. "name": "stdout",
  289. "output_type": "stream",
  290. "text": [
  291. "tensor([[1],\n",
  292. " [2],\n",
  293. " [3],\n",
  294. " [4],\n",
  295. " [5],\n",
  296. " [6]])\n"
  297. ]
  298. }
  299. ],
  300. "source": [
  301. "print(x.view(6, 1))"
  302. ]
  303. },
  304. {
  305. "cell_type": "markdown",
  306. "metadata": {},
  307. "source": [
  308. "(3, 2) and (6, 1) are easy to understand, but what is -1 doing? \n",
  309. "It is handy when you know how many columns you need but not how many rows of data will come in. \n",
  310. "With -1, PyTorch works out the appropriate number of rows (or columns) from the total number of elements. "
  311. ]
  312. },
  313. {
  314. "cell_type": "code",
  315. "execution_count": 15,
  316. "metadata": {
  317. "collapsed": false
  318. },
  319. "outputs": [
  320. {
  321. "name": "stdout",
  322. "output_type": "stream",
  323. "text": [
  324. "tensor([[1, 2],\n",
  325. " [3, 4],\n",
  326. " [5, 6]])\n"
  327. ]
  328. }
  329. ],
  330. "source": [
  331. "print(x.view(3, -1))"
  332. ]
  333. },
  334. {
  335. "cell_type": "markdown",
  336. "metadata": {},
  337. "source": [
  338. "Another important operation is swapping axes. \n",
  339. "For swapping two axes, this can be done with the `tensor.transpose()` method. "
  340. ]
  341. },
  342. {
  343. "cell_type": "code",
  344. "execution_count": 16,
  345. "metadata": {
  346. "collapsed": false
  347. },
  348. "outputs": [
  349. {
  350. "data": {
  351. "text/plain": [
  352. "tensor([[1, 2, 3],\n",
  353. " [4, 5, 6]])"
  354. ]
  355. },
  356. "execution_count": 16,
  357. "metadata": {},
  358. "output_type": "execute_result"
  359. }
  360. ],
  361. "source": [
  362. "x"
  363. ]
  364. },
  365. {
  366. "cell_type": "code",
  367. "execution_count": 24,
  368. "metadata": {
  369. "collapsed": false
  370. },
  371. "outputs": [
  372. {
  373. "data": {
  374. "text/plain": [
  375. "tensor([[1, 4],\n",
  376. " [2, 5],\n",
  377. " [3, 6]])"
  378. ]
  379. },
  380. "execution_count": 24,
  381. "metadata": {},
  382. "output_type": "execute_result"
  383. }
  384. ],
  385. "source": [
  386. "x.transpose(0, 1)"
  387. ]
  388. },
  389. {
  390. "cell_type": "markdown",
  391. "metadata": {},
  392. "source": [
  393. "Note that `transpose()` can only swap two axes at a time. \n",
  394. "If you want to rearrange more than two axes, use the `permute()` method instead. \n",
  395. "You call it by specifying the axis numbers in the desired order. "
  396. ]
  397. },
  398. {
  399. "cell_type": "code",
  400. "execution_count": 31,
  401. "metadata": {
  402. "collapsed": true
  403. },
  404. "outputs": [],
  405. "source": [
  406. "a = torch.ones(1, 2, 3, 4)"
  407. ]
  408. },
  409. {
  410. "cell_type": "code",
  411. "execution_count": 32,
  412. "metadata": {
  413. "collapsed": false
  414. },
  415. "outputs": [
  416. {
  417. "data": {
  418. "text/plain": [
  419. "tensor([[[[1., 1., 1., 1.],\n",
  420. " [1., 1., 1., 1.],\n",
  421. " [1., 1., 1., 1.]],\n",
  422. "\n",
  423. " [[1., 1., 1., 1.],\n",
  424. " [1., 1., 1., 1.],\n",
  425. " [1., 1., 1., 1.]]]])"
  426. ]
  427. },
  428. "execution_count": 32,
  429. "metadata": {},
  430. "output_type": "execute_result"
  431. }
  432. ],
  433. "source": [
  434. "a"
  435. ]
  436. },
  437. {
  438. "cell_type": "code",
  439. "execution_count": 33,
  440. "metadata": {
  441. "collapsed": false
  442. },
  443. "outputs": [
  444. {
  445. "data": {
  446. "text/plain": [
  447. "torch.Size([4, 3, 2, 1])"
  448. ]
  449. },
  450. "execution_count": 33,
  451. "metadata": {},
  452. "output_type": "execute_result"
  453. }
  454. ],
  455. "source": [
  456. "# Swapping all the axes in two separate transpose calls\n",
  457. "a.transpose(0, 3).transpose(1, 2).size()"
  458. ]
  459. },
  460. {
  461. "cell_type": "code",
  462. "execution_count": 34,
  463. "metadata": {
  464. "collapsed": false
  465. },
  466. "outputs": [
  467. {
  468. "data": {
  469. "text/plain": [
  470. "torch.Size([4, 3, 2, 1])"
  471. ]
  472. },
  473. "execution_count": 34,
  474. "metadata": {},
  475. "output_type": "execute_result"
  476. }
  477. ],
  478. "source": [
  479. "# With permute you can do it in a single call\n",
  480. "a.permute(3, 2, 1, 0).size()"
  481. ]
  482. },
  483. {
  484. "cell_type": "markdown",
  485. "metadata": {},
  486. "source": [
  487. "A tensor of up to two dimensions can be shown as a flat table. \n",
  488. "Higher-dimensional tensors cannot be displayed that way. \n",
  489. "That is not a problem for the magic of deep learning, though: \n",
  490. "real-world features are encoded in the dimensions of the data structure, \n",
  491. "so we end up working with tensors of more than two dimensions."
  492. ]
  493. },
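{
"cell_type": "markdown",
"metadata": {},
"source": [
"A small illustration of features living in the dimensions (our own example, not from the book): a batch of RGB images is typically stored as a 4-D tensor whose axes mean (batch, channels, height, width)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Illustrative example (our assumption): a batch of 8 RGB images of size 32x32.\n",
"# Axis meaning: (batch, channels, height, width).\n",
"images = torch.zeros(8, 3, 32, 32)\n",
"print(images.size())  # torch.Size([8, 3, 32, 32])"
]
},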
  494. {
  495. "cell_type": "markdown",
  496. "metadata": {},
  497. "source": [
  498. "## In place operations (p.21-22)\n",
  499. "It is important to understand the difference between functions that overwrite their input and those that do not. \n",
  500. "For example, `x.transpose(0, 1)` returned the transposed tensor as its result, but the original x itself was not changed. \n",
  501. "Everything we have used in the examples so far was of the non-overwriting kind. \n",
  502. "You could simply write `x = x.transpose(0, 1)`, but a more convenient option is to use an overwriting (in-place) function. \n",
  503. "In most cases, appending an underscore to the name gives you the in-place version. "
  504. ]
  505. },
  506. {
  507. "cell_type": "code",
  508. "execution_count": 35,
  509. "metadata": {
  510. "collapsed": false
  511. },
  512. "outputs": [
  513. {
  514. "data": {
  515. "text/plain": [
  516. "tensor([[1, 2, 3],\n",
  517. " [4, 5, 6]])"
  518. ]
  519. },
  520. "execution_count": 35,
  521. "metadata": {},
  522. "output_type": "execute_result"
  523. }
  524. ],
  525. "source": [
  526. "x"
  527. ]
  528. },
  529. {
  530. "cell_type": "code",
  531. "execution_count": 36,
  532. "metadata": {
  533. "collapsed": false
  534. },
  535. "outputs": [
  536. {
  537. "data": {
  538. "text/plain": [
  539. "tensor([[1, 4],\n",
  540. " [2, 5],\n",
  541. " [3, 6]])"
  542. ]
  543. },
  544. "execution_count": 36,
  545. "metadata": {},
  546. "output_type": "execute_result"
  547. }
  548. ],
  549. "source": [
  550. "x.transpose_(1, 0)"
  551. ]
  552. },
  553. {
  554. "cell_type": "code",
  555. "execution_count": 37,
  556. "metadata": {
  557. "collapsed": false
  558. },
  559. "outputs": [
  560. {
  561. "data": {
  562. "text/plain": [
  563. "tensor([[1, 4],\n",
  564. " [2, 5],\n",
  565. " [3, 6]])"
  566. ]
  567. },
  568. "execution_count": 37,
  569. "metadata": {},
  570. "output_type": "execute_result"
  571. }
  572. ],
  573. "source": [
  574. "x"
  575. ]
  576. },
  577. {
  578. "cell_type": "code",
  579. "execution_count": 38,
  580. "metadata": {
  581. "collapsed": false
  582. },
  583. "outputs": [
  584. {
  585. "data": {
  586. "text/plain": [
  587. "tensor([[ 7, 8, 9],\n",
  588. " [10, 11, 12]])"
  589. ]
  590. },
  591. "execution_count": 38,
  592. "metadata": {},
  593. "output_type": "execute_result"
  594. }
  595. ],
  596. "source": [
  597. "y"
  598. ]
  599. },
  600. {
  601. "cell_type": "code",
  602. "execution_count": 39,
  603. "metadata": {
  604. "collapsed": false
  605. },
  606. "outputs": [
  607. {
  608. "ename": "RuntimeError",
  609. "evalue": "The size of tensor a (3) must match the size of tensor b (2) at non-singleton dimension 1",
  610. "output_type": "error",
  611. "traceback": [
  612. "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
  613. "\u001b[1;31mRuntimeError\u001b[0m Traceback (most recent call last)",
  614. "\u001b[1;32m<ipython-input-39-e46502eb7b4e>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0my\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0madd_\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m*\u001b[0m\u001b[1;36m2\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
  615. "\u001b[1;31mRuntimeError\u001b[0m: The size of tensor a (3) must match the size of tensor b (2) at non-singleton dimension 1"
  616. ]
  617. }
  618. ],
  619. "source": [
  620. "y.add_(x*2)"
  621. ]
  622. },
  623. {
  624. "cell_type": "markdown",
  625. "metadata": {},
  626. "source": [
  627. "Wait, the book has us overwrite x in place above, and then this line blows up? \n",
  628. "It should at least have told us to keep a copy first. "
  629. ]
  630. },
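{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal fix, as a sketch (not from the book): restore the original layout of `x`, or work on a copy made with `clone()`, so the shapes match before calling `y.add_(x*2)`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Sketch (our assumption, not from the book): make the shapes match again.\n",
"x.transpose_(1, 0)  # undo the earlier in-place transpose, x is (2, 3) again\n",
"y.add_(x * 2)       # in-place: y += 2*x, shapes now line up\n",
"print(y)\n",
"# Keeping a copy up front would have avoided the problem entirely:\n",
"# x_backup = x.clone()"
]
},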
  631. {
  632. "cell_type": "code",
  633. "execution_count": null,
  634. "metadata": {
  635. "collapsed": true
  636. },
  637. "outputs": [],
  638. "source": []
  639. }
  640. ],
  641. "metadata": {
  642. "anaconda-cloud": {},
  643. "kernelspec": {
  644. "display_name": "Python [conda env:Anaconda3]",
  645. "language": "python",
  646. "name": "conda-env-Anaconda3-py"
  647. },
  648. "language_info": {
  649. "codemirror_mode": {
  650. "name": "ipython",
  651. "version": 3
  652. },
  653. "file_extension": ".py",
  654. "mimetype": "text/x-python",
  655. "name": "python",
  656. "nbconvert_exporter": "python",
  657. "pygments_lexer": "ipython3",
  658. "version": "3.5.5"
  659. }
  660. },
  661. "nbformat": 4,
  662. "nbformat_minor": 1
  663. }