Lephenixnoir@Chaos /tmp $ sh3eb-elf-objdump -S _moddi3.o

_moddi3.o: file format elf32-sh


Disassembly of section .text:

00000000 <___moddi3>:
#endif

#ifdef L_moddi3
DWtype
__moddi3 (DWtype u, DWtype v)
{
0: 2f 86 mov.l r8,@-r15
2: 2f 96 mov.l r9,@-r15
4: 2f a6 mov.l r10,@-r15
6: 2f b6 mov.l r11,@-r15
8: 2f c6 mov.l r12,@-r15
a: 2f d6 mov.l r13,@-r15
c: 4f 22 sts.l pr,@-r15
Wtype c = 0;
DWunion uu = {.ll = u};
DWunion vv = {.ll = v};
DWtype w;

if (uu.s.high < 0)
e: 44 11 cmp/pz r4
10: 89 01 bt 16 <___moddi3+0x16>
12: a0 a8 bra 166 <___moddi3+0x166>
14: 00 08 clrt
16: 68 43 mov r4,r8
18: 69 53 mov r5,r9

#ifdef L_moddi3
DWtype
__moddi3 (DWtype u, DWtype v)
{
Wtype c = 0;
1a: ea 00 mov #0,r10
DWtype w;

if (uu.s.high < 0)
c = ~c,
uu.ll = -uu.ll;
if (vv.s.high < 0)
1c: 46 11 cmp/pz r6
1e: 89 01 bt 24 <___moddi3+0x24>
20: a0 9e bra 160 <___moddi3+0x160>
22: 00 08 clrt
24: 62 63 mov r6,r2
26: 63 73 mov r7,r3
}
}

#else /* UDIV_NEEDS_NORMALIZATION */

if (d1 == 0)
28: 22 28 tst r2,r2
DWunion rr;
UWtype d0, d1, n0, n1, n2;
UWtype q0, q1;
UWtype b, bm;

d0 = dd.s.low;
2a: 67 33 mov r3,r7
d1 = dd.s.high;
2c: 61 23 mov r2,r1
n0 = nn.s.low;
2e: 6b 93 mov r9,r11
}
}

#else /* UDIV_NEEDS_NORMALIZATION */

if (d1 == 0)
30: 8f 42 bf.s b8 <___moddi3+0xb8>
32: 66 83 mov r8,r6
{
if (d0 > n1)
34: 33 86 cmp/hi r8,r3
36: 8f 64 bf.s 102 <___moddi3+0x102>
38: 23 38 tst r3,r3
{
/* 0q = nn / 0D */

count_leading_zeros (bm, d0);
3a: d1 85 mov.l 250 <___moddi3+0x250>,r1 ! ffff
3c: 33 16 cmp/hi r1,r3
3e: 89 01 bt 44 <___moddi3+0x44>
40: a0 9b bra 17a <___moddi3+0x17a>
42: 00 09 nop
44: d1 83 mov.l 254 <___moddi3+0x254>,r1 ! ffffff
46: 33 16 cmp/hi r1,r3
48: 8b 01 bf 4e <___moddi3+0x4e>
4a: a0 f5 bra 238 <___moddi3+0x238>
4c: e1 18 mov #24,r1
4e: e1 10 mov #16,r1
50: e2 10 mov #16,r2
52: 61 1b neg r1,r1
54: 60 73 mov r7,r0
56: 40 1d shld r1,r0
58: d1 7f mov.l 258 <___moddi3+0x258>,r1 ! 0 <___moddi3>
5a: 01 1c mov.b @(r0,r1),r1
5c: 61 1c extu.b r1,r1
5e: 31 2c add r2,r1
60: 61 1b neg r1,r1
62: 68 13 mov r1,r8
64: 78 20 add #32,r8

if (bm != 0)
66: 28 88 tst r8,r8
68: 8f 02 bf.s 70 <___moddi3+0x70>
6a: 60 b3 mov r11,r0
6c: a0 05 bra 7a <___moddi3+0x7a>
6e: e8 00 mov #0,r8
{
/* Normalize, i.e. make the most significant bit of the
denominator set. */

d0 = d0 << bm;
n1 = (n1 << bm) | (n0 >> (W_TYPE_SIZE - bm));
70: 46 8d shld r8,r6
72: 40 1d shld r1,r0
if (bm != 0)
{
/* Normalize, i.e. make the most significant bit of the
denominator set. */

d0 = d0 << bm;
74: 47 8d shld r8,r7
n1 = (n1 << bm) | (n0 >> (W_TYPE_SIZE - bm));
n0 = n0 << bm;
76: 4b 8d shld r8,r11
{
/* Normalize, i.e. make the most significant bit of the
denominator set. */

d0 = d0 << bm;
n1 = (n1 << bm) | (n0 >> (W_TYPE_SIZE - bm));
78: 26 0b or r0,r6
n0 = n0 << bm;
}

udiv_qrnnd (q0, n0, n1, n0, d0);
7a: d9 78 mov.l 25c <___moddi3+0x25c>,r9 ! 0 <___moddi3>
7c: 60 63 mov r6,r0
7e: 65 73 mov r7,r5
80: 64 b9 swap.w r11,r4
82: 66 59 swap.w r5,r6
84: 49 0b jsr @r9
86: 46 28 shll16 r6
88: 64 49 swap.w r4,r4
8a: 49 0b jsr @r9
8c: 67 19 swap.w r1,r7
8e: 27 1b or r1,r7
90: 63 03 mov r0,r3
/* Remainder in n0 >> bm. */
}

if (rp != 0)
{
rr.s.low = n0 >> bm;
92: 68 8b neg r8,r8
rr.s.high = 0;
*rp = rr.ll;
94: 61 33 mov r3,r1
96: 41 8d shld r8,r1
98: e0 00 mov #0,r0
uu.ll = -uu.ll;
if (vv.s.high < 0)
vv.ll = -vv.ll;

(void) __udivmoddi4 (uu.ll, vv.ll, (UDWtype*)&w);
if (c)
9a: 2a a8 tst r10,r10
9c: 89 04 bt a8 <___moddi3+0xa8>
9e: 00 08 clrt
w = -w;
a0: 63 1a negc r1,r3
a2: 62 0a negc r0,r2
a4: 60 23 mov r2,r0
a6: 61 33 mov r3,r1

return w;
}
a8: 4f 26 lds.l @r15+,pr
aa: 6d f6 mov.l @r15+,r13
ac: 6c f6 mov.l @r15+,r12
ae: 6b f6 mov.l @r15+,r11
b0: 6a f6 mov.l @r15+,r10
b2: 69 f6 mov.l @r15+,r9
b4: 00 0b rts
b6: 68 f6 mov.l @r15+,r8
}
#endif /* UDIV_NEEDS_NORMALIZATION */

else
{
if (d1 > n1)
b8: 32 86 cmp/hi r8,r2
ba: 8f 02 bf.s c2 <___moddi3+0xc2>
bc: 60 83 mov r8,r0
/* Remainder in n1n0. */
if (rp != 0)
{
rr.s.low = n0;
rr.s.high = n1;
*rp = rr.ll;
be: af ec bra 9a <___moddi3+0x9a>
c0: 61 93 mov r9,r1
}
else
{
/* 0q = NN / dd */

count_leading_zeros (bm, d1);
c2: d2 63 mov.l 250 <___moddi3+0x250>,r2 ! ffff
c4: 31 26 cmp/hi r2,r1
c6: 8b 5e bf 186 <___moddi3+0x186>
c8: d2 62 mov.l 254 <___moddi3+0x254>,r2 ! ffffff
ca: 31 26 cmp/hi r2,r1
cc: 8b 01 bf d2 <___moddi3+0xd2>
ce: a0 b8 bra 242 <___moddi3+0x242>
d0: e2 18 mov #24,r2
d2: e2 10 mov #16,r2
d4: e5 10 mov #16,r5
d6: 62 2b neg r2,r2
d8: 60 13 mov r1,r0
da: 40 2d shld r2,r0
dc: d2 5e mov.l 258 <___moddi3+0x258>,r2 ! 0 <___moddi3>
de: 03 2c mov.b @(r0,r2),r3
e0: 63 3c extu.b r3,r3
e2: 33 5c add r5,r3
e4: 62 3b neg r3,r2
e6: 68 23 mov r2,r8
e8: 78 20 add #32,r8
if (bm == 0)
ea: 28 88 tst r8,r8
ec: 8f 51 bf.s 192 <___moddi3+0x192>
ee: 31 62 cmp/hs r6,r1

This special case is necessary, not an optimization. */

/* The condition on the next line takes advantage of that
n1 >= d1 (true due to program flow). */
if (n1 > d1 || n0 >= d0)
f0: 8f 01 bf.s f6 <___moddi3+0xf6>
f2: 37 b6 cmp/hi r11,r7
f4: 89 02 bt fc <___moddi3+0xfc>
{
q0 = 1;
sub_ddmmss (n1, n0, n1, n0, d1, d0);
f6: 00 08 clrt
f8: 3b 7a subc r7,r11
fa: 36 1a subc r1,r6

if (rp != 0)
{
rr.s.low = n0;
rr.s.high = n1;
*rp = rr.ll;
fc: 60 63 mov r6,r0
fe: af cc bra 9a <___moddi3+0x9a>
100: 61 b3 mov r11,r1
}
else
{
/* qq = NN / 0d */

if (d0 == 0)
102: 8f 04 bf.s 10e <___moddi3+0x10e>
104: e4 01 mov #1,r4
d0 = 1 / d0; /* Divide intentionally by zero. */
106: d7 56 mov.l 260 <___moddi3+0x260>,r7 ! 0 <___moddi3>
108: 47 0b jsr @r7
10a: e5 00 mov #0,r5
10c: 67 03 mov r0,r7

count_leading_zeros (bm, d0);
10e: d1 50 mov.l 250 <___moddi3+0x250>,r1 ! ffff
110: 37 16 cmp/hi r1,r7
112: 8b 2c bf 16e <___moddi3+0x16e>
114: d1 4f mov.l 254 <___moddi3+0x254>,r1 ! ffffff
116: 37 16 cmp/hi r1,r7
118: 8b 01 bf 11e <___moddi3+0x11e>
11a: a0 8b bra 234 <___moddi3+0x234>
11c: e1 18 mov #24,r1
11e: e1 10 mov #16,r1
120: e2 10 mov #16,r2
122: 61 1b neg r1,r1
124: 60 73 mov r7,r0
126: 40 1d shld r1,r0
128: d1 4b mov.l 258 <___moddi3+0x258>,r1 ! 0 <___moddi3>
12a: 01 1c mov.b @(r0,r1),r1
12c: 61 1c extu.b r1,r1
12e: 31 2c add r2,r1
130: 61 1b neg r1,r1
132: 68 13 mov r1,r8
134: 78 20 add #32,r8

if (bm == 0)
136: 28 88 tst r8,r8
138: 8f 5a bf.s 1f0 <___moddi3+0x1f0>
13a: 6c 63 mov r6,r12
13c: d9 47 mov.l 25c <___moddi3+0x25c>,r9 ! 0 <___moddi3>
leading quotient digit q1 = 1).

This special case is necessary, not an optimization.
(Shifts counts of W_TYPE_SIZE are undefined.) */

n1 -= d0;
13e: 60 63 mov r6,r0
140: 30 78 sub r7,r0
udiv_qrnnd (q1, n1, n2, n1, d0);
}

/* n1 != d0... */

udiv_qrnnd (q0, n0, n1, n0, d0);
142: 65 73 mov r7,r5
144: 64 b9 swap.w r11,r4
146: 66 59 swap.w r5,r6
148: 49 0b jsr @r9
14a: 46 28 shll16 r6
14c: 64 49 swap.w r4,r4
14e: 49 0b jsr @r9
150: 67 19 swap.w r1,r7
152: 27 1b or r1,r7
154: 63 03 mov r0,r3
/* Remainder in n0 >> bm. */
}

if (rp != 0)
{
rr.s.low = n0 >> bm;
156: 68 8b neg r8,r8
rr.s.high = 0;
*rp = rr.ll;
158: 61 33 mov r3,r1
15a: e0 00 mov #0,r0
15c: af 9d bra 9a <___moddi3+0x9a>
15e: 41 8d shld r8,r1

if (uu.s.high < 0)
c = ~c,
uu.ll = -uu.ll;
if (vv.s.high < 0)
vv.ll = -vv.ll;
160: 63 7a negc r7,r3
162: af 61 bra 28 <___moddi3+0x28>
164: 62 6a negc r6,r2
DWunion vv = {.ll = v};
DWtype w;

if (uu.s.high < 0)
c = ~c,
uu.ll = -uu.ll;
166: 69 5a negc r5,r9
168: 68 4a negc r4,r8
DWunion uu = {.ll = u};
DWunion vv = {.ll = v};
DWtype w;

if (uu.s.high < 0)
c = ~c,
16a: af 57 bra 1c <___moddi3+0x1c>
16c: ea ff mov #-1,r10
/* qq = NN / 0d */

if (d0 == 0)
d0 = 1 / d0; /* Divide intentionally by zero. */

count_leading_zeros (bm, d0);
16e: 91 6e mov.w 24e <___moddi3+0x24e>,r1 ! ff
170: 37 16 cmp/hi r1,r7
172: 89 59 bt 228 <___moddi3+0x228>
174: e1 00 mov #0,r1
176: af d4 bra 122 <___moddi3+0x122>
178: e2 00 mov #0,r2
{
if (d0 > n1)
{
/* 0q = nn / 0D */

count_leading_zeros (bm, d0);
17a: 91 68 mov.w 24e <___moddi3+0x24e>,r1 ! ff
17c: 33 16 cmp/hi r1,r3
17e: 89 56 bt 22e <___moddi3+0x22e>
180: e1 00 mov #0,r1
182: af 67 bra 54 <___moddi3+0x54>
184: 61 1b neg r1,r1
}
else
{
/* 0q = NN / dd */

count_leading_zeros (bm, d1);
186: 92 62 mov.w 24e <___moddi3+0x24e>,r2 ! ff
188: 31 26 cmp/hi r2,r1
18a: 89 57 bt 23c <___moddi3+0x23c>
18c: e2 00 mov #0,r2
18e: af a2 bra d6 <___moddi3+0xd6>
190: e5 00 mov #0,r5
UWtype m1, m0;
/* Normalize. */

b = W_TYPE_SIZE - bm;

d1 = (d1 << bm) | (d0 >> b);
192: 69 13 mov r1,r9
d0 = d0 << bm;
n2 = n1 >> b;
n1 = (n1 << bm) | (n0 >> b);
194: 6c 63 mov r6,r12
UWtype m1, m0;
/* Normalize. */

b = W_TYPE_SIZE - bm;

d1 = (d1 << bm) | (d0 >> b);
196: 61 73 mov r7,r1
198: 41 2d shld r2,r1
d0 = d0 << bm;
n2 = n1 >> b;
19a: 46 2d shld r2,r6
n1 = (n1 << bm) | (n0 >> b);
n0 = n0 << bm;
19c: 6d b3 mov r11,r13
UWtype m1, m0;
/* Normalize. */

b = W_TYPE_SIZE - bm;

d1 = (d1 << bm) | (d0 >> b);
19e: 49 8d shld r8,r9
d0 = d0 << bm;
n2 = n1 >> b;
n1 = (n1 << bm) | (n0 >> b);
1a0: 4b 2d shld r2,r11
1a2: 4c 8d shld r8,r12
UWtype m1, m0;
/* Normalize. */

b = W_TYPE_SIZE - bm;

d1 = (d1 << bm) | (d0 >> b);
1a4: 29 1b or r1,r9
d0 = d0 << bm;
n2 = n1 >> b;
n1 = (n1 << bm) | (n0 >> b);
1a6: 2c bb or r11,r12
n0 = n0 << bm;

udiv_qrnnd (q0, n1, n2, n1, d1);
1a8: 60 63 mov r6,r0
/* Normalize. */

b = W_TYPE_SIZE - bm;

d1 = (d1 << bm) | (d0 >> b);
d0 = d0 << bm;
1aa: 47 8d shld r8,r7
n2 = n1 >> b;
n1 = (n1 << bm) | (n0 >> b);
n0 = n0 << bm;

udiv_qrnnd (q0, n1, n2, n1, d1);
1ac: db 2b mov.l 25c <___moddi3+0x25c>,r11 ! 0 <___moddi3>
1ae: 65 93 mov r9,r5
1b0: 64 c9 swap.w r12,r4
1b2: 66 59 swap.w r5,r6
1b4: 4b 0b jsr @r11
1b6: 46 28 shll16 r6
1b8: 64 49 swap.w r4,r4
1ba: 4b 0b jsr @r11
1bc: 6c 19 swap.w r1,r12
1be: 2c 1b or r1,r12
umul_ppmm (m1, m0, q0, d0);
1c0: 37 c5 dmulu.l r12,r7
1c2: 06 1a sts macl,r6
1c4: 0c 0a sts mach,r12

if (m1 > n1 || (m1 == n1 && m0 > n0))
1c6: 30 c2 cmp/hs r12,r0

d1 = (d1 << bm) | (d0 >> b);
d0 = d0 << bm;
n2 = n1 >> b;
n1 = (n1 << bm) | (n0 >> b);
n0 = n0 << bm;
1c8: 4d 8d shld r8,r13

udiv_qrnnd (q0, n1, n2, n1, d1);
umul_ppmm (m1, m0, q0, d0);
1ca: 61 c3 mov r12,r1

if (m1 > n1 || (m1 == n1 && m0 > n0))
1cc: 8f 26 bf.s 21c <___moddi3+0x21c>
1ce: 62 63 mov r6,r2
1d0: 30 c0 cmp/eq r12,r0
1d2: 8d 38 bt.s 246 <___moddi3+0x246>
1d4: 3d 62 cmp/hs r6,r13

/* Remainder in (n1n0 - m1m0) >> bm. */
if (rp != 0)
{
sub_ddmmss (n1, n0, n1, n0, m1, m0);
rr.s.low = (n1 << b) | (n0 >> bm);
1d6: 68 8b neg r8,r8
q1 = 0;

/* Remainder in (n1n0 - m1m0) >> bm. */
if (rp != 0)
{
sub_ddmmss (n1, n0, n1, n0, m1, m0);
1d8: 66 03 mov r0,r6
1da: 00 08 clrt
1dc: 3d 2a subc r2,r13
1de: 36 1a subc r1,r6
rr.s.low = (n1 << b) | (n0 >> bm);
1e0: 4d 8d shld r8,r13
1e2: 62 63 mov r6,r2
1e4: 42 3d shld r3,r2
rr.s.high = n1 >> bm;
*rp = rr.ll;
1e6: 60 63 mov r6,r0
1e8: 61 d3 mov r13,r1
1ea: 40 8d shld r8,r0
1ec: af 55 bra 9a <___moddi3+0x9a>
1ee: 21 2b or r2,r1
/* Normalize. */

b = W_TYPE_SIZE - bm;

d0 = d0 << bm;
n2 = n1 >> b;
1f0: 63 63 mov r6,r3
n1 = (n1 << bm) | (n0 >> b);
1f2: 62 b3 mov r11,r2
/* Normalize. */

b = W_TYPE_SIZE - bm;

d0 = d0 << bm;
n2 = n1 >> b;
1f4: 43 1d shld r1,r3
n1 = (n1 << bm) | (n0 >> b);
n0 = n0 << bm;

udiv_qrnnd (q1, n1, n2, n1, d0);
1f6: d9 19 mov.l 25c <___moddi3+0x25c>,r9 ! 0 <___moddi3>

b = W_TYPE_SIZE - bm;

d0 = d0 << bm;
n2 = n1 >> b;
n1 = (n1 << bm) | (n0 >> b);
1f8: 42 1d shld r1,r2
1fa: 4c 8d shld r8,r12
{
/* Normalize. */

b = W_TYPE_SIZE - bm;

d0 = d0 << bm;
1fc: 47 8d shld r8,r7
n2 = n1 >> b;
n1 = (n1 << bm) | (n0 >> b);
1fe: 2c 2b or r2,r12
n0 = n0 << bm;

udiv_qrnnd (q1, n1, n2, n1, d0);
200: 60 33 mov r3,r0
b = W_TYPE_SIZE - bm;

d0 = d0 << bm;
n2 = n1 >> b;
n1 = (n1 << bm) | (n0 >> b);
n0 = n0 << bm;
202: 4b 8d shld r8,r11

udiv_qrnnd (q1, n1, n2, n1, d0);
204: 65 73 mov r7,r5
206: 64 c9 swap.w r12,r4
208: 66 59 swap.w r5,r6
20a: 49 0b jsr @r9
20c: 46 28 shll16 r6
20e: 64 49 swap.w r4,r4
210: 49 0b jsr @r9
212: 6c 19 swap.w r1,r12
214: 2c 1b or r1,r12
216: af 94 bra 142 <___moddi3+0x142>
218: 00 09 nop
umul_ppmm (m1, m0, q0, d0);

if (m1 > n1 || (m1 == n1 && m0 > n0))
{
q0--;
sub_ddmmss (m1, m0, m1, m0, d1, d0);
21a: 61 c3 mov r12,r1
21c: 62 63 mov r6,r2
21e: 00 08 clrt
220: 32 7a subc r7,r2
222: 31 9a subc r9,r1

/* Remainder in (n1n0 - m1m0) >> bm. */
if (rp != 0)
{
sub_ddmmss (n1, n0, n1, n0, m1, m0);
rr.s.low = (n1 << b) | (n0 >> bm);
224: af d8 bra 1d8 <___moddi3+0x1d8>
226: 68 8b neg r8,r8
/* qq = NN / 0d */

if (d0 == 0)
d0 = 1 / d0; /* Divide intentionally by zero. */

count_leading_zeros (bm, d0);
228: e1 08 mov #8,r1
22a: af 7a bra 122 <___moddi3+0x122>
22c: e2 08 mov #8,r2
{
if (d0 > n1)
{
/* 0q = nn / 0D */

count_leading_zeros (bm, d0);
22e: e1 08 mov #8,r1
230: af 0f bra 52 <___moddi3+0x52>
232: e2 08 mov #8,r2
/* qq = NN / 0d */

if (d0 == 0)
d0 = 1 / d0; /* Divide intentionally by zero. */

count_leading_zeros (bm, d0);
234: af 75 bra 122 <___moddi3+0x122>
236: e2 18 mov #24,r2
{
if (d0 > n1)
{
/* 0q = nn / 0D */

count_leading_zeros (bm, d0);
238: af 0b bra 52 <___moddi3+0x52>
23a: e2 18 mov #24,r2
}
else
{
/* 0q = NN / dd */

count_leading_zeros (bm, d1);
23c: e2 08 mov #8,r2
23e: af 4a bra d6 <___moddi3+0xd6>
240: e5 08 mov #8,r5
242: af 48 bra d6 <___moddi3+0xd6>
244: e5 18 mov #24,r5
n0 = n0 << bm;

udiv_qrnnd (q0, n1, n2, n1, d1);
umul_ppmm (m1, m0, q0, d0);

if (m1 > n1 || (m1 == n1 && m0 > n0))
246: 8b e8 bf 21a <___moddi3+0x21a>
248: 61 03 mov r0,r1

/* Remainder in (n1n0 - m1m0) >> bm. */
if (rp != 0)
{
sub_ddmmss (n1, n0, n1, n0, m1, m0);
rr.s.low = (n1 << b) | (n0 >> bm);
24a: af c5 bra 1d8 <___moddi3+0x1d8>
24c: 68 8b neg r8,r8
24e: 00 ff mac.l @r15+,@r0+
250: 00 00 .word 0x0000
252: ff ff .word 0xffff
254: 00 ff mac.l @r15+,@r0+
256: ff ff .word 0xffff
...
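
For reference, the C fragments interleaved above are libgcc's generic __moddi3: strip the signs, let __udivmoddi4 produce the unsigned remainder, then give the result the sign of the dividend. A rough stand-alone sketch of that flow, with simplified types and a plain 64-bit % standing in for the __udivmoddi4 call (the name moddi3_sketch is made up for illustration, not libgcc's code), is:

#include <stdint.h>

/* Sketch only: mirrors the sign handling shown in the dump.
   'negate' plays the role of the flag c kept in r10. */
int64_t moddi3_sketch(int64_t u, int64_t v)
{
    int negate = 0;
    uint64_t uu = (uint64_t)u;
    uint64_t vv = (uint64_t)v;

    if (u < 0) { negate = 1; uu = -uu; }   /* uu.ll = -uu.ll */
    if (v < 0) { vv = -vv; }               /* vv.ll = -vv.ll */

    uint64_t w = uu % vv;   /* __udivmoddi4 (uu, vv, &w) in libgcc */

    return negate ? -(int64_t)w : (int64_t)w;
}

Judging from the interleaved source, the negc pairs at 160 and 166 are the 64-bit negations, the mov #0/#-1 into r10 tracks the sign flag, and the jsr sequences (relocated, shown as 0 <___moddi3>) call the helper that udiv_qrnnd expands to on SH, 16 bits of quotient per call.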