fastmemcpy-arm.S.preprocessed

/*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/

#if defined(__arm__) && !defined(TARGET_ANDROID)
#if defined(__ARM_NEON__)

.text
#ifndef __APPLE__
@.fpu neon
.globl fast_memcpy
@.type fast_memcpy, %function
#else
.globl _fast_memcpy
#endif
.align 4

/* a prefetch distance of 4 cache-lines works best experimentally */
#define CACHE_LINE_SIZE 64
#define PREFETCH_DISTANCE (CACHE_LINE_SIZE*4)

#ifndef __APPLE__
.fnstart
.save {r0, lr}
fast_memcpy:
#else
_fast_memcpy:
#endif
stmfd sp!, {r0, lr}

/* start preloading as early as possible */
pld [r1, #(CACHE_LINE_SIZE*0)]
pld [r1, #(CACHE_LINE_SIZE*1)]

/* do we have at least 16 bytes to copy (needed for the alignment below) */
cmp r2, #16
blo 5f

/* align the destination to a half cache-line for the write-buffer */
rsb r3, r0, #0
ands r3, r3, #0xF
beq 0f

/* copy up to 15 bytes (count in r3) */
sub r2, r2, r3
movs ip, r3, lsl #31
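/* lsl #31 puts bit 0 of r3 into the N flag and bit 1 into C:
 * MI -> copy 1 byte, CS -> copy 2 bytes */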
ldrmib lr, [r1], #1
strmib lr, [r0], #1
ldrcsb ip, [r1], #1
ldrcsb lr, [r1], #1
strcsb ip, [r0], #1
strcsb lr, [r0], #1
movs ip, r3, lsl #29
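/* likewise, lsl #29 puts bit 2 of r3 into N and bit 3 into C:
 * MI -> copy 4 bytes, CS -> copy 8 bytes */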
bge 1f
// copies 4 bytes, destination 32-bits aligned
vld4.8 {d0[0], d1[0], d2[0], d3[0]}, [r1]!
vst4.8 {d0[0], d1[0], d2[0], d3[0]}, [r0, :32]!
1: bcc 2f
// copies 8 bytes, destination 64-bits aligned
vld1.8 {d0}, [r1]!
vst1.8 {d0}, [r0, :64]!
2:

0: /* immediately preload the next cache lines, which we may need */
pld [r1, #(CACHE_LINE_SIZE*0)]
pld [r1, #(CACHE_LINE_SIZE*1)]

/* make sure we have at least 64 bytes to copy */
subs r2, r2, #64
blo 2f

/* preload all the cache lines we need.
* NOTE: the number of pld below depends on PREFETCH_DISTANCE;
* ideally we would increase the distance in the main loop to
* avoid the goofy code below. In practice this doesn't seem to make
* a big difference.
*/
pld [r1, #(CACHE_LINE_SIZE*2)]
pld [r1, #(CACHE_LINE_SIZE*3)]
pld [r1, #(PREFETCH_DISTANCE)]

1: /* The main loop copies 64 bytes at a time */
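/* the :128 alignment hints on the stores below are safe because the
 * destination was aligned to 16 bytes above */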
vld1.8 {d0 - d3}, [r1]!
vld1.8 {d4 - d7}, [r1]!
pld [r1, #(PREFETCH_DISTANCE)]
subs r2, r2, #64
vst1.8 {d0 - d3}, [r0, :128]!
vst1.8 {d4 - d7}, [r0, :128]!
bhs 1b

2: /* fix up the remaining count and make sure we have >= 32 bytes left */
add r2, r2, #64
subs r2, r2, #32
blo 4f

3: /* 32 bytes at a time. These cache lines were already preloaded */
vld1.8 {d0 - d3}, [r1]!
subs r2, r2, #32
vst1.8 {d0 - d3}, [r0, :128]!
bhs 3b

4: /* less than 32 bytes left */
add r2, r2, #32
tst r2, #0x10
beq 5f
// copies 16 bytes, 128-bits aligned
vld1.8 {d0, d1}, [r1]!
vst1.8 {d0, d1}, [r0, :128]!

5: /* copy up to 15 bytes (count in r2) */
movs ip, r2, lsl #29
bcc 1f
vld1.8 {d0}, [r1]!
vst1.8 {d0}, [r0]!
1: bge 2f
vld4.8 {d0[0], d1[0], d2[0], d3[0]}, [r1]!
vst4.8 {d0[0], d1[0], d2[0], d3[0]}, [r0]!
2: movs ip, r2, lsl #31
ldrmib r3, [r1], #1
ldrcsb ip, [r1], #1
ldrcsb lr, [r1], #1
strmib r3, [r0], #1
strcsb ip, [r0], #1
strcsb lr, [r0], #1

ldmfd sp!, {r0, lr}
bx lr
#ifndef __APPLE__
.fnend
#endif

#else /* !defined(__ARM_NEON__) */

.text

#ifndef __APPLE__
.globl fast_memcpy
@.type fast_memcpy, %function
#else
.globl _fast_memcpy
#endif
.align 4

/*
* Optimized memcpy() for ARM.
*
* note that memcpy() always returns the destination pointer,
* so we have to preserve R0.
*/

#ifndef __APPLE__
fast_memcpy:
#else
_fast_memcpy:
#endif
/* The stack must always be 64-bits aligned to be compliant with the
* ARM ABI. Since we have to save R0, we might as well save R4,
* which we can use for better pipelining of the reads below
*/
#ifndef __APPLE__
.fnstart
.save {r0, r4, lr}
#endif
stmfd sp!, {r0, r4, lr}
/* make room for r5-r11, which will be spilled later */
#ifndef __APPLE__
.pad #28
#endif
sub sp, sp, #28

// preload the destination because we'll align it to a cache line
// with small writes. Also start the source "pump".
//PLD (r0, #0)
//PLD (r1, #0)
//PLD (r1, #32)

/* it simplifies things to take care of len < 4 early */
cmp r2, #4
blo copy_last_3_and_return

/* compute the offset to align the source
* offset = (4-(src&3))&3 = -src & 3
*/
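/* e.g. src = 0x1003: -0x1003 & 3 = 1, so copying one byte aligns the source */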
rsb r3, r1, #0
ands r3, r3, #3
beq src_aligned

/* align the source to 32 bits. We need to insert 2 instructions between
* a ldr[b|h] and str[b|h] because byte and half-word instructions
* stall 2 cycles.
*/
movs r12, r3, lsl #31
sub r2, r2, r3 /* we know that r3 <= r2 because r2 >= 4 */
ldrmib r3, [r1], #1
ldrcsb r4, [r1], #1
ldrcsb r12,[r1], #1
strmib r3, [r0], #1
strcsb r4, [r0], #1
strcsb r12,[r0], #1

src_aligned:

/* see if src and dst are aligned together (congruent) */
eor r12, r0, r1
tst r12, #3
bne non_congruent

/* Use post-increment mode for stm to spill r5-r11 to the reserved stack
* frame. Don't update sp.
*/
stmea sp, {r5-r11}

/* align the destination to a cache-line */
rsb r3, r0, #0
ands r3, r3, #0x1C
beq congruent_aligned32
cmp r3, r2
andhi r3, r2, #0x1C

/* conditionally copies 0 to 7 words (length in r3) */
movs r12, r3, lsl #28
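/* lsl #28 puts bit 3 of r3 into N and bit 4 into C:
 * CS -> copy 16 bytes, MI -> copy 8 bytes, then tst handles the last 4 */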
ldmcsia r1!, {r4, r5, r6, r7} /* 16 bytes */
ldmmiia r1!, {r8, r9} /* 8 bytes */
stmcsia r0!, {r4, r5, r6, r7}
stmmiia r0!, {r8, r9}
tst r3, #0x4
ldrne r10,[r1], #4 /* 4 bytes */
strne r10,[r0], #4
sub r2, r2, r3

congruent_aligned32:
/*
* here source is aligned to 32 bytes.
*/

cached_aligned32:
subs r2, r2, #32
blo less_than_32_left

/*
* We preload a cache-line up to 64 bytes ahead. On the 926, this will
* stall only until the requested word is fetched, but the linefill
* continues in the background.
* While the linefill is going, we write our previous cache-line
* into the write-buffer (which should have some free space).
* When the linefill is done, the write-buffer will
* start dumping its content into memory.
*
* While all this is going on, we then load a full cache line into
* 8 registers; this cache line should be in the cache by now
* (or partly in the cache).
*
* This code should work well regardless of the source/dest alignment.
*
*/

// Align the preload register to a cache-line because the cpu does
// "critical word first" (the first word requested is loaded first).
bic r12, r1, #0x1F
add r12, r12, #64

1: ldmia r1!, { r4-r11 }
//PLD (r12, #64)
subs r2, r2, #32

// NOTE: if r12 is more than 64 bytes ahead of r1, the following ldrhi
// for the ARM9 preload will not be safely guarded by the preceding subs.
// When it is safely guarded, the only way to get a SIGSEGV here
// is if the caller overstates the length.
ldrhi r3, [r12], #32 /* cheap ARM9 preload */
stmia r0!, { r4-r11 }
bhs 1b

add r2, r2, #32

less_than_32_left:
/*
* less than 32 bytes left at this point (length in r2)
*/

/* skip all this if there is nothing to do, which should
* be a common case (if not executed, the code below takes
* about 16 cycles)
*/
tst r2, #0x1F
beq 1f

/* conditionally copies 0 to 31 bytes */
movs r12, r2, lsl #28
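/* same flag trick as above: CS -> 16 bytes, MI -> 8 bytes; the
 * lsl #30 below selects the 4- and 2-byte tails, and tst the last byte */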
ldmcsia r1!, {r4, r5, r6, r7} /* 16 bytes */
ldmmiia r1!, {r8, r9} /* 8 bytes */
stmcsia r0!, {r4, r5, r6, r7}
stmmiia r0!, {r8, r9}
movs r12, r2, lsl #30
ldrcs r3, [r1], #4 /* 4 bytes */
ldrmih r4, [r1], #2 /* 2 bytes */
strcs r3, [r0], #4
strmih r4, [r0], #2
tst r2, #0x1
ldrneb r3, [r1] /* last byte */
strneb r3, [r0]

/* we're done! restore everything and return */
1: ldmfd sp!, {r5-r11}
ldmfd sp!, {r0, r4, lr}
bx lr

/********************************************************************/

non_congruent:
/*
* here source is aligned to 4 bytes
* but destination is not.
*
* in the code below r2 is the number of bytes read
* (the number of bytes written is always smaller, because we have
* partial words in the shift queue)
*/
cmp r2, #4
blo copy_last_3_and_return

/* Use post-increment mode for stm to spill r5-r11 to the reserved stack
* frame. Don't update sp.
*/
stmea sp, {r5-r11}

/* compute the shifts needed to align src to dest */
rsb r5, r0, #0
and r5, r5, #3 /* r5 = # bytes in partial words */
mov r12, r5, lsl #3 /* r12 = right shift */
rsb lr, r12, #32 /* lr = left shift */
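/* e.g. dst & 3 == 3: r5 = 1 byte aligns dst, r12 = 8 (right shift),
 * lr = 24 (left shift), so the dispatch below lands in loop8 */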

/* read the first word */
ldr r3, [r1], #4
sub r2, r2, #4

/* write a partial word (0 to 3 bytes), such that the destination
* becomes aligned to 32 bits (r5 = number of bytes to copy for alignment)
*/
movs r5, r5, lsl #31
strmib r3, [r0], #1
movmi r3, r3, lsr #8
strcsb r3, [r0], #1
movcs r3, r3, lsr #8
strcsb r3, [r0], #1
movcs r3, r3, lsr #8

cmp r2, #4
blo partial_word_tail

/* Align destination to 32 bytes (cache line boundary) */
1: tst r0, #0x1c
beq 2f
ldr r5, [r1], #4
sub r2, r2, #4
orr r4, r3, r5, lsl lr
mov r3, r5, lsr r12
str r4, [r0], #4
cmp r2, #4
bhs 1b
blo partial_word_tail

/* copy 32 bytes at a time */
2: subs r2, r2, #32
blo less_than_thirtytwo

/* Use immediate mode for the shifts, because a register-specified
* shift costs an extra cycle, which could account for up to 50% of
* the performance hit.
*/

cmp r12, #24
beq loop24
cmp r12, #8
beq loop8
loop16:
ldr r12, [r1], #4
1: mov r4, r12
ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
//PLD (r1, #64)
subs r2, r2, #32
ldrhs r12, [r1], #4
orr r3, r3, r4, lsl #16
mov r4, r4, lsr #16
orr r4, r4, r5, lsl #16
mov r5, r5, lsr #16
orr r5, r5, r6, lsl #16
mov r6, r6, lsr #16
orr r6, r6, r7, lsl #16
mov r7, r7, lsr #16
orr r7, r7, r8, lsl #16
mov r8, r8, lsr #16
orr r8, r8, r9, lsl #16
mov r9, r9, lsr #16
orr r9, r9, r10, lsl #16
mov r10, r10, lsr #16
orr r10, r10, r11, lsl #16
stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
mov r3, r11, lsr #16
bhs 1b
b less_than_thirtytwo

loop8:
ldr r12, [r1], #4
1: mov r4, r12
ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
//PLD (r1, #64)
subs r2, r2, #32
ldrhs r12, [r1], #4
orr r3, r3, r4, lsl #24
mov r4, r4, lsr #8
orr r4, r4, r5, lsl #24
mov r5, r5, lsr #8
orr r5, r5, r6, lsl #24
mov r6, r6, lsr #8
orr r6, r6, r7, lsl #24
mov r7, r7, lsr #8
orr r7, r7, r8, lsl #24
mov r8, r8, lsr #8
orr r8, r8, r9, lsl #24
mov r9, r9, lsr #8
orr r9, r9, r10, lsl #24
mov r10, r10, lsr #8
orr r10, r10, r11, lsl #24
stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
mov r3, r11, lsr #8
bhs 1b
b less_than_thirtytwo

loop24:
ldr r12, [r1], #4
1: mov r4, r12
ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
//PLD (r1, #64)
subs r2, r2, #32
ldrhs r12, [r1], #4
orr r3, r3, r4, lsl #8
mov r4, r4, lsr #24
orr r4, r4, r5, lsl #8
mov r5, r5, lsr #24
orr r5, r5, r6, lsl #8
mov r6, r6, lsr #24
orr r6, r6, r7, lsl #8
mov r7, r7, lsr #24
orr r7, r7, r8, lsl #8
mov r8, r8, lsr #24
orr r8, r8, r9, lsl #8
mov r9, r9, lsr #24
orr r9, r9, r10, lsl #8
mov r10, r10, lsr #24
orr r10, r10, r11, lsl #8
stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
mov r3, r11, lsr #24
bhs 1b

less_than_thirtytwo:
/* copy the last 0 to 31 bytes of the source */
rsb r12, lr, #32 /* we corrupted r12, recompute it */
add r2, r2, #32
cmp r2, #4
blo partial_word_tail

1: ldr r5, [r1], #4
sub r2, r2, #4
orr r4, r3, r5, lsl lr
mov r3, r5, lsr r12
str r4, [r0], #4
cmp r2, #4
bhs 1b

partial_word_tail:
/* we have a partial word in the input buffer */
movs r5, lr, lsl #(31-3)
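/* lr = 32 - 8*r5, so the N/C flags here select how many of the 1 to 3
 * leftover bytes still sitting in the shift register r3 get stored */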
strmib r3, [r0], #1
movmi r3, r3, lsr #8
strcsb r3, [r0], #1
movcs r3, r3, lsr #8
strcsb r3, [r0], #1

/* Refill the spilled registers from the stack. Don't update sp. */
ldmfd sp, {r5-r11}

copy_last_3_and_return:
movs r2, r2, lsl #31 /* copy the remaining 0, 1, 2 or 3 bytes */
ldrmib r2, [r1], #1
ldrcsb r3, [r1], #1
ldrcsb r12,[r1]
strmib r2, [r0], #1
strcsb r3, [r0], #1
strcsb r12,[r0]

/* we're done! restore sp and the spilled registers and return */
add sp, sp, #28
ldmfd sp!, {r0, r4, lr}
bx lr
#ifndef __APPLE__
.fnend
#endif

#endif /* !defined(__ARM_NEON__) */
#endif /* __arm__ && !TARGET_ANDROID */

#if defined(__linux__) && defined(__ELF__)
/* we don't need an executable stack */
.section .note.GNU-stack,"",%progbits
#endif
.text
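
For reference, a minimal C-side usage sketch. It assumes the memcpy-style contract the register usage above implies (destination in r0, source in r1, byte count in r2, destination pointer returned in r0); the buffers and sizes here are illustrative, not from the original file. On Apple targets the assembly exports _fast_memcpy, which the C compiler's leading-underscore mangling matches automatically.

/* Hypothetical caller for fast_memcpy; assumes it follows the memcpy contract. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

extern void *fast_memcpy(void *dest, const void *src, size_t n);

int main(void)
{
    char src[64], dst[64];
    memset(src, 'x', sizeof(src));
    src[63] = '\0';

    /* like memcpy(), the routine returns the destination pointer (r0) */
    void *ret = fast_memcpy(dst, src, sizeof(src));
    printf("dest returned intact: %s\n", ret == dst ? "yes" : "no");
    return 0;
}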