#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
        return __bus_to_pfn(addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
        return (void *)__bus_to_virt(addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
        return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
        return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
        return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
        return __arch_virt_to_dma(dev, addr);
}
#endif

/*
 * The DMA API is built upon the notion of "buffer ownership". A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device. These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches. We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 *
 * Private support functions: these are not part of the API and are
 * liable to change. Drivers must not use these.
 */
static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
        enum dma_data_direction dir)
{
        extern void ___dma_single_cpu_to_dev(const void *, size_t,
                enum dma_data_direction);

        if (!arch_is_coherent())
                ___dma_single_cpu_to_dev(kaddr, size, dir);
}

static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
        enum dma_data_direction dir)
{
        extern void ___dma_single_dev_to_cpu(const void *, size_t,
                enum dma_data_direction);

        if (!arch_is_coherent())
                ___dma_single_dev_to_cpu(kaddr, size, dir);
}

static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
        size_t size, enum dma_data_direction dir)
{
        extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
                size_t, enum dma_data_direction);

        if (!arch_is_coherent())
                ___dma_page_cpu_to_dev(page, off, size, dir);
}

static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
        size_t size, enum dma_data_direction dir)
{
        extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
                size_t, enum dma_data_direction);

        if (!arch_is_coherent())
                ___dma_page_dev_to_cpu(page, off, size, dir);
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 *
 * FIXME: This should really be a platform specific issue - we should
 * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
        if (mask < ISA_DMA_THRESHOLD)
                return 0;
        return 1;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
#ifdef CONFIG_DMABOUNCE
        if (dev->archdata.dmabounce) {
                if (dma_mask >= ISA_DMA_THRESHOLD)
                        return 0;
                else
                        return -EIO;
        }
#endif
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;

        *dev->dma_mask = dma_mask;

        return 0;
}
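
/*
 * Example: a driver would typically negotiate its DMA mask early in its
 * probe routine. This is only an illustrative sketch; "foo_probe" and the
 * choice of a 32-bit mask are hypothetical, not part of this header:
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              struct device *dev = &pdev->dev;
 *
 *              if (dma_set_mask(dev, DMA_BIT_MASK(32)))
 *                      return -EIO;
 *              ...
 *      }
 */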

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dma_addr == ~0;
}

/*
 * Dummy noncoherent implementation. We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
        dma_addr_t *handle, gfp_t gfp)
{
        return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
        void *cpu_addr, dma_addr_t handle)
{
}

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA. This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
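
/*
 * Example: allocating and releasing a small coherent buffer. A minimal
 * sketch only; "dev", "BUF_SIZE" and the surrounding error handling are
 * hypothetical, not part of this header:
 *
 *      dma_addr_t dma_handle;
 *      void *cpu_addr;
 *
 *      cpu_addr = dma_alloc_coherent(dev, BUF_SIZE, &dma_handle, GFP_KERNEL);
 *      if (!cpu_addr)
 *              return -ENOMEM;
 *
 *      ... hand dma_handle to the device, access the buffer via cpu_addr ...
 *
 *      dma_free_coherent(dev, BUF_SIZE, cpu_addr, dma_handle);
 */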

/**
 * dma_alloc_writethrough - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some writethrough cached memory for a device for
 * performing DMA. This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_writethrough(struct device *, size_t, dma_addr_t *, gfp_t);

/**
 * dma_alloc_noncacheable - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some noncacheable memory for a device for
 * performing DMA. This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_noncacheable(struct device *, size_t, dma_addr_t *, gfp_t);

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * during and after this call are illegal.
 */
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
                void *, dma_addr_t, size_t);
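
/*
 * Example: exposing a coherent buffer to user space from a driver's
 * mmap() file operation. Illustrative only; "foo_mmap", "struct foo_priv"
 * and its cpu_addr/dma_handle/size fields are hypothetical names:
 *
 *      static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              struct foo_priv *priv = file->private_data;
 *
 *              return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
 *                                       priv->dma_handle, priv->size);
 *      }
 */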

/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA. This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
                gfp_t);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
        dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
                void *, dma_addr_t, size_t);
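
/*
 * Example: write-combined memory is typically used for frame buffers and
 * similar CPU-write-mostly buffers. A hypothetical sketch ("fb_size" and
 * the surrounding driver context are not part of this header):
 *
 *      dma_addr_t fb_dma;
 *      void *fb_cpu;
 *
 *      fb_cpu = dma_alloc_writecombine(dev, fb_size, &fb_dma, GFP_KERNEL);
 *      if (!fb_cpu)
 *              return -ENOMEM;
 *      ...
 *      dma_free_writecombine(dev, fb_size, fb_cpu, fb_dma);
 */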

#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
 * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
 *
 * The following are helper functions used by the dmabounce subsystem
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
                unsigned long);
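
/*
 * Example: platform code for a bus with a limited inbound window might
 * register its devices for bouncing at bus-attach time. The pool sizes
 * below are hypothetical; real values are chosen per platform:
 *
 *      if (dmabounce_register_dev(dev, 2048, 4096))
 *              dev_err(dev, "failed to register with dmabounce\n");
 */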

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 */
extern void dmabounce_unregister_dev(struct device *);

/**
 * dma_needs_bounce
 *
 * @dev: valid struct device pointer
 * @dma_handle: dma_handle of unbounced buffer
 * @size: size of region being mapped
 *
 * Platforms that utilize the dmabounce mechanism must implement
 * this function.
 *
 * The dmabounce routines call this function whenever a dma-mapping
 * is requested to determine whether a given buffer needs to be bounced
 * or not. The function must return 0 if the buffer is OK for
 * DMA access and 1 if the buffer needs to be bounced.
 */
extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
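
/*
 * Example: a hypothetical platform whose 64MB inbound window starts at bus
 * address 0 might implement dma_needs_bounce() along these lines (sketch
 * only, not a real platform implementation):
 *
 *      int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
 *      {
 *              return (addr + size) > SZ_64M;
 *      }
 */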

/*
 * The DMA API, implemented by dmabounce.c. See below for descriptions.
 */
extern dma_addr_t __dma_map_single(struct device *, void *, size_t,
                enum dma_data_direction);
extern void __dma_unmap_single(struct device *, dma_addr_t, size_t,
                enum dma_data_direction);
extern dma_addr_t __dma_map_page(struct device *, struct page *,
                unsigned long, size_t, enum dma_data_direction);
extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
                enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
                size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
                size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
        unsigned long offset, size_t size, enum dma_data_direction dir)
{
        return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
        unsigned long offset, size_t size, enum dma_data_direction dir)
{
        return 1;
}

static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr,
        size_t size, enum dma_data_direction dir)
{
        __dma_single_cpu_to_dev(cpu_addr, size, dir);
        return virt_to_dma(dev, cpu_addr);
}

static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction dir)
{
        __dma_page_cpu_to_dev(page, offset, size, dir);
        return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle,
        size_t size, enum dma_data_direction dir)
{
        __dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
}

static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
        size_t size, enum dma_data_direction dir)
{
        __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
                handle & ~PAGE_MASK, size, dir);
}
#endif /* CONFIG_DMABOUNCE */

/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
        size_t size, enum dma_data_direction dir)
{
        dma_addr_t addr;

        BUG_ON(!valid_dma_direction(dir));

        addr = __dma_map_single(dev, cpu_addr, size, dir);
        debug_dma_map_page(dev, virt_to_page(cpu_addr),
                (unsigned long)cpu_addr & ~PAGE_MASK, size,
                dir, addr, true);

        return addr;
}
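
/*
 * Example: a typical streaming mapping for a device-bound transfer,
 * including the dma_mapping_error() check. A minimal sketch; "buf", "len"
 * and the transfer start/wait steps are hypothetical:
 *
 *      dma_addr_t handle;
 *
 *      handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, handle))
 *              return -ENOMEM;
 *
 *      ... start the transfer and wait for it to complete ...
 *
 *      dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */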

/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction dir)
{
        dma_addr_t addr;

        BUG_ON(!valid_dma_direction(dir));

        addr = __dma_map_page(dev, page, offset, size, dir);
        debug_dma_map_page(dev, page, offset, size, dir, addr, false);

        return addr;
}
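
/*
 * Example: mapping a fragment that lives within a page, for example a
 * receive buffer obtained from the page allocator. Illustrative only;
 * "page", "frag_off" and "frag_len" are hypothetical:
 *
 *      dma_addr_t handle;
 *
 *      handle = dma_map_page(dev, page, frag_off, frag_len, DMA_FROM_DEVICE);
 *      if (dma_mapping_error(dev, handle))
 *              return -ENOMEM;
 *      ...
 *      dma_unmap_page(dev, handle, frag_len, DMA_FROM_DEVICE);
 */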

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Unmap a single streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
        size_t size, enum dma_data_direction dir)
{
        debug_dma_unmap_page(dev, handle, size, dir, true);
        __dma_unmap_single(dev, handle, size, dir);
}

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
        size_t size, enum dma_data_direction dir)
{
        debug_dma_unmap_page(dev, handle, size, dir, false);
        __dma_unmap_page(dev, handle, size, dir);
}

/**
 * dma_sync_single_range_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the CPU, yet do not wish to tear down the DMA
 * mapping, you must call this function before doing so. At the point
 * where you next give the DMA address back to the device, you must
 * first perform a dma_sync_single_for_device(), and then the device
 * again owns the buffer.
 */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
        dma_addr_t handle, unsigned long offset, size_t size,
        enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);

        if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
                return;

        __dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
        dma_addr_t handle, unsigned long offset, size_t size,
        enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        debug_dma_sync_single_for_device(dev, handle + offset, size, dir);

        if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
                return;

        __dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev,
        dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
        dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
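
/*
 * Example: reusing one streaming mapping across several transfers by
 * handing ownership back and forth with the sync calls. A sketch only;
 * "buf", "len", "more_data" and the surrounding loop are hypothetical:
 *
 *      handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *
 *      while (more_data) {
 *              ... device writes into the buffer ...
 *              dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *              ... CPU inspects the received data ...
 *              dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *      }
 *
 *      dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 */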

/*
 * The scatter list versions of the above methods.
 */
extern int dma_map_sg(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
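
/*
 * Example: mapping a scatterlist and programming the device with each
 * resulting DMA segment. A minimal sketch; "sg", "nents" and
 * "foo_hw_queue_segment" are hypothetical:
 *
 *      struct scatterlist *s;
 *      int i, count;
 *
 *      count = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
 *      if (!count)
 *              return -ENOMEM;
 *
 *      for_each_sg(sg, s, count, i)
 *              foo_hw_queue_segment(sg_dma_address(s), sg_dma_len(s));
 *
 *      dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
 */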

#endif /* __KERNEL__ */
#endif