  1. /* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. */
  12.  
  13. /* Implements an interface between KGSL and the DRM subsystem. For now this
  14. * is pretty simple, but it will take on more of the workload as time goes
  15. * on.
  16. */
  17. #include "drmP.h"
  18. #include "drm.h"
  19. #include <linux/android_pmem.h>
  20. #include <linux/notifier.h>
  21.  
  22. #include "kgsl.h"
  23. #include "kgsl_device.h"
  24. #include "kgsl_drm.h"
  25. #include "kgsl_mmu.h"
  26. #include "kgsl_sharedmem.h"
  27.  
  28. #define DRIVER_AUTHOR "Qualcomm"
  29. #define DRIVER_NAME "kgsl"
  30. #define DRIVER_DESC "KGSL DRM"
  31. #define DRIVER_DATE "20100127"
  32.  
  33. #define DRIVER_MAJOR 2
  34. #define DRIVER_MINOR 1
  35. #define DRIVER_PATCHLEVEL 1
  36.  
  37. #define DRM_KGSL_GEM_FLAG_MAPPED (1 << 0)
  38.  
  39. #define ENTRY_EMPTY -1
  40. #define ENTRY_NEEDS_CLEANUP -2
  41.  
  42. #define DRM_KGSL_NUM_FENCE_ENTRIES (DRM_KGSL_HANDLE_WAIT_ENTRIES << 2)
  43. #define DRM_KGSL_HANDLE_WAIT_ENTRIES 5
  44.  
  45. /* Returns true if the memory type is in PMEM */
  46.  
  47. #ifdef CONFIG_KERNEL_PMEM_SMI_REGION
  48. #define TYPE_IS_PMEM(_t) \
  49. (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_EBI) || \
  50. ((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_SMI) || \
  51. ((_t) & DRM_KGSL_GEM_TYPE_PMEM))
  52. #else
  53. #define TYPE_IS_PMEM(_t) \
  54. (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_EBI) || \
  55. ((_t) & (DRM_KGSL_GEM_TYPE_PMEM | DRM_KGSL_GEM_PMEM_EBI)))
  56. #endif
  57.  
  58. /* Returns true if the memory type is regular */
  59.  
  60. #define TYPE_IS_MEM(_t) \
  61. (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_KMEM) || \
  62. ((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE) || \
  63. ((_t) & DRM_KGSL_GEM_TYPE_MEM))
  64.  
  65. #define TYPE_IS_FD(_t) ((_t) & DRM_KGSL_GEM_TYPE_FD_MASK)
  66.  
  67. /* Returns true if KMEM region is uncached */
  68.  
  69. #define IS_MEM_UNCACHED(_t) \
  70. ((_t == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE) || \
  71. (_t == DRM_KGSL_GEM_TYPE_KMEM) || \
  72. (TYPE_IS_MEM(_t) && (_t & DRM_KGSL_GEM_CACHE_WCOMBINE)))
  73.  
  74. struct drm_kgsl_gem_object_wait_list_entry {
  75. struct list_head list;
  76. int pid;
  77. int in_use;
  78. wait_queue_head_t process_wait_q;
  79. };
  80.  
  81. struct drm_kgsl_gem_object_fence {
  82. int32_t fence_id;
  83. unsigned int num_buffers;
  84. int ts_valid;
  85. unsigned int timestamp;
  86. int ts_device;
  87. int lockpid;
  88. struct list_head buffers_in_fence;
  89. };
  90.  
  91. struct drm_kgsl_gem_object_fence_list_entry {
  92. struct list_head list;
  93. int in_use;
  94. struct drm_gem_object *gem_obj;
  95. };
  96.  
  97. static int32_t fence_id = 0x1;
  98.  
  99. static struct drm_kgsl_gem_object_fence
  100. gem_buf_fence[DRM_KGSL_NUM_FENCE_ENTRIES];
  101.  
  102. struct drm_kgsl_gem_object {
  103. struct drm_gem_object *obj;
  104. uint32_t type;
  105. struct kgsl_memdesc memdesc;
  106. struct kgsl_pagetable *pagetable;
  107. uint64_t mmap_offset;
  108. int bufcount;
  109. int flags;
  110. struct list_head list;
  111. int active;
  112.  
  113. struct {
  114. uint32_t offset;
  115. uint32_t gpuaddr;
  116. } bufs[DRM_KGSL_GEM_MAX_BUFFERS];
  117.  
  118. int bound;
  119. int lockpid;
  120. /* Put these here to avoid allocating all the time */
  121. struct drm_kgsl_gem_object_wait_list_entry
  122. wait_entries[DRM_KGSL_HANDLE_WAIT_ENTRIES];
  123. /* Each object can only appear in a single fence */
  124. struct drm_kgsl_gem_object_fence_list_entry
  125. fence_entries[DRM_KGSL_NUM_FENCE_ENTRIES];
  126.  
  127. struct list_head wait_list;
  128. };
  129.  
  130. /* This is a global list of all the memory currently mapped in the MMU */
  131. static struct list_head kgsl_mem_list;
  132.  
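/* Map a DRM cache direction (TO_DEV/FROM_DEV) plus the buffer's cache
 * type flags onto the matching KGSL cache operation (clean before the
 * device reads, invalidate before the CPU reads) and apply it to the
 * memory descriptor. */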
  133. static void kgsl_gem_mem_flush(struct kgsl_memdesc *memdesc, int type, int op)
  134. {
  135. int cacheop = 0;
  136.  
  137. switch (op) {
  138. case DRM_KGSL_GEM_CACHE_OP_TO_DEV:
  139. if (type & (DRM_KGSL_GEM_CACHE_WBACK |
  140. DRM_KGSL_GEM_CACHE_WBACKWA))
  141. cacheop = KGSL_CACHE_OP_CLEAN;
  142.  
  143. break;
  144.  
  145. case DRM_KGSL_GEM_CACHE_OP_FROM_DEV:
  146. if (type & (DRM_KGSL_GEM_CACHE_WBACK |
  147. DRM_KGSL_GEM_CACHE_WBACKWA |
  148. DRM_KGSL_GEM_CACHE_WTHROUGH))
  149. cacheop = KGSL_CACHE_OP_INV;
  150. }
  151.  
  152. kgsl_cache_range_op(memdesc, cacheop);
  153. }
  154.  
  155. /* Flush all the memory mapped in the MMU */
  156.  
  157. void kgsl_gpu_mem_flush(int op)
  158. {
  159. struct drm_kgsl_gem_object *entry;
  160.  
  161. list_for_each_entry(entry, &kgsl_mem_list, list) {
  162. kgsl_gem_mem_flush(&entry->memdesc, entry->type, op);
  163. }
  164.  
  165. /* Takes care of WT/WC case.
  166. * More useful when we go barrierless
  167. */
  168. dmb();
  169. }
  170.  
  171. /* TODO:
  172. * Add vsync wait */
  173.  
  174. static int kgsl_drm_load(struct drm_device *dev, unsigned long flags)
  175. {
  176. return 0;
  177. }
  178.  
  179. static int kgsl_drm_unload(struct drm_device *dev)
  180. {
  181. return 0;
  182. }
  183.  
  184. struct kgsl_drm_device_priv {
  185. struct kgsl_device *device[KGSL_DEVICE_MAX];
  186. struct kgsl_device_private *devpriv[KGSL_DEVICE_MAX];
  187. };
  188.  
  189. static int kgsl_ts_notifier_cb(struct notifier_block *blk,
  190. unsigned long code, void *_param);
  191.  
  192. static struct notifier_block kgsl_ts_nb[KGSL_DEVICE_MAX];
  193.  
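/* On first open, register a timestamp notifier with every available KGSL
 * device so fences can be retired when their timestamps expire. */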
  194. static int kgsl_drm_firstopen(struct drm_device *dev)
  195. {
  196. int i;
  197.  
  198. for (i = 0; i < KGSL_DEVICE_MAX; i++) {
  199. struct kgsl_device *device = kgsl_get_device(i);
  200.  
  201. if (device == NULL)
  202. continue;
  203.  
  204. kgsl_ts_nb[i].notifier_call = kgsl_ts_notifier_cb;
  205. kgsl_register_ts_notifier(device, &kgsl_ts_nb[i]);
  206. }
  207.  
  208. return 0;
  209. }
  210.  
  211. void kgsl_drm_lastclose(struct drm_device *dev)
  212. {
  213. int i;
  214.  
  215. for (i = 0; i < KGSL_DEVICE_MAX; i++) {
  216. struct kgsl_device *device = kgsl_get_device(i);
  217. if (device == NULL)
  218. continue;
  219.  
  220. kgsl_unregister_ts_notifier(device, &kgsl_ts_nb[i]);
  221. }
  222. }
  223.  
  224. void kgsl_drm_preclose(struct drm_device *dev, struct drm_file *file_priv)
  225. {
  226. }
  227.  
  228. static int kgsl_drm_suspend(struct drm_device *dev, pm_message_t state)
  229. {
  230. return 0;
  231. }
  232.  
  233. static int kgsl_drm_resume(struct drm_device *dev)
  234. {
  235. return 0;
  236. }
  237.  
  238. static void
  239. kgsl_gem_free_mmap_offset(struct drm_gem_object *obj)
  240. {
  241. struct drm_device *dev = obj->dev;
  242. struct drm_gem_mm *mm = dev->mm_private;
  243. struct drm_kgsl_gem_object *priv = obj->driver_private;
  244. struct drm_map_list *list;
  245.  
  246. list = &obj->map_list;
  247. drm_ht_remove_item(&mm->offset_hash, &list->hash);
  248. if (list->file_offset_node) {
  249. drm_mm_put_block(list->file_offset_node);
  250. list->file_offset_node = NULL;
  251. }
  252.  
  253. kfree(list->map);
  254. list->map = NULL;
  255.  
  256. priv->mmap_offset = 0;
  257. }
  258.  
  259. static int
  260. kgsl_gem_memory_allocated(struct drm_gem_object *obj)
  261. {
  262. struct drm_kgsl_gem_object *priv = obj->driver_private;
  263. return priv->memdesc.size ? 1 : 0;
  264. }
  265.  
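/* Back the object with memory based on its type: PMEM objects are
 * allocated with pmem_kalloc() from the EBI or SMI pool, regular KMEM
 * objects with vmalloc_user(); FD-backed objects already have memory.
 * Per-buffer offsets into the allocation are recorded at the end. */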
  266. static int
  267. kgsl_gem_alloc_memory(struct drm_gem_object *obj)
  268. {
  269. struct drm_kgsl_gem_object *priv = obj->driver_private;
  270. int index;
  271.  
  272. /* Return if the memory is already allocated */
  273.  
  274. if (kgsl_gem_memory_allocated(obj) || TYPE_IS_FD(priv->type))
  275. return 0;
  276.  
  277. if (TYPE_IS_PMEM(priv->type)) {
  278. int type;
  279.  
  280. if (priv->type == DRM_KGSL_GEM_TYPE_EBI ||
  281. priv->type & DRM_KGSL_GEM_PMEM_EBI)
  282. type = PMEM_MEMTYPE_EBI1;
  283. else
  284. type = PMEM_MEMTYPE_SMI;
  285.  
  286. priv->memdesc.physaddr =
  287. pmem_kalloc(obj->size * priv->bufcount,
  288. type | PMEM_ALIGNMENT_4K);
  289.  
  290. if (IS_ERR((void *) priv->memdesc.physaddr)) {
  291. DRM_ERROR("Unable to allocate PMEM memory\n");
  292. return -ENOMEM;
  293. }
  294.  
  295. priv->memdesc.size = obj->size * priv->bufcount;
  296.  
  297. } else if (TYPE_IS_MEM(priv->type)) {
  298. priv->memdesc.hostptr =
  299. vmalloc_user(obj->size * priv->bufcount);
  300.  
  301. if (priv->memdesc.hostptr == NULL) {
  302. DRM_ERROR("Unable to allocate vmalloc memory\n");
  303. return -ENOMEM;
  304. }
  305.  
  306. priv->memdesc.size = obj->size * priv->bufcount;
  307. priv->memdesc.ops = &kgsl_vmalloc_ops;
  308. } else
  309. return -EINVAL;
  310.  
  311. for (index = 0; index < priv->bufcount; index++)
  312. priv->bufs[index].offset = index * obj->size;
  313.  
  314.  
  315. return 0;
  316. }
  317.  
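/* Undo kgsl_gem_map(): remove the object from the GPU pagetable, drop the
 * pagetable reference and take cached objects off the global flush list.
 * Without CONFIG_MSM_KGSL_MMU this is a no-op. */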
  318. #ifdef CONFIG_MSM_KGSL_MMU
  319. static void
  320. kgsl_gem_unmap(struct drm_gem_object *obj)
  321. {
  322. struct drm_kgsl_gem_object *priv = obj->driver_private;
  323.  
  324. if (!(priv->flags & DRM_KGSL_GEM_FLAG_MAPPED))
  325. return;
  326.  
  327. kgsl_mmu_unmap(priv->pagetable, &priv->memdesc);
  328.  
  329. kgsl_mmu_putpagetable(priv->pagetable);
  330. priv->pagetable = NULL;
  331.  
  332. if ((priv->type == DRM_KGSL_GEM_TYPE_KMEM) ||
  333. (priv->type & DRM_KGSL_GEM_CACHE_MASK))
  334. list_del(&priv->list);
  335.  
  336. priv->flags &= ~DRM_KGSL_GEM_FLAG_MAPPED;
  337. }
  338. #else
  339. static void
  340. kgsl_gem_unmap(struct drm_gem_object *obj)
  341. {
  342. }
  343. #endif
  344.  
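/* Release the object's backing store: flush it back from the device,
 * unmap it from the GPU, then free the PMEM or shared memory allocation. */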
  345. static void
  346. kgsl_gem_free_memory(struct drm_gem_object *obj)
  347. {
  348. struct drm_kgsl_gem_object *priv = obj->driver_private;
  349.  
  350. if (!kgsl_gem_memory_allocated(obj) || TYPE_IS_FD(priv->type))
  351. return;
  352.  
  353. kgsl_gem_mem_flush(&priv->memdesc, priv->type,
  354. DRM_KGSL_GEM_CACHE_OP_FROM_DEV);
  355.  
  356. kgsl_gem_unmap(obj);
  357.  
  358. if (TYPE_IS_PMEM(priv->type))
  359. pmem_kfree(priv->memdesc.physaddr);
  360.  
  361. kgsl_sharedmem_free(&priv->memdesc);
  362. }
  363.  
  364. int
  365. kgsl_gem_init_object(struct drm_gem_object *obj)
  366. {
  367. struct drm_kgsl_gem_object *priv;
  368. priv = kzalloc(sizeof(*priv), GFP_KERNEL);
  369. if (priv == NULL) {
  370. DRM_ERROR("Unable to create GEM object\n");
  371. return -ENOMEM;
  372. }
  373.  
  374. obj->driver_private = priv;
  375. priv->obj = obj;
  376.  
  377. return 0;
  378. }
  379.  
  380. void
  381. kgsl_gem_free_object(struct drm_gem_object *obj)
  382. {
  383. kgsl_gem_free_memory(obj);
  384. kgsl_gem_free_mmap_offset(obj);
  385. drm_gem_object_release(obj);
  386. kfree(obj->driver_private);
  387. }
  388.  
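/* Reserve a fake file offset in the DRM offset manager so user space can
 * mmap() the object; the resulting offset is stored in priv->mmap_offset. */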
  389. static int
  390. kgsl_gem_create_mmap_offset(struct drm_gem_object *obj)
  391. {
  392. struct drm_device *dev = obj->dev;
  393. struct drm_gem_mm *mm = dev->mm_private;
  394. struct drm_kgsl_gem_object *priv = obj->driver_private;
  395. struct drm_map_list *list;
  396. int msize;
  397.  
  398. list = &obj->map_list;
  399. list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
  400. if (list->map == NULL) {
  401. DRM_ERROR("Unable to allocate drm_map_list\n");
  402. return -ENOMEM;
  403. }
  404.  
  405. msize = obj->size * priv->bufcount;
  406.  
  407. list->map->type = _DRM_GEM;
  408. list->map->size = msize;
  409. list->map->handle = obj;
  410.  
  411. /* Allocate a mmap offset */
  412. list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
  413. msize / PAGE_SIZE,
  414. 0, 0);
  415.  
  416. if (!list->file_offset_node) {
  417. DRM_ERROR("Failed to allocate offset for %d\n", obj->name);
  418. kfree(list->map);
  419. return -ENOMEM;
  420. }
  421.  
  422. list->file_offset_node = drm_mm_get_block(list->file_offset_node,
  423. msize / PAGE_SIZE, 0);
  424.  
  425. if (!list->file_offset_node) {
  426. DRM_ERROR("Unable to create the file_offset_node\n");
  427. kfree(list->map);
  428. return -ENOMEM;
  429. }
  430.  
  431. list->hash.key = list->file_offset_node->start;
  432. if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
  433. DRM_ERROR("Failed to add to map hash\n");
  434. drm_mm_put_block(list->file_offset_node);
  435. kfree(list->map);
  436. return -ENOMEM;
  437. }
  438.  
  439. priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
  440.  
  441. return 0;
  442. }
  443.  
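/* Resolve a GEM handle on the given DRM file descriptor into the physical
 * address of its active buffer and the size of the allocation. Only
 * PMEM-backed objects are supported. */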
  444. int
  445. kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start,
  446. unsigned long *len)
  447. {
  448. struct file *filp;
  449. struct drm_device *dev;
  450. struct drm_file *file_priv;
  451. struct drm_gem_object *obj;
  452. struct drm_kgsl_gem_object *priv;
  453. int ret = 0;
  454.  
  455. filp = fget(drm_fd);
  456. if (unlikely(filp == NULL)) {
  457. DRM_ERROR("Unable to get the DRM file descriptor\n");
  458. return -EINVAL;
  459. }
  460. file_priv = filp->private_data;
  461. if (unlikely(file_priv == NULL)) {
  462. DRM_ERROR("Unable to get the file private data\n");
  463. fput(filp);
  464. return -EINVAL;
  465. }
  466. dev = file_priv->minor->dev;
  467. if (unlikely(dev == NULL)) {
  468. DRM_ERROR("Unable to get the minor device\n");
  469. fput(filp);
  470. return -EINVAL;
  471. }
  472.  
  473. obj = drm_gem_object_lookup(dev, file_priv, handle);
  474. if (unlikely(obj == NULL)) {
  475. DRM_ERROR("Invalid GEM handle %x\n", handle);
  476. fput(filp);
  477. return -EBADF;
  478. }
  479.  
  480. mutex_lock(&dev->struct_mutex);
  481. priv = obj->driver_private;
  482.  
  483. /* We can only use the MDP for PMEM regions */
  484.  
  485. if (TYPE_IS_PMEM(priv->type)) {
  486. *start = priv->memdesc.physaddr +
  487. priv->bufs[priv->active].offset;
  488.  
  489. *len = priv->memdesc.size;
  490.  
  491. kgsl_gem_mem_flush(&priv->memdesc,
  492. priv->type, DRM_KGSL_GEM_CACHE_OP_TO_DEV);
  493. } else {
  494. *start = 0;
  495. *len = 0;
  496. ret = -EINVAL;
  497. }
  498.  
  499. drm_gem_object_unreference(obj);
  500. mutex_unlock(&dev->struct_mutex);
  501.  
  502. fput(filp);
  503. return ret;
  504. }
  505.  
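/* Common initialization for a newly created GEM object: default to a
 * single PMEM/EBI buffer, create a userspace handle and set up the
 * per-object wait and fence entry pools. */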
  506. static int
  507. kgsl_gem_init_obj(struct drm_device *dev,
  508. struct drm_file *file_priv,
  509. struct drm_gem_object *obj,
  510. int *handle)
  511. {
  512. struct drm_kgsl_gem_object *priv;
  513. int ret, i;
  514.  
  515. mutex_lock(&dev->struct_mutex);
  516. priv = obj->driver_private;
  517.  
  518. memset(&priv->memdesc, 0, sizeof(priv->memdesc));
  519. priv->bufcount = 1;
  520. priv->active = 0;
  521. priv->bound = 0;
  522.  
  523. /* To preserve backwards compatibility, the default memory source
  524. is EBI */
  525.  
  526. priv->type = DRM_KGSL_GEM_TYPE_PMEM | DRM_KGSL_GEM_PMEM_EBI;
  527.  
  528. ret = drm_gem_handle_create(file_priv, obj, handle);
  529.  
  530. drm_gem_object_handle_unreference(obj);
  531. INIT_LIST_HEAD(&priv->wait_list);
  532.  
  533. for (i = 0; i < DRM_KGSL_HANDLE_WAIT_ENTRIES; i++) {
  534. INIT_LIST_HEAD((struct list_head *) &priv->wait_entries[i]);
  535. priv->wait_entries[i].pid = 0;
  536. init_waitqueue_head(&priv->wait_entries[i].process_wait_q);
  537. }
  538.  
  539. for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
  540. INIT_LIST_HEAD((struct list_head *) &priv->fence_entries[i]);
  541. priv->fence_entries[i].in_use = 0;
  542. priv->fence_entries[i].gem_obj = obj;
  543. }
  544.  
  545. mutex_unlock(&dev->struct_mutex);
  546. return ret;
  547. }
  548.  
  549. int
  550. kgsl_gem_create_ioctl(struct drm_device *dev, void *data,
  551. struct drm_file *file_priv)
  552. {
  553. struct drm_kgsl_gem_create *create = data;
  554. struct drm_gem_object *obj;
  555. int ret, handle;
  556.  
  557. /* Page align the size so we can allocate multiple buffers */
  558. create->size = ALIGN(create->size, 4096);
  559.  
  560. obj = drm_gem_object_alloc(dev, create->size);
  561.  
  562. if (obj == NULL) {
  563. DRM_ERROR("Unable to allocate the GEM object\n");
  564. return -ENOMEM;
  565. }
  566.  
  567. ret = kgsl_gem_init_obj(dev, file_priv, obj, &handle);
  568. if (ret)
  569. return ret;
  570.  
  571. create->handle = handle;
  572. return 0;
  573. }
  574.  
  575. int
  576. kgsl_gem_create_fd_ioctl(struct drm_device *dev, void *data,
  577. struct drm_file *file_priv)
  578. {
  579. struct drm_kgsl_gem_create_fd *args = data;
  580. struct file *file;
  581. dev_t rdev;
  582. struct fb_info *info;
  583. struct drm_gem_object *obj;
  584. struct drm_kgsl_gem_object *priv;
  585. int ret, put_needed, handle;
  586.  
  587. file = fget_light(args->fd, &put_needed);
  588.  
  589. if (file == NULL) {
  590. DRM_ERROR("Unable to get the file object\n");
  591. return -EBADF;
  592. }
  593.  
  594. rdev = file->f_dentry->d_inode->i_rdev;
  595.  
  596. /* Only framebuffer objects are supported ATM */
  597.  
  598. if (MAJOR(rdev) != FB_MAJOR) {
  599. DRM_ERROR("File descriptor is not a framebuffer\n");
  600. ret = -EBADF;
  601. goto error_fput;
  602. }
  603.  
  604. info = registered_fb[MINOR(rdev)];
  605.  
  606. if (info == NULL) {
  607. DRM_ERROR("Framebuffer minor %d is not registered\n",
  608. MINOR(rdev));
  609. ret = -EBADF;
  610. goto error_fput;
  611. }
  612.  
  613. obj = drm_gem_object_alloc(dev, info->fix.smem_len);
  614.  
  615. if (obj == NULL) {
  616. DRM_ERROR("Unable to allocate GEM object\n");
  617. ret = -ENOMEM;
  618. goto error_fput;
  619. }
  620.  
  621. ret = kgsl_gem_init_obj(dev, file_priv, obj, &handle);
  622.  
  623. if (ret)
  624. goto error_fput;
  625.  
  626. mutex_lock(&dev->struct_mutex);
  627.  
  628. priv = obj->driver_private;
  629. priv->memdesc.physaddr = info->fix.smem_start;
  630. priv->type = DRM_KGSL_GEM_TYPE_FD_FBMEM;
  631.  
  632. mutex_unlock(&dev->struct_mutex);
  633. args->handle = handle;
  634.  
  635. error_fput:
  636. fput_light(file, put_needed);
  637.  
  638. return ret;
  639. }
  640.  
  641. int
  642. kgsl_gem_setmemtype_ioctl(struct drm_device *dev, void *data,
  643. struct drm_file *file_priv)
  644. {
  645. struct drm_kgsl_gem_memtype *args = data;
  646. struct drm_gem_object *obj;
  647. struct drm_kgsl_gem_object *priv;
  648. int ret = 0;
  649.  
  650. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  651.  
  652. if (obj == NULL) {
  653. DRM_ERROR("Invalid GEM handle %x\n", args->handle);
  654. return -EBADF;
  655. }
  656.  
  657. mutex_lock(&dev->struct_mutex);
  658. priv = obj->driver_private;
  659.  
  660. if (TYPE_IS_FD(priv->type))
  661. ret = -EINVAL;
  662. else {
  663. if (TYPE_IS_PMEM(args->type) || TYPE_IS_MEM(args->type))
  664. priv->type = args->type;
  665. else
  666. ret = -EINVAL;
  667. }
  668.  
  669. drm_gem_object_unreference(obj);
  670. mutex_unlock(&dev->struct_mutex);
  671.  
  672. return ret;
  673. }
  674.  
  675. int
  676. kgsl_gem_getmemtype_ioctl(struct drm_device *dev, void *data,
  677. struct drm_file *file_priv)
  678. {
  679. struct drm_kgsl_gem_memtype *args = data;
  680. struct drm_gem_object *obj;
  681. struct drm_kgsl_gem_object *priv;
  682.  
  683. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  684.  
  685. if (obj == NULL) {
  686. DRM_ERROR("Invalid GEM handle %x\n", args->handle);
  687. return -EBADF;
  688. }
  689.  
  690. mutex_lock(&dev->struct_mutex);
  691. priv = obj->driver_private;
  692.  
  693. args->type = priv->type;
  694.  
  695. drm_gem_object_unreference(obj);
  696. mutex_unlock(&dev->struct_mutex);
  697.  
  698. return 0;
  699. }
  700.  
  701. int
  702. kgsl_gem_unbind_gpu_ioctl(struct drm_device *dev, void *data,
  703. struct drm_file *file_priv)
  704. {
  705. struct drm_kgsl_gem_bind_gpu *args = data;
  706. struct drm_gem_object *obj;
  707. struct drm_kgsl_gem_object *priv;
  708.  
  709. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  710.  
  711. if (obj == NULL) {
  712. DRM_ERROR("Invalid GEM handle %x\n", args->handle);
  713. return -EBADF;
  714. }
  715.  
  716. mutex_lock(&dev->struct_mutex);
  717. priv = obj->driver_private;
  718.  
  719. if (--priv->bound == 0)
  720. kgsl_gem_unmap(obj);
  721.  
  722. drm_gem_object_unreference(obj);
  723. mutex_unlock(&dev->struct_mutex);
  724. return 0;
  725. }
  726.  
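/* Map the object into the global GPU pagetable and record each buffer's
 * GPU address. Without CONFIG_MSM_KGSL_MMU, PMEM buffers are simply
 * addressed by their physical address. */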
  727. #ifdef CONFIG_MSM_KGSL_MMU
  728. static int
  729. kgsl_gem_map(struct drm_gem_object *obj)
  730. {
  731. struct drm_kgsl_gem_object *priv = obj->driver_private;
  732. int index;
  733. int ret = -EINVAL;
  734.  
  735. if (priv->flags & DRM_KGSL_GEM_FLAG_MAPPED)
  736. return 0;
  737.  
  738. /* Get the global page table */
  739.  
  740. if (priv->pagetable == NULL) {
  741. priv->pagetable = kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
  742.  
  743. if (priv->pagetable == NULL) {
  744. DRM_ERROR("Unable to get the GPU MMU pagetable\n");
  745. return -EINVAL;
  746. }
  747. }
  748.  
  749. priv->memdesc.pagetable = priv->pagetable;
  750.  
  751. ret = kgsl_mmu_map(priv->pagetable, &priv->memdesc,
  752. GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
  753.  
  754. if (!ret) {
  755. for (index = 0; index < priv->bufcount; index++) {
  756. priv->bufs[index].gpuaddr =
  757. priv->memdesc.gpuaddr +
  758. priv->bufs[index].offset;
  759. }
  760. }
  761.  
  762. /* Add cached memory to the list to be cached */
  763.  
  764. if (priv->type == DRM_KGSL_GEM_TYPE_KMEM ||
  765. priv->type & DRM_KGSL_GEM_CACHE_MASK)
  766. list_add(&priv->list, &kgsl_mem_list);
  767.  
  768. priv->flags |= DRM_KGSL_GEM_FLAG_MAPPED;
  769.  
  770. return ret;
  771. }
  772. #else
  773. static int
  774. kgsl_gem_map(struct drm_gem_object *obj)
  775. {
  776. struct drm_kgsl_gem_object *priv = obj->driver_private;
  777. int index;
  778.  
  779. if (TYPE_IS_PMEM(priv->type)) {
  780. for (index = 0; index < priv->bufcount; index++)
  781. priv->bufs[index].gpuaddr =
  782. priv->memdesc.physaddr + priv->bufs[index].offset;
  783.  
  784. return 0;
  785. }
  786.  
  787. return -EINVAL;
  788. }
  789. #endif
  790.  
  791. int
  792. kgsl_gem_bind_gpu_ioctl(struct drm_device *dev, void *data,
  793. struct drm_file *file_priv)
  794. {
  795. struct drm_kgsl_gem_bind_gpu *args = data;
  796. struct drm_gem_object *obj;
  797. struct drm_kgsl_gem_object *priv;
  798. int ret = 0;
  799.  
  800. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  801.  
  802. if (obj == NULL) {
  803. DRM_ERROR("Invalid GEM handle %x\n", args->handle);
  804. return -EBADF;
  805. }
  806.  
  807. mutex_lock(&dev->struct_mutex);
  808. priv = obj->driver_private;
  809.  
  810. if (priv->bound++ == 0) {
  811.  
  812. if (!kgsl_gem_memory_allocated(obj)) {
  813. DRM_ERROR("Memory not allocated for this object\n");
  814. ret = -ENOMEM;
  815. goto out;
  816. }
  817.  
  818. ret = kgsl_gem_map(obj);
  819.  
  820. /* This is legacy behavior - use GET_BUFFERINFO instead */
  821. args->gpuptr = priv->bufs[0].gpuaddr;
  822. }
  823. out:
  824. drm_gem_object_unreference(obj);
  825. mutex_unlock(&dev->struct_mutex);
  826. return ret;
  827. }
  828.  
  829. /* Allocate the memory and prepare it for CPU mapping */
  830.  
  831. int
  832. kgsl_gem_alloc_ioctl(struct drm_device *dev, void *data,
  833. struct drm_file *file_priv)
  834. {
  835. struct drm_kgsl_gem_alloc *args = data;
  836. struct drm_gem_object *obj;
  837. struct drm_kgsl_gem_object *priv;
  838. int ret;
  839.  
  840. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  841.  
  842. if (obj == NULL) {
  843. DRM_ERROR("Invalid GEM handle %x\n", args->handle);
  844. return -EBADF;
  845. }
  846.  
  847. mutex_lock(&dev->struct_mutex);
  848. priv = obj->driver_private;
  849.  
  850. ret = kgsl_gem_alloc_memory(obj);
  851.  
  852. if (ret) {
  853. DRM_ERROR("Unable to allocate object memory\n");
  854. } else if (!priv->mmap_offset) {
  855. ret = kgsl_gem_create_mmap_offset(obj);
  856. if (ret)
  857. DRM_ERROR("Unable to create a mmap offset\n");
  858. }
  859.  
  860. args->offset = priv->mmap_offset;
  861.  
  862. drm_gem_object_unreference(obj);
  863. mutex_unlock(&dev->struct_mutex);
  864.  
  865. return ret;
  866. }
  867.  
  868. int
  869. kgsl_gem_mmap_ioctl(struct drm_device *dev, void *data,
  870. struct drm_file *file_priv)
  871. {
  872. struct drm_kgsl_gem_mmap *args = data;
  873. struct drm_gem_object *obj;
  874. unsigned long addr;
  875.  
  876. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  877.  
  878. if (obj == NULL) {
  879. DRM_ERROR("Invalid GEM handle %x\n", args->handle);
  880. return -EBADF;
  881. }
  882.  
  883. down_write(&current->mm->mmap_sem);
  884.  
  885. addr = do_mmap(obj->filp, 0, args->size,
  886. PROT_READ | PROT_WRITE, MAP_SHARED,
  887. args->offset);
  888.  
  889. up_write(&current->mm->mmap_sem);
  890.  
  891. mutex_lock(&dev->struct_mutex);
  892. drm_gem_object_unreference(obj);
  893. mutex_unlock(&dev->struct_mutex);
  894.  
  895. if (IS_ERR((void *) addr))
  896. return addr;
  897.  
  898. args->hostptr = (uint32_t) addr;
  899. return 0;
  900. }
  901.  
  902. /* This function is deprecated */
  903.  
  904. int
  905. kgsl_gem_prep_ioctl(struct drm_device *dev, void *data,
  906. struct drm_file *file_priv)
  907. {
  908. struct drm_kgsl_gem_prep *args = data;
  909. struct drm_gem_object *obj;
  910. struct drm_kgsl_gem_object *priv;
  911. int ret;
  912.  
  913. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  914.  
  915. if (obj == NULL) {
  916. DRM_ERROR("Invalid GEM handle %x\n", args->handle);
  917. return -EBADF;
  918. }
  919.  
  920. mutex_lock(&dev->struct_mutex);
  921. priv = obj->driver_private;
  922.  
  923. ret = kgsl_gem_alloc_memory(obj);
  924. if (ret) {
  925. DRM_ERROR("Unable to allocate object memory\n");
  926. drm_gem_object_unreference(obj);
  927. mutex_unlock(&dev->struct_mutex);
  928. return ret;
  929. }
  930.  
  931. if (priv->mmap_offset == 0) {
  932. ret = kgsl_gem_create_mmap_offset(obj);
  933. if (ret) {
  934. drm_gem_object_unreference(obj);
  935. mutex_unlock(&dev->struct_mutex);
  936. return ret;
  937. }
  938. }
  939.  
  940. args->offset = priv->mmap_offset;
  941. args->phys = priv->memdesc.physaddr;
  942.  
  943. drm_gem_object_unreference(obj);
  944. mutex_unlock(&dev->struct_mutex);
  945.  
  946. return 0;
  947. }
  948.  
  949. int
  950. kgsl_gem_get_bufinfo_ioctl(struct drm_device *dev, void *data,
  951. struct drm_file *file_priv)
  952. {
  953. struct drm_kgsl_gem_bufinfo *args = data;
  954. struct drm_gem_object *obj;
  955. struct drm_kgsl_gem_object *priv;
  956. int ret = -EINVAL;
  957. int index;
  958.  
  959. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  960.  
  961. if (obj == NULL) {
  962. DRM_ERROR("Invalid GEM handle %x\n", args->handle);
  963. return -EBADF;
  964. }
  965.  
  966. mutex_lock(&dev->struct_mutex);
  967. priv = obj->driver_private;
  968.  
  969. if (!kgsl_gem_memory_allocated(obj)) {
  970. DRM_ERROR("Memory not allocated for this object\n");
  971. goto out;
  972. }
  973.  
  974. for (index = 0; index < priv->bufcount; index++) {
  975. args->offset[index] = priv->bufs[index].offset;
  976. args->gpuaddr[index] = priv->bufs[index].gpuaddr;
  977. }
  978.  
  979. args->count = priv->bufcount;
  980. args->active = priv->active;
  981.  
  982. ret = 0;
  983.  
  984. out:
  985. drm_gem_object_unreference(obj);
  986. mutex_unlock(&dev->struct_mutex);
  987.  
  988. return ret;
  989. }
  990.  
  991. int
  992. kgsl_gem_set_bufcount_ioctl(struct drm_device *dev, void *data,
  993. struct drm_file *file_priv)
  994. {
  995. struct drm_kgsl_gem_bufcount *args = data;
  996. struct drm_gem_object *obj;
  997. struct drm_kgsl_gem_object *priv;
  998. int ret = -EINVAL;
  999.  
  1000. if (args->bufcount < 1 || args->bufcount > DRM_KGSL_GEM_MAX_BUFFERS)
  1001. return -EINVAL;
  1002.  
  1003. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  1004.  
  1005. if (obj == NULL) {
  1006. DRM_ERROR("Invalid GEM handle %x\n", args->handle);
  1007. return -EBADF;
  1008. }
  1009.  
  1010. mutex_lock(&dev->struct_mutex);
  1011. priv = obj->driver_private;
  1012.  
  1013. /* It is too much math to worry about what happens if we are already
  1014. allocated, so just bail if we are */
  1015.  
  1016. if (kgsl_gem_memory_allocated(obj)) {
  1017. DRM_ERROR("Memory already allocated - cannot change "
  1018. "number of buffers\n");
  1019. goto out;
  1020. }
  1021.  
  1022. priv->bufcount = args->bufcount;
  1023. ret = 0;
  1024.  
  1025. out:
  1026. drm_gem_object_unreference(obj);
  1027. mutex_unlock(&dev->struct_mutex);
  1028.  
  1029. return ret;
  1030. }
  1031.  
  1032. int
  1033. kgsl_gem_set_active_ioctl(struct drm_device *dev, void *data,
  1034. struct drm_file *file_priv)
  1035. {
  1036. struct drm_kgsl_gem_active *args = data;
  1037. struct drm_gem_object *obj;
  1038. struct drm_kgsl_gem_object *priv;
  1039. int ret = -EINVAL;
  1040.  
  1041. obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  1042.  
  1043. if (obj == NULL) {
  1044. DRM_ERROR("Invalid GEM handle %x\n", args->handle);
  1045. return -EBADF;
  1046. }
  1047.  
  1048. mutex_lock(&dev->struct_mutex);
  1049. priv = obj->driver_private;
  1050.  
  1051. if (args->active < 0 || args->active >= priv->bufcount) {
  1052. DRM_ERROR("Invalid active buffer %d\n", args->active);
  1053. goto out;
  1054. }
  1055.  
  1056. priv->active = args->active;
  1057. ret = 0;
  1058.  
  1059. out:
  1060. drm_gem_object_unreference(obj);
  1061. mutex_unlock(&dev->struct_mutex);
  1062.  
  1063. return ret;
  1064. }
  1065.  
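/* Fault handler for vmalloc-backed (KMEM) mappings: look up the vmalloc
 * page behind the faulting address and hand it back to the VM. */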
  1066. int kgsl_gem_kmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  1067. {
  1068. struct drm_gem_object *obj = vma->vm_private_data;
  1069. struct drm_device *dev = obj->dev;
  1070. struct drm_kgsl_gem_object *priv;
  1071. unsigned long offset, pg;
  1072. struct page *page;
  1073.  
  1074. mutex_lock(&dev->struct_mutex);
  1075.  
  1076. priv = obj->driver_private;
  1077.  
  1078. offset = (unsigned long) vmf->virtual_address - vma->vm_start;
  1079. pg = (unsigned long) priv->memdesc.hostptr + offset;
  1080.  
  1081. page = vmalloc_to_page((void *) pg);
  1082. if (!page) {
  1083. mutex_unlock(&dev->struct_mutex);
  1084. return VM_FAULT_SIGBUS;
  1085. }
  1086.  
  1087. get_page(page);
  1088. vmf->page = page;
  1089.  
  1090. mutex_unlock(&dev->struct_mutex);
  1091. return 0;
  1092. }
  1093.  
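/* Fault handler for physically contiguous (PMEM/FB) mappings: these VMAs
 * are VM_PFNMAP, so insert the PFN for the faulting offset directly. */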
  1094. int kgsl_gem_phys_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  1095. {
  1096. struct drm_gem_object *obj = vma->vm_private_data;
  1097. struct drm_device *dev = obj->dev;
  1098. struct drm_kgsl_gem_object *priv;
  1099. unsigned long offset, pfn;
  1100. int ret = 0;
  1101.  
  1102. offset = ((unsigned long) vmf->virtual_address - vma->vm_start) >>
  1103. PAGE_SHIFT;
  1104.  
  1105. mutex_lock(&dev->struct_mutex);
  1106.  
  1107. priv = obj->driver_private;
  1108.  
  1109. pfn = (priv->memdesc.physaddr >> PAGE_SHIFT) + offset;
  1110. ret = vm_insert_pfn(vma,
  1111. (unsigned long) vmf->virtual_address, pfn);
  1112. mutex_unlock(&dev->struct_mutex);
  1113.  
  1114. switch (ret) {
  1115. case -ENOMEM:
  1116. case -EAGAIN:
  1117. return VM_FAULT_OOM;
  1118. case -EFAULT:
  1119. return VM_FAULT_SIGBUS;
  1120. default:
  1121. return VM_FAULT_NOPAGE;
  1122. }
  1123. }
  1124.  
  1125. static struct vm_operations_struct kgsl_gem_kmem_vm_ops = {
  1126. .fault = kgsl_gem_kmem_fault,
  1127. .open = drm_gem_vm_open,
  1128. .close = drm_gem_vm_close,
  1129. };
  1130.  
  1131. static struct vm_operations_struct kgsl_gem_phys_vm_ops = {
  1132. .fault = kgsl_gem_phys_fault,
  1133. .open = drm_gem_vm_open,
  1134. .close = drm_gem_vm_close,
  1135. };
  1136.  
  1137. /* This is a clone of the standard drm_gem_mmap function modified to allow
  1138. us to properly map KMEM regions as well as the PMEM regions */
  1139.  
  1140. int msm_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
  1141. {
  1142. struct drm_file *priv = filp->private_data;
  1143. struct drm_device *dev = priv->minor->dev;
  1144. struct drm_gem_mm *mm = dev->mm_private;
  1145. struct drm_local_map *map = NULL;
  1146. struct drm_gem_object *obj;
  1147. struct drm_hash_item *hash;
  1148. struct drm_kgsl_gem_object *gpriv;
  1149. int ret = 0;
  1150.  
  1151. mutex_lock(&dev->struct_mutex);
  1152.  
  1153. if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
  1154. mutex_unlock(&dev->struct_mutex);
  1155. return drm_mmap(filp, vma);
  1156. }
  1157.  
  1158. map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
  1159. if (!map ||
  1160. ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
  1161. ret = -EPERM;
  1162. goto out_unlock;
  1163. }
  1164.  
  1165. /* Check for valid size. */
  1166. if (map->size < vma->vm_end - vma->vm_start) {
  1167. ret = -EINVAL;
  1168. goto out_unlock;
  1169. }
  1170.  
  1171. obj = map->handle;
  1172.  
  1173. gpriv = obj->driver_private;
  1174.  
  1175. /* VM_PFNMAP is only for memory that doesn't use struct page
  1176. * in other words, not "normal" memory. If you try to use it
  1177. * with "normal" memory then the mappings don't get flushed. */
  1178.  
  1179. if (TYPE_IS_MEM(gpriv->type)) {
  1180. vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
  1181. vma->vm_ops = &kgsl_gem_kmem_vm_ops;
  1182. } else {
  1183. vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP |
  1184. VM_DONTEXPAND;
  1185. vma->vm_ops = &kgsl_gem_phys_vm_ops;
  1186. }
  1187.  
  1188. vma->vm_private_data = map->handle;
  1189.  
  1190.  
  1191. /* Take care of requested caching policy */
  1192. if (gpriv->type == DRM_KGSL_GEM_TYPE_KMEM ||
  1193. gpriv->type & DRM_KGSL_GEM_CACHE_MASK) {
  1194. if (gpriv->type & DRM_KGSL_GEM_CACHE_WBACKWA)
  1195. vma->vm_page_prot =
  1196. pgprot_writebackwacache(vma->vm_page_prot);
  1197. else if (gpriv->type & DRM_KGSL_GEM_CACHE_WBACK)
  1198. vma->vm_page_prot =
  1199. pgprot_writebackcache(vma->vm_page_prot);
  1200. else if (gpriv->type & DRM_KGSL_GEM_CACHE_WTHROUGH)
  1201. vma->vm_page_prot =
  1202. pgprot_writethroughcache(vma->vm_page_prot);
  1203. else
  1204. vma->vm_page_prot =
  1205. pgprot_writecombine(vma->vm_page_prot);
  1206. } else {
  1207. if (gpriv->type == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE)
  1208. vma->vm_page_prot =
  1209. pgprot_noncached(vma->vm_page_prot);
  1210. else
  1211. /* default pmem is WC */
  1212. vma->vm_page_prot =
  1213. pgprot_writecombine(vma->vm_page_prot);
  1214. }
  1215.  
  1216. /* flush out existing KMEM cached mappings if new ones are
  1217. * of uncached type */
  1218. if (IS_MEM_UNCACHED(gpriv->type))
  1219. kgsl_cache_range_op(&gpriv->memdesc,
  1220. KGSL_CACHE_OP_FLUSH);
  1221.  
  1222. /* Add the other memory types here */
  1223.  
  1224. /* Take a ref for this mapping of the object, so that the fault
  1225. * handler can dereference the mmap offset's pointer to the object.
  1226. * This reference is cleaned up by the corresponding vm_close
  1227. * (which should happen whether the vma was created by this call, or
  1228. * by a vm_open due to mremap or partial unmap or whatever).
  1229. */
  1230. drm_gem_object_reference(obj);
  1231.  
  1232. vma->vm_file = filp; /* Needed for drm_vm_open() */
  1233. drm_vm_open_locked(vma);
  1234.  
  1235. out_unlock:
  1236. mutex_unlock(&dev->struct_mutex);
  1237.  
  1238. return ret;
  1239. }
  1240.  
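/* Release every buffer attached to a fence: clear its fence entry, drop
 * the GEM reference and, if requested, pass the per-object lock on to the
 * next waiting process. Finally mark the fence slot as empty. */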
  1241. void
  1242. cleanup_fence(struct drm_kgsl_gem_object_fence *fence, int check_waiting)
  1243. {
  1244. int j;
  1245. struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
  1246. struct drm_kgsl_gem_object *unlock_obj;
  1247. struct drm_gem_object *obj;
  1248. struct drm_kgsl_gem_object_wait_list_entry *lock_next;
  1249.  
  1250. fence->ts_valid = 0;
  1251. fence->timestamp = -1;
  1252. fence->ts_device = -1;
  1253.  
  1254. /* Walk the list of buffers in this fence and clean up the */
  1255. /* references. Note that this can cause memory allocations */
  1256. /* to be freed */
  1257. for (j = fence->num_buffers; j > 0; j--) {
  1258. this_fence_entry =
  1259. (struct drm_kgsl_gem_object_fence_list_entry *)
  1260. fence->buffers_in_fence.prev;
  1261.  
  1262. this_fence_entry->in_use = 0;
  1263. obj = this_fence_entry->gem_obj;
  1264. unlock_obj = obj->driver_private;
  1265.  
  1266. /* Delete it from the list */
  1267.  
  1268. list_del(&this_fence_entry->list);
  1269.  
  1270. /* we are unlocking - see if there are other pids waiting */
  1271. if (check_waiting) {
  1272. if (!list_empty(&unlock_obj->wait_list)) {
  1273. lock_next =
  1274. (struct drm_kgsl_gem_object_wait_list_entry *)
  1275. unlock_obj->wait_list.prev;
  1276.  
  1277. list_del((struct list_head *)&lock_next->list);
  1278.  
  1279. unlock_obj->lockpid = 0;
  1280. wake_up_interruptible(
  1281. &lock_next->process_wait_q);
  1282. lock_next->pid = 0;
  1283.  
  1284. } else {
  1285. /* List is empty so set pid to 0 */
  1286. unlock_obj->lockpid = 0;
  1287. }
  1288. }
  1289.  
  1290. drm_gem_object_unreference(obj);
  1291. }
  1292. /* here all the buffers in the fence are released */
  1293. /* clear the fence entry */
  1294. fence->fence_id = ENTRY_EMPTY;
  1295. }
  1296.  
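/* Find a free slot in the global fence table and assign it the next fence
 * id; slots marked ENTRY_NEEDS_CLEANUP are reclaimed along the way. */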
  1297. int
  1298. find_empty_fence(void)
  1299. {
  1300. int i;
  1301.  
  1302. for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
  1303. if (gem_buf_fence[i].fence_id == ENTRY_EMPTY) {
  1304. gem_buf_fence[i].fence_id = fence_id++;
  1305. gem_buf_fence[i].ts_valid = 0;
  1306. INIT_LIST_HEAD(&(gem_buf_fence[i].buffers_in_fence));
  1307. if (fence_id == 0xFFFFFFF0)
  1308. fence_id = 1;
  1309. return i;
  1310. } else {
  1311.  
  1312. /* Look for entries to be cleaned up */
  1313. if (gem_buf_fence[i].fence_id == ENTRY_NEEDS_CLEANUP)
  1314. cleanup_fence(&gem_buf_fence[i], 0);
  1315. }
  1316. }
  1317.  
  1318. return ENTRY_EMPTY;
  1319. }
  1320.  
  1321. int
  1322. find_fence(int index)
  1323. {
  1324. int i;
  1325.  
  1326. for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
  1327. if (gem_buf_fence[i].fence_id == index)
  1328. return i;
  1329. }
  1330.  
  1331. return ENTRY_EMPTY;
  1332. }
  1333.  
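/* Called once a fence's timestamp has expired: wake the next waiter (if
 * any) on every buffer in the fence and mark the fence for cleanup. */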
  1334. void
  1335. wakeup_fence_entries(struct drm_kgsl_gem_object_fence *fence)
  1336. {
  1337. struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
  1338. struct drm_kgsl_gem_object_wait_list_entry *lock_next;
  1339. struct drm_kgsl_gem_object *unlock_obj;
  1340. struct drm_gem_object *obj;
  1341.  
  1342. /* TS has expired when we get here */
  1343. fence->ts_valid = 0;
  1344. fence->timestamp = -1;
  1345. fence->ts_device = -1;
  1346.  
  1347. list_for_each_entry(this_fence_entry, &fence->buffers_in_fence, list) {
  1348. obj = this_fence_entry->gem_obj;
  1349. unlock_obj = obj->driver_private;
  1350.  
  1351. if (!list_empty(&unlock_obj->wait_list)) {
  1352. lock_next =
  1353. (struct drm_kgsl_gem_object_wait_list_entry *)
  1354. unlock_obj->wait_list.prev;
  1355.  
  1356. /* Unblock the pid */
  1357. lock_next->pid = 0;
  1358.  
  1359. /* Delete it from the list */
  1360. list_del((struct list_head *)&lock_next->list);
  1361.  
  1362. unlock_obj->lockpid = 0;
  1363. wake_up_interruptible(&lock_next->process_wait_q);
  1364.  
  1365. } else {
  1366. /* List is empty so set pid to 0 */
  1367. unlock_obj->lockpid = 0;
  1368. }
  1369. }
  1370. fence->fence_id = ENTRY_NEEDS_CLEANUP; /* Mark it as needing cleanup */
  1371. }
  1372.  
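/* Timestamp notifier callback: wake up any fence on this device whose
 * timestamp has now been reached. */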
  1373. static int kgsl_ts_notifier_cb(struct notifier_block *blk,
  1374. unsigned long code, void *_param)
  1375. {
  1376. struct drm_kgsl_gem_object_fence *fence;
  1377. struct kgsl_device *device = kgsl_get_device(code);
  1378. int i;
  1379.  
  1380. /* loop through the fences to see what things can be processed */
  1381.  
  1382. for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
  1383. fence = &gem_buf_fence[i];
  1384. if (!fence->ts_valid || fence->ts_device != code)
  1385. continue;
  1386.  
  1387. if (kgsl_check_timestamp(device, fence->timestamp))
  1388. wakeup_fence_entries(fence);
  1389. }
  1390.  
  1391. return 0;
  1392. }
  1393.  
  1394. int
  1395. kgsl_gem_lock_handle_ioctl(struct drm_device *dev, void *data,
  1396. struct drm_file *file_priv)
  1397. {
  1398. /* The purpose of this function is to lock a given set of handles. */
  1399. /* The driver will maintain a list of locked handles. */
  1400. /* If a request comes in for a handle that's locked the thread will */
  1401. /* block until it's no longer in use. */
  1402.  
  1403. struct drm_kgsl_gem_lock_handles *args = data;
  1404. struct drm_gem_object *obj;
  1405. struct drm_kgsl_gem_object *priv;
  1406. struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
  1407. struct drm_kgsl_gem_object_fence *fence;
  1408. struct drm_kgsl_gem_object_wait_list_entry *lock_item;
  1409. int i, j;
  1410. int result = 0;
  1411. uint32_t *lock_list;
  1412. uint32_t *work_list = NULL;
  1413. int32_t fence_index;
  1414.  
  1415. /* copy in the data from user space */
  1416. lock_list = kzalloc(sizeof(uint32_t) * args->num_handles, GFP_KERNEL);
  1417. if (!lock_list) {
  1418. DRM_ERROR("Unable to allocate memory for lock list\n");
  1419. result = -ENOMEM;
  1420. goto error;
  1421. }
  1422.  
  1423. if (copy_from_user(lock_list, args->handle_list,
  1424. sizeof(uint32_t) * args->num_handles)) {
  1425. DRM_ERROR("Unable to copy the lock list from the user\n");
  1426. result = -EFAULT;
  1427. goto free_handle_list;
  1428. }
  1429.  
  1430.  
  1431. work_list = lock_list;
  1432. mutex_lock(&dev->struct_mutex);
  1433.  
  1434. /* build the fence for this group of handles */
  1435. fence_index = find_empty_fence();
  1436. if (fence_index == ENTRY_EMPTY) {
  1437. DRM_ERROR("Unable to find an empty fence\n");
  1438. args->lock_id = 0xDEADBEEF;
  1439. result = -EFAULT;
  1440. goto out_unlock;
  1441. }
  1442.  
  1443. fence = &gem_buf_fence[fence_index];
  1444. gem_buf_fence[fence_index].num_buffers = args->num_handles;
  1445. args->lock_id = gem_buf_fence[fence_index].fence_id;
  1446.  
  1447. for (j = args->num_handles; j > 0; j--, lock_list++) {
  1448. obj = drm_gem_object_lookup(dev, file_priv, *lock_list);
  1449.  
  1450. if (obj == NULL) {
  1451. DRM_ERROR("Invalid GEM handle %x\n", *lock_list);
  1452. result = -EBADF;
  1453. goto out_unlock;
  1454. }
  1455.  
  1456. priv = obj->driver_private;
  1457. this_fence_entry = NULL;
  1458.  
  1459. /* get a fence entry to hook into the fence */
  1460. for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
  1461. if (!priv->fence_entries[i].in_use) {
  1462. this_fence_entry = &priv->fence_entries[i];
  1463. this_fence_entry->in_use = 1;
  1464. break;
  1465. }
  1466. }
  1467.  
  1468. if (this_fence_entry == NULL) {
  1469. fence->num_buffers = 0;
  1470. fence->fence_id = ENTRY_EMPTY;
  1471. args->lock_id = 0xDEADBEAD;
  1472. result = -EFAULT;
  1473. drm_gem_object_unreference(obj);
  1474. goto out_unlock;
  1475. }
  1476.  
  1477. /* We're trying to lock - add to a fence */
  1478. list_add((struct list_head *)this_fence_entry,
  1479. &gem_buf_fence[fence_index].buffers_in_fence);
  1480. if (priv->lockpid) {
  1481.  
  1482. if (priv->lockpid == args->pid) {
  1483. /* now that things are running async this */
  1484. /* happens when an op isn't done */
  1485. /* so it's already locked by the calling pid */
  1486. continue;
  1487. }
  1488.  
  1489.  
  1490. /* if a pid already had it locked */
  1491. /* create and add to wait list */
  1492. for (i = 0; i < DRM_KGSL_HANDLE_WAIT_ENTRIES; i++) {
  1493. if (priv->wait_entries[i].in_use == 0) {
  1494. /* this one is empty */
  1495. lock_item = &priv->wait_entries[i];
  1496. lock_item->in_use = 1;
  1497. lock_item->pid = args->pid;
  1498. INIT_LIST_HEAD((struct list_head *)
  1499. &priv->wait_entries[i]);
  1500. break;
  1501. }
  1502. }
  1503.  
  1504. if (i == DRM_KGSL_HANDLE_WAIT_ENTRIES) {
  1505.  
  1506. result = -EFAULT;
  1507. drm_gem_object_unreference(obj);
  1508. goto out_unlock;
  1509. }
  1510.  
  1511. list_add_tail((struct list_head *)&lock_item->list,
  1512. &priv->wait_list);
  1513. mutex_unlock(&dev->struct_mutex);
  1514. /* here we need to block */
  1515. wait_event_interruptible_timeout(
  1516. priv->wait_entries[i].process_wait_q,
  1517. (priv->lockpid == 0),
  1518. msecs_to_jiffies(64));
  1519. mutex_lock(&dev->struct_mutex);
  1520. lock_item->in_use = 0;
  1521. }
  1522.  
  1523. /* Getting here means no one currently holds the lock */
  1524. priv->lockpid = args->pid;
  1525.  
  1526. args->lock_id = gem_buf_fence[fence_index].fence_id;
  1527. }
  1528. fence->lockpid = args->pid;
  1529.  
  1530. out_unlock:
  1531. mutex_unlock(&dev->struct_mutex);
  1532.  
  1533. free_handle_list:
  1534. kfree(work_list);
  1535.  
  1536. error:
  1537. return result;
  1538. }
  1539.  
  1540. int
  1541. kgsl_gem_unlock_handle_ioctl(struct drm_device *dev, void *data,
  1542. struct drm_file *file_priv)
  1543. {
  1544. struct drm_kgsl_gem_unlock_handles *args = data;
  1545. int result = 0;
  1546. int32_t fence_index;
  1547.  
  1548. mutex_lock(&dev->struct_mutex);
  1549. fence_index = find_fence(args->lock_id);
  1550. if (fence_index == ENTRY_EMPTY) {
  1551. DRM_ERROR("Invalid lock ID: %x\n", args->lock_id);
  1552. result = -EFAULT;
  1553. goto out_unlock;
  1554. }
  1555.  
  1556. cleanup_fence(&gem_buf_fence[fence_index], 1);
  1557.  
  1558. out_unlock:
  1559. mutex_unlock(&dev->struct_mutex);
  1560.  
  1561. return result;
  1562. }
  1563.  
  1564.  
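/* Defer unlocking a set of handles until the given device timestamp
 * expires; if the timestamp has already passed, clean up immediately. */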
  1565. int
  1566. kgsl_gem_unlock_on_ts_ioctl(struct drm_device *dev, void *data,
  1567. struct drm_file *file_priv)
  1568. {
  1569. struct drm_kgsl_gem_unlock_on_ts *args = data;
  1570. int result = 0;
  1571. int ts_done = 0;
  1572. int32_t fence_index, ts_device;
  1573. struct drm_kgsl_gem_object_fence *fence;
  1574. struct kgsl_device *device;
  1575.  
  1576. if (args->type == DRM_KGSL_GEM_TS_3D)
  1577. ts_device = KGSL_DEVICE_3D0;
  1578. else if (args->type == DRM_KGSL_GEM_TS_2D)
  1579. ts_device = KGSL_DEVICE_2D0;
  1580. else {
  1581. result = -EINVAL;
  1582. goto error;
  1583. }
  1584.  
  1585. device = kgsl_get_device(ts_device);
  1586. ts_done = kgsl_check_timestamp(device, args->timestamp);
  1587.  
  1588. mutex_lock(&dev->struct_mutex);
  1589.  
  1590. fence_index = find_fence(args->lock_id);
  1591. if (fence_index == ENTRY_EMPTY) {
  1592. DRM_ERROR("Invalid lock ID: %x\n", args->lock_id);
  1593. result = -EFAULT;
  1594. goto out_unlock;
  1595. }
  1596.  
  1597. fence = &gem_buf_fence[fence_index];
  1598. fence->ts_device = ts_device;
  1599.  
  1600. if (!ts_done)
  1601. fence->ts_valid = 1;
  1602. else
  1603. cleanup_fence(fence, 1);
  1604.  
  1605.  
  1606. out_unlock:
  1607. mutex_unlock(&dev->struct_mutex);
  1608.  
  1609. error:
  1610. return result;
  1611. }
  1612.  
  1613. struct drm_ioctl_desc kgsl_drm_ioctls[] = {
  1614. DRM_IOCTL_DEF_DRV(KGSL_GEM_CREATE, kgsl_gem_create_ioctl, 0),
  1615. DRM_IOCTL_DEF_DRV(KGSL_GEM_PREP, kgsl_gem_prep_ioctl, 0),
  1616. DRM_IOCTL_DEF_DRV(KGSL_GEM_SETMEMTYPE, kgsl_gem_setmemtype_ioctl, 0),
  1617. DRM_IOCTL_DEF_DRV(KGSL_GEM_GETMEMTYPE, kgsl_gem_getmemtype_ioctl, 0),
  1618. DRM_IOCTL_DEF_DRV(KGSL_GEM_BIND_GPU, kgsl_gem_bind_gpu_ioctl, 0),
  1619. DRM_IOCTL_DEF_DRV(KGSL_GEM_UNBIND_GPU, kgsl_gem_unbind_gpu_ioctl, 0),
  1620. DRM_IOCTL_DEF_DRV(KGSL_GEM_ALLOC, kgsl_gem_alloc_ioctl, 0),
  1621. DRM_IOCTL_DEF_DRV(KGSL_GEM_MMAP, kgsl_gem_mmap_ioctl, 0),
  1622. DRM_IOCTL_DEF_DRV(KGSL_GEM_GET_BUFINFO, kgsl_gem_get_bufinfo_ioctl, 0),
  1623. DRM_IOCTL_DEF_DRV(KGSL_GEM_SET_BUFCOUNT,
  1624. kgsl_gem_set_bufcount_ioctl, 0),
  1625. DRM_IOCTL_DEF_DRV(KGSL_GEM_SET_ACTIVE, kgsl_gem_set_active_ioctl, 0),
  1626. DRM_IOCTL_DEF_DRV(KGSL_GEM_LOCK_HANDLE,
  1627. kgsl_gem_lock_handle_ioctl, 0),
  1628. DRM_IOCTL_DEF_DRV(KGSL_GEM_UNLOCK_HANDLE,
  1629. kgsl_gem_unlock_handle_ioctl, 0),
  1630. DRM_IOCTL_DEF_DRV(KGSL_GEM_UNLOCK_ON_TS,
  1631. kgsl_gem_unlock_on_ts_ioctl, 0),
  1632. DRM_IOCTL_DEF_DRV(KGSL_GEM_CREATE_FD, kgsl_gem_create_fd_ioctl,
  1633. DRM_MASTER),
  1634. };
  1635.  
  1636. static struct drm_driver driver = {
  1637. .driver_features = DRIVER_USE_PLATFORM_DEVICE | DRIVER_GEM,
  1638. .load = kgsl_drm_load,
  1639. .unload = kgsl_drm_unload,
  1640. .firstopen = kgsl_drm_firstopen,
  1641. .lastclose = kgsl_drm_lastclose,
  1642. .preclose = kgsl_drm_preclose,
  1643. .suspend = kgsl_drm_suspend,
  1644. .resume = kgsl_drm_resume,
  1645. .reclaim_buffers = drm_core_reclaim_buffers,
  1646. .gem_init_object = kgsl_gem_init_object,
  1647. .gem_free_object = kgsl_gem_free_object,
  1648. .ioctls = kgsl_drm_ioctls,
  1649.  
  1650. .fops = {
  1651. .owner = THIS_MODULE,
  1652. .open = drm_open,
  1653. .release = drm_release,
  1654. .unlocked_ioctl = drm_ioctl,
  1655. .mmap = msm_drm_gem_mmap,
  1656. .poll = drm_poll,
  1657. .fasync = drm_fasync,
  1658. },
  1659.  
  1660. .name = DRIVER_NAME,
  1661. .desc = DRIVER_DESC,
  1662. .date = DRIVER_DATE,
  1663. .major = DRIVER_MAJOR,
  1664. .minor = DRIVER_MINOR,
  1665. .patchlevel = DRIVER_PATCHLEVEL,
  1666. };
  1667.  
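/* Initialize the global memory list and fence table, then register the
 * driver with the DRM core. */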
  1668. int kgsl_drm_init(struct platform_device *dev)
  1669. {
  1670. int i;
  1671.  
  1672. driver.num_ioctls = DRM_ARRAY_SIZE(kgsl_drm_ioctls);
  1673. driver.platform_device = dev;
  1674.  
  1675. INIT_LIST_HEAD(&kgsl_mem_list);
  1676.  
  1677. for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
  1678. gem_buf_fence[i].num_buffers = 0;
  1679. gem_buf_fence[i].ts_valid = 0;
  1680. gem_buf_fence[i].fence_id = ENTRY_EMPTY;
  1681. }
  1682.  
  1683. return drm_init(&driver);
  1684. }
  1685.  
  1686. void kgsl_drm_exit(void)
  1687. {
  1688. drm_exit(&driver);
  1689. }