Advertisement
Guest User

Untitled

a guest
Feb 15th, 2021
111
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 6.44 KB | None | 0 0
  1. diff --git a/kgpu/kgpu/Makefile b/kgpu.my/kgpu/Makefile
  2. index e1cbf4c..551de23 100755
  3. --- a/kgpu/kgpu/Makefile
  4. +++ b/kgpu.my/kgpu/Makefile
  5. @@ -16,8 +16,8 @@ kgpu_log:
  6. helper: kgpu_log
  7. gcc -O2 -D__KGPU__ -c helper.c
  8. gcc -O2 -D__KGPU__ -c service.c
  9. - nvcc -O2 -D__KGPU__ -c -arch=sm_20 gpuops.cu
  10. - nvcc -link -D__KGPU__ -O2 -arch=sm_20 service.o helper.o kgpu_log_user.o gpuops.o -o helper -ldl
  11. + nvcc -O2 -D__KGPU__ -c -arch=sm_30 gpuops.cu
  12. + nvcc -link -D__KGPU__ -O2 -arch=sm_30 service.o helper.o kgpu_log_user.o gpuops.o -o helper -ldl
  13. $(if $(BUILD_DIR), cp helper $(BUILD_DIR)/ )
  14.  
  15. clean:
  16. diff --git a/kgpu/kgpu/kgpu_kutils.c b/kgpu.my/kgpu/kgpu_kutils.c
  17. index 731d7e7..4b1f5f0 100755
  18. --- a/kgpu/kgpu/kgpu_kutils.c
  19. +++ b/kgpu.my/kgpu/kgpu_kutils.c
  20. @@ -20,25 +20,33 @@
  21. struct page* kgpu_v2page(unsigned long v)
  22. {
  23. struct page *p = NULL;
  24. - pgd_t *pgd = pgd_offset(current->mm, v);
  25. + pgd_t *pgd;
  26. + p4d_t* p4d;
  27. + pud_t *pud;
  28. + pmd_t *pmd;
  29. +
  30. + pgd = pgd_offset(current->mm, v);
  31.  
  32. if (!pgd_none(*pgd)) {
  33. - pud_t *pud = pud_offset(pgd, v);
  34. - if (!pud_none(*pud)) {
  35. - pmd_t *pmd = pmd_offset(pud, v);
  36. - if (!pmd_none(*pmd)) {
  37. - pte_t *pte;
  38. + p4d = p4d_offset(pgd, v);
  39. + if (!p4d_none(*p4d)) {
  40. + pud = pud_offset(p4d, v);
  41. + if (!pud_none(*pud)) {
  42. + pmd = pmd_offset(pud, v);
  43. + if (!pmd_none(*pmd)) {
  44. + pte_t *pte;
  45.  
  46. - pte = pte_offset_map(pmd, v);
  47. - if (pte_present(*pte))
  48. - p = pte_page(*pte);
  49. + pte = pte_offset_map(pmd, v);
  50. + if (pte_present(*pte))
  51. + p = pte_page(*pte);
  52.  
  53. - /*
  54. - * although KGPU doesn't support x86_32, but in case
  55. - * some day it does, the pte_unmap should not be called
  56. - * because we want the pte stay in mem.
  57. - */
  58. - pte_unmap(pte);
  59. + /*
  60. + * although KGPU doesn't support x86_32, but in case
  61. + * some day it does, the pte_unmap should not be called
  62. + * because we want the pte stay in mem.
  63. + */
  64. + pte_unmap(pte);
  65. + }
  66. }
  67. }
  68. }
  69. diff --git a/kgpu/kgpu/main.c b/kgpu.my/kgpu/main.c
  70. index 921a3c5..68c7284 100755
  71. --- a/kgpu/kgpu/main.c
  72. +++ b/kgpu.my/kgpu/main.c
  73. @@ -402,7 +402,7 @@ void kgpu_unmap_area(unsigned long addr)
  74. }
  75. //kgpu_log(KGPU_LOG_PRINT, "unmap %d pages from %p\n", n, addr);
  76. if (n > 0) {
  77. - int ret;
  78. +// int ret;
  79. spin_lock(&kgpudev.vm_lock);
  80. bitmap_clear(kgpudev.vm.bitmap, idx, n);
  81. kgpudev.vm.alloc_sz[idx] = 0;
  82. @@ -411,10 +411,11 @@ void kgpu_unmap_area(unsigned long addr)
  83. addr, n<<PAGE_SHIFT, 1);*/
  84.  
  85. kgpudev.vm.vma->vm_flags |= VM_PFNMAP;
  86. - ret = zap_vma_ptes(kgpudev.vm.vma, addr, n<<PAGE_SHIFT);
  87. - if (ret)
  88. - kgpu_log(KGPU_LOG_ALERT,
  89. - "zap_vma_ptes returns %d\n", ret);
  90. + zap_vma_ptes(kgpudev.vm.vma, addr, n<<PAGE_SHIFT);
  91. +// ret = 0;
  92. +// if (ret)
  93. +// kgpu_log(KGPU_LOG_ALERT,
  94. +// "zap_vma_ptes returns %d\n", ret);
  95. kgpudev.vm.vma->vm_flags &= ~VM_PFNMAP;
  96. spin_unlock(&kgpudev.vm_lock);
  97. }
  98. @@ -425,7 +426,8 @@ EXPORT_SYMBOL_GPL(kgpu_unmap_area);
  99. int kgpu_map_page(struct page *p, unsigned long addr)
  100. {
  101. int ret = 0;
  102. - down_write(&kgpudev.vm.vma->vm_mm->mmap_sem);
  103. +// down_write(&kgpudev.vm.vma->vm_mm->mmap_sem);
  104. + mmap_write_lock(kgpudev.vm.vma->vm_mm);
  105.  
  106. ret = vm_insert_page(kgpudev.vm.vma, addr, p);
  107. if (unlikely(ret < 0)) {
  108. @@ -434,7 +436,8 @@ int kgpu_map_page(struct page *p, unsigned long addr)
  109. page_to_pfn(p), ret, page_count(p));
  110. }
  111.  
  112. - up_write(&kgpudev.vm.vma->vm_mm->mmap_sem);
  113. +// up_write(&kgpudev.vm.vma->vm_mm->mmap_sem);
  114. + mmap_write_unlock(kgpudev.vm.vma->vm_mm);
  115. return ret;
  116. }
  117. EXPORT_SYMBOL_GPL(kgpu_map_page);
  118. @@ -450,7 +453,8 @@ static void* map_page_units(void *units, int n, int is_page)
  119. return NULL;
  120. }
  121.  
  122. - down_write(&kgpudev.vm.vma->vm_mm->mmap_sem);
  123. +// down_write(&kgpudev.vm.vma->vm_mm->mmap_sem);
  124. + mmap_write_lock(kgpudev.vm.vma->vm_mm);
  125.  
  126. for (i=0; i<n; i++) {
  127. ret = vm_insert_page(
  128. @@ -460,7 +464,8 @@ static void* map_page_units(void *units, int n, int is_page)
  129. );
  130.  
  131. if (unlikely(ret < 0)) {
  132. - up_write(&kgpudev.vm.vma->vm_mm->mmap_sem);
  133. +// up_write(&kgpudev.vm.vma->vm_mm->mmap_sem);
  134. + mmap_write_unlock(kgpudev.vm.vma->vm_mm);
  135. kgpu_log(KGPU_LOG_ERROR,
  136. "can't remap pfn %lu, error code %d\n",
  137. is_page ? page_to_pfn(((struct page**)units)[i]) : ((unsigned long*)units)[i],
  138. @@ -469,7 +474,8 @@ static void* map_page_units(void *units, int n, int is_page)
  139. }
  140. }
  141.  
  142. - up_write(&kgpudev.vm.vma->vm_mm->mmap_sem);
  143. +// up_write(&kgpudev.vm.vma->vm_mm->mmap_sem);
  144. + mmap_write_unlock(kgpudev.vm.vma->vm_mm);
  145.  
  146. return (void*)addr;
  147.  
  148. @@ -792,9 +798,11 @@ static long kgpu_ioctl(struct file *filp,
  149. if (_IOC_NR(cmd) > KGPU_IOC_MAXNR) return -ENOTTY;
  150.  
  151. if (_IOC_DIR(cmd) & _IOC_READ)
  152. - err = !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd));
  153. +// err = !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd));
  154. + err = !access_ok((void __user *)arg, _IOC_SIZE(cmd));
  155. else if (_IOC_DIR(cmd) & _IOC_WRITE)
  156. - err = !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd));
  157. +// err = !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd));
  158. + err = !access_ok((void __user *)arg, _IOC_SIZE(cmd));
  159. if (err) return -EFAULT;
  160.  
  161. switch (cmd) {
  162. @@ -868,7 +876,8 @@ static int kgpu_vm_fault(struct vm_area_struct *vma,
  163. /* should never call this */
  164. kgpu_log(KGPU_LOG_ERROR,
  165. "kgpu mmap area being accessed without pre-mapping 0x%lX (0x%lX)\n",
  166. - (unsigned long)vmf->virtual_address,
  167. +// (unsigned long)vmf->virtual_address,
  168. + (unsigned long)vmf->address,
  169. (unsigned long)vma->vm_start);
  170. vmf->flags |= VM_FAULT_NOPAGE|VM_FAULT_ERROR;
  171. return VM_FAULT_SIGBUS;
  172. @@ -877,7 +886,7 @@ static int kgpu_vm_fault(struct vm_area_struct *vma,
  173. static struct vm_operations_struct kgpu_vm_ops = {
  174. .open = kgpu_vm_open,
  175. .close = kgpu_vm_close,
  176. - .fault = kgpu_vm_fault,
  177. +// .fault = kgpu_vm_fault,
  178. };
  179.  
  180. static void set_vm(struct vm_area_struct *vma)
  181. @@ -912,6 +921,11 @@ static void clean_vm(void)
  182.  
  183. static int kgpu_mmap(struct file *filp, struct vm_area_struct *vma)
  184. {
  185. +
  186. +#ifndef VM_RESERVED
  187. +# define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
  188. +#endif
  189. +
  190. if (vma->vm_end - vma->vm_start != KGPU_MMAP_SIZE) {
  191. kgpu_log(KGPU_LOG_ALERT,
  192. "mmap size incorrect from 0x%lX to 0x%lX with "
  193.  
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement