From ca8b106738521823707f3567cedb41ca158792a3 Mon Sep 17 00:00:00 2001
From: Alberto Milone <alberto.milone@canonical.com>
Date: Wed, 15 Feb 2017 10:53:42 +0100
Subject: [PATCH 1/1] Add support for Linux 4.10

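In Linux 4.10, struct fence was renamed to struct dma_fence (and the
matching fence_*() helpers to dma_fence_*()), get_user_pages_remote()
gained a trailing "locked" argument, and CPU hotplug callbacks are
expected to use the cpuhp state machine instead of the old notifier
interface. Guard the affected call sites with LINUX_VERSION_CODE
checks so the module keeps building against older kernels as well.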
---
 common/inc/nv-mm.h            |  9 ++++--
 nvidia-drm/nvidia-drm-fence.c | 72 +++++++++++++++++++++++++++++++++++++++++++
 nvidia-drm/nvidia-drm-gem.h   |  6 ++++
 nvidia-drm/nvidia-drm-priv.h  |  7 +++++
 nvidia/nv-pat.c               | 40 ++++++++++++++++++++++++
 5 files changed, 132 insertions(+), 2 deletions(-)

diff --git a/kernel/common/inc/nv-mm.h b/kernel/common/inc/nv-mm.h
index 06d7da4..e5cc56a 100644
--- a/kernel/common/inc/nv-mm.h
+++ b/kernel/common/inc/nv-mm.h
@@ -46,6 +46,8 @@
* 2016 Dec 14:5b56d49fc31dbb0487e14ead790fc81ca9fb2c99
*/

+#include <linux/version.h>
+
#if defined(NV_GET_USER_PAGES_REMOTE_PRESENT)
#if defined(NV_GET_USER_PAGES_HAS_WRITE_AND_FORCE_ARGS)
#define NV_GET_USER_PAGES get_user_pages
@@ -92,10 +94,13 @@
pages, vmas, NULL);

#else
-
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
return get_user_pages_remote(tsk, mm, start, nr_pages, flags,
pages, vmas);
-
+#else
+ return get_user_pages_remote(tsk, mm, start, nr_pages, flags,
+ pages, vmas, NULL);
+#endif
#endif

}
diff --git a/kernel/nvidia-drm/nvidia-drm-fence.c b/kernel/nvidia-drm/nvidia-drm-fence.c
index 5e98c5f..fa2c508 100644
--- a/kernel/nvidia-drm/nvidia-drm-fence.c
+++ b/kernel/nvidia-drm/nvidia-drm-fence.c
@@ -31,7 +31,11 @@

#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ)
struct nv_fence {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
struct fence base;
+#else
+ struct dma_fence base;
+#endif
spinlock_t lock;

struct nvidia_drm_device *nv_dev;
@@ -51,7 +55,11 @@ nv_fence_ready_to_signal(struct nv_fence *nv_fence)

static const char *nvidia_drm_gem_prime_fence_op_get_driver_name
(
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
struct fence *fence
+#else
+ struct dma_fence *fence
+#endif
)
{
return "NVIDIA";
@@ -59,7 +67,11 @@ static const char *nvidia_drm_gem_prime_fence_op_get_driver_name

static const char *nvidia_drm_gem_prime_fence_op_get_timeline_name
(
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
struct fence *fence
+#else
+ struct dma_fence *fence
+#endif
)
{
return "nvidia.prime";
@@ -67,7 +79,11 @@ static const char *nvidia_drm_gem_prime_fence_op_get_timeline_name

static bool nvidia_drm_gem_prime_fence_op_signaled
(
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
struct fence *fence
+#else
+ struct dma_fence *fence
+#endif
)
{
struct nv_fence *nv_fence = container_of(fence, struct nv_fence, base);
@@ -99,7 +115,11 @@ unlock_struct_mutex:

static bool nvidia_drm_gem_prime_fence_op_enable_signaling
(
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
struct fence *fence
+#else
+ struct dma_fence *fence
+#endif
)
{
bool ret = true;
@@ -107,7 +127,11 @@ static bool nvidia_drm_gem_prime_fence_op_enable_signaling
struct nvidia_drm_gem_object *nv_gem = nv_fence->nv_gem;
struct nvidia_drm_device *nv_dev = nv_fence->nv_dev;

+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
if (fence_is_signaled(fence))
+#else
+ if (dma_fence_is_signaled(fence))
+#endif
{
return false;
}
@@ -136,7 +160,11 @@ static bool nvidia_drm_gem_prime_fence_op_enable_signaling
}

nv_gem->fenceContext.softFence = fence;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
fence_get(fence);
+#else
+ dma_fence_get(fence);
+#endif

unlock_struct_mutex:
mutex_unlock(&nv_dev->dev->struct_mutex);
@@ -146,7 +174,11 @@ unlock_struct_mutex:

static void nvidia_drm_gem_prime_fence_op_release
(
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
struct fence *fence
+#else
+ struct dma_fence *fence
+#endif
)
{
struct nv_fence *nv_fence = container_of(fence, struct nv_fence, base);
@@ -155,7 +187,11 @@ static void nvidia_drm_gem_prime_fence_op_release

static signed long nvidia_drm_gem_prime_fence_op_wait
(
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
struct fence *fence,
+#else
+ struct dma_fence *fence,
+#endif
bool intr,
signed long timeout
)
@@ -170,12 +206,20 @@ static signed long nvidia_drm_gem_prime_fence_op_wait
* that it should never get hit during normal operation, but not so long
* that the system becomes unresponsive.
*/
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
return fence_default_wait(fence, intr,
+#else
+ return dma_fence_default_wait(fence, intr,
+#endif
(timeout == MAX_SCHEDULE_TIMEOUT) ?
msecs_to_jiffies(96) : timeout);
}

+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
static const struct fence_ops nvidia_drm_gem_prime_fence_ops = {
+#else
+static const struct dma_fence_ops nvidia_drm_gem_prime_fence_ops = {
+#endif
.get_driver_name = nvidia_drm_gem_prime_fence_op_get_driver_name,
.get_timeline_name = nvidia_drm_gem_prime_fence_op_get_timeline_name,
.signaled = nvidia_drm_gem_prime_fence_op_signaled,
@@ -285,7 +329,11 @@ static void nvidia_drm_gem_prime_fence_signal
bool force
)
{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
struct fence *fence = nv_gem->fenceContext.softFence;
+#else
+ struct dma_fence *fence = nv_gem->fenceContext.softFence;
+#endif

WARN_ON(!mutex_is_locked(&nv_dev->dev->struct_mutex));

@@ -301,10 +349,18 @@ static void nvidia_drm_gem_prime_fence_signal

if (force || nv_fence_ready_to_signal(nv_fence))
{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
fence_signal(&nv_fence->base);
+#else
+ dma_fence_signal(&nv_fence->base);
+#endif

nv_gem->fenceContext.softFence = NULL;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
fence_put(&nv_fence->base);
+#else
+ dma_fence_put(&nv_fence->base);
+#endif

nvKms->disableChannelEvent(nv_dev->pDevice,
nv_gem->fenceContext.cb);
@@ -320,7 +376,11 @@ static void nvidia_drm_gem_prime_fence_signal

nv_fence = container_of(fence, struct nv_fence, base);

+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
fence_signal(&nv_fence->base);
+#else
+ dma_fence_signal(&nv_fence->base);
+#endif
}
}

@@ -513,7 +573,11 @@ int nvidia_drm_gem_prime_fence_init
* fence_context_alloc() cannot fail, so we do not need to check a return
* value.
*/
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
nv_gem->fenceContext.context = fence_context_alloc(1);
+#else
+ nv_gem->fenceContext.context = dma_fence_context_alloc(1);
+#endif

ret = nvidia_drm_gem_prime_fence_import_semaphore(
nv_dev, nv_gem, p->index,
@@ -670,7 +734,11 @@ int nvidia_drm_gem_prime_fence_attach
nv_fence->nv_gem = nv_gem;

spin_lock_init(&nv_fence->lock);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
fence_init(&nv_fence->base, &nvidia_drm_gem_prime_fence_ops,
+#else
+ dma_fence_init(&nv_fence->base, &nvidia_drm_gem_prime_fence_ops,
+#endif
&nv_fence->lock, nv_gem->fenceContext.context,
p->sem_thresh);

@@ -680,7 +748,11 @@ int nvidia_drm_gem_prime_fence_attach

reservation_object_add_excl_fence(&nv_gem->fenceContext.resv,
&nv_fence->base);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
fence_put(&nv_fence->base); /* Reservation object has reference */
+#else
+ dma_fence_put(&nv_fence->base);
+#endif

ret = 0;

diff --git a/kernel/nvidia-drm/nvidia-drm-gem.h b/kernel/nvidia-drm/nvidia-drm-gem.h
index 4ff45e8..394576b 100644
--- a/kernel/nvidia-drm/nvidia-drm-gem.h
+++ b/kernel/nvidia-drm/nvidia-drm-gem.h
@@ -29,6 +29,8 @@

#include "nvidia-drm-priv.h"

+#include <linux/version.h>
+
#include <drm/drmP.h>
#include "nvkms-kapi.h"

@@ -98,7 +100,11 @@ struct nvidia_drm_gem_object
/* Software signaling structures */
struct NvKmsKapiChannelEvent *cb;
struct nvidia_drm_gem_prime_soft_fence_event_args *cbArgs;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
struct fence *softFence; /* Fence for software signaling */
+#else
+ struct dma_fence *softFence;
+#endif
} fenceContext;
#endif
};
diff --git a/kernel/nvidia-drm/nvidia-drm-priv.h b/kernel/nvidia-drm/nvidia-drm-priv.h
index 1e9b9f9..ae171e7 100644
--- a/kernel/nvidia-drm/nvidia-drm-priv.h
+++ b/kernel/nvidia-drm/nvidia-drm-priv.h
@@ -25,6 +25,8 @@

#include "conftest.h" /* NV_DRM_AVAILABLE */

+#include <linux/version.h>
+
#if defined(NV_DRM_AVAILABLE)

#include <drm/drmP.h>
@@ -34,7 +36,12 @@
#endif

#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
#include <linux/fence.h>
+#else
+#include <linux/dma-fence.h>
+#endif
+
#include <linux/reservation.h>
#endif

diff --git a/kernel/nvidia/nv-pat.c b/kernel/nvidia/nv-pat.c
index df78020..0af7d47 100644
--- a/kernel/nvidia/nv-pat.c
+++ b/kernel/nvidia/nv-pat.c
@@ -203,6 +203,7 @@ void nv_disable_pat_support(void)
}

#if defined(NV_ENABLE_PAT_SUPPORT) && defined(NV_ENABLE_HOTPLUG_CPU)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
static int
nvidia_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
@@ -234,6 +235,34 @@ static struct notifier_block nv_hotcpu_nfb = {
.notifier_call = nvidia_cpu_callback,
.priority = 0
};
+#else
+static int nvidia_cpu_online(unsigned int hcpu)
+{
+ unsigned int cpu = get_cpu();
+ if (cpu == hcpu)
+ nv_setup_pat_entries(NULL);
+ else
+ NV_SMP_CALL_FUNCTION(nv_setup_pat_entries, (void *)(long int)hcpu, 1);
+
+ put_cpu();
+
+ return 0;
+}
+
+static int nvidia_cpu_down_prep(unsigned int hcpu)
+{
+ unsigned int cpu = get_cpu();
+ if (cpu == hcpu)
+ nv_restore_pat_entries(NULL);
+ else
+ NV_SMP_CALL_FUNCTION(nv_restore_pat_entries, (void *)(long int)hcpu, 1);
+
+ put_cpu();
+
+ return 0;
+}
+#endif
+
#endif

int nv_init_pat_support(nvidia_stack_t *sp)
@@ -255,7 +284,14 @@ int nv_init_pat_support(nvidia_stack_t *sp)
#if defined(NV_ENABLE_PAT_SUPPORT) && defined(NV_ENABLE_HOTPLUG_CPU)
if (nv_pat_mode == NV_PAT_MODE_BUILTIN)
{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
if (register_hotcpu_notifier(&nv_hotcpu_nfb) != 0)
+#else
+ if (cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "gpu/nvidia:online",
+ nvidia_cpu_online,
+ nvidia_cpu_down_prep) < 0)
+#endif
{
nv_disable_pat_support();
nv_printf(NV_DBG_ERRORS,
@@ -280,7 +316,11 @@ void nv_teardown_pat_support(void)
{
nv_disable_pat_support();
#if defined(NV_ENABLE_PAT_SUPPORT) && defined(NV_ENABLE_HOTPLUG_CPU)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
unregister_hotcpu_notifier(&nv_hotcpu_nfb);
+#else
+ cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN);
+#endif
#endif
}
}
--
2.7.4
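
A note on the two recurring patterns above. The fence hunks repeat the
same LINUX_VERSION_CODE guard at every call site; the 4.10 rename of
struct fence to struct dma_fence could instead be absorbed once in a
small compatibility header. A minimal sketch, assuming a hypothetical
header name nv-dma-fence-compat.h (not part of the patch):

    /*
     * nv-dma-fence-compat.h -- hypothetical shim, not part of the patch.
     * Maps the nv_dma_fence_* names onto fence_* before Linux 4.10 and
     * onto dma_fence_* from 4.10 on, so call sites need no #ifdefs.
     */
    #ifndef __NV_DMA_FENCE_COMPAT_H__
    #define __NV_DMA_FENCE_COMPAT_H__

    #include <linux/version.h>

    #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
    #include <linux/fence.h>
    typedef struct fence nv_dma_fence_t;
    typedef struct fence_ops nv_dma_fence_ops_t;
    #define nv_dma_fence_init          fence_init
    #define nv_dma_fence_get           fence_get
    #define nv_dma_fence_put           fence_put
    #define nv_dma_fence_signal        fence_signal
    #define nv_dma_fence_is_signaled   fence_is_signaled
    #define nv_dma_fence_default_wait  fence_default_wait
    #define nv_dma_fence_context_alloc fence_context_alloc
    #else
    #include <linux/dma-fence.h>
    typedef struct dma_fence nv_dma_fence_t;
    typedef struct dma_fence_ops nv_dma_fence_ops_t;
    #define nv_dma_fence_init          dma_fence_init
    #define nv_dma_fence_get           dma_fence_get
    #define nv_dma_fence_put           dma_fence_put
    #define nv_dma_fence_signal        dma_fence_signal
    #define nv_dma_fence_is_signaled   dma_fence_is_signaled
    #define nv_dma_fence_default_wait  dma_fence_default_wait
    #define nv_dma_fence_context_alloc dma_fence_context_alloc
    #endif

    #endif /* __NV_DMA_FENCE_COMPAT_H__ */

For the hotplug hunks, cpuhp_setup_state() called with
CPUHP_AP_ONLINE_DYN returns the dynamically allocated state number (a
positive value) on success, which is why the result is compared with
< 0 rather than != 0. The teardown above passes the CPUHP_AP_ONLINE_DYN
constant back to cpuhp_remove_state_nocalls(); a more defensive variant
stores the returned state so that exactly the allocated slot is
removed. An illustrative sketch (helper names hypothetical) reusing the
callbacks defined in the patch:

    #include <linux/cpuhotplug.h>

    /* Callbacks as defined in nv-pat.c by the patch above. */
    static int nvidia_cpu_online(unsigned int cpu);
    static int nvidia_cpu_down_prep(unsigned int cpu);

    static enum cpuhp_state nv_cpuhp_online_state;

    static int nv_register_cpuhp_hooks(void)
    {
        /* With CPUHP_AP_ONLINE_DYN, success is a positive state number. */
        int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "gpu/nvidia:online",
                                    nvidia_cpu_online, nvidia_cpu_down_prep);
        if (ret < 0)
            return ret;
        nv_cpuhp_online_state = ret; /* remember the slot we were given */
        return 0;
    }

    static void nv_unregister_cpuhp_hooks(void)
    {
        /* Remove exactly the state allocated at registration time. */
        cpuhp_remove_state_nocalls(nv_cpuhp_online_state);
    }

Since the diff paths are rooted at a/kernel/ and b/kernel/, the patch
applies with patch -p1 from the top of the extracted driver sources
(the directory containing kernel/).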