Advertisement
Guest User

Untitled

a guest
Oct 3rd, 2019
140
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 4.41 KB | None | 0 0
  1. Bail out if s_fence is no longer fresh.
  2. https://patchwork.freedesktop.org/patch/333645/
  3. --- a/drivers/gpu/drm/scheduler/sched_main.c
  4. +++ b/drivers/gpu/drm/scheduler/sched_main.c
  5. @@ -333,6 +333,10 @@ void drm_sched_increase_karma(struct drm
  6.  
  7. spin_lock(&rq->lock);
  8. list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
  9. + if (!smp_load_acquire(&bad->s_fence)) {
  10. + spin_unlock(&rq->lock);
  11. + return;
  12. + }
  13. if (bad->s_fence->scheduled.context ==
  14. entity->fence_context) {
  15. if (atomic_read(&bad->karma) >
  16. @@ -543,7 +547,7 @@ EXPORT_SYMBOL(drm_sched_job_init);
  17. void drm_sched_job_cleanup(struct drm_sched_job *job)
  18. {
  19. dma_fence_put(&job->s_fence->finished);
  20. - job->s_fence = NULL;
  21. + smp_store_release(&job->s_fence, 0);
  22. }
  23. EXPORT_SYMBOL(drm_sched_job_cleanup);
  24.  
  25.  
  26. Panfrost uses multiple schedulers (one for each slot, so 2 in reality),
  27. and on a timeout has to stop all the schedulers to safely perform a
  28. reset. However more than one scheduler can trigger a timeout at the same
  29. time. This race condition results in jobs being freed while they are
  30. still in use.
  31.  
  32. Modify drm_sched_stop() to call cancel_delayed_work_sync() when stopping
  33. a scheduler different from the one belonging to the passed-in job.
  34. panfrost_job_timedout() is also modified so that only one thread at a
  35. time handles the reset. Any subsequent threads simply return, assuming
  36. that the first thread will handle the situation.
  37. https://patchwork.freedesktop.org/patch/333258/
  38. Note: MRFIXIT had to adjust this patch to apply to this tree.
  39. diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
  40. index f503c566e99f..6441c7fba6c4 100644
  41. --- a/drivers/gpu/drm/panfrost/panfrost_device.h
  42. +++ b/drivers/gpu/drm/panfrost/panfrost_device.h
  43. @@ -99,6 +99,8 @@ struct panfrost_device {
  44. unsigned long cur_volt;
  45. struct panfrost_devfreq_slot slot[NUM_JOB_SLOTS];
  46. } devfreq;
  47. +
  48. + bool is_resetting;
  49. };
  50.  
  51. struct panfrost_mmu {
  52. diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
  53. index 05c85f45a0de..1b2019e08b43 100644
  54. --- a/drivers/gpu/drm/panfrost/panfrost_job.c
  55. +++ b/drivers/gpu/drm/panfrost/panfrost_job.c
  56. @@ -383,6 +383,14 @@
  57.  
  58. mutex_lock(&pfdev->reset_lock);
  59.  
  60. + if (pfdev->is_resetting) {
  61. + mutex_unlock(&pfdev->reset_lock);
  62. + return;
  63. + }
  64. + pfdev->is_resetting = true;
  65. +
  66. + mutex_unlock(&pfdev->reset_lock);
  67. +
  68. for (i = 0; i < NUM_JOB_SLOTS; i++)
  69. drm_sched_stop(&pfdev->js->queue[i].sched, sched_job);
  70.  
  71. @@ -398,7 +406,7 @@
  72. }
  73. spin_unlock_irqrestore(&pfdev->js->job_lock, flags);
  74.  
  75. - /* panfrost_core_dump(pfdev); */
  76. + mutex_lock(&pfdev->reset_lock);
  77.  
  78. panfrost_devfreq_record_transition(pfdev, js);
  79. panfrost_device_reset(pfdev);
  80. @@ -410,6 +418,7 @@
  81. for (i = 0; i < NUM_JOB_SLOTS; i++)
  82. drm_sched_start(&pfdev->js->queue[i].sched, true);
  83.  
  84. + pfdev->is_resetting = false;
  85. mutex_unlock(&pfdev->reset_lock);
  86. }
  87.  
  88. diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
  89. index 148468447ba9..bc6d1862ec8a 100644
  90. --- a/drivers/gpu/drm/scheduler/sched_main.c
  91. +++ b/drivers/gpu/drm/scheduler/sched_main.c
  92. @@ -415,7 +415,10 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
  93. * this TDR finished and before the newly restarted jobs had a
  94. * chance to complete.
  95. */
  96. - cancel_delayed_work(&sched->work_tdr);
  97. + if (bad->sched != sched)
  98. + cancel_delayed_work_sync(&sched->work_tdr);
  99. + else
  100. + cancel_delayed_work(&sched->work_tdr);
  101. }
  102.  
  103. EXPORT_SYMBOL(drm_sched_stop);
  104.  
  105. https://patchwork.kernel.org/cover/10954237/
  106. "32-bit hack"
  107. --- a/drivers/iommu/io-pgtable-arm.c
  108. +++ b/drivers/iommu/io-pgtable-arm.c
  109. @@ -1023,7 +1023,9 @@ arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
  110. iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
  111. if (iop) {
  112. u64 mair, ttbr;
  113. + struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(&iop->ops);
  114.  
  115. + data->levels = 4;
  116. /* Copy values as union fields overlap */
  117. mair = cfg->arm_lpae_s1_cfg.mair[0];
  118. ttbr = cfg->arm_lpae_s1_cfg.ttbr[0];
  119.  
  120. --- a/drivers/iommu/io-pgtable-arm.c
  121. +++ b/drivers/iommu/io-pgtable-arm.c
  122. @@ -1016,7 +1016,7 @@ arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
  123. {
  124. struct io_pgtable *iop;
  125.  
  126. - if (cfg->ias != 48 || cfg->oas > 40)
  127. + if (cfg->ias > 48 || cfg->oas > 40)
  128. return NULL;
  129.  
  130. cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement