Guest User

0901-add-icu-smp-support.patch

a guest
Jan 30th, 2019
116
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
  1. --- a/arch/mips/lantiq/irq.c    2019-01-30 02:20:35.739994259 +0100
  2. +++ b/arch/mips/lantiq/irq.c    2019-01-30 23:15:08.140803511 +0100
  3. @@ -49,8 +49,8 @@
  4.   */
  5.  #define LTQ_ICU_EBU_IRQ        22
  6.  
  7. -#define ltq_icu_w32(m, x, y)   ltq_w32((x), ltq_icu_membase[m] + (y))
  8. -#define ltq_icu_r32(m, x)  ltq_r32(ltq_icu_membase[m] + (x))
  9. +#define ltq_icu_w32(vpe, m, x, y)  ltq_w32((x), ltq_icu_membase[vpe][m] + (y))
  10. +#define ltq_icu_r32(vpe, m, x)     ltq_r32(ltq_icu_membase[vpe][m] + (x))
  11.  
  12.  #define ltq_eiu_w32(x, y)  ltq_w32((x), ltq_eiu_membase + (y))
  13.  #define ltq_eiu_r32(x)     ltq_r32(ltq_eiu_membase + (x))
  14. @@ -62,11 +62,50 @@
  15.  /* we have a cascade of 8 irqs */
  16.  #define MIPS_CPU_IRQ_CASCADE       8
  17.  
  18. +#define MAX_VPES 2
  19. +
  20. +/*
  21. + * Convenience Macro.  Should be somewhere generic.
  22. + */
  23. +#define get_current_vpe()   \
  24. +   ((read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE)
  25. +
  26. +
  27. +#if 1  // TODO debug? SMP cores can access at the same time
  28. +#if defined(CONFIG_SMP)
  29. +#define LOCK_VPE() \
  30. +   local_irq_save(flags); \
  31. +   mtflags = dmt()
  32. +
  33. +#define UNLOCK_VPE() \
  34. +   emt(mtflags); \
  35. +   local_irq_restore(flags)
  36. +
  37. +#define LOCK_CORE() \
  38. +   local_irq_save(flags); \
  39. +   mtflags = dvpe()
  40. +
  41. +#define UNLOCK_CORE() \
  42. +   evpe(mtflags); \
  43. +   local_irq_restore(flags)
  44. +#else /* !CONFIG_SMP */
  45. +#define LOCK_VPE()
  46. +#define UNLOCK_VPE()
  47. +#endif /* CONFIG_SMP */
  48. +
  49. +#else  // TODO debug future delete
  50. +#define LOCK_VPE() (void)flags;(void)mtflags
  51. +#define UNLOCK_VPE()
  52. +#define LOCK_CORE()    (void)flags;(void)mtflags
  53. +#define UNLOCK_CORE()
  54. +#endif
  55. +
  56.  static int exin_avail;
  57.  static u32 ltq_eiu_irq[MAX_EIU];
  58. -static void __iomem *ltq_icu_membase[MAX_IM];
  59. +static void __iomem *ltq_icu_membase[MAX_VPES][MAX_IM];
  60.  static void __iomem *ltq_eiu_membase;
  61.  static struct irq_domain *ltq_domain;
  62. +static DEFINE_SPINLOCK(ltq_eiu_lock);
  63.  static int ltq_perfcount_irq;
  64.  
  65.  int ltq_eiu_get_irq(int exin)
  66. @@ -81,9 +120,14 @@
  67.     u32 ier = LTQ_ICU_IM0_IER;
  68.     int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
  69.     int im = offset / INT_NUM_IM_OFFSET;
  70. -
  71. +   int vpe = get_current_vpe();
  72. +#if defined(CONFIG_SMP)
  73. +   unsigned long flags, mtflags;
  74. +#endif
  75.     offset %= INT_NUM_IM_OFFSET;
  76. -   ltq_icu_w32(im, ltq_icu_r32(im, ier) & ~BIT(offset), ier);
  77. +   LOCK_VPE();
  78. +   ltq_icu_w32(vpe, im, ltq_icu_r32(vpe, im, ier) & ~BIT(offset), ier);
  79. +   UNLOCK_VPE();
  80.  }
  81.  
  82.  void ltq_mask_and_ack_irq(struct irq_data *d)
  83. @@ -92,10 +136,16 @@
  84.     u32 isr = LTQ_ICU_IM0_ISR;
  85.     int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
  86.     int im = offset / INT_NUM_IM_OFFSET;
  87. +   int vpe = get_current_vpe();
  88. +#if defined(CONFIG_SMP)
  89. +   unsigned long flags, mtflags;
  90. +#endif
  91.  
  92.     offset %= INT_NUM_IM_OFFSET;
  93. -   ltq_icu_w32(im, ltq_icu_r32(im, ier) & ~BIT(offset), ier);
  94. -   ltq_icu_w32(im, BIT(offset), isr);
  95. +   LOCK_VPE();
  96. +   ltq_icu_w32(vpe, im, ltq_icu_r32(vpe, im, ier) & ~BIT(offset), ier);
  97. +   ltq_icu_w32(vpe, im, BIT(offset), isr);
  98. +   UNLOCK_VPE();
  99.  }
  100.  EXPORT_SYMBOL(ltq_mask_and_ack_irq);
  101.  
  102. @@ -104,24 +154,43 @@
  103.     u32 isr = LTQ_ICU_IM0_ISR;
  104.     int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
  105.     int im = offset / INT_NUM_IM_OFFSET;
  106. +   int vpe = get_current_vpe();
  107. +#if defined(CONFIG_SMP)
  108. +   unsigned long flags, mtflags;
  109. +#endif
  110.  
  111.     offset %= INT_NUM_IM_OFFSET;
  112. -   ltq_icu_w32(im, BIT(offset), isr);
  113. +   LOCK_VPE();
  114. +   ltq_icu_w32(vpe, im, BIT(offset), isr);
  115. +   UNLOCK_VPE();
  116.  }
  117.  
  118.  void ltq_enable_irq(struct irq_data *d)
  119.  {
  120.     u32 ier = LTQ_ICU_IM0_IER;
  121. +// u32 isr = LTQ_ICU_IM0_ISR;
  122.     int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
  123.     int im = offset / INT_NUM_IM_OFFSET;
  124. +   int vpe = get_current_vpe();
  125. +#if defined(CONFIG_SMP)
  126. +   unsigned long flags, mtflags;
  127. +#endif
  128.  
  129.     offset %= INT_NUM_IM_OFFSET;
  130. -   ltq_icu_w32(im, ltq_icu_r32(im, ier) | BIT(offset), ier);
  131. +   LOCK_VPE();
  132. +
  133. +   // TODO present in the v3.10 kernel, system is OK without it
  134. +   /* Bug fix for fake interrupt */
  135. +   //ltq_icu_w32(vpe, im, BIT(offset), isr);
  136. +
  137. +   ltq_icu_w32(vpe, im, ltq_icu_r32(vpe, im, ier) | BIT(offset), ier);
  138. +   UNLOCK_VPE();
  139.  }
  140.  
  141.  static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
  142.  {
  143.     int i;
  144. +   unsigned long flags;
  145.  
  146.     for (i = 0; i < exin_avail; i++) {
  147.         if (d->hwirq == ltq_eiu_irq[i]) {
  148. @@ -158,8 +227,11 @@
  149.             if (edge)
  150.                 irq_set_handler(d->hwirq, handle_edge_irq);
  151.  
  152. -           ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
  153. -               (val << (i * 4)), LTQ_EIU_EXIN_C);
  154. +           // v3.10 kernel has this atomic for SMP
  155. +           spin_lock_irqsave(&ltq_eiu_lock, flags);
  156. +           ltq_eiu_w32((ltq_eiu_r32(LTQ_EIU_EXIN_C) & (~(7 << (i * 4)))) |
  157. +                   (val << (i * 4)), LTQ_EIU_EXIN_C);
  158. +           spin_unlock_irqrestore(&ltq_eiu_lock, flags);
  159.         }
  160.     }
  161.  
  162. @@ -203,6 +275,36 @@
  163.     }
  164.  }
  165.  
  166. +#if defined(CONFIG_MIPS_MT_SMP)
  167. +static int ltq_icu_irq_set_affinity(struct irq_data *d,
  168. +                   const struct cpumask *cpumask, bool force)
  169. +{
  170. +   int cpu;
  171. +   unsigned long flags;
  172. +   unsigned int  mtflags;
  173. +   u32 ier = LTQ_ICU_IM0_IER;
  174. +   int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
  175. +   int im = offset / INT_NUM_IM_OFFSET;
  176. +
  177. +   LOCK_CORE();
  178. +
  179. +   offset %= INT_NUM_IM_OFFSET;
  180. +
  181. +   for_each_online_cpu(cpu) {
  182. +       if (!cpumask_test_cpu(cpu, cpumask))
  183. +           ltq_icu_w32(cpu, im, ltq_icu_r32(cpu, im, ier) & ~BIT(offset), ier);
  184. +       else
  185. +           ltq_icu_w32(cpu, im, ltq_icu_r32(cpu, im, ier) | BIT(offset), ier);
  186. +   }
  187. +
  188. +   //v4 kernel requires this, taken from some other SMP board
  189. +   irq_data_update_effective_affinity(d, cpumask);
  190. +
  191. +   UNLOCK_CORE();
  192. +   return IRQ_SET_MASK_OK;
  193. +}
  194. +#endif
  195. +
  196.  static struct irq_chip ltq_irq_type = {
  197.     .name = "icu",
  198.     .irq_enable = ltq_enable_irq,
  199. @@ -211,6 +313,9 @@
  200.     .irq_ack = ltq_ack_irq,
  201.     .irq_mask = ltq_disable_irq,
  202.     .irq_mask_ack = ltq_mask_and_ack_irq,
  203. +#if defined(CONFIG_MIPS_MT_SMP)
  204. +   .irq_set_affinity = ltq_icu_irq_set_affinity,
  205. +#endif
  206.  };
  207.  
  208.  static struct irq_chip ltq_eiu_type = {
  209. @@ -231,8 +336,10 @@
  210.     int module = irq_desc_get_irq(desc) - 2;
  211.     u32 irq;
  212.     int hwirq;
  213. +   int vpe = get_current_vpe();
  214.  
  215. -   irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR);
  216. +   //v3.10 has lock_vpe around this, is it really necessary?
  217. +   irq = ltq_icu_r32(vpe, module, LTQ_ICU_IM0_IOSR);
  218.     if (irq == 0)
  219.         return;
  220.  
  221. @@ -275,29 +382,60 @@
  222.  int __init icu_of_init(struct device_node *node, struct device_node *parent)
  223.  {
  224.     struct device_node *eiu_node;
  225. +#if defined(CONFIG_MIPS_MT_SMP)
  226. +   struct device_node *icu1_node;
  227. +#endif
  228.     struct resource res;
  229.     int i, ret;
  230.  
  231.     for (i = 0; i < MAX_IM; i++) {
  232.         if (of_address_to_resource(node, i, &res))
  233. -           panic("Failed to get icu memory range");
  234. +           panic("Failed to get icu0 memory range");
  235.  
  236.         if (!request_mem_region(res.start, resource_size(&res),
  237.                     res.name))
  238. -           pr_err("Failed to request icu memory");
  239. +           pr_err("Failed to request icu0 memory");
  240. +
  241. +       if (of_node_cmp(node->name, "icu0") == 0) {
  242. +           ltq_icu_membase[0][i] = ioremap_nocache(res.start,
  243. +               resource_size(&res));
  244. +       }
  245. +       if (!ltq_icu_membase[0][i])
  246. +           panic("Failed to remap icu0 memory");
  247. +   }
  248. +
  249. +#if defined(CONFIG_MIPS_MT_SMP)
  250. +   // TODO add to the icu0 reg array or new node, what if requires a new node, TODO merge with icu0 node?
  251. +
  252. +   icu1_node = of_find_compatible_node(NULL, NULL, "lantiq,icu1");
  253. +   for (i = 0; i < MAX_IM; i++) {
  254. +       if (of_address_to_resource(icu1_node, i, &res))
  255. +           panic("Failed to get icu1 memory range");
  256.  
  257. -       ltq_icu_membase[i] = ioremap_nocache(res.start,
  258. +       if (!request_mem_region(res.start, resource_size(&res),
  259. +                   res.name))
  260. +           pr_err("Failed to request icu1 memory");
  261. +
  262. +       if (of_node_cmp(icu1_node->name, "icu1") == 0){
  263. +           ltq_icu_membase[1][i] = ioremap_nocache(res.start,
  264.                     resource_size(&res));
  265. -       if (!ltq_icu_membase[i])
  266. -           panic("Failed to remap icu memory");
  267. +       }
  268. +
  269. +       if (!ltq_icu_membase[1][i])
  270. +           panic("Failed to remap icu1 memory");
  271.     }
  272. +#endif
  273.  
  274.     /* turn off all irqs by default */
  275.     for (i = 0; i < MAX_IM; i++) {
  276.         /* make sure all irqs are turned off by default */
  277. -       ltq_icu_w32(i, 0, LTQ_ICU_IM0_IER);
  278. +       ltq_icu_w32(0, i, 0, LTQ_ICU_IM0_IER);
  279.         /* clear all possibly pending interrupts */
  280. -       ltq_icu_w32(i, ~0, LTQ_ICU_IM0_ISR);
  281. +       ltq_icu_w32(0, i, ~0, LTQ_ICU_IM0_ISR);
  282. +#if defined(CONFIG_MIPS_MT_SMP)
  283. +       ltq_icu_w32(1, i, 0, LTQ_ICU_IM0_IER);
  284. +       ltq_icu_w32(1, i, ~0, LTQ_ICU_IM0_ISR);
  285. +#endif
  286.     }
  287.  
  288.     mips_cpu_irq_init();
RAW Paste Data