0901-add-icu-smp-support.patch

a guest
Jun 4th, 2019
214
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
Diff 12.65 KB | None | 0 0
--- a/arch/mips/lantiq/irq.c    2019-03-10 20:44:58.755134326 +0100
+++ b/arch/mips/lantiq/irq.c    2019-05-17 05:13:50.302149058 +0200
@@ -22,14 +22,21 @@
 #include <lantiq_soc.h>
 #include <irq.h>
 
+/*
+ * If defined, every IRQ enable call switches the interrupt to the
+ * other VPE. The usable VPEs can still be limited from userspace.
+ *
+ * If not defined, only the first VPE configured from userspace
+ * will be used.
+ */
+#define AUTO_AFFINITY_ROTATION
+
 /* register definitions - internal irqs */
-#define LTQ_ICU_IM0_ISR        0x0000
-#define LTQ_ICU_IM0_IER        0x0008
-#define LTQ_ICU_IM0_IOSR   0x0010
-#define LTQ_ICU_IM0_IRSR   0x0018
-#define LTQ_ICU_IM0_IMR        0x0020
-#define LTQ_ICU_IM1_ISR        0x0028
-#define LTQ_ICU_OFFSET     (LTQ_ICU_IM1_ISR - LTQ_ICU_IM0_ISR)
+#define LTQ_ICU_ISR        0x0000
+#define LTQ_ICU_IER        0x0008
+#define LTQ_ICU_IOSR       0x0010
+#define LTQ_ICU_IRSR       0x0018
+#define LTQ_ICU_IMR        0x0020
 
 /* register definitions - external irqs */
 #define LTQ_EIU_EXIN_C     0x0000
@@ -49,24 +56,27 @@
  */
 #define LTQ_ICU_EBU_IRQ        22
 
-#define ltq_icu_w32(m, x, y)   ltq_w32((x), ltq_icu_membase[m] + (y))
-#define ltq_icu_r32(m, x)  ltq_r32(ltq_icu_membase[m] + (x))
+#define ltq_icu_w32(vpe, m, x, y)  \
+   ltq_w32((x), ltq_icu_membase[vpe] + m*0x28 + (y))
+
+#define ltq_icu_r32(vpe, m, x)     \
+   ltq_r32(ltq_icu_membase[vpe] + m*0x28 + (x))
 
 #define ltq_eiu_w32(x, y)  ltq_w32((x), ltq_eiu_membase + (y))
 #define ltq_eiu_r32(x)     ltq_r32(ltq_eiu_membase + (x))
 
-/* our 2 ipi interrupts for VSMP */
-#define MIPS_CPU_IPI_RESCHED_IRQ   0
-#define MIPS_CPU_IPI_CALL_IRQ      1
-
 /* we have a cascade of 8 irqs */
 #define MIPS_CPU_IRQ_CASCADE       8
 
+#define MAX_VPES 2
+
 static int exin_avail;
 static u32 ltq_eiu_irq[MAX_EIU];
-static void __iomem *ltq_icu_membase[MAX_IM];
+static void __iomem *ltq_icu_membase[MAX_VPES];
 static void __iomem *ltq_eiu_membase;
 static struct irq_domain *ltq_domain;
+static DEFINE_SPINLOCK(ltq_eiu_lock);
+static DEFINE_RAW_SPINLOCK(ltq_icu_lock);
 static int ltq_perfcount_irq;
 
 int ltq_eiu_get_irq(int exin)
@@ -78,50 +88,104 @@
 
 void ltq_disable_irq(struct irq_data *d)
 {
-   u32 ier = LTQ_ICU_IM0_IER;
-   int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
-   int im = offset / INT_NUM_IM_OFFSET;
+   unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
+   unsigned long im = offset / INT_NUM_IM_OFFSET;
+   unsigned int vpe;
+   unsigned long flags;
 
    offset %= INT_NUM_IM_OFFSET;
-   ltq_icu_w32(im, ltq_icu_r32(im, ier) & ~BIT(offset), ier);
+
+   raw_spin_lock_irqsave(&ltq_icu_lock, flags);
+   for_each_present_cpu(vpe) {
+       ltq_icu_w32(vpe, im,
+               ltq_icu_r32(vpe, im, LTQ_ICU_IER) & ~BIT(offset),
+               LTQ_ICU_IER);
+   }
+   raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
 }
 
 void ltq_mask_and_ack_irq(struct irq_data *d)
 {
-   u32 ier = LTQ_ICU_IM0_IER;
-   u32 isr = LTQ_ICU_IM0_ISR;
-   int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
-   int im = offset / INT_NUM_IM_OFFSET;
+   unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
+   unsigned long im = offset / INT_NUM_IM_OFFSET;
+   unsigned int vpe;
+   unsigned long flags;
 
    offset %= INT_NUM_IM_OFFSET;
-   ltq_icu_w32(im, ltq_icu_r32(im, ier) & ~BIT(offset), ier);
-   ltq_icu_w32(im, BIT(offset), isr);
+
+   raw_spin_lock_irqsave(&ltq_icu_lock, flags);
+   for_each_present_cpu(vpe) {
+       ltq_icu_w32(vpe, im,
+               ltq_icu_r32(vpe, im, LTQ_ICU_IER) & ~BIT(offset),
+               LTQ_ICU_IER);
+       ltq_icu_w32(vpe, im, BIT(offset), LTQ_ICU_ISR);
+   }
+   raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
 }
 EXPORT_SYMBOL(ltq_mask_and_ack_irq);
 
 static void ltq_ack_irq(struct irq_data *d)
 {
-   u32 isr = LTQ_ICU_IM0_ISR;
-   int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
-   int im = offset / INT_NUM_IM_OFFSET;
+   unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
+   unsigned long im = offset / INT_NUM_IM_OFFSET;
+   unsigned int vpe;
+   unsigned long flags;
 
    offset %= INT_NUM_IM_OFFSET;
-   ltq_icu_w32(im, BIT(offset), isr);
+
+   raw_spin_lock_irqsave(&ltq_icu_lock, flags);
+   for_each_present_cpu(vpe) {
+       ltq_icu_w32(vpe, im, BIT(offset), LTQ_ICU_ISR);
+   }
+   raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
 }
 
 void ltq_enable_irq(struct irq_data *d)
 {
-   u32 ier = LTQ_ICU_IM0_IER;
-   int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
-   int im = offset / INT_NUM_IM_OFFSET;
+   unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
+   unsigned long im = offset / INT_NUM_IM_OFFSET;
+   unsigned int vpe;
+   unsigned long flags;
 
    offset %= INT_NUM_IM_OFFSET;
-   ltq_icu_w32(im, ltq_icu_r32(im, ier) | BIT(offset), ier);
+
+#if defined(AUTO_AFFINITY_ROTATION)
+   vpe = cpumask_next(smp_processor_id(),
+              irq_data_get_effective_affinity_mask(d));
+
+   /*
+    * There is a theoretical race if the affinity is changed in the
+    * meantime, but it would only cause the wrong VPE to be used until
+    * the next IRQ enable. Also, the SoC has only 2 VPEs, which fit in
+    * a single u32. The spinlock could be moved before the first mask
+    * readout and also taken in ltq_icu_irq_set_affinity.
+    */
+
+   if (vpe >= nr_cpu_ids)
+       vpe = cpumask_first(irq_data_get_effective_affinity_mask(d));
+#else
+   vpe = cpumask_first(irq_data_get_effective_affinity_mask(d));
+#endif
+
+   /* This shouldn't even be possible, except maybe during CPU hotplug spam */
+   if (unlikely(vpe >= nr_cpu_ids))
+       vpe = smp_processor_id();
+
+   raw_spin_lock_irqsave(&ltq_icu_lock, flags);
+
+   /* possible bugfix for spurious interrupts, from the UGW 3.x kernel */
+   ltq_icu_w32(vpe, im, BIT(offset), LTQ_ICU_ISR);
+
+   ltq_icu_w32(vpe, im, ltq_icu_r32(vpe, im, LTQ_ICU_IER) | BIT(offset),
+           LTQ_ICU_IER);
+
+   raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
 }
 
 static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
 {
    int i;
+   unsigned long flags;
 
    for (i = 0; i < exin_avail; i++) {
        if (d->hwirq == ltq_eiu_irq[i]) {
@@ -158,8 +222,12 @@
            if (edge)
                irq_set_handler(d->hwirq, handle_edge_irq);
 
-           ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
-               (val << (i * 4)), LTQ_EIU_EXIN_C);
+           /* the vendor v3.10 kernel makes this update atomic for SMP */
+           spin_lock_irqsave(&ltq_eiu_lock, flags);
+           ltq_eiu_w32((ltq_eiu_r32(LTQ_EIU_EXIN_C) &
+                   (~(7 << (i * 4)))) | (val << (i * 4)),
+                   LTQ_EIU_EXIN_C);
+           spin_unlock_irqrestore(&ltq_eiu_lock, flags);
        }
    }
 
@@ -203,6 +271,21 @@
    }
 }
 
+#if defined(CONFIG_SMP)
+static int ltq_icu_irq_set_affinity(struct irq_data *d,
+                   const struct cpumask *cpumask, bool force)
+{
+   struct cpumask tmask;
+
+   if (!cpumask_and(&tmask, cpumask, cpu_online_mask))
+       return -EINVAL;
+
+   irq_data_update_effective_affinity(d, &tmask);
+
+   return IRQ_SET_MASK_OK;
+}
+#endif
+
 static struct irq_chip ltq_irq_type = {
    .name = "icu",
    .irq_enable = ltq_enable_irq,
@@ -211,6 +294,9 @@
    .irq_ack = ltq_ack_irq,
    .irq_mask = ltq_disable_irq,
    .irq_mask_ack = ltq_mask_and_ack_irq,
+#if defined(CONFIG_SMP)
+   .irq_set_affinity = ltq_icu_irq_set_affinity,
+#endif
 };
 
 static struct irq_chip ltq_eiu_type = {
@@ -224,15 +310,19 @@
    .irq_mask = ltq_disable_irq,
    .irq_mask_ack = ltq_mask_and_ack_irq,
    .irq_set_type = ltq_eiu_settype,
+#if defined(CONFIG_SMP)
+   .irq_set_affinity = ltq_icu_irq_set_affinity,
+#endif
 };
 
 static void ltq_hw_irq_handler(struct irq_desc *desc)
 {
-   int module = irq_desc_get_irq(desc) - 2;
+   unsigned int module = irq_desc_get_irq(desc) - 2;
    u32 irq;
-   int hwirq;
+   irq_hw_number_t hwirq;
+   unsigned int vpe = smp_processor_id();
 
-   irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR);
+   irq = ltq_icu_r32(vpe, module, LTQ_ICU_IOSR);
    if (irq == 0)
        return;
 
@@ -253,6 +343,7 @@
 static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
 {
    struct irq_chip *chip = &ltq_irq_type;
+   struct irq_data *data;
    int i;
 
    if (hw < MIPS_CPU_IRQ_CASCADE)
@@ -262,6 +353,10 @@
        if (hw == ltq_eiu_irq[i])
            chip = &ltq_eiu_type;
 
+   data = irq_get_irq_data(irq);
+
+   irq_data_update_effective_affinity(data, cpumask_of(0));
+
    irq_set_chip_and_handler(irq, chip, handle_level_irq);
 
    return 0;
@@ -276,28 +371,40 @@
 {
    struct device_node *eiu_node;
    struct resource res;
-   int i, ret;
+   int i, ret, vpe;
 
-   for (i = 0; i < MAX_IM; i++) {
-       if (of_address_to_resource(node, i, &res))
-           panic("Failed to get icu memory range");
+   /* TODO: someone please update and test the devicetree for danube
+    * (not SMP, the loop below should run once), ar9, amazon and falcon
+    * (no info). Addresses are known only for vr9, tested only in
+    * generic SMP (= no VPE reserved for firmware).
+    */
+   for_each_possible_cpu(vpe) {
+       if (of_address_to_resource(node, vpe, &res))
+           panic("Failed to get icu%i memory range", vpe);
 
        if (!request_mem_region(res.start, resource_size(&res),
                    res.name))
-           pr_err("Failed to request icu memory");
+           pr_err("Failed to request icu%i memory\n", vpe);
 
-       ltq_icu_membase[i] = ioremap_nocache(res.start,
+       ltq_icu_membase[vpe] = ioremap_nocache(res.start,
                    resource_size(&res));
-       if (!ltq_icu_membase[i])
-           panic("Failed to remap icu memory");
+
+       if (!ltq_icu_membase[vpe])
+           panic("Failed to remap icu%i memory", vpe);
    }
 
    /* turn off all irqs by default */
-   for (i = 0; i < MAX_IM; i++) {
-       /* make sure all irqs are turned off by default */
-       ltq_icu_w32(i, 0, LTQ_ICU_IM0_IER);
-       /* clear all possibly pending interrupts */
-       ltq_icu_w32(i, ~0, LTQ_ICU_IM0_ISR);
+   for_each_possible_cpu(vpe) {
+       for (i = 0; i < MAX_IM; i++) {
+           /* make sure all irqs are turned off by default */
+           ltq_icu_w32(vpe, i, 0, LTQ_ICU_IER);
+
+           /* clear all possibly pending interrupts */
+           ltq_icu_w32(vpe, i, ~0, LTQ_ICU_ISR);
+           ltq_icu_w32(vpe, i, ~0, LTQ_ICU_IMR);
+
+           ltq_icu_w32(vpe, i, 0, LTQ_ICU_IRSR);
+       }
    }
 
    mips_cpu_irq_init();
@@ -358,7 +465,7 @@
    return MIPS_CPU_TIMER_IRQ;
 }
 
-static struct of_device_id __initdata of_irq_ids[] = {
+static const struct of_device_id of_irq_ids[] __initconst = {
    { .compatible = "lantiq,icu", .data = icu_of_init },
    {},
 };
--- a/arch/mips/kernel/smp-mt.c 2019-03-05 17:58:03.000000000 +0100
+++ b/arch/mips/kernel/smp-mt.c 2019-05-16 03:09:02.268859090 +0200
@@ -125,6 +125,8 @@
                     STATUSF_IP6 | STATUSF_IP7);
    else
        change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 |
+                    STATUSF_IP2 | STATUSF_IP3 |
+                    STATUSF_IP4 | STATUSF_IP5 |
                     STATUSF_IP6 | STATUSF_IP7);
 }
 
--- a/arch/mips/boot/dts/vr9.dtsi   2019-03-10 19:03:35.747182390 +0100
+++ b/arch/mips/boot/dts/vr9.dtsi   2019-05-17 05:01:36.789319230 +0200
@@ -38,11 +38,9 @@
            #interrupt-cells = <1>;
            interrupt-controller;
            compatible = "lantiq,icu";
-           reg = <0x80200 0x28
-               0x80228 0x28
-               0x80250 0x28
-               0x80278 0x28
-               0x802a0 0x28>;
+
+           reg = <0x80200 0xc8 /* icu0 */
+               0x80300 0xc8>;  /* icu1 */
        };
 
        watchdog@803f0 {
--- a/arch/mips/boot/dts/danube.dtsi    2019-03-10 19:03:35.746182403 +0100
+++ b/arch/mips/boot/dts/danube.dtsi    2019-05-17 04:55:28.777920006 +0200
@@ -34,11 +34,12 @@
            #interrupt-cells = <1>;
            interrupt-controller;
            compatible = "lantiq,icu";
-           reg = <0x80200 0x28
-               0x80228 0x28
-               0x80250 0x28
-               0x80278 0x28
-               0x802a0 0x28>;
+
+           /*
+            * There is a second ICU, but the SoC is not SMP
+            * capable.
+            */
+           reg = <0x80200 0xc8>;
        };
 
        watchdog@803f0 {
--- a/arch/mips/boot/dts/ar9.dtsi   2019-03-10 19:03:35.746182403 +0100
+++ b/arch/mips/boot/dts/ar9.dtsi   2019-05-17 04:58:17.080815930 +0200
@@ -34,11 +34,8 @@
            #interrupt-cells = <1>;
            interrupt-controller;
            compatible = "lantiq,icu";
-           reg = <0x80200 0x28
-               0x80228 0x28
-               0x80250 0x28
-               0x80278 0x28
-               0x802a0 0x28>;
+           reg = <0x80200 0xc8>;   /* ICU0 */
+           /* TODO AR9 should have ICU1 (like VR9) too */
        };
 
        watchdog@803f0 {
--- a/arch/mips/boot/dts/amazonse.dtsi  2019-03-10 19:03:35.746182403 +0100
+++ b/arch/mips/boot/dts/amazonse.dtsi  2019-05-17 04:58:56.343325081 +0200
@@ -34,11 +34,7 @@
            #interrupt-cells = <1>;
            interrupt-controller;
            compatible = "lantiq,icu";
-           reg = <0x80200 0x28
-               0x80228 0x28
-               0x80250 0x28
-               0x80278 0x28
-               0x802a0 0x28>;
+           reg = <0x80200 0xc8>;   /* only 1 ICU */
        };
 
        watchdog@803f0 {
--- a/arch/mips/boot/dts/falcon.dtsi    2019-03-10 19:03:35.747182390 +0100
+++ b/arch/mips/boot/dts/falcon.dtsi    2019-05-17 05:00:42.536997478 +0200
@@ -293,11 +293,8 @@
            #interrupt-cells = <1>;
            interrupt-controller;
            compatible = "lantiq,icu";
-           reg = <0x80200 0x28
-               0x80228 0x28
-               0x80250 0x28
-               0x80278 0x28
-               0x802a0 0x28>;
+           reg = <0x80200 0xc8>;
+           /* TODO I don't know if there is another ICU */
        };
 
        watchdog@803F0 {
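
Usage note (not part of the patch): with the .irq_set_affinity callback added above, the VPEs allowed to service an ICU interrupt can be limited from userspace through the standard /proc/irq/<n>/smp_affinity interface, which is what the AUTO_AFFINITY_ROTATION comment refers to. Below is a minimal sketch; the IRQ number 63 is only a placeholder and should be looked up in /proc/interrupts on the target board.

/* sketch: restrict a Lantiq ICU interrupt to the second VPE from userspace */
#include <stdio.h>

int main(void)
{
	/* IRQ 63 is a placeholder; check /proc/interrupts for the real number */
	FILE *f = fopen("/proc/irq/63/smp_affinity", "w");

	if (!f) {
		perror("open smp_affinity");
		return 1;
	}

	fprintf(f, "2\n");	/* hex CPU mask: bit 1 set = VPE1 only */
	fclose(f);
	return 0;
}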