/*
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License version 2 as published
 *  by the Free Software Foundation.
 *
 * Copyright (C) 2010 John Crispin <john@phrozen.org>
 * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
 */

#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/irqdomain.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/module.h>

#include <asm/bootinfo.h>
#include <asm/irq_cpu.h>

#include <lantiq_soc.h>
#include <irq.h>

/* register definitions - internal irqs */
#define LTQ_ICU_IM0_ISR     0x0000
#define LTQ_ICU_IM0_IER     0x0008
#define LTQ_ICU_IM0_IOSR    0x0010
#define LTQ_ICU_IM0_IRSR    0x0018
#define LTQ_ICU_IM0_IMR     0x0020
#define LTQ_ICU_IM1_ISR     0x0028
#define LTQ_ICU_OFFSET      (LTQ_ICU_IM1_ISR - LTQ_ICU_IM0_ISR)

/* register definitions - external irqs */
#define LTQ_EIU_EXIN_C      0x0000
#define LTQ_EIU_EXIN_INIC   0x0004
#define LTQ_EIU_EXIN_INC    0x0008
#define LTQ_EIU_EXIN_INEN   0x000C

/* number of external interrupts */
#define MAX_EIU         6

/* the performance counter */
#define LTQ_PERF_IRQ        (INT_NUM_IM4_IRL0 + 31)

/*
 * irqs generated by devices attached to the EBU need to be acked in
 * a special manner
 */
#define LTQ_ICU_EBU_IRQ     22

#define ltq_icu_w32(vpe, m, x, y)   ltq_w32((x), ltq_icu_membase[vpe][m] + (y))
#define ltq_icu_r32(vpe, m, x)      ltq_r32(ltq_icu_membase[vpe][m] + (x))

#define ltq_eiu_w32(x, y)   ltq_w32((x), ltq_eiu_membase + (y))
#define ltq_eiu_r32(x)      ltq_r32(ltq_eiu_membase + (x))

/* our 2 ipi interrupts for VSMP */
#define MIPS_CPU_IPI_RESCHED_IRQ    0
#define MIPS_CPU_IPI_CALL_IRQ       1

/* the ICU irqs are cascaded behind the 8 MIPS CPU irqs */
#define MIPS_CPU_IRQ_CASCADE        8

#define MAX_VPES 2

/*
 * convenience macro; should live somewhere generic
 */
#define get_current_vpe()   \
        ((read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE)

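/*
 * NOTE: the MT-ASE lock helpers below are currently compiled out
 * (#if 0); in this configuration LOCK_VPE()/LOCK_CORE() only consume
 * the otherwise-unused flags/mtflags locals of SMP builds.
 */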
#if 0

#ifdef CONFIG_SMP
#define LOCK_VPE() \
    local_irq_save(flags); \
    mtflags = dmt()

#define UNLOCK_VPE() \
    emt(mtflags); \
    local_irq_restore(flags)

#define LOCK_CORE() \
    local_irq_save(flags); \
    mtflags = dvpe()

#define UNLOCK_CORE() \
    evpe(mtflags); \
    local_irq_restore(flags)
#else /* !CONFIG_SMP */
#define LOCK_VPE()
#define UNLOCK_VPE()
#define LOCK_CORE()
#define UNLOCK_CORE()
#endif /* CONFIG_SMP */

#else

#ifdef CONFIG_SMP
#define LOCK_VPE()      ((void)flags, (void)mtflags)
#define UNLOCK_VPE()
#define LOCK_CORE()     ((void)flags, (void)mtflags)
#define UNLOCK_CORE()
#else /* !CONFIG_SMP */
#define LOCK_VPE()
#define UNLOCK_VPE()
#define LOCK_CORE()
#define UNLOCK_CORE()
#endif /* CONFIG_SMP */

#endif

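/*
 * driver state: one ICU register window per VPE and interrupt module
 * (IM), plus the optional EIU block for the external interrupt pins
 */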
static int exin_avail;
static u32 ltq_eiu_irq[MAX_EIU];
static void __iomem *ltq_icu_membase[MAX_VPES][MAX_IM];
static void __iomem *ltq_eiu_membase;
static struct irq_domain *ltq_domain;
static DEFINE_SPINLOCK(ltq_eiu_lock);
static int ltq_perfcount_irq;

int ltq_eiu_get_irq(int exin)
{
    if (exin < exin_avail)
        return ltq_eiu_irq[exin];
    return -1;
}

void ltq_disable_irq(struct irq_data *d)
{
    u32 ier = LTQ_ICU_IM0_IER;
    int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
    int im = offset / INT_NUM_IM_OFFSET;
    int vpe = get_current_vpe();
#ifdef CONFIG_SMP
    unsigned long flags, mtflags;
#endif

    offset %= INT_NUM_IM_OFFSET;
    LOCK_VPE();
    ltq_icu_w32(vpe, im, ltq_icu_r32(vpe, im, ier) & ~BIT(offset), ier);
    UNLOCK_VPE();
}

void ltq_mask_and_ack_irq(struct irq_data *d)
{
    u32 ier = LTQ_ICU_IM0_IER;
    u32 isr = LTQ_ICU_IM0_ISR;
    int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
    int im = offset / INT_NUM_IM_OFFSET;
    int vpe = get_current_vpe();
#ifdef CONFIG_SMP
    unsigned long flags, mtflags;
#endif

    offset %= INT_NUM_IM_OFFSET;
    LOCK_VPE();
    ltq_icu_w32(vpe, im, ltq_icu_r32(vpe, im, ier) & ~BIT(offset), ier);
    ltq_icu_w32(vpe, im, BIT(offset), isr);
    UNLOCK_VPE();
}
EXPORT_SYMBOL(ltq_mask_and_ack_irq);

static void ltq_ack_irq(struct irq_data *d)
{
    u32 isr = LTQ_ICU_IM0_ISR;
    int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
    int im = offset / INT_NUM_IM_OFFSET;
    int vpe = get_current_vpe();
#ifdef CONFIG_SMP
    unsigned long flags, mtflags;
#endif

    offset %= INT_NUM_IM_OFFSET;
    LOCK_VPE();
    ltq_icu_w32(vpe, im, BIT(offset), isr);
    UNLOCK_VPE();
}

void ltq_enable_irq(struct irq_data *d)
{
    u32 ier = LTQ_ICU_IM0_IER;
    u32 isr = LTQ_ICU_IM0_ISR;
    int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
    int im = offset / INT_NUM_IM_OFFSET;
    int vpe = get_current_vpe();
#ifdef CONFIG_SMP
    unsigned long flags, mtflags;
#endif

    offset %= INT_NUM_IM_OFFSET;
    LOCK_VPE();
/* TODO: this workaround is present in the v3.10 kernel, but the system
 * seems to work without it; test before dropping it for good.
 */
#if 0
    /* bug fix for dummy interrupt:
     * if this is an EBU irq, we need to ack it or get a deadlock
     */
    if ((offset == LTQ_ICU_EBU_IRQ) && (im == 0) && LTQ_EBU_PCC_ISTAT)
        ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
            LTQ_EBU_PCC_ISTAT);

    /* bug fix for fake interrupt */
    ltq_icu_w32(vpe, im, BIT(offset), isr);
#else
    (void)isr;
#endif

    ltq_icu_w32(vpe, im, ltq_icu_r32(vpe, im, ier) | BIT(offset), ier);
    UNLOCK_VPE();
}

static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
{
    int i;
    unsigned long flags;

    for (i = 0; i < exin_avail; i++) {
        if (d->hwirq == ltq_eiu_irq[i]) {
            int val = 0;
            int edge = 0;

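            /*
             * EXIN_C holds one 4-bit trigger field per pin:
             * 1 rising, 2 falling, 3 both edges, 5 level high,
             * 6 level low (see the cases below)
             */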
            switch (type) {
            case IRQF_TRIGGER_NONE:
                break;
            case IRQF_TRIGGER_RISING:
                val = 1;
                edge = 1;
                break;
            case IRQF_TRIGGER_FALLING:
                val = 2;
                edge = 1;
                break;
            case IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING:
                val = 3;
                edge = 1;
                break;
            case IRQF_TRIGGER_HIGH:
                val = 5;
                break;
            case IRQF_TRIGGER_LOW:
                val = 6;
                break;
            default:
                pr_err("invalid type %d for irq %ld\n",
                    type, d->hwirq);
                return -EINVAL;
            }

            if (edge)
                irq_set_handler(d->hwirq, handle_edge_irq);

            /* TODO: the v3.10 version did this update atomically */
            spin_lock_irqsave(&ltq_eiu_lock, flags);
            ltq_eiu_w32((ltq_eiu_r32(LTQ_EIU_EXIN_C) &
                (~(7 << (i * 4)))) | (val << (i * 4)),
                LTQ_EIU_EXIN_C);
            spin_unlock_irqrestore(&ltq_eiu_lock, flags);
        }
    }

    return 0;
}

static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
{
    int i;

    ltq_enable_irq(d);
    for (i = 0; i < exin_avail; i++) {
        if (d->hwirq == ltq_eiu_irq[i]) {
            /* by default we are low level triggered */
            ltq_eiu_settype(d, IRQF_TRIGGER_LOW);
            /* clear all pending */
            ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INC) & ~BIT(i),
                LTQ_EIU_EXIN_INC);
            /* enable */
            ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | BIT(i),
                LTQ_EIU_EXIN_INEN);
            break;
        }
    }

    return 0;
}

static void ltq_shutdown_eiu_irq(struct irq_data *d)
{
    int i;

    ltq_disable_irq(d);
    for (i = 0; i < exin_avail; i++) {
        if (d->hwirq == ltq_eiu_irq[i]) {
            /* disable */
            ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~BIT(i),
                LTQ_EIU_EXIN_INEN);
            break;
        }
    }
}

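/*
 * on VSMP, IRQ affinity is implemented by setting or clearing the
 * line's enable bit in each online VPE's own copy of the IER
 */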
#ifdef CONFIG_MIPS_MT_SMP
static int ltq_icu_irq_set_affinity(struct irq_data *d,
                                    const struct cpumask *cpumask, bool force)
{
    int cpu;
    unsigned long flags;
    unsigned int mtflags;
    u32 ier = LTQ_ICU_IM0_IER;
    int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
    int im = offset / INT_NUM_IM_OFFSET;

    LOCK_CORE();

    offset %= INT_NUM_IM_OFFSET;

    for_each_online_cpu(cpu) {
        if (!cpumask_test_cpu(cpu, cpumask))
            ltq_icu_w32(cpu, im, ltq_icu_r32(cpu, im, ier) & ~BIT(offset), ier);
        else
            ltq_icu_w32(cpu, im, ltq_icu_r32(cpu, im, ier) | BIT(offset), ier);
    }

    /* v4.x kernels require the effective affinity to be kept up to
     * date; taken from another SMP board
     */
    irq_data_update_effective_affinity(d, cpumask);

    UNLOCK_CORE();
    return IRQ_SET_MASK_OK;
}
#endif

static struct irq_chip ltq_irq_type = {
    .name = "icu",
    .irq_enable = ltq_enable_irq,
    .irq_disable = ltq_disable_irq,
    .irq_unmask = ltq_enable_irq,
    .irq_ack = ltq_ack_irq,
    .irq_mask = ltq_disable_irq,
    .irq_mask_ack = ltq_mask_and_ack_irq,
#ifdef CONFIG_MIPS_MT_SMP
    .irq_set_affinity = ltq_icu_irq_set_affinity,
#endif
};

static struct irq_chip ltq_eiu_type = {
    .name = "eiu",
    .irq_startup = ltq_startup_eiu_irq,
    .irq_shutdown = ltq_shutdown_eiu_irq,
    .irq_enable = ltq_enable_irq,
    .irq_disable = ltq_disable_irq,
    .irq_unmask = ltq_enable_irq,
    .irq_ack = ltq_ack_irq,
    .irq_mask = ltq_disable_irq,
    .irq_mask_ack = ltq_mask_and_ack_irq,
    .irq_set_type = ltq_eiu_settype,
};

static void ltq_hw_irqdispatch(int module)
{
    u32 irq;
    int vpe = get_current_vpe();
#ifdef CONFIG_SMP
    unsigned long flags, mtflags;
#endif

    LOCK_VPE();
    irq = ltq_icu_r32(vpe, module, LTQ_ICU_IM0_IOSR);
    UNLOCK_VPE();

    if (irq == 0)
        return;

    /*
     * silicon bug causes only the msb set to 1 to be valid. all
     * other bits might be bogus
     */
    irq = __fls(irq);
    do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module));

    /* if this is an EBU irq, we need to ack it or get a deadlock */
    if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
        ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
            LTQ_EBU_PCC_ISTAT);
}

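/* generate one trivial per-module dispatch stub for each IM block */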
#define DEFINE_HWx_IRQDISPATCH(x)                   \
    static void ltq_hw ## x ## _irqdispatch(void)           \
    {                               \
        ltq_hw_irqdispatch(x);                  \
    }
DEFINE_HWx_IRQDISPATCH(0)
DEFINE_HWx_IRQDISPATCH(1)
DEFINE_HWx_IRQDISPATCH(2)
DEFINE_HWx_IRQDISPATCH(3)
DEFINE_HWx_IRQDISPATCH(4)

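/*
 * when the CP0 timer is wired to CPU irq 7 it bypasses the ICU and can
 * be dispatched directly
 */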
#if MIPS_CPU_TIMER_IRQ == 7
static void ltq_hw5_irqdispatch(void)
{
    do_IRQ(MIPS_CPU_TIMER_IRQ);
}
#else
DEFINE_HWx_IRQDISPATCH(5)
#endif

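/* the chained handlers hang off MIPS CPU irqs 2..7, hence the offset */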
static void ltq_hw_irq_handler(struct irq_desc *desc)
{
    ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2);
}

#ifdef CONFIG_MIPS_MT_SMP
void __init arch_init_ipiirq(int irq, struct irqaction *action)
{
    setup_irq(irq, action);
    irq_set_handler(irq, handle_percpu_irq);
}

static void ltq_sw0_irqdispatch(void)
{
    do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ);
}

static void ltq_sw1_irqdispatch(void)
{
    do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
}

static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
    scheduler_ipi();
    return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
    generic_smp_call_function_interrupt();
    return IRQ_HANDLED;
}

static struct irqaction irq_resched = {
    .handler    = ipi_resched_interrupt,
    .flags      = IRQF_PERCPU,
    .name       = "IPI_resched"
};

static struct irqaction irq_call = {
    .handler    = ipi_call_interrupt,
    .flags      = IRQF_PERCPU,
    .name       = "IPI_call"
};
#endif

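/*
 * main CPU interrupt dispatch: scan Cause.IP gated by Status.IM and
 * service the highest pending CPU irq first
 */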
asmlinkage void plat_irq_dispatch(void)
{
    unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
    int irq;

    if (!pending) {
        spurious_interrupt();
        return;
    }

    pending >>= CAUSEB_IP;
    while (pending) {
        irq = fls(pending) - 1;
        do_IRQ(MIPS_CPU_IRQ_BASE + irq);
        pending &= ~BIT(irq);
    }
}

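/*
 * domain map hook: external (EIU) pins get the eiu chip, everything
 * else behind the cascade gets the plain icu chip
 */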
static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
    struct irq_chip *chip = &ltq_irq_type;
    int i;

    if (hw < MIPS_CPU_IRQ_CASCADE)
        return 0;

    for (i = 0; i < exin_avail; i++)
        if (hw == ltq_eiu_irq[i])
            chip = &ltq_eiu_type;

    irq_set_chip_and_handler(irq, chip, handle_level_irq);

    return 0;
}

static const struct irq_domain_ops irq_domain_ops = {
    .xlate = irq_domain_xlate_onetwocell,
    .map = icu_map,
};

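/*
 * probe: map the per-VPE ICU register windows, mask and ack everything,
 * install the cascade/vectored handlers, create the irq domain and
 * optionally set up the EIU
 */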
int __init icu_of_init(struct device_node *node, struct device_node *parent)
{
    struct device_node *eiu_node;
#if defined(CONFIG_MIPS_MT_SMP)
    struct device_node *icu1_node;
#endif
    struct resource res;
    int i, ret;

    for (i = 0; i < MAX_IM; i++) {
        if (of_address_to_resource(node, i, &res))
            panic("Failed to get icu0 memory range");

        if (!request_mem_region(res.start, resource_size(&res),
                    res.name))
            pr_err("Failed to request icu0 memory\n");

        /* the node may carry a different name in the devicetree */
        pr_info("%s\n", node->name);

        if (of_node_cmp(node->name, "icu0") == 0) {
            ltq_icu_membase[0][i] = ioremap_nocache(res.start,
                resource_size(&res));
        }
        if (!ltq_icu_membase[0][i])
            panic("Failed to remap icu0 memory");
    }

#if defined(CONFIG_MIPS_MT_SMP)
    /* requires a separate node; TODO: merge with the icu0 node? */
    icu1_node = of_find_compatible_node(NULL, NULL, "lantiq,icu1");
    for (i = 0; i < MAX_IM; i++) {
        if (of_address_to_resource(icu1_node, i, &res))
            panic("Failed to get icu1 memory range");

        if (!request_mem_region(res.start, resource_size(&res),
                    res.name))
            pr_err("Failed to request icu1 memory\n");

        if (of_node_cmp(icu1_node->name, "icu1") == 0) {
            ltq_icu_membase[1][i] = ioremap_nocache(res.start,
                resource_size(&res));
        }

        if (!ltq_icu_membase[1][i])
            panic("Failed to remap icu1 memory");
    }
#endif

    /* turn off all irqs by default */
    for (i = 0; i < MAX_IM; i++) {
        ltq_icu_w32(0, i, 0, LTQ_ICU_IM0_IER);
        /* clear all possibly pending interrupts */
        ltq_icu_w32(0, i, ~0, LTQ_ICU_IM0_ISR);
#if defined(CONFIG_MIPS_MT_SMP)
        ltq_icu_w32(1, i, 0, LTQ_ICU_IM0_IER);
        ltq_icu_w32(1, i, ~0, LTQ_ICU_IM0_ISR);
#endif
    }

    mips_cpu_irq_init();

    for (i = 0; i < MAX_IM; i++)
        irq_set_chained_handler(i + 2, ltq_hw_irq_handler);

    if (cpu_has_vint) {
        pr_info("Setting up vectored interrupts\n");
        set_vi_handler(2, ltq_hw0_irqdispatch);
        set_vi_handler(3, ltq_hw1_irqdispatch);
        set_vi_handler(4, ltq_hw2_irqdispatch);
        set_vi_handler(5, ltq_hw3_irqdispatch);
        set_vi_handler(6, ltq_hw4_irqdispatch);
        set_vi_handler(7, ltq_hw5_irqdispatch);
    }

    ltq_domain = irq_domain_add_linear(node,
        (MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
        &irq_domain_ops, 0);

#if defined(CONFIG_MIPS_MT_SMP)
    if (cpu_has_vint) {
        pr_info("Setting up IPI vectored interrupts\n");
        set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ltq_sw0_irqdispatch);
        set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ltq_sw1_irqdispatch);
    }
    arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ,
        &irq_resched);
    arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ, &irq_call);
#endif

#ifndef CONFIG_MIPS_MT_SMP
    set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
        IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
#else
    set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
        IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
#endif

    /* tell oprofile which irq to use */
    ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);

    /*
     * if the timer irq is not one of the mips irqs we need to
     * create a mapping
     */
    if (MIPS_CPU_TIMER_IRQ != 7)
        irq_create_mapping(ltq_domain, MIPS_CPU_TIMER_IRQ);

    /* the external interrupts are optional and xway only */
    eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
    if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
        /* find out how many external irq sources we have */
        exin_avail = of_property_count_u32_elems(eiu_node,
                             "lantiq,eiu-irqs");

        if (exin_avail > MAX_EIU)
            exin_avail = MAX_EIU;

        ret = of_property_read_u32_array(eiu_node, "lantiq,eiu-irqs",
                        ltq_eiu_irq, exin_avail);
        if (ret)
            panic("failed to load external irq resources");

        if (!request_mem_region(res.start, resource_size(&res),
                            res.name))
            pr_err("Failed to request eiu memory\n");

        ltq_eiu_membase = ioremap_nocache(res.start,
                            resource_size(&res));
        if (!ltq_eiu_membase)
            panic("Failed to remap eiu memory");
    }

    return 0;
}

int get_c0_perfcount_int(void)
{
    return ltq_perfcount_irq;
}
EXPORT_SYMBOL_GPL(get_c0_perfcount_int);

unsigned int get_c0_compare_int(void)
{
    return MIPS_CPU_TIMER_IRQ;
}

static struct of_device_id __initdata of_irq_ids[] = {
    { .compatible = "lantiq,icu", .data = icu_of_init },
    {},
};

void __init arch_init_irq(void)
{
    of_irq_init(of_irq_ids);
}