/*
 * Atheros AR71xx/AR724x/AR913x specific interrupt handling
 *
 * Copyright (C) 2010-2011 Jaiganesh Narayanan
 * Copyright (C) 2008-2011 Gabor Juhos
 * Copyright (C) 2008 Imre Kaloz
 *
 * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>

#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/ar71xx_regs.h>

#include "common.h"

#if defined(CONFIG_AVM_SIMPLE_PROFILING)
#include <linux/avm_profile.h>		/* assumed header for the AVM profiling hooks */
#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/
#if defined(CONFIG_AVM_POWER)
#include <linux/avm_power.h>		/* assumed header for avm_cpu_wait_end() */
#endif /*--- #if defined(CONFIG_AVM_POWER) ---*/

static void (*ath79_ip2_handler)(void);
static void (*ath79_ip3_handler)(void);

/*------------------------------------------------------------------------------------------*\
\*------------------------------------------------------------------------------------------*/
static void ath_dispatch_gpio_intr(void)
{
	int pending, i;

	pending = ath_reg_rd(ATH_GPIO_INT_PENDING) &
		  ath_reg_rd(ATH_GPIO_INT_ENABLE);

	ath_reg_rmw_clear(ATH_RST_MISC_INTERRUPT_MASK, BIT(MISC_BIT_GPIO));
	ath_reg_rmw_clear(ATH_RST_MISC_INTERRUPT_STATUS, BIT(MISC_BIT_GPIO));

	for (i = 0; i < ATH79_GPIO_IRQ_COUNT; i++) {
		if (pending & (1 << i))
			do_IRQ(ATH79_GPIO_IRQn(i));
	}

	ath_reg_rmw_set(ATH_GPIO_INT_ENABLE, BIT(MISC_BIT_GPIO));
}

/*------------------------------------------------------------------------------------------*\
\*------------------------------------------------------------------------------------------*/
static void ath79_misc_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	void __iomem *base = ath79_reset_base;
	u32 pending;

	pending = __raw_readl(base + ATH_RESET_REG_MISC_INTERRUPT_STATUS) &
		  __raw_readl(base + ATH_RESET_REG_MISC_INTERRUPT_ENABLE);

	if (!pending) {
		spurious_interrupt();
		return;
	}

	while (pending) {
		int bit = __ffs(pending);

#if defined(CONFIG_AVM_SIMPLE_PROFILING)
		avm_simple_profiling_log(avm_profile_data_type_hw_irq_begin,
					 (unsigned int)(irq_desc + ATH79_MISC_IRQ(bit)),
					 ATH79_MISC_IRQ(bit));
#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/
		if (bit == MISC_BIT_GPIO) {
			/* GPIO summary source: demux the individual GPIO lines */
			ath_dispatch_gpio_intr();
		} else {
			generic_handle_irq(ATH79_MISC_IRQ(bit));
		}
#if defined(CONFIG_AVM_SIMPLE_PROFILING)
		avm_simple_profiling_log(avm_profile_data_type_hw_irq_end,
					 (unsigned int)(irq_desc + ATH79_MISC_IRQ(bit)),
					 ATH79_MISC_IRQ(bit));
#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/
		pending &= ~BIT(bit);
	}
}

static void ar71xx_misc_irq_unmask(struct irq_data *d)
{
	unsigned int irq = d->irq - ATH79_MISC_IRQ_BASE;
	void __iomem *base = ath79_reset_base;
	u32 t;

	t = __raw_readl(base + ATH_RESET_REG_MISC_INTERRUPT_ENABLE);
	__raw_writel(t | (1 << irq), base + ATH_RESET_REG_MISC_INTERRUPT_ENABLE);

	/* flush write */
	__raw_readl(base + ATH_RESET_REG_MISC_INTERRUPT_ENABLE);
}

static void ar71xx_misc_irq_mask(struct irq_data *d)
{
	unsigned int irq = d->irq - ATH79_MISC_IRQ_BASE;
	void __iomem *base = ath79_reset_base;
	u32 t;

	t = __raw_readl(base + ATH_RESET_REG_MISC_INTERRUPT_ENABLE);
	__raw_writel(t & ~(1 << irq), base + ATH_RESET_REG_MISC_INTERRUPT_ENABLE);

	/* flush write */
	__raw_readl(base + ATH_RESET_REG_MISC_INTERRUPT_ENABLE);
}

static void ar724x_misc_irq_ack(struct irq_data *d)
{
	unsigned int irq = d->irq - ATH79_MISC_IRQ_BASE;
	void __iomem *base = ath79_reset_base;
	u32 t;

	t = __raw_readl(base + ATH_RESET_REG_MISC_INTERRUPT_STATUS);
	__raw_writel(t & ~(1 << irq), base + ATH_RESET_REG_MISC_INTERRUPT_STATUS);

	/* flush write */
	__raw_readl(base + ATH_RESET_REG_MISC_INTERRUPT_STATUS);
}
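/*
 * The MISC interrupt controller lives in the reset block.  Masking and
 * unmasking toggle the per-source bit in the MISC interrupt enable register;
 * the trailing read-back flushes the posted write.  How a source is
 * acknowledged differs between SoC generations, so the ack hook is filled in
 * later by ath79_misc_irq_init().
 *
 * Illustrative sketch only (the handler and data names are placeholders, not
 * part of this file): a driver bound to MISC source n simply requests the
 * virtual Linux IRQ number, e.g.
 *
 *	ret = request_irq(ATH79_MISC_IRQ(n), my_isr, 0, "my-dev", my_priv);
 *
 * and the chained handler above demultiplexes the hardware line to it.
 */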
static struct irq_chip ath79_misc_irq_chip = {
	.name		= "MISC",
	.irq_unmask	= ar71xx_misc_irq_unmask,
	.irq_mask	= ar71xx_misc_irq_mask,
};

static void __init ath79_misc_irq_init(void)
{
	void __iomem *base = ath79_reset_base;
	int i;

	__raw_writel(0, base + ATH_RESET_REG_MISC_INTERRUPT_ENABLE);
	__raw_writel(0, base + ATH_RESET_REG_MISC_INTERRUPT_STATUS);

	if (soc_is_ar71xx() || soc_is_ar913x()) {
		ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
	} else if (soc_is_ar724x() || soc_is_ar933x() || soc_is_ar934x() ||
		   soc_is_qca953x() || soc_is_qca955x() || soc_is_qca956x()) {
		ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
	} else {
		BUG();
	}

	for (i = ATH79_MISC_IRQ_BASE;
	     i < ATH79_MISC_IRQ_BASE + ATH79_MISC_IRQ_COUNT; i++) {
		irq_set_chip_and_handler(i, &ath79_misc_irq_chip,
					 handle_level_irq);
	}

	irq_set_chained_handler(ATH79_CPU_IRQ(6), ath79_misc_irq_handler);
}

#ifdef CONFIG_SOC_AR934X
static void ar934x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc)
{
	u32 status;

	disable_irq_nosync(irq);

	status = ath79_reset_rr(AR934X_RESET_REG_PCIE_WMAC_INT_STATUS);

	if (status & AR934X_PCIE_WMAC_INT_PCIE_ALL) {
		ath79_ddr_wb_flush(AR934X_DDR_REG_FLUSH_PCIE);
		generic_handle_irq(ATH79_IP2_IRQ(0));
	} else if (status & AR934X_PCIE_WMAC_INT_WMAC_ALL) {
		ath79_ddr_wb_flush(AR934X_DDR_REG_FLUSH_WMAC);
		generic_handle_irq(ATH79_IP2_IRQ(1));
	} else {
		spurious_interrupt();
	}

	enable_irq(irq);
}

static void ar934x_ip2_irq_init(void)
{
	int i;

	for (i = ATH79_IP2_IRQ_BASE;
	     i < ATH79_IP2_IRQ_BASE + ATH79_IP2_IRQ_COUNT; i++) {
		irq_set_chip_and_handler(i, &dummy_irq_chip, handle_level_irq);
	}

	irq_set_chained_handler(ATH79_CPU_IRQ(2), ar934x_ip2_irq_dispatch);
}
#endif

#ifdef CONFIG_SOC_QCA953X
#define QCA953X_PCIE_WMAC_INT_WMAC_ALL \
	(BIT(QCA953X_SSB__RESET__RST_PCIE_WMAC_INTERRUPT_STATUS__WMAC_RXHP_INT) | \
	 BIT(QCA953X_SSB__RESET__RST_PCIE_WMAC_INTERRUPT_STATUS__WMAC_RXLP_INT) | \
	 BIT(QCA953X_SSB__RESET__RST_PCIE_WMAC_INTERRUPT_STATUS__WMAC_TX_INT)   | \
	 BIT(QCA953X_SSB__RESET__RST_PCIE_WMAC_INTERRUPT_STATUS__WMAC_MISC_INT))

#define QCA953X_PCIE_WMAC_INT_PCIE_ALL \
	(BIT(QCA953X_SSB__RESET__RST_PCIE_WMAC_INTERRUPT_STATUS__PCIE_RC_INT3) | \
	 BIT(QCA953X_SSB__RESET__RST_PCIE_WMAC_INTERRUPT_STATUS__PCIE_RC_INT2) | \
	 BIT(QCA953X_SSB__RESET__RST_PCIE_WMAC_INTERRUPT_STATUS__PCIE_RC_INT1) | \
	 BIT(QCA953X_SSB__RESET__RST_PCIE_WMAC_INTERRUPT_STATUS__PCIE_RC_INT0) | \
	 BIT(QCA953X_SSB__RESET__RST_PCIE_WMAC_INTERRUPT_STATUS__PCIE_RC_IN))

static void qca953x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc)
{
	u32 status;

	disable_irq_nosync(irq);

	status = ath79_reset_rr(QCA953X_REGOFS__RESET__PCIE_WMAC_INTERRUPT_STATUS);

	if (status & QCA953X_PCIE_WMAC_INT_PCIE_ALL) {
		ath79_ddr_wb_flush(QCA953X_REGOFS__DDR__DDR_WB_FLUSH_PCIE);
		generic_handle_irq(ATH79_IP2_IRQ(0));
	} else if (status & QCA953X_PCIE_WMAC_INT_WMAC_ALL) {
		ath79_ddr_wb_flush(QCA953X_REGOFS__DDR__DDR_WB_FLUSH_WMAC);
		generic_handle_irq(ATH79_IP2_IRQ(1));
	} else {
		spurious_interrupt();
	}

	enable_irq(irq);
}

static void qca953x_irq_init(void)
{
	int i;

	for (i = ATH79_IP2_IRQ_BASE;
	     i < ATH79_IP2_IRQ_BASE + ATH79_IP2_IRQ_COUNT; i++) {
		irq_set_chip_and_handler(i, &dummy_irq_chip, handle_level_irq);
	}

	irq_set_chained_handler(ATH79_CPU_IRQ(2), qca953x_ip2_irq_dispatch);
}
#endif
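/*
 * AR934x and later route the WMAC and PCIe interrupt sources through a
 * shared status register instead of dedicated CPU IRQ lines.  The chained
 * dispatchers above and below read that status, flush the DDR write-back
 * buffer where the SoC provides a flush register, and forward the event to
 * the virtual IRQs ATH79_IP2_IRQ(0) (PCIe) and ATH79_IP2_IRQ(1) (WMAC).
 * Those virtual numbers are registered with dummy_irq_chip because all
 * masking happens at the device or at the CPU line, which is kept quiet
 * with disable_irq_nosync()/enable_irq() around the demux.
 */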
#ifdef CONFIG_SOC_QCA955X
static void qca955x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc)
{
	u32 status;

	disable_irq_nosync(irq);

	status = ath79_reset_rr(QCA955X_RESET_REG_EXT_INT_STATUS);
	status &= QCA955X_EXT_INT_PCIE_RC1_ALL | QCA955X_EXT_INT_WMAC_ALL;

	if (status == 0) {
		spurious_interrupt();
		goto enable;
	}

	if (status & QCA955X_EXT_INT_PCIE_RC1_ALL) {
		/* TODO: flush DDR? */
		generic_handle_irq(ATH79_IP2_IRQ(0));
	}

	if (status & QCA955X_EXT_INT_WMAC_ALL) {
		/* TODO: flush DDR? */
		generic_handle_irq(ATH79_IP2_IRQ(1));
	}

enable:
	enable_irq(irq);
}

static void qca955x_ip3_irq_dispatch(unsigned int irq, struct irq_desc *desc)
{
	u32 status;

	disable_irq_nosync(irq);

	status = ath79_reset_rr(QCA955X_RESET_REG_EXT_INT_STATUS);
	status &= QCA955X_EXT_INT_PCIE_RC2_ALL |
		  QCA955X_EXT_INT_USB1 |
		  QCA955X_EXT_INT_USB2;

	if (status == 0) {
		spurious_interrupt();
		goto enable;
	}

	if (status & QCA955X_EXT_INT_USB1) {
		/* TODO: flush DDR? */
		generic_handle_irq(ATH79_IP3_IRQ(0));
	}

	if (status & QCA955X_EXT_INT_USB2) {
		/* TODO: flush DDR? */
		generic_handle_irq(ATH79_IP3_IRQ(1));
	}

	if (status & QCA955X_EXT_INT_PCIE_RC2_ALL) {
		/* TODO: flush DDR? */
		generic_handle_irq(ATH79_IP3_IRQ(2));
	}

enable:
	enable_irq(irq);
}

static void qca955x_irq_init(void)
{
	int i;

	for (i = ATH79_IP2_IRQ_BASE;
	     i < ATH79_IP2_IRQ_BASE + ATH79_IP2_IRQ_COUNT; i++)
		irq_set_chip_and_handler(i, &dummy_irq_chip, handle_level_irq);

	irq_set_chained_handler(ATH79_CPU_IRQ(2), qca955x_ip2_irq_dispatch);

	for (i = ATH79_IP3_IRQ_BASE;
	     i < ATH79_IP3_IRQ_BASE + ATH79_IP3_IRQ_COUNT; i++)
		irq_set_chip_and_handler(i, &dummy_irq_chip, handle_level_irq);

	irq_set_chained_handler(ATH79_CPU_IRQ(3), qca955x_ip3_irq_dispatch);
}
#endif

#ifdef CONFIG_SOC_QCA956X
#define QCA956X_MASK__RESET__PCIE_WMAC_INTERRUPT_STATUS__USB \
	(BIT(QCA956X_SSB__RESET__PCIE_WMAC_INTERRUPT_STATUS__USB2_INT) | \
	 BIT(QCA956X_SSB__RESET__PCIE_WMAC_INTERRUPT_STATUS__USB1_INT))

#define QCA956X_MASK__RESET__PCIE_WMAC_INTERRUPT_STATUS__PCIE_RC2 \
	(BIT(QCA956X_SSB__RESET__PCIE_WMAC_INTERRUPT_STATUS__PCIE_RC2_INT3) | \
	 BIT(QCA956X_SSB__RESET__PCIE_WMAC_INTERRUPT_STATUS__PCIE_RC2_INT2) | \
	 BIT(QCA956X_SSB__RESET__PCIE_WMAC_INTERRUPT_STATUS__PCIE_RC2_INT1) | \
	 BIT(QCA956X_SSB__RESET__PCIE_WMAC_INTERRUPT_STATUS__PCIE_RC2_INT0) | \
	 BIT(QCA956X_SSB__RESET__PCIE_WMAC_INTERRUPT_STATUS__PCIE_RC2_IN))

#define QCA956X_MASK__RESET__PCIE_WMAC_INTERRUPT_STATUS__PCIE_RC1 \
	(BIT(QCA956X_SSB__RESET__PCIE_WMAC_INTERRUPT_STATUS__PCIE_RC1_INT3) | \
	 BIT(QCA956X_SSB__RESET__PCIE_WMAC_INTERRUPT_STATUS__PCIE_RC1_INT2) | \
	 BIT(QCA956X_SSB__RESET__PCIE_WMAC_INTERRUPT_STATUS__PCIE_RC1_INT1) | \
	 BIT(QCA956X_SSB__RESET__PCIE_WMAC_INTERRUPT_STATUS__PCIE_RC1_INT0) | \
	 BIT(QCA956X_SSB__RESET__PCIE_WMAC_INTERRUPT_STATUS__PCIE_RC1_IN))

#define QCA956X_MASK__RESET__PCIE_WMAC_INTERRUPT_STATUS__WMAC \
	(BIT(QCA956X_SSB__RESET__PCIE_WMAC_INTERRUPT_STATUS__WMAC_RXHP_INT) | \
	 BIT(QCA956X_SSB__RESET__PCIE_WMAC_INTERRUPT_STATUS__WMAC_RXLP_INT) | \
	 BIT(QCA956X_SSB__RESET__PCIE_WMAC_INTERRUPT_STATUS__WMAC_TX_INT)   | \
	 BIT(QCA956X_SSB__RESET__PCIE_WMAC_INTERRUPT_STATUS__WMAC_MISC_INT))
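/*
 * The masks above simply OR the individual status bits so the dispatchers
 * below can test for "any WMAC", "any PCIe RC1/RC2" or "any USB" event with
 * a single comparison.  The QCA956x mirrors the QCA955x split: WMAC and
 * PCIe RC1 are demultiplexed on IP2, USB1/USB2 and PCIe RC2 on IP3.
 */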
static void qca956x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc)
{
	u32 status;

	disable_irq_nosync(irq);

	status = ath79_reset_rr(QCA956X_REGOFS__RESET__PCIE_WMAC_INTERRUPT_STATUS);

	if (0 == (status & (QCA956X_MASK__RESET__PCIE_WMAC_INTERRUPT_STATUS__PCIE_RC1 |
			    QCA956X_MASK__RESET__PCIE_WMAC_INTERRUPT_STATUS__WMAC))) {
		spurious_interrupt();
		goto enable;
	}

	if (status & QCA956X_MASK__RESET__PCIE_WMAC_INTERRUPT_STATUS__PCIE_RC1) {
		/* TODO: flush DDR? */
		generic_handle_irq(ATH79_IP2_IRQ(0));
	}

	if (status & QCA956X_MASK__RESET__PCIE_WMAC_INTERRUPT_STATUS__WMAC) {
		/* TODO: flush DDR? */
		generic_handle_irq(ATH79_IP2_IRQ(1));
	}

enable:
	enable_irq(irq);
}

static void qca956x_ip3_irq_dispatch(unsigned int irq, struct irq_desc *desc)
{
	u32 status;

	disable_irq_nosync(irq);

	status = ath79_reset_rr(QCA956X_REGOFS__RESET__PCIE_WMAC_INTERRUPT_STATUS);

	if (0 == (status & (QCA956X_MASK__RESET__PCIE_WMAC_INTERRUPT_STATUS__PCIE_RC2 |
			    QCA956X_MASK__RESET__PCIE_WMAC_INTERRUPT_STATUS__USB))) {
		spurious_interrupt();
		goto enable;
	}

	if (status & BIT(QCA956X_SSB__RESET__PCIE_WMAC_INTERRUPT_STATUS__USB1_INT)) {
		/* TODO: flush DDR? */
		generic_handle_irq(ATH79_IP3_IRQ(0));
	}

	if (status & BIT(QCA956X_SSB__RESET__PCIE_WMAC_INTERRUPT_STATUS__USB2_INT)) {
		/* TODO: flush DDR? */
		generic_handle_irq(ATH79_IP3_IRQ(1));
	}

	if (status & QCA956X_MASK__RESET__PCIE_WMAC_INTERRUPT_STATUS__PCIE_RC2) {
		/* TODO: flush DDR? */
		generic_handle_irq(ATH79_IP3_IRQ(2));
	}

enable:
	enable_irq(irq);
}

static void qca956x_enable_timer_cb(void)
{
	u32 misc;

	misc = ath79_reset_rr(ATH_RESET_REG_MISC_INTERRUPT_MASK);
	misc |= BIT(QCA956X_SSB__RESET__RST_MISC_INTERRUPT_MASK__MIPS_SI_TIMERINT_MASK);
	ath79_reset_wr(ATH_RESET_REG_MISC_INTERRUPT_MASK, misc);
}

static void qca956x_irq_init(void)
{
	int i;

	for (i = ATH79_IP2_IRQ_BASE;
	     i < ATH79_IP2_IRQ_BASE + ATH79_IP2_IRQ_COUNT; i++) {
		irq_set_chip_and_handler(i, &dummy_irq_chip, handle_level_irq);
	}

	irq_set_chained_handler(ATH79_CPU_IRQ(2), qca956x_ip2_irq_dispatch);

	for (i = ATH79_IP3_IRQ_BASE;
	     i < ATH79_IP3_IRQ_BASE + ATH79_IP3_IRQ_COUNT; i++) {
		irq_set_chip_and_handler(i, &dummy_irq_chip, handle_level_irq);
	}

	irq_set_chained_handler(ATH79_CPU_IRQ(3), qca956x_ip3_irq_dispatch);

	/* QCA956x timer init workaround has to be applied right before setting
	 * up the clock. Else, there will be no jiffies */
	late_time_init = &qca956x_enable_timer_cb;
}
#endif
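/*
 * Top-level CPU interrupt dispatch.  The else-if chain below fixes the
 * service order: IP7 (CP0 timer) first, then IP2 (WLAN/PCIe), IP4 and IP5
 * (the two Ethernet MACs GE0/GE1), IP3 (USB) and finally IP6 (the MISC
 * chain).  Only one pending line is handled per exception; anything still
 * pending simply raises the exception again.  With
 * CONFIG_AVM_SIMPLE_PROFILING the "first" flag ensures the interrupted
 * context is logged only once per entry.
 */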
asmlinkage void plat_irq_dispatch(void)
{
	unsigned long pending;
#if defined(CONFIG_AVM_SIMPLE_PROFILING)
	unsigned int first = 1;
	struct pt_regs regs;

	regs.cp0_epc = read_c0_epc();
#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/

#if defined(CONFIG_AVM_POWER)
	/*--- even though r4k_wait_irqoff exists: call this anyway to trigger the system-load output ---*/
	avm_cpu_wait_end();
#endif /*--- #if defined(CONFIG_AVM_POWER) ---*/
#if defined(CONFIG_NMI_ARBITER_WORKAROUND)
	ath_workaround_nmi_check_cpugrant();
#endif /*--- #if defined(CONFIG_NMI_ARBITER_WORKAROUND) ---*/

	pending = read_c0_status() & read_c0_cause() & ST0_IM;

	if (pending & STATUSF_IP7) {
#if defined(CONFIG_AVM_SIMPLE_PROFILING)
		unsigned int irq = ATH79_CPU_IRQ(7);	/*--- ATH_CPU_IRQ_TIMER ---*/

		if (first) {
			avm_simple_profiling_enter_irqcontext(read_c0_epc());	/*--- wakeup or interrupted code ---*/
			first = 0;
		}
		avm_simple_profiling_log(avm_profile_data_type_hw_irq_begin,
					 (unsigned int)(irq_desc + irq), irq);
#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/
		do_IRQ(ATH79_CPU_IRQ(7));
#if defined(CONFIG_AVM_SIMPLE_PROFILING)
		avm_simple_profiling_log(avm_profile_data_type_hw_irq_end,
					 (unsigned int)(irq_desc + irq), irq);
#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/
	} else if (pending & STATUSF_IP2) {
#if defined(CONFIG_AVM_SIMPLE_PROFILING)
		unsigned int irq = ATH79_CPU_IRQ(2);	/*--- ATH_CPU_IRQ_WLAN ---*/

		if (first) {
			avm_simple_profiling_enter_irqcontext(read_c0_epc());	/*--- wakeup or interrupted code ---*/
			first = 0;
		}
		avm_simple_profiling_log(avm_profile_data_type_hw_irq_begin,
					 (unsigned int)(irq_desc + irq), irq);
#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/
		ath79_ip2_handler();
#if defined(CONFIG_AVM_SIMPLE_PROFILING)
		avm_simple_profiling_log(avm_profile_data_type_hw_irq_end,
					 (unsigned int)(irq_desc + irq), irq);
#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/
	} else if (pending & STATUSF_IP4) {
#if defined(CONFIG_AVM_SIMPLE_PROFILING)
		unsigned int irq = ATH79_CPU_IRQ(4);	/*--- ATH_CPU_IRQ_GE0 ---*/

		if (first) {
			avm_simple_profiling_enter_irqcontext(read_c0_epc());	/*--- wakeup or interrupted code ---*/
			first = 0;
		}
		avm_simple_profiling_log(avm_profile_data_type_hw_irq_begin,
					 (unsigned int)(irq_desc + irq), irq);
#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/
		do_IRQ(ATH79_CPU_IRQ(4));
#if defined(CONFIG_AVM_SIMPLE_PROFILING)
		avm_simple_profiling_log(avm_profile_data_type_hw_irq_end,
					 (unsigned int)(irq_desc + irq), irq);
#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/
	} else if (pending & STATUSF_IP5) {
#if defined(CONFIG_AVM_SIMPLE_PROFILING)
		unsigned int irq = ATH79_CPU_IRQ(5);	/*--- ATH_CPU_IRQ_GE1 ---*/

		if (first) {
			avm_simple_profiling_enter_irqcontext(read_c0_epc());	/*--- wakeup or interrupted code ---*/
			first = 0;
		}
		avm_simple_profiling_log(avm_profile_data_type_hw_irq_begin,
					 (unsigned int)(irq_desc + irq), irq);
#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/
		do_IRQ(ATH79_CPU_IRQ(5));
#if defined(CONFIG_AVM_SIMPLE_PROFILING)
		avm_simple_profiling_log(avm_profile_data_type_hw_irq_end,
					 (unsigned int)(irq_desc + irq), irq);
#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/
	} else if (pending & STATUSF_IP3) {
#if defined(CONFIG_AVM_SIMPLE_PROFILING)
		unsigned int irq = ATH79_CPU_IRQ(3);	/*--- ATH_CPU_IRQ_USB ---*/

		if (first) {
			avm_simple_profiling_enter_irqcontext(read_c0_epc());	/*--- wakeup or interrupted code ---*/
			first = 0;
		}
		avm_simple_profiling_log(avm_profile_data_type_hw_irq_begin,
					 (unsigned int)(irq_desc + irq), irq);
#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/
		ath79_ip3_handler();
#if defined(CONFIG_AVM_SIMPLE_PROFILING)
		avm_simple_profiling_log(avm_profile_data_type_hw_irq_end,
					 (unsigned int)(irq_desc + irq), irq);
#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/
	} else if (pending & STATUSF_IP6) {
		do_IRQ(ATH79_CPU_IRQ(6));
	} else {
		spurious_interrupt();
	}
}
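/*
 * On AR71xx/AR724x/AR913x/AR933x the ip2/ip3 handlers selected in
 * arch_init_irq() flush the matching DDR write-back buffer themselves before
 * calling do_IRQ().  The "default" handlers below skip the flush and are
 * used on the SoCs where the flush is performed in the chained IP2/IP3
 * dispatchers instead (or is still marked as a TODO there).
 */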
/*
 * The IP2/IP3 lines are tied to a PCI/WMAC/USB device. Drivers for
 * these devices typically allocate coherent DMA memory, however the
 * DMA controller may still have some unsynchronized data in the FIFO.
 * Issue a flush in the handlers to ensure that the driver sees
 * the update.
 */
static void ath79_default_ip2_handler(void)
{
	do_IRQ(ATH79_CPU_IRQ(2));
}

#if (!defined CONFIG_SOC_QCA953X)
static void ath79_default_ip3_handler(void)
{
	do_IRQ(ATH79_CPU_IRQ(3));
}
#endif

#ifdef CONFIG_SOC_AR71XX
static void ar71xx_ip2_handler(void)
{
	ath79_ddr_wb_flush(AR71XX_DDR_REG_FLUSH_PCI);
	do_IRQ(ATH79_CPU_IRQ(2));
}
#endif

#ifdef CONFIG_SOC_AR724X
static void ar724x_ip2_handler(void)
{
	ath79_ddr_wb_flush(AR724X_DDR_REG_FLUSH_PCIE);
	do_IRQ(ATH79_CPU_IRQ(2));
}
#endif

#ifdef CONFIG_SOC_AR913X
static void ar913x_ip2_handler(void)
{
	ath79_ddr_wb_flush(AR913X_DDR_REG_FLUSH_WMAC);
	do_IRQ(ATH79_CPU_IRQ(2));
}
#endif

#ifdef CONFIG_SOC_AR933X
static void ar933x_ip2_handler(void)
{
	ath79_ddr_wb_flush(AR933X_DDR_REG_FLUSH_WMAC);
	do_IRQ(ATH79_CPU_IRQ(2));
}
#endif

#ifdef CONFIG_SOC_AR71XX
static void ar71xx_ip3_handler(void)
{
	ath79_ddr_wb_flush(AR71XX_DDR_REG_FLUSH_USB);
	do_IRQ(ATH79_CPU_IRQ(3));
}
#endif

#ifdef CONFIG_SOC_AR724X
static void ar724x_ip3_handler(void)
{
	ath79_ddr_wb_flush(AR724X_DDR_REG_FLUSH_USB);
	do_IRQ(ATH79_CPU_IRQ(3));
}
#endif

#ifdef CONFIG_SOC_AR913X
static void ar913x_ip3_handler(void)
{
	ath79_ddr_wb_flush(AR913X_DDR_REG_FLUSH_USB);
	do_IRQ(ATH79_CPU_IRQ(3));
}
#endif

#ifdef CONFIG_SOC_AR933X
static void ar933x_ip3_handler(void)
{
	ath79_ddr_wb_flush(AR933X_DDR_REG_FLUSH_USB);
	do_IRQ(ATH79_CPU_IRQ(3));
}
#endif

#ifdef CONFIG_SOC_AR934X
static void ar934x_ip3_handler(void)
{
	ath79_ddr_wb_flush(AR934X_DDR_REG_FLUSH_USB);
	do_IRQ(ATH79_CPU_IRQ(3));
}
#endif

#if (defined CONFIG_SOC_QCA953X)
static void qca953x_ip3_handler(void)
{
	ath79_ddr_wb_flush(QCA953X_REGOFS__DDR__DDR_WB_FLUSH_USB);
	do_IRQ(ATH79_CPU_IRQ(3));
}
#endif

/*------------------------------------------------------------------------------------------*\
\*------------------------------------------------------------------------------------------*/
extern void ath_gpio_irq_init(int irq_base);
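/*
 * arch_init_irq() wires everything together: the preprocessor chain picks
 * the ip2/ip3 handlers for the SoC the kernel was built for (BUG() fires if
 * the running SoC does not match the compiled-in choice), the CP0
 * performance counter interrupt is routed to MISC source 5, the MIPS CPU
 * and MISC controllers are set up, the SoC-specific IP2/IP3 demultiplexers
 * are installed where available, and finally the AVM GPIO interrupt layer
 * is initialized via ath_gpio_irq_init().
 */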
void __init arch_init_irq(void)
{
#if defined CONFIG_SOC_AR71XX
	if (soc_is_ar71xx()) {
		ath79_ip2_handler = ar71xx_ip2_handler;
		ath79_ip3_handler = ar71xx_ip3_handler;
	} else
#elif defined CONFIG_SOC_AR724X
	if (soc_is_ar724x()) {
		ath79_ip2_handler = ar724x_ip2_handler;
		ath79_ip3_handler = ar724x_ip3_handler;
	} else
#elif defined CONFIG_SOC_AR913X
	if (soc_is_ar913x()) {
		ath79_ip2_handler = ar913x_ip2_handler;
		ath79_ip3_handler = ar913x_ip3_handler;
	} else
#elif defined CONFIG_SOC_AR933X
	if (soc_is_ar933x()) {
		ath79_ip2_handler = ar933x_ip2_handler;
		ath79_ip3_handler = ar933x_ip3_handler;
	} else
#elif defined CONFIG_SOC_AR934X
	if (soc_is_ar934x()) {
		ath79_ip2_handler = ath79_default_ip2_handler;
		ath79_ip3_handler = ar934x_ip3_handler;
	} else
#elif (defined CONFIG_SOC_QCA953X)
	if (soc_is_qca953x()) {
		ath79_ip2_handler = ath79_default_ip2_handler;
		ath79_ip3_handler = qca953x_ip3_handler;
	} else
#elif (defined CONFIG_SOC_QCA955X)
	if (soc_is_qca955x()) {
		ath79_ip2_handler = ath79_default_ip2_handler;
		ath79_ip3_handler = ath79_default_ip3_handler;
	} else
#elif (defined CONFIG_SOC_QCA956X)
	if (soc_is_qca956x()) {
		ath79_ip2_handler = ath79_default_ip2_handler;
		ath79_ip3_handler = ath79_default_ip3_handler;
	} else
#else
#error "ip2/ip3 handler not assigned!"
#endif
		BUG();

	cp0_perfcount_irq = ATH79_MISC_IRQ(5);

	mips_cpu_irq_init();
	ath79_misc_irq_init();

#if defined CONFIG_SOC_AR934X
	if (soc_is_ar934x()) {
		ar934x_ip2_irq_init();
	} else
#endif
#if defined CONFIG_SOC_QCA953X
	if (soc_is_qca953x()) {
		qca953x_irq_init();
	} else
#endif
#if defined CONFIG_SOC_QCA955X
	if (soc_is_qca955x()) {
		qca955x_irq_init();
	} else
#endif
#if defined CONFIG_SOC_QCA956X
	if (soc_is_qca956x()) {
		qca956x_irq_init();
	}
#endif

	ath_gpio_irq_init(ATH79_GPIO_IRQ_BASE);
}