/*
 * Copyright (C) 2016 AVM GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#if defined(CONFIG_PROC_FS)
#include
#endif
#include
#ifdef CONFIG_AVM_FASTIRQ_TZ
#include
#include
#define avm_secmon_fiqhandler_install(r)   (avm_tz_disable_fiq_handling_in_linux(&r))
#define avm_secmon_fiqhandler_uninstall(r) (avm_tz_enable_fiq_handling_in_linux(&r))
#else
unsigned int avm_secmon_fiqhandler_prepare(void);
void avm_secmon_fiqhandler_cleanup(unsigned int mvbar);
#define avm_secmon_fiqhandler_install(r)   (r = avm_secmon_fiqhandler_prepare())
#define avm_secmon_fiqhandler_uninstall(r) (avm_secmon_fiqhandler_cleanup(r))
#endif
#include

#define FASTIRQ_HANDLED		1
#define MAX_FIQ_NAME_SIZE	63

#define __get_cpu_var(var) (*(raw_cpu_ptr((&var))))

#define DBG_TRC(args...) /*--- #define DBG_TRC(args...) printk(KERN_INFO args) ---*/

/* Externals */
/* External symbols of the FIQ low-level handler */
extern unsigned int avm_fiq_stacks;
extern void avm_fiq_handler(void);
extern unsigned char avm_fiq_handler_begin, avm_fiq_handler_end;

/* Internals */
/* Internal prototypes */
static void avm_fiq_setup_helper(void *handler);
static void avm_set_fiq_handler(void *start, unsigned int length);
static void avm_fiq_prepare_vector(void *handler);
static void avm_fiq_finish_vector(void *handler);

/* Internal data structures */
/*--- static spinlock_t wd_mask_lock; ---*/
static DEFINE_MUTEX(avm_fiq_lock);
static DEFINE_PER_CPU(void *, avm_fiq_stack);
static DEFINE_PER_CPU(void *, avm_fiq_task);
static DEFINE_PER_CPU(void *, avm_fiq_registration);
DEFINE_PER_CPU(struct pt_regs *, __fiq_regs);
DEFINE_PER_CPU(struct pt_regs *, __exc_regs);
DEFINE_PER_CPU(unsigned int, spurious_count);
DEFINE_PER_CPU(unsigned int, fiq_fiq_preemptions);
DEFINE_PER_CPU(unsigned int, recursion_depth);

unsigned int *avm_fiq_stack_array[NR_CPUS];
EXPORT_SYMBOL(avm_fiq_stack_array);

#define FIQ_STATISTIC
#if defined(FIQ_STATISTIC)
struct _fiq_stat {
	unsigned long last_t;
	unsigned long cnt;
	unsigned long consumption_min;
	unsigned long consumption_max;
	unsigned long long consumption_sum;
	unsigned long dtrigger_min;
	unsigned long dtrigger_max;
	unsigned long long dtrigger_sum;
};

/**
 */
static void init_fiq_stat(struct _fiq_stat *pgstat)
{
	pgstat->cnt = 0;
	pgstat->consumption_min = LONG_MAX;
	pgstat->consumption_max = 0;
	pgstat->consumption_sum = 0;
	pgstat->dtrigger_min = LONG_MAX;
	pgstat->dtrigger_max = 0;
	pgstat->dtrigger_sum = 0;
}

/**
 */
static void fiq_stat(struct _fiq_stat *pgstat, unsigned long tstart, unsigned long tend)
{
	unsigned long dtrigger, consumption;

	if (pgstat->last_t == 0) {
		init_fiq_stat(pgstat);
		pgstat->last_t = tstart | 0x1; /* force non-zero: 0 marks "no sample yet" */
		return;
	}
	consumption = tend - tstart;
	if (consumption > pgstat->consumption_max)
		pgstat->consumption_max = consumption;
	if (consumption < pgstat->consumption_min)
		pgstat->consumption_min = consumption;
	pgstat->consumption_sum += (unsigned long long)consumption;
	dtrigger = (tstart | 0x1) - pgstat->last_t;
	if (dtrigger > pgstat->dtrigger_max)
		pgstat->dtrigger_max = dtrigger;
	if (dtrigger < pgstat->dtrigger_min)
		pgstat->dtrigger_min = dtrigger;
	pgstat->dtrigger_sum += (unsigned long long)dtrigger;
	pgstat->cnt++;
	pgstat->last_t = tstart | 0x1;
}

/**
 */
static inline void fiq_stat_reset(struct _fiq_stat *pgstat)
{
	pgstat->last_t = 0;
}

/**
 */
static char *fill_fiq_stat(char *txt, int txt_len, struct _fiq_stat *pgstat, unsigned int reset,
			   unsigned long *consume_promille)
{
	struct _fiq_stat gstat;
	unsigned long cnt;
	unsigned long long c_avg, l_avg;
	unsigned long c_avg_us, l_avg_us;

	cnt = pgstat->cnt;
	txt[0] = 0;
	if (cnt == 0) {
		if (consume_promille)
			*consume_promille = 0;
		return txt;
	}
	memcpy(&gstat, pgstat, sizeof(gstat));
	if (reset)
		fiq_stat_reset(pgstat);
	c_avg = gstat.consumption_sum;
	do_div(c_avg, cnt);
	c_avg_us = avm_cycles_to_usec((unsigned long)c_avg);
	l_avg = gstat.dtrigger_sum;
	do_div(l_avg, cnt);
	l_avg_us = avm_cycles_to_usec((unsigned long)l_avg);
	snprintf(txt, txt_len, "consum:min %6lu max %6lu avg %6lu dt:min %6lu max %6lu avg %6lu us",
		 avm_cycles_to_usec(gstat.consumption_min), avm_cycles_to_usec(gstat.consumption_max), c_avg_us,
		 avm_cycles_to_usec(gstat.dtrigger_min), avm_cycles_to_usec(gstat.dtrigger_max), l_avg_us);
	if (consume_promille) {
		*consume_promille = ((c_avg_us + 1) * 1000) / l_avg_us; /*--- 32-bit range is sufficient (max = 1 million * 1000) ---*/
	}
	return txt;
}
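
/*
 * Worked example for the statistics above (illustrative numbers, not
 * measured): three samples with a consumption of 200, 400 and 600 cycles
 * give consumption_min/max = 200/600 and c_avg = 400 cycles. If that maps
 * to c_avg_us = 2 us and the average trigger distance is l_avg_us = 1000 us,
 * fill_fiq_stat() reports a load of ((2 + 1) * 1000) / 1000 = 3 promille,
 * i.e. roughly 0.3% of the CPU spent in this FIQ handler.
 */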
#endif /*--- #if defined(FIQ_STATISTIC) ---*/

typedef struct avm_fiq_registration_entry_s {
	atomic_t is_allocated;
	void *ref;
	avm_fiq_cb_t fiq_handler;
	atomic_t count;
	atomic_t unhandled_count;
	int prio;
	atomic_t busy;
	char name[MAX_FIQ_NAME_SIZE + 1];
#if defined(FIQ_STATISTIC)
	struct _fiq_stat statistic;
#endif /*--- #if defined(FIQ_STATISTIC) ---*/
} avm_fiq_registration_entry_t;

#define FIQ_REG_ENTRY_UNALLOCATED	0
#define FIQ_REG_ENTRY_ALLOCATED		1
#define FIQ_REG_ENTRY_ALLOCATED_TZ	2

avm_fiq_registration_entry_t *avm_fiq_registrations[NR_CPUS];

unsigned int fiq_stack[NR_CPUS][THREAD_SIZE / sizeof(unsigned int)] __attribute__((aligned(THREAD_SIZE), externally_visible));
unsigned int fiq_task[NR_CPUS][THREAD_SIZE / sizeof(unsigned int)] __attribute__((aligned(THREAD_SIZE), externally_visible));

#if defined(CONFIG_PROC_FS)
static int fiq_proc_init(void);
#endif /*--- #if defined(CONFIG_PROC_FS) ---*/
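
/*
 * Note on the layout above: fiq_stack[] and fiq_task[] are THREAD_SIZE sized
 * and THREAD_SIZE aligned on purpose, so that the usual ARM trick of masking
 * a stack pointer with ~(THREAD_SIZE - 1) (as done in avm_fiq_setup_helper()
 * and by thread_info_by_sp()) also works for the separate per-CPU FIQ stacks.
 * avm_fiq_registrations[cpu] points to a kmalloc'ed array with one
 * avm_fiq_registration_entry_t per hardware interrupt.
 */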
/*------------------------------------------------------------------------------------------*/
/* Interface functions */

/*
 * Enable an already requested FIQ.
 * ret: invalid CPU number:      -EINVAL
 *      invalid IRQ:             -EINVAL
 *      not properly requested:  -EINVAL
 *      otherwise 0
 */
int avm_enable_fiq_on(int cpu, unsigned int irq)
{
	avm_fiq_registration_entry_t *reg_entry;
	int ret = 0;

	mutex_lock(&avm_fiq_lock);
	if (num_possible_cpus() - 1 < cpu)
		ret = -EINVAL;
	if (avm_gic_fiq_nr_ints() - 1 < irq)
		ret = -EINVAL;
	if (ret == 0) {
		/* only dereference the table once cpu and irq are validated */
		reg_entry = &(avm_fiq_registrations[cpu][irq]);
		if (atomic_read(&reg_entry->is_allocated) == FIQ_REG_ENTRY_UNALLOCATED)
			ret = -EINVAL;
	}
	if (ret == 0) {
		avm_gic_fiq_enable(cpu, irq);
	}
	mutex_unlock(&avm_fiq_lock);
	return ret;
}
EXPORT_SYMBOL(avm_enable_fiq_on);

/*
 * Disable an already requested FIQ.
 * ret: invalid CPU number:      -EINVAL
 *      invalid IRQ:             -EINVAL
 *      not properly requested:  -EINVAL
 *      otherwise 0
 */
int avm_disable_fiq_on(int cpu, unsigned int irq)
{
	avm_fiq_registration_entry_t *reg_entry;
	int ret = 0;

	mutex_lock(&avm_fiq_lock);
	if (num_possible_cpus() - 1 < cpu)
		ret = -EINVAL;
	if (avm_gic_fiq_nr_ints() - 1 < irq)
		ret = -EINVAL;
	if (ret == 0) {
		reg_entry = &(avm_fiq_registrations[cpu][irq]);
		if (atomic_read(&reg_entry->is_allocated) == FIQ_REG_ENTRY_UNALLOCATED)
			ret = -EINVAL;
	}
	if (ret == 0) {
		avm_gic_fiq_disable(cpu, irq);
	}
	mutex_unlock(&avm_fiq_lock);
	return ret;
}
EXPORT_SYMBOL(avm_disable_fiq_on);

/*
 * Request (allocate) a FIQ.
 * ret: invalid CPU number: -EINVAL
 *      invalid IRQ:        -EINVAL
 *      already in use:     -EBUSY
 *      otherwise 0
 */
int avm_request_fiq_on(const struct cpumask *cpumask, unsigned int irq, avm_fiq_cb_t handler,
		       unsigned long fiqflags, const char *devname, void *dev_id)
{
	struct irq_desc *irq_desc = NULL;
	avm_fiq_registration_entry_t *reg_entry;
	int ret = 0, mode, cpu;

	if (IS_ENABLED(CONFIG_AVM_FASTIRQ_TZ) && !(avm_tz_supported_features() & AVM_TZ_FEAT_FIQ)) {
		pr_err("%s warning fiq not supported - update Urlader\n", __func__);
		return -EINVAL;
	}
	/* AVM
	 * Linux 4 and the GIC support irq domains, which leads to virtual irq
	 * numbers and a mapping when an irq is requested from the device tree.
	 * Unfortunately the fastirq implementation does not support this feature
	 * yet. As a workaround the hwirq number is used when a fastirq is
	 * requested.
	 */
	if (fiqflags & FIQ_HWIRQ) {
		pr_err("[%s] Forcing HW-IRQ %u instead\n", __func__, irq);
	} else {
		irq_desc = irq_to_desc(irq);
		if (irq_desc) {
			pr_warn("[%s] Virtual IRQ numbers are not supported. Use HWIRQ (%lu) for requested IRQ (%u)\n",
				__func__, irq_desc->irq_data.hwirq, irq);
			irq = irq_desc->irq_data.hwirq;
		} else {
			pr_err("[%s] Using virtual IRQ (%u)\n", __func__, irq);
		}
	}
	mutex_lock(&avm_fiq_lock);
	DBG_TRC("%s: irq=%u '%s' dev_id=%p\n", __func__, irq, devname, dev_id);
	if (!cpumask_intersects(cpumask, cpu_possible_mask)) {
		pr_err("%s: error inval cpumask=%*pbl\n", __func__, cpumask_pr_args(cpumask));
		ret = -EINVAL;
		goto request_exit;
	}
	if ((avm_gic_fiq_nr_ints() - 1) < irq) {
		pr_err("%s: error inval irq=%u\n", __func__, irq);
		ret = -EINVAL;
		goto request_exit;
	}
	if (handler == NULL) {
		pr_err("%s: error inval handler=%p\n", __func__, handler);
		ret = -EINVAL;
		goto request_exit;
	}
	cpu = 0;
	for_each_cpu(cpu, cpumask) {
		reg_entry = &(avm_fiq_registrations[cpu][irq]);
		if (atomic_read(&reg_entry->is_allocated) == FIQ_REG_ENTRY_ALLOCATED) {
			pr_err("%s: error irq=%u in use\n", __func__, irq);
			ret = -EBUSY;
			goto request_exit;
		}
		atomic_set(&reg_entry->is_allocated, FIQ_REG_ENTRY_ALLOCATED);
		reg_entry->ref = dev_id;
		reg_entry->fiq_handler = handler;
		reg_entry->prio = FIQ_PRIO_USER;
		atomic_set(&reg_entry->count, 0);
#if defined(FIQ_STATISTIC)
		fiq_stat_reset(&reg_entry->statistic);
#endif /*--- #if defined(FIQ_STATISTIC) ---*/
		(void)strncpy(reg_entry->name, devname, MAX_FIQ_NAME_SIZE);
		reg_entry->name[MAX_FIQ_NAME_SIZE] = '\0';
	}
	mode = 0;
	if (fiqflags & FIQ_EDGE) {
		mode |= 2;
	}
	if (fiqflags & FIQ_1_N) {
		mode |= 1;
	}
	avm_gic_fiq_configure(irq, cpumask, FIQ_PRIO_USER, mode, 0x00);

request_exit:
	mutex_unlock(&avm_fiq_lock);
	return ret;
}
EXPORT_SYMBOL(avm_request_fiq_on);
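
/*
 * Usage sketch (not part of this file; MY_HWIRQ and my_dev are made-up
 * names for illustration). A driver typically requests a FIQ for one CPU,
 * enables it, and tears it down again on exit:
 *
 *	static void my_fiq_handler(int irq, void *dev_id)
 *	{
 *		-- runs in FIQ mode: no locks, no scheduler, keep it short --
 *	}
 *	-- argument shape (int, void *) taken from the dispatch call below;
 *	-- the exact return type is whatever avm_fiq_cb_t declares
 *
 *	ret = avm_request_fiq_on(cpumask_of(0), MY_HWIRQ, my_fiq_handler,
 *				 FIQ_HWIRQ, "mydev", &my_dev);
 *	if (ret == 0)
 *		avm_enable_fiq_on(0, MY_HWIRQ);
 *	...
 *	avm_disable_fiq_on(0, MY_HWIRQ);
 *	avm_free_fiq_on(0, MY_HWIRQ, &my_dev);
 */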
void avm_gic_fiq_setup(unsigned int pinNr, const struct cpumask *cpumask, unsigned int prio,
		       unsigned int config, unsigned int mode)
{
	int cpu;
	avm_fiq_registration_entry_t *reg_entry;

	mutex_lock(&avm_fiq_lock);
	for_each_cpu(cpu, cpumask) {
		reg_entry = &avm_fiq_registrations[cpu][pinNr];
		if (atomic_read(&reg_entry->is_allocated) == FIQ_REG_ENTRY_ALLOCATED)
			reg_entry->prio = prio;
	}
	mutex_unlock(&avm_fiq_lock);
	avm_gic_fiq_configure(pinNr, cpumask, prio, config, mode);
}
EXPORT_SYMBOL(avm_gic_fiq_setup);

/*
 * Release a FIQ.
 * ret: invalid CPU number: -EINVAL
 *      invalid IRQ:        -EINVAL
 *      mismatching ref:    -EINVAL
 *      otherwise 0
 */
int avm_free_fiq_on(int cpu, unsigned int irq, void *dev_id)
{
	avm_fiq_registration_entry_t *reg_entry;
	int ret = 0;

	mutex_lock(&avm_fiq_lock);
	DBG_TRC("%s: cpu=%d irq=%u dev_id=%p\n", __func__, cpu, irq, dev_id);
	if (num_possible_cpus() - 1 < cpu) {
		pr_err("%s: error inval cpu=%d\n", __func__, cpu);
		ret = -EINVAL;
		goto free_exit;
	}
	if (avm_gic_fiq_nr_ints() - 1 < irq) {
		pr_err("%s: error inval irq=%u\n", __func__, irq);
		ret = -EINVAL;
		goto free_exit;
	}
	reg_entry = &(avm_fiq_registrations[cpu][irq]);
	if (reg_entry->ref != dev_id) {
		pr_err("%s: error inval dev_id=%p\n", __func__, dev_id);
		ret = -EINVAL;
		goto free_exit;
	}
	atomic_set(&reg_entry->is_allocated, FIQ_REG_ENTRY_UNALLOCATED);
	avm_gic_fiq_disable(cpu, irq);
free_exit:
	mutex_unlock(&avm_fiq_lock);
	return ret;
}
EXPORT_SYMBOL(avm_free_fiq_on);

extern void __iomem *DIST_BASE;

static int tz_interrupt_handler(unsigned int intNr)
{
#ifndef CONFIG_AVM_FASTIRQ_TZ
	return IRQ_NONE;
#elif !defined(CONFIG_AVM_TZ_TYPE_QCA_RTE)
	return IRQ_HANDLED;
#else
	/*--- We have a TZ FIQ -> hand it to the TZ by SMC ---*/
	register u32 r0 asm("r0");
	register u32 r1 asm("r1");
#if defined(FIQ_STATISTIC)
	unsigned long tstart;
	avm_fiq_registration_entry_t *reg_entry;

	reg_entry = &(avm_fiq_registrations[raw_smp_processor_id()][intNr]);
	if (atomic_read(&reg_entry->is_allocated) == FIQ_REG_ENTRY_UNALLOCATED) {
		atomic_set(&reg_entry->is_allocated, FIQ_REG_ENTRY_ALLOCATED_TZ);
		atomic_set(&reg_entry->count, 0);
	}
	atomic_set(&reg_entry->busy, 1);
	atomic_inc(&reg_entry->count);
	tstart = avm_get_cycles();
#endif /*--- #if defined(FIQ_STATISTIC) ---*/
	r0 = AVM_SMC_FIQ_FOR_TZ;
	r1 = intNr;
	asm volatile(
		__asmeq("%0", "r0")
		__asmeq("%1", "r1")
		".arch_extension sec\n"
		"smc	#0	@ switch to secure world\n"
		: "=r" (r0), "=r" (r1)
		: "r" (r0), "r" (r1)
		: "r2", "r3", "cc");
#if defined(FIQ_STATISTIC)
	atomic_set(&reg_entry->busy, 0);
	fiq_stat(&reg_entry->statistic, tstart, avm_get_cycles());
#endif /*--- #if defined(FIQ_STATISTIC) ---*/
	return IRQ_HANDLED;
#endif
}

/*
 * High-level FIQ handler
 */
void avm_mask_all_fiqs_down(unsigned int fiq_prio, unsigned long *restore_PMR, unsigned long flags)
{
	unsigned long mask = get_ICCPMR(IS_ATOMIC);

	*restore_PMR = mask;
	set_ICCPMR(((mask <= fiq_prio) ? mask : fiq_prio), IS_ATOMIC);
}
EXPORT_SYMBOL(avm_mask_all_fiqs_down);

void avm_unmask_all_fiqs_up(unsigned long restore_PMR, unsigned long flags)
{
	unsigned long mask = get_ICCPMR(IS_ATOMIC);

	set_ICCPMR(((mask <= restore_PMR) ? restore_PMR : mask), IS_ATOMIC);
}
EXPORT_SYMBOL(avm_unmask_all_fiqs_up);

/* You definitely need to know what you are doing here ... ;-) */
void avm_bust_mask(unsigned int mask, unsigned long flags)
{
	set_ICCPMR(mask, IS_ATOMIC);
}
EXPORT_SYMBOL(avm_bust_mask);
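
/*
 * Sketch of the intended masking pattern (illustrative, not a call site in
 * this file): code that must not be preempted by lower-priority FIQs can
 * temporarily lower the GIC priority mask and restore it afterwards. Note
 * that in the GIC a numerically lower priority value is more urgent, hence
 * the min/max logic in the two helpers above.
 *
 *	unsigned long old_pmr;
 *
 *	avm_mask_all_fiqs_down(FIQ_PRIO_MONITOR, &old_pmr, 0);
 *	-- critical section: only FIQs more urgent than FIQ_PRIO_MONITOR
 *	-- are still delivered here
 *	avm_unmask_all_fiqs_up(old_pmr, 0);
 */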
static void avm_fiq_high_level_handler(struct pt_regs *regs, unsigned long restore_PMR,
				       unsigned int intNr, unsigned int userRef)
{
	unsigned int ref = 0;
	bool enter_nmi;
#ifdef CONFIG_AVM_TZ_TYPE_QCA_RTE
	unsigned long saved_PMR = 0;
#endif

	enter_nmi = NMI_BITS >= 4 || !in_nmi();
	if (enter_nmi)
		nmi_enter();
#ifdef CONFIG_AVM_TZ_TYPE_QCA_RTE
	/*
	 * Only the 1-N model of the GIC works: the signal is withdrawn as soon
	 * as the first core ACKs. So first look only at the Highest Pending
	 * register. If it contains the WD, skip the ACK (we want to catch all
	 * cores, the signal is routed to all of them); otherwise ACK normally
	 * and signal End of Interrupt at the end.
	 */
	intNr = avm_gic_fiq_get_ICCHPIR() & 0x3FF;
	if (intNr != AVM_IRQ_WD) {
		/*
		 * Even if we did not read the WD from Highest Pending, the pending
		 * FIQ could still be displaced by the WD before the ACK. Reading
		 * ICCHPIR does not freeze the GIC, so there is no guarantee that we
		 * still read the same pending FIQ now. Therefore check for the WD
		 * again, and in that case re-post the WD manually for the other
		 * cores, since it has just been taken away. Either way, intNr now
		 * holds the interrupt to be handled.
		 */
		intNr = get_ICCIAR(IS_ATOMIC) & 0x3FF;
		if (intNr == AVM_IRQ_WD) {
			/* clear interrupt pending */
			set_ICDICPR(intNr, 1, IS_ATOMIC);
			/* end the interrupt */
			set_ICCEOIR(intNr, IS_ATOMIC);
			/* set interrupt pending again */
			set_ICDISPR(intNr, 1, IS_ATOMIC);
		}
	}
	avm_mask_all_fiqs_down(FIQ_PRIO_MONITOR, &saved_PMR, 0);
	local_fiq_enable();
#else
	intNr = intNr & 0x3FF;
#endif
	if (likely(intNr > 15 && intNr < avm_gic_fiq_nr_ints())) {
		avm_fiq_registration_entry_t *reg_entry = &(((avm_fiq_registration_entry_t *)(userRef))[intNr]);

		if (likely(atomic_read(&reg_entry->is_allocated) == FIQ_REG_ENTRY_ALLOCATED)) {
#if defined(FIQ_STATISTIC)
			unsigned long tstart = avm_get_cycles();
#endif /*--- #if defined(FIQ_STATISTIC) ---*/
			unsigned long void_PMR = 0;

			atomic_set(&reg_entry->busy, 1);
			avm_simple_profiling_log(avm_profile_data_type_hw_irq_begin, (unsigned int)reg_entry, intNr);
			set_fiq_regs(regs);
			atomic_inc(&reg_entry->count);
			__this_cpu_write(recursion_depth, (__this_cpu_read(recursion_depth) + 1));
			if (__this_cpu_read(recursion_depth) >= 2) {
				__this_cpu_write(fiq_fiq_preemptions, (__this_cpu_read(fiq_fiq_preemptions) + 1));
			}
			/* old TrustZone ==> no FIQ preemption! */
			if (!IS_ENABLED(CONFIG_AVM_FASTIRQ_TZ) || (avm_tz_supported_features() & AVM_TZ_FEAT_FIQ_PREEMPT)) {
				if ((__this_cpu_read(recursion_depth) - 1) == 0) {
					avm_secmon_fiqhandler_install(ref);
				}
				if (intNr != AVM_IRQ_WD)
					avm_unmask_all_fiqs_up(reg_entry->prio, 0);
			}
			(reg_entry->fiq_handler)((int)intNr, reg_entry->ref);
			/* old TrustZone ==> no FIQ preemption! */
			if (!IS_ENABLED(CONFIG_AVM_FASTIRQ_TZ) || (avm_tz_supported_features() & AVM_TZ_FEAT_FIQ_PREEMPT)) {
				if (intNr != AVM_IRQ_WD)
					avm_mask_all_fiqs_down(FIQ_PRIO_MONITOR, &void_PMR, 0);
				if ((__this_cpu_read(recursion_depth) - 1) == 0) {
					avm_secmon_fiqhandler_uninstall(ref);
				}
			}
			__this_cpu_write(recursion_depth, (__this_cpu_read(recursion_depth) - 1));
			avm_simple_profiling_log(avm_profile_data_type_hw_irq_end, (unsigned int)reg_entry, intNr);
			atomic_set(&reg_entry->busy, 0);
#if defined(FIQ_STATISTIC)
			fiq_stat(&reg_entry->statistic, tstart, avm_get_cycles());
#endif /*--- #if defined(FIQ_STATISTIC) ---*/
		} else if (tz_interrupt_handler(intNr) == IRQ_NONE) {
			pr_err("%s: BAD TZ-FIRQ CALL unhandled with ID=%u\n", __func__, intNr);
			atomic_inc(&reg_entry->unhandled_count);
			/*--- avm_gic_fiq_raise_irq(intNr); ---*/
		} else {
			pr_debug("%s: BAD TZ-FIRQ CALL handled with ID=%u\n", __func__, intNr);
		}
	} else {
		if (intNr <= 15) {
			if (avm_rte_ipi_handle(intNr, regs) != IRQ_HANDLED) {
				dsb();
				pr_err("%s: inval IPI irq=%u\n", __func__, intNr);
			}
		} else {
			/* 1023 is the spurious interrupt ID; 1022 is likewise a special "no valid interrupt" ID */
			if (intNr != 1023 && intNr != 1022) {
				pr_err("%s: invalid firq=%u\n", __func__, intNr);
			}
			__this_cpu_write(spurious_count, (__this_cpu_read(spurious_count) + 1));
		}
	}
#ifdef CONFIG_AVM_TZ_TYPE_QCA_RTE
	local_fiq_disable();
	avm_unmask_all_fiqs_up(saved_PMR, 0);
	if (intNr != AVM_IRQ_WD) {
		set_ICCEOIR(intNr, IS_ATOMIC);
	}
#endif
	if (enter_nmi)
		nmi_exit();
}
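
/*
 * Summary of the handler above: it runs in FIQ mode on the per-CPU FIQ
 * stack, with userRef pointing at this CPU's registration table (preloaded
 * into a banked register by the low-level entry, see set_fiq_banked()).
 * The per-interrupt sequence is: nmi_enter(), dispatch through the
 * registration table, statistics/profiling bookkeeping, nmi_exit().
 * recursion_depth counts FIQ-in-FIQ preemption: on the outermost level the
 * secure-monitor FIQ redirection is (un)installed, and while a user handler
 * runs (watchdog excepted) the priority mask is raised again so that more
 * urgent FIQs may preempt it, provided the TrustZone supports preemption.
 */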
/*------------------------------------------------------------------------------------------*/
/* Initialization */

#ifdef CONFIG_AVM_FASTIRQ_TZ
extern void *jump_into_linux;
extern void *avm_fiqhandler_regsave;
#endif

int avm_fiq_setup(void)
{
	int ret;
	int cpu;
	char tz_version[AVM_TZ_VERSION_SIZE] __maybe_unused;

	/* firq_spin_lock_init(&wd_mask_lock); */
	if (IS_ENABLED(CONFIG_AVM_FASTIRQ_TZ) && !(avm_tz_supported_features() & AVM_TZ_FEAT_FIQ)) {
		pr_err("FIQ Handler not installed! QCA TZ\n");
		return -ENODEV;
	}
	mutex_lock(&avm_fiq_lock);
	avm_gic_fiq_init();
	if (__get_cpu_var(avm_fiq_registration)) {
		ret = -EBUSY;
		goto err_avm_fiq_busy;
	}
	for_each_possible_cpu(cpu) {
		void *stack;
		void *task_info;
		void *registration;

		registration = kmalloc(sizeof(avm_fiq_registration_entry_t) * avm_gic_fiq_nr_ints(), GFP_KERNEL);
		if (WARN_ON(!registration)) {
			ret = -ENOMEM;
			goto err_alloc_avm_fiq;
		}
		stack = (void *)fiq_stack[cpu];
		if (WARN_ON(!stack)) {
			ret = -ENOMEM;
			goto err_alloc_avm_fiq;
		}
		task_info = (void *)fiq_task[cpu];
		if (WARN_ON(!task_info)) {
			ret = -ENOMEM;
			goto err_alloc_avm_fiq;
		}
		per_cpu(avm_fiq_stack, cpu) = stack;
		per_cpu(avm_fiq_task, cpu) = task_info;
		per_cpu(avm_fiq_registration, cpu) = registration;
		per_cpu(fiq_fiq_preemptions, cpu) = 0;
		per_cpu(spurious_count, cpu) = 0;
	}
	on_each_cpu(avm_fiq_setup_helper, avm_fiq_high_level_handler, true);
	avm_fiq_stacks = (unsigned int)(avm_fiq_stack_array);
#ifdef CONFIG_AVM_FASTIRQ_TZ
	on_each_cpu(avm_tz_disable_fiq_handling_in_linux, (void *)0, 1);
#endif
	on_each_cpu(avm_fiq_prepare_vector, NULL, true);
	avm_set_fiq_handler(avm_fiq_handler, &avm_fiq_handler_end - (unsigned char *)avm_fiq_handler);
#ifdef CONFIG_AVM_FASTIRQ_TZ
	avm_tz_set_fiq_handler(&jump_into_linux, (void *)virt_to_phys(&avm_fiqhandler_regsave));
#endif
	on_each_cpu(avm_fiq_finish_vector, NULL, true);
#ifdef CONFIG_AVM_FASTIRQ_TZ
	on_each_cpu(avm_tz_enable_fiq_handling_in_linux, (void *)0, 1);
	if (avm_get_tz_version(tz_version, AVM_TZ_VERSION_SIZE)) {
		pr_info("[AVM_FIQ] (Version %s)(TZ %s)\n", "1.0", tz_version);
	} else {
		pr_info("[AVM_FIQ] (Version %s)\n", "1.0");
	}
#else
	pr_info("[AVM_FIQ] (Version %s)\n", "1.0");
#endif
	mutex_unlock(&avm_fiq_lock);
#if defined(CONFIG_PROC_FS)
	fiq_proc_init();
#endif /*--- #if defined(CONFIG_PROC_FS) ---*/
	return 0;

err_alloc_avm_fiq:
	for_each_possible_cpu(cpu) {
		per_cpu(avm_fiq_stack, cpu) = NULL;
		per_cpu(avm_fiq_task, cpu) = NULL;
		kfree(per_cpu(avm_fiq_registration, cpu));
		per_cpu(avm_fiq_registration, cpu) = NULL;
	}
err_avm_fiq_busy:
	mutex_unlock(&avm_fiq_lock);
	return ret;
}
fs_initcall(avm_fiq_setup);
/* Internal initialization helpers */

#define __set_fiq_banked(data) \
	do { \
		asm volatile("push {r4, r5, r6}	\n" \
			     "mov r4, %0	\n" \
			     "mrs r5, cpsr	\n" \
			     "bic r6, r5, #31	\n" \
			     "orr r6, r6, #17	\n"	/* switch to FIQ mode (0x11) */ \
			     "msr cpsr, r6	\n" \
			     "ldm r4, {r10-r12,sp}	\n" \
			     "nop		\n" \
			     "msr cpsr, r5	\n"	/* back to the previous mode */ \
			     "pop {r4, r5, r6}	\n" \
			     : \
			     : "r" (data) \
			     : "memory", "cc"); \
	} while (0)

/**
 */
static void set_fiq_banked(void *fiq_stackpointer, void *cpubase, void *userref, void *handler)
{
	unsigned long flags;
	void *data[4];

	data[0] = userref;		/*--- r10 ---*/
	data[1] = handler;		/*--- r11 ---*/
	data[2] = cpubase;		/*--- r12 ---*/
	data[3] = fiq_stackpointer;	/*--- sp ---*/
	flags = avm_arch_local_fiq_and_iq_save();
	__set_fiq_banked(data);
	avm_arch_local_fiq_restore(flags);
}

/**
 */
static void avm_fiq_setup_helper(void *handler)
{
	unsigned int CPU_NR = raw_smp_processor_id();
	unsigned int *stackStart = (unsigned int *)(__get_cpu_var(avm_fiq_stack) + THREAD_SIZE - 4);
	struct thread_info *threadInfoStart = (struct thread_info *)((unsigned int)stackStart & ~(THREAD_SIZE - 1));
	struct task_struct *taskStructStart = (struct task_struct *)(__get_cpu_var(avm_fiq_task));
	avm_fiq_registration_entry_t *registrationStart = (avm_fiq_registration_entry_t *)(__get_cpu_var(avm_fiq_registration));

	memset(__get_cpu_var(avm_fiq_stack), 0, THREAD_SIZE);
	memset(__get_cpu_var(avm_fiq_task), 0, THREAD_SIZE);
	memset(__get_cpu_var(avm_fiq_registration), 0, sizeof(avm_fiq_registration_entry_t) * avm_gic_fiq_nr_ints());
	(void)snprintf(taskStructStart->comm, TASK_COMM_LEN, "FIQ_CPU%d", CPU_NR);
	threadInfoStart->task = taskStructStart;
	threadInfoStart->flags = 0;
	threadInfoStart->preempt_count = INIT_PREEMPT_COUNT;
	threadInfoStart->addr_limit = KERNEL_DS;
	threadInfoStart->cpu = CPU_NR;
	threadInfoStart->cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) |
				      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) |
				      domain_val(DOMAIN_IO, DOMAIN_CLIENT);
	*stackStart-- = (unsigned int)CPU_NR;
	*stackStart = ((unsigned int)(stackStart)) - 16;
	stackStart--;
	*stackStart-- = (unsigned int)avm_gic_fiq_dist_base();
	*stackStart-- = (unsigned int)avm_gic_fiq_cpu_base();
	*stackStart-- = (unsigned int)handler;
	*stackStart = (unsigned int)registrationStart;
	taskStructStart->stack = __get_cpu_var(avm_fiq_stack); /*--- the task_struct contains a pointer to the stack page! ---*/
	set_fiq_banked(stackStart, avm_gic_fiq_cpu_base(), registrationStart, handler);
	avm_fiq_stack_array[CPU_NR] = stackStart;
	avm_fiq_registrations[CPU_NR] = registrationStart;
}
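
/*
 * Initial FIQ stack frame built by avm_fiq_setup_helper() (one 32-bit word
 * each, from the top of the stack page downwards):
 *
 *	THREAD_SIZE -  4:  CPU number
 *	THREAD_SIZE -  8:  address 16 bytes below this slot (= initial FIQ sp)
 *	THREAD_SIZE - 12:  GIC distributor base
 *	THREAD_SIZE - 16:  GIC CPU interface base
 *	THREAD_SIZE - 20:  high-level handler
 *	THREAD_SIZE - 24:  registration table   <- banked FIQ sp points here
 *
 * The banked FIQ registers are preloaded to match (see set_fiq_banked():
 * r10 = registration table, r11 = handler, r12 = GIC CPU interface base),
 * so the low-level entry avm_fiq_handler presumably finds everything it
 * needs without touching memory owned by the interrupted context.
 */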
static void avm_fiq_prepare_vector(void *handler)
{
	pr_info("[AVM_FIQ] FIQ disabled on CPU%d to install new vector!\n", raw_smp_processor_id());
	local_fiq_disable();
}

static void avm_set_fiq_handler(void *start, unsigned int length)
{
	/*
	 * The FIQ slot is the last entry of the exception vector table (0x1C).
	 * If the FIQ handler code is placed directly at the end of the vector
	 * table, no branch is required - the code can execute directly from 0x1C.
	 */
	memcpy(vectors_page + 0x1c, start, length);
	flush_icache_range((unsigned long)vectors_page + 0x1c, (unsigned long)vectors_page + 0x1c + length);
	pr_info("[AVM_FIQ] Vector installed!\n");
}

static void avm_fiq_finish_vector(void *handler)
{
	pr_info("[AVM_FIQ] FIQ enabled again on CPU%d!\n", raw_smp_processor_id());
	__flush_icache_all();
	local_fiq_enable();
}

/**
 * DAKOTA has a virtualization unit (Broadcom unfortunately does not)
 */
#define read_special(r) ({ \
	u32 __val; \
	asm volatile( \
		".arch armv7-a	\n" \
		".arch_extension virt	\n" \
		"mrs %0, " __stringify(r) \
		: "=r" (__val) \
		: \
		: "memory", "cc"); \
	__val; \
})

#define COPY_OTHER_BANKED_REGISTER(regs, mode) \
	do { \
		asm volatile("push {r4, r5, r6}	\n" \
			     "mov r4, %0	\n" \
			     "mrs r5, cpsr	\n" \
			     "bic r6, r5, #31	\n" \
			     "orr r6, r6, #"#mode "	\n" \
			     "msr cpsr, r6	\n" \
			     "stmia r4, {r8,r9,r10,r11,r12,r13,r14}	\n" \
			     "nop		\n" \
			     "msr cpsr, r5	\n" \
			     "pop {r4, r5, r6}	\n" \
			     : \
			     : "r" (&(regs)->ARM_r8) \
			     : "memory", "cc"); \
	} while (0)

#define COPY_OTHER_BANKED_REGISTER2(regs, mode) \
	do { \
		asm volatile("push {r4, r5, r6}	\n" \
			     "mov r4, %0	\n" \
			     "mrs r5, cpsr	\n" \
			     "bic r6, r5, #31	\n" \
			     "orr r6, r6, #"#mode "	\n" \
			     "msr cpsr, r6	\n" \
			     "stmia r4, {sp,lr}	\n" \
			     "nop		\n" \
			     "msr cpsr, r5	\n" \
			     "pop {r4, r5, r6}	\n" \
			     : \
			     : "r" (&(regs)->ARM_sp) \
			     : "memory", "cc"); \
	} while (0)

/*
 * To read the USER registers, the stm variant with the '^' qualifier
 * must be used.
 */
#define COPY_USER_BANKED_REGISTER(regs) \
	do { \
		asm volatile("push {r4, r5, r6}	\n" \
			     "mov r4, %0	\n" \
			     "mrs r5, cpsr	\n" \
			     "bic r6, r5, #31	\n" \
			     "orr r6, r6, #19	\n" \
			     "msr cpsr, r6	\n" \
			     "stmia r4, {r0-lr}^	\n" \
			     "nop		\n" \
			     "msr cpsr, r5	\n" \
			     "pop {r4, r5, r6}	\n" \
			     : \
			     : "r" (&(regs)->ARM_r0) \
			     : "memory", "cc"); \
	} while (0)

#define COPY_USER_BANKED_REGISTER2(regs) \
	do { \
		asm volatile("mov r4, %0	\n" \
			     "stmia r4, {sp,lr}^	\n" \
			     "nop		\n" \
			     : \
			     : "r" (&(regs)->ARM_sp) \
			     : "r4", "memory", "cc"); \
	} while (0)
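
/*
 * For reference: the decimal mode numbers passed to the COPY_*_BANKED
 * macros are the ARM CPSR.M[4:0] mode encodings:
 * 16 = 0x10 USR, 17 = 0x11 FIQ, 18 = 0x12 IRQ, 19 = 0x13 SVC,
 * 23 = 0x17 ABT, 27 = 0x1B UND.
 * The trailing '^' in "stmia r4, {r0-lr}^" selects the user-mode register
 * bank instead of the current mode's banked registers, which is how the
 * USER variants read user sp/lr from a privileged mode.
 */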
/**
 * Read the banked FIQ registers from SVC mode.
 */
void copy_banked_fiqregs(struct pt_regs *regs)
{
	unsigned long flags;

	flags = avm_arch_local_fiq_and_iq_save();
	COPY_OTHER_BANKED_REGISTER(regs, 17);	/* 0x11: FIQ mode */
	avm_arch_local_fiq_restore(flags);
}

/**
 * Read banked registers (sp and lr only) from FIQ context.
 * The previous processor mode must be correctly recorded in regs->ARM_cpsr!
 */
void copy_banked_regs(struct pt_regs *regs, const struct pt_regs *org_regs)
{
	unsigned int mode;

	/* TODO: reintroduce it as soon as this function is again only called
	 * from FIQ-context
	 * BUG_ON(is_cpu_mode_fiq() == 0);
	 */
	memcpy(regs, org_regs, sizeof(struct pt_regs));
	mode = processor_mode(regs);
	if (mode == SVC_MODE) {
		/*--- kernel sp for the current thread struct ---*/
		COPY_OTHER_BANKED_REGISTER2(regs, 19);
	} else if (mode == ABT_MODE) {
		COPY_OTHER_BANKED_REGISTER2(regs, 23);
	} else if (mode == IRQ_MODE) {
		COPY_OTHER_BANKED_REGISTER2(regs, 18);
	} else if (mode == UND_MODE) {
		COPY_OTHER_BANKED_REGISTER2(regs, 27);
	} else if (mode == FIQ_MODE) {
		/* In the corresponding pt_regs structure on the separate FIQ stack,
		 * the stored SP of the underlying interrupted FIQ function includes
		 * two additional positions (PC and R0_orig) from the interrupting
		 * assembler routine. These two positions don't belong to the
		 * underlying function and would confuse the UNWIND algorithm in
		 * case of a crash, because they don't have any link to the code of
		 * the underlying interrupted function! Fix it here by adjusting the
		 * SP! We should move this to the assembler routine, however;
		 * please refer to avm_fiqasm.S.
		 */
		regs->ARM_sp += 8;
	} else if (user_mode(regs)) {
		/*--- sp and lr of the user process ---*/
		COPY_USER_BANKED_REGISTER2(regs);
	}
}

/**
 * Read all banked registers from FIQ context.
 * The previous processor mode must be correctly recorded in regs->ARM_cpsr!
 */
void copy_banked_regs_full(struct pt_regs *regs, const struct pt_regs *org_regs)
{
	unsigned int mode;

	/* TODO: reintroduce it as soon as this function is again only called
	 * from FIQ-context
	 * BUG_ON(is_cpu_mode_fiq() == 0);
	 */
	memcpy(regs, org_regs, sizeof(struct pt_regs));
	mode = processor_mode(regs);
	if (mode == SVC_MODE) {
		/*--- kernel sp for the current thread struct ---*/
		COPY_OTHER_BANKED_REGISTER(regs, 19);
	} else if (mode == ABT_MODE) {
		COPY_OTHER_BANKED_REGISTER(regs, 23);
	} else if (mode == IRQ_MODE) {
		COPY_OTHER_BANKED_REGISTER(regs, 18);
	} else if (mode == UND_MODE) {
		COPY_OTHER_BANKED_REGISTER(regs, 27);
	} else if (mode == FIQ_MODE) {
		/* see the comment in copy_banked_regs() above */
		regs->ARM_sp += 8;
	} else if (user_mode(regs)) {
		COPY_USER_BANKED_REGISTER(regs);
	}
}

/**
 */
unsigned long get_svc_sp(void)
{
	struct pt_regs regs;

	/* BUG_ON(is_cpu_mode_fiq() == 0); */
	COPY_OTHER_BANKED_REGISTER2(&regs, 19); /*--- kernel sp for the current thread struct ---*/
	return regs.ARM_sp;
}

/**
 * \brief: Fix up the current register set if we are in FIQ context or in an
 *         exception taken from FIQ context.
 * param regs     destination register set
 * param org_regs source register set
 */
void prepare_register_for_trap(struct pt_regs *regs, struct pt_regs **org_regs)
{
	if (is_cpu_mode_fiq()) {
		/*--- we are in FIQ mode ---*/
		copy_banked_regs_full(regs, *org_regs);
		*org_regs = regs;
	} else {
		/*--- in Linux OS mode ---*/
		if (processor_mode(*org_regs) == FIQ_MODE) {
			/*--- ... but something odd happened in the FIQ ---*/
			memcpy(regs, *org_regs, sizeof(struct pt_regs));
			copy_banked_fiqregs(regs);
			*org_regs = regs;
		}
	}
}
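
/*
 * Usage sketch (illustrative, not a call site in this file): a crash/oops
 * path that may run in FIQ mode would fix up the register set before
 * unwinding:
 *
 *	struct pt_regs fixed_regs;
 *	struct pt_regs *regs = ...;   -- registers as saved at exception entry
 *
 *	prepare_register_for_trap(&fixed_regs, &regs);
 *	-- from here on use 'regs': it now points at a register set with the
 *	-- correct banked sp/lr for the interrupted mode
 */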
/**
 * \brief: Attention! The registers must first be fixed up with
 *         prepare_register_for_trap().
 */
struct thread_info *current_thread_info_depend_on_context(struct pt_regs *regs)
{
	if (regs == NULL) {
		return current_thread_info();
	} else if (unlikely(is_cpu_mode_fiq())) {
		/*--- we are in FIQ context ---*/
		if (processor_mode(regs) != FIQ_MODE) {
			return thread_info_by_sp(get_svc_sp()); /*--- fetch current_thread_info from SVC mode ---*/
		}
		return thread_info_by_sp(regs->ARM_sp); /*--- the FIQ's (pseudo-)current_thread_info ---*/
	}
	/*--- in SVC/USER context ---*/
	if (processor_mode(regs) == FIQ_MODE) {
		/*--- ... but something odd happened in the FIQ ---*/
		return thread_info_by_sp(regs->ARM_sp); /*--- the FIQ's (pseudo-)current_thread_info ---*/
	}
	/*--- this is the normal case ---*/
	return current_thread_info();
}

#if defined(CONFIG_PROC_FS)
/**
 */
static int fiq_proc_stat(struct seq_file *m, void *data __maybe_unused)
{
	int cpu, irq, nr_ints = avm_gic_fiq_nr_ints();
	unsigned long unhandled_count = 0;
	char line[256];
	char txtstat[128];
	unsigned long consume_promille[NR_CPUS];
	bool lock = !!m;
	struct semi_seq sseq;

	if (lock)
		mutex_lock(&avm_fiq_lock);
	if (!m)
		m = sseq_create(&sseq, KERN_ERR, line, sizeof(line));
	seq_printf(m, "%8s", "");
	for_each_possible_cpu(cpu) {
		consume_promille[cpu] = 0;
		seq_printf(m, "%10s%u", "CPU", cpu);
	}
	sseq_puts(m, "\n");
	for (irq = 0; irq < nr_ints; irq++) {
		char *name = "";
		int set = 0;
		int busy = 0;

		txtstat[0] = 0;
		for_each_possible_cpu(cpu) {
			avm_fiq_registration_entry_t *reg_entry = &(avm_fiq_registrations[cpu][irq]);

			if (atomic_read(&reg_entry->count) != 0) {
				set++;
			}
			if (atomic_read(&reg_entry->is_allocated) != 0) {
#if defined(FIQ_STATISTIC)
				unsigned long cprom = 0;

				fill_fiq_stat(txtstat, sizeof(txtstat), &reg_entry->statistic, 1, &cprom);
				consume_promille[cpu] += cprom;
#endif /*--- #if defined(FIQ_STATISTIC) ---*/
				name = reg_entry->name;
				busy |= atomic_read(&reg_entry->busy) << cpu;
				unhandled_count = atomic_read(&reg_entry->unhandled_count);
				set++;
			}
		}
		if (set == 0) {
			continue;
		}
		seq_printf(m, "%8u: ", irq);
		for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
			avm_fiq_registration_entry_t *reg_entry = &(avm_fiq_registrations[cpu][irq]);

			seq_printf(m, "%10u ", atomic_read(&reg_entry->count));
		}
		seq_printf(m, " %-14s %c %s", name, busy ? '*' : ' ', txtstat);
		if (unhandled_count)
			seq_printf(m, "unhandled: %lu", unhandled_count);
		sseq_puts(m, "\n");
	}
	avm_rte_ipi_show_list(m, 8, true);
	seq_puts(m, "preempts: ");
	for_each_possible_cpu(cpu)
		seq_printf(m, "%10u ", per_cpu(fiq_fiq_preemptions, cpu));
	sseq_puts(m, "\n");
	seq_puts(m, "spurious: ");
	for_each_possible_cpu(cpu)
		seq_printf(m, "%10u ", per_cpu(spurious_count, cpu));
	sseq_puts(m, "\n");
#if defined(FIQ_STATISTIC)
	seq_puts(m, " consum: ");
	for_each_possible_cpu(cpu)
		seq_printf(m, "%8lu.%lu ", consume_promille[cpu] / 10, consume_promille[cpu] % 10);
	sseq_puts(m, "\n");
#endif /*--- #if defined(FIQ_STATISTIC) ---*/
	if (lock)
		mutex_unlock(&avm_fiq_lock);
	return 0;
}

/**
 */
void avm_fiq_dump_stat(void)
{
	pr_err("FASTIRQ-Status:\n");
	fiq_proc_stat(NULL, NULL);
}

/**
 */
static int fiq_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, fiq_proc_stat, NULL);
}

/**
 */
static const struct file_operations fiq_proc_fops = {
	.open    = fiq_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

/**
 */
static int fiq_proc_init(void)
{
	proc_create("fastinterrupts", 0, NULL, &fiq_proc_fops);
	return 0;
}
#endif /*--- #if defined(CONFIG_PROC_FS) ---*/
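
/*
 * Illustrative /proc/fastinterrupts output (interrupt number, counts and
 * timings made up; the exact columns follow the seq_printf() calls above):
 *
 *	              CPU0       CPU1
 *	      39:   123456          0   mydev          * consum:min ... dt:min ... us
 *	preempts:        0          1
 *	spurious:        0          0
 *	 consum:       0.3        0.0
 *
 * The consum row prints consume_promille / 10 and % 10, i.e. the average
 * FIQ load as a percentage with one decimal place.
 */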