/*------------------------------------------------------------------------------------------*\ * * Copyright (C) 2016 AVM GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA \*------------------------------------------------------------------------------------------*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(CONFIG_PROC_FS) #include #endif #include #if defined(CONFIG_AVM_SIMPLE_PROFILING) #include #endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/ #ifdef CONFIG_AVM_FASTIRQ_TZ #include #include #include #define avm_secmon_fiqhandler_install(r) (avm_tz_disable_fiq_handling_in_linux(&r)) #define avm_secmon_fiqhandler_uninstall(r) (avm_tz_enable_fiq_handling_in_linux(&r)) uint32_t version, modified; #else #include "../../../../../shared/opensource/include/bcm963xx/63138_intr.h" unsigned int avm_secmon_fiqhandler_prepare(void); void avm_secmon_fiqhandler_cleanup(unsigned int mvbar); #define avm_secmon_fiqhandler_install(r) (r = avm_secmon_fiqhandler_prepare()) #define avm_secmon_fiqhandler_uninstall(r) (avm_secmon_fiqhandler_cleanup(r)) #endif #define FASTIRQ_HANDLED 1 #define MAX_FIQ_NAME_SIZE 63 #define __get_cpu_var(var) (*(raw_cpu_ptr((&var)))) #define DBG_TRC(args...) /*--- #define DBG_TRC(args...) printk(KERN_INFO args) ---*/ /*------------------------------------------------------------------------------------------*/ /* Externes */ // Externe Symbole des FIQ Low-Level Handlers extern unsigned int avm_fiq_stacks; extern void avm_fiq_handler(void); extern unsigned char avm_fiq_handler_begin, avm_fiq_handler_end; /* Internes */ // Interne Prototypen static void avm_fiq_setup_helper(void *handler); static void avm_set_fiq_handler(void *start, unsigned int length); // Interne Datenstrukturen //static spinlock_t wd_mask_lock; static DEFINE_MUTEX(avm_fiq_lock); static DEFINE_PER_CPU(void *, avm_fiq_stack); static DEFINE_PER_CPU(void *, avm_fiq_task); static DEFINE_PER_CPU(void *, avm_fiq_registration); DEFINE_PER_CPU(struct pt_regs *, __fiq_regs); DEFINE_PER_CPU(unsigned int, spurious_count); DEFINE_PER_CPU(unsigned int, fiq_fiq_preemptions); DEFINE_PER_CPU(unsigned int, recursion_depth); unsigned int *avm_fiq_stack_array[NR_CPUS]; EXPORT_SYMBOL(avm_fiq_stack_array); #define FIQ_STATISTIC #if defined(FIQ_STATISTIC) struct _fiq_stat { unsigned long last_t; unsigned long cnt; unsigned long consumption_min; unsigned long consumption_max; unsigned long long consumption_sum; unsigned long dtrigger_min; unsigned long dtrigger_max; unsigned long long dtrigger_sum; }; /*--------------------------------------------------------------------------------*\ \*--------------------------------------------------------------------------------*/ static void init_fiq_stat(struct _fiq_stat *pgstat) { pgstat->cnt = 0; pgstat->consumption_min = LONG_MAX; pgstat->consumption_max = 0; pgstat->consumption_sum = 0; pgstat->dtrigger_min = LONG_MAX; pgstat->dtrigger_max = 0; pgstat->dtrigger_sum = 0; } /*--------------------------------------------------------------------------------*\ \*--------------------------------------------------------------------------------*/ static void fiq_stat(struct _fiq_stat *pgstat, unsigned long tstart, unsigned long tend){ unsigned long dtrigger, consumption; if(pgstat->last_t == 0) { init_fiq_stat(pgstat); pgstat->last_t = tstart | 0x1; return; } consumption = tend - tstart; if(consumption > pgstat->consumption_max) pgstat->consumption_max = consumption; if(consumption < pgstat->consumption_min) pgstat->consumption_min = consumption; pgstat->consumption_sum += (unsigned long long)consumption; dtrigger = (tstart | 0x1) - pgstat->last_t; if(dtrigger > pgstat->dtrigger_max) pgstat->dtrigger_max = dtrigger; if(dtrigger < pgstat->dtrigger_min) pgstat->dtrigger_min = dtrigger; pgstat->dtrigger_sum += (unsigned long long)dtrigger; pgstat->cnt++; pgstat->last_t = tstart | 0x1; } /*--------------------------------------------------------------------------------*\ \*--------------------------------------------------------------------------------*/ static inline void fiq_stat_reset(struct _fiq_stat *pgstat) { pgstat->last_t = 0; } #define CLK_TO_USEC(a) (a) / (avm_get_cyclefreq() / 1000000) /*--------------------------------------------------------------------------------*\ \*--------------------------------------------------------------------------------*/ static char *fill_fiq_stat(char *txt, int txt_len, struct _fiq_stat *pgstat, unsigned int reset, unsigned long *consume_promille) { struct _fiq_stat gstat; unsigned long cnt; unsigned long long c_avg, l_avg; unsigned long c_avg_us, l_avg_us; cnt = pgstat->cnt; txt[0] = 0; if(cnt == 0) { if(consume_promille) *consume_promille = 0; return txt; } memcpy(&gstat, pgstat, sizeof(gstat)); if(reset) { fiq_stat_reset(pgstat); } c_avg = gstat.consumption_sum; do_div(c_avg, cnt); c_avg_us = CLK_TO_USEC((unsigned long)c_avg); l_avg = gstat.dtrigger_sum; do_div(l_avg, cnt); l_avg_us = CLK_TO_USEC((unsigned long)l_avg); snprintf(txt, txt_len, "consum:min %6lu max %6lu avg %6lu dt:min %6lu max %6lu avg %6lu us", CLK_TO_USEC(gstat.consumption_min), CLK_TO_USEC(gstat.consumption_max), c_avg_us, CLK_TO_USEC(gstat.dtrigger_min), CLK_TO_USEC(gstat.dtrigger_max), l_avg_us); if(consume_promille) { *consume_promille = ((c_avg_us + 1) * 1000) / l_avg_us; /*--- 32 bit range reicht (max = 1 Mio * 1000) ---*/ } return txt; } #endif/*--- #if defined(FIQ_STATISTIC) ---*/ typedef struct avm_fiq_registration_entry_s { atomic_t is_allocated; void *ref; avm_fiq_cb_t fiq_handler; atomic_t count; atomic_t unhandled_count; atomic_t busy; char name[MAX_FIQ_NAME_SIZE + 1]; #if defined(FIQ_STATISTIC) struct _fiq_stat statistic; #endif/*--- #if defined(FIQ_STATISTIC) ---*/ } avm_fiq_registration_entry_t; #define FIQ_REG_ENTRY_UNALLOCATED 0 #define FIQ_REG_ENTRY_ALLOCATED 1 #define FIQ_REG_ENTRY_ALLOCATED_TZ 2 avm_fiq_registration_entry_t *avm_fiq_registrations[NR_CPUS]; unsigned int fiq_stack[NR_CPUS][THREAD_SIZE / sizeof(unsigned int)] __attribute__((aligned(THREAD_SIZE), externally_visible)); unsigned int fiq_task[NR_CPUS][THREAD_SIZE / sizeof(unsigned int)] __attribute__((aligned(THREAD_SIZE), externally_visible)); #if defined(CONFIG_PROC_FS) static int fiq_proc_init(void); #endif/*--- #if defined(CONFIG_PROC_FS) ---*/ /*------------------------------------------------------------------------------------------*/ /* Interface-Funktionen */ /*--------------------------------------------------------------------------------*\ * Belegten FIQ einschalten * ret: Ungültige CPU-Nr: -EINVAL * Ungültiger IRQ: -EINVAL * War nicht korrekt angefordert: -EINVAL * Sonst 0 \*--------------------------------------------------------------------------------*/ int avm_enable_fiq_on(int cpu, unsigned int irq) { avm_fiq_registration_entry_t *reg_entry; int ret = 0; mutex_lock(&avm_fiq_lock); if( NR_CPUS - 1 < cpu ) ret = -EINVAL; if( avm_gic_fiq_nr_ints() - 1 < irq ) ret = -EINVAL; reg_entry = &(avm_fiq_registrations[cpu][irq]); if( atomic_read(®_entry->is_allocated) == FIQ_REG_ENTRY_UNALLOCATED ) ret = -EINVAL; if( ret >= 0 ) { avm_gic_fiq_enable(cpu, irq); } mutex_unlock(&avm_fiq_lock); return ret; } EXPORT_SYMBOL(avm_enable_fiq_on); /*--------------------------------------------------------------------------------*\ * Belegten FIQ ausschalten * ret: Ungültige CPU-Nr: -EINVAL * Ungültiger IRQ: -EINVAL * War nicht korrekt angefordert: -EINVAL * Sonst 0 \*--------------------------------------------------------------------------------*/ int avm_disable_fiq_on(int cpu, unsigned int irq) { avm_fiq_registration_entry_t *reg_entry; int ret = 0; mutex_lock(&avm_fiq_lock); if( NR_CPUS - 1 < cpu ) ret = -EINVAL; if( avm_gic_fiq_nr_ints() - 1 < irq ) ret = -EINVAL; reg_entry = &(avm_fiq_registrations[cpu][irq]); if( atomic_read(®_entry->is_allocated) == FIQ_REG_ENTRY_UNALLOCATED ) ret = -EINVAL; if( ret >= 0 ) { avm_gic_fiq_disable(cpu, irq); } mutex_unlock(&avm_fiq_lock); return ret; } EXPORT_SYMBOL(avm_disable_fiq_on); /*--------------------------------------------------------------------------------*\ * FIQ belegen * ret: Ungültige CPU-Nr: -EINVAL * Ungültiger IRQ: -EINVAL * Bereits belegt: -EBUSY * Sonst 0 \*--------------------------------------------------------------------------------*/ int avm_request_fiq_on(int cpu, unsigned int irq, avm_fiq_cb_t handler, unsigned long fiqflags, const char *devname, void *dev_id) { struct irq_desc *irq_desc = 0; avm_fiq_registration_entry_t *reg_entry; int ret = 0, cpumask, mode; #ifdef CONFIG_AVM_FASTIRQ_TZ if(avm_get_tz_version(&version, &modified)){ printk(KERN_ERR"%s warning fiq not supported - update urloader\n", __func__); return -EINVAL; } #endif/*--- #ifdef CONFIG_AVM_FASTIRQ_TZ ---*/ /* AVM * Linux 4 and the gic support irq domains which leads to virtual irq numbers * and a mapping on requesting the irq from the device tree. * Unfortunately the fastirq implementation does not support this feature yet. * As a workaround the hwirq number is used when a fastirq is requested. */ if(fiqflags & FIQ_HWIRQ) { printk(KERN_ERR "[%s] Forcing HW-IRQ %u instead\n", __func__, irq); } else { irq_desc = irq_to_desc(irq); if (irq_desc) { printk(KERN_WARNING "[%s] Virtual IRQ numbers are not supported. Use HWIRQ (%lu) for requested IRQ (%u)\n", __func__, irq_desc->irq_data.hwirq, irq); irq = irq_desc->irq_data.hwirq; } else { printk(KERN_ERR "[%s] Unable to get IRQ desc for virtual irq conversion of irq (%u)\n", __func__, irq); printk(KERN_ERR "[%s] Using virtual IRQ (%u) instead\n", __func__, irq); } } mutex_lock(&avm_fiq_lock); DBG_TRC("%s: cpu=%d irq=%u '%s' dev_id=%p\n", __func__, cpu, irq, devname, dev_id); if(fiqflags & FIQ_CPUMASK){ cpumask = cpu; if(cpumask & ~((1 << NR_CPUS) - 1)) { printk(KERN_ERR"%s: error inval cpumask=%x\n", __func__, cpumask); ret = -EINVAL; goto request_exit; } } else if ( NR_CPUS - 1 < cpu ) { printk(KERN_ERR"%s: error inval cpu=%d\n", __func__, cpu); ret = -EINVAL; goto request_exit; } else { cpumask = 1 << cpu; } if( avm_gic_fiq_nr_ints() - 1 < irq ) { printk(KERN_ERR"%s: error inval irq=%u\n", __func__, irq); ret = -EINVAL; goto request_exit; } if( handler == NULL ) { printk(KERN_ERR"%s: error inval handler=%p\n", __func__, handler); ret = -EINVAL; goto request_exit; } for(cpu = 0; cpu < NR_CPUS; cpu++) { if(((1 << cpu) & cpumask) == 0) { continue; } reg_entry = &(avm_fiq_registrations[cpu][irq]); if( atomic_read(®_entry->is_allocated) == FIQ_REG_ENTRY_ALLOCATED ) { printk(KERN_ERR"%s: error irq=%u in use\n", __func__, irq); ret = -EBUSY; goto request_exit; } atomic_set(®_entry->is_allocated, FIQ_REG_ENTRY_ALLOCATED); reg_entry->ref = dev_id; reg_entry->fiq_handler = handler; atomic_set(®_entry->count, 0); #if defined(FIQ_STATISTIC) fiq_stat_reset(®_entry->statistic); #endif/*--- #if defined(FIQ_STATISTIC) ---*/ (void)strncpy(reg_entry->name, devname, MAX_FIQ_NAME_SIZE); reg_entry->name[MAX_FIQ_NAME_SIZE] = '\0'; } mode = 0; if(fiqflags & FIQ_EDGE) { mode |= 2; } if(fiqflags & FIQ_1_N) { mode |= 1; } avm_gic_fiq_setup(irq, cpumask, 0x40, mode, 0x00); request_exit: mutex_unlock(&avm_fiq_lock); return ret; } EXPORT_SYMBOL(avm_request_fiq_on); /*--------------------------------------------------------------------------------*\ * FIQ freigeben * ret: Ungültige CPU-Nr: -EINVAL * Ungültiger IRQ: -EINVAL * Unpassende Ref: -EINVAL * Sonst 0 \*--------------------------------------------------------------------------------*/ int avm_free_fiq_on(int cpu, unsigned int irq, void *dev_id) { avm_fiq_registration_entry_t *reg_entry; int ret = 0; mutex_lock(&avm_fiq_lock); DBG_TRC("%s: cpu=%d irq=%u dev_id=%p\n", __func__, cpu, irq, dev_id); if( NR_CPUS - 1 < cpu ) { printk(KERN_ERR"%s: error inval cpu=%d\n", __func__, cpu); ret = -EINVAL; goto free_exit; } if( avm_gic_fiq_nr_ints() - 1 < irq ) { printk(KERN_ERR"%s: error inval irq=%u\n", __func__, irq); ret = -EINVAL; goto free_exit; } reg_entry = &(avm_fiq_registrations[cpu][irq]); if( reg_entry->ref != dev_id ) { printk(KERN_ERR"%s: error inval dev_id=%u\n", __func__, irq); ret = -EINVAL; goto free_exit; } atomic_set(®_entry->is_allocated, FIQ_REG_ENTRY_UNALLOCATED); avm_gic_fiq_disable(irq, cpu); free_exit: mutex_unlock(&avm_fiq_lock); return ret; } EXPORT_SYMBOL(avm_free_fiq_on); extern void __iomem *DIST_BASE; static volatile int tz_interrupt_handler(unsigned int intNr) { #ifdef CONFIG_AVM_FASTIRQ_TZ /*--- We have a TZ FIQ->Hand it to TZ by SMC ---*/ register u32 r0 asm("r0"); register u32 r1 asm("r1"); #if defined(FIQ_STATISTIC) unsigned long tstart; avm_fiq_registration_entry_t *reg_entry; reg_entry = &(avm_fiq_registrations[raw_smp_processor_id()][intNr]); if(atomic_read(®_entry->is_allocated) == FIQ_REG_ENTRY_UNALLOCATED){ atomic_set(®_entry->is_allocated, FIQ_REG_ENTRY_ALLOCATED_TZ); atomic_set(®_entry->count, 0); } atomic_set(®_entry->busy, 1); atomic_inc(®_entry->count); tstart = avm_get_cycles(); #endif/*--- #if defined(FIQ_STATISTIC) ---*/ r0 = AVM_SMC_FIQ_FOR_TZ; r1 = intNr; asm volatile( __asmeq("%0", "r0") __asmeq("%1", "r1") ".arch_extension sec\n" "smc #0 @ switch to secure world\n" : "=r" (r0), "=r" (r1) : "r" (r0), "r" (r1) : "r2", "r3", "cc" ); #if defined(FIQ_STATISTIC) atomic_set(®_entry->busy, 0); fiq_stat(®_entry->statistic, tstart, avm_get_cycles()); #endif/*--- #if defined(FIQ_STATISTIC) ---*/ return IRQ_HANDLED; #else/*--- #ifdef CONFIG_AVM_FASTIRQ_TZ ---*/ return IRQ_NONE; #endif/*--- #else ---*//*--- #ifdef CONFIG_AVM_FASTIRQ_TZ ---*/ } /*--------------------------------------------------------------------------------*\ * High-Level FIQ-Handler \*--------------------------------------------------------------------------------*/ void avm_mask_all_fiqs_down(unsigned int fiq_prio, unsigned long *restore_PMR, unsigned long flags) { *restore_PMR = get_ICCPMR(IS_ATOMIC); set_ICCPMR(fiq_prio, IS_ATOMIC); } EXPORT_SYMBOL(avm_mask_all_fiqs_down); void avm_unmask_all_fiqs_up(unsigned long restore_PMR, unsigned long flags) { set_ICCPMR(restore_PMR, IS_ATOMIC); } EXPORT_SYMBOL(avm_unmask_all_fiqs_up); unsigned int avm_secmon_fiqhandler_prepare(void); void avm_secmon_fiqhandler_cleanup(unsigned int mvbar); static void avm_fiq_high_level_handler(struct pt_regs *regs, unsigned int intNr, unsigned int userRef) { unsigned int mvbar = 0; // unsigned long restore_PMR = 0; // unsigned int cpu = raw_smp_processor_id(); intNr = intNr & ~0x1c00; if (likely(intNr > 15 && intNr < avm_gic_fiq_nr_ints())) { avm_fiq_registration_entry_t *reg_entry = &(((avm_fiq_registration_entry_t *)(userRef))[intNr]); if( likely(atomic_read(®_entry->is_allocated) == FIQ_REG_ENTRY_ALLOCATED )) { #if defined(FIQ_STATISTIC) unsigned long tstart = avm_get_cycles(); #endif/*--- #if defined(FIQ_STATISTIC) ---*/ atomic_set(®_entry->busy, 1); if(likely(intNr < AVM_IRQ_MESSAGING_START)) { /*--- Watchdog- und Sync-Trigger Handling (0x10 bzw. 0x00) beim Backtrace aussparen ---*/ if (get_ICDIPR(intNr, IS_ATOMIC) > FIQ_PRIO_WATCHDOG) { set_fiq_regs(regs); } } atomic_inc(®_entry->count); #if defined(CONFIG_AVM_SIMPLE_PROFILING) avm_simple_profiling_log(avm_profile_data_type_hw_irq_begin, (unsigned int)reg_entry, intNr); #endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/ if(likely(intNr < AVM_IRQ_MESSAGING_START)) { __this_cpu_write(recursion_depth, (__this_cpu_read(recursion_depth) + 1)); if(__this_cpu_read(recursion_depth) == 2) { __this_cpu_write(fiq_fiq_preemptions, (__this_cpu_read(fiq_fiq_preemptions) + 1)); } #ifdef CONFIG_AVM_FASTIRQ_TZ if (version >= 50000000/*50002000*/) // alte Trustzone ==> keine FIQ Preemption! #endif { if( (__this_cpu_read(recursion_depth) - 1) == 0 ) { avm_secmon_fiqhandler_install(mvbar); } if(!(intNr == AVM_IRQ_WD)) local_fiq_enable(); } (reg_entry->fiq_handler)((int)intNr, reg_entry->ref); #ifdef CONFIG_AVM_FASTIRQ_TZ if (version >= 50000000) // alte Trustzone ==> keine FIQ Preemption! #endif { if(!(intNr == AVM_IRQ_WD)) local_fiq_disable(); if( (__this_cpu_read(recursion_depth) - 1) == 0 ) { avm_secmon_fiqhandler_uninstall(mvbar); } } __this_cpu_write(recursion_depth, (__this_cpu_read(recursion_depth) - 1)); } else { (reg_entry->fiq_handler)((int)intNr, reg_entry->ref); } #if defined(CONFIG_AVM_SIMPLE_PROFILING) avm_simple_profiling_log(avm_profile_data_type_hw_irq_end, (unsigned int)reg_entry, intNr); #endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/ atomic_set(®_entry->busy, 0); #if defined(FIQ_STATISTIC) fiq_stat(®_entry->statistic, tstart, avm_get_cycles()); #endif/*--- #if defined(FIQ_STATISTIC) ---*/ } else if(tz_interrupt_handler(intNr) == IRQ_NONE) { atomic_inc(®_entry->unhandled_count); /*--- avm_gic_fiq_raise_irq(intNr); ---*/ } } else { if(intNr <= 15) { dsb(); printk(KERN_ERR"%s: inval IPI irq=%u\n", __func__, intNr); /*--- writel_relaxed( ((1 << ((intNr >> 10) & 0x7)) << 16) | (1 << 15) | (intNr & 0xF), DIST_BASE + GIC_DIST_SOFTINT ); ---*/ } else { // 1023 is the spurious interrupt number if (intNr != 1023 && intNr != 1022) { printk(KERN_ERR"%s: invalid firq=%u\n", __func__, intNr); } __this_cpu_write(spurious_count, (__this_cpu_read(spurious_count) + 1)); } } } /*------------------------------------------------------------------------------------------*/ /* Initialisierung */ #ifdef CONFIG_AVM_FASTIRQ_TZ extern void *jump_into_linux; extern void *avm_fiqhandler_regsave; #endif int avm_fiq_setup(void) { int ret; int cpu; // firq_spin_lock_init(&wd_mask_lock); #ifdef CONFIG_AVM_FASTIRQ_TZ if(avm_get_tz_version(&version, &modified)){ pr_err("FIQ Handler not installed! QCA TZ\n"); return -ENODEV; } #endif mutex_lock(&avm_fiq_lock); avm_gic_fiq_init(); if (__get_cpu_var(avm_fiq_registration)) { ret = -EBUSY; goto err_avm_fiq_busy; } for_each_possible_cpu(cpu) { void *stack; void *task_info; void *registration; registration = kmalloc( sizeof(avm_fiq_registration_entry_t) * avm_gic_fiq_nr_ints(), GFP_KERNEL ); if (WARN_ON(!registration)){ ret = -ENOMEM; goto err_alloc_avm_fiq; } stack = (void *)fiq_stack[cpu]; if (WARN_ON(!stack)) { ret = -ENOMEM; goto err_alloc_avm_fiq; } task_info = (void *)fiq_task[cpu]; if (WARN_ON(!task_info)) { ret = -ENOMEM; goto err_alloc_avm_fiq; } per_cpu(avm_fiq_stack, cpu) = stack; per_cpu(avm_fiq_task, cpu) = task_info; per_cpu(avm_fiq_registration, cpu) = registration; per_cpu(fiq_fiq_preemptions, cpu) = 0; per_cpu(spurious_count, cpu) = 0; } on_each_cpu(avm_fiq_setup_helper, avm_fiq_high_level_handler, true); avm_fiq_stacks = (unsigned int)(avm_fiq_stack_array); avm_set_fiq_handler(avm_fiq_handler, &avm_fiq_handler_end - (unsigned char *)avm_fiq_handler); #ifdef CONFIG_AVM_FASTIRQ_TZ avm_tz_set_fiq_handler(&jump_into_linux, virt_to_phys(&avm_fiqhandler_regsave)); on_each_cpu(avm_tz_enable_fiq_handling_in_linux, (void *) 0, 1); if(modified){ printk("[AVM_FIQ] (Version %s)(TZ %dM)\n", "1.0", version); } else{ printk("[AVM_FIQ] (Version %s)(TZ %d)\n", "1.0", version); } #else printk("[AVM_FIQ] (Version %s, build: %s %s)\n", "1.0", __DATE__, __TIME__); #endif mutex_unlock(&avm_fiq_lock); #if defined(CONFIG_PROC_FS) fiq_proc_init(); #endif/*--- #if defined(CONFIG_PROC_FS) ---*/ return 0; err_alloc_avm_fiq: for_each_possible_cpu(cpu) { per_cpu(avm_fiq_stack, cpu) = NULL; per_cpu(avm_fiq_task, cpu) = NULL; kfree(per_cpu(avm_fiq_registration, cpu)); per_cpu(avm_fiq_registration, cpu) = NULL; } err_avm_fiq_busy: mutex_unlock(&avm_fiq_lock); return ret; } postcore_initcall(avm_fiq_setup); /* Interne Initialisierungs-Hilfsfunktionen */ #define __set_fiq_banked(data) \ asm volatile( "push {r4, r5, r6} \n" \ "mov r4, %0 \n" \ "mrs r5, cpsr \n" \ "bic r6, r5, #31 \n" \ "orr r6, r6, #17 \n" \ "msr cpsr, r6 \n" \ "ldm r4, {r10-r12,sp} \n" \ "nop \n" \ "msr cpsr, r5 \n" \ "pop {r4, r5, r6} \n" \ : \ : "r" (data) \ : "memory", "cc" \ ); /*--------------------------------------------------------------------------------*\ \*--------------------------------------------------------------------------------*/ static void set_fiq_banked(void *fiq_stackpointer, void *cpubase, void *userref, void *handler) { unsigned long flags; void *data[4]; data[0] = userref; /*--- r10 ---*/ data[1] = handler; /*--- r11 ---*/ data[2] = cpubase; /*--- r12 ---*/ data[3] = fiq_stackpointer; /*--- sp ---*/ flags = avm_arch_local_fiq_and_iq_save(); __set_fiq_banked(data); avm_arch_local_fiq_restore(flags); } /*--------------------------------------------------------------------------------*\ \*--------------------------------------------------------------------------------*/ static void avm_fiq_setup_helper(void *handler) { unsigned int CPU_NR = raw_smp_processor_id(); unsigned int *stackStart = (unsigned int *)(__get_cpu_var(avm_fiq_stack) + THREAD_SIZE - 4); struct thread_info *threadInfoStart = (struct thread_info *)((unsigned int)stackStart & ~(THREAD_SIZE - 1)); struct task_struct *taskStructStart = (struct task_struct *)(__get_cpu_var(avm_fiq_task)); avm_fiq_registration_entry_t *registrationStart = (avm_fiq_registration_entry_t *)(__get_cpu_var(avm_fiq_registration)); memset(__get_cpu_var(avm_fiq_stack), 0, THREAD_SIZE); memset(__get_cpu_var(avm_fiq_task), 0, THREAD_SIZE); memset(__get_cpu_var(avm_fiq_registration), 0, sizeof(avm_fiq_registration_entry_t) * avm_gic_fiq_nr_ints()); (void)snprintf(taskStructStart->comm, TASK_COMM_LEN, "FIQ_CPU%d", CPU_NR); threadInfoStart->task = taskStructStart; threadInfoStart->flags = 0; threadInfoStart->preempt_count= INIT_PREEMPT_COUNT; threadInfoStart->addr_limit = KERNEL_DS; threadInfoStart->cpu = CPU_NR; threadInfoStart->cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | domain_val(DOMAIN_IO, DOMAIN_CLIENT); *stackStart-- = (unsigned int)CPU_NR; *stackStart = ((unsigned int)(stackStart)) - 16; stackStart-- ; *stackStart-- = (unsigned int)avm_gic_fiq_dist_base(); *stackStart-- = (unsigned int)avm_gic_fiq_cpu_base(); *stackStart-- = (unsigned int)avm_fiq_high_level_handler; *stackStart = (unsigned int)registrationStart; taskStructStart->stack = __get_cpu_var(avm_fiq_stack); /*--- taskStruct enthaelt pointer auf stack-page! ---*/ set_fiq_banked(stackStart, avm_gic_fiq_cpu_base(), registrationStart, avm_fiq_high_level_handler); avm_fiq_stack_array[CPU_NR] = stackStart; avm_fiq_registrations[CPU_NR] = registrationStart; } static void avm_set_fiq_handler(void *start, unsigned int length) { /** * FIQ location is at the end of the exception vector table (0x1C). * If the FIQ handler code is placed directly at the end of the vector table, * no branch is required - the code can execute directly from 0x1C. */ memcpy(vectors_page + 0x1c, start, length); flush_icache_range((unsigned long)vectors_page + 0x1c, (unsigned long)vectors_page + 0x1c + length); } /*--------------------------------------------------------------------------------*\ * DAKOTA hat Virtualisierungseinheit (Broadcom leider nicht) \*--------------------------------------------------------------------------------*/ #define read_special(r) ({ \ u32 __val; \ asm volatile( \ ".arch armv7-a \n" \ ".arch_extension virt \n" \ "mrs %0, " __stringify(r) \ : "=r" (__val) \ : \ : "memory", "cc"); \ __val; \ }) #define COPY_OTHER_BANKED_REGISTER(regs, mode) \ asm volatile( "push {r4, r5, r6} \n" \ "mov r4, %0 \n" \ "mrs r5, cpsr \n" \ "bic r6, r5, #31 \n" \ "orr r6, r6, #"#mode " \n" \ "msr cpsr, r6 \n" \ "stmia r4, {r8,r9,r10,r11,r12,r13,r14} \n" \ "nop \n" \ "msr cpsr, r5 \n" \ "pop {r4, r5, r6} \n" \ : \ : "r" (&(regs)->ARM_r8) \ : "memory", "cc" \ ); #define COPY_OTHER_BANKED_REGISTER2(regs, mode) \ asm volatile( "push {r4, r5, r6} \n" \ "mov r4, %0 \n" \ "mrs r5, cpsr \n" \ "bic r6, r5, #31 \n" \ "orr r6, r6, #"#mode " \n" \ "msr cpsr, r6 \n" \ "stmia r4, {sp,lr} \n" \ "nop \n" \ "msr cpsr, r5 \n" \ "pop {r4, r5, r6} \n" \ : \ : "r" (&(regs)->ARM_sp) \ : "memory", "cc" \ ); /*--------------------------------------------------------------------------------*\ * um USER-Register zu lesen, stm-"dach" verwendet werden \*--------------------------------------------------------------------------------*/ #define COPY_USER_BANKED_REGISTER(regs) \ asm volatile( "push {r4, r5, r6} \n" \ "mov r4, %0 \n" \ "mrs r5, cpsr \n" \ "bic r6, r5, #31 \n" \ "orr r6, r6, #19 \n" \ "msr cpsr, r6 \n" \ "stmia r4, {r0-lr}^; \n" \ "nop \n" \ "msr cpsr, r5 \n" \ "pop {r4, r5, r6} \n" \ : \ : "r" (&(regs)->ARM_r0) \ : "memory", "cc" \ ); #define COPY_USER_BANKED_REGISTER2(regs) \ asm volatile( "mov r4, %0 \n" \ "stmia r4, {sp,lr}^; \n" \ "nop \n" \ : \ : "r" (&(regs)->ARM_sp) \ : "memory", "cc" \ ); /*--------------------------------------------------------------------------------*\ * lese banked fiq-register aus SVC-Mode \*--------------------------------------------------------------------------------*/ void copy_banked_fiqregs(struct pt_regs *regs) { unsigned long flags; flags = avm_arch_local_fiq_and_iq_save(); COPY_OTHER_BANKED_REGISTER(regs, 17); avm_arch_local_fiq_restore(flags); } /**--------------------------------------------------------------------------------**\ * Lese banked register aus FIQ-Kontext * nur sp und lr * in regs->Arm_cpsr muss der vorhergehende processor-mode korrekt vermerkt sein! \**--------------------------------------------------------------------------------**/ void copy_banked_regs(struct pt_regs *regs, const struct pt_regs *org_regs) { unsigned int mode; BUG_ON(is_cpu_mode_fiq() == 0); memcpy(regs, org_regs, sizeof(struct pt_regs)); mode = processor_mode(regs); if(mode == SVC_MODE) { COPY_OTHER_BANKED_REGISTER2(regs, 19); /*---Kernel-sp fuer current-thread-struct ---*/ } else if(mode == ABT_MODE) { COPY_OTHER_BANKED_REGISTER2(regs, 23); } else if(mode == IRQ_MODE) { COPY_OTHER_BANKED_REGISTER2(regs, 18); } else if(mode == UND_MODE) { COPY_OTHER_BANKED_REGISTER2(regs, 27); } else if(user_mode(regs)) { COPY_USER_BANKED_REGISTER2(regs); /*--- sp und lr vom user-process ---*/ } } /**--------------------------------------------------------------------------------**\ * Lese alle banked register aus FIQ-Kontext * in regs->Arm_cpsr muss der vorhergehende processor-mode korrekt vermerkt sein! \**--------------------------------------------------------------------------------**/ void copy_banked_regs_full(struct pt_regs *regs, const struct pt_regs *org_regs) { unsigned int mode; //BUG_ON(is_cpu_mode_fiq() == 0); memcpy(regs, org_regs, sizeof(struct pt_regs)); mode = processor_mode(regs); if(mode == SVC_MODE) { COPY_OTHER_BANKED_REGISTER(regs, 19); /*---Kernel-sp fuer current-thread-struct ---*/ } else if(mode == ABT_MODE) { COPY_OTHER_BANKED_REGISTER(regs, 23); } else if(mode == IRQ_MODE) { COPY_OTHER_BANKED_REGISTER(regs, 18); } else if(mode == UND_MODE) { COPY_OTHER_BANKED_REGISTER(regs, 27); } else if(user_mode(regs)) { COPY_USER_BANKED_REGISTER(regs); } } /*--------------------------------------------------------------------------------*\ \*--------------------------------------------------------------------------------*/ unsigned long get_svc_sp(void) { struct pt_regs regs; //BUG_ON(is_cpu_mode_fiq() == 0); COPY_OTHER_BANKED_REGISTER2(®s, 19); /*---Kernel-sp fuer current-thread-struct ---*/ return regs.ARM_sp; } /**--------------------------------------------------------------------------------**\ * \brief: korrigiere aktuelles Registerset falls im FIQ-Kontext oder Exception aus FIQ-Kontext * param regs Ziel-Registerset * param org_regs Source-Registerset \**--------------------------------------------------------------------------------**/ void prepare_register_for_trap(struct pt_regs *regs, struct pt_regs **org_regs) { if (is_cpu_mode_fiq()) { /*--- wir sind in FIQ-Modus ---*/ copy_banked_regs_full(regs, *org_regs); *org_regs = regs; } else { /*--- im Linux-OS-Modus ---*/ if(processor_mode(regs) == FIQ_MODE) { /*--- ... aber da ist was komisches im FIQ passiert ---*/ memcpy(regs, *org_regs, sizeof(struct pt_regs)); copy_banked_fiqregs(regs); *org_regs = regs; } } } /**--------------------------------------------------------------------------------**\ * \brief: Achtung! Vorher muessen die Register mittels prepare_register_for_trap() * korrigiert werden \**--------------------------------------------------------------------------------**/ struct thread_info *current_thread_info_depend_on_context(struct pt_regs *regs) { if(regs == NULL) { return current_thread_info(); } else if (unlikely(is_cpu_mode_fiq())) { /*--- sind im FIQ-Kontext ---*/ if(processor_mode(regs) != FIQ_MODE) { return thread_info_by_sp(get_svc_sp()); /*--- current_thread_info aus SVC-Mode holen ---*/ } return thread_info_by_sp(regs->ARM_sp); /*--- (pseudo-)current_thread_info des FIQ's ---*/ } /*--- im SVC/USER-Kontext---*/ if(processor_mode(regs) == FIQ_MODE) { /*--- ... aber da ist was komisches im FIQ passiert ---*/ return thread_info_by_sp(regs->ARM_sp); /*--- (pseudo-)current_threadinfo des FIQ's ---*/ } /*--- das ist der Normalfall ---*/ return current_thread_info(); } #if defined(CONFIG_PROC_FS) #define local_print(m, args ...) if(m) seq_printf(m, args); else printk(KERN_ERR args) /*--------------------------------------------------------------------------------*\ \*--------------------------------------------------------------------------------*/ static int fiq_proc_stat(struct seq_file *m, void *data __maybe_unused) { int cpu, irq, nr_ints = avm_gic_fiq_nr_ints(); unsigned long unhandled_count = 0; char txtbuf[256]; char txtstat[128]; char *ptxt; unsigned int written, len; unsigned long consume_promille[NR_CPUS]; if(m) mutex_lock(&avm_fiq_lock); ptxt = txtbuf, len = sizeof(txtbuf); written = snprintf(ptxt, len, "%8s", ""); written = min(written, len); ptxt += written, len -= written; for_each_possible_cpu(cpu) { consume_promille[cpu] = 0; written = snprintf(ptxt, len, "%11s%u", "CPU", cpu); written = min(written, len); ptxt += written, len -= written; } local_print(m, "%s\n", txtbuf); for(irq = 0; irq < nr_ints; irq++) { char *name = ""; int set = 0; int busy = 0; txtstat[0] = 0; for_each_possible_cpu(cpu) { avm_fiq_registration_entry_t *reg_entry = &(avm_fiq_registrations[cpu][irq]); if( atomic_read(®_entry->count) != 0 ) { set++; } if( atomic_read(®_entry->is_allocated) != 0 ) { #if defined(FIQ_STATISTIC) unsigned long cprom = 0; fill_fiq_stat(txtstat, sizeof(txtstat), ®_entry->statistic, 1, &cprom); consume_promille[cpu] += cprom; #endif/*--- #if defined(FIQ_STATISTIC) ---*/ name = reg_entry->name; busy |= atomic_read(®_entry->busy) << cpu; unhandled_count = atomic_read(®_entry->unhandled_count); set++; } } if(set == 0) { continue; } ptxt = txtbuf, len = sizeof(txtbuf); written = snprintf(ptxt, len, "%8u:", irq); written = min(written, len); ptxt += written, len -= written; for(cpu = 0; cpu < NR_CPUS; cpu++) { avm_fiq_registration_entry_t *reg_entry = &(avm_fiq_registrations[cpu][irq]); written = snprintf(ptxt, len, "%11u ", atomic_read(®_entry->count)); written = min(written, len); ptxt += written, len -= written; } written = snprintf(ptxt, len,"\t%-14s %c %s", name, busy ? '*' : ' ', txtstat); written = min(written, len); ptxt += written, len -= written; if(unhandled_count) { written = snprintf(ptxt, len, "unhandled: %lu", unhandled_count); written = min(written, len); ptxt += written, len -= written; } local_print(m, "%s\n", txtbuf); } ptxt = txtbuf, len = sizeof(txtbuf); written = snprintf(ptxt, len, "preempts:"); written = min(written, len); ptxt += written, len -= written; for_each_possible_cpu(cpu) { written = snprintf(ptxt, len, "%11u ", per_cpu(fiq_fiq_preemptions, cpu)); written = min(written, len); ptxt += written, len -= written; } local_print(m, "%s\n", txtbuf); ptxt = txtbuf, len = sizeof(txtbuf); written = snprintf(ptxt, len, "spurious:"); written = min(written, len); ptxt += written, len -= written; for_each_possible_cpu(cpu) { written = snprintf(ptxt, len, "%11u ", per_cpu(spurious_count, cpu)); written = min(written, len); ptxt += written, len -= written; } local_print(m, "%s\n", txtbuf); #if defined(FIQ_STATISTIC) ptxt = txtbuf, len = sizeof(txtbuf); written = snprintf(ptxt, len, " consum:"); written = min(written, len); ptxt += written, len -= written; for_each_possible_cpu(cpu) { written = snprintf(ptxt, len, "%9lu.%lu ", consume_promille[cpu] / 10 , consume_promille[cpu] % 10); written = min(written, len); ptxt += written, len -= written; } local_print(m, "%s %%\n", txtbuf); #endif/*--- #if defined(FIQ_STATISTIC) ---*/ if(m) mutex_unlock(&avm_fiq_lock); return 0; } /*--------------------------------------------------------------------------------*\ \*--------------------------------------------------------------------------------*/ void avm_fiq_dump_stat(void) { printk(KERN_ERR"FASTIRQ-Status:\n"); fiq_proc_stat(NULL, NULL); } /*--------------------------------------------------------------------------------*\ \*--------------------------------------------------------------------------------*/ static int fiq_proc_open(struct inode *inode, struct file *file) { return single_open(file, fiq_proc_stat, NULL); } /*--------------------------------------------------------------------------------*\ \*--------------------------------------------------------------------------------*/ static struct file_operations fiq_proc_fops= { .open = fiq_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /*--------------------------------------------------------------------------------*\ \*--------------------------------------------------------------------------------*/ static int fiq_proc_init(void) { proc_create("fastinterrupts", 0, NULL, &fiq_proc_fops); return 0; } #endif /*--- #if defined(CONFIG_PROC_FS) ---*/