/*
 * Copyright (C) 2016 AVM GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef _AVM_FIQ_H_
#define _AVM_FIQ_H_

/* Flags for avm_request_fiq_on() / avm_gic_fiq_setup() */
#define FIQ_EDGE (0x1 << 0)
#define FIQ_HWIRQ (0x1 << 1)
#define FIQ_1_N (0x1 << 2)

/*
 * NOTE(review): the target of this #include was lost when the file was
 * mangled (angle-bracket content stripped). Presumably the header that
 * defines the FIQ_PRIO_* priority levels (FIQ_PRIO_USER is used below) —
 * restore from the original source tree.
 */
#include

#ifndef CLIENT_FIQ_PRIO
#define CLIENT_FIQ_PRIO FIQ_PRIO_USER
#endif

/*
 * NOTE(review): include target lost (see note above). Presumably the
 * header defining PSR_r / PSR_r_v7 used by the macros below — restore
 * from the original source tree.
 */
#include

/*
 * Accessors for the FIQ priority field carried inside the saved flags
 * word: extract the priority, insert a priority, and clear the field.
 * The non-_v7 variants keep the priority at bit 6, the _v7 variants at
 * bit 16 of the flags word.
 */
#define READ_PRIO_FROM_CSPR(flags) ((flags & PSR_r) >> 6)
#define WRITE_PRIO_TO_CSPR(flags) ((flags & 0xF0) << 6)
#define CLEAR_PSR_r(flags) (flags & ~PSR_r)

#define READ_PRIO_FROM_CSPR_v7(flags) ((flags & PSR_r_v7) >> 16)
#define WRITE_PRIO_TO_CSPR_v7(flags) ((flags & 0xF0) << 16)
#define CLEAR_PSR_r_v7(flags) (flags & ~PSR_r_v7)

/*
 * NOTE(review): three include targets lost (see note above). Presumably
 * the headers providing irqreturn_t, DECLARE_PER_CPU/this_cpu ops,
 * struct pt_regs / cpumask_t / struct thread_info and the ARM mode
 * constants (MODE_MASK, FIQ_MODE, PSR_I_BIT) used below — restore from
 * the original source tree.
 */
#include
#include
#include

/* Implemented elsewhere: mask/unmask all FIQs in the interrupt controller. */
extern void avm_mask_all_fiqs_down(unsigned int fiq_prio, unsigned long *restore_PMR, unsigned long flags);
extern void avm_unmask_all_fiqs_up(unsigned long restore_PMR, unsigned long flags);
extern void avm_bust_mask(unsigned int mask, unsigned long flags);

/*
 * Counter for spurious interrupts, per CPU ...
 */
DECLARE_PER_CPU(unsigned int, spurious_count);

/**
 * Mode switch: set the CPU mode bits of CPSR to _mode and disable
 * IRQ+FIQ (bits 0xC0). The nop/isb padding around the mode change is
 * kept as-is from the original — presumably pipeline-settling for the
 * banked-register switch; do not remove without hardware verification.
 */
#define AVM_CHANGE_MODE(_mode) \
	do { \
		asm volatile( \
			"nop \n" \
			"nop \n" \
			"nop \n" \
			"isb \n" \
			"nop \n" \
			"nop \n" \
			"nop \n" \
			"mrs r0, cpsr \n" \
			"bic r0, r0, #0x1F \n" \
			"orr r0, r0, %[mode] \n" \
			"bic r0, r0, #0x1C0 \n" \
			"orr r0, r0, #0xC0 \n" \
			"msr cpsr_c, r0 \n" \
			"nop \n" \
			"nop \n" \
			"nop \n" \
			"isb \n" \
			"nop \n" \
			"nop \n" \
			"nop \n" \
			: \
			: [mode] "n"(_mode) \
			: "memory", "cc", "r0" \
		); \
	} while (0)

/*
 * \brief: Are we in FIQ mode?
 * ret:    no:  0
 *         yes: 1
 */
static inline int is_cpu_mode_fiq(void)
{
	unsigned long cpsr_reg;
	asm volatile(
		" mrs %0, cpsr @ is_cpu_mode_fiq\n"
		: "=r" (cpsr_reg)
		:
		: "memory", "cc");
	return (cpsr_reg & MODE_MASK) == FIQ_MODE;
}

/**
 * \brief: Disable IRQ and FIQ
 */
static inline void avm_arch_local_fiq_and_iq_disable(void)
{
	asm volatile(
		" cpsid fi @ avm_arch_local_fiq_and_iq_disable\n"
		:
		:
		: "memory", "cc");
}

/**
 * \brief: Enable IRQ and FIQ
 */
static inline void avm_arch_local_fiq_and_iq_enable(void)
{
	asm volatile(
		" cpsie fi @ avm_arch_local_fiq_and_iq_enable\n"
		:
		:
		: "memory", "cc");
}

/**
 * \brief: Save IRQ and FIQ state, return the old state in flags
 *         (both are disabled on return)
 */
static inline unsigned long avm_raw_local_fiq_and_iq_save(void)
{
	unsigned long flags;
	asm volatile(
		" mrs %0, cpsr @ avm_raw_local_fiq_and_iq_save\n"
		" cpsid if\n"
		: "=r" (flags)
		:
		: "memory", "cc");
	return flags;
}

/**
 * \brief: Restore IRQ and FIQ to the previous state given by flags
 */
static inline void avm_raw_local_fiq_and_iq_restore(unsigned long flags)
{
	asm volatile(
		" msr cpsr_c, %0 @ avm_raw_local_fiq_and_iq_restore\n"
		:
		: "r" (flags)
		: "memory", "cc");
}

/**
 * \brief: Restore the FIQ mask and the IRQ state from flags in the
 *         interrupt controller.
 *         Since this operation is not atomic, hard IRQ/FIQ
 *         disable/enable is wrapped around it.
 */
static inline void avm_arch_local_fiq_restore(unsigned long flags)
{
#ifndef CONFIG_ARCH_IPQ5018
	unsigned long avm_rte_prio = READ_PRIO_FROM_CSPR_v7(flags);
	unsigned long real_flags = CLEAR_PSR_r_v7(flags);
#else
	unsigned long avm_rte_prio = READ_PRIO_FROM_CSPR(flags);
	unsigned long real_flags = CLEAR_PSR_r(flags);
#endif
	/*
	 * At this point we do not need to remember the current I&F status.
	 *
	 * We just need to ensure that we're protected here. The desired
	 * I&F state is passed in by flags from the caller.
	 */
	avm_arch_local_fiq_and_iq_disable();
	avm_unmask_all_fiqs_up(avm_rte_prio, flags);
	avm_raw_local_fiq_and_iq_restore(real_flags);
}

/**
 * \brief: Mask FIQs in the interrupt controller down to the priority
 *         from the CLIENT_FIQ_PRIO environment define; return the old
 *         mask and old IRQ state in flags, leaving IRQs disabled.
 *         Since this operation is not atomic, hard IRQ/FIQ
 *         disable/enable is wrapped around it.
 */
static inline unsigned long avm_arch_local_fiq_and_iq_save(void)
{
	unsigned long flags;
	unsigned long restore_PMR = 0;
	flags = avm_raw_local_fiq_and_iq_save();
	avm_mask_all_fiqs_down(CLIENT_FIQ_PRIO, &restore_PMR, flags);
	/*
	 * Restore the old firq mask but always mask the interrupts.
	 *
	 * Technically due to the min interrupt priority enforced by
	 * mask_all_fiq_down no interrupts will be delivered. However linux
	 * will still check the I bit to know whether interrupts are disabled.
	 *
	 * Therefore we will set the I bit to indicate that the interrupts
	 * are disabled.
	 */
	avm_raw_local_fiq_and_iq_restore(flags | PSR_I_BIT);
#ifndef CONFIG_ARCH_IPQ5018
	return CLEAR_PSR_r_v7(flags) | WRITE_PRIO_TO_CSPR_v7(restore_PMR);
#else
	return CLEAR_PSR_r(flags) | WRITE_PRIO_TO_CSPR(restore_PMR);
#endif
}

/**
 * \brief: Claim a FIQ
 * \param: cpu (via cpumask)
 * \param: irq
 * \param: handler
 * \param: fiqflags - FIQ_EDGE/FIQ_HWIRQ/FIQ_1_N, see defines above
 * \param: devname
 * \param: dev_id
 *
 * ret:   invalid CPU number: -EINVAL
 *        already claimed:    -EBUSY
 *        otherwise 0
 */
typedef irqreturn_t (*avm_fiq_cb_t)(int, void *);
int avm_request_fiq_on(const struct cpumask *cpumask, unsigned int irq,
		       avm_fiq_cb_t handler, unsigned long fiqflags,
		       const char *devname, void *dev_id);

void avm_gic_fiq_setup(unsigned int pinNr, const struct cpumask *cpumask,
		       unsigned int prio, unsigned int config,
		       unsigned int mode);

/*
 * \brief: Release a FIQ
 * \param: cpu
 * \param: irq
 * \param: dev_id - must match the one given to avm_request_fiq_on()
 * ret:    invalid CPU number:     -EINVAL
 *         mismatching reference:  -EINVAL
 *         otherwise 0
 */
int avm_free_fiq_on(int cpu, unsigned int irq, void *dev_id);

/**
 * \brief: Enable a claimed FIQ
 * \param: cpu
 * \param: irq
 * ret:    was not properly requested: -EINVAL
 *         otherwise 0
 */
int avm_enable_fiq_on(int cpu, unsigned int irq);

/**
 * \brief: Disable a claimed FIQ
 * \param: cpu
 * \param: irq
 * ret:    was not properly requested: -EINVAL
 *         otherwise 0
 */
int avm_disable_fiq_on(int cpu, unsigned int irq);

/**
 * \brief: Read banked registers from FIQ context
 *         (sp and lr only)
 *         Use this function only from FIQ context
 */
void copy_banked_regs(struct pt_regs *regs, const struct pt_regs *org_regs);

/**
 * \brief: Read all banked registers from FIQ context
 *         Use this function only from FIQ context
 */
void copy_banked_regs_full(struct pt_regs *regs, const struct pt_regs *org_regs);

/*
 * \brief: Return the SVC-mode SP as the basis for determining this
 *         CPU's current task.
 *         Use this function only from FIQ context
 */
unsigned long get_svc_sp(void);

/**
 * \brief: Fix up the current register set if we are in FIQ context or
 *         an exception occurred in FIQ context
 *         Context: any
 * param regs     destination register set
 * param org_regs source register set
 *
 * ret: the org_regs pointer is modified if the above condition holds
 */
void prepare_register_for_trap(struct pt_regs *regs, struct pt_regs **org_regs);

/**
 * \brief: Caution! The registers must be corrected beforehand using
 *         prepare_register_for_trap()
 */
struct thread_info *current_thread_info_depend_on_context(struct pt_regs *regs);

/**
 * \brief: Backtrace of all CPUs via the fast IRQ
 * \param: exception_regs - set if output happens from an exception
 *         (otherwise the backtrace of the exception handler is usually
 *         superfluous)
 */
bool avm_trigger_all_cpu_backtrace(struct pt_regs *exception_regs, cpumask_t *cpu_mask);
void firq_avm_trigger_all_cpu_backtrace(struct pt_regs *exception_regs, cpumask_t *cpu_mask);

/*
 * Per-cpu current frame pointer - the location of the last exception frame on
 * the stack
 */
DECLARE_PER_CPU(struct pt_regs *, __fiq_regs);
DECLARE_PER_CPU(struct pt_regs *, __fiq_regs_save);
DECLARE_PER_CPU(struct pt_regs *, __exc_regs);

/** Read this CPU's saved FIQ register frame pointer. */
static inline struct pt_regs *get_fiq_regs(void)
{
	return __this_cpu_read(__fiq_regs);
}

/** Read this CPU's secondary (save) FIQ register frame pointer. */
static inline struct pt_regs *get_fiq_regs_save(void)
{
	return __this_cpu_read(__fiq_regs_save);
}

/** Read the FIQ register frame pointer of the given cpu. */
static inline struct pt_regs *get_fiq_regs_per_cpu(int cpu)
{
	return per_cpu(__fiq_regs, cpu);
}

/** Read the secondary (save) FIQ register frame pointer of the given cpu. */
static inline struct pt_regs *get_fiq_regs_save_per_cpu(int cpu)
{
	return per_cpu(__fiq_regs_save, cpu);
}

/** Swap in a new FIQ register frame pointer, returning the old one. */
static inline struct pt_regs *set_fiq_regs(struct pt_regs *new_regs)
{
	struct pt_regs *old_regs;
	old_regs = __this_cpu_read(__fiq_regs);
	__this_cpu_write(__fiq_regs, new_regs);
	return old_regs;
}

/** Swap in a new secondary (save) frame pointer, returning the old one. */
static inline struct pt_regs *set_fiq_regs_save(struct pt_regs *new_regs)
{
	struct pt_regs *old_regs;
	old_regs = __this_cpu_read(__fiq_regs_save);
	__this_cpu_write(__fiq_regs_save, new_regs);
	return old_regs;
}

/** Swap the save frame pointer of the given cpu, returning the old one. */
static inline struct pt_regs *set_fiq_regs_save_per_cpu(
	struct pt_regs *new_regs, int cpu)
{
	struct pt_regs *old_regs;
	old_regs = per_cpu(__fiq_regs_save, cpu);
	per_cpu(__fiq_regs_save, cpu) = new_regs;
	return old_regs;
}

/** Read the exception register frame pointer of the given cpu. */
static inline struct pt_regs *get_exc_regs(int cpu)
{
	return per_cpu(__exc_regs, cpu);
}

/** Swap in a new exception frame pointer, returning the old one. */
static inline struct pt_regs *set_exc_regs(struct pt_regs *new_regs)
{
	struct pt_regs *old_regs;
	old_regs = __this_cpu_read(__exc_regs);
	__this_cpu_write(__exc_regs, new_regs);
	return old_regs;
}

/** Swap the exception frame pointer of the given cpu, returning the old one. */
static inline struct pt_regs *set_exc_regs_per_cpu(struct pt_regs *new_regs, int cpu)
{
	struct pt_regs *old_regs;
	old_regs = per_cpu(__exc_regs, cpu);
	per_cpu(__exc_regs, cpu) = new_regs;
	return old_regs;
}

/**
 * Derive the thread_info from a stack pointer by masking off the
 * per-thread stack size (classic ARM current-task lookup).
 */
static inline struct thread_info *thread_info_by_sp(unsigned long sp)
{
	return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}

/* Crashed-CPU bookkeeping, implemented elsewhere. */
extern void avm_set_crashed_cpu(int cpu);
extern int _avm_get_crashed_cpu(void);
extern int avm_get_crashed_cpu(void);

#if defined(CONFIG_PROC_FS)
void avm_fiq_dump_stat(void);
#else /*--- #if defined(CONFIG_PROC_FS) ---*/
#define avm_fiq_dump_stat()
#endif /*--- #else ---*//*--- #if defined(CONFIG_PROC_FS) ---*/

#endif // #ifndef _AVM_FIQ_H_