/* * * Copyright (C) 2016 AVM GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * backtrace-helper-functions for stackdump on smp etc. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define snprintf_add(ptxt, txtlen, args...) do { \ if (ptxt == NULL) { \ pr_err(args); \ } else { \ int local_add_len; \ \ local_add_len = snprintf(ptxt, txtlen, args); \ if (local_add_len > 0) { \ int tail = min_t(int, txtlen, local_add_len); \ \ (ptxt) += tail, (txtlen) -= tail; \ } \ } \ } while (0) #if defined(CONFIG_AVM_FASTIRQ) #include #include #include #include #include #include static int rte_monitor_ipi; enum _monitor_ipi_type { monitor_nop = 0x0, monitor_bt = 0x1, }; struct _monitor_bt { struct pt_regs ptregs; unsigned long svc_sp; /*--- fuer current ---*/ unsigned long transbase; unsigned long dac; unsigned long ctrl; unsigned int all_register_valid; }; struct _monitor_ipi { enum _monitor_ipi_type type; union __monitor_ipi { struct _monitor_bt bt; } buf; unsigned int init; atomic_t ready; spinlock_t lock; }; static struct _monitor_ipi IPI_Monitor[NR_CPUS]; static atomic_t backtrace_busy; static const char *const processor_modes[] = { "USER_26", "FIQ_26", "IRQ_26", "SVC_26", "UK4_26", "UK5_26", "UK6_26", "UK7_26", "UK8_26", "UK9_26", "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26", "USER_32", "FIQ_32", "IRQ_32", "SVC_32", "UK4_32", "UK5_32", "UK6_32", "ABT_32", "UK8_32", "UK9_32", "UK10_32", "UND_32", "UK12_32", "UK13_32", "UK14_32", "SYS_32" }; static const char * const isa_modes[] = { "ARM", "Thumb", "Jazelle", "ThumbEE" }; /** */ static void get_cp15_register(struct _monitor_bt *pbt) { #ifdef CONFIG_CPU_CP15 unsigned int ctrl; #ifdef CONFIG_CPU_CP15_MMU unsigned int transbase, dac; asm("mrc p15, 0, %0, c2, c0\n\t" "mrc p15, 0, %1, c3, c0\n" : "=r" (transbase), "=r" (dac)); pbt->transbase = transbase; pbt->dac = dac; #endif asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl)); pbt->ctrl = ctrl; #endif } /** */ static inline long get_cpsr_reg(void) { unsigned long cpsr_reg; asm volatile( " mrs %0, cpsr @ get_cpsr\n" : "=r" (cpsr_reg) : : "memory", "cc"); return cpsr_reg; } /** */ static int __get_userinfo(char *buf, unsigned int maxbuflen, struct mm_struct *mmm, unsigned long addr) { struct vm_area_struct *vm; struct vm_area_struct *vm_last = NULL; unsigned int i = 0; if (!virt_addr_valid(mmm)) { return 1; } vm = mmm->mmap; while (vm) { /*--- pr_err("%s[%x]:%p %x - %x vm_mm %p\n", __func__, addr, vm, vm->vm_start, vm->vm_end, vm->vm_mm); ---*/ if (!virt_addr_valid(vm)) { snprintf(buf, maxbuflen, "%s: error: corrupt vm %p vm_last=%pS\n", __func__, vm, vm_last); return 1; } if ((addr >= vm->vm_start) && (addr < vm->vm_end)) { snprintf(buf, maxbuflen, "seg=%3u of=0x%08lx/0x%lx [%s]", i, addr - (unsigned long)vm->vm_start, (unsigned long)vm->vm_end - (unsigned long)vm->vm_start, (vm->vm_file && vm->vm_file->f_path.dentry) ? (char *)vm->vm_file->f_path.dentry->d_name.name : "" ); /*--- pr_err("%s", buf); ---*/ return 0; } vm_last = vm; vm = vm->vm_next; i++; } return 1; } /** * Schwachstelle: thread/task nicht mehr valid (in dem Moment freigegeben) */ static char *get_user_symbol(char *txt, unsigned int maxttxlen, struct thread_info *thread, unsigned long addr) { struct task_struct *task = NULL; unsigned long flags; txt[0] = 0; firq_local_irq_save(flags); if (!virt_addr_valid(thread)) { firq_local_irq_restore(flags); return txt; } task = thread->task; if (!virt_addr_valid(task)) { firq_local_irq_restore(flags); return txt; } if (firq_spin_trylock(&task->alloc_lock)) { /*--- kritisch: mmput()/get_task_mm() darf im FIrq-Kontext nicht verwendet werden, mmput hoffentlich nur im task_lock() ---*/ __get_userinfo(txt, maxttxlen, task->mm, addr); firq_spin_unlock(&task->alloc_lock); } firq_local_irq_restore(flags); return txt; } /** * FAST-IRQ-Kontext */ static noinline void monitor_ipi_firq_context(struct _monitor_ipi *m_ipi) { struct pt_regs *pregs; if (atomic_read(&m_ipi->ready)) { return; } /* Sind wir hier die crashende CPU? */ if (avm_get_crashed_cpu() == raw_smp_processor_id()) { /* Nachschauen, ob von uns willentlich die exc_regs auf NULL gesetzt wurden (Panic-Fall) */ pregs = get_exc_regs(raw_smp_processor_id()); if (pregs == NULL) { /* PANIC! Die hat keinen eigenen Exception-Kontext, wir haben ihn aber durch den WD-FIQ bekommen */ pregs = get_fiq_regs(); } pr_info("[%s] slave cpu#%d is the crashed cpu!\n", __func__, raw_smp_processor_id()); } else { /* Nein, wir sind eine der drei im FIQ gefangenen ... */ pregs = get_fiq_regs(); pr_info("[%s] slave cpu#%d\n", __func__, raw_smp_processor_id()); } switch (m_ipi->type) { case monitor_bt: /* Sind wir hier die crashende CPU? */ if (avm_get_crashed_cpu() == raw_smp_processor_id()) { /* Nachschauen, ob von uns willentlich die exc_regs auf NULL gesetzt wurden (Panic-Fall) */ if (get_exc_regs(raw_smp_processor_id()) != NULL) { /* Der Exception-Kontext braucht nicht korrigiert zu werden, nur der FIQ-Kontext ... */ memcpy(&m_ipi->buf.bt.ptregs, pregs, sizeof(m_ipi->buf.bt.ptregs)); pr_info("[%s] just memcpy pregs!\n", __func__); /* PANIC! Die hat keinen eigenen Exception-Kontext, wir haben ihn aber durch den WD-FIQ bekommen */ } else { /* Da wir nicht die ausführende CPU sind, hier an die Kontext-Korrektur denken! */ copy_banked_regs_full(&m_ipi->buf.bt.ptregs, pregs); pr_info("[%s] copy_banked_regs_full!\n", __func__); } /* Nein, wir sind eine der übrigen drei ... */ } else { /* und hier im FIQ, daher wieder Kontext-Korrektur! */ copy_banked_regs_full(&m_ipi->buf.bt.ptregs, pregs); pr_info("[%s] copy_banked_regs_full!\n", __func__); } m_ipi->buf.bt.svc_sp = get_svc_sp(); get_cp15_register(&m_ipi->buf.bt); m_ipi->buf.bt.all_register_valid = 1; break; case monitor_nop: default: break; } dsb(); atomic_set(&m_ipi->ready, 1); } /** */ static void dump_instr(const char *lvl, struct pt_regs *regs) { unsigned long addr = instruction_pointer(regs); const int thumb = thumb_mode(regs); const int width = thumb ? 4 : 8; mm_segment_t fs; char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str; int i; /* * We need to switch to kernel mode so that we can use __get_user * to safely read from kernel space. Note that we now dump the * code first, just in case the backtrace kills us. */ fs = get_fs(); set_fs(KERNEL_DS); for (i = -4; i < 1 + !!thumb; i++) { unsigned int val, bad; if (thumb) bad = __get_user(val, &((u16 *)addr)[i]); else bad = __get_user(val, &((u32 *)addr)[i]); if (!bad) p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ", width, val); else { p += sprintf(p, "bad PC value"); break; } } printk("%sCode: %s\n", lvl, str); set_fs(fs); } #define UNALIGNED_MAX_SCORE_ENTRIES 8 #if defined(UNALIGNED_MAX_SCORE_ENTRIES) /** */ struct _unaligned_access_score_entry { unsigned long unaligneds; unsigned long ts; char comm[TASK_COMM_LEN]; /*--- Name mal sichern falls beendet ---*/ pid_t pid; unsigned long pc; }; /** * usermode: 0 Kernel */ static void add_unaligned_access_to_table(unsigned long pc, struct _unaligned_access_score_entry table[], unsigned int table_entries, unsigned int usermode) { unsigned int ts_idx = 0, ts_score = 0; unsigned int i; for (i = 0; i < table_entries; i++) { if (table[i].unaligneds) { if (usermode) { if (task_pid_nr(current) != table[i].pid) { continue; } } if (pc == table[i].pc) { table[i].unaligneds++; /*--- pr_err("%s:[%u]add %s %s pid=%u pc=0x%lx %lu\n", __func__, i, usermode ? "USER" : "KERNEL", current->comm, table[i].pid, pc, table[i].unaligneds); ---*/ return; } continue; } /*--- pr_err("%s:[%u]initial %s %s pc=0x%lx pid=%u\n", __func__, i, usermode ? "USER" : "KERNEL", current->comm, pc, table[i].pid); ---*/ ts_idx = i; goto table_settings; } /*--- alle besetzt: bewerte per unaligneds / per time ---*/ for (i = 0; i < table_entries; i++) { unsigned long diff = jiffies - table[i].ts; unsigned int score = diff / (table[i].unaligneds | 0x1); /*--- pr_err("%s:[%u]score %d diff %u unaligneds=%lu ts_score %u idx=%u\n", __func__, i, score, diff, table[i].unaligneds, ts_score, ts_idx); ---*/ if (score > ts_score) { ts_score = score; ts_idx = i; } } /*--- pr_err("%s:[%u]replace %s old: unaligneds=%lu pc=0x%lx pid=%u ts_score=%u new=%s pc=0x%lx\n", __func__, ts_idx, usermode ? "USER" : "KERNEL", table[ts_idx].unaligneds, table[ts_idx].pc, table[ts_idx].pid, ts_score, current->comm, pc); ---*/ table_settings: table[ts_idx].unaligneds = 1; table[ts_idx].pc = pc; table[ts_idx].ts = jiffies; table[ts_idx].pid = task_pid_nr(current); memcpy(table[ts_idx].comm, current->comm, sizeof(table[ts_idx].comm)); } /** * die Normierung erfolgt entsprechend des Zeitfensters */ static const char *norm_ai(unsigned long *ai, unsigned int sec) { if (sec >= 60 * 60) { *ai *= 60 * 60; return "h"; } if (sec >= 60) { *ai *= 60; return "min"; } return "s"; } /** */ static unsigned long show_unaligned_access_table(struct seq_file *seq, struct _unaligned_access_score_entry table[], unsigned int table_entries, unsigned int usermode) { unsigned int i; unsigned long sum = 0; if (table[0].unaligneds) { seq_printf(seq, "%s:\nunaligneds\t unaligneds/time\t\n", usermode ? "User-Scorelist" : "System-Scorelist"); } for (i = 0; i < table_entries; i++) { if (table[i].unaligneds) { unsigned long ai_per_time = table[i].unaligneds; unsigned long sec = (jiffies - table[i].ts) / HZ; const char *post_fix = norm_ai(&ai_per_time, sec); sum += table[i].unaligneds; if (sec == 0) sec = 1; if (usermode) { struct pid *ppid; struct task_struct *tsk = NULL; char *pcomm; ppid = find_get_pid(table[i].pid); if (ppid) { tsk = get_pid_task(ppid, PIDTYPE_PID); } if (tsk) { pcomm = tsk->comm; } else { pcomm = table[i].comm; tsk = NULL; } seq_printf(seq, "%10lu \t%5lu.%02lu/%s \t %s(%u) pc=0x%lx ", table[i].unaligneds, ai_per_time / sec, ((ai_per_time * 100) / sec) % 100, post_fix, pcomm, table[i].pid, table[i].pc); if (tsk) { put_task_struct(tsk); } if (ppid) { put_pid(ppid); } seq_puts(seq, "\n"); } else { seq_printf(seq, "%10lu \t%5lu.%02lu/%s \t 0x%lx(%pS) ", table[i].unaligneds, ai_per_time / sec, ((ai_per_time * 100) / sec) % 100, post_fix, table[i].pc, (void *)table[i].pc); seq_puts(seq, "\n"); } } } return sum; } static struct _unaligned_access_score_entry user_score_table[UNALIGNED_MAX_SCORE_ENTRIES], sys_score_table[UNALIGNED_MAX_SCORE_ENTRIES]; #endif/*--- #if defined(UNALIGNED_MAX_SCORE_ENTRIES) ---*/ /** * Dump memory */ static void dump_mem(const char *lvl, const char *str, unsigned long bottom, unsigned long top) { unsigned long first; mm_segment_t fs; int i; /* * We need to switch to kernel mode so that we can use __get_user * to safely read from kernel space. Note that we now dump the * code first, just in case the backtrace kills us. */ fs = get_fs(); set_fs(KERNEL_DS); printk("%s%s(0x%08lx to 0x%08lx)\n", lvl, str, bottom, top); for (first = bottom & ~31; first < top; first += 32) { unsigned long p; char str[sizeof(" 12345678") * 8 + 1]; memset(str, ' ', sizeof(str)); str[sizeof(str) - 1] = '\0'; for (p = first, i = 0; i < 8 && p < top; i++, p += 4) { if (p >= bottom && p < top) { unsigned long val; if (__get_user(val, (unsigned long *)p) == 0) sprintf(str + i * 9, " %08lx", val); else sprintf(str + i * 9, " ????????"); } } printk("%s%04lx:%s\n", lvl, first & 0xffff, str); } set_fs(fs); } /** * wir sind auf der cpu nicht im firq: nun steht noch die Kontext-Frage: * (a) Interrupt * (b) Exception * (c) direkter Aufruf */ extern struct pt_regs avm_die_pregs; static void monitor_ipi_direct_context(struct _monitor_ipi *m_ipi, struct pt_regs *exception_regs) { register unsigned long current_sp asm ("sp"); struct pt_regs *regs; if (atomic_read(&m_ipi->ready)) { return; } switch (m_ipi->type) { case monitor_bt: /* Sind wir hier die crashende CPU? */ if (avm_get_crashed_cpu() == raw_smp_processor_id()) { /* Ist der uns übergebene Exception-Kontext gültig? */ if (exception_regs) { /* Nachschauen, ob von uns willentlich die exc_regs auf NULL gesetzt wurden (Panic-Fall) */ regs = get_exc_regs(avm_get_crashed_cpu()); pr_info("[%s] MONITOR_BT exception_regs\n", __func__); /* PANIC! Die hat keinen eigenen Exception-Kontext, wir haben ihn aber durch den WD-FIQ bekommen */ if (regs == NULL) { regs = get_fiq_regs(); pr_info("[%s] use exchanged fiq_regs!\n", __func__); } } else if (in_interrupt()) { /*--- Irq-Kontext ---*/ regs = get_irq_regs(); pr_info("[%s] MONITOR_BT in_interrupt()==True, regs=0x%p\n", __func__, (void *)regs); } else { regs = NULL; pr_info("[%s] MONITOR_BT regs=NULL!\n", __func__); } } else { /* Nein, wir sind eine der übrigen drei ... */ regs = get_fiq_regs(); if (regs) pr_info("[%s] MONITOR_BT fiq_regs! PC=0x%p, LR=0x%p, SP=0x%p\n", __func__, (void *)regs->ARM_pc, (void *)regs->ARM_lr, (void *)regs->ARM_sp); } if (regs) { if (avm_get_crashed_cpu() == raw_smp_processor_id()) { pr_info("[%s] master cpu is crashed cpu!\n", __func__); } else { pr_info("[%s] master cpu is not crashed cpu!\n", __func__); } /* Die Master-CPU hat die Kontext-Korrektur schon vorab im WD-FIQ Einstieg erhalten */ memcpy(&m_ipi->buf.bt.ptregs, regs, sizeof(m_ipi->buf.bt.ptregs)); m_ipi->buf.bt.svc_sp = get_svc_sp(); m_ipi->buf.bt.all_register_valid = 1; pr_info("[%s] BT register set valid!\n", __func__); } else { /** * Wenn aus einer (Fault-)Exception ohne exception_regs: * erstmal (unsinniger) Backtrace des Exception-Handlers, dabei * erkennt der Backtracer hoffentlich das Exception-Text-Segment * und liefert dann doch nochmal den relevanten Backtrace */ memset(&m_ipi->buf.bt, 0, sizeof(m_ipi->buf.bt)); m_ipi->buf.bt.ptregs.ARM_fp = (unsigned long)__builtin_frame_address(0); m_ipi->buf.bt.ptregs.ARM_pc = _THIS_IP_; m_ipi->buf.bt.ptregs.ARM_lr = _RET_IP_; m_ipi->buf.bt.ptregs.ARM_sp = current_sp; m_ipi->buf.bt.ptregs.ARM_cpsr = get_cpsr_reg(); m_ipi->buf.bt.all_register_valid = 0; /*--- Registerdump sinnfrei ---*/ m_ipi->buf.bt.svc_sp = current_sp; pr_info("[%s] NO_EXCEPTION_CONTEXT! BT register set not valid!\n", __func__); } get_cp15_register(&m_ipi->buf.bt); break; case monitor_nop: default: break; } dsb(); atomic_set(&m_ipi->ready, 1); } /** * FAST-IRQ-Kontext */ static irqreturn_t firq_monitor(unsigned int ipi, struct pt_regs *regs, void *ctx) { struct _monitor_ipi *m_ipi = &IPI_Monitor[raw_smp_processor_id()]; monitor_ipi_firq_context(m_ipi); return IRQ_HANDLED; } /** * Absetzen eines Monitor-Befehls um synchron Infos von anderen CPU's zu erhalten * ret: 0 ok */ static noinline int trigger_firq_monitor(unsigned int cpu, enum _monitor_ipi_type type, void *buf, struct pt_regs *exception_regs) { int ret = 0, os_context; unsigned long flags = 0, retry = 1000; struct _monitor_ipi *m_ipi = &IPI_Monitor[cpu]; if (cpu > ARRAY_SIZE(IPI_Monitor)) { return -EINVAL; } if (m_ipi->init == 0) { return -EACCES; } os_context = !firq_is_avm_rte(); if (os_context) { spin_lock_irqsave(&m_ipi->lock, flags); } atomic_set(&m_ipi->ready, 0); m_ipi->type = type; if (raw_smp_processor_id() == cpu) { if (is_cpu_mode_fiq()) { /*--- sind auf dieser CPU im fastirq ---*/ monitor_ipi_firq_context(m_ipi); } else { /*--- sind auf dieser CPU (irq oder normaler kernel-kontext) ---*/ monitor_ipi_direct_context(m_ipi, exception_regs); } } else { avm_rte_ipi_trigger_on(cpu, rte_monitor_ipi); } while (retry && atomic_read(&m_ipi->ready) == 0) { udelay(1); retry--; } if (atomic_read(&m_ipi->ready)) { switch (type) { case monitor_bt: memcpy(buf, &m_ipi->buf.bt, sizeof(m_ipi->buf.bt)); break; case monitor_nop: default: break; } } else { pr_err("%s: for cpu%u on cpu%u%s timeout-error\n", __func__, cpu, raw_smp_processor_id(), os_context ? "" : "(fiq-context)"); ret = -EACCES; } /*--- pr_err("%s: retry=%lu\n", __func__, retry); ---*/ if (os_context) { spin_unlock_irqrestore(&m_ipi->lock, flags); } return ret; } /** */ static void _bt_show_regs(int cpu, struct _monitor_bt *pbt, struct thread_info *pthread, struct task_struct *curr_tsk) { int os_context; unsigned int all_register_valid; struct pt_regs *regs; unsigned long flags, flag; char txtbuf[64]; char buf[64]; os_context = !firq_is_avm_rte(); regs = &pbt->ptregs; all_register_valid = pbt->all_register_valid; if (os_context) { firq_local_irq_save(flag); } pr_err("\n"); pr_err("CPU: %d Pid: %d, comm: %20s\n", cpu, (curr_tsk ? task_pid_nr(curr_tsk) : -1), (curr_tsk ? curr_tsk->comm : "?")); pr_err(" %s (%s %.*s)\n", print_tainted(), init_utsname()->release, (int)strcspn(init_utsname()->version, " "), init_utsname()->version); if (user_mode(regs)) { pr_err("PC is at %s\n", get_user_symbol(txtbuf, sizeof(txtbuf), pthread, regs->ARM_pc)); pr_err("LC is at %s\n", get_user_symbol(txtbuf, sizeof(txtbuf), pthread, regs->ARM_lr)); } else { print_symbol("PC is at %s\n", instruction_pointer(regs)); print_symbol("LR is at %s\n", regs->ARM_lr); } if (all_register_valid) { pr_err("pc :[<%08lx>] lr :[<%08lx>] psr: %08lx\n" "sp : %08lx ip : %08lx fp : %08lx\n", regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr, regs->ARM_sp, regs->ARM_ip, regs->ARM_fp); pr_err("r10: %08lx r9 : %08lx r8 : %08lx\n", regs->ARM_r10, regs->ARM_r9, regs->ARM_r8); pr_err("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n", regs->ARM_r7, regs->ARM_r6, regs->ARM_r5, regs->ARM_r4); pr_err("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n", regs->ARM_r3, regs->ARM_r2, regs->ARM_r1, regs->ARM_r0); } else { pr_err("pc :[<%08lx>] lr :[<%08lx>] psr: %08lx\n" "sp : %08lx fp : %08lx\n", regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr, regs->ARM_sp, regs->ARM_fp); } flags = regs->ARM_cpsr; buf[0] = flags & PSR_N_BIT ? 'N' : 'n'; buf[1] = flags & PSR_Z_BIT ? 'Z' : 'z'; buf[2] = flags & PSR_C_BIT ? 'C' : 'c'; buf[3] = flags & PSR_V_BIT ? 'V' : 'v'; buf[4] = '\0'; pr_err("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n", buf, interrupts_enabled(regs) ? "n" : "ff", fast_interrupts_enabled(regs) ? "n" : "ff", processor_modes[processor_mode(regs)], isa_modes[isa_mode(regs)], (pthread ? ((pthread->addr_limit == get_ds()) ? "kernel" : "user") : "?")); #ifdef CONFIG_CPU_CP15 buf[0] = '\0'; #ifdef CONFIG_CPU_CP15_MMU snprintf(buf, sizeof(buf), " Table: %08lx DAC: %08lx", pbt->transbase, pbt->dac); #endif pr_err("Control: %08lx%s\n", pbt->ctrl, buf); #endif if (os_context) { firq_local_irq_restore(flag); } } /** */ static void bt_show_regs(int cpu, struct _monitor_bt *pbt, struct thread_info *pthread) { int os_context; unsigned long flag; struct pt_regs *regs; struct task_struct *curr_tsk = NULL; os_context = !firq_is_avm_rte(); regs = &pbt->ptregs; curr_tsk = pthread->task; if (os_context) { firq_local_irq_save(flag); } _bt_show_regs(cpu, pbt, pthread, curr_tsk); arch_show_register_memoryclassifier(regs); if (os_context) { firq_local_irq_restore(flag); } } /** * \brief: Backtrace aller CPU's ueber den FAST-IRQ * \param: exception_context falls Ausgabe aus Exception passiert * (sonst i.d.R. ueberfluessiger Backtrace des Exceptionhandlers) * cpu_mask: NULL all cpu's - sonst cpu's entsprechend mask */ #define AVM_SAVE_BT_REGS() \ do { \ asm volatile( \ "push {r0-r7} \n" \ : \ : \ : "memory", "cc" \ ) \ while (0) #define AVM_RESTORE_BT_REGS() \ do { \ asm volatile( \ "pop {r0-r7} \n" \ : \ : \ : "memory", "cc" \ ) \ while (0) /** */ bool avm_trigger_all_cpu_backtrace(struct pt_regs *exception_regs, cpumask_t *cpu_mask) { struct _monitor_bt bt; unsigned int cpu; cpumask_t cpu_bt_stat; if (cpu_mask) cpu_bt_stat = *cpu_mask; else cpumask_setall(&cpu_bt_stat); if (atomic_add_return(1, &backtrace_busy) > 1) { return 0; } for_each_online_cpu(cpu) { unsigned long sp; struct thread_info *pthread = NULL; struct task_struct *curr_tsk = NULL; if (cpu_mask && (cpumask_test_cpu(cpu, cpu_mask) == 0)) { continue; } memset(&bt, 0, sizeof(bt)); if (trigger_firq_monitor(cpu, monitor_bt, &bt, exception_regs)) { cpumask_clear_cpu(cpu, &cpu_bt_stat); continue; } sp = bt.svc_sp; if (virt_addr_valid(sp)) { pthread = thread_info_by_sp(sp); if (virt_addr_valid(pthread)) { curr_tsk = pthread->task; } else { pthread = NULL; } } if (!virt_addr_valid(curr_tsk)) { _bt_show_regs(cpu, &bt, pthread, NULL); pr_emerg("Backtrace: skipped ...\n"); if (virt_addr_valid(bt.ptregs.ARM_sp)) { dump_mem(KERN_ERR, "Stack: ", bt.ptregs.ARM_sp, min((bt.ptregs.ARM_sp & (~(THREAD_SIZE - 1))) + THREAD_SIZE, (bt.ptregs.ARM_sp + (PAGE_SIZE >> 1)))); } continue; } bt_show_regs(cpu, &bt, pthread); if (!user_mode(&bt.ptregs) || in_interrupt()) { #ifdef CONFIG_ARM_UNWIND pr_emerg("Backtrace:\n"); unwind_backtrace(&bt.ptregs, NULL); #else show_stack(curr_tsk, (void *)bt.ptregs.ARM_sp); #endif/*--- #ifdef CONFIG_ARM_UNWIND ---*/ if (!is_cpu_mode_fiq()) { dump_instr(KERN_ERR, &bt.ptregs); } dump_mem(KERN_ERR, "Stack: ", bt.ptregs.ARM_sp, min((bt.ptregs.ARM_sp & (~(THREAD_SIZE - 1))) + THREAD_SIZE, (bt.ptregs.ARM_sp + (PAGE_SIZE >> 1)))); #if defined(show_stacktrace_memoryclassifier) show_stacktrace_memoryclassifier(&bt.ptregs); #endif/*--- #if defined(show_stacktrace_memoryclassifier) ---*/ } } atomic_set(&backtrace_busy, 0); return cpumask_weight(&cpu_bt_stat) ? 1 : 0; } /** * installiere auf jeder CPU ein monitor-ipi-linux-firq */ static int monitor_ipi_init(void) { int cpu; int ret; for_each_cpu(cpu, cpu_online_mask) { spin_lock_init(&IPI_Monitor[cpu].lock); IPI_Monitor[cpu].init = 0; } ret = avm_rte_ipi_request(AVM_RTE_IPI_ALLOC_NR, firq_monitor, NULL, AVM_RTE_IPI_SECURE, 0x0, "RTE monitor"); if (ret) rte_monitor_ipi = ret; else pr_err("Could not register sgi monitor ipi\n"); for_each_cpu(cpu, cpu_online_mask) IPI_Monitor[cpu].init = 1; atomic_set(&backtrace_busy, 0); return 0; } late_initcall(monitor_ipi_init); #endif/*--- #if defined(CONFIG_AVM_FASTIRQ) ---*/ /** */ void ai_add_to_scorelist(unsigned long pc, int user) { #if defined(UNALIGNED_MAX_SCORE_ENTRIES) if (user) { add_unaligned_access_to_table(pc, user_score_table, UNALIGNED_MAX_SCORE_ENTRIES, 1); return; } add_unaligned_access_to_table(pc, sys_score_table, UNALIGNED_MAX_SCORE_ENTRIES, 0); #endif/*--- #if defined(UNALIGNED_MAX_SCORE_ENTRIES) ---*/ } /** */ unsigned long ai_show_scorelist(struct seq_file *seq, int user) { #if defined(UNALIGNED_MAX_SCORE_ENTRIES) if (user) { return show_unaligned_access_table(seq, user_score_table, UNALIGNED_MAX_SCORE_ENTRIES, 1); } return show_unaligned_access_table(seq, sys_score_table, UNALIGNED_MAX_SCORE_ENTRIES, 0); #else return 0; #endif/*--- #if defined(UNALIGNED_MAX_SCORE_ENTRIES) ---*/ } /** */ void print_code_range(struct seq_file *seq, const char *prefix, unsigned long addr, unsigned int thumb) { const int width = thumb ? 4 : 8; mm_segment_t fs; char str[sizeof("0x00000000 ") * 10 + 2 + 1], *p = str; int i; unsigned long start_addr = 0; /* * We need to switch to kernel mode so that we can use __get_user * to safely read from kernel space. Note that we now dump the * code first, just in case the backtrace kills us. */ fs = get_fs(); set_fs(KERNEL_DS); for (i = -2; i < 3 + !!thumb; i++) { unsigned int val, bad; if (thumb) { bad = __get_user(val, &((u16 *)addr)[i]); if (start_addr == 0) start_addr = (unsigned long)&((u16 *)addr)[i]; } else { bad = __get_user(val, &((u32 *)addr)[i]); if (start_addr == 0) start_addr = (unsigned long)&((u32 *)addr)[i]; } if (!bad) p += sprintf(p, i == 0 ? "<0x%0*x> " : "0x%0*x ", width, val); else { p += sprintf(p, "bad PC value"); break; } } set_fs(fs); if (seq) { seq_printf(seq, "%s Code(0x%08lx): %s\n", prefix, start_addr, str); } else { pr_err("%s Code(0x%08lx): %s\n", prefix, start_addr, str); } } /** */ #define IS_KERNEL_ADDR 0x1 #define IS_MODULE_ADDR 0x2 #define IS_VMALLOC_ADDR 0x3 #define IS_STACK_ADDR 0x4 int memory_classifier(unsigned long addr) { if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_end)) { return IS_KERNEL_ADDR; } else if (is_module_text_address(addr)) { return IS_MODULE_ADDR; } else if (is_vmalloc_addr((void *)addr)) { return IS_VMALLOC_ADDR; } else if (object_is_on_stack((void *)addr)) { return IS_STACK_ADDR; } return 0; } /** * @return NULL no virtual addr */ static struct page *memory_page_classifier(unsigned long addr) { if (virt_addr_valid(addr)) { return virt_to_head_page((void *)addr); } return NULL; } /** */ static char *print_vmflags(char *txt, unsigned int txtlen, unsigned long vm_flags) { char *txt_start = txt; if (txt == NULL) return NULL; txt[0] = 0; if (vm_flags & VM_IOREMAP) snprintf_add(txt, txtlen, "ioremap "); if (vm_flags & VM_ALLOC) snprintf_add(txt, txtlen, "vmalloc "); if (vm_flags & VM_MAP) snprintf_add(txt, txtlen, "vmap "); if (vm_flags & VM_USERMAP) snprintf_add(txt, txtlen, "user "); if (vm_flags & VM_VPAGES) snprintf_add(txt, txtlen, "vpages "); if (vm_flags & VM_KASAN) snprintf_add(txt, txtlen, "kasan "); if (txt_start[0] == 0) snprintf_add(txt, txtlen, "vm_flags=0x%lx ", vm_flags); return txt_start; } /** */ char *arch_print_memory_classifier(char *txt, unsigned int txtlen, unsigned long addr, int include_addr_prefix) { char *modname __maybe_unused; char sym[KSYM_SYMBOL_LEN]; char txtbuf[TASK_COMM_LEN + 16]; char *txt_start = txt; unsigned long caller, size, offset __maybe_unused, start, flags, vmflags; int freed, type; const char *name; static struct page *page; struct zone *zone; if (include_addr_prefix) { snprintf_add(txt, txtlen, "0x%08lx ", addr); } else { txt[0] = 0; } type = memory_classifier(addr); switch (type) { case IS_KERNEL_ADDR: case IS_MODULE_ADDR: #ifdef CONFIG_KALLSYMS name = kallsyms_lookup(addr, &size, &offset, &modname, sym); if (!name) { return txt_start; } snprintf_add(txt, txtlen, "%s+%#lx/%#lx", name, offset, size); if (modname) { snprintf_add(txt, txtlen, " [%s]", modname); } #else #if defined(CONFIG_AVM_BOOTMEM) if (!IS_ERR(module_alloc_find_module_name)) { module_alloc_find_module_name(txt, txt + txtlen, addr); } #endif /*--- #if defined(CONFIG_AVM_BOOTMEM) ---*/ #endif return txt_start; case IS_VMALLOC_ADDR: #if defined(CONFIG_AVM_FASTIRQ) if (is_cpu_mode_fiq()) { return txt_start; } #endif start = get_vmap_area(addr, &caller, &size, &vmflags); if (start) { snprintf(txt, txtlen, "[%s: size:%lu start:%p+0x%lx alloced by:%pS]", print_vmflags(txtbuf, sizeof(txtbuf), vmflags), size, (void *)start, addr - start, (void *)caller); } return txt_start; case IS_STACK_ADDR: break; default: break; } start = get_taskstack_area(addr, txtbuf, sizeof(txtbuf), type == IS_STACK_ADDR ? 1 : 0); if (start) { snprintf(txt, txtlen, "[%s: %p+0x%lx]", txtbuf, (void *)start, addr - start); return txt_start; } #ifdef CONFIG_AVM_RTE start = get_simplemempool_area(addr, &caller, txtbuf, sizeof(txtbuf), &size, &freed); if (start) { if (caller) { snprintf(sym, sizeof(sym), " allocated by:%pS ", (void *)caller); } else { sym[0] = 0; } snprintf(txt, txtlen, "[smempool: type:%s size:%lu start:%p%c0x%lx %s%s]", txtbuf, size, (void *)start, addr >= start ? '+' : '-', addr >= start ? addr - start : start - addr, sym, freed == 0 ? "" : freed == 1 ? "free" : freed == 2 ? "ctrl" : "padding"); return txt_start; } if (is_cpu_mode_fiq()) { return txt_start; } #endif page = memory_page_classifier(addr); if (!page) { return txt_start; } zone = page_zone(page); if (!spin_trylock_irqsave(&zone->lock, flags)) { return txt_start; } if (PageSlab(page)) { start = get_kmemalloc_area(addr, &caller, &name, &size, &freed); if (start) { if (caller) { snprintf(sym, sizeof(sym), " %s by:%pS", freed ? "freed" : "allocated", (void *)caller); } else { sym[0] = 0; } snprintf(txt, txtlen, "[slab: type:%s size:%lu start:0x%p+0x%lx%s]", name, size, (void *)start, addr - start, sym); } } else if (PageReserved(page)) { snprintf(txt, txtlen, "[page: type:reserved]"); } else if ((atomic_read(&page->_count))) { char ordertxt[32]; unsigned int order; unsigned long current_pc = 0; #if defined(CONFIG_AVM_PAGE_TRACE) current_pc = avm_get_page_current_pc(page); #endif/*--- #if defined(CONFIG_AVM_PAGE_TRACE) ---*/ if (current_pc) { snprintf(sym, sizeof(sym), " by:%pS", (void *)current_pc); } else { sym[0] = 0; } order = compound_order(page); if (order) { snprintf(ordertxt, sizeof(ordertxt), " O%u[%lu]", order, virt_to_pfn(addr) - page_to_pfn(page)); } else { ordertxt[0] = 0; } snprintf(txt, txtlen, "[page%s: type:alloc%s]", ordertxt, sym); } spin_unlock_irqrestore(&zone->lock, flags); return txt_start; } EXPORT_SYMBOL(print_memory_classifier); /** */ static int match_data(unsigned long data, unsigned long data_array[], unsigned int array_elements) { unsigned int i; for (i = 0; i < array_elements; i++) { if (data_array[i] == 0) { data_array[i] = data; return 0; } if (data_array[i] == data) { return 1; } } return 0; } /** */ void arch_show_stacktrace_memoryclassifier(const struct pt_regs *pregs) { unsigned long data_hist[40]; char txt[KSYM_SYMBOL_LEN]; unsigned long sp, sp_top; unsigned int start = 0, limit = 0; unsigned long stackdata; int fs; if (pregs == NULL) { return; } if (user_mode(pregs)) { return; } fs = get_fs(); set_fs(KERNEL_DS); sp = kernel_stack_pointer((struct pt_regs *)pregs); sp_top = (sp & ~(THREAD_SIZE - 1)) + THREAD_SIZE; for (; sp < sp_top; sp += 4) { if (limit > 60) { break; } if (__get_user(stackdata, (unsigned long *)sp)) { break; } if (start >= ARRAY_SIZE(data_hist)) { pr_err("...\n"); break; } if (stackdata && match_data(stackdata, data_hist, ARRAY_SIZE(data_hist))) { continue; } print_memory_classifier(txt, sizeof(txt), stackdata, 0); if (txt[0]) { if (start == 0) { pr_err("Classified pointer on stack:\n"); } start++; pr_err("%08lx %s\n", stackdata, txt); } limit++; } set_fs(fs); } static char *reg_name[] = {"r0 ", "r1 ", "r2 ", "r3 ", "r4 ", "r5 ", "r6 ", "r7 ", "r8 ", "r9 ", "r10", "ip ", "fp "}; /** */ void arch_show_register_memoryclassifier(const struct pt_regs *regs) { unsigned long reg[13]; char txt[KSYM_SYMBOL_LEN]; unsigned int i, start = 0; if (regs == NULL) { return; } if (user_mode(regs)) { return; } reg[0] = regs->ARM_r0, reg[1] = regs->ARM_r1, reg[2] = regs->ARM_r2, reg[3] = regs->ARM_r3; reg[4] = regs->ARM_r4, reg[5] = regs->ARM_r5, reg[6] = regs->ARM_r6, reg[7] = regs->ARM_r7; reg[7] = regs->ARM_r7, reg[8] = regs->ARM_r8, reg[9] = regs->ARM_r9, reg[10] = regs->ARM_r10; reg[11] = regs->ARM_ip, reg[12] = regs->ARM_fp; for (i = 0; i < ARRAY_SIZE(reg); i++) { print_memory_classifier(txt, sizeof(txt), reg[i], 0); if (txt[0]) { if (start == 0) { start = 1; pr_err("Classified pointer on registers:\n"); } pr_err("%s: %08lx %s\n", reg_name[i], reg[i], txt); } } }