--- zzzz-none-000/linux-3.10.107/arch/mips/kernel/traps.c 2017-06-27 09:49:32.000000000 +0000 +++ scorpion-7490-727/linux-3.10.107/arch/mips/kernel/traps.c 2021-02-04 17:41:59.000000000 +0000 @@ -10,9 +10,13 @@ * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved. + * Copyright (C) 2014, Imagination Technologies Ltd. */ +#include #include #include +#include +#include #include #include #include @@ -33,18 +37,22 @@ #include #include +#include #include #include #include #include #include +#include #include #include #include #include +#include #include #include #include +#include #include #include #include @@ -56,6 +64,15 @@ #include #include #include +#if defined(CONFIG_AVM_SIMPLE_PROFILING) +#include +#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/ +#if defined(CONFIG_AVM_ENHANCED) +#include +#include +#include +extern unsigned long kernelsp[NR_CPUS]; +#endif/*--- #if defined(CONFIG_AVM_ENHANCED) ---*/ extern void check_wait(void); extern asmlinkage void rollback_handle_int(void); @@ -75,13 +92,17 @@ extern asmlinkage void handle_cpu(void); extern asmlinkage void handle_ov(void); extern asmlinkage void handle_tr(void); +extern asmlinkage void handle_msa_fpe(void); extern asmlinkage void handle_fpe(void); +extern asmlinkage void handle_ftlb(void); +extern asmlinkage void handle_msa(void); extern asmlinkage void handle_mdmx(void); extern asmlinkage void handle_watch(void); extern asmlinkage void handle_mt(void); extern asmlinkage void handle_dsp(void); extern asmlinkage void handle_mcheck(void); extern asmlinkage void handle_reserved(void); +extern void tlb_do_page_fault_0(void); void (*board_be_init)(void); int (*board_be_handler)(struct pt_regs *regs, int is_fixup); @@ -89,28 +110,30 @@ void (*board_ejtag_handler_setup)(void); void (*board_bind_eic_interrupt)(int irq, int regset); void (*board_ebase_setup)(void); -void __cpuinitdata(*board_cache_error_setup)(void); +void(*board_cache_error_setup)(void); static void show_raw_backtrace(unsigned long reg29) { unsigned long *sp = (unsigned long *)(reg29 & ~3); unsigned long addr; - printk("Call Trace:"); -#ifdef CONFIG_KALLSYMS - printk("\n"); -#endif + printk(KERN_EMERG"Call Trace:\n"); while (!kstack_end(sp)) { unsigned long __user *p = (unsigned long __user *)(unsigned long)sp++; if (__get_user(addr, p)) { - printk(" (Bad stack address)"); + printk(KERN_EMERG" (Bad stack address)"); break; } - if (__kernel_text_address(addr)) + if (__kernel_text_address(addr)) { +#if defined(CONFIG_AVM_ENHANCED) + printk(KERN_EMERG"%04lx: [<%p>] %pS\n", (unsigned long)sp & THREAD_MASK,(void *) addr, (void *) addr); +#else/*--- #if defined(CONFIG_AVM_ENHANCED) ---*/ print_ip_sym(addr); +#endif/*--- #else ---*//*--- #if defined(CONFIG_AVM_ENHANCED) ---*/ + } } - printk("\n"); + printk(KERN_EMERG"\n"); } #ifdef CONFIG_KALLSYMS @@ -123,25 +146,50 @@ __setup("raw_show_trace", set_raw_show_trace); #endif -static void show_backtrace(struct task_struct *task, const struct pt_regs *regs) +void show_backtrace(struct task_struct *task, const struct pt_regs *regs) { unsigned long sp = regs->regs[29]; unsigned long ra = regs->regs[31]; unsigned long pc = regs->cp0_epc; +#if defined(CONFIG_AVM_ENHANCED) + unsigned long first_ra = ra; + unsigned long first_sp = sp; +#endif/*--- #if defined(CONFIG_AVM_ENHANCED) ---*/ if (!task) task = current; - if (raw_show_trace || !__kernel_text_address(pc)) { + if 
(raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) { show_raw_backtrace(sp); return; } - printk("Call Trace:\n"); + printk(KERN_EMERG"Call Trace:\n"); do { - print_ip_sym(pc); +#if defined(CONFIG_AVM_ENHANCED) + printk(KERN_EMERG"%04lx: [<%p>] %pS\n", sp & THREAD_MASK,(void *) pc, (void *) pc); +#else/*--- #if defined(CONFIG_AVM_ENHANCED) ---*/ + print_ip_sym(pc); +#endif/*--- #else ---*//*--- #if defined(CONFIG_AVM_ENHANCED) ---*/ pc = unwind_stack(task, &sp, pc, &ra); +#if defined(CONFIG_AVM_ENHANCED) + if(first_ra) { + if(pc == 0) { + /**--------------------------------------------------------------------------------**\ + * es gibt Funktionen (z.B. _raw_spin_lock_bh), wo direkt in eine andere Funktion + * per bxx symbol+offset(!=0) (z.b. _raw_read_unlock_bh+0xa4) gesprungen wird. In diesem Fall + * funktioniert die Ermittlung von ra ueber den Stack nicht, da der Stackoffset der + * angeprungenen Funktion mit Offset 0 ermittelt wird - passiert dies beim ersten Aufruf + * wo ja ra noch korrekt im Register koennen wir hier das ra direkt verwenden + * (siehe JZ-30655) + \**--------------------------------------------------------------------------------**/ + pc = first_ra; + sp = first_sp; + } + first_ra = 0; /*--- nur initial das Register ra verwenden ---*/ + } +#endif/*--- #if defined(CONFIG_AVM_ENHANCED) ---*/ } while (pc); - printk("\n"); + printk(KERN_EMERG"\n"); } /* @@ -175,12 +223,16 @@ i++; } printk("\n"); +#if defined(CONFIG_AVM_ENHANCED) + show_stacktrace_memoryclassifier(regs); +#endif/*--- #if defined(CONFIG_AVM_ENHANCED) ---*/ show_backtrace(task, regs); } void show_stack(struct task_struct *task, unsigned long *sp) { struct pt_regs regs; + mm_segment_t old_fs = get_fs(); if (sp) { regs.regs[29] = (unsigned long)sp; regs.regs[31] = 0; @@ -199,7 +251,13 @@ prepare_frametrace(®s); } } + /* + * show_stack() deals exclusively with kernel mode, so be sure to access + * the stack in the kernel (not user) address space. + */ + set_fs(KERNEL_DS); show_stacktrace(task, ®s); + set_fs(old_fs); } static void show_code(unsigned int __user *pc) @@ -207,24 +265,42 @@ long i; unsigned short __user *pc16 = NULL; - printk("\nCode:"); + printk(KERN_ERR"\nCode:"); if ((unsigned long)pc & 1) pc16 = (unsigned short __user *)((unsigned long)pc & ~1); for(i = -3 ; i < 6 ; i++) { unsigned int insn; if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) { - printk(" (Bad address in epc)\n"); + printk(KERN_ERR" (Bad address in epc)\n"); break; } printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' 
':'>')); } } +#if defined(CONFIG_AVM_ENHANCED) +char *get_kernel_or_user_symbol(char *txt, unsigned int maxttxlen, const struct pt_regs *regs, unsigned long addr){ + unsigned long gp; + txt[0] = 0; + if(!user_mode(regs)) { + snprintf(txt, maxttxlen, "%pS", (void *)addr); + return txt; + } + /*--- wir sind im USER-Mode: Spezialbehandlung fuer gp-Ermittlung (da virtuelle Adressen) ---*/ + gp = kernelsp[smp_processor_id()] & ~THREAD_MASK; /*--- kseg0-gp ermitteln ---*/ + return get_user_symbol(txt, maxttxlen, gp, addr); +} +#endif/*--- #if defined(CONFIG_AVM_ENHANCED) ---*/ + static void __show_regs(const struct pt_regs *regs) { +#if defined(CONFIG_AVM_ENHANCED) + char txt[KSYM_NAME_LEN]; +#endif/*--- #if defined(CONFIG_AVM_ENHANCED) ---*/ const int field = 2 * sizeof(unsigned long); unsigned int cause = regs->cp0_cause; + unsigned int exccode; int i; show_regs_print_info(KERN_DEFAULT); @@ -248,23 +324,25 @@ } #ifdef CONFIG_CPU_HAS_SMARTMIPS - printk("Acx : %0*lx\n", field, regs->acx); + printk(KERN_ERR"Acx : %0*lx\n", field, regs->acx); #endif - printk("Hi : %0*lx\n", field, regs->hi); - printk("Lo : %0*lx\n", field, regs->lo); + printk(KERN_ERR"Hi : %0*lx\n", field, regs->hi); + printk(KERN_ERR"Lo : %0*lx\n", field, regs->lo); +#if !defined(CONFIG_AVM_ENHANCED) /* * Saved cp0 registers */ printk("epc : %0*lx %pS\n", field, regs->cp0_epc, (void *) regs->cp0_epc); - printk(" %s\n", print_tainted()); - printk("ra : %0*lx %pS\n", field, regs->regs[31], + printk(KERN_ERR" %s\n", print_tainted()); + printk(KERN_ERR"ra : %0*lx %pS\n", field, regs->regs[31], (void *) regs->regs[31]); +#endif/*--- #if !defined(CONFIG_AVM_ENHANCED) ---*/ - printk("Status: %08x ", (uint32_t) regs->cp0_status); + printk(KERN_ERR"Status: %08x ", (uint32_t) regs->cp0_status); - if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) { + if (cpu_has_3kex) { if (regs->cp0_status & ST0_KUO) printk("KUo "); if (regs->cp0_status & ST0_IEO) @@ -277,7 +355,7 @@ printk("KUc "); if (regs->cp0_status & ST0_IEC) printk("IEc "); - } else { + } else if (cpu_has_4kex) { if (regs->cp0_status & ST0_KX) printk("KX "); if (regs->cp0_status & ST0_SX) @@ -304,17 +382,43 @@ printk("EXL "); if (regs->cp0_status & ST0_IE) printk("IE "); + if (regs->cp0_status & ST0_NMI) + printk("NMI"); } printk("\n"); - printk("Cause : %08x\n", cause); + exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE; +#if defined(CONFIG_AVM_ENHANCED) + printk(KERN_ERR"Cause : %08x exc_code:%d %s\n", cause, exccode, + exccode == 1 ? "Mod" : + exccode == 2 ? "TLBL" : + exccode == 3 ? "TLBS" : + exccode == 4 ? "AdEL" : + exccode == 5 ? "AdES" : + exccode == 6 ? "IBE" : + exccode == 7 ? "DBE" : + exccode == 25 ? "Thread" : + exccode == 23 ? "WATCH" : + exccode == 31 ? 
"CacheErr" : "" + ); +#else /*--- #if defined(CONFIG_AVM_ENHANCED) ---*/ + printk("Cause : %08x (ExcCode %02x)\n", cause, exccode); +#endif - cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE; - if (1 <= cause && cause <= 5) - printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr); + if (1 <= exccode && exccode <= 5) + printk(KERN_ERR"BadVA : %0*lx\n", field, regs->cp0_badvaddr); +#if defined(CONFIG_AVM_ENHANCED) +/*--- printk(KERN_ERR"errepc: %08lx %pS\n", read_c0_errorepc(), (void *)read_c0_errorepc()); ---*/ + printk(KERN_ERR"epc : %0*lx %s\n", field, regs->cp0_epc, get_kernel_or_user_symbol(txt, sizeof(txt), regs, regs->cp0_epc)); + printk(KERN_ERR"ra : %0*lx %s\n", field, regs->regs[31], get_kernel_or_user_symbol(txt, sizeof(txt), regs, regs->regs[31])); + printk(KERN_ERR" %s\n", print_tainted()); +#endif/*--- #else ---*//*--- #if defined(CONFIG_AVM_ENHANCED) ---*/ printk("PrId : %08x (%s)\n", read_c0_prid(), cpu_name_string()); +#if defined(CONFIG_AVM_ENHANCED) + show_register_memoryclassifier(regs); +#endif/*--- #if defined(CONFIG_AVM_ENHANCED) ---*/ } /* @@ -325,9 +429,10 @@ __show_regs((struct pt_regs *)regs); } -void show_registers(struct pt_regs *regs) +void show_registers(const struct pt_regs *regs) { const int field = 2 * sizeof(unsigned long); + mm_segment_t old_fs = get_fs(); __show_regs(regs); print_modules(); @@ -342,14 +447,13 @@ printk("*HwTLS: %0*lx\n", field, tls); } + if (!user_mode(regs)) + /* Necessary for getting the correct stack content */ + set_fs(KERNEL_DS); show_stacktrace(current, regs); show_code((unsigned int __user *) regs->cp0_epc); printk("\n"); -} - -static int regs_to_trapnr(struct pt_regs *regs) -{ - return (regs->cp0_cause >> 2) & 0x1f; + set_fs(old_fs); } static DEFINE_RAW_SPINLOCK(die_lock); @@ -358,24 +462,16 @@ { static int die_counter; int sig = SIGSEGV; -#ifdef CONFIG_MIPS_MT_SMTC - unsigned long dvpret; -#endif /* CONFIG_MIPS_MT_SMTC */ oops_enter(); - if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP) + if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr, + SIGSEGV) == NOTIFY_STOP) sig = 0; console_verbose(); raw_spin_lock_irq(&die_lock); -#ifdef CONFIG_MIPS_MT_SMTC - dvpret = dvpe(); -#endif /* CONFIG_MIPS_MT_SMTC */ bust_spinlocks(1); -#ifdef CONFIG_MIPS_MT_SMTC - mips_mt_regdump(dvpret); -#endif /* CONFIG_MIPS_MT_SMTC */ printk("%s[#%d]:\n", str, ++die_counter); show_registers(regs); @@ -423,7 +519,13 @@ const struct exception_table_entry *fixup = NULL; int data = regs->cp0_cause & 4; int action = MIPS_BE_FATAL; + enum ctx_state prev_state; +#if defined(CONFIG_AVM_SIMPLE_PROFILING) + avm_simple_profiling_log(avm_profile_data_type_exception_begin, regs->cp0_epc, + (regs->cp0_cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE); +#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/ + prev_state = exception_enter(); /* XXX For now. Fixme, this searches the wrong table ... 
*/ if (data && !user_mode(regs)) fixup = search_dbe_tables(exception_epc(regs)); @@ -434,13 +536,17 @@ if (board_be_handler) action = board_be_handler(regs, fixup != NULL); +#if defined(CONFIG_AVM_SIMPLE_PROFILING) + avm_simple_profiling_log(avm_profile_data_type_exception_end, regs->cp0_epc, + (regs->cp0_cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE); +#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/ switch (action) { case MIPS_BE_DISCARD: - return; + goto out; case MIPS_BE_FIXUP: if (fixup) { regs->cp0_epc = fixup->nextinsn; - return; + goto out; } break; default: @@ -453,12 +559,15 @@ printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n", data ? "Data" : "Instruction", field, regs->cp0_epc, field, regs->regs[31]); - if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs), SIGBUS) - == NOTIFY_STOP) - return; + if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr, + SIGBUS) == NOTIFY_STOP) + goto out; die_if_kernel("Oops", regs); force_sig(SIGBUS, current); + +out: + exception_exit(prev_state); } /* @@ -616,7 +725,7 @@ regs->regs[rt] = read_c0_count(); return 0; case 3: /* Count register resolution */ - switch (current_cpu_data.cputype) { + switch (current_cpu_type()) { case CPU_20KC: case CPU_25KF: regs->regs[rt] = 1; @@ -673,57 +782,156 @@ asmlinkage void do_ov(struct pt_regs *regs) { - siginfo_t info; - + enum ctx_state prev_state; + siginfo_t info = { + .si_signo = SIGFPE, + .si_code = FPE_INTOVF, + .si_addr = (void __user *)regs->cp0_epc, + }; + + prev_state = exception_enter(); +#if defined(CONFIG_AVM_ENHANCED) + show_code_position_by_epc("kernel integer overflow", regs); +#endif/*--- #if defined(CONFIG_AVM_ENHANCED) ---*/ die_if_kernel("Integer overflow", regs); - info.si_code = FPE_INTOVF; - info.si_signo = SIGFPE; - info.si_errno = 0; - info.si_addr = (void __user *) regs->cp0_epc; force_sig_info(SIGFPE, &info, current); + exception_exit(prev_state); } -int process_fpemu_return(int sig, void __user *fault_addr) +int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31) { - if (sig == SIGSEGV || sig == SIGBUS) { - struct siginfo si = {0}; + struct siginfo si = { 0 }; + + switch (sig) { + case 0: + return 0; + + case SIGFPE: si.si_addr = fault_addr; si.si_signo = sig; - if (sig == SIGSEGV) { - if (find_vma(current->mm, (unsigned long)fault_addr)) - si.si_code = SEGV_ACCERR; - else - si.si_code = SEGV_MAPERR; - } else { - si.si_code = BUS_ADRERR; - } + /* + * Inexact can happen together with Overflow or Underflow. + * Respect the mask to deliver the correct exception. 
+ */ + fcr31 &= (fcr31 & FPU_CSR_ALL_E) << + (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E)); + if (fcr31 & FPU_CSR_INV_X) + si.si_code = FPE_FLTINV; + else if (fcr31 & FPU_CSR_DIV_X) + si.si_code = FPE_FLTDIV; + else if (fcr31 & FPU_CSR_OVF_X) + si.si_code = FPE_FLTOVF; + else if (fcr31 & FPU_CSR_UDF_X) + si.si_code = FPE_FLTUND; + else if (fcr31 & FPU_CSR_INE_X) + si.si_code = FPE_FLTRES; + else + si.si_code = __SI_FAULT; + force_sig_info(sig, &si, current); + return 1; + + case SIGBUS: + si.si_addr = fault_addr; + si.si_signo = sig; + si.si_code = BUS_ADRERR; force_sig_info(sig, &si, current); return 1; - } else if (sig) { + + case SIGSEGV: + si.si_addr = fault_addr; + si.si_signo = sig; + down_read(¤t->mm->mmap_sem); + if (find_vma(current->mm, (unsigned long)fault_addr)) + si.si_code = SEGV_ACCERR; + else + si.si_code = SEGV_MAPERR; + up_read(¤t->mm->mmap_sem); + force_sig_info(sig, &si, current); + return 1; + + default: force_sig(sig, current); return 1; - } else { - return 0; } } +static int simulate_fp(struct pt_regs *regs, unsigned int opcode, + unsigned long old_epc, unsigned long old_ra) +{ + union mips_instruction inst = { .word = opcode }; + void __user *fault_addr; + unsigned long fcr31; + int sig; + + /* If it's obviously not an FP instruction, skip it */ + switch (inst.i_format.opcode) { + case cop1_op: + case cop1x_op: + case lwc1_op: + case ldc1_op: + case swc1_op: + case sdc1_op: + break; + + default: + return -1; + } + + /* + * do_ri skipped over the instruction via compute_return_epc, undo + * that for the FPU emulator. + */ + regs->cp0_epc = old_epc; + regs->regs[31] = old_ra; + + /* Save the FP context to struct thread_struct */ + lose_fpu(1); + + /* Run the emulator */ + sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, + &fault_addr); + fcr31 = current->thread.fpu.fcr31; + + /* + * We can't allow the emulated instruction to leave any of + * the cause bits set in $fcr31. + */ + current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; + + /* Restore the hardware register state */ + own_fpu(1); + + /* Send a signal if required. */ + process_fpemu_return(sig, fault_addr, fcr31); + + return 0; +} + /* * XXX Delayed fp exceptions when doing a lazy ctx switch XXX */ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) { - siginfo_t info = {0}; + enum ctx_state prev_state; + void __user *fault_addr; + int sig; + + prev_state = exception_enter(); +#if defined(CONFIG_AVM_ENHANCED) + show_code_position_by_epc("kernel-floatingpoint-exception", regs); +#endif/*--- #if defined(CONFIG_AVM_ENHANCED) ---*/ + if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr, + SIGFPE) == NOTIFY_STOP) + goto out; + + /* Clear FCSR.Cause before enabling interrupts */ + write_32bit_cp1_register(CP1_STATUS, fcr31 & ~FPU_CSR_ALL_X); + local_irq_enable(); - if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), SIGFPE) - == NOTIFY_STOP) - return; die_if_kernel("FP exception in kernel code", regs); if (fcr31 & FPU_CSR_UNI_X) { - int sig; - void __user *fault_addr = NULL; - /* * Unimplemented operation exception. If we've got the full * software emulator on-board, let's use it... @@ -740,50 +948,42 @@ /* Run the emulator */ sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, &fault_addr); + fcr31 = current->thread.fpu.fcr31; /* * We can't allow the emulated instruction to leave any of - * the cause bit set in $fcr31. + * the cause bits set in $fcr31. 
*/ current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; /* Restore the hardware register state */ own_fpu(1); /* Using the FPU again. */ + } else { + sig = SIGFPE; + fault_addr = (void __user *) regs->cp0_epc; + } - /* If something went wrong, signal */ - process_fpemu_return(sig, fault_addr); + /* Send a signal if required. */ + process_fpemu_return(sig, fault_addr, fcr31); - return; - } else if (fcr31 & FPU_CSR_INV_X) - info.si_code = FPE_FLTINV; - else if (fcr31 & FPU_CSR_DIV_X) - info.si_code = FPE_FLTDIV; - else if (fcr31 & FPU_CSR_OVF_X) - info.si_code = FPE_FLTOVF; - else if (fcr31 & FPU_CSR_UDF_X) - info.si_code = FPE_FLTUND; - else if (fcr31 & FPU_CSR_INE_X) - info.si_code = FPE_FLTRES; - else - info.si_code = __SI_FAULT; - info.si_signo = SIGFPE; - info.si_errno = 0; - info.si_addr = (void __user *) regs->cp0_epc; - force_sig_info(SIGFPE, &info, current); +out: + exception_exit(prev_state); } -static void do_trap_or_bp(struct pt_regs *regs, unsigned int code, +void do_trap_or_bp(struct pt_regs *regs, unsigned int code, const char *str) { - siginfo_t info; + siginfo_t info = { 0 }; char b[40]; #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP - if (kgdb_ll_trap(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) + if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr, + SIGTRAP) == NOTIFY_STOP) return; #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */ - if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) + if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr, + SIGTRAP) == NOTIFY_STOP) return; /* @@ -802,19 +1002,21 @@ else info.si_code = FPE_INTOVF; info.si_signo = SIGFPE; - info.si_errno = 0; info.si_addr = (void __user *) regs->cp0_epc; force_sig_info(SIGFPE, &info, current); break; case BRK_BUG: +#if defined(CONFIG_AVM_ENHANCED) + show_bug_by_bugtable(regs); +#endif /*--- #ifdef CONFIG_BUG_EXTRA_INFO ---*/ die_if_kernel("Kernel bug detected", regs); force_sig(SIGTRAP, current); break; case BRK_MEMU: /* - * Address errors may be deliberately induced by the FPU - * emulator to retake control of the CPU after executing the - * instruction in the delay slot of an emulated branch. + * This breakpoint code is used by the FPU emulator to retake + * control of the CPU after executing the instruction from the + * delay slot of an emulated branch. * * Terminate if exception was recognized as a delay slot return * otherwise handle as normal. @@ -834,29 +1036,43 @@ asmlinkage void do_bp(struct pt_regs *regs) { + unsigned long epc = msk_isa16_mode(exception_epc(regs)); unsigned int opcode, bcode; - unsigned long epc; - u16 instr[2]; + enum ctx_state prev_state; + mm_segment_t seg; + seg = get_fs(); + if (!user_mode(regs)) + set_fs(KERNEL_DS); + +#if defined(CONFIG_AVM_ENHANCED) + show_code_position_by_epc("kernel-breakpoint", regs); +#endif/*--- #if defined(CONFIG_AVM_ENHANCED) ---*/ + prev_state = exception_enter(); + current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; if (get_isa16_mode(regs->cp0_epc)) { - /* Calculate EPC. 
*/ - epc = exception_epc(regs); - if (cpu_has_mmips) { - if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) || - (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2))))) - goto out_sigsegv; - opcode = (instr[0] << 16) | instr[1]; + u16 instr[2]; + + if (__get_user(instr[0], (u16 __user *)epc)) + goto out_sigsegv; + + if (!cpu_has_mmips) { + /* MIPS16e mode */ + bcode = (instr[0] >> 5) & 0x3f; + } else if (mm_insn_16bit(instr[0])) { + /* 16-bit microMIPS BREAK */ + bcode = instr[0] & 0xf; } else { - /* MIPS16e mode */ - if (__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) + /* 32-bit microMIPS BREAK */ + if (__get_user(instr[1], (u16 __user *)(epc + 2))) goto out_sigsegv; - bcode = (instr[0] >> 6) & 0x3f; - do_trap_or_bp(regs, bcode, "Break"); - return; + opcode = (instr[0] << 16) | instr[1]; + bcode = (opcode >> 6) & ((1 << 20) - 1); } } else { - if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) + if (__get_user(opcode, (unsigned int __user *)epc)) goto out_sigsegv; + bcode = (opcode >> 6) & ((1 << 20) - 1); } /* @@ -865,23 +1081,36 @@ * Gas is bug-compatible, but not always, grrr... * We handle both cases with a simple heuristics. --macro */ - bcode = ((opcode >> 6) & ((1 << 20) - 1)); if (bcode >= (1 << 10)) - bcode >>= 10; + bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10); /* * notify the kprobe handlers, if instruction is likely to * pertain to them. */ switch (bcode) { + case BRK_UPROBE: + if (notify_die(DIE_UPROBE, "uprobe", regs, bcode, + current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP) + goto out; + else + break; + case BRK_UPROBE_XOL: + if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode, + current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP) + goto out; + else + break; case BRK_KPROBE_BP: - if (notify_die(DIE_BREAK, "debug", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) - return; + if (notify_die(DIE_BREAK, "debug", regs, bcode, + current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP) + goto out; else break; case BRK_KPROBE_SSTEPBP: - if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) - return; + if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode, + current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP) + goto out; else break; default: @@ -889,18 +1118,34 @@ } do_trap_or_bp(regs, bcode, "Break"); + +out: + set_fs(seg); + exception_exit(prev_state); return; out_sigsegv: force_sig(SIGSEGV, current); + goto out; } asmlinkage void do_tr(struct pt_regs *regs) { u32 opcode, tcode = 0; + enum ctx_state prev_state; u16 instr[2]; + mm_segment_t seg; unsigned long epc = msk_isa16_mode(exception_epc(regs)); +#if defined(CONFIG_AVM_ENHANCED) + show_code_position_by_epc("kernel-trap", regs); +#endif/*--- #if defined(CONFIG_AVM_ENHANCED) ---*/ + + seg = get_fs(); + if (!user_mode(regs)) + set_fs(get_ds()); + prev_state = exception_enter(); + current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; if (get_isa16_mode(regs->cp0_epc)) { if (__get_user(instr[0], (u16 __user *)(epc + 0)) || __get_user(instr[1], (u16 __user *)(epc + 2))) @@ -918,10 +1163,15 @@ } do_trap_or_bp(regs, tcode, "Trap"); + +out: + set_fs(seg); + exception_exit(prev_state); return; out_sigsegv: force_sig(SIGSEGV, current); + goto out; } asmlinkage void do_ri(struct pt_regs *regs) @@ -929,17 +1179,49 @@ unsigned int __user *epc = (unsigned int __user *)exception_epc(regs); unsigned long old_epc = regs->cp0_epc; unsigned long old31 = regs->regs[31]; + enum ctx_state prev_state; unsigned int opcode = 0; int 
status = -1; - if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), SIGILL) - == NOTIFY_STOP) - return; + /* + * Avoid any kernel code. Just emulate the R2 instruction + * as quickly as possible. + */ + if (mipsr2_emulation && cpu_has_mips_r6 && + likely(user_mode(regs)) && + likely(get_user(opcode, epc) >= 0)) { + unsigned long fcr31 = 0; + + status = mipsr2_decoder(regs, opcode, &fcr31); + switch (status) { + case 0: + case SIGEMT: + task_thread_info(current)->r2_emul_return = 1; + return; + case SIGILL: + goto no_r2_instr; + default: + process_fpemu_return(status, + ¤t->thread.cp0_baduaddr, + fcr31); + task_thread_info(current)->r2_emul_return = 1; + return; + } + } + +no_r2_instr: + + prev_state = exception_enter(); + current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; + + if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr, + SIGILL) == NOTIFY_STOP) + goto out; die_if_kernel("Reserved instruction in kernel code", regs); if (unlikely(compute_return_epc(regs) < 0)) - return; + goto out; if (get_isa16_mode(regs->cp0_epc)) { unsigned short mmop[2] = { 0 }; @@ -964,6 +1246,9 @@ if (status < 0) status = simulate_sync(regs, opcode); + + if (status < 0) + status = simulate_fp(regs, opcode, old_epc, old31); } if (status < 0) @@ -974,6 +1259,9 @@ regs->regs[31] = old31; force_sig(status, current); } + +out: + exception_exit(prev_state); } /* @@ -991,13 +1279,13 @@ * restricted the allowed set to exclude any CPUs with FPUs, * we'll skip the procedure. */ - if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) { + if (cpumask_intersects(¤t->cpus_allowed, &mt_fpu_cpumask)) { cpumask_t tmask; current->thread.user_cpus_allowed = current->cpus_allowed; - cpus_and(tmask, current->cpus_allowed, - mt_fpu_cpumask); + cpumask_and(&tmask, ¤t->cpus_allowed, + &mt_fpu_cpumask); set_cpus_allowed_ptr(current, &tmask); set_thread_flag(TIF_FPUBOUND); } @@ -1025,32 +1313,158 @@ { struct pt_regs *regs = data; - switch (action) { - default: - die_if_kernel("Unhandled kernel unaligned access or invalid " + die_if_kernel("COP2: Unhandled kernel unaligned access or invalid " "instruction", regs); - /* Fall through */ + force_sig(SIGILL, current); - case CU2_EXCEPTION: - force_sig(SIGILL, current); + return NOTIFY_OK; +} + +static int wait_on_fp_mode_switch(atomic_t *p) +{ + /* + * The FP mode for this task is currently being switched. That may + * involve modifications to the format of this tasks FP context which + * make it unsafe to proceed with execution for the moment. Instead, + * schedule some other task. + */ + schedule(); + return 0; +} + +static int enable_restore_fp_context(int msa) +{ + int err, was_fpu_owner, prior_msa; + + /* + * If an FP mode switch is currently underway, wait for it to + * complete before proceeding. + */ + wait_on_atomic_t(¤t->mm->context.fp_mode_switching, + wait_on_fp_mode_switch, TASK_KILLABLE); + + if (!used_math()) { + /* First time FP context user. */ + preempt_disable(); + err = init_fpu(); + if (msa && !err) { + enable_msa(); + init_msa_upper(); + set_thread_flag(TIF_USEDMSA); + set_thread_flag(TIF_MSA_CTX_LIVE); + } + preempt_enable(); + if (!err) + set_used_math(); + return err; } - return NOTIFY_OK; + /* + * This task has formerly used the FP context. + * + * If this thread has no live MSA vector context then we can simply + * restore the scalar FP context. If it has live MSA vector context + * (that is, it has or may have used MSA since last performing a + * function call) then we'll need to restore the vector context. 
This + * applies even if we're currently only executing a scalar FP + * instruction. This is because if we were to later execute an MSA + * instruction then we'd either have to: + * + * - Restore the vector context & clobber any registers modified by + * scalar FP instructions between now & then. + * + * or + * + * - Not restore the vector context & lose the most significant bits + * of all vector registers. + * + * Neither of those options is acceptable. We cannot restore the least + * significant bits of the registers now & only restore the most + * significant bits later because the most significant bits of any + * vector registers whose aliased FP register is modified now will have + * been zeroed. We'd have no way to know that when restoring the vector + * context & thus may load an outdated value for the most significant + * bits of a vector register. + */ + if (!msa && !thread_msa_context_live()) + return own_fpu(1); + + /* + * This task is using or has previously used MSA. Thus we require + * that Status.FR == 1. + */ + preempt_disable(); + was_fpu_owner = is_fpu_owner(); + err = own_fpu_inatomic(0); + if (err) + goto out; + + enable_msa(); + write_msa_csr(current->thread.fpu.msacsr); + set_thread_flag(TIF_USEDMSA); + + /* + * If this is the first time that the task is using MSA and it has + * previously used scalar FP in this time slice then we already nave + * FP context which we shouldn't clobber. We do however need to clear + * the upper 64b of each vector register so that this task has no + * opportunity to see data left behind by another. + */ + prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE); + if (!prior_msa && was_fpu_owner) { + init_msa_upper(); + + goto out; + } + + if (!prior_msa) { + /* + * Restore the least significant 64b of each vector register + * from the existing scalar FP context. + */ + _restore_fp(current); + + /* + * The task has not formerly used MSA, so clear the upper 64b + * of each vector register such that it cannot see data left + * behind by another task. + */ + init_msa_upper(); + } else { + /* We need to restore the vector context. */ + restore_msa(current); + + /* Restore the scalar FP control & status register */ + if (!was_fpu_owner) + write_32bit_cp1_register(CP1_STATUS, + current->thread.fpu.fcr31); + } + +out: + preempt_enable(); + + return 0; } asmlinkage void do_cpu(struct pt_regs *regs) { + enum ctx_state prev_state; unsigned int __user *epc; unsigned long old_epc, old31; + void __user *fault_addr; unsigned int opcode; + unsigned long fcr31; unsigned int cpid; - int status; + int status, err; unsigned long __maybe_unused flags; + int sig; - die_if_kernel("do_cpu invoked from kernel context!", regs); - + prev_state = exception_enter(); cpid = (regs->cp0_cause >> CAUSEB_CE) & 3; + if (cpid != 2) + die_if_kernel("do_cpu invoked from kernel context!", regs); + switch (cpid) { case 0: epc = (unsigned int __user *)exception_epc(regs); @@ -1060,7 +1474,7 @@ status = -1; if (unlikely(compute_return_epc(regs) < 0)) - return; + break; if (get_isa16_mode(regs->cp0_epc)) { unsigned short mmop[2] = { 0 }; @@ -1093,57 +1507,105 @@ force_sig(status, current); } - return; + break; case 3: /* - * Old (MIPS I and MIPS II) processors will set this code - * for COP1X opcode instructions that replaced the original - * COP3 space. We don't limit COP1 space instructions in - * the emulator according to the CPU ISA, so we want to - * treat COP1X instructions consistently regardless of which - * code the CPU chose. 
Therefore we redirect this trap to - * the FP emulator too. - * - * Then some newer FPU-less processors use this code - * erroneously too, so they are covered by this choice - * as well. + * The COP3 opcode space and consequently the CP0.Status.CU3 + * bit and the CP0.Cause.CE=3 encoding have been removed as + * of the MIPS III ISA. From the MIPS IV and MIPS32r2 ISAs + * up the space has been reused for COP1X instructions, that + * are enabled by the CP0.Status.CU1 bit and consequently + * use the CP0.Cause.CE=1 encoding for Coprocessor Unusable + * exceptions. Some FPU-less processors that implement one + * of these ISAs however use this code erroneously for COP1X + * instructions. Therefore we redirect this trap to the FP + * emulator too. */ - if (raw_cpu_has_fpu) + if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) { + force_sig(SIGILL, current); break; + } /* Fall through. */ case 1: - if (used_math()) /* Using the FPU again. */ - own_fpu(1); - else { /* First time FPU user. */ - init_fpu(); - set_used_math(); - } + err = enable_restore_fp_context(0); - if (!raw_cpu_has_fpu) { - int sig; - void __user *fault_addr = NULL; - sig = fpu_emulator_cop1Handler(regs, - ¤t->thread.fpu, - 0, &fault_addr); - if (!process_fpemu_return(sig, fault_addr)) - mt_ase_fp_affinity(); - } + if (raw_cpu_has_fpu && !err) + break; - return; + sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 0, + &fault_addr); + fcr31 = current->thread.fpu.fcr31; + + /* + * We can't allow the emulated instruction to leave + * any of the cause bits set in $fcr31. + */ + current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; + + /* Send a signal if required. */ + if (!process_fpemu_return(sig, fault_addr, fcr31) && !err) + mt_ase_fp_affinity(); + + break; case 2: raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs); - return; + break; } - force_sig(SIGILL, current); + exception_exit(prev_state); +} + +asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr) +{ + enum ctx_state prev_state; + + prev_state = exception_enter(); + current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; + if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0, + current->thread.trap_nr, SIGFPE) == NOTIFY_STOP) + goto out; + + /* Clear MSACSR.Cause before enabling interrupts */ + write_msa_csr(msacsr & ~MSA_CSR_CAUSEF); + local_irq_enable(); + + die_if_kernel("do_msa_fpe invoked from kernel context!", regs); + force_sig(SIGFPE, current); +out: + exception_exit(prev_state); +} + +asmlinkage void do_msa(struct pt_regs *regs) +{ + enum ctx_state prev_state; + int err; + + prev_state = exception_enter(); + + if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) { + force_sig(SIGILL, current); + goto out; + } + + die_if_kernel("do_msa invoked from kernel context!", regs); + + err = enable_restore_fp_context(1); + if (err) + force_sig(SIGILL, current); +out: + exception_exit(prev_state); } asmlinkage void do_mdmx(struct pt_regs *regs) { + enum ctx_state prev_state; + + prev_state = exception_enter(); force_sig(SIGILL, current); + exception_exit(prev_state); } /* @@ -1151,8 +1613,24 @@ */ asmlinkage void do_watch(struct pt_regs *regs) { + enum ctx_state prev_state; u32 cause; - +#if defined(CONFIG_AVM_SIMPLE_PROFILING) + avm_simple_profiling_log(avm_profile_data_type_exception_begin, regs->cp0_epc, + (regs->cp0_cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE); +#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/ + +#if defined(CONFIG_AVM_WP) + if (avm_wp_dispatcher(regs)) { +#if defined(CONFIG_AVM_SIMPLE_PROFILING) + 
avm_simple_profiling_log(avm_profile_data_type_exception_end, regs->cp0_epc, + (regs->cp0_cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE); +#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/ + local_irq_enable(); + return; + } +#endif + prev_state = exception_enter(); /* * Clear WP (bit 22) bit of cause register so we don't loop * forever. @@ -1174,27 +1652,35 @@ mips_clear_watch_registers(); local_irq_enable(); } +#if defined(CONFIG_AVM_SIMPLE_PROFILING) + avm_simple_profiling_log(avm_profile_data_type_exception_end, regs->cp0_epc, + (regs->cp0_cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE); +#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/ + exception_exit(prev_state); } asmlinkage void do_mcheck(struct pt_regs *regs) { - const int field = 2 * sizeof(unsigned long); int multi_match = regs->cp0_status & ST0_TS; + enum ctx_state prev_state; + mm_segment_t old_fs = get_fs(); + prev_state = exception_enter(); show_regs(regs); if (multi_match) { - printk("Index : %0x\n", read_c0_index()); - printk("Pagemask: %0x\n", read_c0_pagemask()); - printk("EntryHi : %0*lx\n", field, read_c0_entryhi()); - printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0()); - printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1()); - printk("\n"); + dump_tlb_regs(); + pr_info("\n"); dump_tlb_all(); } + if (!user_mode(regs)) + set_fs(KERNEL_DS); + show_code((unsigned int __user *) regs->cp0_epc); + set_fs(old_fs); + /* * Some chips may have other causes of machine check (e.g. SB1 * graduation timer) @@ -1286,6 +1772,12 @@ case CPU_34K: case CPU_74K: case CPU_1004K: + case CPU_1074K: + case CPU_INTERAPTIV: + case CPU_PROAPTIV: + case CPU_P5600: + case CPU_QEMU_GENERIC: + case CPU_I6400: { #define ERRCTL_PE 0x80000000 #define ERRCTL_L2P 0x00800000 @@ -1375,14 +1867,27 @@ printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n", reg_val & (1<<30) ? "secondary" : "primary", reg_val & (1<<31) ? "data" : "insn"); - printk("Error bits: %s%s%s%s%s%s%s\n", - reg_val & (1<<29) ? "ED " : "", - reg_val & (1<<28) ? "ET " : "", - reg_val & (1<<26) ? "EE " : "", - reg_val & (1<<25) ? "EB " : "", - reg_val & (1<<24) ? "EI " : "", - reg_val & (1<<23) ? "E1 " : "", - reg_val & (1<<22) ? "E0 " : ""); + if ((cpu_has_mips_r2_r6) && + ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { + pr_err("Error bits: %s%s%s%s%s%s%s%s\n", + reg_val & (1<<29) ? "ED " : "", + reg_val & (1<<28) ? "ET " : "", + reg_val & (1<<27) ? "ES " : "", + reg_val & (1<<26) ? "EE " : "", + reg_val & (1<<25) ? "EB " : "", + reg_val & (1<<24) ? "EI " : "", + reg_val & (1<<23) ? "E1 " : "", + reg_val & (1<<22) ? "E0 " : ""); + } else { + pr_err("Error bits: %s%s%s%s%s%s%s\n", + reg_val & (1<<29) ? "ED " : "", + reg_val & (1<<28) ? "ET " : "", + reg_val & (1<<26) ? "EE " : "", + reg_val & (1<<25) ? "EB " : "", + reg_val & (1<<24) ? "EI " : "", + reg_val & (1<<23) ? "E1 " : "", + reg_val & (1<<22) ? "E0 " : ""); + } printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1)); #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) @@ -1396,6 +1901,34 @@ panic("Can't handle the cache error!"); } +asmlinkage void do_ftlb(void) +{ + const int field = 2 * sizeof(unsigned long); + unsigned int reg_val; + + /* For the moment, report the problem and hang. 
*/ + if ((cpu_has_mips_r2_r6) && + ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { + pr_err("FTLB error exception, cp0_ecc=0x%08x:\n", + read_c0_ecc()); + pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc()); + reg_val = read_c0_cacheerr(); + pr_err("c0_cacheerr == %08x\n", reg_val); + + if ((reg_val & 0xc0000000) == 0xc0000000) { + pr_err("Decoded c0_cacheerr: FTLB parity error\n"); + } else { + pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n", + reg_val & (1<<30) ? "secondary" : "primary", + reg_val & (1<<31) ? "data" : "insn"); + } + } else { + pr_err("FTLB error exception\n"); + } + /* Just print the cacheerr bits for now */ + cache_parity_error(); +} + /* * SDBBP EJTAG debug exception handler. * We skip the instruction and return to the next instruction. @@ -1447,10 +1980,18 @@ void __noreturn nmi_exception_handler(struct pt_regs *regs) { + char str[100]; +#if defined(CONFIG_AVM_ENHANCED) + avm_tick_jiffies_update(); /*--- auch im NMI-Fall jiffies korrigieren ---*/ +#endif/*--- #if defined(CONFIG_AVM_ENHANCED) ---*/ + nmi_enter(); raw_notifier_call_chain(&nmi_chain, 0, regs); bust_spinlocks(1); - printk("NMI taken!!!!\n"); - die("NMI", regs); + regs->cp0_epc = read_c0_errorepc(); + snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n", + smp_processor_id(), regs->cp0_epc); + die(str, regs); + nmi_exit(); } #define VECTORSPACING 0x100 /* for EI/VI mode */ @@ -1513,7 +2054,6 @@ unsigned char *b; BUG_ON(!cpu_has_veic && !cpu_has_vint); - BUG_ON((n < 0) && (n > 9)); if (addr == NULL) { handler = (unsigned long) do_default_vi; @@ -1546,19 +2086,6 @@ extern char rollback_except_vec_vi; char *vec_start = using_rollback_handler() ? &rollback_except_vec_vi : &except_vec_vi; -#ifdef CONFIG_MIPS_MT_SMTC - /* - * We need to provide the SMTC vectored interrupt handler - * not only with the address of the handler, but with the - * Status.IM bit to be masked before going there. - */ - extern char except_vec_vi_mori; -#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) - const int mori_offset = &except_vec_vi_mori - vec_start + 2; -#else - const int mori_offset = &except_vec_vi_mori - vec_start; -#endif -#endif /* CONFIG_MIPS_MT_SMTC */ #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) const int lui_offset = &except_vec_vi_lui - vec_start + 2; const int ori_offset = &except_vec_vi_ori - vec_start + 2; @@ -1582,12 +2109,6 @@ #else handler_len); #endif -#ifdef CONFIG_MIPS_MT_SMTC - BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. 
*/ - - h = (u16 *)(b + mori_offset); - *h = (0x100 << n); -#endif /* CONFIG_MIPS_MT_SMTC */ h = (u16 *)(b + lui_offset); *h = (handler >> 16) & 0xffff; h = (u16 *)(b + ori_offset); @@ -1627,7 +2148,6 @@ } extern void tlb_init(void); -extern void flush_tlb_handlers(void); /* * Timer interrupt @@ -1642,7 +2162,13 @@ int cp0_perfcount_irq; EXPORT_SYMBOL_GPL(cp0_perfcount_irq); -static int __cpuinitdata noulri; +/* + * Fast debug channel IRQ or -1 if not present + */ +int cp0_fdc_irq; +EXPORT_SYMBOL_GPL(cp0_fdc_irq); + +static int noulri; static int __init ulri_disable(char *s) { @@ -1653,32 +2179,16 @@ } __setup("noulri", ulri_disable); -void __cpuinit per_cpu_trap_init(bool is_boot_cpu) +/* configure STATUS register */ +static void configure_status(void) { - unsigned int cpu = smp_processor_id(); - unsigned int status_set = ST0_CU0; - unsigned int hwrena = cpu_hwrena_impl_bits; -#ifdef CONFIG_MIPS_MT_SMTC - int secondaryTC = 0; - int bootTC = (cpu == 0); - - /* - * Only do per_cpu_trap_init() for first TC of Each VPE. - * Note that this hack assumes that the SMTC init code - * assigns TCs consecutively and in ascending order. - */ - - if (((read_c0_tcbind() & TCBIND_CURTC) != 0) && - ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id)) - secondaryTC = 1; -#endif /* CONFIG_MIPS_MT_SMTC */ - /* * Disable coprocessors and select 32-bit or 64-bit addressing * and the 16/32 or 32/32 FPR register model. Reset the BEV * flag that some firmware may have left set and the TS bit (for * IP27). Set XX for ISA IV code to work. */ + unsigned int status_set = ST0_CU0; #ifdef CONFIG_64BIT status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX; #endif @@ -1689,8 +2199,14 @@ change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX, status_set); +} - if (cpu_has_mips_r2) +/* configure HWRENA register */ +static void configure_hwrena(void) +{ + unsigned int hwrena = cpu_hwrena_impl_bits; + + if (cpu_has_mips_r2_r6) hwrena |= 0x0000000f; if (!noulri && cpu_has_userlocal) @@ -1698,11 +2214,10 @@ if (hwrena) write_c0_hwrena(hwrena); +} -#ifdef CONFIG_MIPS_MT_SMTC - if (!secondaryTC) { -#endif /* CONFIG_MIPS_MT_SMTC */ - +static void configure_exception_vector(void) +{ if (cpu_has_veic || cpu_has_vint) { unsigned long sr = set_c0_status(ST0_BEV); write_c0_ebase(ebase); @@ -1718,29 +2233,39 @@ } else set_c0_cause(CAUSEF_IV); } +} + +void per_cpu_trap_init(bool is_boot_cpu) +{ + unsigned int cpu = smp_processor_id(); + + configure_status(); + configure_hwrena(); + + configure_exception_vector(); /* * Before R2 both interrupt numbers were fixed to 7, so on R2 only: * * o read IntCtl.IPTI to determine the timer interrupt * o read IntCtl.IPPCI to determine the performance counter interrupt + * o read IntCtl.IPFDC to determine the fast debug channel interrupt */ - if (cpu_has_mips_r2) { + if (cpu_has_mips_r2_r6) { cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP; cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7; cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7; - if (cp0_perfcount_irq == cp0_compare_irq) - cp0_perfcount_irq = -1; + cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7; + if (!cp0_fdc_irq) + cp0_fdc_irq = -1; + } else { cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ; cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ; cp0_perfcount_irq = -1; + cp0_fdc_irq = -1; } -#ifdef CONFIG_MIPS_MT_SMTC - } -#endif /* CONFIG_MIPS_MT_SMTC */ - if (!cpu_data[cpu].asid_cache) cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; @@ -1749,28 +2274,15 @@ BUG_ON(current->mm); enter_lazy_tlb(&init_mm, 
current); -#ifdef CONFIG_MIPS_MT_SMTC - if (bootTC) { -#endif /* CONFIG_MIPS_MT_SMTC */ - /* Boot CPU's cache setup in setup_arch(). */ - if (!is_boot_cpu) - cpu_cache_init(); - tlb_init(); -#ifdef CONFIG_MIPS_MT_SMTC - } else if (!secondaryTC) { - /* - * First TC in non-boot VPE must do subset of tlb_init() - * for MMU countrol registers. - */ - write_c0_pagemask(PM_DEFAULT_MASK); - write_c0_wired(0); - } -#endif /* CONFIG_MIPS_MT_SMTC */ + /* Boot CPU's cache setup in setup_arch(). */ + if (!is_boot_cpu) + cpu_cache_init(); + tlb_init(); TLBMISS_HANDLER_SETUP(); } /* Install CPU exception handler */ -void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size) +void set_handler(unsigned long offset, void *addr, unsigned long size) { #ifdef CONFIG_CPU_MICROMIPS memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size); @@ -1780,7 +2292,7 @@ local_flush_icache_range(ebase + offset, ebase + offset + size); } -static char panic_null_cerr[] __cpuinitdata = +static char panic_null_cerr[] = "Trying to set NULL cache error exception handler"; /* @@ -1788,7 +2300,7 @@ * This is suitable only for the cache error exception which is the only * exception handler that is being run uncached. */ -void __cpuinit set_uncached_handler(unsigned long offset, void *addr, +void set_uncached_handler(unsigned long offset, void *addr, unsigned long size) { unsigned long uncached_ebase = CKSEG1ADDR(ebase); @@ -1817,26 +2329,26 @@ check_wait(); -#if defined(CONFIG_KGDB) - if (kgdb_early_setup) - return; /* Already done */ -#endif - if (cpu_has_veic || cpu_has_vint) { unsigned long size = 0x200 + VECTORSPACING*64; ebase = (unsigned long) __alloc_bootmem(size, 1 << fls(size), 0); } else { -#ifdef CONFIG_KVM_GUEST -#define KVM_GUEST_KSEG0 0x40000000 - ebase = KVM_GUEST_KSEG0; -#else - ebase = CKSEG0; -#endif - if (cpu_has_mips_r2) + ebase = CAC_BASE; + + if (cpu_has_mips_r2_r6) ebase += (read_c0_ebase() & 0x3ffff000); } + if (cpu_has_mmips) { + unsigned int config3 = read_c0_config3(); + + if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) + write_c0_config3(config3 | MIPS_CONF3_ISA_OE); + else + write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE); + } + if (board_ebase_setup) board_ebase_setup(); per_cpu_trap_init(true); @@ -1912,6 +2424,7 @@ set_except_vector(11, handle_cpu); set_except_vector(12, handle_ov); set_except_vector(13, handle_tr); + set_except_vector(14, handle_msa_fpe); if (current_cpu_type() == CPU_R6000 || current_cpu_type() == CPU_R6000A) { @@ -1934,6 +2447,14 @@ if (cpu_has_fpu && !cpu_has_nofpuex) set_except_vector(15, handle_fpe); + set_except_vector(16, handle_ftlb); + + if (cpu_has_rixiex) { + set_except_vector(19, tlb_do_page_fault_0); + set_except_vector(20, tlb_do_page_fault_0); + } + + set_except_vector(21, handle_msa); set_except_vector(22, handle_mdmx); if (cpu_has_mcheck) @@ -1956,9 +2477,37 @@ set_handler(0x080, &except_vec3_generic, 0x80); local_flush_icache_range(ebase, ebase + 0x400); - flush_tlb_handlers(); sort_extable(__start___dbe_table, __stop___dbe_table); cu2_notifier(default_cu2_call, 0x80000000); /* Run last */ } + +static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd, + void *v) +{ + switch (cmd) { + case CPU_PM_ENTER_FAILED: + case CPU_PM_EXIT: + configure_status(); + configure_hwrena(); + configure_exception_vector(); + + /* Restore register with CPU number for TLB handlers */ + TLBMISS_HANDLER_RESTORE(); + + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block trap_pm_notifier_block = { + .notifier_call = trap_pm_notifier, 
+}; + +static int __init trap_pm_init(void) +{ + return cpu_pm_register_notifier(&trap_pm_notifier_block); +} +arch_initcall(trap_pm_init);
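
One of the behavioural changes in this patch is how do_bp() extracts the 20-bit code field of a BREAK instruction: instead of simply shifting the field right by 10 when the upper half is used, the new code swaps the two 10-bit halves so the value placed in the upper half takes precedence. The stand-alone, user-space C sketch below illustrates that decoding convention only; decode_break_code() and the two sample encodings are hypothetical illustrations, not part of the patch or of the kernel API.

/*
 * Illustrative sketch (assumption: classic MIPS BREAK encoding with the
 * code field in bits 25..6).  Mirrors the half-swap heuristic used by
 * the patched do_bp().
 */
#include <stdio.h>

static unsigned int decode_break_code(unsigned int opcode)
{
	/* The code field occupies bits 25..6 of the BREAK encoding. */
	unsigned int bcode = (opcode >> 6) & ((1 << 20) - 1);

	/*
	 * Assemblers disagree on whether a small break code belongs in the
	 * upper or the lower 10 bits of the field.  If anything is set in
	 * the upper half, swap the halves so that value ends up in the low
	 * bits, as in the patched handler.
	 */
	if (bcode >= (1 << 10))
		bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);

	return bcode;
}

int main(void)
{
	/* "break 7" encoded with the code in the lower vs. the upper half. */
	unsigned int lo = (7u << 6) | 0x0d;	/* code in bits 15..6  */
	unsigned int hi = (7u << 16) | 0x0d;	/* code in bits 25..16 */

	printf("%u %u\n", decode_break_code(lo), decode_break_code(hi));
	return 0;
}

Both encodings decode to 7 with this scheme, which is why the handler can compare the result directly against the small BRK_* constants regardless of which convention the assembler used.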