--- zzzz-none-000/linux-3.18.24/arch/mips/kernel/traps.c 2015-10-31 20:39:51.000000000 +0000 +++ rtl96-5690pro-762/linux-3.18.24/arch/mips/kernel/traps.c 2024-08-14 08:36:36.000000000 +0000 @@ -61,6 +61,9 @@ #include #include #include +#ifdef CONFIG_ARCH_CPU_RLX5281 +#include +#endif extern void check_wait(void); extern asmlinkage void rollback_handle_int(void); @@ -91,6 +94,9 @@ extern asmlinkage void handle_mcheck(void); extern asmlinkage void handle_reserved(void); extern void tlb_do_page_fault_0(void); +#ifdef CONFIG_RTK_LOG_SAVE +extern void log_save_event(void); +#endif void (*board_be_init)(void); int (*board_be_handler)(struct pt_regs *regs, int is_fixup); @@ -216,6 +222,9 @@ set_fs(KERNEL_DS); show_stacktrace(task, ®s); set_fs(old_fs); +#ifdef CONFIG_RTK_LOG_SAVE + log_save_event(); +#endif } static void show_code(unsigned int __user *pc) @@ -237,6 +246,40 @@ } } +#ifdef CONFIG_CPU_HAS_RADIAX +static void __show_radiax(struct task_struct *tsk) +{ + struct mips_radiax_struct *regs = &tsk->thread.radiax; + + printk("CBS0 : %08x\n", (uint32_t)regs->radiaxr[0]); + printk("CBS1 : %08x\n", (uint32_t)regs->radiaxr[1]); + printk("CBS2 : %08x\n", (uint32_t)regs->radiaxr[2]); + printk("CBE0 : %08x\n", (uint32_t)regs->radiaxr[3]); + printk("CBE1 : %08x\n", (uint32_t)regs->radiaxr[4]); + printk("CBE2 : %08x\n", (uint32_t)regs->radiaxr[5]); + printk("LPS0 : %08x\n", (uint32_t)regs->radiaxr[6]); + printk("LPE0 : %08x\n", (uint32_t)regs->radiaxr[7]); + printk("LPC0 : %08x\n", (uint32_t)regs->radiaxr[8]); + printk("MMD : %08x\n", (uint32_t)regs->radiaxr[9]); + printk("M0LL : %08x\n", (uint32_t)regs->radiaxr[10]); + printk("M0LH : %08x\n", (uint32_t)regs->radiaxr[11]); + printk("M0HL : %08x\n", (uint32_t)regs->radiaxr[12]); + printk("M0HH : %08x\n", (uint32_t)regs->radiaxr[13]); + printk("M1LL : %08x\n", (uint32_t)regs->radiaxr[14]); + printk("M1LH : %08x\n", (uint32_t)regs->radiaxr[15]); + printk("M1HL : %08x\n", (uint32_t)regs->radiaxr[16]); + printk("M1HH : %08x\n", 
(uint32_t)regs->radiaxr[17]); + printk("M2LL : %08x\n", (uint32_t)regs->radiaxr[18]); + printk("M2LH : %08x\n", (uint32_t)regs->radiaxr[19]); + printk("M2HL : %08x\n", (uint32_t)regs->radiaxr[20]); + printk("M2HH : %08x\n", (uint32_t)regs->radiaxr[21]); + printk("M3LL : %08x\n", (uint32_t)regs->radiaxr[22]); + printk("M3LH : %08x\n", (uint32_t)regs->radiaxr[23]); + printk("M3HL : %08x\n", (uint32_t)regs->radiaxr[24]); + printk("M3HH : %08x\n", (uint32_t)regs->radiaxr[25]); +} +#endif + static void __show_regs(const struct pt_regs *regs) { const int field = 2 * sizeof(unsigned long); @@ -274,7 +317,6 @@ */ printk("epc : %0*lx %pS\n", field, regs->cp0_epc, (void *) regs->cp0_epc); - printk(" %s\n", print_tainted()); printk("ra : %0*lx %pS\n", field, regs->regs[31], (void *) regs->regs[31]); @@ -331,6 +373,10 @@ printk("PrId : %08x (%s)\n", read_c0_prid(), cpu_name_string()); + +#ifdef CONFIG_CPU_HAS_RADIAX + __show_radiax(current); +#endif } /* @@ -366,6 +412,9 @@ show_code((unsigned int __user *) regs->cp0_epc); printk("\n"); set_fs(old_fs); +#ifdef CONFIG_RTK_LOG_SAVE + log_save_event(); +#endif } static int regs_to_trapnr(struct pt_regs *regs) @@ -380,6 +429,18 @@ static int die_counter; int sig = SIGSEGV; +#ifdef CONFIG_RTK_KERNEL_OMD + extern int panic_log_on; + extern int panic_lock; + extern int panic_write(void); + extern int fault_panic_write_file; + int panic_write_file = 0; + if(panic_lock == 0){ + panic_lock = 1; + panic_log_on = 1; + panic_write_file = 1; + } +#endif oops_enter(); if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), @@ -395,6 +456,11 @@ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); raw_spin_unlock_irq(&die_lock); +#ifdef CONFIG_RTK_KERNEL_OMD + if(panic_write_file || fault_panic_write_file){ + panic_write(); + } +#endif oops_exit(); if (in_interrupt()) @@ -495,6 +561,7 @@ #define FUNC 0x0000003f #define SYNC 0x0000000f #define RDHWR 0x0000003b +#define MFLXC0 0x40634000 /* microMIPS definitions */ #define MM_POOL32A_FUNC 
0xfc00ffff @@ -653,6 +720,18 @@ static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode) { +#ifdef CONFIG_CPU_RLX + if (opcode == MFLXC0) { + struct thread_info *ti = task_thread_info(current); + int rt = (opcode & RT) >> 16; + + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, + 1, regs, 0); + + regs->regs[rt] = ti->tp_value; + return 0; + } +#else if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) { int rd = (opcode & RD) >> 11; int rt = (opcode & RT) >> 16; @@ -660,6 +739,7 @@ simulate_rdhwr(regs, rd, rt); return 0; } +#endif /* Not ours. */ return -1; @@ -705,6 +785,7 @@ exception_exit(prev_state); } +#ifdef CONFIG_CPU_HAS_EMU int process_fpemu_return(int sig, void __user *fault_addr) { if (sig == SIGSEGV || sig == SIGBUS) { @@ -730,10 +811,12 @@ return 0; } } +#endif /* CONFIG_CPU_HAS_EMU */ /* * XXX Delayed fp exceptions when doing a lazy ctx switch XXX */ +#ifdef CONFIG_CPU_HAS_FPU asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) { enum ctx_state prev_state; @@ -745,6 +828,7 @@ goto out; die_if_kernel("FP exception in kernel code", regs); +#ifdef CONFIG_CPU_HAS_EMU if (fcr31 & FPU_CSR_UNI_X) { int sig; void __user *fault_addr = NULL; @@ -779,7 +863,9 @@ process_fpemu_return(sig, fault_addr); goto out; - } else if (fcr31 & FPU_CSR_INV_X) + } else +#endif + if (fcr31 & FPU_CSR_INV_X) info.si_code = FPE_FLTINV; else if (fcr31 & FPU_CSR_DIV_X) info.si_code = FPE_FLTDIV; @@ -799,6 +885,7 @@ out: exception_exit(prev_state); } +#endif static void do_trap_or_bp(struct pt_regs *regs, unsigned int code, const char *str) @@ -839,6 +926,7 @@ die_if_kernel("Kernel bug detected", regs); force_sig(SIGTRAP, current); break; +#ifdef CONFIG_CPU_HAS_EMU case BRK_MEMU: /* * Address errors may be deliberately induced by the FPU @@ -854,6 +942,7 @@ die_if_kernel("Math emu break/trap", regs); force_sig(SIGTRAP, current); break; +#endif default: scnprintf(b, sizeof(b), "%s instruction in kernel code", str); die_if_kernel(b, regs); @@ 
-1009,7 +1098,7 @@ status = SIGSEGV; opcode = (mmop[0] << 16) | mmop[1]; - if (status < 0) + if (!cpu_has_userlocal && status < 0) status = simulate_rdhwr_mm(regs, opcode); } else { if (unlikely(get_user(opcode, epc) < 0)) @@ -1018,10 +1107,10 @@ if (!cpu_has_llsc && status < 0) status = simulate_llsc(regs, opcode); - if (status < 0) + if (!cpu_has_userlocal && status < 0) status = simulate_rdhwr_normal(regs, opcode); - if (status < 0) + if (!cpu_has_sync && status < 0) status = simulate_sync(regs, opcode); } @@ -1043,6 +1132,7 @@ * emulated more than some threshold number of instructions, force migration to * a "CPU" that has FP support. */ +#ifdef CONFIG_CPU_HAS_EMU static void mt_ase_fp_affinity(void) { #ifdef CONFIG_MIPS_MT_FPAFF @@ -1066,6 +1156,7 @@ } #endif /* CONFIG_MIPS_MT_FPAFF */ } +#endif /* * No lock; only written during early bootup by CPU 0. @@ -1094,6 +1185,7 @@ return NOTIFY_OK; } +#ifdef CONFIG_MIPS_FPU static int enable_restore_fp_context(int msa) { int err, was_fpu_owner, prior_msa; @@ -1200,6 +1292,7 @@ return 0; } +#endif asmlinkage void do_cpu(struct pt_regs *regs) { @@ -1208,7 +1301,7 @@ unsigned long old_epc, old31; unsigned int opcode; unsigned int cpid; - int status, err; + int status, err __maybe_unused; unsigned long __maybe_unused flags; prev_state = exception_enter(); @@ -1237,7 +1330,7 @@ status = SIGSEGV; opcode = (mmop[0] << 16) | mmop[1]; - if (status < 0) + if (!cpu_has_userlocal && status < 0) status = simulate_rdhwr_mm(regs, opcode); } else { if (unlikely(get_user(opcode, epc) < 0)) @@ -1246,7 +1339,7 @@ if (!cpu_has_llsc && status < 0) status = simulate_llsc(regs, opcode); - if (status < 0) + if (!cpu_has_userlocal && status < 0) status = simulate_rdhwr_normal(regs, opcode); } @@ -1280,8 +1373,10 @@ /* Fall through. 
*/ case 1: +#ifdef CONFIG_MIPS_FPU err = enable_restore_fp_context(0); +#ifdef CONFIG_CPU_HAS_EMU if (!raw_cpu_has_fpu || err) { int sig; void __user *fault_addr = NULL; @@ -1291,6 +1386,8 @@ if (!process_fpemu_return(sig, fault_addr) && !err) mt_ase_fp_affinity(); } +#endif +#endif goto out; @@ -1305,6 +1402,7 @@ exception_exit(prev_state); } +#ifdef CONFIG_CPU_HAS_MSA asmlinkage void do_msa_fpe(struct pt_regs *regs) { enum ctx_state prev_state; @@ -1335,7 +1433,9 @@ out: exception_exit(prev_state); } +#endif +#ifndef CONFIG_CPU_RLX asmlinkage void do_mdmx(struct pt_regs *regs) { enum ctx_state prev_state; @@ -1344,10 +1444,12 @@ force_sig(SIGILL, current); exception_exit(prev_state); } +#endif /* * Called with interrupts disabled. */ +#if 0//def CONFIG_HARDWARE_WATCHPOINTS asmlinkage void do_watch(struct pt_regs *regs) { enum ctx_state prev_state; @@ -1377,7 +1479,39 @@ } exception_exit(prev_state); } +#endif + +#ifdef CONFIG_ARCH_CPU_RLX5281 +asmlinkage void do_watch(struct pt_regs *regs) +{ + unsigned int addr; + unsigned char entry, attr; + extern void watch_kernel(void); + + addr = read_lxc0_wmpvaddr(); + entry = (read_lxc0_wmpstatus() & ENTRY_MATCH) >> 16; + attr = (read_lxc0_wmpstatus() & 0x7); + printk("WMPU exception (%s): addr:0x%x entry:0x%x attr:0x%x\n", __FUNCTION__, addr, entry, attr); + + //printk("0: %08x %08x %08x\n", (u32)read_c0_watchlo0(), (u32)read_c0_watchhi0(), (u32)read_lxc0_wmpxmask0()); + //printk("1: %08x %08x %08x\n", (u32)read_c0_watchlo1(), (u32)read_c0_watchhi1(), (u32)read_lxc0_wmpxmask1()); + //printk("2: %08x %08x %08x\n", (u32)read_c0_watchlo2(), (u32)read_c0_watchhi2(), (u32)read_lxc0_wmpxmask2()); + //printk("3: %08x %08x %08x\n", (u32)read_c0_watchlo3(), (u32)read_c0_watchhi3(), (u32)read_lxc0_wmpxmask3()); + //printk("4: %08x %08x %08x\n", (u32)read_c0_watchlo4(), (u32)read_c0_watchhi4(), (u32)read_lxc0_wmpxmask4()); + //printk("5: %08x %08x %08x\n", (u32)read_c0_watchlo5(), (u32)read_c0_watchhi5(), 
(u32)read_lxc0_wmpxmask5()); + //printk("6: %08x %08x %08x\n", (u32)read_c0_watchlo6(), (u32)read_c0_watchhi6(), (u32)read_lxc0_wmpxmask6()); + //printk("7: %08x %08x %08x\n", (u32)read_c0_watchlo7(), (u32)read_c0_watchhi7(), (u32)read_lxc0_wmpxmask7()); + //printk("-: %08x\n", (u32)read_lxc0_wmpctl()); + + + rlx_wmpu_reset(); + show_regs(regs); + dump_stack(); + //watch_kernel(); +} +#endif +#ifndef CONFIG_CPU_RLX asmlinkage void do_mcheck(struct pt_regs *regs) { const int field = 2 * sizeof(unsigned long); @@ -1413,7 +1547,9 @@ "matching entries in the TLB.", (multi_match) ? "" : "not "); } +#endif +#ifdef CONFIG_MIPS_MT asmlinkage void do_mt(struct pt_regs *regs) { int subcode; @@ -1448,8 +1584,10 @@ force_sig(SIGILL, current); } +#endif +#ifdef CONFIG_CPU_HAS_DSP asmlinkage void do_dsp(struct pt_regs *regs) { if (cpu_has_dsp) @@ -1457,6 +1595,7 @@ force_sig(SIGILL, current); } +#endif asmlinkage void do_reserved(struct pt_regs *regs) { @@ -1623,6 +1762,7 @@ panic("Can't handle the cache error!"); } +#ifndef CONFIG_CPU_RLX asmlinkage void do_ftlb(void) { const int field = 2 * sizeof(unsigned long); @@ -1711,6 +1851,7 @@ regs->cp0_epc = read_c0_errorepc(); die(str, regs); } +#endif #define VECTORSPACING 0x100 /* for EI/VI mode */ @@ -1880,17 +2021,6 @@ int cp0_perfcount_irq; EXPORT_SYMBOL_GPL(cp0_perfcount_irq); -static int noulri; - -static int __init ulri_disable(char *s) -{ - pr_info("Disabling ulri\n"); - noulri = 1; - - return 1; -} -__setup("noulri", ulri_disable); - /* configure STATUS register */ static void configure_status(void) { @@ -1921,7 +2051,7 @@ if (cpu_has_mips_r2) hwrena |= 0x0000000f; - if (!noulri && cpu_has_userlocal) + if (cpu_has_mips_r && cpu_has_userlocal) hwrena |= (1 << 29); if (hwrena) @@ -1982,10 +2112,10 @@ BUG_ON(current->mm); enter_lazy_tlb(&init_mm, current); - /* Boot CPU's cache setup in setup_arch(). */ - if (!is_boot_cpu) - cpu_cache_init(); - tlb_init(); + /* Boot CPU's cache setup in setup_arch(). 
*/ + if (!is_boot_cpu) + cpu_cache_init(); + tlb_init(); TLBMISS_HANDLER_SETUP(); } @@ -2019,20 +2149,10 @@ memcpy((void *)(uncached_ebase + offset), addr, size); } -static int __initdata rdhwr_noopt; -static int __init set_rdhwr_noopt(char *str) -{ - rdhwr_noopt = 1; - return 1; -} - -__setup("rdhwr_noopt", set_rdhwr_noopt); - void __init trap_init(void) { extern char except_vec3_generic; extern char except_vec4; - extern char except_vec3_r4000; unsigned long i; check_wait(); @@ -2048,10 +2168,10 @@ __alloc_bootmem(size, 1 << fls(size), 0); } else { #ifdef CONFIG_KVM_GUEST -#define KVM_GUEST_KSEG0 0x40000000 - ebase = KVM_GUEST_KSEG0; +#define KVM_GUEST_KSEG0 0x40000000 + ebase = KVM_GUEST_KSEG0; #else - ebase = CKSEG0; + ebase = CKSEG0; #endif if (cpu_has_mips_r2) ebase += (read_c0_ebase() & 0x3ffff000); @@ -2093,8 +2213,13 @@ /* * Only some CPUs have the watch exceptions. */ +//#ifdef CONFIG_HARDWARE_WATCHPOINTS +#ifdef CONFIG_ARCH_CPU_RLX5281 if (cpu_has_watch) set_except_vector(23, handle_watch); + else + printk("CPU Don't have watch !!\n"); +#endif /* * Initialise interrupt handlers @@ -2135,44 +2260,42 @@ set_except_vector(8, handle_sys); set_except_vector(9, handle_bp); - set_except_vector(10, rdhwr_noopt ? handle_ri : - (cpu_has_vtag_icache ? - handle_ri_rdhwr_vivt : handle_ri_rdhwr)); +#ifdef CONFIG_CPU_RLX + set_except_vector(10, handle_ri); +#else + set_except_vector(10, cpu_has_userlocal ? handle_ri : handle_ri_rdhwr); +#endif + set_except_vector(11, handle_cpu); set_except_vector(12, handle_ov); - set_except_vector(13, handle_tr); - set_except_vector(14, handle_msa_fpe); - if (current_cpu_type() == CPU_R6000 || - current_cpu_type() == CPU_R6000A) { - /* - * The R6000 is the only R-series CPU that features a machine - * check exception (similar to the R4000 cache error) and - * unaligned ldc1/sdc1 exception. The handlers have not been - * written yet. Well, anyway there is no R6000 machine on the - * current list of targets for Linux/MIPS. 
- * (Duh, crap, there is someone with a triple R6k machine) - */ - //set_except_vector(14, handle_mc); - //set_except_vector(15, handle_ndc); - } + if (cpu_has_tr) + set_except_vector(13, handle_tr); + if (cpu_has_msa) + set_except_vector(14, handle_msa_fpe); if (board_nmi_handler_setup) board_nmi_handler_setup(); +#ifdef CONFIG_CPU_HAS_FPU if (cpu_has_fpu && !cpu_has_nofpuex) set_except_vector(15, handle_fpe); +#endif - set_except_vector(16, handle_ftlb); + if (cpu_has_ftlb) + set_except_vector(16, handle_ftlb); if (cpu_has_rixiex) { set_except_vector(19, tlb_do_page_fault_0); set_except_vector(20, tlb_do_page_fault_0); } - set_except_vector(21, handle_msa); - set_except_vector(22, handle_mdmx); + if (cpu_has_msa) + set_except_vector(21, handle_msa); + + if (cpu_has_mdmx) + set_except_vector(22, handle_mdmx); if (cpu_has_mcheck) set_except_vector(24, handle_mcheck); @@ -2180,15 +2303,13 @@ if (cpu_has_mipsmt) set_except_vector(25, handle_mt); - set_except_vector(26, handle_dsp); + if (cpu_has_dsp) + set_except_vector(26, handle_dsp); if (board_cache_error_setup) board_cache_error_setup(); - if (cpu_has_vce) - /* Special exception: R4[04]00 uses also the divec space. */ - set_handler(0x180, &except_vec3_r4000, 0x100); - else if (cpu_has_4kex) + if (cpu_has_4kex) set_handler(0x180, &except_vec3_generic, 0x80); else set_handler(0x080, &except_vec3_generic, 0x80);