--- zzzz-none-000/linux-4.9.276/arch/mips/kernel/smp-cmp.c 2021-07-20 14:21:16.000000000 +0000 +++ falcon-5530-750/linux-4.9.276/arch/mips/kernel/smp-cmp.c 2023-04-05 08:19:00.000000000 +0000 @@ -18,6 +18,7 @@ #undef DEBUG +#include #include #include #include @@ -38,11 +39,26 @@ #include #include #include +#ifdef CONFIG_LTQ_VMB +#include +#endif +#ifdef CONFIG_AVM_ENHANCED +#include +#endif /*--- #ifdef CONFIG_AVM_ENHANCED ---*/ + +#ifdef CONFIG_LTQ_ITC +#include +#endif static void cmp_init_secondary(void) { struct cpuinfo_mips *c __maybe_unused = &current_cpu_data; +#ifdef CONFIG_CPU_HAS_DSP_ASE + init_dsp(); /* initialize DSP register cpux */ +#endif + write_c0_errorepc(0); + /* Assume GIC is present */ change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP5 | STATUSF_IP6 | STATUSF_IP7); @@ -54,6 +70,10 @@ c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE; #endif + +#ifdef CONFIG_LTQ_ITC + itc_init(); +#endif } static void cmp_smp_finish(void) @@ -72,8 +92,17 @@ local_irq_enable(); } -/* - * Setup the PC, SP, and GP of a secondary processor and start it running +void play_dead(void) +{ + unsigned int cpu; + + cpu = smp_processor_id(); + pr_info("CPU%d going offline\n", cpu); +} + +static void *stub_addr; + +/* Setup the PC, SP, and GP of a secondary processor and start it running * smp_bootstrap is the place to resume from * __KSTK_TOS(idle) is apparently the stack pointer * (unsigned long)idle->thread_info the gp @@ -82,30 +111,118 @@ { struct thread_info *gp = task_thread_info(idle); unsigned long sp = __KSTK_TOS(idle); - unsigned long pc = (unsigned long)&smp_bootstrap; + unsigned long pc = (unsigned long)stub_addr; unsigned long a0 = 0; +#ifdef CONFIG_LTQ_VMB + int ret; + struct CPU_launch_t cpu_launch; +#endif pr_debug("SMPCMP: CPU%d: %s cpu %d\n", smp_processor_id(), __func__, cpu); -#if 0 - /* Needed? 
*/ - flush_icache_range((unsigned long)gp, - (unsigned long)(gp + sizeof(struct thread_info))); -#endif +#ifdef CONFIG_LTQ_VMB + ret = vmb_cpu_alloc(cpu, "LINUX"); + if (ret == -VMB_EBUSY) { + pr_err("VPE %d is Busy !!!\n", cpu); + ret = vmb_cpu_alloc(MAX_CPU, "LINUX"); + pr_err("[%s]:[%d] CPU ret = %d\n", + __func__, __LINE__, ret); + if (ret == -VMB_EBUSY) { + pr_err("ALL the CPUs are Busy !\n"); + return; + } + } + memset(&cpu_launch, 0, sizeof(struct CPU_launch_t)); + +#if defined(CONFIG_VMB_LAUNCH_KSEG1) + cpu_launch.start_addr = CKSEG1ADDR(pc); +#else + cpu_launch.start_addr = pc; +#endif + cpu_launch.sp = sp; + cpu_launch.gp = (unsigned long)gp; + cpu_launch.a0 = a0; + + ret = vmb_cpu_start(ret, cpu_launch, 0, 0, 0); + if (ret == -VMB_ETIMEOUT || ret == -VMB_ENACK) { + pr_err("[%s]:[%d] FW %s could not be launched on CPU %d.", + __func__, __LINE__, "LINUX", cpu); + pr_err("The CPU has been force reset. Please use alloc and then start.\n"); + return; + } +#else amon_cpu_start(cpu, pc, sp, (unsigned long)gp, a0); +#endif } -/* - * Common setup before any secondaries are started +static unsigned int core_vpe_count(unsigned int core) +{ + unsigned int cfg; + + if ((!IS_ENABLED(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt) && + (!IS_ENABLED(CONFIG_CPU_MIPSR6) || !cpu_has_vp)) + return 1; + + mips_cm_lock_other(core, 0); + cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK; + mips_cm_unlock_other(); + return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1; +} + +/*Common setup before any secondaries are started */ void __init cmp_smp_setup(void) { int i; int ncpu = 0; + unsigned int ncores, nvpes, core_vpes; + int c, v, v_min; - pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__); + /* Use a stub if the kernel code is located in >256MB, but we + * are configured to run in EVA mode. 
+ */ + if (IS_ENABLED(CONFIG_EVA) && + (unsigned long)smp_bootstrap_end > ARCH_LOW_ADDRESS_LIMIT) { + size_t stub_len = smp_bootstrap_end - (char *)&smp_bootstrap; + phys_addr_t stub_phys; + + stub_addr = alloc_bootmem_low(stub_len); + stub_phys = virt_to_phys(stub_addr); + + memcpy(stub_addr, &smp_bootstrap, stub_len); + pr_info("SMPCMP: initialized the EVA stub code: %zu@%pa\n", + stub_len, &stub_phys); + } else { + stub_addr = &smp_bootstrap; + } + + pr_info("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__); + + /* Detect & record VPE topology */ + ncores = mips_cm_numcores(); + pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE"); + + for (c = nvpes = 0; c < ncores; c++) { + core_vpes = core_vpe_count(c); + pr_cont("%c%u", c ? ',' : '{', core_vpes); + + /* Use the number of VPEs in core 0 for smp_num_siblings */ + if (!c) + smp_num_siblings = core_vpes; + v_min = NR_CPUS - nvpes; + v_min = min_t(int, core_vpes, v_min); + for (v = 0; v < v_min; v++) { + cpu_data[nvpes + v].core = c; +#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6) + cpu_data[nvpes + v].vpe_id = v; +#endif + } + + nvpes += core_vpes; + } + pr_cont("} total %u\n", nvpes); #ifdef CONFIG_MIPS_MT_FPAFF /* If we have an FPU, enroll ourselves in the FPU-full mask */ @@ -135,9 +252,28 @@ void __init cmp_prepare_cpus(unsigned int max_cpus) { + unsigned int cca; + bool cca_unsuitable; + pr_debug("SMPCMP: CPU%d: %s max_cpus=%d\n", smp_processor_id(), __func__, max_cpus); + /* Detect whether the CCA is unsuited to multi-core SMP */ + cca = read_c0_config() & CONF_CM_CMASK; + switch (cca) { + case 0x4: /* CWBE */ + case 0x5: /* CWB */ + pr_info("CCA is coherent, multi-core is fine\n"); + /* The CCA is coherent, multi-core is fine */ + cca_unsuitable = false; + break; + + default: + pr_info("CCA is not coherent, multi-core is not usable\n"); + /* CCA is not coherent, multi-core is not usable */ + cca_unsuitable = true; + } + #ifdef CONFIG_MIPS_MT /* * FIXME: some of these options are 
per-system, some per-core and