--- zzzz-none-000/linux-2.6.32.61/arch/mips/kernel/smtc.c	2013-06-10 09:43:48.000000000 +0000
+++ ar10-7272-687/linux-2.6.32.61/arch/mips/kernel/smtc.c	2014-03-28 16:50:08.000000000 +0000
@@ -40,6 +40,7 @@
 #include
 #include
 #include
+#include
 
 /*
  * SMTC Kernel needs to manipulate low-level CPU interrupt mask
@@ -68,6 +69,7 @@
  * Data structures purely associated with SMTC parallelism
  */
 
+extern unsigned int cpu_idle_state(int vpe);
 
 /*
  * Table for tracking ASIDs whose lifetime is prolonged.
@@ -100,7 +102,11 @@
 
 static int vpe0limit;
 static int ipibuffers;
+#ifdef CONFIG_IFX_VPE_EXT
+int nostlb = 0;
+#else
 static int nostlb;
+#endif
 static int asidmask;
 unsigned long smtc_asid_mask = 0xff;
 
@@ -267,7 +273,7 @@
 		smtc_status |= SMTC_TLB_SHARED;
 		local_flush_tlb_all();
 
-		printk("TLB of %d entry pairs shared by %d VPEs\n",
+		printk("TLB of %d entry pairs shared by %d VPEs\n",
 			tlbsiz, vpes);
 	} else {
 		printk("WARNING: TLB Not Sharable on SMTC Boot!\n");
@@ -278,7 +284,7 @@
 
 /*
  * Incrementally build the CPU map out of constituent MIPS MT cores,
- * using the specified available VPEs and TCs.  Plaform code needs
+ * using the specified available VPEs and TCs. Plaform code needs
  * to ensure that each MIPS MT core invokes this routine on reset,
  * one at a time(!).
  *
@@ -352,8 +358,6 @@
 		cpu_data[cpu].options &= ~MIPS_CPU_FPU;
 		cpu_data[cpu].vpe_id = vpe;
 		cpu_data[cpu].tc_id = tc;
-		/* Multi-core SMTC hasn't been tested, but be prepared */
-		cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff;
 	}
 
 	/*
@@ -393,12 +397,13 @@
 	/* cpu_data index starts at zero */
 	cpu = 0;
 	cpu_data[cpu].vpe_id = 0;
-	cpu_data[cpu].tc_id = 0;
-	cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff;
+	cpu_data[cpu].tc_id = 0;
+	cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff;
 	cpu++;
 
 	/* Report on boot-time options */
 	mips_mt_set_cpuoptions();
+	tclimit = NR_CPUS;
 	if (vpelimit > 0)
 		printk("Limit of %d VPEs set\n", vpelimit);
 	if (tclimit > 0)
@@ -499,10 +504,17 @@
 			 * Clear ERL/EXL of VPEs other than 0
 			 * and set restricted interrupt enable/mask.
 			 */
+
+#if defined(CONFIG_AR9) || defined(CONFIG_VR9) || defined(CONFIG_AR10) || defined(CONFIG_HN1) // Enable IE and IM bits for all the intr lines.
+			write_vpe_c0_status((read_vpe_c0_status()
+				& ~(ST0_BEV | ST0_ERL | ST0_EXL))
+				| (ST0_IM | ST0_IE));
+#else
 			write_vpe_c0_status((read_vpe_c0_status()
 				& ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM))
 				| (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7
 				| ST0_IE));
+#endif
 			/*
 			 * set config to be the same as vpe0,
 			 *  particularly kseg0 coherency alg
@@ -612,11 +624,6 @@
 	UNLOCK_MT_PRA();
 }
 
-void smtc_init_secondary(void)
-{
-	local_irq_enable();
-}
-
 void smtc_smp_finish(void)
 {
 	int cpu = smp_processor_id();
@@ -636,6 +643,16 @@
 
 void smtc_cpus_done(void)
 {
+#if defined(CONFIG_LANTIQ)
+	if(NR_CPUS <= LANTIQ_YIELD_TC1) {
+#if LANTIQ_YIELD_MASK_TC1 > 0
+		yield_context_init_on(1, LANTIQ_YIELD_TC1, LANTIQ_YIELD_MASK_TC1);
+#endif
+#if LANTIQ_YIELD_MASK_TC2 > 0
+		yield_context_init_on(0, LANTIQ_YIELD_TC2, LANTIQ_YIELD_MASK_TC2);
+#endif
+	}
+#endif/*--- #if defined(CONFIG_LANTIQ) ---*/
 }
 
 /*
@@ -705,12 +722,10 @@
 	 */
 
 	/* If no one is eligible, service locally */
-	if (target >= NR_CPUS) {
+	if (target >= NR_CPUS)
 		do_IRQ_no_affinity(irq);
-		return;
-	}
-
-	smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
+	else
+		smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
 }
 
 #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
@@ -797,16 +812,34 @@
 	int mtflags;
 	unsigned long tcrestart;
 	extern void r4k_wait_irqoff(void), __pastwait(void);
-	int set_resched_flag = (type == LINUX_SMP_IPI &&
-				action == SMP_RESCHEDULE_YOURSELF);
-
+	int set_resched_flag = ((type == LINUX_SMP_IPI) && (action == SMP_RESCHEDULE_YOURSELF));
+
 	if (cpu == smp_processor_id()) {
 		printk("Cannot Send IPI to self!\n");
 		return;
 	}
-	if (set_resched_flag && IPIQ[cpu].resched_flag != 0)
-		return; /* There is a reschedule queued already */
-
+	if (set_resched_flag) {
+#if 0
+		static unsigned int last_jiffies, not_idle, resched, cnt;
+		cnt++;
+		if(jiffies - last_jiffies > 10 *HZ) {
+			__printk("not_idle: %u %u %u\n", not_idle, resched, cnt);
+			not_idle = cnt = resched = 0;
+			last_jiffies = jiffies;
+		}
+#endif
+		if(IPIQ[cpu].resched_flag != 0) {
+			/*--- resched++; ---*/
+			return; /* There is a reschedule queued already */
+		}
+#if 0
+mbahr: Deaktiviert: evtl. fuer Hw-Watchdog "verantwortlich" !
+		if(!cpu_idle_state(cpu_data[cpu].vpe_id)) {
+			/*--- not_idle++; ---*/
+			return; /* There is not in idle */
+		}
+#endif
+	}
 	/* Set up a descriptor, to be delivered either promptly or queued */
 	pipi = smtc_ipi_dq(&freeIPIq);
 	if (pipi == NULL) {
@@ -814,12 +847,14 @@
 		mips_mt_regdump(dvpe());
 		panic("IPI Msg. Buffers Depleted\n");
 	}
+/*--- printk(KERN_ERR"%s(%d) dest_vpe_id =%x act vpe_id=%x\n", __func__, type, cpu_data[cpu].vpe_id, cpu_data[smp_processor_id()].vpe_id); ---*/
 	pipi->type = type;
 	pipi->arg = (void *)action;
 	pipi->dest = cpu;
 	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
 		/* If not on same VPE, enqueue and send cross-VPE interrupt */
 		IPIQ[cpu].resched_flag |= set_resched_flag;
+		smp_mb();
 		smtc_ipi_nq(&IPIQ[cpu], pipi);
 		LOCK_CORE_PRA();
 		settc(cpu_data[cpu].tc_id);
@@ -867,6 +902,7 @@
 		write_tc_c0_tchalt(0);
 		UNLOCK_CORE_PRA();
 		IPIQ[cpu].resched_flag |= set_resched_flag;
+		smp_mb();
 		smtc_ipi_nq(&IPIQ[cpu], pipi);
 	} else {
 postdirect:
@@ -948,6 +984,7 @@
 	int irq = MIPS_CPU_IRQ_BASE + 1;
 
 	smtc_ipi_nq(&freeIPIq, pipi);
+/*--- printk(KERN_ERR"[%x]%s(%d)\n", cpu, __func__, type_copy); ---*/
 
 	switch (type_copy) {
 	case SMTC_CLOCK_TICK:
@@ -961,7 +998,9 @@
 	case LINUX_SMP_IPI:
 		switch ((int)arg_copy) {
 		case SMP_RESCHEDULE_YOURSELF:
+/*--- printk(KERN_ERR"[%x]%s(%d)\n", cpu, __func__, type_copy); ---*/
 			ipi_resched_interrupt();
+			IPIQ[cpu].resched_flag = 0;
 			break;
 		case SMP_CALL_FUNCTION:
 			ipi_call_interrupt();
@@ -1020,9 +1059,6 @@
 		pipi = __smtc_ipi_dq(q);
 		spin_unlock(&q->lock);
 		if (pipi != NULL) {
-			if (pipi->type == LINUX_SMP_IPI &&
-			    (int)pipi->arg == SMP_RESCHEDULE_YOURSELF)
-				IPIQ[cpu].resched_flag = 0;
 			ipi_decode(pipi);
 		}
 		/*
@@ -1105,9 +1141,6 @@
 			 * with interrupts off
 			 */
 			local_irq_save(flags);
-			if (pipi->type == LINUX_SMP_IPI &&
-			    (int)pipi->arg == SMP_RESCHEDULE_YOURSELF)
-				IPIQ[cpu].resched_flag = 0;
 			ipi_decode(pipi);
 			local_irq_restore(flags);
 		}
@@ -1282,18 +1315,20 @@
 void smtc_soft_dump(void)
 {
 	int i;
-
+#if 0
 	printk("Counter Interrupts taken per CPU (TC)\n");
 	for (i=0; i < NR_CPUS; i++) {
 		printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints);
 	}
+#endif
 	printk("Self-IPI invocations:\n");
 	for (i=0; i < NR_CPUS; i++) {
 		printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
 	}
 	smtc_ipi_qdump();
-	printk("%d Recoveries of \"stolen\" FPU\n",
-	       atomic_read(&smtc_fpu_recoveries));
+	if(atomic_read(&smtc_fpu_recoveries)) {
+		printk("%d Recoveries of \"stolen\" FPU\n", atomic_read(&smtc_fpu_recoveries));
+	}
 }
 
 
@@ -1328,6 +1363,13 @@
 	asid = asid_cache(cpu);
 
 	do {
+#ifdef CONFIG_IFX_VPE_EXT
+		/* If TLB is shared between AP and RP (AP is running SMTC),
+		   leave out max ASID i.e., ASID_MASK for RP
+		 */
+		if (!nostlb && ((asid & ASID_MASK) == (ASID_MASK - 1)))
+			asid++;
+#endif
 		if (!((asid += ASID_INC) & ASID_MASK) ) {
 			if (cpu_has_vtag_icache)
 				flush_icache_all();
@@ -1460,3 +1502,80 @@
 	}
 	mips_ihb();
 }
+
+
+/* VPE/SMP Prototype implements platform interfaces directly */
+
+/*
+ * Cause the specified action to be performed on a targeted "CPU"
+ */
+
+static void smtc_send_ipi_single(int cpu, unsigned int action)
+{
+	/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
+	smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
+}
+
+static void smtc_send_ipi_mask(const struct cpumask *mask, unsigned int action)
+{
+	unsigned int i;
+
+	for_each_cpu(i, mask)
+		smtc_send_ipi_single(i, action);
+}
+
+/*
+ * Post-config but pre-boot cleanup entry point
+ */
+static void __cpuinit smtc_init_secondary(void)
+{
+	struct cpuinfo_mips *c = &current_cpu_data;
+	int myvpe;
+
+	c->core = (read_c0_ebase() >> 1) & 0xff;
+
+	/* Don't enable Malta I/O interrupts (IP2) for secondary VPEs */
+	myvpe = read_c0_tcbind() & TCBIND_CURVPE;
+	if (myvpe != 0) {
+		/* Ideally, this should be done only once per VPE, but... */
+		clear_c0_status(ST0_IM);
+		set_c0_status((0x100 << cp0_compare_irq)
+				| (0x100 << MIPS_CPU_IPI_IRQ));
+		if (cp0_perfcount_irq >= 0)
+			set_c0_status(0x100 << cp0_perfcount_irq);
+	}
+	local_irq_enable();
+}
+
+/*
+ * Platform SMP pre-initialization
+ *
+ * As noted above, we can assume a single CPU for now
+ * but it may be multithreaded.
+ */
+
+static void __init smtc_smp_setup(void)
+{
+	/*
+	 * we won't get the definitive value until
+	 * we've run smtc_prepare_cpus later, but
+	 * we would appear to need an upper bound now.
+	 */
+	smp_num_siblings = smtc_build_cpu_map(0);
+}
+
+static void __init msmtc_prepare_cpus(unsigned int max_cpus)
+{
+	smtc_prepare_cpus(max_cpus);
+}
+
+struct plat_smp_ops smtc_smp_ops = {
+	.send_ipi_single	= smtc_send_ipi_single,
+	.send_ipi_mask		= smtc_send_ipi_mask,
+	.init_secondary		= smtc_init_secondary,
+	.smp_finish		= smtc_smp_finish,
+	.cpus_done		= smtc_cpus_done,
+	.boot_secondary		= smtc_boot_secondary,
+	.smp_setup		= smtc_smp_setup,
+	.prepare_cpus		= msmtc_prepare_cpus,
+};
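
The closing hunk turns smtc.c into a self-contained plat_smp_ops provider (smtc_smp_ops with send_ipi_single/mask, init_secondary, boot_secondary, smp_setup, prepare_cpus and friends). For orientation, a minimal sketch of how board support code would hand this structure to the generic MIPS SMP layer follows; register_smp_ops() is the standard hook declared in asm/smp-ops.h, while the helper name and the assumption that it is called from the board's early prom_init()/plat_mem_setup() path are illustrative and not part of this patch:

/*
 * Illustrative sketch only -- not part of the patch above.
 * register_smp_ops() is the standard arch/mips registration interface
 * (asm/smp-ops.h); the helper name below and its call site in the
 * board's early setup code are hypothetical.
 */
#include <linux/init.h>
#include <asm/smp-ops.h>

extern struct plat_smp_ops smtc_smp_ops;	/* defined at the end of smtc.c */

void __init plat_register_smtc_smp_ops(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
	/* Hand the SMTC callbacks to the generic MIPS SMP code */
	register_smp_ops(&smtc_smp_ops);
#endif
}

Once registered, the generic MIPS SMP code drives these callbacks: smp_prepare_cpus() ends up in msmtc_prepare_cpus()/smtc_prepare_cpus(), each secondary TC is started through smtc_boot_secondary(), and it runs smtc_init_secondary() before entering the idle loop.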