--- zzzz-none-000/linux-2.6.32.61/arch/mips/kernel/cevt-smtc.c	2013-06-10 09:43:48.000000000 +0000
+++ ar9-7330-650/linux-2.6.32.61/arch/mips/kernel/cevt-smtc.c	2015-04-09 11:31:23.000000000 +0000
@@ -43,8 +43,7 @@
  * is always going to be overkill, but always going to be enough.
  */
-unsigned long smtc_nexttime[NR_CPUS][NR_CPUS];
-static int smtc_nextinvpe[NR_CPUS];
+int cp0_timer_irq_installed;
 
 /*
  * Timestamps stored are absolute values to be programmed
@@ -73,91 +72,80 @@
  */
 #define CATCHUP_INCREMENT 64
-
-static int mips_next_event(unsigned long delta,
-			   struct clock_event_device *evt)
-{
+static volatile unsigned long smtc_trigger[NR_CPUS][NR_CPUS];
+#if NR_CPUS > 2
+/*--------------------------------------------------------------------------------*\
+ * get the cpu whose timer expires next
+ * returns < 0 if no expiry is pending
+\*--------------------------------------------------------------------------------*/
+static int get_next_expire_cpu(unsigned long reference, unsigned int vpe) {
+	unsigned int i;
+	int cpu = -1;
+	unsigned long mindiff = (unsigned long)LONG_MAX;
+
+	for_each_online_cpu(i) {
+		if(ISVALID(smtc_trigger[vpe][i])) {
+			unsigned long diff = smtc_trigger[vpe][i] - reference;
+			if(diff < mindiff) {
+				mindiff = diff;
+				cpu = i;
+			}
+		}
+	}
+	return cpu;
+}
+#endif
+/*--------------------------------------------------------------------------------*\
+\*--------------------------------------------------------------------------------*/
+static int mips_next_event(unsigned long delta, struct clock_event_device *evt) {
 	unsigned long flags;
 	unsigned int mtflags;
-	unsigned long timestamp, reference, previous;
-	unsigned long nextcomp = 0L;
+	unsigned long timestamp, actual_timestamp;
+	int next_expire_cpu __attribute__((unused));
 	int vpe = current_cpu_data.vpe_id;
 	int cpu = smp_processor_id();
 	local_irq_save(flags);
 	mtflags = dmt();
+	actual_timestamp = (unsigned long)read_c0_count();
+	timestamp = MAKEVALID(actual_timestamp + delta);
+#if NR_CPUS == 2
+	/*--- only one TC per VPE: unconditionally program this expiry ---*/
+	smtc_trigger[vpe][cpu] = timestamp;
+#else
 	/*
 	 * Maintain the per-TC virtual timer
 	 * and program the per-VPE shared Count register
 	 * as appropriate here...
 	 */
-	reference = (unsigned long)read_c0_count();
-	timestamp = MAKEVALID(reference + delta);
-	/*
-	 * To really model the clock, we have to catch the case
-	 * where the current next-in-VPE timestamp is the old
-	 * timestamp for the calling CPE, but the new value is
-	 * in fact later. In that case, we have to do a full
-	 * scan and discover the new next-in-VPE CPU id and
-	 * timestamp.
-	 */
-	previous = smtc_nexttime[vpe][cpu];
-	if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous)
-	    && IS_SOONER(previous, timestamp, reference)) {
-		int i;
-		int soonest = cpu;
-
-		/*
-		 * Update timestamp array here, so that new
-		 * value gets considered along with those of
-		 * other virtual CPUs on the VPE.
-		 */
-		smtc_nexttime[vpe][cpu] = timestamp;
-		for_each_online_cpu(i) {
-			if (ISVALID(smtc_nexttime[vpe][i])
-			    && IS_SOONER(smtc_nexttime[vpe][i],
-				smtc_nexttime[vpe][soonest], reference)) {
-				soonest = i;
-			}
-		}
-		smtc_nextinvpe[vpe] = soonest;
-		nextcomp = smtc_nexttime[vpe][soonest];
-	/*
-	 * Otherwise, we don't have to process the whole array rank,
-	 * we just have to see if the event horizon has gotten closer.
-	 */
-	} else {
-		if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) ||
-		    IS_SOONER(timestamp,
-			smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) {
-			smtc_nextinvpe[vpe] = cpu;
-			nextcomp = timestamp;
-		}
-		/*
-		 * Since next-in-VPE may me the same as the executing
-		 * virtual CPU, we update the array *after* checking
-		 * its value.
-		 */
-		smtc_nexttime[vpe][cpu] = timestamp;
-	}
-
-	/*
-	 * It may be that, in fact, we don't need to update Compare,
-	 * but if we do, we want to make sure we didn't fall into
-	 * a crack just behind Count.
-	 */
-	if (ISVALID(nextcomp)) {
-		write_c0_compare(nextcomp);
+	next_expire_cpu = get_next_expire_cpu(actual_timestamp, vpe);
+	if(unlikely(next_expire_cpu < 0)) {
+		/*--- no expiry pending on this vpe -> store the new timestamp for the current cpu (per-vpe) ---*/
+		smtc_trigger[vpe][cpu] = timestamp;
+	} else if(IS_SOONER(timestamp, actual_timestamp, smtc_trigger[vpe][next_expire_cpu])) {
+		/*--- new timestamp is due before the pending expiry -> program it for the current cpu (per-vpe) ---*/
+		smtc_trigger[vpe][cpu] = timestamp;
+	} else {
+		if(next_expire_cpu != cpu) {
+			/*--- check whether the new timestamp is sooner than the one stored for this cpu ---*/
+			if(IS_SOONER(timestamp, actual_timestamp, smtc_trigger[vpe][cpu])) {
+				smtc_trigger[vpe][cpu] = timestamp;
+			}
		}
+		/*--- new timestamp lies behind the next pending expiry -> keep the stored value ---*/
+		timestamp = smtc_trigger[vpe][cpu];
+	}
+#endif
+	if (ISVALID(timestamp)) {
+		write_c0_compare(timestamp);
 		ehb();
 		/*
 		 * We never return an error, we just make sure
 		 * that we trigger the handlers as quickly as
 		 * we can if we fell behind.
 		 */
-		while ((nextcomp - (unsigned long)read_c0_count())
-		    > (unsigned long)LONG_MAX) {
-			nextcomp += CATCHUP_INCREMENT;
-			write_c0_compare(nextcomp);
+		if ((timestamp - (unsigned long)read_c0_count()) > (unsigned long)LONG_MAX) {
+			write_c0_compare((unsigned long)read_c0_count() + CATCHUP_INCREMENT);
 			ehb();
 		}
 	}
@@ -165,74 +153,63 @@
 	local_irq_restore(flags);
 	return 0;
 }
-
-
-void smtc_distribute_timer(int vpe)
-{
+/*--------------------------------------------------------------------------------*\
+\*--------------------------------------------------------------------------------*/
+void smtc_distribute_timer(int vpe) {
 	unsigned long flags;
 	unsigned int mtflags;
+	unsigned long nextstamp;
 	int cpu;
 	struct clock_event_device *cd;
-	unsigned long nextstamp;
-	unsigned long reference;
-
+	unsigned long actual_timestamp;
 
-repeat:
-	nextstamp = 0L;
-	for_each_online_cpu(cpu) {
-		/*
-		 * Find virtual CPUs within the current VPE who have
-		 * unserviced timer requests whose time is now past.
-		 */
-		local_irq_save(flags);
-		mtflags = dmt();
-		if (cpu_data[cpu].vpe_id == vpe &&
-		    ISVALID(smtc_nexttime[vpe][cpu])) {
-			reference = (unsigned long)read_c0_count();
-			if ((smtc_nexttime[vpe][cpu] - reference)
-			    > (unsigned long)LONG_MAX) {
-				smtc_nexttime[vpe][cpu] = 0L;
-				emt(mtflags);
-				local_irq_restore(flags);
-				/*
-				 * We don't send IPIs to ourself.
-				 */
-				if (cpu != smp_processor_id()) {
-					smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
-				} else {
-					cd = &per_cpu(mips_clockevent_device, cpu);
-					cd->event_handler(cd);
-				}
-			} else {
-				/* Local to VPE but Valid Time not yet reached. */
-				if (!ISVALID(nextstamp) ||
-				    IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp,
-				    reference)) {
-					smtc_nextinvpe[vpe] = cpu;
-					nextstamp = smtc_nexttime[vpe][cpu];
-				}
-				emt(mtflags);
-				local_irq_restore(flags);
-			}
-		} else {
-			emt(mtflags);
-			local_irq_restore(flags);
-
-		}
-	}
-	/* Reprogram for interrupt at next soonest timestamp for VPE */
-	if (ISVALID(nextstamp)) {
-		write_c0_compare(nextstamp);
-		ehb();
-		if ((nextstamp - (unsigned long)read_c0_count())
-		    > (unsigned long)LONG_MAX)
-			goto repeat;
-	}
+	for(;;) {
+		nextstamp = 0;
+		for_each_online_cpu(cpu) {
+			if (cpu_data[cpu].vpe_id != vpe) {
+				continue;
+			}
+			local_irq_save(flags);
+			mtflags = dmt();
+			if(!ISVALID(smtc_trigger[vpe][cpu])) {
+				emt(mtflags);
+				local_irq_restore(flags);
+				continue;
+			}
+			actual_timestamp = (unsigned long)read_c0_count();
+			if((smtc_trigger[vpe][cpu] - actual_timestamp) > (unsigned long)LONG_MAX) {
+				smtc_trigger[vpe][cpu] = 0L;
+				emt(mtflags);
+				local_irq_restore(flags);
+				/* We don't send IPIs to ourself. */
+				if (cpu != smp_processor_id()) {
+					smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
+				} else {
+					cd = &per_cpu(mips_clockevent_device, cpu);
+					cd->event_handler(cd);
+				}
+			} else {
+				if(!ISVALID(nextstamp) || IS_SOONER(smtc_trigger[vpe][cpu], nextstamp, actual_timestamp)) {
+					nextstamp = smtc_trigger[vpe][cpu];
+				}
+				emt(mtflags);
+				local_irq_restore(flags);
+			}
+		}
+		/* Reprogram for interrupt at next soonest timestamp for VPE */
+		if (ISVALID(nextstamp)) {
+			write_c0_compare(nextstamp);
+			ehb();
+			if ((nextstamp - (unsigned long)read_c0_count()) > (unsigned long)LONG_MAX) {
+				continue;
+			}
+		}
+		break;
+	}
 }
-
-
-irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
-{
+/*--------------------------------------------------------------------------------*\
+\*--------------------------------------------------------------------------------*/
+irqreturn_t c0_compare_interrupt(int irq, void *dev_id) {
 	int cpu = smp_processor_id();
 
 	/* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */
@@ -260,9 +241,8 @@
 		return -ENXIO;
 	if (cpu == 0) {
 		for (i = 0; i < num_possible_cpus(); i++) {
-			smtc_nextinvpe[i] = 0;
 			for (j = 0; j < num_possible_cpus(); j++)
-				smtc_nexttime[i][j] = 0L;
+				smtc_trigger[i][j] = 0L;
 		}
 		/*
 		 * SMTC also can't have the usablility test
@@ -277,9 +257,10 @@
 	 * get_c0_compare_int is a hook to allow a platform to return the
 	 * interrupt number of it's liking.
 	 */
-	irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
 	if (get_c0_compare_int)
 		irq = get_c0_compare_int();
+	else
+		irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
 
 	cd = &per_cpu(mips_clockevent_device, cpu);
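
The reworked per-VPE bookkeeping above stands or falls with the modular-time macros defined earlier in cevt-smtc.c: timestamps are absolute Count values, zero is reserved as the "nothing pending" sentinel (MAKEVALID/ISVALID), and IS_SOONER() compares two timestamps by their unsigned distance from a reference so that a wrap of the 32-bit Count register does not reorder events. The user-space sketch below illustrates that idiom under those assumed macro definitions; trigger[] and next_expire_cpu() are simplified stand-ins for smtc_trigger[vpe][] and get_next_expire_cpu(), not code taken from the patch.

/*
 * Standalone sketch of the modular-time arithmetic used by the patch.
 * The macros are assumed to match the definitions earlier in cevt-smtc.c;
 * "now" models the free-running Count register.
 */
#include <stdio.h>
#include <limits.h>

#define NCPU 4

/* Zero is reserved as the "no expiry pending" sentinel. */
#define MAKEVALID(x) (((x) == 0L) ? 1L : (x))
#define ISVALID(x)   ((x) != 0L)

/* a is sooner than b, viewed from reference, under wraparound. */
#define IS_SOONER(a, b, reference) \
	(((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference)))

static unsigned long trigger[NCPU];

/* Same scan as get_next_expire_cpu(): smallest unsigned distance wins. */
static int next_expire_cpu(unsigned long reference)
{
	unsigned long mindiff = (unsigned long)LONG_MAX;
	int i, cpu = -1;

	for (i = 0; i < NCPU; i++) {
		if (ISVALID(trigger[i])) {
			unsigned long diff = trigger[i] - reference;
			if (diff < mindiff) {
				mindiff = diff;
				cpu = i;
			}
		}
	}
	return cpu;
}

int main(void)
{
	/* Counter is close to wrapping; cpu 2's expiry lies past the wrap. */
	unsigned long now = (unsigned long)-100;

	trigger[0] = 0;                     /* nothing pending          */
	trigger[1] = MAKEVALID(now + 80);   /* expires before the wrap  */
	trigger[2] = MAKEVALID(now + 150);  /* expires after the wrap   */
	trigger[3] = 0;

	printf("next to expire: cpu %d\n", next_expire_cpu(now));  /* cpu 1 */
	printf("cpu 2 sooner than cpu 1? %d\n",
	       IS_SOONER(trigger[2], trigger[1], now));            /* 0 */
	return 0;
}

Because mindiff starts at LONG_MAX, a stored timestamp whose unsigned distance from "now" exceeds LONG_MAX, i.e. one that already lies in the past, is never selected by the scan; that is the same convention the expiry test in smtc_distribute_timer() relies on.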
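The catch-up path in mips_next_event() also changed shape: the old code stepped nextcomp forward in CATCHUP_INCREMENT units inside a while loop, while the new code re-arms Compare once, relative to the Count value just read, whenever the freshly written Compare already lies in the past. Below is a minimal sketch of that test, assuming the CATCHUP_INCREMENT value from this file; rearm_if_behind() is a hypothetical helper, and count/compare are plain variables standing in for the coprocessor-0 registers.

/*
 * Sketch of the fell-behind test applied after write_c0_compare().
 */
#include <stdio.h>
#include <limits.h>

#define CATCHUP_INCREMENT 64

static unsigned long rearm_if_behind(unsigned long compare, unsigned long count)
{
	/*
	 * If Compare already lies behind Count, the unsigned difference
	 * wraps and exceeds LONG_MAX; re-arm a little ahead of Count so
	 * the interrupt still fires, instead of waiting for a full wrap.
	 */
	if ((compare - count) > (unsigned long)LONG_MAX)
		return count + CATCHUP_INCREMENT;
	return compare;
}

int main(void)
{
	printf("%lu\n", rearm_if_behind(1000, 900));  /* still ahead: 1000 */
	printf("%lu\n", rearm_if_behind(900, 1000));  /* behind: 1064      */
	return 0;
}

Re-arming from the live Count rather than stepping the stale compare value bounds the recovery to a single register write, at the cost of a small amount of timer slip when the deadline was missed.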