--- zzzz-none-000/linux-3.10.107/drivers/clocksource/exynos_mct.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/drivers/clocksource/exynos_mct.c	2021-02-04 17:41:59.000000000 +0000
@@ -16,6 +16,7 @@
 #include <linux/err.h>
 #include <linux/clk.h>
 #include <linux/clockchips.h>
+#include <linux/cpu.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
 #include <linux/percpu.h>
@@ -23,9 +24,7 @@
 #include <linux/of_irq.h>
 #include <linux/of_address.h>
 #include <linux/clocksource.h>
-
-#include <asm/arch_timer.h>
-#include <asm/localtimer.h>
+#include <linux/sched_clock.h>
 
 #define EXYNOS4_MCTREG(x)		(x)
 #define EXYNOS4_MCT_G_CNT_L		EXYNOS4_MCTREG(0x100)
@@ -71,6 +70,10 @@
 	MCT_L1_IRQ,
 	MCT_L2_IRQ,
 	MCT_L3_IRQ,
+	MCT_L4_IRQ,
+	MCT_L5_IRQ,
+	MCT_L6_IRQ,
+	MCT_L7_IRQ,
 	MCT_NR_IRQS,
 };
 
@@ -80,7 +83,7 @@
 static int mct_irqs[MCT_NR_IRQS];
 
 struct mct_clock_event_device {
-	struct clock_event_device *evt;
+	struct clock_event_device evt;
 	unsigned long base;
 	char name[10];
 };
@@ -91,7 +94,7 @@
 	u32 mask;
 	u32 i;
 
-	__raw_writel(value, reg_base + offset);
+	writel_relaxed(value, reg_base + offset);
 
 	if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
 		stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
@@ -141,8 +144,8 @@
 
 	/* Wait maximum 1 ms until written values are applied */
 	for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++)
-		if (__raw_readl(reg_base + stat_addr) & mask) {
-			__raw_writel(mask, reg_base + stat_addr);
+		if (readl_relaxed(reg_base + stat_addr) & mask) {
+			writel_relaxed(mask, reg_base + stat_addr);
 			return;
 		}
 
@@ -150,79 +153,123 @@
 }
 
 /* Clocksource handling */
-static void exynos4_mct_frc_start(u32 hi, u32 lo)
+static void exynos4_mct_frc_start(void)
 {
 	u32 reg;
 
-	exynos4_mct_write(lo, EXYNOS4_MCT_G_CNT_L);
-	exynos4_mct_write(hi, EXYNOS4_MCT_G_CNT_U);
-
-	reg = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
+	reg = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
 	reg |= MCT_G_TCON_START;
 	exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
 }
 
-static cycle_t exynos4_frc_read(struct clocksource *cs)
+/**
+ * exynos4_read_count_64 - Read all 64-bits of the global counter
+ *
+ * This will read all 64-bits of the global counter taking care to make sure
+ * that the upper and lower half match. Note that reading the MCT can be quite
+ * slow (hundreds of nanoseconds) so you should use the 32-bit (lower half
+ * only) version when possible.
+ *
+ * Returns the number of cycles in the global counter.
+ */
+static u64 exynos4_read_count_64(void)
 {
 	unsigned int lo, hi;
-	u32 hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U);
+	u32 hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);
 
 	do {
 		hi = hi2;
-		lo = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_L);
-		hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U);
+		lo = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
+		hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U);
 	} while (hi != hi2);
 
 	return ((cycle_t)hi << 32) | lo;
 }
 
+/**
+ * exynos4_read_count_32 - Read the lower 32-bits of the global counter
+ *
+ * This will read just the lower 32-bits of the global counter. This is marked
+ * as notrace so it can be used by the scheduler clock.
+ *
+ * Returns the number of cycles in the global counter (lower 32 bits).
+ */
+static u32 notrace exynos4_read_count_32(void)
+{
+	return readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
+}
+
+static cycle_t exynos4_frc_read(struct clocksource *cs)
+{
+	return exynos4_read_count_32();
+}
+
 static void exynos4_frc_resume(struct clocksource *cs)
 {
-	exynos4_mct_frc_start(0, 0);
+	exynos4_mct_frc_start();
 }
 
-struct clocksource mct_frc = {
+static struct clocksource mct_frc = {
 	.name		= "mct-frc",
 	.rating		= 400,
 	.read		= exynos4_frc_read,
-	.mask		= CLOCKSOURCE_MASK(64),
+	.mask		= CLOCKSOURCE_MASK(32),
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 	.resume		= exynos4_frc_resume,
 };
 
+static u64 notrace exynos4_read_sched_clock(void)
+{
+	return exynos4_read_count_32();
+}
+
+static struct delay_timer exynos4_delay_timer;
+
+static cycles_t exynos4_read_current_timer(void)
+{
+	BUILD_BUG_ON_MSG(sizeof(cycles_t) != sizeof(u32),
+			 "cycles_t needs to move to 32-bit for ARM64 usage");
+	return exynos4_read_count_32();
+}
+
 static void __init exynos4_clocksource_init(void)
 {
-	exynos4_mct_frc_start(0, 0);
+	exynos4_mct_frc_start();
+
+	exynos4_delay_timer.read_current_timer = &exynos4_read_current_timer;
+	exynos4_delay_timer.freq = clk_rate;
+	register_current_timer_delay(&exynos4_delay_timer);
 
 	if (clocksource_register_hz(&mct_frc, clk_rate))
 		panic("%s: can't register clocksource\n", mct_frc.name);
+
+	sched_clock_register(exynos4_read_sched_clock, 32, clk_rate);
 }
 
 static void exynos4_mct_comp0_stop(void)
 {
 	unsigned int tcon;
 
-	tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
+	tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
 	tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC);
 
 	exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
 	exynos4_mct_write(0, EXYNOS4_MCT_G_INT_ENB);
 }
 
-static void exynos4_mct_comp0_start(enum clock_event_mode mode,
-				    unsigned long cycles)
+static void exynos4_mct_comp0_start(bool periodic, unsigned long cycles)
 {
 	unsigned int tcon;
 	cycle_t comp_cycle;
 
-	tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
+	tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
 
-	if (mode == CLOCK_EVT_MODE_PERIODIC) {
+	if (periodic) {
 		tcon |= MCT_G_TCON_COMP0_AUTO_INC;
 		exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR);
 	}
 
-	comp_cycle = exynos4_frc_read(&mct_frc) + cycles;
+	comp_cycle = exynos4_read_count_64() + cycles;
 	exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L);
 	exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U);
 
@@ -235,38 +282,38 @@
 static int exynos4_comp_set_next_event(unsigned long cycles,
 				       struct clock_event_device *evt)
 {
-	exynos4_mct_comp0_start(evt->mode, cycles);
+	exynos4_mct_comp0_start(false, cycles);
 
 	return 0;
 }
 
-static void exynos4_comp_set_mode(enum clock_event_mode mode,
-				  struct clock_event_device *evt)
+static int mct_set_state_shutdown(struct clock_event_device *evt)
 {
-	unsigned long cycles_per_jiffy;
 	exynos4_mct_comp0_stop();
+	return 0;
+}
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		cycles_per_jiffy =
-			(((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift);
-		exynos4_mct_comp0_start(mode, cycles_per_jiffy);
-		break;
+static int mct_set_state_periodic(struct clock_event_device *evt)
+{
+	unsigned long cycles_per_jiffy;
 
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	}
+	cycles_per_jiffy = (((unsigned long long)NSEC_PER_SEC / HZ * evt->mult)
+			    >> evt->shift);
+	exynos4_mct_comp0_stop();
+	exynos4_mct_comp0_start(true, cycles_per_jiffy);
+	return 0;
 }
 
 static struct clock_event_device mct_comp_device = {
-	.name		= "mct-comp",
-	.features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-	.rating		= 250,
-	.set_next_event	= exynos4_comp_set_next_event,
-	.set_mode	= exynos4_comp_set_mode,
+	.name			= "mct-comp",
+	.features		= CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_ONESHOT,
+	.rating			= 250,
+	.set_next_event		= exynos4_comp_set_next_event,
+	.set_state_periodic	= mct_set_state_periodic,
+	.set_state_shutdown	= mct_set_state_shutdown,
+	.set_state_oneshot	= mct_set_state_shutdown,
+	.tick_resume		= mct_set_state_shutdown,
 };
 
 static irqreturn_t exynos4_mct_comp_isr(int irq, void *dev_id)
@@ -295,8 +342,6 @@
 	setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq);
 }
 
-#ifdef CONFIG_LOCAL_TIMERS
-
 static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
 
 /* Clock event handling */
@@ -306,7 +351,7 @@
 	unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START;
 	unsigned long offset = mevt->base + MCT_L_TCON_OFFSET;
 
-	tmp = __raw_readl(reg_base + offset);
+	tmp = readl_relaxed(reg_base + offset);
 	if (tmp & mask) {
 		tmp &= ~mask;
 		exynos4_mct_write(tmp, offset);
@@ -328,7 +373,7 @@
 	/* enable MCT tick interrupt */
 	exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET);
 
-	tmp = __raw_readl(reg_base + mevt->base + MCT_L_TCON_OFFSET);
+	tmp = readl_relaxed(reg_base + mevt->base + MCT_L_TCON_OFFSET);
 	tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START |
 	       MCT_L_TCON_INTERVAL_MODE;
 	exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
@@ -337,61 +382,54 @@
 static int exynos4_tick_set_next_event(unsigned long cycles,
 				       struct clock_event_device *evt)
 {
-	struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
+	struct mct_clock_event_device *mevt;
 
+	mevt = container_of(evt, struct mct_clock_event_device, evt);
 	exynos4_mct_tick_start(cycles, mevt);
-
 	return 0;
 }
 
-static inline void exynos4_tick_set_mode(enum clock_event_mode mode,
-					 struct clock_event_device *evt)
+static int set_state_shutdown(struct clock_event_device *evt)
 {
-	struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
-	unsigned long cycles_per_jiffy;
+	struct mct_clock_event_device *mevt;
 
+	mevt = container_of(evt, struct mct_clock_event_device, evt);
 	exynos4_mct_tick_stop(mevt);
+	return 0;
+}
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		cycles_per_jiffy =
-			(((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift);
-		exynos4_mct_tick_start(cycles_per_jiffy, mevt);
-		break;
+static int set_state_periodic(struct clock_event_device *evt)
+{
+	struct mct_clock_event_device *mevt;
+	unsigned long cycles_per_jiffy;
 
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	}
+	mevt = container_of(evt, struct mct_clock_event_device, evt);
+	cycles_per_jiffy = (((unsigned long long)NSEC_PER_SEC / HZ * evt->mult)
+			    >> evt->shift);
+	exynos4_mct_tick_stop(mevt);
+	exynos4_mct_tick_start(cycles_per_jiffy, mevt);
+	return 0;
 }
 
-static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
+static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
 {
-	struct clock_event_device *evt = mevt->evt;
-
 	/*
 	 * This is for supporting oneshot mode.
 	 * Mct would generate interrupt periodically
 	 * without explicit stopping.
 	 */
-	if (evt->mode != CLOCK_EVT_MODE_PERIODIC)
+	if (!clockevent_state_periodic(&mevt->evt))
		exynos4_mct_tick_stop(mevt);
 
 	/* Clear the MCT tick interrupt */
-	if (__raw_readl(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) {
+	if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1)
 		exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
-		return 1;
-	} else {
-		return 0;
-	}
 }
 
 static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
 {
 	struct mct_clock_event_device *mevt = dev_id;
-	struct clock_event_device *evt = mevt->evt;
+	struct clock_event_device *evt = &mevt->evt;
 
 	exynos4_mct_tick_clear(mevt);
 
@@ -400,49 +438,33 @@
 	return IRQ_HANDLED;
 }
 
-static struct irqaction mct_tick0_event_irq = {
-	.name		= "mct_tick0_irq",
-	.flags		= IRQF_TIMER | IRQF_NOBALANCING,
-	.handler	= exynos4_mct_tick_isr,
-};
-
-static struct irqaction mct_tick1_event_irq = {
-	.name		= "mct_tick1_irq",
-	.flags		= IRQF_TIMER | IRQF_NOBALANCING,
-	.handler	= exynos4_mct_tick_isr,
-};
-
-static int __cpuinit exynos4_local_timer_setup(struct clock_event_device *evt)
+static int exynos4_local_timer_setup(struct mct_clock_event_device *mevt)
 {
-	struct mct_clock_event_device *mevt;
+	struct clock_event_device *evt = &mevt->evt;
 	unsigned int cpu = smp_processor_id();
 
-	mevt = this_cpu_ptr(&percpu_mct_tick);
-	mevt->evt = evt;
-
 	mevt->base = EXYNOS4_MCT_L_BASE(cpu);
-	sprintf(mevt->name, "mct_tick%d", cpu);
+	snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu);
 
 	evt->name = mevt->name;
 	evt->cpumask = cpumask_of(cpu);
 	evt->set_next_event = exynos4_tick_set_next_event;
-	evt->set_mode = exynos4_tick_set_mode;
+	evt->set_state_periodic = set_state_periodic;
+	evt->set_state_shutdown = set_state_shutdown;
+	evt->set_state_oneshot = set_state_shutdown;
+	evt->tick_resume = set_state_shutdown;
 	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
 	evt->rating = 450;
 
 	exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
 
 	if (mct_int_type == MCT_INT_SPI) {
-		if (cpu == 0) {
-			mct_tick0_event_irq.dev_id = mevt;
-			evt->irq = mct_irqs[MCT_L0_IRQ];
-			setup_irq(evt->irq, &mct_tick0_event_irq);
-		} else {
-			mct_tick1_event_irq.dev_id = mevt;
-			evt->irq = mct_irqs[MCT_L1_IRQ];
-			setup_irq(evt->irq, &mct_tick1_event_irq);
-			irq_set_affinity(evt->irq, cpumask_of(1));
-		}
+
+		if (evt->irq == -1)
+			return -EIO;
+
+		irq_force_affinity(evt->irq, cpumask_of(cpu));
+		enable_irq(evt->irq);
 	} else {
 		enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
 	}
@@ -452,27 +474,51 @@
 	return 0;
 }
 
-static void exynos4_local_timer_stop(struct clock_event_device *evt)
+static void exynos4_local_timer_stop(struct mct_clock_event_device *mevt)
 {
-	unsigned int cpu = smp_processor_id();
-	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
-	if (mct_int_type == MCT_INT_SPI)
-		if (cpu == 0)
-			remove_irq(evt->irq, &mct_tick0_event_irq);
-		else
-			remove_irq(evt->irq, &mct_tick1_event_irq);
-	else
+	struct clock_event_device *evt = &mevt->evt;
+
+	evt->set_state_shutdown(evt);
+	if (mct_int_type == MCT_INT_SPI) {
+		if (evt->irq != -1)
+			disable_irq_nosync(evt->irq);
+		exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
+	} else {
 		disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
+	}
+}
+
+static int exynos4_mct_cpu_notify(struct notifier_block *self,
+				  unsigned long action, void *hcpu)
+{
+	struct mct_clock_event_device *mevt;
+
+	/*
+	 * Grab cpu pointer in each case to avoid spurious
+	 * preemptible warnings
+	 */
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_STARTING:
+		mevt = this_cpu_ptr(&percpu_mct_tick);
+		exynos4_local_timer_setup(mevt);
+		break;
+	case CPU_DYING:
+		mevt = this_cpu_ptr(&percpu_mct_tick);
+		exynos4_local_timer_stop(mevt);
+		break;
+	}
+
+	return NOTIFY_OK;
 }
 
-static struct local_timer_ops exynos4_mct_tick_ops __cpuinitdata = {
-	.setup	= exynos4_local_timer_setup,
-	.stop	= exynos4_local_timer_stop,
+static struct notifier_block exynos4_mct_cpu_nb = {
+	.notifier_call = exynos4_mct_cpu_notify,
 };
-#endif /* CONFIG_LOCAL_TIMERS */
 
 static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
 {
+	int err, cpu;
+	struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
 	struct clk *mct_clk, *tick_clk;
 
 	tick_clk = np ? of_clk_get_by_name(np, "fin_pll") :
@@ -490,31 +536,45 @@
 	if (!reg_base)
 		panic("%s: unable to ioremap mct address space\n", __func__);
 
-#ifdef CONFIG_LOCAL_TIMERS
 	if (mct_int_type == MCT_INT_PPI) {
-		int err;
 
 		err = request_percpu_irq(mct_irqs[MCT_L0_IRQ],
 					 exynos4_mct_tick_isr, "MCT",
 					 &percpu_mct_tick);
 		WARN(err, "MCT: can't request IRQ %d (%d)\n",
 		     mct_irqs[MCT_L0_IRQ], err);
+	} else {
+		for_each_possible_cpu(cpu) {
+			int mct_irq = mct_irqs[MCT_L0_IRQ + cpu];
+			struct mct_clock_event_device *pcpu_mevt =
+				per_cpu_ptr(&percpu_mct_tick, cpu);
+
+			pcpu_mevt->evt.irq = -1;
+
+			irq_set_status_flags(mct_irq, IRQ_NOAUTOEN);
+			if (request_irq(mct_irq,
+					exynos4_mct_tick_isr,
+					IRQF_TIMER | IRQF_NOBALANCING,
+					pcpu_mevt->name, pcpu_mevt)) {
+				pr_err("exynos-mct: cannot register IRQ (cpu%d)\n",
+				       cpu);
+
+				continue;
+			}
+			pcpu_mevt->evt.irq = mct_irq;
+		}
 	}
 
-	local_timer_register(&exynos4_mct_tick_ops);
-#endif /* CONFIG_LOCAL_TIMERS */
-}
-
-void __init mct_init(void __iomem *base, int irq_g0, int irq_l0, int irq_l1)
-{
-	mct_irqs[MCT_G0_IRQ] = irq_g0;
-	mct_irqs[MCT_L0_IRQ] = irq_l0;
-	mct_irqs[MCT_L1_IRQ] = irq_l1;
-	mct_int_type = MCT_INT_SPI;
+	err = register_cpu_notifier(&exynos4_mct_cpu_nb);
+	if (err)
+		goto out_irq;
 
-	exynos4_timer_resources(NULL, base);
-	exynos4_clocksource_init();
-	exynos4_clockevent_init();
+	/* Immediately configure the timer on the boot CPU */
+	exynos4_local_timer_setup(mevt);
+	return;
+
+out_irq:
+	free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick);
 }
 
 static void __init mct_init_dt(struct device_node *np, unsigned int int_type)
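
Reviewer's note, not part of the patch: the exynos4_read_count_64() hunk above
uses the standard lock-free sequence for reading a 64-bit counter exposed as
two 32-bit registers: sample the upper half, then the lower half, then the
upper half again, and retry if the upper half changed in between (meaning the
lower word wrapped mid-read). A minimal standalone C sketch of the same idea;
the volatile variables cnt_u/cnt_l are hypothetical stand-ins for the two MMIO
registers, not part of the driver:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for the two 32-bit counter registers. */
	static volatile uint32_t cnt_u;	/* upper half (cf. EXYNOS4_MCT_G_CNT_U) */
	static volatile uint32_t cnt_l;	/* lower half (cf. EXYNOS4_MCT_G_CNT_L) */

	static uint64_t read_count_64(void)
	{
		uint32_t lo, hi;
		uint32_t hi2 = cnt_u;	/* sample the upper half first */

		do {
			hi = hi2;
			lo = cnt_l;	/* the lower half may wrap here... */
			hi2 = cnt_u;	/* ...so sample the upper half again */
		} while (hi != hi2);	/* retry if a carry crossed 32 bits */

		return ((uint64_t)hi << 32) | lo;
	}

	int main(void)
	{
		cnt_u = 0x1;
		cnt_l = 0xfffffff0;
		printf("count = 0x%llx\n", (unsigned long long)read_count_64());
		return 0;
	}

The retry loop is why the patch also adds exynos4_read_count_32() and switches
the clocksource, sched_clock and delay timer to the lower 32 bits only: a
single register read is enough there, and each MCT access is slow.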