--- zzzz-none-000/linux-3.10.107/arch/arm/kernel/smp.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/arch/arm/kernel/smp.c	2021-02-04 17:41:59.000000000 +0000
@@ -21,10 +21,12 @@
 #include <linux/cpu.h>
 #include <linux/seq_file.h>
 #include <linux/irq.h>
+#include <linux/nmi.h>
 #include <linux/percpu.h>
 #include <linux/clockchips.h>
 #include <linux/completion.h>
 #include <linux/cpufreq.h>
+#include <linux/irq_work.h>
 
 #include <linux/atomic.h>
 #include <asm/smp.h>
@@ -41,10 +43,18 @@
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
-#include <asm/localtimer.h>
 #include <asm/smp_plat.h>
 #include <asm/virt.h>
 #include <asm/mach/arch.h>
+#include <asm/mpu.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/ipi.h>
+
+#if defined(CONFIG_AVM_FASTIRQ)
+// For 'bool avm_trigger_all_cpu_backtrace(struct pt_regs *exception_regs);'
+#include
+#endif
 
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
@@ -57,7 +67,7 @@
  * control for which core is the next to come out of the secondary
  * boot "holding pen"
  */
-volatile int __cpuinitdata pen_release = -1;
+volatile int pen_release = -1;
 
 enum ipi_msg_type {
 	IPI_WAKEUP,
@@ -66,36 +76,56 @@
 	IPI_CALL_FUNC,
 	IPI_CALL_FUNC_SINGLE,
 	IPI_CPU_STOP,
+	IPI_IRQ_WORK,
+	IPI_COMPLETION,
+	IPI_CPU_BACKTRACE = 15,
 };
 
 static DECLARE_COMPLETION(cpu_running);
 
 static struct smp_operations smp_ops;
 
-void __init smp_set_ops(struct smp_operations *ops)
+void __init smp_set_ops(const struct smp_operations *ops)
 {
 	if (ops)
 		smp_ops = *ops;
 };
 
-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
+static unsigned long get_arch_pgd(pgd_t *pgd)
+{
+#ifdef CONFIG_ARM_LPAE
+	return __phys_to_pfn(virt_to_phys(pgd));
+#else
+	return virt_to_phys(pgd);
+#endif
+}
+
+int __cpu_up(unsigned int cpu, struct task_struct *idle)
 {
 	int ret;
 
+	if (!smp_ops.smp_boot_secondary)
+		return -ENOSYS;
+
 	/*
 	 * We need to tell the secondary core where to find
 	 * its stack and the page tables.
 	 */
 	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
+#ifdef CONFIG_ARM_MPU
+	secondary_data.mpu_rgn_szr = mpu_rgn_info.rgns[MPU_RAM_REGION].drsr;
+#endif
+
+#ifdef CONFIG_MMU
 	secondary_data.pgdir = virt_to_phys(idmap_pgd);
-	secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
-	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
-	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
+	secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
+#endif
+	sync_cache_w(&secondary_data);
 
 	/*
 	 * Now bring the CPU into our world.
	 */
-	ret = boot_secondary(cpu, idle);
+	ret = smp_ops.smp_boot_secondary(cpu, idle);
 	if (ret == 0) {
 		/*
 		 * CPU was successfully started, wait for it
@@ -112,9 +142,8 @@
 		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
 	}
 
-	secondary_data.stack = NULL;
-	secondary_data.pgdir = 0;
+	memset(&secondary_data, 0, sizeof(secondary_data));
 
 	return ret;
 }
 
@@ -125,16 +154,22 @@
 		smp_ops.smp_init_cpus();
 }
 
-int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+int platform_can_secondary_boot(void)
 {
-	if (smp_ops.smp_boot_secondary)
-		return smp_ops.smp_boot_secondary(cpu, idle);
-	return -ENOSYS;
+	return !!smp_ops.smp_boot_secondary;
 }
 
+int platform_can_cpu_hotplug(void)
+{
 #ifdef CONFIG_HOTPLUG_CPU
-static void percpu_timer_stop(void);
+	if (smp_ops.cpu_kill)
+		return 1;
+#endif
+
+	return 0;
+}
 
+#ifdef CONFIG_HOTPLUG_CPU
 static int platform_cpu_kill(unsigned int cpu)
 {
 	if (smp_ops.cpu_kill)
@@ -147,17 +182,30 @@
 	if (smp_ops.cpu_disable)
 		return smp_ops.cpu_disable(cpu);
 
+	return 0;
+}
+
+int platform_can_hotplug_cpu(unsigned int cpu)
+{
+	/* cpu_die must be specified to support hotplug */
+	if (!smp_ops.cpu_die)
+		return 0;
+
+	if (smp_ops.cpu_can_disable)
+		return smp_ops.cpu_can_disable(cpu);
+
 	/*
 	 * By default, allow disabling all CPUs except the first one,
 	 * since this is special on a lot of platforms, e.g. because
 	 * of clock tick interrupts.
 	 */
-	return cpu == 0 ? -EPERM : 0;
+	return cpu != 0;
 }
+
 /*
  * __cpu_disable runs on the processor to be shutdown.
  */
-int __cpuinit __cpu_disable(void)
+int __cpu_disable(void)
 {
 	unsigned int cpu = smp_processor_id();
 	int ret;
@@ -178,11 +226,6 @@
 	migrate_irqs();
 
 	/*
-	 * Stop the local timer for this CPU.
-	 */
-	percpu_timer_stop();
-
-	/*
 	 * Flush user cache and TLB mappings, and then remove this CPU
 	 * from the vm mask set of all processes.
 	 *
@@ -203,13 +246,13 @@
  * called on the thread which is asking for a CPU to be shutdown -
  * waits until shutdown has completed, or it is timed out.
  */
-void __cpuinit __cpu_die(unsigned int cpu)
+void __cpu_die(unsigned int cpu)
 {
 	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
 		pr_err("CPU%u: cpu didn't die\n", cpu);
 		return;
 	}
-	printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
+	pr_notice("CPU%u: shutdown\n", cpu);
 
 	/*
 	 * platform_cpu_kill() is generally expected to do the powering off
@@ -219,7 +262,7 @@
 	 * the requesting CPU and the dying CPU actually losing power.
 	 */
 	if (!platform_cpu_kill(cpu))
-		printk("CPU%u: unable to kill\n", cpu);
+		pr_err("CPU%u: unable to kill\n", cpu);
 }
 
 /*
@@ -230,7 +273,7 @@
  * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
-void __ref cpu_die(void)
+void arch_cpu_idle_dead(void)
 {
 	unsigned int cpu = smp_processor_id();
 
@@ -276,6 +319,9 @@
 	if (smp_ops.cpu_die)
 		smp_ops.cpu_die(cpu);
 
+	pr_warn("CPU%u: smp_ops.cpu_die() returned, trying to resuscitate\n",
+		cpu);
+
 	/*
 	 * Do not return to the idle loop - jump back to the secondary
 	 * cpu initialisation. There's some initialisation which needs
@@ -293,7 +339,7 @@
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
-static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
+static void smp_store_cpu_info(unsigned int cpuid)
 {
 	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
 
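Aside: the hotplug entry points reworked above are driven by the generic CPU hotplug core. Whether a core may be offlined at all is now reported by platform_can_hotplug_cpu(); the offline sequence then runs __cpu_disable() on the dying CPU and __cpu_die() on the requesting CPU, which waits on the cpu_died completion before asking platform_cpu_kill() to cut power. A minimal sketch of how kernel code reaches this path (illustrative only; demo_offline is a hypothetical helper, not part of this patch):

	#include <linux/cpu.h>
	#include <linux/printk.h>

	static int demo_offline(unsigned int cpu)
	{
		/* Same path as "echo 0 > /sys/devices/system/cpu/cpuN/online" */
		int ret = cpu_down(cpu);

		if (ret)
			pr_err("CPU%u: offline failed: %d\n", cpu, ret);
		return ret;
	}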
@@ -303,13 +349,11 @@
 	store_cpu_topology(cpuid);
 }
 
-static void percpu_timer_setup(void);
-
 /*
  * This is the secondary CPU boot entry.  We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
 */
-asmlinkage void __cpuinit secondary_start_kernel(void)
+asmlinkage void secondary_start_kernel(void)
 {
 	struct mm_struct *mm = &init_mm;
 	unsigned int cpu;
@@ -334,7 +378,7 @@
 
 	cpu_init();
 
-	printk("CPU%u: Booted secondary processor\n", cpu);
+	pr_debug("CPU%u: Booted secondary processor\n", cpu);
 
 	preempt_disable();
 	trace_hardirqs_off();
@@ -359,13 +403,9 @@
 	set_cpu_online(cpu, true);
 	complete(&cpu_running);
 
-	/*
-	 * Setup the percpu timer for this CPU.
-	 */
-	percpu_timer_setup();
-
 	local_irq_enable();
 	local_fiq_enable();
+	local_abt_enable();
 
 	/*
 	 * OK, it's off to the idle thread for us
@@ -410,12 +450,6 @@
 		max_cpus = ncores;
 	if (ncores > 1 && max_cpus) {
 		/*
-		 * Enable the local timer or broadcast device for the
-		 * boot CPU, but only if we have more than one CPU.
-		 */
-		percpu_timer_setup();
-
-		/*
 		 * Initialise the present map, which describes the set of CPUs
 		 * actually populated at the present time. A platform should
 		 * re-initialize the map in the platforms smp_prepare_cpus()
@@ -432,30 +466,15 @@
 	}
 }
 
-static void (*smp_cross_call)(const struct cpumask *, unsigned int);
+static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
 
 void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
 {
-	if (!smp_cross_call)
-		smp_cross_call = fn;
-}
-
-void arch_send_call_function_ipi_mask(const struct cpumask *mask)
-{
-	smp_cross_call(mask, IPI_CALL_FUNC);
-}
-
-void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
-{
-	smp_cross_call(mask, IPI_WAKEUP);
-}
-
-void arch_send_call_function_single_ipi(int cpu)
-{
-	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+	if (!__smp_cross_call)
+		__smp_cross_call = fn;
 }
 
-static const char *ipi_types[NR_IPI] = {
+static const char *ipi_types[NR_IPI] __tracepoint_string = {
 #define S(x,s) [x] = s
 	S(IPI_WAKEUP, "CPU wakeup interrupts"),
 	S(IPI_TIMER, "Timer broadcast interrupts"),
@@ -463,8 +482,16 @@
 	S(IPI_CALL_FUNC, "Function call interrupts"),
 	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
 	S(IPI_CPU_STOP, "CPU stop interrupts"),
+	S(IPI_IRQ_WORK, "IRQ work interrupts"),
+	S(IPI_COMPLETION, "completion interrupts"),
 };
 
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+	trace_ipi_raise(target, ipi_types[ipinr]);
+	__smp_cross_call(target, ipinr);
+}
+
 void show_ipi_list(struct seq_file *p, int prec)
 {
 	unsigned int cpu, i;
@@ -491,76 +518,33 @@
 	return sum;
 }
 
-/*
- * Timer (local or broadcast) support
- */
-static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
-
-#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-void tick_broadcast(const struct cpumask *mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
-	smp_cross_call(mask, IPI_TIMER);
+	smp_cross_call(mask, IPI_CALL_FUNC);
 }
-#endif
 
-static void broadcast_timer_set_mode(enum clock_event_mode mode,
-	struct clock_event_device *evt)
+void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
 {
+	smp_cross_call(mask, IPI_WAKEUP);
 }
 
-static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
+void arch_send_call_function_single_ipi(int cpu)
 {
-	evt->name	= "dummy_timer";
-	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
-			  CLOCK_EVT_FEAT_PERIODIC |
-			  CLOCK_EVT_FEAT_DUMMY;
-	evt->rating	= 100;
-	evt->mult	= 1;
-	evt->set_mode	= broadcast_timer_set_mode;
-
-	clockevents_register_device(evt);
+	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
 }
 
-static struct local_timer_ops *lt_ops;
-
-#ifdef CONFIG_LOCAL_TIMERS
-int local_timer_register(struct local_timer_ops *ops)
+#ifdef CONFIG_IRQ_WORK
+void arch_irq_work_raise(void)
 {
-	if (!is_smp() || !setup_max_cpus)
-		return -ENXIO;
-
-	if (lt_ops)
-		return -EBUSY;
-
-	lt_ops = ops;
-	return 0;
+	if (arch_irq_work_has_interrupt())
+		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
 }
 #endif
 
-static void __cpuinit percpu_timer_setup(void)
-{
-	unsigned int cpu = smp_processor_id();
-	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
-
-	evt->cpumask = cpumask_of(cpu);
-
-	if (!lt_ops || lt_ops->setup(evt))
-		broadcast_timer_setup(evt);
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-/*
- * The generic clock events code purposely does not stop the local timer
- * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
- * manually here.
- */
-static void percpu_timer_stop(void)
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+void tick_broadcast(const struct cpumask *mask)
 {
-	unsigned int cpu = smp_processor_id();
-	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
-
-	if (lt_ops)
-		lt_ops->stop(evt);
+	smp_cross_call(mask, IPI_TIMER);
 }
 #endif
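Aside: with the arch_send_*() helpers moved below the new smp_cross_call() wrapper, every IPI raise now passes the ipi:ipi_raise tracepoint before reaching the hardware through __smp_cross_call(). A sketch of code that would exercise the IPI_CALL_FUNC_SINGLE path end to end (illustrative only; remote_hello and demo_cross_call are hypothetical, and CPU 1 is an arbitrary target):

	#include <linux/smp.h>
	#include <linux/printk.h>

	/* Runs on the target CPU, in IPI context. */
	static void remote_hello(void *info)
	{
		pr_info("hello from CPU%u\n", smp_processor_id());
	}

	static int demo_cross_call(void)
	{
		/*
		 * The generic smp_call_function_single() ends up in
		 * arch_send_call_function_single_ipi(), i.e. in
		 * smp_cross_call(cpumask_of(1), IPI_CALL_FUNC_SINGLE).
		 */
		return smp_call_function_single(1, remote_hello, NULL, true);
	}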
@@ -574,7 +558,7 @@
 	if (system_state == SYSTEM_BOOTING ||
 	    system_state == SYSTEM_RUNNING) {
 		raw_spin_lock(&stop_lock);
-		printk(KERN_CRIT "CPU%u: stopping\n", cpu);
+		pr_crit("CPU%u: stopping\n", cpu);
 		dump_stack();
 		raw_spin_unlock(&stop_lock);
 	}
@@ -588,6 +572,19 @@
 		cpu_relax();
 }
 
+static DEFINE_PER_CPU(struct completion *, cpu_completion);
+
+int register_ipi_completion(struct completion *completion, int cpu)
+{
+	per_cpu(cpu_completion, cpu) = completion;
+	return IPI_COMPLETION;
+}
+
+static void ipi_complete(unsigned int cpu)
+{
+	complete(per_cpu(cpu_completion, cpu));
+}
+
 /*
  * Main handler for inter-processor interrupts
 */
@@ -601,8 +598,10 @@
 	unsigned int cpu = smp_processor_id();
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
-	if (ipinr < NR_IPI)
+	if ((unsigned)ipinr < NR_IPI) {
+		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
 		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
+	}
 
 	switch (ipinr) {
 	case IPI_WAKEUP:
@@ -638,11 +637,34 @@
 		irq_exit();
 		break;
 
+#ifdef CONFIG_IRQ_WORK
+	case IPI_IRQ_WORK:
+		irq_enter();
+		irq_work_run();
+		irq_exit();
+		break;
+#endif
+
+	case IPI_COMPLETION:
+		irq_enter();
+		ipi_complete(cpu);
+		irq_exit();
+		break;
+
+	case IPI_CPU_BACKTRACE:
+		irq_enter();
+		nmi_cpu_backtrace(regs);
+		irq_exit();
+		break;
+
 	default:
-		printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
-		       cpu, ipinr);
+		pr_crit("CPU%u: Unknown IPI message 0x%x\n",
+		        cpu, ipinr);
 		break;
 	}
+
+	if ((unsigned)ipinr < NR_IPI)
+		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
 	set_irq_regs(old_regs);
 }
 
@@ -667,7 +689,7 @@
 		udelay(1);
 
 	if (num_online_cpus() > 1)
-		pr_warning("SMP: failed to stop secondary CPUs\n");
+		pr_warn("SMP: failed to stop secondary CPUs\n");
 }
 
 /*
@@ -705,8 +727,7 @@
 	}
 
 	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
-	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
-	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
+	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
 		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
 						global_l_p_j_ref_freq,
 						freq->new);
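Aside: register_ipi_completion()/IPI_COMPLETION let one CPU block until another CPU signals it from IPI context (upstream, the big.LITTLE switcher uses this hook). A hypothetical waiter could look like this (sketch only; how the target CPU is made to raise the returned IPI number is platform-specific, e.g. a GIC SGI):

	#include <linux/completion.h>
	#include <asm/smp.h>	/* declares register_ipi_completion() on ARM */

	static void demo_wait_for_cpu(int cpu)
	{
		struct completion done;
		int ipi_nr;

		init_completion(&done);
		ipi_nr = register_ipi_completion(&done, cpu);
		/*
		 * ... arrange for 'cpu' to receive IPI number 'ipi_nr';
		 * the IPI_COMPLETION case in handle_IPI() then calls
		 * complete() on our behalf ...
		 */
		wait_for_completion(&done);
	}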
@@ -730,3 +751,28 @@
 core_initcall(register_cpufreq_notifier);
 
 #endif
+
+#if !defined(CONFIG_AVM_FASTIRQ)
+static void raise_nmi(cpumask_t *mask)
+{
+	/*
+	 * Generate the backtrace directly if we are running in a calling
+	 * context that is not preemptible by the backtrace IPI. Note
+	 * that nmi_cpu_backtrace() automatically removes the current cpu
+	 * from mask.
+	 */
+	if (cpumask_test_cpu(smp_processor_id(), mask) && irqs_disabled())
+		nmi_cpu_backtrace(NULL);
+
+	smp_cross_call(mask, IPI_CPU_BACKTRACE);
+}
+#endif
+
+void arch_trigger_all_cpu_backtrace(bool include_self)
+{
+#if defined(CONFIG_AVM_FASTIRQ)
+	(void)avm_trigger_all_cpu_backtrace(NULL, NULL);
+#else
+	nmi_trigger_all_cpu_backtrace(include_self, raise_nmi);
+#endif
+}
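Aside: arch_trigger_all_cpu_backtrace() is what backs sysrq-l on this kernel. In the generic (!CONFIG_AVM_FASTIRQ) build, nmi_trigger_all_cpu_backtrace() marks the target CPUs and calls raise_nmi() above, and each CPU then dumps its stack from the IPI_CPU_BACKTRACE case in handle_IPI(); with CONFIG_AVM_FASTIRQ the work is delegated to AVM's FIQ-based avm_trigger_all_cpu_backtrace() instead. From kernel code the usual entry point is the wrapper in <linux/nmi.h> (sketch; watchdog_dump is hypothetical):

	#include <linux/nmi.h>
	#include <linux/printk.h>

	static void watchdog_dump(void)
	{
		/*
		 * Expands to arch_trigger_all_cpu_backtrace(true) when
		 * the architecture provides one; returns false otherwise.
		 */
		if (!trigger_all_cpu_backtrace())
			pr_warn("all-CPU backtrace not supported here\n");
	}

	/* Or from userspace: echo l > /proc/sysrq-trigger */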