--- zzzz-none-000/linux-3.10.107/drivers/acpi/acpi_pad.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/drivers/acpi/acpi_pad.c	2021-02-04 17:41:59.000000000 +0000
@@ -12,10 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
  */
 
 #include
@@ -26,10 +22,9 @@
 #include
 #include
 #include
-#include
+#include
 #include
-#include
-#include
+#include
 #include
 
 #define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
@@ -42,8 +37,6 @@
 
 static unsigned char tsc_detected_unstable;
 static unsigned char tsc_marked_unstable;
-static unsigned char lapic_detected_unstable;
-static unsigned char lapic_marked_unstable;
 
 static void power_saving_mwait_init(void)
 {
@@ -83,13 +76,10 @@
 		 */
 		if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
 			tsc_detected_unstable = 1;
-		if (!boot_cpu_has(X86_FEATURE_ARAT))
-			lapic_detected_unstable = 1;
 		break;
 	default:
-		/* TSC & LAPIC could halt in idle */
+		/* TSC could halt in idle */
 		tsc_detected_unstable = 1;
-		lapic_detected_unstable = 1;
 	}
 #endif
 }
@@ -111,7 +101,7 @@
 	mutex_lock(&round_robin_lock);
 	cpumask_clear(tmp);
 	for_each_cpu(cpu, pad_busy_cpus)
-		cpumask_or(tmp, tmp, topology_thread_cpumask(cpu));
+		cpumask_or(tmp, tmp, topology_sibling_cpumask(cpu));
 	cpumask_andnot(tmp, cpu_online_mask, tmp);
 	/* avoid HT sibilings if possible */
 	if (cpumask_empty(tmp))
@@ -156,13 +146,11 @@
 	sched_setscheduler(current, SCHED_RR, &param);
 
 	while (!kthread_should_stop()) {
-		int cpu;
-		u64 expire_time;
-
-		try_to_freeze();
+		unsigned long expire_time;
 
 		/* round robin to cpus */
-		if (last_jiffies + round_robin_time * HZ < jiffies) {
+		expire_time = last_jiffies + round_robin_time * HZ;
+		if (time_before(expire_time, jiffies)) {
 			last_jiffies = jiffies;
 			round_robin_cpu(tsk_index);
 		}
@@ -177,34 +165,18 @@
 				mark_tsc_unstable("TSC halts in idle");
 				tsc_marked_unstable = 1;
 			}
-			if (lapic_detected_unstable && !lapic_marked_unstable) {
-				int i;
-				/* LAPIC could halt in idle, so notify users */
-				for_each_online_cpu(i)
-					clockevents_notify(
-						CLOCK_EVT_NOTIFY_BROADCAST_ON,
-						&i);
-				lapic_marked_unstable = 1;
-			}
 			local_irq_disable();
-			cpu = smp_processor_id();
-			if (lapic_marked_unstable)
-				clockevents_notify(
-					CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
+			tick_broadcast_enable();
+			tick_broadcast_enter();
 			stop_critical_timings();
 
-			__monitor((void *)&current_thread_info()->flags, 0, 0);
-			smp_mb();
-			if (!need_resched())
-				__mwait(power_saving_mwait_eax, 1);
+			mwait_idle_with_hints(power_saving_mwait_eax, 1);
 
 			start_critical_timings();
-			if (lapic_marked_unstable)
-				clockevents_notify(
-					CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
+			tick_broadcast_exit();
 			local_irq_enable();
 
-			if (jiffies > expire_time) {
+			if (time_before(expire_time, jiffies)) {
 				do_sleep = 1;
 				break;
 			}
@@ -219,8 +191,15 @@
 		 * borrow CPU time from this CPU and cause RT task use > 95%
 		 * CPU time. To make 'avoid starvation' work, takes a nap here.
 		 */
-		if (do_sleep)
+		if (unlikely(do_sleep))
 			schedule_timeout_killable(HZ * idle_pct / 100);
+
+		/* If an external event has set the need_resched flag, then
+		 * we need to deal with it, or this loop will continue to
+		 * spin without calling __mwait().
+		 */
+		if (unlikely(need_resched()))
+			schedule();
 	}
 
 	exit_round_robin(tsk_index);
@@ -231,16 +210,19 @@
 static unsigned int ps_tsk_num;
 static int create_power_saving_task(void)
 {
-	int rc = -ENOMEM;
+	int rc;
 
 	ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
 		(void *)(unsigned long)ps_tsk_num,
 		"acpi_pad/%d", ps_tsk_num);
-	rc = PTR_RET(ps_tsks[ps_tsk_num]);
-	if (!rc)
-		ps_tsk_num++;
-	else
+
+	if (IS_ERR(ps_tsks[ps_tsk_num])) {
+		rc = PTR_ERR(ps_tsks[ps_tsk_num]);
 		ps_tsks[ps_tsk_num] = NULL;
+	} else {
+		rc = 0;
+		ps_tsk_num++;
+	}
 
 	return rc;
 }
@@ -343,12 +325,10 @@
 static ssize_t acpi_pad_idlecpus_show(struct device *dev,
 	struct device_attribute *attr, char *buf)
 {
-	int n = 0;
-	n = cpumask_scnprintf(buf, PAGE_SIZE-2, to_cpumask(pad_busy_cpus_bits));
-	buf[n++] = '\n';
-	buf[n] = '\0';
-	return n;
+	return cpumap_print_to_pagebuf(false, buf,
+			to_cpumask(pad_busy_cpus_bits));
 }
+
 static DEVICE_ATTR(idlecpus, S_IRUGO|S_IWUSR,
 	acpi_pad_idlecpus_show,
 	acpi_pad_idlecpus_store);
@@ -409,28 +389,14 @@
 	return num;
 }
 
-/* Notify firmware how many CPUs are idle */
-static void acpi_pad_ost(acpi_handle handle, int stat,
-	uint32_t idle_cpus)
-{
-	union acpi_object params[3] = {
-		{.type = ACPI_TYPE_INTEGER,},
-		{.type = ACPI_TYPE_INTEGER,},
-		{.type = ACPI_TYPE_BUFFER,},
-	};
-	struct acpi_object_list arg_list = {3, params};
-
-	params[0].integer.value = ACPI_PROCESSOR_AGGREGATOR_NOTIFY;
-	params[1].integer.value = stat;
-	params[2].buffer.length = 4;
-	params[2].buffer.pointer = (void *)&idle_cpus;
-	acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
-}
-
 static void acpi_pad_handle_notify(acpi_handle handle)
 {
 	int num_cpus;
 	uint32_t idle_cpus;
+	struct acpi_buffer param = {
+		.length = 4,
+		.pointer = (void *)&idle_cpus,
+	};
 
 	mutex_lock(&isolated_cpus_lock);
 	num_cpus = acpi_pad_pur(handle);
@@ -440,7 +406,7 @@
 	}
 	acpi_pad_idle_cpus(num_cpus);
 	idle_cpus = acpi_pad_idle_cpus_num();
-	acpi_pad_ost(handle, 0, idle_cpus);
+	acpi_evaluate_ost(handle, ACPI_PROCESSOR_AGGREGATOR_NOTIFY, 0, &param);
 	mutex_unlock(&isolated_cpus_lock);
 }
 
@@ -452,7 +418,6 @@
 	switch (event) {
 	case ACPI_PROCESSOR_AGGREGATOR_NOTIFY:
 		acpi_pad_handle_notify(handle);
-		acpi_bus_generate_proc_event(device, event, 0);
 		acpi_bus_generate_netlink_event(device->pnp.device_class,
 			dev_name(&device->dev), event, 0);
 		break;
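
Note on the converted jiffies checks: both deadline tests in power_saving_thread() now use time_before() instead of a plain '<'/'>' comparison, so they stay correct when the jiffies counter wraps around. A minimal sketch of the pattern, for illustration only (check_deadline() is a made-up helper, not part of this patch or of acpi_pad.c):

#include <linux/jiffies.h>
#include <linux/types.h>

/* Illustrative helper (hypothetical): returns true once timeout_secs
 * seconds have elapsed since start.  time_before()/time_after() compare
 * the two counters via signed subtraction, so the result remains correct
 * even if jiffies wraps between start and now, which a plain '<' or '>'
 * test does not guarantee.
 */
static bool check_deadline(unsigned long start, unsigned int timeout_secs)
{
	unsigned long expire = start + timeout_secs * HZ;

	return time_before(expire, jiffies);
}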