--- zzzz-none-000/linux-4.19.183/kernel/sched/core.c	2021-03-24 10:07:39.000000000 +0000
+++ bcm63-7530ax-756/linux-4.19.183/kernel/sched/core.c	2023-06-28 08:54:21.000000000 +0000
@@ -21,6 +21,13 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
 
+#if defined(CONFIG_AVM_SIMPLE_PROFILING)
+#include <linux/avm_profile.h>
+#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/
+
+#if defined(CONFIG_BCM_KF_BUZZZ) && defined(CONFIG_BUZZZ_KEVT)
+#include <asm/buzzz.h>
+#endif
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
@@ -227,7 +234,9 @@
 	struct rq_flags rf;
 
 	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
-
+#if defined(CONFIG_BCM_KF_BUZZZ) && defined(CONFIG_BUZZZ_KEVT)
+	BUZZZ_KNL3(SCHED_HRTICK, 0, 0);
+#endif
 	rq_lock(rq, &rf);
 	update_rq_clock(rq);
 	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
@@ -1084,6 +1093,10 @@
 	}
 
 	do_set_cpus_allowed(p, new_mask);
+#if defined(CONFIG_BCM_KF_CPU_AFFINITY_HINT) && \
+	defined(CONFIG_BCM_PROC_CPU_AFFINITY_HINT)
+	cpumask_copy(&p->cpus_hint, new_mask);
+#endif
 
 	if (p->flags & PF_KTHREAD) {
 		/*
@@ -1457,6 +1470,11 @@
 	const struct cpumask *nodemask = NULL;
 	enum { cpuset, possible, fail } state = cpuset;
 	int dest_cpu;
+#if defined(CONFIG_BCM_KF_CPU_AFFINITY_HINT) && \
+	defined(CONFIG_BCM_PROC_CPU_AFFINITY_HINT)
+	struct cpumask cpus_valid;
+	int v;
+#endif
 
 	/*
 	 * If the node that the CPU is on has been offlined, cpu_to_node()
@@ -1466,6 +1484,36 @@
 	if (nid != -1) {
 		nodemask = cpumask_of_node(nid);
 
+#if defined(CONFIG_BCM_KF_CPU_AFFINITY_HINT) && \
+	defined(CONFIG_BCM_PROC_CPU_AFFINITY_HINT)
+		/*
+		 * As cpu_active_mask is always a subset of cpu_online_mask,
+		 * any CPU on this node which is also active and allowed could
+		 * be selected as the dest_cpu. We prefer such a CPU that is
+		 * additionally hinted, if one exists.
+		 *
+		 * valid = (node & active & allowed);
+		 * if (valid) {
+		 *	if (valid & hint) {
+		 *		recover allowed as hint;
+		 *		return dest_cpu = any_of(valid & hint);
+		 *	}
+		 *	return dest_cpu = any_of(valid);
+		 * }
+		 * goto try_other_nodes;
+		 */
+		v = cpumask_and(&cpus_valid, nodemask, cpu_active_mask) &&
+		    cpumask_and(&cpus_valid, &cpus_valid, &p->cpus_allowed);
+		if (v) {
+			dest_cpu = cpumask_any_and(&cpus_valid, &p->cpus_hint);
+			if (dest_cpu < nr_cpu_ids) {
+				do_set_cpus_allowed(p, &p->cpus_hint);
+				return dest_cpu;
+			}
+
+			return cpumask_any(&cpus_valid);
+		}
+#else
 		/* Look for allowed, online CPU in same node. */
 		for_each_cpu(dest_cpu, nodemask) {
 			if (!cpu_active(dest_cpu))
@@ -1473,9 +1521,41 @@
 			if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
 				return dest_cpu;
 		}
+#endif
 	}
 
 	for (;;) {
+#if defined(CONFIG_BCM_KF_CPU_AFFINITY_HINT) && \
+	defined(CONFIG_BCM_PROC_CPU_AFFINITY_HINT)
+		if (p->flags & PF_KTHREAD) {
+			/*
+			 * Kernel threads can be scheduled to run on online
+			 * CPUs, even if the CPU is not active.
+			 *
+			 * valid = (allowed & online);
+			 */
+			v = cpumask_and(&cpus_valid, &p->cpus_allowed,
+					cpu_online_mask);
+		} else {
+			/*
+			 * User-space threads must be scheduled to active CPUs.
+			 *
+			 * valid = (allowed & active);
+			 */
+			v = cpumask_and(&cpus_valid, &p->cpus_allowed,
+					cpu_active_mask);
+		}
+		dest_cpu = v ? cpumask_any_and(&cpus_valid, &p->cpus_hint)
+			     : nr_cpu_ids;
+		if (dest_cpu < nr_cpu_ids) {
+			/* recover allowed as hint; */
+			do_set_cpus_allowed(p, &p->cpus_hint);
+			goto out; /* return dest_cpu = any_of(valid & hint); */
+		}
+		dest_cpu = v ? cpumask_any(&cpus_valid) : nr_cpu_ids;
+		if (dest_cpu < nr_cpu_ids)
+			goto out; /* return dest_cpu = any_of(valid); */
+#else
		/* Any allowed, online CPU? */
 		for_each_cpu(dest_cpu, &p->cpus_allowed) {
 			if (!is_cpu_allowed(p, dest_cpu))
@@ -1483,6 +1563,7 @@
 			goto out;
 		}
+#endif
 
 		/* No more Mr. Nice Guy. */
 		switch (state) {
@@ -1520,6 +1601,43 @@
 	return dest_cpu;
 }
 
+#if defined(CONFIG_BCM_KF_CPU_AFFINITY_HINT) && \
+	defined(CONFIG_BCM_PROC_CPU_AFFINITY_HINT)
+/*
+ * Called by select_task_rq() when the selected CPU is allowed and online, to
+ * decide whether select_fallback_rq() should be called to replace the
+ * selected allowed online CPU with some active/online allowed hinted CPU.
+ *
+ * Return 0 (i.e., no need to try to replace the selected allowed online CPU)
+ * if:
+ * 1. there is no active/online hinted CPU, or
+ * 2. the selected CPU is one of the active/online hinted CPUs, or
+ * 3. none of the active/online hinted CPUs is currently allowed.
+ *
+ * Condition 3 exists because do_set_cpus_allowed() is also called by
+ * cpuset_cpus_allowed_fallback() in kernel/cgroup/cpuset.c and by
+ * __kthread_bind() in kernel/kthread.c, besides the call sites in this
+ * kernel/sched/core.c, and may be called from more places in future
+ * kernels. However, only in the set_cpus_allowed_ptr() path do we copy
+ * cpus_allowed into cpus_hint after calling do_set_cpus_allowed().
+ */
+static int try_valid_cpu_hint(int cpu, struct task_struct *p)
+{
+	struct cpumask cpus_valid;
+	int v;
+
+	if (p->flags & PF_KTHREAD)
+		v = cpumask_and(&cpus_valid, &p->cpus_hint,
+				cpu_online_mask);
+	else
+		v = cpumask_and(&cpus_valid, &p->cpus_hint,
+				cpu_active_mask);
+	if (!v || cpumask_test_cpu(cpu, &cpus_valid))
+		return 0;
+
+	return cpumask_intersects(&cpus_valid, &p->cpus_allowed);
+}
+#endif
 
 /*
  * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
  */
@@ -1543,7 +1661,13 @@
 	 * [ this allows ->select_task() to simply return task_cpu(p) and
 	 *   not worry about this generic constraint ]
 	 */
+#if defined(CONFIG_BCM_KF_CPU_AFFINITY_HINT) && \
+	defined(CONFIG_BCM_PROC_CPU_AFFINITY_HINT)
+	if (unlikely(!is_cpu_allowed(p, cpu) || !cpu_online(cpu) ||
+		     try_valid_cpu_hint(cpu, p)))
+#else
 	if (unlikely(!is_cpu_allowed(p, cpu)))
+#endif
 		cpu = select_fallback_rq(task_cpu(p), p);
 
 	return cpu;
@@ -2672,6 +2796,9 @@
 	 * transition, resulting in a double drop.
 	 */
 	prev_state = prev->state;
+#if defined(CONFIG_AVM_SIMPLE_PROFILING)
+	avm_simple_profiling_sched();
+#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/
 	vtime_task_switch(prev);
 	perf_event_task_sched_in(prev, current);
 	finish_task(prev);
@@ -3045,6 +3172,9 @@
 	struct task_struct *curr = rq->curr;
 	struct rq_flags rf;
 
+#if defined(CONFIG_BCM_KF_BUZZZ) && defined(CONFIG_BUZZZ_KEVT)
+	BUZZZ_KNL3(SCHED_TICK, jiffies, 0);
+#endif
 	sched_clock_tick();
 
 	rq_lock(rq, &rf);
@@ -3513,6 +3643,10 @@
 
 	trace_sched_switch(preempt, prev, next);
 
+#if defined(CONFIG_BCM_KF_BUZZZ) && defined(CONFIG_BUZZZ_KEVT)
+	BUZZZ_KNL3(TASK_OUT, prev->pid, prev);
+	BUZZZ_KNL3(TASK_IN, next->pid, next);
+#endif
 	/* Also unlocks the rq: */
 	rq = context_switch(rq, prev, next, &rf);
 } else {
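
For reference, the hint-preference logic that the CONFIG_BCM_KF_CPU_AFFINITY_HINT hunks add to select_fallback_rq() can be modeled in a few lines of user-space C. This is a minimal sketch under stated assumptions, not kernel code: cpumasks are collapsed to plain unsigned long bitmasks, and struct task, pick_any(), and fallback_on_node() are illustrative stand-ins for struct task_struct and the kernel's cpumask API.

/* cpus_hint_model.c -- user-space model of the patch's hint preference.
 * All names here are hypothetical; only the mask arithmetic mirrors the
 * patch: valid = (node & active & allowed), prefer (valid & hint), and
 * "recover allowed as hint" when a hinted CPU is chosen.
 */
#include <stdio.h>

struct task {
	unsigned long cpus_allowed;	/* models p->cpus_allowed */
	unsigned long cpus_hint;	/* models the patch's p->cpus_hint */
};

/* Models cpumask_any(): index of the lowest set bit, or -1 if empty. */
static int pick_any(unsigned long mask)
{
	return mask ? __builtin_ctzl(mask) : -1;
}

/*
 * Models the node-local step the patch adds to select_fallback_rq():
 * return a hinted valid CPU if one exists (narrowing allowed back to the
 * hint), else any valid CPU, else -1 so the caller tries other nodes.
 */
static int fallback_on_node(struct task *p, unsigned long node_mask,
			    unsigned long active_mask)
{
	unsigned long valid = node_mask & active_mask & p->cpus_allowed;
	unsigned long hinted = valid & p->cpus_hint;

	if (hinted) {
		p->cpus_allowed = p->cpus_hint; /* "recover allowed as hint" */
		return pick_any(hinted);
	}
	return pick_any(valid);	/* -1 means: goto try_other_nodes */
}

int main(void)
{
	/* CPUs 0-3 active on this node; task allowed on 0-3, hinted to CPU 2. */
	struct task p = { .cpus_allowed = 0xFUL, .cpus_hint = 1UL << 2 };
	int cpu = fallback_on_node(&p, 0xFUL, 0xFUL);

	printf("dest_cpu=%d allowed=0x%lx\n", cpu, p.cpus_allowed);
	return 0;
}

Compiled with any C compiler, this prints dest_cpu=2 allowed=0x4: the hinted CPU wins over the other valid CPUs, and the allowed mask is narrowed back to the hint, which is exactly the do_set_cpus_allowed(p, &p->cpus_hint) step in the patch.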