--- zzzz-none-000/linux-2.6.39.4/kernel/sched.c	2011-08-03 19:43:28.000000000 +0000
+++ puma6-atom-6490-729/linux-2.6.39.4/kernel/sched.c	2021-11-10 13:38:18.000000000 +0000
@@ -82,6 +82,9 @@
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
+#if defined(CONFIG_AVM_SIMPLE_PROFILING)
+#include <linux/avm_profile.h>
+#endif/*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/
 
 /*
  * Convert user-nice values [ -20 ... 0 ... 19 ]
@@ -2862,6 +2865,9 @@
 	 *		Manfred Spraul <manfred@colorfullife.com>
 	 */
 	prev_state = prev->state;
+#if defined(CONFIG_AVM_SIMPLE_PROFILING) && defined(avm_simple_profiling_sched)
+	avm_simple_profiling_sched();
+#endif/*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/
 	finish_arch_switch(prev);
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 	local_irq_disable();
@@ -2872,6 +2878,7 @@
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
 	finish_lock_switch(rq, prev);
 
+	fire_sched_in_preempt_notifiers(current);
 	if (mm)
 		mmdrop(mm);
 
@@ -3993,8 +4000,6 @@
  */
 static noinline void __schedule_bug(struct task_struct *prev)
 {
-	struct pt_regs *regs = get_irq_regs();
-
 	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
 		prev->comm, prev->pid, preempt_count());
 
@@ -4003,10 +4008,7 @@
 	if (irqs_disabled())
 		print_irqtrace_events(prev);
 
-	if (regs)
-		show_regs(regs);
-	else
-		dump_stack();
+	dump_stack();
 }
 
 /*
@@ -8119,7 +8121,7 @@
 void __init sched_init(void)
 {
 	int i, j;
-	unsigned long alloc_size = 0, ptr;
+	unsigned long alloc_size = 0, ptr __attribute__((unused));
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
@@ -8553,7 +8555,6 @@
 {
 	struct rt_rq *rt_rq;
 	struct sched_rt_entity *rt_se;
-	struct rq *rq;
 	int i;
 
 	tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
@@ -8567,8 +8568,6 @@
 			ktime_to_ns(def_rt_bandwidth.rt_period), 0);
 
 	for_each_possible_cpu(i) {
-		rq = cpu_rq(i);
-
 		rt_rq = kzalloc_node(sizeof(struct rt_rq),
 				     GFP_KERNEL, cpu_to_node(i));
 		if (!rt_rq)