--- zzzz-none-000/linux-2.6.28.10/kernel/sched.c	2009-05-02 18:54:43.000000000 +0000
+++ fusiv-7390-686/linux-2.6.28.10/kernel/sched.c	2016-03-07 12:13:36.000000000 +0000
@@ -73,12 +73,23 @@
 #include
 #include
 #include
+#if defined(CONFIG_AVM_SIMPLE_PROFILING)
+#include
+#endif/*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/
 
 #include
 #include
 
 #include "sched_cpupri.h"
+#ifdef CONFIG_FUSIV_SOFTIRQ_CPU_UTILIZATION_FIX
+//#include
+#endif
+
+#ifdef CONFIG_FUSIV_KERNEL_PROFILER_MODULE
+extern int loggerProfile(unsigned long event);
+#endif
+
 
 /*
  * Convert user-nice values [ -20 ... 0 ... 19 ]
  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
@@ -4203,6 +4214,9 @@
 	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
 	struct rq *rq = this_rq();
 	cputime64_t tmp;
+# ifdef CONFIG_FUSIV_SOFTIRQ_CPU_UTILIZATION_FIX
+	cputime64_t tmp1;
+# endif /*--- #ifdef CONFIG_FUSIV_SOFTIRQ_CPU_UTILIZATION_FIX ---*/
 
 	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
 		account_guest_time(p, cputime);
@@ -4214,10 +4228,39 @@
 
 	/* Add system time to cpustat. */
 	tmp = cputime_to_cputime64(cputime);
+
+#ifdef CONFIG_FUSIV_SOFTIRQ_CPU_UTILIZATION_FIX
+	// check if there are non-zero softirq ticks accumulated. If yes then
+	// account the current tick to softirq context and account any
+	// remaining ticks to other contexts.
+
+	tmp1 = local_softirq_ticks();
+
+	if (tmp1)
+	{
+		// if the ticks to be allocated to softirq context is less than accumulated
+		// softirq ticks then decrement the softirq ticks by that much count
+		if (tmp < tmp1 )
+		{
+			tmp1 = tmp;
+			local_decr_softirq_ticks(tmp);
+		}
+		// if the ticks to be allocated to softirq context is equal or greater than
+		// the accumulated softirq ticks then allocate only softirq ticks
+		else
+		{
+			local_reset_softirq_ticks();
+		}
+		cpustat->softirq = cputime64_add(cpustat->softirq, tmp1);
+		tmp -= tmp1;
+	}
+#endif
 	if (hardirq_count() - hardirq_offset)
 		cpustat->irq = cputime64_add(cpustat->irq, tmp);
+#ifndef CONFIG_FUSIV_SOFTIRQ_CPU_UTILIZATION_FIX
 	else if (softirq_count())
 		cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
+#endif
 	else if (p != rq->idle)
 		cpustat->system = cputime64_add(cpustat->system, tmp);
 	else if (atomic_read(&rq->nr_iowait) > 0)
@@ -4534,12 +4577,25 @@
 	if (likely(prev != next)) {
 		sched_info_switch(prev, next);
-
+#if defined(CONFIG_FUSIV_KERNEL_PROFILER_MODULE) && CONFIG_FUSIV_KERNEL_PROFILER_MODULE
+		{
+			struct pid * log_pid = NULL;
+			int log_pid_nr = 0;
+			if (next)
+				log_pid = get_task_pid(next, PIDTYPE_PID);
+			if (log_pid != NULL)
+			{
+				log_pid_nr = log_pid->numbers[log_pid->level].nr;
+				loggerProfile(log_pid_nr);
+			}
+		}
+#endif
 		rq->nr_switches++;
 		rq->curr = next;
 		++*switch_count;
 
 		context_switch(rq, prev, next); /* unlocks the rq */
+
 		/*
 		 * the context switch might have flipped the stack from under
 		 * us, hence refresh the local variables.
 		 */
@@ -4746,6 +4802,7 @@
 static inline long __sched
 do_wait_for_common(struct completion *x, long timeout, int state)
 {
+	/*--- printk("entering do_wait_for_common timeout=%ld \n", timeout); ---*/
 	if (!x->done) {
 		DECLARE_WAITQUEUE(wait, current);
 
@@ -4758,7 +4815,9 @@
 		}
 		__set_current_state(state);
 		spin_unlock_irq(&x->wait.lock);
-		timeout = schedule_timeout(timeout);
+		/*--- printk("before schedule_timeout\n"); ---*/
+		timeout = schedule_timeout(timeout); /* it hangs here */
+		/*--- printk("schedule_timeout passed!\n"); ---*/
 		spin_lock_irq(&x->wait.lock);
 	} while (!x->done && timeout);
 	__remove_wait_queue(&x->wait, &wait);
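
Note on the CONFIG_FUSIV_SOFTIRQ_CPU_UTILIZATION_FIX hunk in account_system_time(): mainline 2.6.28 charges a timer tick to softirq time only when the tick happens to land while softirq_count() is non-zero, so softirq work done in short bursts between ticks is attributed to whatever context the tick samples. The hunk instead drains a per-CPU count of "softirq ticks" that must be accumulated elsewhere in the Fusiv tree; local_softirq_ticks(), local_decr_softirq_ticks() and local_reset_softirq_ticks() do not exist in mainline 2.6.28 and are not defined in this diff. The following is only a sketch of what such helpers could look like; the per-CPU variable, the increment helper and the types are assumptions, not the vendor's actual code.

#include <linux/percpu.h>

/* Assumed per-CPU counter of ticks spent in softirq context; the real
 * definition lives elsewhere in the Fusiv patch set. */
static DEFINE_PER_CPU(unsigned long, fusiv_softirq_ticks);

/* Assumed name: would be called from the softirq/tick path, once per tick
 * observed while a softirq is running on this CPU. */
static inline void local_incr_softirq_ticks(void)
{
	__get_cpu_var(fusiv_softirq_ticks)++;
}

static inline unsigned long local_softirq_ticks(void)
{
	return __get_cpu_var(fusiv_softirq_ticks);
}

static inline void local_decr_softirq_ticks(unsigned long ticks)
{
	__get_cpu_var(fusiv_softirq_ticks) -= ticks;
}

static inline void local_reset_softirq_ticks(void)
{
	__get_cpu_var(fusiv_softirq_ticks) = 0;
}

All call sites in the hunk run from the timer tick with preemption disabled, so plain __get_cpu_var() accesses would be sufficient under that assumption.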
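
Note on the CONFIG_FUSIV_KERNEL_PROFILER_MODULE hunk in schedule(): in 2.6.28, get_task_pid() takes a reference on the struct pid via get_pid(), and the hunk never drops it with put_pid(), so each context switch through this path pins a pid reference. If only the numeric pid of the incoming task is needed, it can be read without refcounting. The snippet below is an alternative sketch, not part of the vendor patch; task_pid_nr() returns the global pid number (pid->numbers[0].nr), whereas the original reads the number in the task's own pid namespace, which differs only when pid namespaces are in use.

#if defined(CONFIG_FUSIV_KERNEL_PROFILER_MODULE) && CONFIG_FUSIV_KERNEL_PROFILER_MODULE
		/* Sketch: log the incoming task's pid without taking (and never
		 * releasing) a reference on its struct pid. */
		if (next)
			loggerProfile(task_pid_nr(next));
#endif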