--- zzzz-none-000/linux-2.6.28.10/kernel/softirq.c	2009-05-02 18:54:43.000000000 +0000
+++ fusiv-7390-686/linux-2.6.28.10/kernel/softirq.c	2013-10-18 12:55:51.000000000 +0000
@@ -23,8 +23,15 @@
 #include <linux/ftrace.h>
 #include <linux/smp.h>
 #include <linux/tick.h>
+#include
+#include "linux/kallsyms.h" // sprint_symbol()
 
 #include <asm/irq.h>
+#ifdef CONFIG_FUSIV_VX185
+#include
+#include
+extern struct clock_values fusiv_718x_clks;
+#endif
 /*
    - No shared variables, all the data are CPU local.
    - If a softirq needs serialization, let it serialize itself
@@ -48,6 +55,15 @@
 EXPORT_SYMBOL(irq_stat);
 #endif
 
+#ifdef CONFIG_FUSIV_SOFTIRQ_CPU_UTILIZATION_FIX
+fusiv_irq_softirq_stats_t fusiv_irq_softirq_stats[] ____cacheline_aligned;
+EXPORT_SYMBOL(fusiv_irq_softirq_stats);
+#endif
+
+#if defined(CONFIG_FUSIV_KERNEL_PROFILER_MODULE) && CONFIG_FUSIV_KERNEL_PROFILER_MODULE
+extern int loggerProfile(unsigned long event);
+#endif
+
 static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
 
 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
@@ -67,6 +83,19 @@
 	wake_up_process(tsk);
 }
 
+#if ( (defined(CONFIG_FUSIV_MIPS_BASED_VOICE) && CONFIG_FUSIV_MIPS_BASED_VOICE) | (defined(CONFIG_FUSIV_KERNEL_PROFILER_MODULE) && CONFIG_FUSIV_KERNEL_PROFILER_MODULE) | (defined(CONFIG_FUSIV_DSP_BASED_VOICE) && CONFIG_FUSIV_DSP_BASED_VOICE))
+int is_softirqd(struct task_struct *currentTask)
+{
+	struct task_struct *tsk = __get_cpu_var(ksoftirqd);
+	if(tsk == currentTask)
+		return 1;
+	else
+		return 0;
+}
+EXPORT_SYMBOL(is_softirqd);
+#endif
+
+
 /*
  * This one is for softirq.c-internal use,
  * where hardirqs are disabled legitimately:
@@ -189,7 +218,10 @@
 	__u32 pending;
 	int max_restart = MAX_SOFTIRQ_RESTART;
 	int cpu;
-
+#ifdef CONFIG_FUSIV_SOFTIRQ_CPU_UTILIZATION_FIX
+	unsigned long start_cycles=0, end_cycles=0, cycles_used=0;
+	unsigned long period=(fusiv_718x_clks.cpu_clk_val/2)/100; // clk/1000 * 10 = 1 tick (10 msec)
+#endif
 	pending = local_softirq_pending();
 	account_system_vtime(current);
 
@@ -197,6 +229,15 @@
 	trace_softirq_enter();
 
 	cpu = smp_processor_id();
+
+#ifdef CONFIG_FUSIV_SOFTIRQ_CPU_UTILIZATION_FIX
+	// reset the hardirq cycles
+	local_reset_hardirq_cycles();
+
+	// take the start timestamp
+	start_cycles = GET_CYCLE_COUNTER();
+#endif
+
 restart:
 	/* Reset the pending bitmask before enabling irqs */
 	set_softirq_pending(0);
@@ -209,13 +250,21 @@
 		if (pending & 1) {
 			int prev_count = preempt_count();
 
+#if defined(CONFIG_AVM_SIMPLE_PROFILING)
+			avm_simple_profiling_log(avm_profile_data_type_sw_irq_begin, (unsigned int)(h->action), (unsigned int)h);
+#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/
 			h->action(h);
+#if defined(CONFIG_AVM_SIMPLE_PROFILING)
+			avm_simple_profiling_log(avm_profile_data_type_sw_irq_end, (unsigned int)(h->action), (unsigned int)h);
+#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/
 
 			if (unlikely(prev_count != preempt_count())) {
-				printk(KERN_ERR "huh, entered softirq %td %p"
+				char buf[256];
+				sprint_symbol(buf, (unsigned long)h->action);
+				printk(KERN_ERR "huh, entered softirq %td %p (%s)"
 				       "with preempt_count %08x,"
 				       " exited with %08x?\n", h - softirq_vec,
-				       h->action, prev_count, preempt_count());
+				       h->action, buf, prev_count, preempt_count());
 				preempt_count() = prev_count;
 			}
 
@@ -231,6 +280,34 @@
 	if (pending && --max_restart)
 		goto restart;
 
+#ifdef CONFIG_FUSIV_SOFTIRQ_CPU_UTILIZATION_FIX
+
+	// take the end timestamp
+	end_cycles = GET_CYCLE_COUNTER();
+
+	// update the softirq cycles spent in processing the softirqs
+	// Note that the softirq cycles are accumulated otherwise the cpu cycles will always be under-reported
+	// by vmstat/top commands
+	cycles_used = (start_cycles <= end_cycles)?(end_cycles-start_cycles):(end_cycles + (0xffffffff-start_cycles));
+	local_incr_softirq_cycles(cycles_used);
+
+	// subtract the cycles consumed processing hardware interrupts to get the actual cycles consumed in
+	// processing softirqs
+	if (local_hardirq_cycles() <= local_softirq_cycles())
+		local_decr_softirq_cycles (local_hardirq_cycles());
+	else
+		printk ("\nhardirq [%d] > softirq [%d] cycles\n", local_hardirq_cycles(), local_softirq_cycles());
+
+	// update a tick counter, which accumulates the time spent in handling softirqs in terms of linux tick
+	// "while" loop ensures that all accumulated cycles are converted to linux ticks except the remaining
+	// cycles which are still less than 1 tick
+	while (local_softirq_cycles() >= period)
+	{
+		local_incr_softirq_ticks(1);
+		local_decr_softirq_cycles(period);
+	}
+#endif
+
 	if (pending)
 		wakeup_softirqd();
 
@@ -290,8 +367,12 @@
 	account_system_vtime(current);
 	trace_hardirq_exit();
 	sub_preempt_count(IRQ_EXIT_OFFSET);
-	if (!in_interrupt() && local_softirq_pending())
-		invoke_softirq();
+	if(local_softirq_pending()) {
+		if (!in_interrupt()) {
+			invoke_softirq();
+		}
+	}
+
 
 #ifdef CONFIG_NO_HZ
 	/* Make sure that timer wheel updates are propagated */
@@ -318,8 +399,9 @@
 	 * Otherwise we wake up ksoftirqd to make sure we
 	 * schedule the softirq soon.
 	 */
-	if (!in_interrupt())
+	if (!in_interrupt()){
 		wakeup_softirqd();
+	}
 }
 
 void raise_softirq(unsigned int nr)
@@ -378,6 +460,10 @@
 {
 	struct tasklet_struct *list;
 
+#if CONFIG_FUSIV_KERNEL_PROFILER_MODULE
+	int profileResult;
+#endif
+
 	local_irq_disable();
 	list = __get_cpu_var(tasklet_vec).head;
 	__get_cpu_var(tasklet_vec).head = NULL;
@@ -386,15 +472,40 @@
 
 	while (list) {
 		struct tasklet_struct *t = list;
-
 		list = list->next;
 
 		if (tasklet_trylock(t)) {
 			if (!atomic_read(&t->count)) {
+				int prev_count = preempt_count();
+
 				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
 					BUG();
+
+#if defined(CONFIG_AVM_SIMPLE_PROFILING)
+				avm_simple_profiling_log(avm_profile_data_type_tasklet_begin, (unsigned int)(t->func), (unsigned int)(t->data));
+#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/
+
+#if defined(CONFIG_FUSIV_KERNEL_PROFILER_MODULE) && CONFIG_FUSIV_KERNEL_PROFILER_MODULE
+				profileResult = loggerProfile(t->func);
+#endif
 				t->func(t->data);
+#if defined(CONFIG_FUSIV_KERNEL_PROFILER_MODULE) && CONFIG_FUSIV_KERNEL_PROFILER_MODULE
+				if(profileResult == 0)
+					profileResult = loggerProfile(t->func);
+#endif
+
+#if defined(CONFIG_AVM_SIMPLE_PROFILING)
+				avm_simple_profiling_log(avm_profile_data_type_tasklet_end, (unsigned int)(t->func), (unsigned int)(t->data));
+#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/
 				tasklet_unlock(t);
+
+				if (unlikely(prev_count != preempt_count())) {
+					char buf[256];
+					sprint_symbol(buf, (unsigned long)t->func);
+					printk(KERN_ERR "huh, entered tasklet %p (%s)" "with preempt_count %08x," " exited with %08x?\n",
+						t->func, buf, prev_count, preempt_count());
+					preempt_count() = prev_count;
+				}
 				continue;
 			}
 			tasklet_unlock(t);
@@ -413,6 +524,9 @@
 {
 	struct tasklet_struct *list;
 
+#if defined(CONFIG_FUSIV_KERNEL_PROFILER_MODULE) && CONFIG_FUSIV_KERNEL_PROFILER_MODULE
+	int profileResult;
+#endif
 	local_irq_disable();
 	list = __get_cpu_var(tasklet_hi_vec).head;
 	__get_cpu_var(tasklet_hi_vec).head = NULL;
@@ -428,7 +542,23 @@
 			if (!atomic_read(&t->count)) {
 				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
 					BUG();
+#if defined(CONFIG_AVM_SIMPLE_PROFILING)
+				avm_simple_profiling_log(avm_profile_data_type_hi_tasklet_begin, (unsigned int)(t->func), (unsigned int)(t->data));
+#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/
+
+#if defined(CONFIG_FUSIV_KERNEL_PROFILER_MODULE) && CONFIG_FUSIV_KERNEL_PROFILER_MODULE
+				profileResult = loggerProfile(t->func);
+#endif
 				t->func(t->data);
+#if defined(CONFIG_FUSIV_KERNEL_PROFILER_MODULE) && CONFIG_FUSIV_KERNEL_PROFILER_MODULE
+				if(profileResult == 0)
+					profileResult = loggerProfile(t->func);
+#endif
+
+#if defined(CONFIG_AVM_SIMPLE_PROFILING)
+				avm_simple_profiling_log(avm_profile_data_type_hi_tasklet_end, (unsigned int)(t->func), (unsigned int)(t->data));
+#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/
+
 				tasklet_unlock(t);
 				continue;
 			}
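
The arithmetic behind CONFIG_FUSIV_SOFTIRQ_CPU_UTILIZATION_FIX is easiest to sanity-check outside the kernel. Below is a minimal user-space sketch of the accounting done in __do_softirq() above: cycle_delta() mirrors the patch's wraparound handling (including its use of 0xffffffff rather than 2^32), and the while loop mirrors the cycle-to-tick conversion. The per-CPU helpers used by the patch (GET_CYCLE_COUNTER(), local_incr_softirq_cycles() and friends) come from the Fusiv BSP and are modelled here as plain variables; the 500 MHz clock is an assumed example value, chosen only to make `period` concrete.

#include <stdio.h>

static unsigned long softirq_cycles; /* models the per-cpu cycle accumulator */
static unsigned long softirq_ticks;  /* models the per-cpu tick accumulator  */

/* Wraparound-safe delta of a free-running 32-bit cycle counter,
 * written the same way as in the patch above. */
static unsigned long cycle_delta(unsigned long start, unsigned long end)
{
	return (start <= end) ? (end - start)
			      : (end + (0xffffffffUL - start));
}

int main(void)
{
	/* Assumed 500 MHz CPU; the counter evidently runs at cpu_clk/2
	 * (hence the /2 in the patch), so one 10 ms jiffy is
	 * (500000000 / 2) / 100 = 2500000 counter ticks. */
	unsigned long cpu_clk_val = 500000000UL;
	unsigned long period = (cpu_clk_val / 2) / 100;

	/* One __do_softirq() pass during which the counter wraps. */
	unsigned long start = 0xfffff000UL;
	unsigned long end = 0x00300000UL;

	softirq_cycles += cycle_delta(start, end);

	/* Convert whole jiffies worth of cycles into ticks; the remainder
	 * (< 1 tick) stays in the accumulator for the next pass instead of
	 * being discarded, which is the point of accumulating at all. */
	while (softirq_cycles >= period) {
		softirq_ticks++;
		softirq_cycles -= period;
	}

	printf("ticks=%lu leftover_cycles=%lu\n",
	       softirq_ticks, softirq_cycles);
	return 0;
}

Keeping the sub-tick remainder instead of resetting it each pass is what prevents the systematic under-reporting the patch comment mentions: softirq bursts shorter than one jiffy still add up to whole ticks over time, so vmstat/top eventually account for them.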