--- zzzz-none-000/linux-2.6.28.10/kernel/softirq.c	2009-05-02 18:54:43.000000000 +0000
+++ puma5-6360-529/linux-2.6.28.10/kernel/softirq.c	2010-05-10 09:05:56.000000000 +0000
@@ -23,6 +23,8 @@
 #include
 #include
 #include
+#include
+#include "linux/kallsyms.h" // sprint_symbol()
 
 #include
 /*
@@ -67,6 +69,32 @@
 		wake_up_process(tsk);
 }
 
+#if ((defined(CONFIG_FUSIV_MIPS_BASED_VOICE) && CONFIG_FUSIV_MIPS_BASED_VOICE) || (defined(CONFIG_FUSIV_KERNEL_PROFILER_MODULE) && CONFIG_FUSIV_KERNEL_PROFILER_MODULE) || (defined(CONFIG_FUSIV_DSP_BASED_VOICE) && CONFIG_FUSIV_DSP_BASED_VOICE))
+int is_softirqd(struct task_struct *currentTask)
+{
+	struct task_struct *tsk = __get_cpu_var(ksoftirqd);
+	if(tsk == currentTask)
+		return 1;
+	else
+		return 0;
+}
+EXPORT_SYMBOL(is_softirqd);
+#endif /*--- #if ((defined(CONFIG_FUSIV_MIPS_BASED_VOICE) && CONFIG_FUSIV_MIPS_BASED_VOICE) || (defined(CONFIG_FUSIV_KERNEL_PROFILER_MODULE) && CONFIG_FUSIV_KERNEL_PROFILER_MODULE) || (defined(CONFIG_FUSIV_DSP_BASED_VOICE) && CONFIG_FUSIV_DSP_BASED_VOICE)) ---*/
+
+
+#if ( (defined(CONFIG_FUSIV_MIPS_BASED_VOICE) && CONFIG_FUSIV_MIPS_BASED_VOICE ) || (defined(CONFIG_FUSIV_KERNEL_PROFILER_MODULE) && CONFIG_FUSIV_KERNEL_PROFILER_MODULE ) || (defined(CONFIG_FUSIV_DSP_BASED_VOICE) && CONFIG_FUSIV_DSP_BASED_VOICE))
+int is_softirqd(struct task_struct *currentTask)
+{
+	struct task_struct *tsk = __get_cpu_var(ksoftirqd);
+	if(tsk == currentTask)
+		return 1;
+	else
+		return 0;
+}
+EXPORT_SYMBOL(is_softirqd);
+#endif
+
+
 /*
  * This one is for softirq.c-internal use,
  * where hardirqs are disabled legitimately:
@@ -209,13 +237,21 @@
 		if (pending & 1) {
 			int prev_count = preempt_count();
 
+#if defined(CONFIG_AVM_SIMPLE_PROFILING)
+			avm_simple_profiling_log(avm_profile_data_type_sw_irq_begin, (unsigned int)(h->action), (unsigned int)h);
+#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/
 			h->action(h);
+#if defined(CONFIG_AVM_SIMPLE_PROFILING)
+			avm_simple_profiling_log(avm_profile_data_type_sw_irq_end, (unsigned int)(h->action), (unsigned int)h);
+#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/
 
 			if (unlikely(prev_count != preempt_count())) {
-				printk(KERN_ERR "huh, entered softirq %td %p"
+				char buf[256];
+				sprint_symbol(buf, (unsigned long)h->action);
+				printk(KERN_ERR "huh, entered softirq %td %p (%s)"
 				       "with preempt_count %08x,"
 				       " exited with %08x?\n", h - softirq_vec,
-				       h->action, prev_count, preempt_count());
+				       h->action, buf, prev_count, preempt_count());
 				preempt_count() = prev_count;
 			}
 
@@ -243,7 +279,7 @@
 #ifndef __ARCH_HAS_DO_SOFTIRQ
 
 asmlinkage void do_softirq(void)
-{
+{
 	__u32 pending;
 	unsigned long flags;
 
@@ -290,8 +326,12 @@
 	account_system_vtime(current);
 	trace_hardirq_exit();
 	sub_preempt_count(IRQ_EXIT_OFFSET);
-	if (!in_interrupt() && local_softirq_pending())
-		invoke_softirq();
+	if(local_softirq_pending()) {
+		if (!in_interrupt()) {
+			invoke_softirq();
+		}
+	}
+
 
 #ifdef CONFIG_NO_HZ
 	/* Make sure that timer wheel updates are propagated */
@@ -318,8 +358,9 @@
 	 * Otherwise we wake up ksoftirqd to make sure we
 	 * schedule the softirq soon.
 	 */
-	if (!in_interrupt())
+	if (!in_interrupt()){
 		wakeup_softirqd();
+	}
 }
 
 void raise_softirq(unsigned int nr)
@@ -386,15 +427,31 @@
 	while (list) {
 		struct tasklet_struct *t = list;
-
 		list = list->next;
 
 		if (tasklet_trylock(t)) {
 			if (!atomic_read(&t->count)) {
+				int prev_count = preempt_count();
+
 				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
 					BUG();
+#if defined(CONFIG_AVM_SIMPLE_PROFILING)
+				avm_simple_profiling_log(avm_profile_data_type_tasklet_begin, (unsigned int)(t->func), (unsigned int)(t->data));
+#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/
 				t->func(t->data);
+#if defined(CONFIG_AVM_SIMPLE_PROFILING)
+				avm_simple_profiling_log(avm_profile_data_type_tasklet_end, (unsigned int)(t->func), (unsigned int)(t->data));
+#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/
 				tasklet_unlock(t);
+				if (unlikely(prev_count != preempt_count())) {
+					char buf[256];
+					sprint_symbol(buf, (unsigned long)t->func);
+					printk(KERN_ERR "huh, entered tasklet %p (%s)"
+					       "with preempt_count %08x,"
+					       " exited with %08x?\n",
+					       t->func, buf, prev_count, preempt_count());
+					preempt_count() = prev_count;
+				}
 				continue;
 			}
 			tasklet_unlock(t);
 		}
@@ -428,7 +485,13 @@
 		if (!atomic_read(&t->count)) {
 			if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
 				BUG();
+#if defined(CONFIG_AVM_SIMPLE_PROFILING)
+			avm_simple_profiling_log(avm_profile_data_type_hi_tasklet_begin, (unsigned int)(t->func), (unsigned int)(t->data));
+#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/
 			t->func(t->data);
+#if defined(CONFIG_AVM_SIMPLE_PROFILING)
+			avm_simple_profiling_log(avm_profile_data_type_hi_tasklet_end, (unsigned int)(t->func), (unsigned int)(t->data));
+#endif /*--- #if defined(CONFIG_AVM_SIMPLE_PROFILING) ---*/
 			tasklet_unlock(t);
 			continue;
 		}
@@ -729,9 +792,8 @@
 	case CPU_UP_PREPARE_FROZEN:
 		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
 		if (IS_ERR(p)) {
-			printk("ksoftirqd for %i failed\n", hotcpu);
 			return NOTIFY_BAD;
-		}
+		}
 		kthread_bind(p, hotcpu);
 		per_cpu(ksoftirqd, hotcpu) = p;
 		break;