--- zzzz-none-000/linux-2.4.17/kernel/sched.c	2001-12-21 17:42:04.000000000 +0000
+++ sangam-fb-322/linux-2.4.17/kernel/sched.c	2004-11-24 13:21:27.000000000 +0000
@@ -30,6 +30,8 @@
 #include
 #include
+#include
+
 #include
 #include
@@ -92,6 +94,95 @@
 spinlock_t runqueue_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED;  /* inner */
 rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED;	/* outer */
 
+/* Stop all tasks running with the given mm, except for the calling task. */
+
+int stop_all_threads(struct mm_struct *mm)
+{
+	struct task_struct * p;
+	int all_stopped = 0;
+
+	read_lock(&tasklist_lock);
+	for_each_task(p) {
+		if (p->mm == mm && p != current && p->state != TASK_STOPPED) {
+			send_sig (SIGSTOP, p, 1);
+		}
+	}
+
+	/* Now wait for every task to cease running. */
+	/* Beware: this loop might not terminate in the face of a malicious
+	   program sending SIGCONT to threads.  But it is still killable, and
+	   only moderately disruptive (because of the tasklist_lock). */
+	for (;;) {
+		all_stopped = 1;
+		for_each_task(p) {
+			if (p->mm == mm && p != current && p->state != TASK_STOPPED) {
+				all_stopped = 0;
+				break;
+			}
+		}
+		if (all_stopped)
+			break;
+		read_unlock(&tasklist_lock);
+		schedule_timeout(1);
+		read_lock(&tasklist_lock);
+	}
+
+#ifdef CONFIG_SMP
+	/* Make sure they've all gotten off their CPUs. */
+	for (;;) {
+		all_stopped = 1;
+		for_each_task (p) {
+			if (p->mm == mm && p != current) {
+				task_lock(p);
+				if (p->state != TASK_STOPPED) {
+					task_unlock(p);
+					read_unlock(&tasklist_lock);
+					return -1;
+				}
+				if (!task_has_cpu(p)) {
+					task_unlock(p);
+					continue;
+				}
+				task_unlock(p);
+				all_stopped = 0;
+				break;
+			}
+		}
+		if (all_stopped)
+			break;
+		read_unlock(&tasklist_lock);
+		do {
+			if (p->state != TASK_STOPPED)
+				return -1;
+			barrier();
+			cpu_relax();
+		} while (task_has_cpu(p));
+		read_lock(&tasklist_lock);
+	}
+#endif
+	read_unlock(&tasklist_lock);
+	return 0;
+}
+
+/* Restart all the tasks with the given mm.  Hope none of them were in state
+   TASK_STOPPED for some other reason... */
+void start_all_threads(struct mm_struct *mm)
+{
+	struct task_struct * p;
+
+	read_lock(&tasklist_lock);
+	for_each_task(p) {
+		if (p->mm == mm && p != current) {
+			send_sig (SIGCONT, p, 1);
+		}
+	}
+	read_unlock(&tasklist_lock);
+}
+
+#ifdef CONFIG_RTSCHED
+extern struct task_struct *child_reaper;
+#include "rtsched.h"
+#else
 static LIST_HEAD(runqueue_head);
 
 /*
@@ -353,6 +444,8 @@
 	unsigned long flags;
 	int success = 0;
 
+	TRACE_PROCESS(TRACE_EV_PROCESS_WAKEUP, p->pid, p->state);
+
 	/*
 	 * We want the common case fall through straight, thus the goto.
 	 */
@@ -373,11 +466,13 @@
 {
 	return try_to_wake_up(p, 0);
 }
+#endif /* ifdef CONFIG_RTSCHED */
 
 static void process_timeout(unsigned long __data)
 {
 	struct task_struct * p = (struct task_struct *) __data;
 
+	TRACE_TIMER(TRACE_EV_TIMER_EXPIRED, 0, 0, 0);
 	wake_up_process(p);
 }
@@ -442,6 +537,8 @@
 		}
 	}
 
+	TRACE_TIMER(TRACE_EV_TIMER_SETTIMEOUT, 0, timeout, 0);
+
 	expire = timeout + jiffies;
 
 	init_timer(&timer);
@@ -458,7 +555,7 @@
  out:
 	return timeout < 0 ? 0 : timeout;
 }
-
+#ifndef CONFIG_RTSCHED
 /*
  * schedule_tail() is getting called from the fork return path. This
  * cleans up all remaining scheduler things, without impacting the
@@ -491,7 +588,7 @@
 	task_lock(prev);
 	task_release_cpu(prev);
 	mb();
-	if (prev->state == TASK_RUNNING)
+	if (task_on_runqueue(prev))
 		goto needs_resched;
 
 out_unlock:
@@ -521,7 +618,7 @@
 			goto out_unlock;
 
 		spin_lock_irqsave(&runqueue_lock, flags);
-		if ((prev->state == TASK_RUNNING) && !task_has_cpu(prev))
+		if (task_on_runqueue(prev) && !task_has_cpu(prev))
 			reschedule_idle(prev);
 		spin_unlock_irqrestore(&runqueue_lock, flags);
 		goto out_unlock;
@@ -534,6 +631,7 @@
 asmlinkage void schedule_tail(struct task_struct *prev)
 {
 	__schedule_tail(prev);
+	preempt_enable();
 }
 
 /*
@@ -556,6 +654,8 @@
 
 	spin_lock_prefetch(&runqueue_lock);
 
+	preempt_disable();
+
 	if (!current->active_mm) BUG();
 need_resched_back:
 	prev = current;
@@ -583,6 +683,9 @@
 			move_last_runqueue(prev);
 	}
 
+#ifdef CONFIG_PREEMPT
+	if (preempt_is_disabled() & PREEMPT_ACTIVE) goto treat_like_run;
+#endif
 	switch (prev->state) {
 		case TASK_INTERRUPTIBLE:
 			if (signal_pending(prev)) {
@@ -593,6 +696,9 @@
 			del_from_runqueue(prev);
 		case TASK_RUNNING:;
 	}
+#ifdef CONFIG_PREEMPT
+	treat_like_run:
+#endif
 	prev->need_resched = 0;
 
 	/*
@@ -690,6 +796,8 @@
 		}
 	}
 
+	TRACE_SCHEDCHANGE(prev->pid, next->pid, prev->state);
+
 	/*
 	 * This just switches the register state and the
 	 * stack.
@@ -701,8 +809,10 @@
 	reacquire_kernel_lock(current);
 	if (current->need_resched)
 		goto need_resched_back;
+	preempt_enable_no_resched();
 	return;
 }
+#endif /* ifndef CONFIG_RTSCHED */
 
 /*
  * The core wakeup function.  Non-exclusive wakeups (nr_exclusive == 0) just wake everything
@@ -897,7 +1007,7 @@
 		tsk = find_task_by_pid(pid);
 	return tsk;
 }
-
+#ifndef CONFIG_RTSCHED
 static int setscheduler(pid_t pid, int policy,
 			struct sched_param *param)
 {
@@ -967,6 +1077,7 @@
 out_nounlock:
 	return retval;
 }
+#endif /* ifndef CONFIG_RTSCHED */
 
 asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
 				      struct sched_param *param)
@@ -979,6 +1090,21 @@
 	return setscheduler(pid, -1, param);
 }
 
+#ifdef CONFIG_PREEMPT
+asmlinkage void preempt_schedule(void)
+{
+	while (current->need_resched) {
+		preempt_lock_start(2);
+		current->preempt_count += PREEMPT_ACTIVE + 1;
+		barrier();
+		schedule();
+		current->preempt_count -= PREEMPT_ACTIVE + 1;
+		barrier();
+		preempt_lock_stop();
+	}
+}
+#endif /* CONFIG_PREEMPT */
+
 asmlinkage long sys_sched_getscheduler(pid_t pid)
 {
 	struct task_struct *p;
@@ -1030,6 +1156,7 @@
 	return retval;
 }
 
+#ifndef CONFIG_RTSCHED
 asmlinkage long sys_sched_yield(void)
 {
 	/*
@@ -1070,7 +1197,7 @@
 	}
 	return 0;
 }
-
+#endif /* ifndef CONFIG_RTSCHED */
 asmlinkage long sys_sched_get_priority_max(int policy)
 {
 	int ret = -EINVAL;
@@ -1078,7 +1205,7 @@
 	switch (policy) {
 	case SCHED_FIFO:
 	case SCHED_RR:
-		ret = 99;
+		ret = MAX_PRI;
 		break;
 	case SCHED_OTHER:
 		ret = 0;
@@ -1297,6 +1424,7 @@
 	atomic_inc(&current->files->count);
 }
 
+#ifndef CONFIG_RTSCHED
 extern unsigned long wait_init_idle;
 
 void __init init_idle(void)
@@ -1312,6 +1440,12 @@
 	sched_data->curr = current;
 	sched_data->last_schedule = get_cycles();
 	clear_bit(current->processor, &wait_init_idle);
+
+#ifdef CONFIG_PREEMPT
+	if (current->processor) {
+		current->preempt_count = 0;
+	}
+#endif
 }
 
 extern void init_timervecs (void);
@@ -1342,3 +1476,4 @@
 	atomic_inc(&init_mm.mm_count);
 	enter_lazy_tlb(&init_mm, current, cpu);
 }
+#endif /* ifndef CONFIG_RTSCHED */
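
For illustration only, not part of the patch: a minimal userspace sketch of the re-entry guard that the preempt_schedule() hunk above relies on. Adding PREEMPT_ACTIVE to the preempt count before calling schedule() lets the scheduler recognise an involuntary preemption and jump to treat_like_run, so a task preempted while momentarily in a non-running state is not dropped from the runqueue. All names below (the mock_* functions, the PREEMPT_ACTIVE value, the plain int flags) are local stand-ins for the kernel's current->need_resched, current->preempt_count and schedule(), not kernel symbols.

/* Userspace model of the CONFIG_PREEMPT re-entry guard; compile with any C compiler. */
#include <stdio.h>

#define PREEMPT_ACTIVE 0x4000000   /* assumed flag value, well above any nesting depth */

static int need_resched = 1;       /* stand-in for current->need_resched */
static int preempt_count = 0;      /* stand-in for current->preempt_count */

static void mock_schedule(void)
{
	/* Mirrors the CONFIG_PREEMPT hunk in schedule(): a preemption skips the
	 * normal state handling and keeps the task runnable ("treat_like_run"). */
	if (preempt_count & PREEMPT_ACTIVE)
		printf("schedule(): entered via preemption, task stays on runqueue\n");
	else
		printf("schedule(): voluntary call, normal state handling\n");
	need_resched = 0;          /* pretend a context switch happened */
}

static void mock_preempt_schedule(void)
{
	while (need_resched) {
		/* Mark the preemption and bump the nesting count in one step,
		 * exactly as the patched preempt_schedule() does. */
		preempt_count += PREEMPT_ACTIVE + 1;
		mock_schedule();
		preempt_count -= PREEMPT_ACTIVE + 1;
	}
}

int main(void)
{
	mock_preempt_schedule();   /* prints the "entered via preemption" branch */
	return 0;
}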