--- zzzz-none-000/linux-2.4.17/kernel/fork.c	2001-11-21 18:18:42.000000000 +0000
+++ sangam-fb-322/linux-2.4.17/kernel/fork.c	2004-11-24 13:21:27.000000000 +0000
@@ -21,6 +21,8 @@
 #include <linux/completion.h>
 #include <linux/personality.h>
 
+#include <linux/trace.h>
+
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/uaccess.h>
@@ -85,6 +87,7 @@
 {
 	static int next_safe = PID_MAX;
 	struct task_struct *p;
+	int pid;
 
 	if (flags & CLONE_PID)
 		return current->pid;
@@ -120,9 +123,10 @@
 		}
 		read_unlock(&tasklist_lock);
 	}
+	pid = last_pid;
 	spin_unlock(&lastpid_lock);
 
-	return last_pid;
+	return pid;
 }
 
 static inline int dup_mmap(struct mm_struct * mm)
@@ -604,6 +608,12 @@
 	if (p->binfmt && p->binfmt->module)
 		__MOD_INC_USE_COUNT(p->binfmt->module);
 
+#ifdef CONFIG_PREEMPT
+	/* Since we are keeping the context switch off state as part
+	 * of the context, make sure we start with it off.
+	 */
+	p->preempt_count = 1;
+#endif
 	p->did_exec = 0;
 	p->swappable = 0;
 	p->state = TASK_UNINTERRUPTIBLE;
@@ -659,6 +669,7 @@
 		goto bad_fork_cleanup_files;
 	if (copy_sighand(clone_flags, p))
 		goto bad_fork_cleanup_fs;
+	INIT_LIST_HEAD(&p->posix_timers);
 	if (copy_mm(clone_flags, p))
 		goto bad_fork_cleanup_sighand;
 	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
@@ -682,10 +693,20 @@
 	 * more scheduling fairness. This is only important in the first
 	 * timeslice, on the long run the scheduling behaviour is unchanged.
 	 */
+	/*
+	 * SCHED_FIFO tasks don't count down and have a negative counter.
+	 * Don't change these, least they all end up at -1.
+	 */
+#ifdef CONFIG_RTSCHED
+	if (p->policy != SCHED_FIFO)
+#endif
+	{
+
 	p->counter = (current->counter + 1) >> 1;
 	current->counter >>= 1;
 	if (!current->counter)
 		current->need_resched = 1;
+	}
 
 	/*
 	 * Ok, add it to the run-queues and make it
@@ -722,6 +743,9 @@
 	if (p->ptrace & PT_PTRACED)
 		send_sig(SIGSTOP, p, 1);
 
+	/* Trace the event */
+	TRACE_PROCESS(TRACE_EV_PROCESS_FORK, retval, 0);
+
 	wake_up_process(p);		/* do this last */
 	++total_forks;
 	if (clone_flags & CLONE_VFORK)