--- zzzz-none-000/linux-2.4.17/kernel/fork.c	2001-11-21 18:18:42.000000000 +0000
+++ sangam-fb-401/linux-2.4.17/kernel/fork.c	2005-09-20 12:49:54.000000000 +0000
@@ -21,6 +21,8 @@
 #include <linux/vmalloc.h>
 #include <linux/completion.h>
+#include <linux/trace.h>
+
 #include <linux/personality.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -72,10 +74,19 @@
 	 * value: the thread structures can take up at most half
 	 * of memory.
	 */
+
+	unsigned char* p;
 	max_threads = mempages / (THREAD_SIZE/PAGE_SIZE) / 8;
 
 	init_task.rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
 	init_task.rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
+
+	/*--- initialize the free stack of the "primordial soup" (init_task) with 0x55 for show_state() ---*/
+	p = (unsigned char*)&init_task + sizeof(struct task_struct);
+	while ((unsigned int)p < (unsigned int)(&p))
+	{
+		*p++ = 0x55;
+	}
 }
 
 /* Protects next_safe and last_pid. */
@@ -85,6 +96,7 @@
 {
 	static int next_safe = PID_MAX;
 	struct task_struct *p;
+	int pid;
 
 	if (flags & CLONE_PID)
 		return current->pid;
@@ -120,9 +132,10 @@
 		}
 		read_unlock(&tasklist_lock);
 	}
+	pid = last_pid;
 	spin_unlock(&lastpid_lock);
 
-	return last_pid;
+	return pid;
 }
 
 static inline int dup_mmap(struct mm_struct * mm)
@@ -582,6 +595,14 @@
 	if (!p)
 		goto fork_out;
 
+	/*--- initialize the kernel stack with 0x55 for show_state() ---*/
+	{
+		unsigned int ptr;
+
+		for (ptr = (unsigned int)p; ptr < (unsigned int)p + KERNEL_STACK_SIZE; ptr += 4)
+			*(unsigned int*)ptr = 0x55555555;
+	}
+
 	*p = *current;
 
 	retval = -EAGAIN;
@@ -604,6 +625,12 @@
 	if (p->binfmt && p->binfmt->module)
 		__MOD_INC_USE_COUNT(p->binfmt->module);
 
+#ifdef CONFIG_PREEMPT
+	/* Since we are keeping the context switch off state as part
+	 * of the context, make sure we start with it off.
+	 */
+	p->preempt_count = 1;
+#endif
 	p->did_exec = 0;
 	p->swappable = 0;
 	p->state = TASK_UNINTERRUPTIBLE;
@@ -633,6 +660,9 @@
 
 	p->leader = 0;		/* session leadership doesn't inherit */
 	p->tty_old_pgrp = 0;
+#if defined(CONFIG_LOG_SYSTEM_TIMES)
+	p->real_times.tms_utime = p->real_times.tms_stime = 0;
+#endif /*--- #if defined(CONFIG_LOG_SYSTEM_TIMES) ---*/
 	p->times.tms_utime = p->times.tms_stime = 0;
 	p->times.tms_cutime = p->times.tms_cstime = 0;
 #ifdef CONFIG_SMP
@@ -659,6 +689,7 @@
 		goto bad_fork_cleanup_files;
 	if (copy_sighand(clone_flags, p))
 		goto bad_fork_cleanup_fs;
+	INIT_LIST_HEAD(&p->posix_timers);
 	if (copy_mm(clone_flags, p))
 		goto bad_fork_cleanup_sighand;
 	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
@@ -682,10 +713,20 @@
 	 * more scheduling fairness. This is only important in the first
 	 * timeslice, on the long run the scheduling behaviour is unchanged.
 	 */
+	/*
+	 * SCHED_FIFO tasks don't count down and have a negative counter.
+	 * Don't change these, lest they all end up at -1.
+	 */
+#ifdef CONFIG_RTSCHED
+	if (p->policy != SCHED_FIFO)
+#endif
+	{
+
 	p->counter = (current->counter + 1) >> 1;
 	current->counter >>= 1;
 	if (!current->counter)
 		current->need_resched = 1;
+	}
 
 	/*
 	 * Ok, add it to the run-queues and make it
@@ -722,6 +763,9 @@
 	if (p->ptrace & PT_PTRACED)
 		send_sig(SIGSTOP, p, 1);
 
+	/* Trace the event */
+	TRACE_PROCESS(TRACE_EV_PROCESS_FORK, retval, 0);
+
 	wake_up_process(p);		/* do this last */
 	++total_forks;
 	if (clone_flags & CLONE_VFORK)
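
Note on the 0x55 stack fill: both fill hunks paint the unused kernel stack with a fixed byte pattern so that show_state() can later report how deep each task's stack has grown; any byte still holding 0x55 was never touched. A minimal sketch of the reporting side, assuming the 2.4-era layout where the task_struct sits at the bottom of a KERNEL_STACK_SIZE region and the stack grows down from the top (stack_unused_bytes() itself is illustrative, not part of this patch):

	static unsigned long stack_unused_bytes(struct task_struct *tsk)
	{
		/* scan upward from just above the task_struct ... */
		unsigned char *p = (unsigned char *)tsk + sizeof(struct task_struct);
		unsigned char *end = (unsigned char *)tsk + KERNEL_STACK_SIZE;
		unsigned long n = 0;

		/* ... counting fill bytes the stack has never overwritten */
		while (p < end && *p == 0x55) {
			p++;
			n++;
		}
		return n;	/* 0 would mean the stack ran into the task_struct */
	}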
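
The get_pid() change is a small race fix: last_pid is shared state protected only by lastpid_lock, so returning it after spin_unlock() lets a fork on another CPU advance it first. Copying it into a local while the lock is still held makes the returned value stable. The same read-under-lock pattern, sketched in isolation (get_pid_snapshot() is a hypothetical name; the allocation loop that updates last_pid is elided):

	static int get_pid_snapshot(void)
	{
		int pid;

		spin_lock(&lastpid_lock);
		/* ... allocation loop advances the shared last_pid ... */
		pid = last_pid;		/* snapshot while still serialized */
		spin_unlock(&lastpid_lock);

		return pid;		/* later forks can no longer change our copy */
	}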
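
The bracketed counter code preserves the parent/child timeslice sum, and the CONFIG_RTSCHED guard keeps SCHED_FIFO tasks out of it because their counters do not count down. A worked example of the arithmetic (values chosen purely for illustration):

	int parent = 7;			/* current->counter before the fork */
	int child;

	child = (parent + 1) >> 1;	/* child gets 4 */
	parent >>= 1;			/* parent keeps 3 */
	/* 4 + 3 == 7: the total dynamic priority is unchanged, as the
	 * original comment promises; a SCHED_FIFO task's counter stays
	 * negative, and halving it on every fork would drag all such
	 * tasks toward -1, hence the guard */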