--- zzzz-none-000/linux-2.4.17/include/asm-mips/mmu_context.h 2001-07-02 20:56:40.000000000 +0000
+++ sangam-fb-322/linux-2.4.17/include/asm-mips/mmu_context.h 2004-11-24 13:21:30.000000000 +0000
@@ -14,12 +14,31 @@
 #include
 #include
 #include
+#include
 
-/* Fuck. The f-word is here so you can grep for it :-) */
-extern unsigned long asid_cache;
-extern pgd_t *current_pgd[];
+/*
+ * For the fast tlb miss handlers, we currently keep a per cpu array
+ * of pointers to the current pgd for each processor. Also, the proc.
+ * id is stuffed into the context register. This should be changed to
+ * use the processor id via current->processor, where current is stored
+ * in watchhi/lo. The context register should be used to contiguously
+ * map the page tables.
+ */
+#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
+	pgd_current[smp_processor_id()] = (unsigned long)(pgd)
+#define TLBMISS_HANDLER_SETUP() \
+	set_context((unsigned long) smp_processor_id() << (23 + 3)); \
+	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
+extern unsigned long pgd_current[];
 
-#if defined(CONFIG_CPU_R3000)
+#ifndef CONFIG_SMP
+#define CPU_CONTEXT(cpu, mm)	(mm)->context
+#else
+#define CPU_CONTEXT(cpu, mm)	(*((unsigned long *)((mm)->context) + cpu))
+#endif
+#define ASID_CACHE(cpu)		cpu_data[cpu].asid_cache
+
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) || defined(CONFIG_CPU_LX45XXX)
 
 #define ASID_INC	0x40
 #define ASID_MASK	0xfc0
@@ -42,22 +61,25 @@
 #define ASID_VERSION_MASK	((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
 #define ASID_FIRST_VERSION	((unsigned long)(~ASID_VERSION_MASK) + 1)
 
-extern inline void
-get_new_mmu_context(struct mm_struct *mm, unsigned long asid)
+static inline void
+get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 {
+	unsigned long asid = ASID_CACHE(cpu);
+
 	if (! ((asid += ASID_INC) & ASID_MASK) ) {
-		flush_tlb_all();	/* start new asid cycle */
-		if (!asid)		/* fix version if needed */
+		flush_icache_all();
+		local_flush_tlb_all();	/* start new asid cycle */
+		if (!asid)		/* fix version if needed */
 			asid = ASID_FIRST_VERSION;
 	}
-	mm->context = asid_cache = asid;
+	CPU_CONTEXT(cpu, mm) = ASID_CACHE(cpu) = asid;
 }
 
 /*
  * Initialize the context related info for a new mm_struct
  * instance.
  */
-extern inline int
+static inline int
 init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 #ifndef CONFIG_SMP
@@ -76,40 +98,46 @@
 	return 0;
 }
 
-extern inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                              struct task_struct *tsk, unsigned cpu)
 {
-	unsigned long asid = asid_cache;
+#ifdef CONFIG_PREEMPT
+	if (preempt_is_disabled() == 0)
+		BUG();
+#endif
 
 	/* Check if our ASID is of an older version and thus invalid */
-	if ((next->context ^ asid) & ASID_VERSION_MASK)
-		get_new_mmu_context(next, asid);
+	if ((CPU_CONTEXT(cpu, next) ^ ASID_CACHE(cpu)) & ASID_VERSION_MASK)
+		get_new_mmu_context(next, cpu);
 
-	current_pgd[cpu] = next->pgd;
-	set_entryhi(next->context);
+	set_entryhi(CPU_CONTEXT(cpu, next));
+	TLBMISS_HANDLER_SETUP_PGD(next->pgd);
 }
 
 /*
  * Destroy context related info for an mm_struct that is about
  * to be put to rest.
  */
-extern inline void destroy_context(struct mm_struct *mm)
+static inline void destroy_context(struct mm_struct *mm)
 {
-	/* Nothing to do. */
+#ifdef CONFIG_SMP
+	if (mm->context)
+		kfree((void *)mm->context);
+#endif
 }
 
 /*
  * After we have set current->mm to a new value, this activates
  * the context for the new mm so we see the new mappings.
  */
-extern inline void
+static inline void
 activate_mm(struct mm_struct *prev, struct mm_struct *next)
 {
 	/* Unconditionally get a new ASID. */
-	get_new_mmu_context(next, asid_cache);
+	get_new_mmu_context(next, smp_processor_id());
 
-	current_pgd[smp_processor_id()] = next->pgd;
-	set_entryhi(next->context);
+	set_entryhi(CPU_CONTEXT(smp_processor_id(), next));
+	TLBMISS_HANDLER_SETUP_PGD(next->pgd);
 }
 
 #endif /* _ASM_MMU_CONTEXT_H */