--- zzzz-none-000/linux-2.6.28.10/arch/arm/mm/mmu.c 2009-05-02 18:54:43.000000000 +0000 +++ puma5-6360-529/linux-2.6.28.10/arch/arm/mm/mmu.c 2009-06-08 15:09:22.000000000 +0000 @@ -453,8 +453,9 @@ unsigned long end, unsigned long pfn, const struct mem_type *type) { + /*--- printk("alloc_init_pte &pmd=0x%lx, pmd=0x%x, addr=0x%lx, end=0x%x, pfn=0x%x \n", (unsigned long)pmd, (*pmd), addr, end, pfn); ---*/ pte_t *pte; - + if (pmd_none(*pmd)) { pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t)); __pmd_populate(pmd, __pa(pte) | type->prot_l1); @@ -471,6 +472,7 @@ unsigned long end, unsigned long phys, const struct mem_type *type) { + /*--- printk("alloc_init_section: &pgd_t=0x%lx, pgd_t=0x%lx, addr=0x%x, end=0x%x, phys=0x%x \n", (unsigned long)pgd, (*pgd), addr, end, phys); ---*/ pmd_t *pmd = pmd_offset(pgd, addr); /* @@ -479,12 +481,13 @@ * L1 entries, whereas PGDs refer to a group of L1 entries making * up one logical pointer to an L2 table. */ + /*--- printk("~section_mask=0x%x, (addr | end | phys)=0x%x \n",~SECTION_MASK , (addr | end | phys)); ---*/ if (((addr | end | phys) & ~SECTION_MASK) == 0) { pmd_t *p = pmd; - + /*--- printk("addr=0x%x, SECTIONSIZE 0x%x \n", addr, SECTION_SIZE); ---*/ if (addr & SECTION_SIZE) pmd++; - + /*--- printk("pmd++ starting loop: &pmd=0x%lx, pmd=0x%lx, addr=0x%lx, end=0x%lx \n", (unsigned long)pmd, (*pmd), addr, end); ---*/ do { *pmd = __pmd(phys | type->prot_sect); phys += SECTION_SIZE; @@ -496,6 +499,7 @@ * No need to loop; pte's aren't interested in the * individual L1 entries. 
*/ + alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type); } } @@ -570,6 +574,8 @@ unsigned long phys, addr, length, end; const struct mem_type *type; pgd_t *pgd; + + /*--- printk("create_mapping: virt=0x%x, pfn=0x%x, length=0x%x, type=0x%x \n", md->virtual, md->pfn, md->length, md->type); ---*/ if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) { printk(KERN_WARNING "BUG: not creating mapping for " @@ -585,12 +591,14 @@ __pfn_to_phys((u64)md->pfn), md->virtual); } + type = &mem_types[md->type]; /* * Catch 36-bit addresses */ if (md->pfn >= 0x100000) { + printk("Create 36bit mapping\n"); create_36bit_mapping(md, type); return; } @@ -608,6 +616,7 @@ pgd = pgd_offset_k(addr); end = addr + length; + /*--- printk("loop_entry: phys=0x%x, addr=0x%x, length=0x%x, end=0x%x \n", phys, addr, length, end); ---*/ do { unsigned long next = pgd_addr_end(addr, end); @@ -621,12 +630,19 @@ /* * Create the architecture specific mappings */ +void debug_mmu_virt_addr(unsigned int virt_addr); + void __init iotable_init(struct map_desc *io_desc, int nr) { int i; - - for (i = 0; i < nr; i++) - create_mapping(io_desc + i); + for (i = 0; i < nr; i++){ + create_mapping(io_desc + i); +/* +#ifdef CONFIG_DEBUG_LL + debug_mmu_virt_addr((io_desc +i)->virtual); +#endif +*/ + } } static unsigned long __initdata vmalloc_reserve = SZ_128M;