--- zzzz-none-000/linux-2.6.19.2/arch/arm/mm/ioremap.c	2007-01-10 19:10:37.000000000 +0000
+++ davinci-8020-5504/linux-2.6.19.2/arch/arm/mm/ioremap.c	2007-03-09 11:57:55.000000000 +0000
@@ -67,7 +67,7 @@
 
 static inline int
 remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
-	       unsigned long phys_addr, unsigned long flags)
+	       unsigned long phys_addr, unsigned long flags, unsigned long disable_flags)
 {
 	unsigned long end;
 	pgprot_t pgprot;
@@ -81,7 +81,7 @@
 	phys_addr -= address;
 	BUG_ON(address >= end);
 
-	pgprot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE | flags);
+	pgprot = __pgprot((L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE | flags) & ~disable_flags);
 	do {
 		pte_t * pte = pte_alloc_kernel(pmd, address);
 		if (!pte)
@@ -95,7 +95,7 @@
 
 static int
 remap_area_pages(unsigned long start, unsigned long pfn,
-		 unsigned long size, unsigned long flags)
+		 unsigned long size, unsigned long flags, unsigned long disable_flags)
 {
 	unsigned long address = start;
 	unsigned long end = start + size;
@@ -113,7 +113,7 @@
 			break;
 		}
 		if (remap_area_pmd(pmd, address, end - address,
-				   phys_addr + address, flags)) {
+				   phys_addr + address, flags, disable_flags)) {
 			err = -ENOMEM;
 			break;
 		}
@@ -284,6 +284,84 @@
 
 #endif
 
+/*------------------------------------------------------------------------------------------*\
+\*------------------------------------------------------------------------------------------*/
+/*
+ * __ioremap_pfn_read_only - map physical pages read-only into vmalloc space.
+ *
+ * Like __ioremap_pfn(), but the resulting mapping is not writable: the
+ * L_PTE_WRITE bit is masked out of every PTE via disable_flags.  Returns
+ * the mapped virtual address plus 'offset', or NULL on failure.
+ */
+void __iomem *
+__ioremap_pfn_read_only(unsigned long pfn, unsigned long offset, size_t size,
+			unsigned long flags)
+{
+	int err;
+	unsigned long addr;
+	struct vm_struct * area;
+
+	/*
+	 * High mappings must be supersection aligned
+	 */
+	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
+		return NULL;
+
+	area = get_vm_area(size, VM_IOREMAP);
+	if (!area)
+		return NULL;
+	addr = (unsigned long)area->addr;
+
+	/*
+	 * Always use the page-table path: remap_area_sections() and
+	 * remap_area_supersections() hard-wire PMD_SECT_AP_WRITE into the
+	 * section descriptor, so a (super)section mapping can never be
+	 * read-only.  Passing L_PTE_WRITE as disable_flags is what makes
+	 * this mapping read-only.
+	 */
+	err = remap_area_pages(addr, pfn, size, flags, L_PTE_WRITE);
+
+	if (err) {
+		vunmap((void *)addr);
+		return NULL;
+	}
+
+	flush_cache_vmap(addr, addr + size);
+	return (void __iomem *) (offset + addr);
+}
+EXPORT_SYMBOL(__ioremap_pfn_read_only);
+
+/*
+ * __ioremap_read_only - byte-address wrapper around __ioremap_pfn_read_only().
+ *
+ * Rejects zero-length and wrapping [phys_addr, phys_addr + size) ranges,
+ * page-aligns the mapping size and preserves the sub-page offset.
+ */
+void __iomem *
+__ioremap_read_only(unsigned long phys_addr, size_t size, unsigned long flags)
+{
+	unsigned long last_addr;
+	unsigned long offset = phys_addr & ~PAGE_MASK;
+	unsigned long pfn = __phys_to_pfn(phys_addr);
+
+	/*
+	 * Don't allow wraparound or zero size
+	 */
+	last_addr = phys_addr + size - 1;
+	if (!size || last_addr < phys_addr)
+		return NULL;
+
+	/*
+	 * Page align the mapping size
+	 */
+	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
+
+	return __ioremap_pfn_read_only(pfn, offset, size, flags);
+}
+EXPORT_SYMBOL(__ioremap_read_only);
+
+/*------------------------------------------------------------------------------------------*\
+\*------------------------------------------------------------------------------------------*/
 /*
  * Remap an arbitrary physical address space into the kernel virtual
  * address space. Needed when the kernel wants to access high addresses
@@ -326,7 +404,7 @@
 		err = remap_area_sections(addr, pfn, size, flags);
 	} else
 #endif
-		err = remap_area_pages(addr, pfn, size, flags);
+		err = remap_area_pages(addr, pfn, size, flags, 0);
 
 	if (err) {
 		vunmap((void *)addr);