--- zzzz-none-000/linux-4.4.60/arch/arm/mm/mmu.c	2017-04-08 07:53:53.000000000 +0000
+++ wasp-540e-714/linux-4.4.60/arch/arm/mm/mmu.c	2019-07-03 09:21:34.000000000 +0000
@@ -37,6 +37,9 @@
 #include <asm/mach/map.h>
 #include <asm/mach/pci.h>
 #include <asm/fixmap.h>
+#if defined(CONFIG_AVM_BOOTMEM)
+#include
+#endif/*--- #if defined(CONFIG_AVM_BOOTMEM) ---*/
 
 #include "fault.h"
 #include "mm.h"
@@ -63,7 +66,7 @@
 #define CPOLICY_WRITEALLOC	4
 
 static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
-static unsigned int ecc_mask __initdata = 0;
+static unsigned int ecc_mask __initdata;
 pgprot_t pgprot_user;
 pgprot_t pgprot_kernel;
 pgprot_t pgprot_hyp_device;
@@ -122,7 +125,7 @@
 };
 
 #ifdef CONFIG_CPU_CP15
-static unsigned long initial_pmd_value __initdata = 0;
+static unsigned long initial_pmd_value __initdata;
 
 /*
  * Initialise the cache_policy variable with the initial state specified
@@ -185,6 +188,7 @@
 
 	if (selected != cachepolicy) {
 		unsigned long cr = __clear_cr(cache_policies[selected].cr_mask);
+
 		cachepolicy = selected;
 		flush_cache_all();
 		set_cr(cr);
@@ -196,6 +200,7 @@
 static int __init early_nocache(char *__unused)
 {
 	char *p = "buffered";
+
 	pr_warn("nocache is deprecated; use cachepolicy=%s\n", p);
 	early_cachepolicy(p);
 	return 0;
@@ -205,6 +210,7 @@
 static int __init early_nowrite(char *__unused)
 {
 	char *p = "uncached";
+
 	pr_warn("nowb is deprecated; use cachepolicy=%s\n", p);
 	early_cachepolicy(p);
 	return 0;
@@ -239,9 +245,9 @@
 
 #endif /* ifdef CONFIG_CPU_CP15 / else */
 
-#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
+#define PROT_PTE_DEVICE		(L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN)
 #define PROT_PTE_S2_DEVICE	PROT_PTE_DEVICE
-#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE
+#define PROT_SECT_DEVICE	(PMD_TYPE_SECT|PMD_SECT_AP_WRITE)
 
 static struct mem_type mem_types[] = {
 	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
@@ -653,6 +659,7 @@
 
 	for (i = 0; i < 16; i++) {
 		pteval_t v = pgprot_val(protection_map[i]);
+
 		protection_map[i] = __pgprot(v | user_pgprot);
 	}
 
@@ -690,6 +697,7 @@
 
 	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 		struct mem_type *t = &mem_types[i];
+
 		if (t->prot_l1)
 			t->prot_l1 |= PMD_DOMAIN(t->domain);
 		if (t->prot_sect)
@@ -715,6 +723,7 @@
 static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
 {
 	void *ptr = __va(memblock_alloc(sz, align));
+
 	memset(ptr, 0, sz);
 	return ptr;
 }
@@ -728,6 +737,7 @@
 {
 	if (pmd_none(*pmd)) {
 		pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
+
 		__pmd_populate(pmd, __pa(pte), prot);
 	}
 	BUG_ON(pmd_bad(*pmd));
@@ -739,6 +749,7 @@
 	const struct mem_type *type)
 {
 	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
+
 	do {
 		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
 		pfn++;
@@ -1106,7 +1117,7 @@
 }
 early_param("vmalloc", early_vmalloc);
 
-phys_addr_t arm_lowmem_limit __initdata = 0;
+phys_addr_t arm_lowmem_limit __initdata;
 
 void __init sanity_check_meminfo(void)
 {
@@ -1250,6 +1261,9 @@
 	 */
 	memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);
 
+#if defined(CONFIG_AVM_BOOTMEM)
+	memblock_reserve_modulemem((unsigned long) __bss_stop);
+#endif/*--- #if defined(CONFIG_AVM_BOOTMEM) ---*/
 #ifdef CONFIG_SA1111
 	/*
 	 * Because of the SA1111 DMA bug, we want to preserve our
@@ -1259,6 +1273,36 @@
 #endif
 }
 
+
+/* AVM MAP IO */
+#ifdef CONFIG_AVM_FASTIRQ
+#define IO_DEV(p, sz)	{					\
+		.virtual = (unsigned long)(IO_ADDR(p)),	\
+		.pfn = __phys_to_pfn(p),			\
+		.length = sz,					\
+		.type = MT_DEVICE,				\
+	}
+
+static struct map_desc avm_io_map[] __initdata = {
+#ifdef CONFIG_ARCH_IPQ40XX
+	/* Always map EDMA IO, may be unused. */
+	IO_DEV(0x0c080000, SZ_8K),
+#endif
+	IO_DEV(0x0B017000, SZ_64),
+	IO_DEV(0x0B020000, SZ_64K)
+#ifdef CONFIG_AVM_ICE
+	, IO_DEV(0x0103a008, SZ_8)
+#endif
+};
+/*
+ * Map fix-mapped I/O that is needed before full MMU operation
+ */
+static void __init avm_map_io(void)
+{
+	iotable_init(avm_io_map, ARRAY_SIZE(avm_io_map));
+}
+#endif
+
 /*
  * Set up the device mappings.  Since we clear out the page tables for all
  * mappings above VMALLOC_START, except early fixmap, we might remove debug
@@ -1347,6 +1391,9 @@
 	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
+#ifdef CONFIG_AVM_FASTIRQ
+	avm_map_io();
+#endif
 	if (mdesc->map_io)
 		mdesc->map_io();
 	else
@@ -1429,6 +1476,38 @@
 
 			create_mapping(&map);
 
+#if defined(CONFIG_AVM_BOOTMEM)
+			{
+				phys_addr_t modulmem_start, modulmem_end;
+
+				memblock_modulemem(&modulmem_start, &modulmem_end);
+
+				modulmem_start = round_down(modulmem_start, SECTION_SIZE);
+				modulmem_end = round_up(modulmem_end + 1, SECTION_SIZE);
+
+				if (kernel_x_end < modulmem_start) {
+					/*--- the up-aligned (x * 1M) data/bss-area of kernel: ---*/
+					map.pfn = __phys_to_pfn(kernel_x_end);
+					map.virtual = __phys_to_virt(kernel_x_end);
+					map.length = modulmem_start - kernel_x_end;
+					map.type = MT_MEMORY_RW;
+
+					create_mapping(&map);
+					kernel_x_end = modulmem_start;
+				}
+				if (modulmem_start) {
+					/*--- the down-and-up-aligned (x * 1M) modulmem: ---*/
+					map.pfn = __phys_to_pfn(kernel_x_end);
+					map.virtual = __phys_to_virt(kernel_x_end);
+					map.length = modulmem_end - kernel_x_end;
+					map.type = MT_MEMORY_RWX;
+
+					create_mapping(&map);
+					kernel_x_end = modulmem_end;
+				}
+			}
+#endif/*--- #if defined(CONFIG_AVM_BOOTMEM) ---*/
+
 			if (kernel_x_end < end) {
 				map.pfn = __phys_to_pfn(kernel_x_end);
 				map.virtual = __phys_to_virt(kernel_x_end);
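
[Reviewer note] The avm_io_map/avm_map_io hunks follow the standard ARM
static-mapping pattern, so a minimal sketch of that pattern is given below
for readers unfamiliar with it. struct map_desc, __phys_to_pfn(), MT_DEVICE
and iotable_init() are the real mainline 4.4 APIs the patch uses; IO_ADDR()
is an AVM-specific macro that is not in mainline, and EXAMPLE_PHYS,
EXAMPLE_VIRT and the example_* names are hypothetical stand-ins, not part
of this patch.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <asm/memory.h>
#include <asm/mach/map.h>

#define EXAMPLE_PHYS	0x0b020000UL	/* hypothetical peripheral base */
#define EXAMPLE_VIRT	0xfe200000UL	/* hypothetical fixed kernel VA */

static struct map_desc example_io_map[] __initdata = {
	{
		.virtual = EXAMPLE_VIRT,		/* fixed virtual address of the mapping */
		.pfn	 = __phys_to_pfn(EXAMPLE_PHYS),	/* page frame number, not a raw address */
		.length	 = SZ_64K,			/* mapping size */
		.type	 = MT_DEVICE,			/* strongly-ordered device attributes */
	},
};

/* Called from the same place the patch calls avm_map_io(): inside
 * devicemaps_init(), before the machine's own ->map_io() hook runs. */
static void __init example_map_io(void)
{
	iotable_init(example_io_map, ARRAY_SIZE(example_io_map));
}

Because iotable_init() installs these entries while the early page tables
are being built, the registers are reachable at fixed virtual addresses
long before ioremap() is usable, which is presumably what the AVM fast-IRQ
path relies on.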
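
[Reviewer note] memblock_reserve_modulemem() and memblock_modulemem() are
AVM-private helpers that this diff does not define. Judging by their use
here, they appear to reserve, and later report, a module-memory pool placed
directly behind the kernel's .bss so that map_lowmem() can cover it with
the MT_MEMORY_RWX section mappings added above. A plausible equivalent in
terms of the mainline memblock API is sketched below; MODULEMEM_SIZE and
the example_* functions are hypothetical, not AVM's actual implementation.

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <asm/memory.h>		/* __pa() */

#define MODULEMEM_SIZE	SZ_8M	/* hypothetical size of the module pool */

static phys_addr_t example_modulemem_base __initdata;

/* Reserve a module pool immediately after the kernel image so that
 * loadable modules stay within relative-branch range of kernel text. */
static void __init example_reserve_modulemem(unsigned long bss_stop_va)
{
	example_modulemem_base = __pa(bss_stop_va);
	memblock_reserve(example_modulemem_base, MODULEMEM_SIZE);
}

/* Report the reserved range; end is the last byte, which is why the
 * map_lowmem() hunk computes round_up(modulmem_end + 1, SECTION_SIZE). */
static void __init example_modulemem(phys_addr_t *start, phys_addr_t *end)
{
	*start = example_modulemem_base;
	*end = example_modulemem_base + MODULEMEM_SIZE - 1;
}

Mapping the pool with 1 MiB sections keeps TLB pressure low; the trade-off
is that everything inside the rounded range, including any tail of data/bss
sharing the first section after round_down(), becomes executable
(MT_MEMORY_RWX).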