--- zzzz-none-000/linux-4.4.271/arch/arm/mm/mmu.c	2021-06-03 06:22:09.000000000 +0000
+++ hawkeye-5590-750/linux-4.4.271/arch/arm/mm/mmu.c	2023-04-19 10:22:27.000000000 +0000
@@ -20,6 +20,7 @@
 
 #include <asm/cp15.h>
 #include <asm/cputype.h>
+#include <asm/kasan.h>
 #include <asm/sections.h>
 #include <asm/cachetype.h>
 #include <asm/fixmap.h>
@@ -37,6 +38,9 @@
 #include <asm/mach/map.h>
 #include <asm/mach/pci.h>
 #include <asm/fixmap.h>
+#if defined(CONFIG_AVM_BOOTMEM)
+#include <linux/avm_bootmem.h>
+#endif/*--- #if defined(CONFIG_AVM_BOOTMEM) ---*/
 
 #include "fault.h"
 #include "mm.h"
@@ -63,7 +67,7 @@
 #define CPOLICY_WRITEALLOC	4
 
 static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
-static unsigned int ecc_mask __initdata = 0;
+static unsigned int ecc_mask __initdata;
 pgprot_t pgprot_user;
 pgprot_t pgprot_kernel;
 pgprot_t pgprot_hyp_device;
@@ -122,7 +126,7 @@
 };
 
 #ifdef CONFIG_CPU_CP15
-static unsigned long initial_pmd_value __initdata = 0;
+static unsigned long initial_pmd_value __initdata;
 
 /*
  * Initialise the cache_policy variable with the initial state specified
@@ -185,6 +189,7 @@
 
 	if (selected != cachepolicy) {
 		unsigned long cr = __clear_cr(cache_policies[selected].cr_mask);
+
 		cachepolicy = selected;
 		flush_cache_all();
 		set_cr(cr);
@@ -196,6 +201,7 @@
 static int __init early_nocache(char *__unused)
 {
 	char *p = "buffered";
+
 	pr_warn("nocache is deprecated; use cachepolicy=%s\n", p);
 	early_cachepolicy(p);
 	return 0;
@@ -205,6 +211,7 @@
 static int __init early_nowrite(char *__unused)
 {
 	char *p = "uncached";
+
 	pr_warn("nowb is deprecated; use cachepolicy=%s\n", p);
 	early_cachepolicy(p);
 	return 0;
@@ -239,9 +246,9 @@
 
 #endif /* ifdef CONFIG_CPU_CP15 / else */
 
-#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
+#define PROT_PTE_DEVICE		(L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN)
 #define PROT_PTE_S2_DEVICE	PROT_PTE_DEVICE
-#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE
+#define PROT_SECT_DEVICE	(PMD_TYPE_SECT|PMD_SECT_AP_WRITE)
 
 static struct mem_type mem_types[] = {
 	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
@@ -653,6 +660,7 @@
 
 	for (i = 0; i < 16; i++) {
 		pteval_t v = pgprot_val(protection_map[i]);
+
 		protection_map[i] = __pgprot(v | user_pgprot);
 	}
 
@@ -690,6 +698,7 @@
 
 	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 		struct mem_type *t = &mem_types[i];
+
 		if (t->prot_l1)
 			t->prot_l1 |= PMD_DOMAIN(t->domain);
 		if (t->prot_sect)
@@ -715,6 +724,7 @@
 static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
 {
 	void *ptr = __va(memblock_alloc(sz, align));
+
 	memset(ptr, 0, sz);
 	return ptr;
 }
@@ -728,6 +738,7 @@
 {
 	if (pmd_none(*pmd)) {
 		pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
+
 		__pmd_populate(pmd, __pa(pte), prot);
 	}
 	BUG_ON(pmd_bad(*pmd));
@@ -739,6 +750,7 @@
 				  const struct mem_type *type)
 {
 	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
+
 	do {
 		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
 		pfn++;
@@ -1106,7 +1118,7 @@
 }
 early_param("vmalloc", early_vmalloc);
 
-phys_addr_t arm_lowmem_limit __initdata = 0;
+phys_addr_t arm_lowmem_limit __initdata;
 
 void __init sanity_check_meminfo(void)
 {
@@ -1205,13 +1217,16 @@
 
 	/*
	 * Clear out all the mappings below the kernel image.
 	 */
-	for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
+	for (addr = 0; addr < TASK_SIZE; addr += PMD_SIZE)
 		pmd_clear(pmd_off_k(addr));
 
 #ifdef CONFIG_XIP_KERNEL
 	/* The XIP kernel is mapped in the module area -- skip over it */
 	addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK;
 #endif
+#ifdef CONFIG_KASAN
+	addr = MODULES_VADDR;
+#endif
 	for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
 		pmd_clear(pmd_off_k(addr));
@@ -1259,6 +1274,32 @@
 #endif
 }
 
+
+/* AVM MAP IO */
+#ifdef CONFIG_AVM_FASTIRQ
+#define IO_DEV(p, sz) { \
+		.virtual	= (unsigned long)(IO_ADDR(p)), \
+		.pfn		= __phys_to_pfn(p), \
+		.length		= sz, \
+		.type		= MT_DEVICE, \
+	}
+
+static struct map_desc avm_io_map[] __initdata = {
+	/* Always map EDMA IO, may be unused. */
+	IO_DEV(0x0c080000, SZ_8K)
+#ifdef CONFIG_AVM_ICE
+	, IO_DEV(0x0103a008, SZ_8)
+#endif
+};
+/*
+ * Map fix-mapped I/O that is needed before full MMU operation
+ */
+static void __init avm_map_io(void)
+{
+	iotable_init(avm_io_map, ARRAY_SIZE(avm_io_map));
+}
+#endif
+
 /*
  * Set up the device mappings.  Since we clear out the page tables for all
  * mappings above VMALLOC_START, except early fixmap, we might remove debug
@@ -1347,6 +1388,9 @@
 	/*
 	 * Ask the machine support to map in the statically mapped devices.
 	 */
+#ifdef CONFIG_AVM_FASTIRQ
+	avm_map_io();
+#endif
 	if (mdesc->map_io)
 		mdesc->map_io();
 	else
@@ -1593,4 +1637,5 @@
 
 	empty_zero_page = virt_to_page(zero_page);
 	__flush_dcache_page(NULL, empty_zero_page);
+	kasan_init();
 }