--- zzzz-none-000/linux-3.10.107/arch/parisc/mm/init.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/arch/parisc/mm/init.c	2021-02-04 17:41:59.000000000 +0000
@@ -23,6 +23,7 @@
 #include <linux/unistd.h>
 #include <linux/nodemask.h>	/* for node_online_map */
 #include <linux/pagemap.h>	/* for release_pages and page_cache_release */
+#include <linux/compat.h>
 
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
@@ -30,10 +31,12 @@
 #include <asm/pdc_chassis.h>
 #include <asm/mmzone.h>
 #include <asm/sections.h>
+#include <asm/msgbuf.h>
 
 extern int data_start;
+extern void parisc_kernel_start(void);	/* Kernel entry point in head.S */
 
-#if PT_NLEVELS == 3
+#if CONFIG_PGTABLE_LEVELS == 3
 /* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
  * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
  * guarantee that global objects will be laid out in memory in the same order
@@ -214,7 +217,6 @@
 	mem_limit_func();       /* check for "mem=" argument */
 
 	mem_max = 0;
-	num_physpages = 0;
 	for (i = 0; i < npmem_ranges; i++) {
 		unsigned long rsize;
 
@@ -229,10 +231,8 @@
 				npmem_ranges = i + 1;
 				mem_max = mem_limit;
 			}
-			num_physpages += pmem_ranges[i].pages;
 			break;
 		}
-		num_physpages += pmem_ranges[i].pages;
 		mem_max += rsize;
 	}
 
@@ -327,8 +327,9 @@
 	reserve_bootmem_node(NODE_DATA(0), 0UL,
 			(unsigned long)(PAGE0->mem_free +
 				PDC_CONSOLE_IO_IODC_SIZE), BOOTMEM_DEFAULT);
-	reserve_bootmem_node(NODE_DATA(0), __pa((unsigned long)_text),
-			(unsigned long)(_end - _text), BOOTMEM_DEFAULT);
+	reserve_bootmem_node(NODE_DATA(0), __pa(KERNEL_BINARY_TEXT_START),
+			(unsigned long)(_end - KERNEL_BINARY_TEXT_START),
+			BOOTMEM_DEFAULT);
 	reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
 			((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT),
 			BOOTMEM_DEFAULT);
@@ -381,6 +382,17 @@
 	request_resource(&sysram_resources[0], &pdcdata_resource);
 }
 
+static int __init parisc_text_address(unsigned long vaddr)
+{
+	static unsigned long head_ptr __initdata;
+
+	if (!head_ptr)
+		head_ptr = PAGE_MASK & (unsigned long)
+			dereference_function_descriptor(&parisc_kernel_start);
+
+	return core_kernel_text(vaddr) || vaddr == head_ptr;
+}
+
 static void __init map_pages(unsigned long start_vaddr,
 			     unsigned long start_paddr, unsigned long size,
 			     pgprot_t pgprot, int force)
@@ -397,15 +409,11 @@
 	unsigned long vaddr;
 	unsigned long ro_start;
 	unsigned long ro_end;
-	unsigned long fv_addr;
-	unsigned long gw_addr;
-	extern const unsigned long fault_vector_20;
-	extern void * const linux_gateway_page;
+	unsigned long kernel_end;
 
 	ro_start = __pa((unsigned long)_text);
 	ro_end   = __pa((unsigned long)&data_start);
-	fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
-	gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;
+	kernel_end  = __pa((unsigned long)&_end);
 
 	end_paddr = start_paddr + size;
 
@@ -463,24 +471,25 @@
 			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
 				pte_t pte;
 
-				/*
-				 * Map the fault vector writable so we can
-				 * write the HPMC checksum.
-				 */
 				if (force)
 					pte = __mk_pte(address, pgprot);
-				else if (core_kernel_text(vaddr) &&
-					 address != fv_addr)
+				else if (parisc_text_address(vaddr)) {
 					pte = __mk_pte(address, PAGE_KERNEL_EXEC);
+					if (address >= ro_start && address < kernel_end)
+						pte = pte_mkhuge(pte);
+				}
 				else
 #if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
-				if (address >= ro_start && address < ro_end
-					&& address != fv_addr
-					&& address != gw_addr)
-					pte = __mk_pte(address, PAGE_KERNEL_RO);
-				else
-#endif
+				if (address >= ro_start && address < ro_end) {
+					pte = __mk_pte(address, PAGE_KERNEL_EXEC);
+					pte = pte_mkhuge(pte);
+				} else
+#endif
+				{
 					pte = __mk_pte(address, pgprot);
+					if (address >= ro_start && address < kernel_end)
+						pte = pte_mkhuge(pte);
+				}
 
 				if (address >= end_paddr) {
 					if (force)
@@ -524,15 +533,12 @@
 
 	/* force the kernel to see the new TLB entries */
 	__flush_tlb_range(0, init_begin, init_end);
-	/* Attempt to catch anyone trying to execute code here
-	 * by filling the page with BRK insns.
-	 */
-	memset((void *)init_begin, 0x00, init_end - init_begin);
+
 	/* finally dump all the instructions which were cached, since the
 	 * pages are no-longer executable */
 	flush_icache_range(init_begin, init_end);
 
-	num_physpages += free_initmem_default(0);
+	free_initmem_default(POISON_FREE_INITMEM);
 
 	/* set up a new led state on systems shipped LED State panel */
 	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
@@ -580,7 +586,19 @@
 
 void __init mem_init(void)
 {
-	int codesize, reservedpages, datasize, initsize;
+	/* Do sanity checks on IPC (compat) structures */
+	BUILD_BUG_ON(sizeof(struct ipc64_perm) != 48);
+#ifndef CONFIG_64BIT
+	BUILD_BUG_ON(sizeof(struct semid64_ds) != 80);
+	BUILD_BUG_ON(sizeof(struct msqid64_ds) != 104);
+	BUILD_BUG_ON(sizeof(struct shmid64_ds) != 104);
+#endif
+#ifdef CONFIG_COMPAT
+	BUILD_BUG_ON(sizeof(struct compat_ipc64_perm) != sizeof(struct ipc64_perm));
+	BUILD_BUG_ON(sizeof(struct compat_semid64_ds) != 80);
+	BUILD_BUG_ON(sizeof(struct compat_msqid64_ds) != 104);
+	BUILD_BUG_ON(sizeof(struct compat_shmid64_ds) != 104);
+#endif
 
 	/* Do sanity checks on page table constants */
 	BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
@@ -590,45 +608,8 @@
 			> BITS_PER_LONG);
 
 	high_memory = __va((max_pfn << PAGE_SHIFT));
-
-#ifndef CONFIG_DISCONTIGMEM
-	max_mapnr = page_to_pfn(virt_to_page(high_memory - 1)) + 1;
-	totalram_pages += free_all_bootmem();
-#else
-	{
-		int i;
-
-		for (i = 0; i < npmem_ranges; i++)
-			totalram_pages += free_all_bootmem_node(NODE_DATA(i));
-	}
-#endif
-
-	codesize = (unsigned long)_etext - (unsigned long)_text;
-	datasize = (unsigned long)_edata - (unsigned long)_etext;
-	initsize = (unsigned long)__init_end - (unsigned long)__init_begin;
-
-	reservedpages = 0;
-{
-	unsigned long pfn;
-#ifdef CONFIG_DISCONTIGMEM
-	int i;
-
-	for (i = 0; i < npmem_ranges; i++) {
-		for (pfn = node_start_pfn(i); pfn < node_end_pfn(i); pfn++) {
-			if (PageReserved(pfn_to_page(pfn)))
-				reservedpages++;
-		}
-	}
-#else /* !CONFIG_DISCONTIGMEM */
-	for (pfn = 0; pfn < max_pfn; pfn++) {
-		/*
-		 * Only count reserved RAM pages
-		 */
-		if (PageReserved(pfn_to_page(pfn)))
-			reservedpages++;
-	}
-#endif
-}
+	set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1);
+	free_all_bootmem();
 
 #ifdef CONFIG_PA11
 	if (hppa_dma_ops == &pcxl_dma_ops) {
@@ -643,15 +624,7 @@
 	parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
 #endif
 
-	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
-		nr_free_pages() << (PAGE_SHIFT-10),
-		num_physpages << (PAGE_SHIFT-10),
-		codesize >> 10,
-		reservedpages << (PAGE_SHIFT-10),
-		datasize >> 10,
-		initsize >> 10
-	);
-
+	mem_init_print_info(NULL);
#ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */
 	printk("virtual kernel memory layout:\n"
 	       "    vmalloc : 0x%p - 0x%p   (%4ld MB)\n"
@@ -682,55 +655,30 @@
 
 void show_mem(unsigned int filter)
 {
-	int i,free = 0,total = 0,reserved = 0;
-	int shared = 0, cached = 0;
+	int total = 0,reserved = 0;
+	pg_data_t *pgdat;
 
 	printk(KERN_INFO "Mem-info:\n");
 	show_free_areas(filter);
-	if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
-		return;
-#ifndef CONFIG_DISCONTIGMEM
-	i = max_mapnr;
-	while (i-- > 0) {
-		total++;
-		if (PageReserved(mem_map+i))
-			reserved++;
-		else if (PageSwapCache(mem_map+i))
-			cached++;
-		else if (!page_count(&mem_map[i]))
-			free++;
-		else
-			shared += page_count(&mem_map[i]) - 1;
-	}
-#else
-	for (i = 0; i < npmem_ranges; i++) {
-		int j;
-		for (j = node_start_pfn(i); j < node_end_pfn(i); j++) {
-			struct page *p;
-			unsigned long flags;
-
-			pgdat_resize_lock(NODE_DATA(i), &flags);
-			p = nid_page_nr(i, j) - node_start_pfn(i);
-
-			total++;
-			if (PageReserved(p))
-				reserved++;
-			else if (PageSwapCache(p))
-				cached++;
-			else if (!page_count(p))
-				free++;
-			else
-				shared += page_count(p) - 1;
-			pgdat_resize_unlock(NODE_DATA(i), &flags);
-		}
+	for_each_online_pgdat(pgdat) {
+		unsigned long flags;
+		int zoneid;
+
+		pgdat_resize_lock(pgdat, &flags);
+		for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
+			struct zone *zone = &pgdat->node_zones[zoneid];
+			if (!populated_zone(zone))
+				continue;
+
+			total += zone->present_pages;
+			reserved = zone->present_pages - zone->managed_pages;
+		}
+		pgdat_resize_unlock(pgdat, &flags);
 	}
-#endif
+
 	printk(KERN_INFO "%d pages of RAM\n", total);
 	printk(KERN_INFO "%d reserved pages\n", reserved);
-	printk(KERN_INFO "%d pages shared\n", shared);
-	printk(KERN_INFO "%d pages swap cached\n", cached);
-
 #ifdef CONFIG_DISCONTIGMEM
 	{
@@ -774,8 +722,8 @@
 		unsigned long size;
 
 		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
-		end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
 		size = pmem_ranges[range].pages << PAGE_SHIFT;
+		end_paddr = start_paddr + size;
 
 		map_pages((unsigned long)__va(start_paddr), start_paddr,
 			  size, PAGE_KERNEL, 0);
@@ -790,7 +738,6 @@
 #endif
 
 	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
-	memset(empty_zero_page, 0, PAGE_SIZE);
 }
 
 static void __init gateway_init(void)
 {
@@ -813,78 +760,6 @@
 		  PAGE_SIZE, PAGE_GATEWAY, 1);
 }
 
-#ifdef CONFIG_HPUX
-void
-map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm)
-{
-	pgd_t *pg_dir;
-	pmd_t *pmd;
-	pte_t *pg_table;
-	unsigned long start_pmd;
-	unsigned long start_pte;
-	unsigned long address;
-	unsigned long hpux_gw_page_addr;
-	/* FIXME: This is 'const' in order to trick the compiler
-	   into not treating it as DP-relative data. */
-	extern void * const hpux_gateway_page;
-
-	hpux_gw_page_addr = HPUX_GATEWAY_ADDR & PAGE_MASK;
-
-	/*
-	 * Setup HP-UX Gateway page.
-	 *
-	 * The HP-UX gateway page resides in the user address space,
-	 * so it needs to be aliased into each process.
-	 */
-
-	pg_dir = pgd_offset(mm,hpux_gw_page_addr);
-
-#if PTRS_PER_PMD == 1
-	start_pmd = 0;
-#else
-	start_pmd = ((hpux_gw_page_addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
-#endif
-	start_pte = ((hpux_gw_page_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
-
-	address = __pa(&hpux_gateway_page);
-#if PTRS_PER_PMD == 1
-	pmd = (pmd_t *)__pa(pg_dir);
-#else
-	pmd = (pmd_t *) pgd_address(*pg_dir);
-
-	/*
-	 * pmd is physical at this point
-	 */
-
-	if (!pmd) {
-		pmd = (pmd_t *) get_zeroed_page(GFP_KERNEL);
-		pmd = (pmd_t *) __pa(pmd);
-	}
-
-	__pgd_val_set(*pg_dir, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pmd);
-#endif
-	/* now change pmd to kernel virtual addresses */
-
-	pmd = (pmd_t *)__va(pmd) + start_pmd;
-
-	/*
-	 * pg_table is physical at this point
-	 */
-
-	pg_table = (pte_t *) pmd_address(*pmd);
-	if (!pg_table)
-		pg_table = (pte_t *) __pa(get_zeroed_page(GFP_KERNEL));
-
-	__pmd_val_set(*pmd, PxD_FLAG_PRESENT | PxD_FLAG_VALID | (unsigned long) pg_table);
-
-	/* now change pg_table to kernel virtual addresses */
-
-	pg_table = (pte_t *) __va(pg_table) + start_pte;
-	set_pte(pg_table, __mk_pte(address, PAGE_GATEWAY));
-}
-EXPORT_SYMBOL(map_hpux_gateway_page);
-#endif
-
 void __init paging_init(void)
 {
 	int i;
@@ -1101,6 +976,6 @@
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-	num_physpages += free_reserved_area(start, end, 0, "initrd");
+	free_reserved_area((void *)start, (void *)end, -1, "initrd");
 }
 #endif