--- zzzz-none-000/linux-3.10.107/include/asm-generic/vmlinux.lds.h	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/include/asm-generic/vmlinux.lds.h	2021-02-04 17:41:59.000000000 +0000
@@ -40,6 +40,8 @@
  * }
  *
  * [__init_begin, __init_end] is the init section that may be freed after init
+ * // __init_begin and __init_end should be page aligned, so that we can
+ * // free the whole .init memory
  * [_stext, _etext] is the text section
  * [_sdata, _edata] is the data section
  *
@@ -52,6 +54,16 @@
 #define LOAD_OFFSET 0
 #endif
 
+#ifndef SYMTAB_KEEP
+#define SYMTAB_KEEP KEEP(*(SORT(___ksymtab+*)))
+#define SYMTAB_KEEP_GPL KEEP(*(SORT(___ksymtab_gpl+*)))
+#endif
+
+#ifndef SYMTAB_DISCARD
+#define SYMTAB_DISCARD
+#define SYMTAB_DISCARD_GPL
+#endif
+
 #include <linux/export.h>
 
 /* Align . to a 8 byte boundary equals to maximum function alignment. */
@@ -68,14 +80,6 @@
  * are handled as text/data or they can be discarded (which
  * often happens at runtime)
  */
-#ifdef CONFIG_HOTPLUG
-#define DEV_KEEP(sec) *(.dev##sec)
-#define DEV_DISCARD(sec)
-#else
-#define DEV_KEEP(sec)
-#define DEV_DISCARD(sec) *(.dev##sec)
-#endif
-
 #ifdef CONFIG_HOTPLUG_CPU
 #define CPU_KEEP(sec) *(.cpu##sec)
 #define CPU_DISCARD(sec)
@@ -95,7 +99,7 @@
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
 #define MCOUNT_REC()	. = ALIGN(8);	\
 			VMLINUX_SYMBOL(__start_mcount_loc) = .;	\
-			*(__mcount_loc)	\
+			KEEP(*(__mcount_loc))	\
 			VMLINUX_SYMBOL(__stop_mcount_loc) = .;
 #else
 #define MCOUNT_REC()
@@ -103,7 +107,7 @@
 
 #ifdef CONFIG_TRACE_BRANCH_PROFILING
 #define LIKELY_PROFILE()	VMLINUX_SYMBOL(__start_annotated_branch_profile) = .;	\
-			*(_ftrace_annotated_branch)	\
+			KEEP(*(_ftrace_annotated_branch))	\
 			VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
 #else
 #define LIKELY_PROFILE()
@@ -111,70 +115,95 @@
 
 #ifdef CONFIG_PROFILE_ALL_BRANCHES
 #define BRANCH_PROFILE()	VMLINUX_SYMBOL(__start_branch_profile) = .;	\
-			*(_ftrace_branch)	\
+			KEEP(*(_ftrace_branch))	\
 			VMLINUX_SYMBOL(__stop_branch_profile) = .;
 #else
 #define BRANCH_PROFILE()
 #endif
 
+#ifdef CONFIG_KPROBES
+#define KPROBE_BLACKLIST()	. = ALIGN(8);	\
+			VMLINUX_SYMBOL(__start_kprobe_blacklist) = .;	\
+			KEEP(*(_kprobe_blacklist))	\
+			VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .;
+#else
+#define KPROBE_BLACKLIST()
+#endif
+
 #ifdef CONFIG_EVENT_TRACING
 #define FTRACE_EVENTS()	. = ALIGN(8);	\
 			VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
-			*(_ftrace_events)	\
-			VMLINUX_SYMBOL(__stop_ftrace_events) = .;
+			KEEP(*(_ftrace_events))	\
+			VMLINUX_SYMBOL(__stop_ftrace_events) = .;	\
+			VMLINUX_SYMBOL(__start_ftrace_enum_maps) = .;	\
+			KEEP(*(_ftrace_enum_map))	\
+			VMLINUX_SYMBOL(__stop_ftrace_enum_maps) = .;
 #else
 #define FTRACE_EVENTS()
 #endif
 
 #ifdef CONFIG_TRACING
 #define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .;	\
-			*(__trace_printk_fmt) /* Trace_printk fmt' pointer */	\
+			KEEP(*(__trace_printk_fmt)) /* Trace_printk fmt' pointer */	\
 			VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
+#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .;	\
+			*(__tracepoint_str) /* Trace_printk fmt' pointer */	\
+			VMLINUX_SYMBOL(__stop___tracepoint_str) = .;
 #else
 #define TRACE_PRINTKS()
+#define TRACEPOINT_STR()
 #endif
 
 #ifdef CONFIG_FTRACE_SYSCALLS
 #define TRACE_SYSCALLS() . = ALIGN(8);	\
 			 VMLINUX_SYMBOL(__start_syscalls_metadata) = .;	\
-			 *(__syscalls_metadata)	\
+			 KEEP(*(__syscalls_metadata))	\
 			 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
 #else
 #define TRACE_SYSCALLS()
 #endif
 
-#ifdef CONFIG_CLKSRC_OF
-#define CLKSRC_OF_TABLES()	. = ALIGN(8);	\
-			VMLINUX_SYMBOL(__clksrc_of_table) = .;	\
-			*(__clksrc_of_table)	\
-			*(__clksrc_of_table_end)
-#else
-#define CLKSRC_OF_TABLES()
+#ifdef CONFIG_SERIAL_EARLYCON
+#define EARLYCON_TABLE() STRUCT_ALIGN();	\
+			 VMLINUX_SYMBOL(__earlycon_table) = .;	\
+			 KEEP(*(__earlycon_table))	\
+			 VMLINUX_SYMBOL(__earlycon_table_end) = .;
+#else
+#define EARLYCON_TABLE()
 #endif
 
-#ifdef CONFIG_IRQCHIP
-#define IRQCHIP_OF_MATCH_TABLE()	\
+#define ___OF_TABLE(cfg, name)	_OF_TABLE_##cfg(name)
+#define __OF_TABLE(cfg, name)	___OF_TABLE(cfg, name)
+#define OF_TABLE(cfg, name)	__OF_TABLE(config_enabled(cfg), name)
+#define _OF_TABLE_0(name)
+#define _OF_TABLE_1(name)	\
 	. = ALIGN(8);	\
-	VMLINUX_SYMBOL(__irqchip_begin) = .;	\
-	*(__irqchip_of_table)	\
-	*(__irqchip_of_end)
-#else
-#define IRQCHIP_OF_MATCH_TABLE()
-#endif
+	VMLINUX_SYMBOL(__##name##_of_table) = .;	\
+	KEEP(*(__##name##_of_table))	\
+	KEEP(*(__##name##_of_table_end))
+
+#define CLKSRC_OF_TABLES()	OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
+#define IRQCHIP_OF_MATCH_TABLE()	OF_TABLE(CONFIG_IRQCHIP, irqchip)
+#define CLK_OF_TABLES()	OF_TABLE(CONFIG_COMMON_CLK, clk)
+#define IOMMU_OF_TABLES()	OF_TABLE(CONFIG_OF_IOMMU, iommu)
+#define RESERVEDMEM_OF_TABLES()	OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
+#define CPU_METHOD_OF_TABLES()	OF_TABLE(CONFIG_SMP, cpu_method)
+#define CPUIDLE_METHOD_OF_TABLES()	OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)
 
-#ifdef CONFIG_COMMON_CLK
-#define CLK_OF_TABLES()	. = ALIGN(8);	\
-			VMLINUX_SYMBOL(__clk_of_table) = .;	\
-			*(__clk_of_table)	\
-			*(__clk_of_table_end)
+#ifdef CONFIG_ACPI
+#define ACPI_PROBE_TABLE(name)	\
+	. = ALIGN(8);	\
+	VMLINUX_SYMBOL(__##name##_acpi_probe_table) = .;	\
+	*(__##name##_acpi_probe_table)	\
+	VMLINUX_SYMBOL(__##name##_acpi_probe_table_end) = .;
 #else
-#define CLK_OF_TABLES()
+#define ACPI_PROBE_TABLE(name)
 #endif
 
 #define KERNEL_DTB()	\
 	STRUCT_ALIGN();	\
 	VMLINUX_SYMBOL(__dtb_start) = .;	\
-	*(.dtb.init.rodata)	\
+	KEEP(*(.dtb.init.rodata))	\
 	VMLINUX_SYMBOL(__dtb_end) = .;
 
 /* .data section */
@@ -182,10 +211,6 @@
 	*(.data)	\
 	*(.ref.data)	\
 	*(.data..shared_aligned) /* percpu related */	\
-	DEV_KEEP(init.data)	\
-	DEV_KEEP(exit.data)	\
-	CPU_KEEP(init.data)	\
-	CPU_KEEP(exit.data)	\
 	MEM_KEEP(init.data)	\
 	MEM_KEEP(exit.data)	\
 	*(.data.unlikely)	\
@@ -194,15 +219,17 @@
 	/* implement dynamic printk debug */	\
 	. = ALIGN(8);	\
 	VMLINUX_SYMBOL(__start___jump_table) = .;	\
-	*(__jump_table)	\
+	KEEP(*(__jump_table))	\
 	VMLINUX_SYMBOL(__stop___jump_table) = .;	\
 	. = ALIGN(8);	\
 	VMLINUX_SYMBOL(__start___verbose) = .;	\
-	*(__verbose)	\
+	KEEP(*(__verbose))	\
 	VMLINUX_SYMBOL(__stop___verbose) = .;	\
 	LIKELY_PROFILE()	\
 	BRANCH_PROFILE()	\
-	TRACE_PRINTKS()
+	TRACE_PRINTKS()	\
+	TRACEPOINT_STR()	\
+	*(.data.[a-zA-Z_]*)
 
 /*
  * Data section helpers
  */
@@ -256,90 +283,86 @@
 	/* PCI quirks */	\
 	.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;	\
-		*(.pci_fixup_early)	\
+		KEEP(*(.pci_fixup_early))	\
 		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;	\
 		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;	\
-		*(.pci_fixup_header)	\
+		KEEP(*(.pci_fixup_header))	\
 		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;	\
 		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;	\
-		*(.pci_fixup_final)	\
+		KEEP(*(.pci_fixup_final))	\
 		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;	\
 		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;	\
-		*(.pci_fixup_enable)	\
+		KEEP(*(.pci_fixup_enable))	\
 		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;	\
 		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;	\
-		*(.pci_fixup_resume)	\
+		KEEP(*(.pci_fixup_resume))	\
 		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;	\
 		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .;	\
-		*(.pci_fixup_resume_early)	\
+		KEEP(*(.pci_fixup_resume_early))	\
 		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .;	\
 		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;	\
-		*(.pci_fixup_suspend)	\
+		KEEP(*(.pci_fixup_suspend))	\
 		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;	\
+		VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .;	\
+		KEEP(*(.pci_fixup_suspend_late))	\
+		VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .;	\
 	}	\
 	\
 	/* Built-in firmware blobs */	\
 	.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start_builtin_fw) = .;	\
-		*(.builtin_fw)	\
+		KEEP(*(.builtin_fw))	\
 		VMLINUX_SYMBOL(__end_builtin_fw) = .;	\
 	}	\
 	\
-	/* RapidIO route ops */	\
-	.rio_ops : AT(ADDR(.rio_ops) - LOAD_OFFSET) {	\
-		VMLINUX_SYMBOL(__start_rio_switch_ops) = .;	\
-		*(.rio_switch_ops)	\
-		VMLINUX_SYMBOL(__end_rio_switch_ops) = .;	\
-	}	\
-	\
 	TRACEDATA	\
 	\
 	/* Kernel symbol table: Normal symbols */	\
 	__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start___ksymtab) = .;	\
-		*(SORT(___ksymtab+*))	\
+		SYMTAB_KEEP	\
 		VMLINUX_SYMBOL(__stop___ksymtab) = .;	\
 	}	\
 	\
 	/* Kernel symbol table: GPL-only symbols */	\
 	__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;	\
-		*(SORT(___ksymtab_gpl+*))	\
+		SYMTAB_KEEP_GPL	\
 		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;	\
 	}	\
 	\
 	/* Kernel symbol table: Normal unused symbols */	\
 	__ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start___ksymtab_unused) = .;	\
-		*(SORT(___ksymtab_unused+*))	\
+		KEEP(*(SORT(___ksymtab_unused+*)))	\
 		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;	\
 	}	\
 	\
 	/* Kernel symbol table: GPL-only unused symbols */	\
 	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .;	\
-		*(SORT(___ksymtab_unused_gpl+*))	\
+		KEEP(*(SORT(___ksymtab_unused_gpl+*)))	\
 		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .;	\
 	}	\
 	\
 	/* Kernel symbol table: GPL-future-only symbols */	\
 	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .;	\
-		*(SORT(___ksymtab_gpl_future+*))	\
+		KEEP(*(SORT(___ksymtab_gpl_future+*)))	\
 		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .;	\
 	}	\
 	\
 	/* Kernel symbol table: Normal symbols */	\
 	__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start___kcrctab) = .;	\
-		*(SORT(___kcrctab+*))	\
+		KEEP(*(SORT(___kcrctab+*)))	\
 		VMLINUX_SYMBOL(__stop___kcrctab) = .;	\
 	}	\
 	\
 	/* Kernel symbol table: GPL-only symbols */	\
 	__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;	\
-		*(SORT(___kcrctab_gpl+*))	\
+		KEEP(*(SORT(___kcrctab_gpl+*)))	\
 		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;	\
 	}	\
 	\
@@ -353,29 +376,25 @@
 	/* Kernel symbol table: GPL-only unused symbols */	\
 	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .;	\
-		*(SORT(___kcrctab_unused_gpl+*))	\
+		KEEP(*(SORT(___kcrctab_unused_gpl+*)))	\
 		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .;	\
 	}	\
 	\
 	/* Kernel symbol table: GPL-future-only symbols */	\
 	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .;	\
-		*(SORT(___kcrctab_gpl_future+*))	\
+		KEEP(*(SORT(___kcrctab_gpl_future+*)))	\
 		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .;	\
 	}	\
 	\
 	/* Kernel symbol table: strings */	\
 	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
-		*(__ksymtab_strings)	\
+		*(__ksymtab_strings+*)	\
 	}	\
 	\
 	/* __*init sections */	\
 	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {	\
 		*(.ref.rodata)	\
-		DEV_KEEP(init.rodata)	\
-		DEV_KEEP(exit.rodata)	\
-		CPU_KEEP(init.rodata)	\
-		CPU_KEEP(exit.rodata)	\
 		MEM_KEEP(init.rodata)	\
 		MEM_KEEP(exit.rodata)	\
 	}	\
 	\
@@ -383,14 +402,14 @@
 	/* Built-in module parameters. */	\
 	__param : AT(ADDR(__param) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start___param) = .;	\
-		*(__param)	\
+		KEEP(*(__param))	\
 		VMLINUX_SYMBOL(__stop___param) = .;	\
 	}	\
 	\
 	/* Built-in module versions. */	\
 	__modver : AT(ADDR(__modver) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start___modver) = .;	\
-		*(__modver)	\
+		KEEP(*(__modver))	\
 		VMLINUX_SYMBOL(__stop___modver) = .;	\
 		. = ALIGN((align));	\
 		VMLINUX_SYMBOL(__end_rodata) = .;	\
@@ -413,16 +432,10 @@
  * during second ld run in second ld pass when generating System.map
  */
 #define TEXT_TEXT	\
 		ALIGN_FUNCTION();	\
-		*(.text.hot)	\
-		*(.text)	\
+		*(.text.hot .text .text.fixup .text.unlikely)	\
 		*(.ref.text)	\
-	DEV_KEEP(init.text)	\
-	DEV_KEEP(exit.text)	\
-	CPU_KEEP(init.text)	\
-	CPU_KEEP(exit.text)	\
 	MEM_KEEP(init.text)	\
 	MEM_KEEP(exit.text)	\
-	*(.text.unlikely)
 
 /* sched.text is aling to function alignment to secure we have same
@@ -450,7 +463,7 @@
 #define ENTRY_TEXT	\
 		ALIGN_FUNCTION();	\
 		VMLINUX_SYMBOL(__entry_text_start) = .;	\
-		*(.entry.text)	\
+		KEEP(*(.entry.text))	\
 		VMLINUX_SYMBOL(__entry_text_end) = .;
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -478,7 +491,7 @@
 	. = ALIGN(align);	\
 	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start___ex_table) = .;	\
-		*(__ex_table)	\
+		KEEP(*(__ex_table))	\
 		VMLINUX_SYMBOL(__stop___ex_table) = .;	\
 	}
 
@@ -494,7 +507,9 @@
 #ifdef CONFIG_CONSTRUCTORS
 #define KERNEL_CTORS()	. = ALIGN(8);	\
 			VMLINUX_SYMBOL(__ctors_start) = .;	\
-			*(.ctors)	\
+			KEEP(*(.ctors))	\
+			*(SORT(.init_array.*))	\
+			KEEP(*(.init_array))	\
 			VMLINUX_SYMBOL(__ctors_end) = .;
 #else
 #define KERNEL_CTORS()
@@ -503,41 +518,41 @@
 /* init and exit section handling */
 #define INIT_DATA	\
 	*(.init.data)	\
-	DEV_DISCARD(init.data)	\
-	CPU_DISCARD(init.data)	\
 	MEM_DISCARD(init.data)	\
 	KERNEL_CTORS()	\
 	MCOUNT_REC()	\
 	*(.init.rodata)	\
 	FTRACE_EVENTS()	\
 	TRACE_SYSCALLS()	\
-	DEV_DISCARD(init.rodata)	\
-	CPU_DISCARD(init.rodata)	\
+	KPROBE_BLACKLIST()	\
 	MEM_DISCARD(init.rodata)	\
 	CLK_OF_TABLES()	\
+	RESERVEDMEM_OF_TABLES()	\
 	CLKSRC_OF_TABLES()	\
+	IOMMU_OF_TABLES()	\
+	CPU_METHOD_OF_TABLES()	\
+	CPUIDLE_METHOD_OF_TABLES()	\
 	KERNEL_DTB()	\
-	IRQCHIP_OF_MATCH_TABLE()
+	IRQCHIP_OF_MATCH_TABLE()	\
+	ACPI_PROBE_TABLE(irqchip)	\
+	ACPI_PROBE_TABLE(clksrc)	\
+	EARLYCON_TABLE()
 
 #define INIT_TEXT	\
 	*(.init.text)	\
-	DEV_DISCARD(init.text)	\
-	CPU_DISCARD(init.text)	\
+	*(.text.startup)	\
 	MEM_DISCARD(init.text)
 
 #define EXIT_DATA	\
 	*(.exit.data)	\
-	DEV_DISCARD(exit.data)	\
-	DEV_DISCARD(exit.rodata)	\
-	CPU_DISCARD(exit.data)	\
-	CPU_DISCARD(exit.rodata)	\
+	*(.fini_array)	\
+	*(.dtors)	\
 	MEM_DISCARD(exit.data)	\
 	MEM_DISCARD(exit.rodata)
 
 #define EXIT_TEXT	\
 	*(.exit.text)	\
-	DEV_DISCARD(exit.text)	\
-	CPU_DISCARD(exit.text)	\
+	*(.text.exit)	\
 	MEM_DISCARD(exit.text)
 
 #define EXIT_CALL	\
@@ -550,7 +565,7 @@
 #define SBSS(sbss_align)	\
 	. = ALIGN(sbss_align);	\
 	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {	\
-		*(.sbss)	\
+		*(.sbss .sbss.*)	\
 		*(.scommon)	\
 	}
 
@@ -568,7 +583,7 @@
 		BSS_FIRST_SECTIONS	\
 		*(.bss..page_aligned)	\
 		*(.dynbss)	\
-		*(.bss)	\
+		*(.bss .bss.*)	\
 		*(COMMON)	\
 	}
 
@@ -617,7 +632,7 @@
 	. = ALIGN(8);	\
 	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start___bug_table) = .;	\
-		*(__bug_table)	\
+		KEEP(*(__bug_table))	\
 		VMLINUX_SYMBOL(__stop___bug_table) = .;	\
 	}
 #else
@@ -629,7 +644,7 @@
 	. = ALIGN(4);	\
 	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__tracedata_start) = .;	\
-		*(.tracedata)	\
+		KEEP(*(.tracedata))	\
 		VMLINUX_SYMBOL(__tracedata_end) = .;	\
 	}
 #else
@@ -646,17 +661,17 @@
 #define INIT_SETUP(initsetup_align)	\
 		. = ALIGN(initsetup_align);	\
 		VMLINUX_SYMBOL(__setup_start) = .;	\
-		*(.init.setup)	\
+		KEEP(*(.init.setup))	\
 		VMLINUX_SYMBOL(__setup_end) = .;
 
 #define INIT_CALLS_LEVEL(level)	\
 		VMLINUX_SYMBOL(__initcall##level##_start) = .;	\
-		*(.initcall##level##.init)	\
-		*(.initcall##level##s.init)	\
+		KEEP(*(.initcall##level##.init))	\
+		KEEP(*(.initcall##level##s.init))	\
 
 #define INIT_CALLS	\
 		VMLINUX_SYMBOL(__initcall_start) = .;	\
-		*(.initcallearly.init)	\
+		KEEP(*(.initcallearly.init))	\
 		INIT_CALLS_LEVEL(0)	\
 		INIT_CALLS_LEVEL(1)	\
 		INIT_CALLS_LEVEL(2)	\
@@ -670,21 +685,21 @@
 
 #define CON_INITCALL	\
 		VMLINUX_SYMBOL(__con_initcall_start) = .;	\
-		*(.con_initcall.init)	\
+		KEEP(*(.con_initcall.init))	\
 		VMLINUX_SYMBOL(__con_initcall_end) = .;
 
 #define SECURITY_INITCALL	\
 		VMLINUX_SYMBOL(__security_initcall_start) = .;	\
-		*(.security_initcall.init)	\
+		KEEP(*(.security_initcall.init))	\
 		VMLINUX_SYMBOL(__security_initcall_end) = .;
 
 #ifdef CONFIG_BLK_DEV_INITRD
 #define INIT_RAM_FS	\
 	. = ALIGN(4);	\
 	VMLINUX_SYMBOL(__initramfs_start) = .;	\
-	*(.init.ramfs)	\
+	KEEP(*(.init.ramfs))	\
 	. = ALIGN(8);	\
-	*(.init.ramfs.info)
+	KEEP(*(.init.ramfs.info))
 #else
 #define INIT_RAM_FS
 #endif
@@ -703,6 +718,8 @@
 	EXIT_TEXT	\
 	EXIT_DATA	\
 	EXIT_CALL	\
+	SYMTAB_DISCARD	\
+	SYMTAB_DISCARD_GPL	\
 	*(.discard)	\
 	*(.discard.*)	\
 	}
@@ -723,7 +740,7 @@
 	. = ALIGN(PAGE_SIZE);	\
 	*(.data..percpu..page_aligned)	\
 	. = ALIGN(cacheline);	\
-	*(.data..percpu..readmostly)	\
+	*(.data..percpu..read_mostly)	\
 	. = ALIGN(cacheline);	\
 	*(.data..percpu)	\
 	*(.data..percpu..shared_aligned)	\