--- zzzz-none-000/linux-5.4.213/kernel/module.c	2022-09-15 10:04:56.000000000 +0000
+++ alder-5690pro-762/linux-5.4.213/kernel/module.c	2024-08-14 09:02:11.000000000 +0000
@@ -55,6 +55,14 @@
 #include <linux/audit.h>
 #include <uapi/linux/module.h>
 #include "module-internal.h"
+#if defined(CONFIG_AVM_FASTIRQ)
+#include
+#endif/*--- #if defined(CONFIG_AVM_FASTIRQ) ---*/
+#if defined(CONFIG_BUG_EXTRA_INFO)
+#include
+#endif/*--- #if defined(CONFIG_BUG_EXTRA_INFO) ---*/
+
+#include
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/module.h>
@@ -252,6 +260,11 @@
 struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
 #endif /* CONFIG_KGDB_KDB */
 
+#ifdef CONFIG_QCA_MINIDUMP
+struct list_head *minidump_modules = &modules;
+EXPORT_SYMBOL(minidump_modules);
+#endif /* CONFIG_QCA_MINIDUMP */
+
 static void module_assert_mutex(void)
 {
 	lockdep_assert_held(&module_mutex);
@@ -979,7 +992,7 @@
 {
 	struct module *mod;
 	char name[MODULE_NAME_LEN];
-	int ret, forced = 0;
+	int ret, forced = 0, status;
 
 	if (!capable(CAP_SYS_MODULE) || modules_disabled)
 		return -EPERM;
@@ -1032,11 +1045,17 @@
 	/* Final destruction now no one is using it. */
 	if (mod->exit != NULL)
 		mod->exit();
-	blocking_notifier_call_chain(&module_notify_list,
+	status = blocking_notifier_call_chain(&module_notify_list,
 				     MODULE_STATE_GOING, mod);
 	klp_module_going(mod);
 	ftrace_release_mod(mod);
 
+#if defined(CONFIG_AVM_ENHANCED)
+	if (status == NOTIFY_BAD) {
+		return -EBUSY;
+	}
+#endif/*--- #if defined(CONFIG_AVM_ENHANCED) ---*/
+
 	async_synchronize_full();
 
 	/* Store the name of the last unloaded module for diagnostic purposes */
@@ -1281,6 +1300,7 @@
 
 static const char vermagic[] = VERMAGIC_STRING;
 
+#if defined (CONFIG_MODVERSIONS) || !defined (CONFIG_MODULE_STRIPPED)
 static int try_to_force_load(struct module *mod, const char *reason)
 {
 #ifdef CONFIG_MODULE_FORCE_LOAD
@@ -1292,6 +1312,7 @@
 	return -ENOEXEC;
 #endif
 }
+#endif
 
 #ifdef CONFIG_MODVERSIONS
 
@@ -2277,7 +2298,7 @@
 
 	/* This may be empty, but that's OK */
 	module_arch_freeing_init(mod);
-	module_memfree(mod->init_layout.base);
+	avm_module_mem_free(mod->init_layout.base);
 	kfree(mod->args);
 	percpu_modfree(mod);
 
@@ -2285,7 +2306,7 @@
 	lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);
 
 	/* Finally, free the core (containing the module structure) */
-	module_memfree(mod->core_layout.base);
+	avm_module_mem_free(mod->core_layout.base);
 }
 
 void *__symbol_get(const char *symbol)
@@ -2510,7 +2531,7 @@
 			if ((s->sh_flags & masks[m][0]) != masks[m][0]
 			    || (s->sh_flags & masks[m][1])
 			    || s->sh_entsize != ~0UL
-			    || strstarts(sname, ".init"))
+			    || module_init_section(sname))
 				continue;
 			s->sh_entsize = get_offset(mod, &mod->core_layout.size, s, i);
 			pr_debug("\t%s\n", sname);
@@ -2543,7 +2564,7 @@
 			if ((s->sh_flags & masks[m][0]) != masks[m][0]
 			    || (s->sh_flags & masks[m][1])
 			    || s->sh_entsize != ~0UL
-			    || !strstarts(sname, ".init"))
+			    || !module_init_section(sname))
 				continue;
 			s->sh_entsize = (get_offset(mod, &mod->init_layout.size, s, i)
 					 | INIT_OFFSET_MASK);
@@ -2878,6 +2899,11 @@
 	return vmalloc_exec(size);
 }
 
+bool __weak module_init_section(const char *name)
+{
+	return strstarts(name, ".init");
+}
+
 bool __weak module_exit_section(const char *name)
 {
 	return strstarts(name, ".exit");
@@ -3256,9 +3282,11 @@
 
 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
 {
-	const char *modmagic = get_modinfo(info, "vermagic");
 	int err;
 
+#ifndef CONFIG_MODULE_STRIPPED
+	const char *modmagic = get_modinfo(info, "vermagic");
+
 	if (flags & MODULE_INIT_IGNORE_VERMAGIC)
 		modmagic = NULL;
 
@@ -3279,6 +3307,7 @@
 				mod->name);
 		add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
 	}
+#endif
 
 	check_modinfo_retpoline(mod, info);
 
@@ -3404,7 +3433,7 @@
 	void *ptr;
 
 	/* Do the allocs. */
-	ptr = module_alloc(mod->core_layout.size);
+	ptr = avm_module_mem_alloc(mod, mod->core_layout.size, avm_module_mem_type_core);
 	/*
 	 * The pointer to this block is stored in the module structure
 	 * which is inside the block. Just mark it as not being a
@@ -3418,7 +3447,7 @@
 	mod->core_layout.base = ptr;
 
 	if (mod->init_layout.size) {
-		ptr = module_alloc(mod->init_layout.size);
+		ptr = avm_module_mem_alloc(mod, mod->init_layout.size, avm_module_mem_type_init);
 		/*
 		 * The pointer to this block is stored in the module structure
 		 * which is inside the block. This block doesn't need to be
@@ -3427,7 +3456,7 @@
 		 */
 		kmemleak_ignore(ptr);
 		if (!ptr) {
-			module_memfree(mod->core_layout.base);
+			avm_module_mem_free(mod->core_layout.base);
 			return -ENOMEM;
 		}
 		memset(ptr, 0, mod->init_layout.size);
@@ -3613,8 +3642,8 @@
 {
 	percpu_modfree(mod);
 	module_arch_freeing_init(mod);
-	module_memfree(mod->init_layout.base);
-	module_memfree(mod->core_layout.base);
+	avm_module_mem_free(mod->init_layout.base);
+	avm_module_mem_free(mod->core_layout.base);
 }
 
 int __weak module_finalize(const Elf_Ehdr *hdr,
@@ -3688,7 +3717,7 @@
 
 	llist_for_each_safe(pos, n, list) {
 		initfree = container_of(pos, struct mod_initfree, node);
-		module_memfree(initfree->module_init);
+		avm_module_mem_free(initfree->module_init);
 		kfree(initfree);
 	}
 }
@@ -4573,6 +4602,10 @@
 {
 	const struct exception_table_entry *e = NULL;
 	struct module *mod;
+#if defined(CONFIG_AVM_FASTIRQ)
+	if (firq_is_avm_rte_restricted_mem_access())
+		return e;
+#endif
 
 	preempt_disable();
 	mod = __module_address(addr);
@@ -4624,6 +4657,10 @@
 {
 	struct module *mod;
 
+#if defined(CONFIG_AVM_FASTIRQ)
+	if (firq_is_avm_rte_restricted_mem_access())
+		return NULL;
+#endif
 	if (addr < module_addr_min || addr > module_addr_max)
 		return NULL;
 
@@ -4682,6 +4719,11 @@
 	struct module *mod;
 	char buf[MODULE_FLAGS_BUF_SIZE];
 
+#if defined(CONFIG_AVM_FASTIRQ)
+	if (firq_is_avm_rte_restricted_mem_access())
+		return;
+#endif
+
 	printk(KERN_DEFAULT "Modules linked in:");
 	/* Most callers should already have preempt disabled, but make sure */
 	preempt_disable();
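
Note on the CONFIG_AVM_ENHANCED hunk in delete_module(): with that change, a listener on module_notify_list can veto an unload by returning NOTIFY_BAD for MODULE_STATE_GOING, which delete_module() then reports as -EBUSY. The sketch below is only an illustration of that mechanism and is not part of the patch; the module name "example_mod" is made up, and only the standard register_module_notifier()/unregister_module_notifier() API is assumed.

/*
 * Hypothetical sketch: a module notifier that refuses to let a module
 * named "example_mod" be unloaded. With the CONFIG_AVM_ENHANCED hunk
 * above, the NOTIFY_BAD result for MODULE_STATE_GOING is propagated by
 * delete_module() as -EBUSY to the caller of rmmod.
 */
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/string.h>

static int veto_unload(struct notifier_block *nb, unsigned long action,
		       void *data)
{
	struct module *mod = data;	/* module the chain is reporting on */

	if (action == MODULE_STATE_GOING && !strcmp(mod->name, "example_mod"))
		return NOTIFY_BAD;	/* refuse the unload */

	return NOTIFY_OK;
}

static struct notifier_block veto_nb = {
	.notifier_call = veto_unload,
};

static int __init veto_init(void)
{
	return register_module_notifier(&veto_nb);
}

static void __exit veto_exit(void)
{
	unregister_module_notifier(&veto_nb);
}

module_init(veto_init);
module_exit(veto_exit);
MODULE_LICENSE("GPL");

As the patched flow shows, the NOTIFY_BAD check runs after mod->exit() has already been called, so a veto only stops the final teardown and the return code, not the module's exit handler.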