--- zzzz-none-000/linux-3.10.107/drivers/acpi/apei/ghes.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/drivers/acpi/apei/ghes.c	2021-02-04 17:41:59.000000000 +0000
@@ -23,17 +23,12 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/acpi.h>
-#include <linux/acpi_io.h>
 #include <linux/io.h>
 #include <linux/interrupt.h>
 #include <linux/timer.h>
@@ -48,11 +43,11 @@
 #include <linux/genalloc.h>
 #include <linux/pci.h>
 #include <linux/aer.h>
+#include <linux/nmi.h>
 
 #include <acpi/ghes.h>
-#include <asm/mce.h>
+#include <acpi/apei.h>
 #include <asm/tlbflush.h>
-#include <asm/nmi.h>
 
 #include "apei-internal.h"
@@ -75,20 +70,18 @@
 #define GHES_ESTATUS_CACHE_LEN(estatus_len)			\
 	(sizeof(struct ghes_estatus_cache) + (estatus_len))
 #define GHES_ESTATUS_FROM_CACHE(estatus_cache)			\
-	((struct acpi_hest_generic_status *)			\
+	((struct acpi_hest_generic_status *)				\
 	 ((struct ghes_estatus_cache *)(estatus_cache) + 1))
 
 #define GHES_ESTATUS_NODE_LEN(estatus_len)			\
 	(sizeof(struct ghes_estatus_node) + (estatus_len))
-#define GHES_ESTATUS_FROM_NODE(estatus_node)			\
+#define GHES_ESTATUS_FROM_NODE(estatus_node)				\
 	((struct acpi_hest_generic_status *)				\
 	 ((struct ghes_estatus_node *)(estatus_node) + 1))
 
 bool ghes_disable;
 module_param_named(disable, ghes_disable, bool, 0);
 
-static int ghes_panic_timeout	__read_mostly = 30;
-
 /*
  * All error sources notified with SCI shares one notifier function,
  * so they need to be linked and checked one by one.  This is applied
@@ -98,16 +91,9 @@
  * list changing, not for traversing.
  */
 static LIST_HEAD(ghes_sci);
-static LIST_HEAD(ghes_nmi);
 static DEFINE_MUTEX(ghes_list_mutex);
 
 /*
- * NMI may be triggered on any CPU, so ghes_nmi_lock is used for
- * mutual exclusion.
- */
-static DEFINE_RAW_SPINLOCK(ghes_nmi_lock);
-
-/*
  * Because the memory area used to transfer hardware error information
  * from BIOS to Linux can be determined only in NMI, IRQ or timer
  * handler, but general ioremap can not be used in atomic context, so
@@ -115,12 +101,16 @@
  */
 
 /*
- * Two virtual pages are used, one for NMI context, the other for
- * IRQ/PROCESS context
+ * Two virtual pages are used, one for IRQ/PROCESS context, the other for
+ * NMI context (optionally).
  */
-#define GHES_IOREMAP_PAGES 2
-#define GHES_IOREMAP_NMI_PAGE(base)	(base)
-#define GHES_IOREMAP_IRQ_PAGE(base)	((base) + PAGE_SIZE)
+#ifdef CONFIG_HAVE_ACPI_APEI_NMI
+#define GHES_IOREMAP_PAGES 2
+#else
+#define GHES_IOREMAP_PAGES 1
+#endif
+#define GHES_IOREMAP_IRQ_PAGE(base)	(base)
+#define GHES_IOREMAP_NMI_PAGE(base)	((base) + PAGE_SIZE)
 
 /* virtual memory area for atomic ioremap */
 static struct vm_struct *ghes_ioremap_area;
@@ -131,20 +121,10 @@
 static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
 static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);
 
-/*
- * printk is not safe in NMI context.  So in NMI handler, we allocate
- * required memory from lock-less memory allocator
- * (ghes_estatus_pool), save estatus into it, put them into lock-less
- * list (ghes_estatus_llist), then delay printk into IRQ context via
- * irq_work (ghes_proc_irq_work).  ghes_estatus_size_request record
- * required pool size by all NMI error source.
- */
 static struct gen_pool *ghes_estatus_pool;
 static unsigned long ghes_estatus_pool_size_request;
-static struct llist_head ghes_estatus_llist;
-static struct irq_work ghes_proc_irq_work;
 
-struct ghes_estatus_cache *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
+static struct ghes_estatus_cache *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
 static atomic_t ghes_estatus_cache_alloced;
 
 static int ghes_ioremap_init(void)
@@ -177,11 +157,15 @@
 
 static void __iomem *ghes_ioremap_pfn_irq(u64 pfn)
 {
-	unsigned long vaddr;
+	unsigned long vaddr, paddr;
+	pgprot_t prot;
 
 	vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr);
-	ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
-			   pfn << PAGE_SHIFT, PAGE_KERNEL);
+
+	paddr = pfn << PAGE_SHIFT;
+	prot = arch_apei_get_mem_attribute(paddr);
+
+	ioremap_page_range(vaddr, vaddr + PAGE_SIZE, paddr, prot);
 
 	return (void __iomem *)vaddr;
 }
@@ -193,7 +177,7 @@
 
 	BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base));
 	unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
-	__flush_tlb_one(vaddr);
+	arch_apei_flush_tlb_one(vaddr);
 }
 
 static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
@@ -203,7 +187,7 @@
 
 	BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base));
 	unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
-	__flush_tlb_one(vaddr);
+	arch_apei_flush_tlb_one(vaddr);
 }
 
 static int ghes_estatus_pool_init(void)
@@ -250,11 +234,6 @@
 	return 0;
 }
 
-static void ghes_estatus_pool_shrink(unsigned long len)
-{
-	ghes_estatus_pool_size_request -= PAGE_ALIGN(len);
-}
-
 static struct ghes *ghes_new(struct acpi_hest_generic *generic)
 {
 	struct ghes *ghes;
@@ -378,17 +357,17 @@
 	ghes->flags |= GHES_TO_CLEAR;
 
 	rc = -EIO;
-	len = apei_estatus_len(ghes->estatus);
+	len = cper_estatus_len(ghes->estatus);
 	if (len < sizeof(*ghes->estatus))
 		goto err_read_block;
 	if (len > ghes->generic->error_block_length)
 		goto err_read_block;
-	if (apei_estatus_check_header(ghes->estatus))
+	if (cper_estatus_check_header(ghes->estatus))
 		goto err_read_block;
 	ghes_copy_tofrom_phys(ghes->estatus + 1,
 			      buf_paddr + sizeof(*ghes->estatus),
 			      len - sizeof(*ghes->estatus), 1);
-	if (apei_estatus_check(ghes->estatus))
+	if (cper_estatus_check(ghes->estatus))
 		goto err_read_block;
 	rc = 0;
 
@@ -409,6 +388,38 @@
 	ghes->flags &= ~GHES_TO_CLEAR;
 }
 
+static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int sev)
+{
+#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE
+	unsigned long pfn;
+	int flags = -1;
+	int sec_sev = ghes_severity(gdata->error_severity);
+	struct cper_sec_mem_err *mem_err;
+	mem_err = (struct cper_sec_mem_err *)(gdata + 1);
+
+	if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
+		return;
+
+	pfn = mem_err->physical_addr >> PAGE_SHIFT;
+	if (!pfn_valid(pfn)) {
+		pr_warn_ratelimited(FW_WARN GHES_PFX
+		"Invalid address in generic error data: %#llx\n",
+		mem_err->physical_addr);
+		return;
+	}
+
+	/* iff following two events can be handled properly by now */
+	if (sec_sev == GHES_SEV_CORRECTED &&
+	    (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED))
+		flags = MF_SOFT_OFFLINE;
+	if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE)
+		flags = 0;
+
+	if (flags != -1)
+		memory_failure_queue(pfn, 0, flags);
+#endif
+}
+
 static void ghes_do_proc(struct ghes *ghes,
 			 const struct acpi_hest_generic_status *estatus)
 {
@@ -424,19 +435,8 @@
 			mem_err = (struct cper_sec_mem_err *)(gdata+1);
 			ghes_edac_report_mem_error(ghes, sev, mem_err);
 
-#ifdef CONFIG_X86_MCE
-			apei_mce_report_mem_error(sev == GHES_SEV_CORRECTED,
-						  mem_err);
-#endif
-#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE
-			if (sev == GHES_SEV_RECOVERABLE &&
-			    sec_sev == GHES_SEV_RECOVERABLE &&
-			    mem_err->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS) {
-				unsigned long pfn;
-				pfn = mem_err->physical_addr >> PAGE_SHIFT;
-				memory_failure_queue(pfn, 0, 0);
-			}
-#endif
+			arch_apei_report_mem_error(sev, mem_err);
+			ghes_handle_memory_failure(gdata, sev);
 		}
 #ifdef CONFIG_ACPI_APEI_PCIEAER
 		else if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
@@ -449,9 +449,19 @@
 			    pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
 				unsigned int devfn;
 				int aer_severity;
+
 				devfn = PCI_DEVFN(pcie_err->device_id.device,
 						  pcie_err->device_id.function);
 				aer_severity = cper_severity_to_aer(sev);
+
+				/*
+				 * If firmware reset the component to contain
+				 * the error, we must reinitialize it before
+				 * use, so treat it as a fatal AER error.
+				 */
+				if (gdata->flags & CPER_SEC_RESET)
+					aer_severity = AER_FATAL;
+
 				aer_recover_queue(pcie_err->device_id.segment,
 						  pcie_err->device_id.bus,
 						  devfn, aer_severity,
@@ -483,7 +493,7 @@
 	snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
 	printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
 	       pfx_seq, generic->header.source_id);
-	apei_estatus_print(pfx_seq, estatus);
+	cper_estatus_print(pfx_seq, estatus);
 }
 
 static int ghes_print_estatus(const char *pfx,
@@ -518,7 +528,7 @@
 	struct ghes_estatus_cache *cache;
 	struct acpi_hest_generic_status *cache_estatus;
 
-	len = apei_estatus_len(estatus);
+	len = cper_estatus_len(estatus);
 	rcu_read_lock();
 	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
 		cache = rcu_dereference(ghes_estatus_caches[i]);
@@ -553,7 +563,7 @@
 		atomic_dec(&ghes_estatus_cache_alloced);
 		return NULL;
 	}
-	len = apei_estatus_len(estatus);
+	len = cper_estatus_len(estatus);
 	cache_len = GHES_ESTATUS_CACHE_LEN(len);
 	cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len);
 	if (!cache) {
@@ -573,7 +583,7 @@
 {
 	u32 len;
 
-	len = apei_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
+	len = cper_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
 	len = GHES_ESTATUS_CACHE_LEN(len);
 	gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len);
 	atomic_dec(&ghes_estatus_cache_alloced);
@@ -702,19 +712,31 @@
 	return ret;
 }
 
-static struct llist_node *llist_nodes_reverse(struct llist_node *llnode)
-{
-	struct llist_node *next, *tail = NULL;
+static struct notifier_block ghes_notifier_sci = {
+	.notifier_call = ghes_notify_sci,
+};
 
-	while (llnode) {
-		next = llnode->next;
-		llnode->next = tail;
-		tail = llnode;
-		llnode = next;
-	}
+#ifdef CONFIG_HAVE_ACPI_APEI_NMI
+/*
+ * printk is not safe in NMI context.  So in NMI handler, we allocate
+ * required memory from lock-less memory allocator
+ * (ghes_estatus_pool), save estatus into it, put them into lock-less
+ * list (ghes_estatus_llist), then delay printk into IRQ context via
+ * irq_work (ghes_proc_irq_work).  ghes_estatus_size_request record
+ * required pool size by all NMI error source.
+ */
+static struct llist_head ghes_estatus_llist;
+static struct irq_work ghes_proc_irq_work;
 
-	return tail;
-}
+/*
+ * NMI may be triggered on any CPU, so ghes_in_nmi is used for
+ * having only one concurrent reader.
+ */
+static atomic_t ghes_in_nmi = ATOMIC_INIT(0);
+
+static LIST_HEAD(ghes_nmi);
+
+static int ghes_panic_timeout	__read_mostly = 30;
 
 static void ghes_proc_in_irq(struct irq_work *irq_work)
 {
@@ -729,13 +751,13 @@
 	 * Because the time order of estatus in list is reversed,
 	 * revert it back to proper order.
 	 */
-	llnode = llist_nodes_reverse(llnode);
+	llnode = llist_reverse_order(llnode);
 	while (llnode) {
 		next = llnode->next;
 		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
 					   llnode);
 		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
-		len = apei_estatus_len(estatus);
+		len = cper_estatus_len(estatus);
 		node_len = GHES_ESTATUS_NODE_LEN(len);
 		ghes_do_proc(estatus_node->ghes, estatus);
 		if (!ghes_estatus_cached(estatus)) {
@@ -762,12 +784,12 @@
 	 * Because the time order of estatus in list is reversed,
 	 * revert it back to proper order.
 	 */
-	llnode = llist_nodes_reverse(llnode);
+	llnode = llist_reverse_order(llnode);
 	while (llnode) {
 		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
 					   llnode);
 		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
-		len = apei_estatus_len(estatus);
+		len = cper_estatus_len(estatus);
 		node_len = GHES_ESTATUS_NODE_LEN(len);
 		generic = estatus_node->generic;
 		ghes_print_estatus(NULL, generic, estatus);
@@ -775,80 +797,79 @@
 	}
 }
 
+/* Save estatus for further processing in IRQ context */
+static void __process_error(struct ghes *ghes)
+{
+#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+	u32 len, node_len;
+	struct ghes_estatus_node *estatus_node;
+	struct acpi_hest_generic_status *estatus;
+
+	if (ghes_estatus_cached(ghes->estatus))
+		return;
+
+	len = cper_estatus_len(ghes->estatus);
+	node_len = GHES_ESTATUS_NODE_LEN(len);
+
+	estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool, node_len);
+	if (!estatus_node)
+		return;
+
+	estatus_node->ghes = ghes;
+	estatus_node->generic = ghes->generic;
+	estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
+	memcpy(estatus, ghes->estatus, len);
+	llist_add(&estatus_node->llnode, &ghes_estatus_llist);
+#endif
+}
+
+static void __ghes_panic(struct ghes *ghes)
+{
+	oops_begin();
+	ghes_print_queued_estatus();
+	__ghes_print_estatus(KERN_EMERG, ghes->generic, ghes->estatus);
+
+	/* reboot to log the error! */
+	if (panic_timeout == 0)
+		panic_timeout = ghes_panic_timeout;
+	panic("Fatal hardware error!");
+}
+
 static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
 {
-	struct ghes *ghes, *ghes_global = NULL;
-	int sev, sev_global = -1;
-	int ret = NMI_DONE;
+	struct ghes *ghes;
+	int sev, ret = NMI_DONE;
+
+	if (!atomic_add_unless(&ghes_in_nmi, 1, 1))
+		return ret;
 
-	raw_spin_lock(&ghes_nmi_lock);
 	list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
 		if (ghes_read_estatus(ghes, 1)) {
 			ghes_clear_estatus(ghes);
 			continue;
+		} else {
+			ret = NMI_HANDLED;
 		}
-		sev = ghes_severity(ghes->estatus->error_severity);
-		if (sev > sev_global) {
-			sev_global = sev;
-			ghes_global = ghes;
-		}
-		ret = NMI_HANDLED;
-	}
-
-	if (ret == NMI_DONE)
-		goto out;
-
-	if (sev_global >= GHES_SEV_PANIC) {
-		oops_begin();
-		ghes_print_queued_estatus();
-		__ghes_print_estatus(KERN_EMERG, ghes_global->generic,
-				     ghes_global->estatus);
-		/* reboot to log the error! */
-		if (panic_timeout == 0)
-			panic_timeout = ghes_panic_timeout;
-		panic("Fatal hardware error!");
-	}
+		sev = ghes_severity(ghes->estatus->error_severity);
+		if (sev >= GHES_SEV_PANIC)
+			__ghes_panic(ghes);
 
-	list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
-#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
-		u32 len, node_len;
-		struct ghes_estatus_node *estatus_node;
-		struct acpi_hest_generic_status *estatus;
-#endif
 		if (!(ghes->flags & GHES_TO_CLEAR))
 			continue;
-#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
-		if (ghes_estatus_cached(ghes->estatus))
-			goto next;
-		/* Save estatus for further processing in IRQ context */
-		len = apei_estatus_len(ghes->estatus);
-		node_len = GHES_ESTATUS_NODE_LEN(len);
-		estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool,
-						      node_len);
-		if (estatus_node) {
-			estatus_node->ghes = ghes;
-			estatus_node->generic = ghes->generic;
-			estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
-			memcpy(estatus, ghes->estatus, len);
-			llist_add(&estatus_node->llnode, &ghes_estatus_llist);
-		}
-next:
-#endif
+
+		__process_error(ghes);
 		ghes_clear_estatus(ghes);
 	}
+
 #ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
-	irq_work_queue(&ghes_proc_irq_work);
+	if (ret == NMI_HANDLED)
+		irq_work_queue(&ghes_proc_irq_work);
 #endif
-
-out:
-	raw_spin_unlock(&ghes_nmi_lock);
+	atomic_dec(&ghes_in_nmi);
 
 	return ret;
 }
 
-static struct notifier_block ghes_notifier_sci = {
-	.notifier_call = ghes_notify_sci,
-};
-
 static unsigned long ghes_esource_prealloc_size(
 	const struct acpi_hest_generic *generic)
 {
@@ -864,11 +885,71 @@
 	return prealloc_size;
 }
 
+static void ghes_estatus_pool_shrink(unsigned long len)
+{
+	ghes_estatus_pool_size_request -= PAGE_ALIGN(len);
+}
+
+static void ghes_nmi_add(struct ghes *ghes)
+{
+	unsigned long len;
+
+	len = ghes_esource_prealloc_size(ghes->generic);
+	ghes_estatus_pool_expand(len);
+	mutex_lock(&ghes_list_mutex);
+	if (list_empty(&ghes_nmi))
+		register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0, "ghes");
+	list_add_rcu(&ghes->list, &ghes_nmi);
+	mutex_unlock(&ghes_list_mutex);
+}
+
+static void ghes_nmi_remove(struct ghes *ghes)
+{
+	unsigned long len;
+
+	mutex_lock(&ghes_list_mutex);
+	list_del_rcu(&ghes->list);
+	if (list_empty(&ghes_nmi))
+		unregister_nmi_handler(NMI_LOCAL, "ghes");
+	mutex_unlock(&ghes_list_mutex);
+	/*
+	 * To synchronize with NMI handler, ghes can only be
+	 * freed after NMI handler finishes.
+ */ + synchronize_rcu(); + len = ghes_esource_prealloc_size(ghes->generic); + ghes_estatus_pool_shrink(len); +} + +static void ghes_nmi_init_cxt(void) +{ + init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq); +} +#else /* CONFIG_HAVE_ACPI_APEI_NMI */ +static inline void ghes_nmi_add(struct ghes *ghes) +{ + pr_err(GHES_PFX "ID: %d, trying to add NMI notification which is not supported!\n", + ghes->generic->header.source_id); + BUG(); +} + +static inline void ghes_nmi_remove(struct ghes *ghes) +{ + pr_err(GHES_PFX "ID: %d, trying to remove NMI notification which is not supported!\n", + ghes->generic->header.source_id); + BUG(); +} + +static inline void ghes_nmi_init_cxt(void) +{ +} +#endif /* CONFIG_HAVE_ACPI_APEI_NMI */ + static int ghes_probe(struct platform_device *ghes_dev) { struct acpi_hest_generic *generic; struct ghes *ghes = NULL; - unsigned long len; + int rc = -EINVAL; generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data; @@ -879,7 +960,13 @@ case ACPI_HEST_NOTIFY_POLLED: case ACPI_HEST_NOTIFY_EXTERNAL: case ACPI_HEST_NOTIFY_SCI: + break; case ACPI_HEST_NOTIFY_NMI: + if (!IS_ENABLED(CONFIG_HAVE_ACPI_APEI_NMI)) { + pr_warn(GHES_PFX "Generic hardware error source: %d notified via NMI interrupt is not supported!\n", + generic->header.source_id); + goto err; + } break; case ACPI_HEST_NOTIFY_LOCAL: pr_warning(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n", @@ -940,14 +1027,7 @@ mutex_unlock(&ghes_list_mutex); break; case ACPI_HEST_NOTIFY_NMI: - len = ghes_esource_prealloc_size(generic); - ghes_estatus_pool_expand(len); - mutex_lock(&ghes_list_mutex); - if (list_empty(&ghes_nmi)) - register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0, - "ghes"); - list_add_rcu(&ghes->list, &ghes_nmi); - mutex_unlock(&ghes_list_mutex); + ghes_nmi_add(ghes); break; default: BUG(); @@ -969,7 +1049,6 @@ { struct ghes *ghes; struct acpi_hest_generic *generic; - unsigned long len; ghes = platform_get_drvdata(ghes_dev); generic = ghes->generic; @@ -990,18 +1069,7 @@ mutex_unlock(&ghes_list_mutex); break; case ACPI_HEST_NOTIFY_NMI: - mutex_lock(&ghes_list_mutex); - list_del_rcu(&ghes->list); - if (list_empty(&ghes_nmi)) - unregister_nmi_handler(NMI_LOCAL, "ghes"); - mutex_unlock(&ghes_list_mutex); - /* - * To synchronize with NMI handler, ghes can only be - * freed after NMI handler finishes. - */ - synchronize_rcu(); - len = ghes_esource_prealloc_size(generic); - ghes_estatus_pool_shrink(len); + ghes_nmi_remove(ghes); break; default: BUG(); @@ -1022,7 +1090,6 @@ static struct platform_driver ghes_platform_driver = { .driver = { .name = "GHES", - .owner = THIS_MODULE, }, .probe = ghes_probe, .remove = ghes_remove, @@ -1045,7 +1112,7 @@ return -EINVAL; } - init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq); + ghes_nmi_init_cxt(); rc = ghes_ioremap_init(); if (rc)