--- zzzz-none-000/linux-3.10.107/arch/tile/mm/fault.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/arch/tile/mm/fault.c	2021-02-04 17:41:59.000000000 +0000
@@ -34,6 +34,8 @@
 #include <linux/elf.h>
 #include <linux/syscalls.h>
 #include <linux/uaccess.h>
+#include <linux/kdebug.h>
+#include <linux/context_tracking.h>
 
 #include <asm/pgalloc.h>
 #include <asm/sections.h>
@@ -122,10 +124,9 @@
 	pmd_k = pmd_offset(pud_k, address);
 	if (!pmd_present(*pmd_k))
 		return NULL;
-	if (!pmd_present(*pmd)) {
+	if (!pmd_present(*pmd))
 		set_pmd(pmd, *pmd_k);
-		arch_flush_lazy_mmu_mode();
-	} else
+	else
 		BUG_ON(pmd_ptfn(*pmd) != pmd_ptfn(*pmd_k));
 	return pmd_k;
 }
@@ -149,8 +150,6 @@
 	pmd_k = vmalloc_sync_one(pgd, address);
 	if (!pmd_k)
 		return -1;
-	if (pmd_huge(*pmd_k))
-		return 0;	/* support TILE huge_vmap() API */
 	pte_k = pte_offset_kernel(pmd_k, address);
 	if (!pte_present(*pte_k))
 		return -1;
@@ -171,8 +170,7 @@
 		while (pte_migrating(*pte)) {
 			barrier();
 			if (++retries > bound)
-				panic("Hit migrating PTE (%#llx) and"
-				      " page PFN %#lx still migrating",
+				panic("Hit migrating PTE (%#llx) and page PFN %#lx still migrating",
 				      pte->val, pte_pfn(*pte));
 		}
 	}
@@ -282,7 +280,7 @@
 
 	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
-	is_kernel_mode = (EX1_PL(regs->ex1) != USER_PL);
+	is_kernel_mode = !user_mode(regs);
 
 	tsk = validate_current();
 
@@ -294,11 +292,10 @@
 	 */
 	stack_offset = stack_pointer & (THREAD_SIZE-1);
 	if (stack_offset < THREAD_SIZE / 8) {
-		pr_alert("Potential stack overrun: sp %#lx\n",
-		       stack_pointer);
+		pr_alert("Potential stack overrun: sp %#lx\n", stack_pointer);
 		show_regs(regs);
 		pr_alert("Killing current process %d/%s\n",
-		       tsk->pid, tsk->comm);
+			 tsk->pid, tsk->comm);
 		do_group_exit(SIGKILL);
 	}
 
@@ -357,9 +354,9 @@
 
 	/*
 	 * If we're in an interrupt, have no user context or are running in an
-	 * atomic region then we must not take the fault.
+	 * region with pagefaults disabled then we must not take the fault.
 	 */
-	if (in_atomic() || !mm) {
+	if (pagefault_disabled() || !mm) {
 		vma = NULL;  /* happy compiler */
 		goto bad_area_nosemaphore;
 	}
@@ -423,7 +420,7 @@
 	} else if (write) {
 #ifdef TEST_VERIFY_AREA
 		if (!is_page_fault && regs->cs == KERNEL_CS)
-			pr_err("WP fault at "REGFMT"\n", regs->eip);
+			pr_err("WP fault at " REGFMT "\n", regs->eip);
 #endif
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
@@ -470,28 +467,15 @@
 		}
 	}
 
-#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
-	/*
-	 * If this was an asynchronous fault,
-	 * restart the appropriate engine.
-	 */
-	switch (fault_num) {
 #if CHIP_HAS_TILE_DMA()
+	/* If this was a DMA TLB fault, restart the DMA engine. */
+	switch (fault_num) {
 	case INT_DMATLB_MISS:
 	case INT_DMATLB_MISS_DWNCL:
 	case INT_DMATLB_ACCESS:
 	case INT_DMATLB_ACCESS_DWNCL:
 		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
 		break;
-#endif
-#if CHIP_HAS_SN_PROC()
-	case INT_SNITLB_MISS:
-	case INT_SNITLB_MISS_DWNCL:
-		__insn_mtspr(SPR_SNCTL,
-			     __insn_mfspr(SPR_SNCTL) &
-			     ~SPR_SNCTL__FRZPROC_MASK);
-		break;
-#endif
 	}
 #endif
 
@@ -536,16 +520,15 @@
 		pte_t *pte = lookup_address(address);
 
 		if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
-			pr_crit("kernel tried to execute"
-				" non-executable page - exploit attempt?"
-				" (uid: %d)\n", current->uid);
+			pr_crit("kernel tried to execute non-executable page - exploit attempt? (uid: %d)\n",
+				current->uid);
 	}
 #endif
 	if (address < PAGE_SIZE)
 		pr_alert("Unable to handle kernel NULL pointer dereference\n");
 	else
 		pr_alert("Unable to handle kernel paging request\n");
-	pr_alert(" at virtual address "REGFMT", pc "REGFMT"\n",
+	pr_alert(" at virtual address " REGFMT ", pc " REGFMT "\n",
 		 address, regs->pc);
 
 	show_regs(regs);
@@ -592,9 +575,10 @@
 #ifndef __tilegx__
 
 /* We must release ICS before panicking or we won't get anywhere. */
-#define ics_panic(fmt, ...) do { \
-	__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \
-	panic(fmt, __VA_ARGS__); \
+#define ics_panic(fmt, ...)					\
+do {								\
+	__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0);	\
+	panic(fmt, ##__VA_ARGS__);				\
 } while (0)
 
 /*
@@ -632,8 +616,7 @@
 		      fault_num != INT_DTLB_ACCESS)) {
 		unsigned long old_pc = regs->pc;
 		regs->pc = pc;
-		ics_panic("Bad ICS page fault args:"
-			  " old PC %#lx, fault %d/%d at %#lx\n",
+		ics_panic("Bad ICS page fault args: old PC %#lx, fault %d/%d at %#lx",
 			  old_pc, fault_num, write, address);
 	}
 
@@ -686,8 +669,8 @@
 #endif
 	fixup = search_exception_tables(pc);
 	if (!fixup)
-		ics_panic("ICS atomic fault not in table:"
-			  " PC %#lx, fault %d", pc, fault_num);
+		ics_panic("ICS atomic fault not in table: PC %#lx, fault %d",
+			  pc, fault_num);
 	regs->pc = fixup->fixup;
 	regs->ex1 = PL_ICS_EX1(KERNEL_PL, 0);
 }
@@ -716,13 +699,64 @@
  * interrupt away appropriately and return immediately.  We can't do
  * page faults for user code while in kernel mode.
  */
-void do_page_fault(struct pt_regs *regs, int fault_num,
-		   unsigned long address, unsigned long write)
+static inline void __do_page_fault(struct pt_regs *regs, int fault_num,
+				   unsigned long address, unsigned long write)
 {
 	int is_page_fault;
 
+#ifdef CONFIG_KPROBES
+	/*
+	 * This is to notify the fault handler of the kprobes.  The
+	 * exception code is redundant as it is also carried in REGS,
+	 * but we pass it anyhow.
+	 */
+	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
+		       regs->faultnum, SIGSEGV) == NOTIFY_STOP)
+		return;
+#endif
+
+#ifdef __tilegx__
+	/*
+	 * We don't need early do_page_fault_ics() support, since unlike
+	 * Pro we don't need to worry about unlocking the atomic locks.
+	 * There is only one current case in GX where we touch any memory
+	 * under ICS other than our own kernel stack, and we handle that
+	 * here.  (If we crash due to trying to touch our own stack,
+	 * we're in too much trouble for C code to help out anyway.)
+	 */
+	if (write & ~1) {
+		unsigned long pc = write & ~1;
+		if (pc >= (unsigned long) __start_unalign_asm_code &&
+		    pc < (unsigned long) __end_unalign_asm_code) {
+			struct thread_info *ti = current_thread_info();
+			/*
+			 * Our EX_CONTEXT is still what it was from the
+			 * initial unalign exception, but now we've faulted
+			 * on the JIT page.  We would like to complete the
+			 * page fault however is appropriate, and then retry
+			 * the instruction that caused the unalign exception.
+			 * Our state has been "corrupted" by setting the low
+			 * bit in "sp", and stashing r0..r3 in the
+			 * thread_info area, so we revert all of that, then
+			 * continue as if this were a normal page fault.
+			 */
+			regs->sp &= ~1UL;
+			regs->regs[0] = ti->unalign_jit_tmp[0];
+			regs->regs[1] = ti->unalign_jit_tmp[1];
+			regs->regs[2] = ti->unalign_jit_tmp[2];
+			regs->regs[3] = ti->unalign_jit_tmp[3];
+			write &= 1;
+		} else {
+			pr_alert("%s/%d: ICS set at page fault at %#lx: %#lx\n",
+				 current->comm, current->pid, pc, address);
+			show_regs(regs);
+			do_group_exit(SIGKILL);
+		}
+	}
+#else
 	/* This case should have been handled by do_page_fault_ics(). */
 	BUG_ON(write & ~1);
+#endif
 
 #if CHIP_HAS_TILE_DMA()
 	/*
@@ -751,10 +785,6 @@
 	case INT_DMATLB_MISS:
 	case INT_DMATLB_MISS_DWNCL:
 #endif
-#if CHIP_HAS_SN_PROC()
-	case INT_SNITLB_MISS:
-	case INT_SNITLB_MISS_DWNCL:
-#endif
 		is_page_fault = 1;
 		break;
 
@@ -770,8 +800,8 @@
 		panic("Bad fault number %d in do_page_fault", fault_num);
 	}
 
-#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
-	if (EX1_PL(regs->ex1) != USER_PL) {
+#if CHIP_HAS_TILE_DMA()
+	if (!user_mode(regs)) {
 		struct async_tlb *async;
 		switch (fault_num) {
 #if CHIP_HAS_TILE_DMA()
@@ -782,12 +812,6 @@
 			async = &current->thread.dma_async_tlb;
 			break;
 #endif
-#if CHIP_HAS_SN_PROC()
-		case INT_SNITLB_MISS:
-		case INT_SNITLB_MISS_DWNCL:
-			async = &current->thread.sn_async_tlb;
-			break;
-#endif
 		default:
 			async = NULL;
 		}
@@ -801,8 +825,7 @@
 
 		set_thread_flag(TIF_ASYNC_TLB);
 		if (async->fault_num != 0) {
-			panic("Second async fault %d;"
-			      " old fault was %d (%#lx/%ld)",
+			panic("Second async fault %d; old fault was %d (%#lx/%ld)",
 			      fault_num, async->fault_num,
 			      address, write);
 		}
@@ -819,35 +842,23 @@
 	handle_page_fault(regs, fault_num, is_page_fault, address, write);
 }
 
-
-#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
-/*
- * Check an async_tlb structure to see if a deferred fault is waiting,
- * and if so pass it to the page-fault code.
- */
-static void handle_async_page_fault(struct pt_regs *regs,
-				    struct async_tlb *async)
+void do_page_fault(struct pt_regs *regs, int fault_num,
		   unsigned long address, unsigned long write)
 {
-	if (async->fault_num) {
-		/*
-		 * Clear async->fault_num before calling the page-fault
-		 * handler so that if we re-interrupt before returning
-		 * from the function we have somewhere to put the
-		 * information from the new interrupt.
-		 */
-		int fault_num = async->fault_num;
-		async->fault_num = 0;
-		handle_page_fault(regs, fault_num, async->is_fault,
-				  async->address, async->is_write);
-	}
+	enum ctx_state prev_state = exception_enter();
+	__do_page_fault(regs, fault_num, address, write);
+	exception_exit(prev_state);
 }
 
+#if CHIP_HAS_TILE_DMA()
 /*
  * This routine effectively re-issues asynchronous page faults
  * when we are returning to user space.
  */
 void do_async_page_fault(struct pt_regs *regs)
 {
+	struct async_tlb *async = &current->thread.dma_async_tlb;
+
 	/*
 	 * Clear thread flag early.  If we re-interrupt while processing
 	 * code here, we will reset it and recall this routine before
@@ -855,21 +866,28 @@
 	 */
 	clear_thread_flag(TIF_ASYNC_TLB);
 
-#if CHIP_HAS_TILE_DMA()
-	handle_async_page_fault(regs, &current->thread.dma_async_tlb);
-#endif
-#if CHIP_HAS_SN_PROC()
-	handle_async_page_fault(regs, &current->thread.sn_async_tlb);
-#endif
+	if (async->fault_num) {
+		/*
+		 * Clear async->fault_num before calling the page-fault
+		 * handler so that if we re-interrupt before returning
+		 * from the function we have somewhere to put the
+		 * information from the new interrupt.
+		 */
+		int fault_num = async->fault_num;
+		async->fault_num = 0;
+		handle_page_fault(regs, fault_num, async->is_fault,
+				  async->address, async->is_write);
	}
 }
-#endif /* CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() */
+#endif /* CHIP_HAS_TILE_DMA() */
 
 
 void vmalloc_sync_all(void)
 {
 #ifdef __tilegx__
 	/* Currently all L1 kernel pmd's are static and shared. */
-	BUG_ON(pgd_index(VMALLOC_END) != pgd_index(VMALLOC_START));
+	BUILD_BUG_ON(pgd_index(VMALLOC_END - PAGE_SIZE) !=
+		     pgd_index(VMALLOC_START));
 #else
 	/*
 	 * Note that races in the updates of insync and start aren't
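
Note on the do_page_fault split above: the vendor tree adopts the upstream context-tracking idiom, in which the exported entry point does no real work itself and merely brackets the renamed handler with exception_enter()/exception_exit() so that RCU and NO_HZ_FULL account the user-to-kernel transition. A minimal standalone sketch of that idiom follows; it is illustrative only, not part of the patch, and handle_my_fault/__handle_my_fault are hypothetical names standing in for the architecture's real fault entry point.

#include <linux/context_tracking.h>
#include <linux/ptrace.h>

/* Hypothetical handler body: the real fault resolution happens here. */
static inline void __handle_my_fault(struct pt_regs *regs,
				     unsigned long address)
{
	/* ... look up the VMA, call handle_mm_fault(), etc. ... */
}

/*
 * Entry point called from the low-level exception path: tell the
 * context-tracking code we left user mode before doing any real work,
 * then restore the previous context state on the way out.
 */
void handle_my_fault(struct pt_regs *regs, unsigned long address)
{
	enum ctx_state prev_state = exception_enter();

	__handle_my_fault(regs, address);
	exception_exit(prev_state);
}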