--- zzzz-none-000/linux-3.10.107/arch/arc/mm/fault.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/arch/arc/mm/fault.c	2021-02-04 17:41:59.000000000 +0000
@@ -14,9 +14,18 @@
 #include
 #include
 #include
+#include
 #include
+#include
 
-static int handle_vmalloc_fault(unsigned long address)
+/*
+ * kernel virtual address is required to implement vmalloc/pkmap/fixmap
+ * Refer to asm/processor.h for System Memory Map
+ *
+ * It simply copies the PMD entry (pointer to 2nd level page table or hugepage)
+ * from swapper pgdir to task pgdir. The 2nd level table/page is thus shared
+ */
+noinline static int handle_kernel_vaddr_fault(unsigned long address)
 {
 	/*
 	 * Synchronize this task's top level page-table
@@ -51,14 +60,14 @@
 	return 1;
 }
 
-void do_page_fault(struct pt_regs *regs, int write, unsigned long address,
-		   unsigned long cause_code)
+void do_page_fault(unsigned long address, struct pt_regs *regs)
 {
 	struct vm_area_struct *vma = NULL;
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
 	siginfo_t info;
 	int fault, ret;
+	int write = regs->ecr_cause & ECR_C_PROTV_STORE;	/* ST/EX */
 	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	/*
@@ -70,8 +79,8 @@
 	 * only copy the information from the master page table,
 	 * nothing more.
 	 */
-	if (address >= VMALLOC_START && address <= VMALLOC_END) {
-		ret = handle_vmalloc_fault(address);
+	if (address >= VMALLOC_START) {
+		ret = handle_kernel_vaddr_fault(address);
 		if (unlikely(ret))
 			goto bad_area_nosemaphore;
 		else
@@ -84,7 +93,7 @@
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_atomic() || !mm)
+	if (faulthandler_disabled() || !mm)
 		goto no_context;
 
 	if (user_mode(regs))
@@ -110,7 +119,8 @@
 
 	/* Handle protection violation, execute on heap or stack */
 
-	if (cause_code == ((ECR_V_PROTV << 16) | ECR_C_PROTV_INST_FETCH))
+	if ((regs->ecr_vec == ECR_V_PROTV) &&
+	    (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
 		goto bad_area;
 
 	if (write) {
@@ -137,13 +147,20 @@
 			return;
 	}
 
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
 	if (likely(!(fault & VM_FAULT_ERROR))) {
 		if (flags & FAULT_FLAG_ALLOW_RETRY) {
 			/* To avoid updating stats twice for retry case */
-			if (fault & VM_FAULT_MAJOR)
+			if (fault & VM_FAULT_MAJOR) {
 				tsk->maj_flt++;
-			else
+				perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+					      regs, address);
+			} else {
 				tsk->min_flt++;
+				perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+					      regs, address);
+			}
 
 			if (fault & VM_FAULT_RETRY) {
 				flags &= ~FAULT_FLAG_ALLOW_RETRY;
@@ -157,7 +174,6 @@
 		return;
 	}
 
-	/* TBD: switch to pagefault_out_of_memory() */
 	if (fault & VM_FAULT_OOM)
 		goto out_of_memory;
 	else if (fault & VM_FAULT_SIGSEGV)
@@ -179,7 +195,6 @@
 	/* User mode accesses just cause a SIGSEGV */
 	if (user_mode(regs)) {
 		tsk->thread.fault_address = address;
-		tsk->thread.cause_code = cause_code;
 		info.si_signo = SIGSEGV;
 		info.si_errno = 0;
 		/* info.si_code has been set above */
@@ -200,7 +215,7 @@
 	if (fixup_exception(regs))
 		return;
 
-	die("Oops", regs, address, cause_code);
+	die("Oops", regs, address);
 
 out_of_memory:
 	up_read(&mm->mmap_sem);
@@ -219,7 +234,6 @@
 		goto no_context;
 
 	tsk->thread.fault_address = address;
-	tsk->thread.cause_code = cause_code;
 	info.si_signo = SIGBUS;
 	info.si_errno = 0;
 	info.si_code = BUS_ADRERR;
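
Note on the renamed helper: the first hunk only shows the top of handle_kernel_vaddr_fault(), whose added comment says it copies the PMD entry from the swapper pgdir into the task pgdir so the second-level table is shared. The following is a minimal sketch of that synchronization step, written against the generic kernel page-table walk API (pgd_offset()/pud_offset()/pmd_offset()/set_pmd()); it is illustrative only and is not the literal body shipped in this tree.

/*
 * Sketch: sync the faulting task's page directory with the master
 * (swapper) page directory for a kernel virtual address.
 * Assumes the generic 2-level walk helpers; not part of the hunk above.
 */
noinline static int handle_kernel_vaddr_fault(unsigned long address)
{
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	/* Task's top-level entry vs the reference entry in swapper_pg_dir */
	pgd = pgd_offset(current->active_mm, address);
	pgd_k = pgd_offset_k(address);

	if (!pgd_present(*pgd_k))
		goto bad_area;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		goto bad_area;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		goto bad_area;

	/* Copy the PMD entry: the 2nd level table/page is now shared */
	set_pmd(pmd, *pmd_k);
	return 0;

bad_area:
	return 1;
}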