--- zzzz-none-000/linux-3.10.107/arch/arm/include/asm/kvm_mmu.h	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/arch/arm/include/asm/kvm_mmu.h	2021-02-04 17:41:59.000000000 +0000
@@ -37,8 +37,14 @@
  */
 #define TRAMPOLINE_VA		UL(CONFIG_VECTORS_BASE)
 
+/*
+ * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels.
+ */
+#define KVM_MMU_CACHE_MIN_PAGES	2
+
 #ifndef __ASSEMBLY__
 
+#include <linux/highmem.h>
 #include <asm/cacheflush.h>
 #include <asm/pgalloc.h>
 
@@ -47,10 +53,11 @@
 void free_boot_hyp_pgd(void);
 void free_hyp_pgds(void);
 
+void stage2_unmap_vm(struct kvm *kvm);
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
 void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-			  phys_addr_t pa, unsigned long size);
+			  phys_addr_t pa, unsigned long size, bool writable);
 
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
 
@@ -62,9 +69,15 @@
 int kvm_mmu_init(void);
 void kvm_clear_hyp_idmap(void);
 
+static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd)
+{
+	*pmd = new_pmd;
+	flush_pmd_entry(pmd);
+}
+
 static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
 {
-	pte_val(*pte) = new_pte;
+	*pte = new_pte;
 	/*
 	 * flush_pmd_entry just takes a void pointer and cleans the necessary
 	 * cache entries, so we can reuse the function for ptes.
@@ -72,20 +85,14 @@
 	flush_pmd_entry(pte);
 }
 
-static inline bool kvm_is_write_fault(unsigned long hsr)
+static inline void kvm_clean_pgd(pgd_t *pgd)
 {
-	unsigned long hsr_ec = hsr >> HSR_EC_SHIFT;
-	if (hsr_ec == HSR_EC_IABT)
-		return false;
-	else if ((hsr & HSR_ISV) && !(hsr & HSR_WNR))
-		return false;
-	else
-		return true;
+	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
 }
 
-static inline void kvm_clean_pgd(pgd_t *pgd)
+static inline void kvm_clean_pmd(pmd_t *pmd)
 {
-	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
+	clean_dcache_area(pmd, PTRS_PER_PMD * sizeof(pmd_t));
 }
 
 static inline void kvm_clean_pmd_entry(pmd_t *pmd)
@@ -103,9 +110,81 @@
 	pte_val(*pte) |= L_PTE_S2_RDWR;
 }
 
+static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
+{
+	pmd_val(*pmd) |= L_PMD_S2_RDWR;
+}
+
+static inline void kvm_set_s2pte_readonly(pte_t *pte)
+{
+	pte_val(*pte) = (pte_val(*pte) & ~L_PTE_S2_RDWR) | L_PTE_S2_RDONLY;
+}
+
+static inline bool kvm_s2pte_readonly(pte_t *pte)
+{
+	return (pte_val(*pte) & L_PTE_S2_RDWR) == L_PTE_S2_RDONLY;
+}
+
+static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
+{
+	pmd_val(*pmd) = (pmd_val(*pmd) & ~L_PMD_S2_RDWR) | L_PMD_S2_RDONLY;
+}
+
+static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
+{
+	return (pmd_val(*pmd) & L_PMD_S2_RDWR) == L_PMD_S2_RDONLY;
+}
+
+
+/* Open coded p*d_addr_end that can deal with 64bit addresses */
+#define kvm_pgd_addr_end(addr, end)					\
+({	u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;		\
+	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
+})
+
+#define kvm_pud_addr_end(addr,end)		(end)
+
+#define kvm_pmd_addr_end(addr, end)					\
+({	u64 __boundary = ((addr) + PMD_SIZE) & PMD_MASK;		\
+	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
+})
+
+#define kvm_pgd_index(addr)			pgd_index(addr)
+
+static inline bool kvm_page_empty(void *ptr)
+{
+	struct page *ptr_page = virt_to_page(ptr);
+	return page_count(ptr_page) == 1;
+}
+
+#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
+#define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
+#define kvm_pud_table_empty(kvm, pudp) (0)
+
+#define KVM_PREALLOC_LEVEL	0
+
+static inline void *kvm_get_hwpgd(struct kvm *kvm)
+{
+	return kvm->arch.pgd;
+}
+
+static inline unsigned int kvm_get_hwpgd_size(void)
+{
+	return PTRS_PER_S2_PGD * sizeof(pgd_t);
+}
+
 struct kvm;
 
-static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
+#define kvm_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l))
+
+static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
+{
+	return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
+}
+
+static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+					       unsigned long size,
+					       bool ipa_uncached)
 {
 	/*
 	 * If we are going to insert an instruction page and the icache is
@@ -118,17 +197,80 @@
 	 *
 	 * VIVT caches are tagged using both the ASID and the VMID and doesn't
 	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
+	 *
+	 * We need to do this through a kernel mapping (using the
+	 * user-space mapping has proved to be the wrong
+	 * solution). For that, we need to kmap one page at a time,
+	 * and iterate over the range.
 	 */
-	if (icache_is_pipt()) {
-		unsigned long hva = gfn_to_hva(kvm, gfn);
-		__cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
-	} else if (!icache_is_vivt_asid_tagged()) {
+
+	VM_BUG_ON(size & ~PAGE_MASK);
+
+	while (size) {
+		void *va = kmap_atomic_pfn(pfn);
+
+		kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+		if (icache_is_pipt())
+			__cpuc_coherent_user_range((unsigned long)va,
+						   (unsigned long)va + PAGE_SIZE);
+
+		size -= PAGE_SIZE;
+		pfn++;
+
+		kunmap_atomic(va);
+	}
+
+	if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
 		/* any kind of VIPT cache */
 		__flush_icache_all();
 	}
 }
 
-#define kvm_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l))
+static inline void __kvm_flush_dcache_pte(pte_t pte)
+{
+	void *va = kmap_atomic(pte_page(pte));
+
+	kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+	kunmap_atomic(va);
+}
+
+static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
+{
+	unsigned long size = PMD_SIZE;
+	pfn_t pfn = pmd_pfn(pmd);
+
+	while (size) {
+		void *va = kmap_atomic_pfn(pfn);
+
+		kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+		pfn++;
+		size -= PAGE_SIZE;
+
+		kunmap_atomic(va);
+	}
+}
+
+static inline void __kvm_flush_dcache_pud(pud_t pud)
+{
+}
+
+#define kvm_virt_to_phys(x)		virt_to_idmap((unsigned long)(x))
+
+void kvm_set_way_flush(struct kvm_vcpu *vcpu);
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
+
+static inline bool __kvm_cpu_uses_extended_idmap(void)
+{
+	return false;
+}
+
+static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
+				       pgd_t *hyp_pgd,
+				       pgd_t *merged_hyp_pgd,
+				       unsigned long hyp_idmap_start) { }
 
 #endif	/* !__ASSEMBLY__ */
 