--- zzzz-none-000/linux-3.10.107/arch/x86/include/asm/paravirt.h	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/arch/x86/include/asm/paravirt.h	2021-02-04 17:41:59.000000000 +0000
@@ -19,6 +19,12 @@
 	return pv_info.paravirt_enabled;
 }
 
+static inline int paravirt_has_feature(unsigned int feature)
+{
+	WARN_ON_ONCE(!pv_info.paravirt_enabled);
+	return (pv_info.features & feature);
+}
+
 static inline void load_sp0(struct tss_struct *tss,
 			     struct thread_struct *thread)
 {
@@ -80,16 +86,16 @@
 	PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
 }
 
-static inline unsigned long read_cr4(void)
+static inline unsigned long __read_cr4(void)
 {
 	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
 }
-static inline unsigned long read_cr4_safe(void)
+static inline unsigned long __read_cr4_safe(void)
 {
 	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
 }
 
-static inline void write_cr4(unsigned long x)
+static inline void __write_cr4(unsigned long x)
 {
 	PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
 }
@@ -153,7 +159,11 @@
 		val = paravirt_read_msr(msr, &_err);	\
 } while (0)
 
-#define wrmsrl(msr, val)	wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
+static inline void wrmsrl(unsigned msr, u64 val)
+{
+	wrmsr(msr, (u32)val, (u32)(val>>32));
+}
+
 #define wrmsr_safe(msr, a, b)	paravirt_write_msr(msr, a, b)
 
 /* rdmsr with exception handling */
@@ -174,19 +184,6 @@
 	return err;
 }
 
-static inline u64 paravirt_read_tsc(void)
-{
-	return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
-}
-
-#define rdtscl(low)				\
-do {						\
-	u64 _l = paravirt_read_tsc();		\
-	low = (int)_l;				\
-} while (0)
-
-#define rdtscll(val) (val = paravirt_read_tsc())
-
 static inline unsigned long long paravirt_sched_clock(void)
 {
 	return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
@@ -215,27 +212,6 @@
 
 #define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))
 
-static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
-{
-	return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
-}
-
-#define rdtscp(low, high, aux)				\
-do {							\
-	int __aux;					\
-	unsigned long __val = paravirt_rdtscp(&__aux);	\
-	(low) = (u32)__val;				\
-	(high) = (u32)(__val >> 32);			\
-	(aux) = __aux;					\
-} while (0)
-
-#define rdtscpll(val, aux)				\
-do {							\
-	unsigned long __aux;				\
-	val = paravirt_rdtscp(&__aux);			\
-	(aux) = __aux;					\
-} while (0)
-
 static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
 {
 	PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
@@ -330,13 +306,13 @@
 	PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
 }
 
-static inline void arch_dup_mmap(struct mm_struct *oldmm,
-				 struct mm_struct *mm)
+static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
+					  struct mm_struct *mm)
 {
 	PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
 }
 
-static inline void arch_exit_mmap(struct mm_struct *mm)
+static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
 {
 	PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
 }
@@ -545,7 +521,7 @@
 	PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
 }
 
-#if PAGETABLE_LEVELS >= 3
+#if CONFIG_PGTABLE_LEVELS >= 3
 static inline pmd_t __pmd(pmdval_t val)
 {
 	pmdval_t ret;
@@ -585,7 +561,7 @@
 	PVOP_VCALL2(pv_mmu_ops.set_pud, pudp, val);
 }
 
-#if PAGETABLE_LEVELS == 4
+#if CONFIG_PGTABLE_LEVELS == 4
 static inline pud_t __pud(pudval_t val)
 {
 	pudval_t ret;
@@ -636,9 +612,9 @@
 	set_pud(pudp, __pud(0));
 }
 
-#endif	/* PAGETABLE_LEVELS == 4 */
+#endif	/* CONFIG_PGTABLE_LEVELS == 4 */
 
-#endif	/* PAGETABLE_LEVELS >= 3 */
+#endif	/* CONFIG_PGTABLE_LEVELS >= 3 */
 
 #ifdef CONFIG_X86_PAE
 /* Special-case pte-setting operations for PAE, which can't update a
@@ -712,39 +688,46 @@
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
-static inline int arch_spin_is_locked(struct arch_spinlock *lock)
+#ifdef CONFIG_QUEUED_SPINLOCKS
+
+static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
+							u32 val)
 {
-	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
+	PVOP_VCALL2(pv_lock_ops.queued_spin_lock_slowpath, lock, val);
 }
 
-static inline int arch_spin_is_contended(struct arch_spinlock *lock)
+static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
 {
-	return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
+	PVOP_VCALLEE1(pv_lock_ops.queued_spin_unlock, lock);
 }
-#define arch_spin_is_contended	arch_spin_is_contended
 
-static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
+static __always_inline void pv_wait(u8 *ptr, u8 val)
 {
-	PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
+	PVOP_VCALL2(pv_lock_ops.wait, ptr, val);
 }
 
-static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock,
-						  unsigned long flags)
+static __always_inline void pv_kick(int cpu)
 {
-	PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
+	PVOP_VCALL1(pv_lock_ops.kick, cpu);
 }
 
-static __always_inline int arch_spin_trylock(struct arch_spinlock *lock)
+#else /* !CONFIG_QUEUED_SPINLOCKS */
+
+static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
+							__ticket_t ticket)
 {
-	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
+	PVOP_VCALLEE2(pv_lock_ops.lock_spinning, lock, ticket);
 }
 
-static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
+static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
+							__ticket_t ticket)
 {
-	PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
+	PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
 }
 
-#endif
+#endif /* CONFIG_QUEUED_SPINLOCKS */
+
+#endif /* SMP && PARAVIRT_SPINLOCKS */
 
 #ifdef CONFIG_X86_32
 #define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
@@ -801,9 +784,9 @@
  */
 #define PV_CALLEE_SAVE_REGS_THUNK(func)					\
 	extern typeof(func) __raw_callee_save_##func;			\
-	static void *__##func##__ __used = func;			\
 									\
 	asm(".pushsection .text;"					\
+	    ".globl __raw_callee_save_" #func " ; "			\
 	    "__raw_callee_save_" #func ": "				\
 	    PV_SAVE_ALL_CALLER_REGS					\
 	    "call " #func ";"						\
@@ -996,15 +979,20 @@
 	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),	\
 		  CLBR_NONE,						\
 		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
-
-#define ENABLE_INTERRUPTS_SYSEXIT32					\
-	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
-		  CLBR_NONE,						\
-		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
 #endif	/* CONFIG_X86_32 */
 
 #endif /* __ASSEMBLY__ */
 #else  /* CONFIG_PARAVIRT */
 # define default_banner x86_init_noop
+#ifndef __ASSEMBLY__
+static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
+					  struct mm_struct *mm)
+{
+}
+
+static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
+{
+}
+#endif /* __ASSEMBLY__ */
 #endif /* !CONFIG_PARAVIRT */
 #endif /* _ASM_X86_PARAVIRT_H */
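
Note on the CONFIG_QUEUED_SPINLOCKS hunk: the wrappers it adds (pv_wait, pv_kick,
pv_queued_spin_lock_slowpath, pv_queued_spin_unlock) only dispatch through
pv_lock_ops; a paravirtualized guest still has to point those ops at real
implementations at boot. The following is a minimal sketch of that wiring,
modeled on what upstream arch/x86/kernel/kvm.c does around v4.2. The helper
names (kvm_wait, kvm_kick_cpu, kvm_spinlock_init) mirror upstream, but the
guards are simplified (upstream additionally checks in_nmi() and picks halt()
vs safe_halt() based on the saved irq state); treat this as an assumption-laden
illustration, not the code this backport ships.

/* Sketch: wiring the pv qspinlock hooks in a KVM guest. */
#include <linux/kvm_para.h>
#include <asm/paravirt.h>
#include <asm/qspinlock.h>

static void kvm_kick_cpu(int cpu)
{
	/* Hypercall asking the host to wake the vCPU parked in kvm_wait(). */
	kvm_hypercall2(KVM_HC_KICK_CPU, 0, per_cpu(x86_cpu_to_apicid, cpu));
}

static void kvm_wait(u8 *ptr, u8 val)
{
	unsigned long flags;

	local_irq_save(flags);
	/* Halt only if the lock byte qspinlock told us to watch is unchanged. */
	if (READ_ONCE(*ptr) == val)
		safe_halt();	/* simplified: upstream also handles irqs-off */
	local_irq_restore(flags);
}

void __init kvm_spinlock_init(void)
{
	if (!kvm_para_available())
		return;
	/* The host must offer the unhalt facility for pv spinlocks. */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

	__pv_init_lock_hash();
	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = kvm_wait;
	pv_lock_ops.kick = kvm_kick_cpu;
}

The PV_CALLEE_SAVE() assignment above is also why the patch adds the ".globl"
directive to PV_CALLEE_SAVE_REGS_THUNK: the thunk symbol must be visible to the
linker once the old dummy "static void *__##func##__" reference is gone.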