--- zzzz-none-000/linux-3.10.107/arch/arm64/include/asm/io.h	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/arch/arm64/include/asm/io.h	2021-02-04 17:41:59.000000000 +0000
@@ -22,62 +22,124 @@
 #ifdef __KERNEL__

 #include <linux/types.h>
+#include <linux/blk_types.h>

 #include <asm/byteorder.h>
 #include <asm/barrier.h>
+#include <asm/memory.h>
 #include <asm/pgtable.h>
+#include <asm/early_ioremap.h>
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
+#include <linux/msm_rtb.h>
+
+#include <xen/xen.h>

 /*
  * Generic IO read/write. These perform native-endian accesses.
+ * Note that some architectures will want to re-define __raw_{read,write}w.
  */
-static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
+static inline void __raw_writeb_no_log(u8 val, volatile void __iomem *addr)
 {
 	asm volatile("strb %w0, [%1]" : : "r" (val), "r" (addr));
 }

-static inline void __raw_writew(u16 val, volatile void __iomem *addr)
+static inline void __raw_writew_no_log(u16 val, volatile void __iomem *addr)
 {
 	asm volatile("strh %w0, [%1]" : : "r" (val), "r" (addr));
 }

-static inline void __raw_writel(u32 val, volatile void __iomem *addr)
+static inline void __raw_writel_no_log(u32 val, volatile void __iomem *addr)
 {
 	asm volatile("str %w0, [%1]" : : "r" (val), "r" (addr));
 }

-static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
+static inline void __raw_writeq_no_log(u64 val, volatile void __iomem *addr)
 {
 	asm volatile("str %0, [%1]" : : "r" (val), "r" (addr));
 }

-static inline u8 __raw_readb(const volatile void __iomem *addr)
+static inline u8 __raw_readb_no_log(const volatile void __iomem *addr)
 {
 	u8 val;
-	asm volatile("ldrb %w0, [%1]" : "=r" (val) : "r" (addr));
+	asm volatile(ALTERNATIVE("ldrb %w0, [%1]",
+				 "ldarb %w0, [%1]",
+				 ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
+		     : "=r" (val) : "r" (addr));
 	return val;
 }

-static inline u16 __raw_readw(const volatile void __iomem *addr)
+static inline u16 __raw_readw_no_log(const volatile void __iomem *addr)
 {
 	u16 val;
-	asm volatile("ldrh %w0, [%1]" : "=r" (val) : "r" (addr));
+
+	asm volatile(ALTERNATIVE("ldrh %w0, [%1]",
+				 "ldarh %w0, [%1]",
+				 ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
+		     : "=r" (val) : "r" (addr));
 	return val;
 }

-static inline u32 __raw_readl(const volatile void __iomem *addr)
+static inline u32 __raw_readl_no_log(const volatile void __iomem *addr)
 {
 	u32 val;
-	asm volatile("ldr %w0, [%1]" : "=r" (val) : "r" (addr));
+	asm volatile(ALTERNATIVE("ldr %w0, [%1]",
+				 "ldar %w0, [%1]",
+				 ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
+		     : "=r" (val) : "r" (addr));
 	return val;
 }

-static inline u64 __raw_readq(const volatile void __iomem *addr)
+static inline u64 __raw_readq_no_log(const volatile void __iomem *addr)
 {
 	u64 val;
-	asm volatile("ldr %0, [%1]" : "=r" (val) : "r" (addr));
+	asm volatile(ALTERNATIVE("ldr %0, [%1]",
+				 "ldar %0, [%1]",
+				 ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
+		     : "=r" (val) : "r" (addr));
 	return val;
 }

+/*
+ * There may be cases where clients don't want to support, or can't support,
+ * the logging. The _no_log functions above can be used instead, but clients
+ * should carefully consider why they can't support the logging.
+ */
+
+#define __raw_write_logged(v, a, _t)	({ \
+	int _ret; \
+	volatile void __iomem *_a = (a); \
+	void *_addr = (void __force *)(_a); \
+	_ret = uncached_logk(LOGK_WRITEL, _addr); \
+	ETB_WAYPOINT; \
+	__raw_write##_t##_no_log((v), _a); \
+	if (_ret) \
+		LOG_BARRIER; \
+	})
+
+#define __raw_writeb(v, a)	__raw_write_logged((v), a, b)
+#define __raw_writew(v, a)	__raw_write_logged((v), a, w)
+#define __raw_writel(v, a)	__raw_write_logged((v), a, l)
+#define __raw_writeq(v, a)	__raw_write_logged((v), a, q)
+
+#define __raw_read_logged(a, _l, _t)	({ \
+	_t __a; \
+	const volatile void __iomem *_a = (const volatile void __iomem *)(a); \
+	void *_addr = (void __force *)(_a); \
+	int _ret; \
+	_ret = uncached_logk(LOGK_READL, _addr); \
+	ETB_WAYPOINT; \
+	__a = __raw_read##_l##_no_log(_a); \
+	if (_ret) \
+		LOG_BARRIER; \
+	__a; \
+	})
+
+#define __raw_readb(a)	__raw_read_logged((a), b, u8)
+#define __raw_readw(a)	__raw_read_logged((a), w, u16)
+#define __raw_readl(a)	__raw_read_logged((a), l, u32)
+#define __raw_readq(a)	__raw_read_logged((a), q, u64)
+
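The logged accessors above record each MMIO access in the vendor's uncached RTB trace buffer before the access is issued. For illustration, a minimal sketch of what __raw_writel(v, a) expands to, written as a plain function; uncached_logk(), LOGK_WRITEL, ETB_WAYPOINT and LOG_BARRIER are the vendor tracing hooks (assumed here to come from linux/msm_rtb.h):

    #include <linux/io.h>
    #include <linux/msm_rtb.h>	/* assumed location of the RTB hooks */

    static inline void raw_writel_logged_sketch(u32 v, volatile void __iomem *a)
    {
    	void *addr = (void __force *)a;
    	int logged;

    	/* record the target address in the uncached trace ring buffer */
    	logged = uncached_logk(LOGK_WRITEL, addr);
    	ETB_WAYPOINT;			/* optional ETB trace waypoint */
    	__raw_writel_no_log(v, a);	/* the actual device store */
    	if (logged)
    		LOG_BARRIER;		/* order the log entry vs. the store */
    }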
 /* IO barriers */
 #define __iormb()		rmb()
 #define __iowmb()		wmb()
@@ -89,16 +151,26 @@
  * ordering rules but do not guarantee any ordering relative to Normal memory
  * accesses.
  */
-#define readb_relaxed(c)	({ u8 __v = __raw_readb(c); __v; })
-#define readw_relaxed(c)	({ u16 __v = le16_to_cpu((__force __le16)__raw_readw(c)); __v; })
-#define readl_relaxed(c)	({ u32 __v = le32_to_cpu((__force __le32)__raw_readl(c)); __v; })
-#define readq_relaxed(c)	({ u64 __v = le64_to_cpu((__force __le64)__raw_readq(c)); __v; })
+#define readb_relaxed(c)	({ u8 __r = __raw_readb(c); __r; })
+#define readw_relaxed(c)	({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
+#define readl_relaxed(c)	({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
+#define readq_relaxed(c)	({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })

 #define writeb_relaxed(v,c)	((void)__raw_writeb((v),(c)))
 #define writew_relaxed(v,c)	((void)__raw_writew((__force u16)cpu_to_le16(v),(c)))
 #define writel_relaxed(v,c)	((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
 #define writeq_relaxed(v,c)	((void)__raw_writeq((__force u64)cpu_to_le64(v),(c)))

+#define readb_relaxed_no_log(c)	({ u8 __v = __raw_readb_no_log(c); __v; })
+#define readw_relaxed_no_log(c)	({ u16 __v = le16_to_cpu((__force __le16)__raw_readw_no_log(c)); __v; })
+#define readl_relaxed_no_log(c)	({ u32 __v = le32_to_cpu((__force __le32)__raw_readl_no_log(c)); __v; })
+#define readq_relaxed_no_log(c)	({ u64 __v = le64_to_cpu((__force __le64)__raw_readq_no_log(c)); __v; })
+
+#define writeb_relaxed_no_log(v, c)	((void)__raw_writeb_no_log((v), (c)))
+#define writew_relaxed_no_log(v, c)	((void)__raw_writew_no_log((__force u16)cpu_to_le16(v), (c)))
+#define writel_relaxed_no_log(v, c)	((void)__raw_writel_no_log((__force u32)cpu_to_le32(v), (c)))
+#define writeq_relaxed_no_log(v, c)	((void)__raw_writeq_no_log((__force u64)cpu_to_le64(v), (c)))
+
 /*
  * I/O memory access primitives. Reads are ordered relative to any
  * following Normal memory access. Writes are ordered relative to any prior
@@ -114,99 +186,22 @@
 #define writel(v,c)		({ __iowmb(); writel_relaxed((v),(c)); })
 #define writeq(v,c)		({ __iowmb(); writeq_relaxed((v),(c)); })

+#define readb_no_log(c)		({ u8 __v = readb_relaxed_no_log(c); __iormb(); __v; })
+#define readw_no_log(c)		({ u16 __v = readw_relaxed_no_log(c); __iormb(); __v; })
+#define readl_no_log(c)		({ u32 __v = readl_relaxed_no_log(c); __iormb(); __v; })
+#define readq_no_log(c)		({ u64 __v = readq_relaxed_no_log(c); __iormb(); __v; })
+
+#define writeb_no_log(v, c)	({ __iowmb(); writeb_relaxed_no_log((v), (c)); })
+#define writew_no_log(v, c)	({ __iowmb(); writew_relaxed_no_log((v), (c)); })
+#define writel_no_log(v, c)	({ __iowmb(); writel_relaxed_no_log((v), (c)); })
+#define writeq_no_log(v, c)	({ __iowmb(); writeq_relaxed_no_log((v), (c)); })
+
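The ordered accessors are just the relaxed ones plus an I/O barrier: the read variants append __iormb() and the write variants prepend __iowmb(). A minimal sketch of where that matters, with a made-up status register at offset 0x10 and a DMA completion flag in bit 0 (the function name and offsets are hypothetical):

    #include <linux/io.h>
    #include <linux/types.h>

    static bool example_dma_done(void __iomem *regs, const u32 *dma_buf, u32 *out)
    {
    	/* readl() orders this status load before the dependent buffer
    	 * read below; readl_relaxed() would give no such guarantee. */
    	if (!(readl(regs + 0x10) & 0x1))
    		return false;

    	*out = dma_buf[0];	/* safe: ordered after the status readl() */
    	return true;
    }

With readl_relaxed() the CPU would be free to hoist the dma_buf load above the status check and return stale data.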
 /*
  * I/O port access primitives.
  */
-#define IO_SPACE_LIMIT		0xffff
-#define PCI_IOBASE		((void __iomem *)(MODULES_VADDR - SZ_2M))
-
-static inline u8 inb(unsigned long addr)
-{
-	return readb(addr + PCI_IOBASE);
-}
-
-static inline u16 inw(unsigned long addr)
-{
-	return readw(addr + PCI_IOBASE);
-}
-
-static inline u32 inl(unsigned long addr)
-{
-	return readl(addr + PCI_IOBASE);
-}
-
-static inline void outb(u8 b, unsigned long addr)
-{
-	writeb(b, addr + PCI_IOBASE);
-}
-
-static inline void outw(u16 b, unsigned long addr)
-{
-	writew(b, addr + PCI_IOBASE);
-}
-
-static inline void outl(u32 b, unsigned long addr)
-{
-	writel(b, addr + PCI_IOBASE);
-}
-
-#define inb_p(addr)	inb(addr)
-#define inw_p(addr)	inw(addr)
-#define inl_p(addr)	inl(addr)
-
-#define outb_p(x, addr)	outb((x), (addr))
-#define outw_p(x, addr)	outw((x), (addr))
-#define outl_p(x, addr)	outl((x), (addr))
-
-static inline void insb(unsigned long addr, void *buffer, int count)
-{
-	u8 *buf = buffer;
-	while (count--)
-		*buf++ = __raw_readb(addr + PCI_IOBASE);
-}
-
-static inline void insw(unsigned long addr, void *buffer, int count)
-{
-	u16 *buf = buffer;
-	while (count--)
-		*buf++ = __raw_readw(addr + PCI_IOBASE);
-}
-
-static inline void insl(unsigned long addr, void *buffer, int count)
-{
-	u32 *buf = buffer;
-	while (count--)
-		*buf++ = __raw_readl(addr + PCI_IOBASE);
-}
-
-static inline void outsb(unsigned long addr, const void *buffer, int count)
-{
-	const u8 *buf = buffer;
-	while (count--)
-		__raw_writeb(*buf++, addr + PCI_IOBASE);
-}
-
-static inline void outsw(unsigned long addr, const void *buffer, int count)
-{
-	const u16 *buf = buffer;
-	while (count--)
-		__raw_writew(*buf++, addr + PCI_IOBASE);
-}
-
-static inline void outsl(unsigned long addr, const void *buffer, int count)
-{
-	const u32 *buf = buffer;
-	while (count--)
-		__raw_writel(*buf++, addr + PCI_IOBASE);
-}
-
-#define insb_p(port,to,len)	insb(port,to,len)
-#define insw_p(port,to,len)	insw(port,to,len)
-#define insl_p(port,to,len)	insl(port,to,len)
-
-#define outsb_p(port,from,len)	outsb(port,from,len)
-#define outsw_p(port,from,len)	outsw(port,from,len)
-#define outsl_p(port,from,len)	outsl(port,from,len)
+#define arch_has_dev_port()	(1)
+#define IO_SPACE_LIMIT		(PCI_IO_SIZE - 1)
+#define PCI_IOBASE		((void __iomem *)PCI_IO_START)

 /*
  * String version of I/O memory access operations.
@@ -224,31 +219,22 @@
  */
 extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot);
 extern void __iounmap(volatile void __iomem *addr);
-
-#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
-#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
-#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
+extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);

 #define ioremap(addr, size)		__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define ioremap_nocache(addr, size)	__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define ioremap_wc(addr, size)		__ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
+#define ioremap_wt(addr, size)		__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define iounmap				__iounmap

-#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF)
-#define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PTE_PXN | PTE_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
-
-#define ARCH_HAS_IOREMAP_WC
-#include <asm-generic/iomap.h>
-
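The ioremap() family above maps device memory with the attributes named in the macro: Device-nGnRE for plain ioremap()/ioremap_nocache(), Normal non-cacheable for ioremap_wc(). A short usage sketch; the physical address, register offset and function names are made up:

    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/sizes.h>

    static void __iomem *regs;

    /* Map 4 KiB of (hypothetical) device registers as Device-nGnRE
     * and set an enable bit; ioremap() expands to __ioremap(...,
     * PROT_DEVICE_nGnRE) as defined above. */
    static int example_probe(void)
    {
    	regs = ioremap(0x1a000000, SZ_4K);
    	if (!regs)
    		return -ENOMEM;
    	writel(0x1, regs);	/* hypothetical ENABLE register */
    	return 0;
    }

    static void example_remove(void)
    {
    	iounmap(regs);
    	regs = NULL;
    }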
 /*
- * More restrictive address range checking than the default implementation
- * (PHYS_OFFSET and PHYS_MASK taken into account).
+ * io{read,write}{16,32}be() macros
  */
-#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
-extern int valid_phys_addr_range(unsigned long addr, size_t size);
-extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
+#define ioread16be(p)		({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
+#define ioread32be(p)		({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })

-extern int devmem_is_allowed(unsigned long pfn);
+#define iowrite16be(v,p)	({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
+#define iowrite32be(v,p)	({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })

 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
@@ -261,5 +247,24 @@
  */
 #define xlate_dev_kmem_ptr(p)	p

+#include <asm-generic/io.h>
+
+/*
+ * More restrictive address range checking than the default implementation
+ * (PHYS_OFFSET and PHYS_MASK taken into account).
+ */
+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
+extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
+extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
+
+extern int devmem_is_allowed(unsigned long pfn);
+
+struct bio_vec;
+extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
+				      const struct bio_vec *vec2);
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)				\
+	(__BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&				\
+	 (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
+
 #endif	/* __KERNEL__ */
 #endif	/* __ASM_IO_H */
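The io{read,write}{16,32}be() helpers added above handle the byte swap for big-endian device registers while keeping the same barrier placement as readl()/writel(). A short sketch, with a hypothetical big-endian control register at offset 0x08:

    #include <linux/io.h>

    /* Set the (hypothetical) START bit in a big-endian control register. */
    static void example_start(void __iomem *base)
    {
    	u32 ctl = ioread32be(base + 0x08);	/* swapped to CPU order */
    	iowrite32be(ctl | 0x1, base + 0x08);	/* swapped back on store */
    }

On a little-endian arm64 kernel the swap is a real rev instruction; on a big-endian kernel be32_to_cpu()/cpu_to_be32() compile away.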