--- zzzz-none-000/linux-4.4.60/arch/arm/include/asm/io.h	2017-04-08 07:53:53.000000000 +0000
+++ jet-2400-727/linux-4.4.60/arch/arm/include/asm/io.h	2021-03-17 14:36:41.000000000 +0000
@@ -29,6 +29,7 @@
 #include <asm/byteorder.h>
 #include <asm/memory.h>
 #include <asm-generic/pci_iomap.h>
+#include <linux/msm_rtb.h>
 #include <xen/xen.h>
 
 /*
@@ -62,23 +63,21 @@
  * the bus. Rather than special-case the machine, just let the compiler
  * generate the access for CPUs prior to ARMv6.
  */
-#define __raw_readw(a)		(__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
-#define __raw_writew(v,a)	((void)(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v)))
+#define __raw_readw_no_log(a)	(__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
+#define __raw_writew_no_log(v, a)	((void)(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v)))
 #else
 /*
  * When running under a hypervisor, we want to avoid I/O accesses with
  * writeback addressing modes as these incur a significant performance
  * overhead (the address generation must be emulated in software).
  */
-#define __raw_writew __raw_writew
-static inline void __raw_writew(u16 val, volatile void __iomem *addr)
+static inline void __raw_writew_no_log(u16 val, volatile void __iomem *addr)
 {
 	asm volatile("strh %1, %0"
 		     : : "Q" (*(volatile u16 __force *)addr), "r" (val));
 }
 
-#define __raw_readw __raw_readw
-static inline u16 __raw_readw(const volatile void __iomem *addr)
+static inline u16 __raw_readw_no_log(const volatile void __iomem *addr)
 {
 	u16 val;
 	asm volatile("ldrh %0, %1"
@@ -88,22 +87,30 @@
 }
 #endif
 
-#define __raw_writeb __raw_writeb
-static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
+static inline void __raw_writeb_no_log(u8 val, volatile void __iomem *addr)
 {
 	asm volatile("strb %1, %0"
 		     : : "Qo" (*(volatile u8 __force *)addr), "r" (val));
 }
 
-#define __raw_writel __raw_writel
-static inline void __raw_writel(u32 val, volatile void __iomem *addr)
+static inline void __raw_writel_no_log(u32 val, volatile void __iomem *addr)
 {
 	asm volatile("str %1, %0"
 		     : : "Qo" (*(volatile u32 __force *)addr), "r" (val));
 }
 
-#define __raw_readb __raw_readb
-static inline u8 __raw_readb(const volatile void __iomem *addr)
+static inline void __raw_writeq_no_log(u64 val, volatile void __iomem *addr)
+{
+	register u64 v asm ("r2");
+
+	v = val;
+
+	asm volatile("strd %1, %0"
+			: "+Qo" (*(volatile u64 __force *)addr)
+			: "r" (v));
+}
+
+static inline u8 __raw_readb_no_log(const volatile void __iomem *addr)
 {
 	u8 val;
 	asm volatile("ldrb %0, %1"
@@ -112,8 +119,7 @@
 	return val;
 }
 
-#define __raw_readl __raw_readl
-static inline u32 __raw_readl(const volatile void __iomem *addr)
+static inline u32 __raw_readl_no_log(const volatile void __iomem *addr)
 {
 	u32 val;
 	asm volatile("ldr %0, %1"
@@ -122,6 +128,58 @@
 	return val;
 }
 
+static inline u64 __raw_readq_no_log(const volatile void __iomem *addr)
+{
+	register u64 val asm ("r2");
+
+	asm volatile("ldrd %1, %0"
+		     : "+Qo" (*(volatile u64 __force *)addr),
+		       "=r" (val));
+	return val;
+}
+
+/*
+ * There may be cases when clients don't want to support or can't support the
+ * logging. The appropriate functions can be used but clients should carefully
+ * consider why they can't support the logging.
+ */
+
+#define __raw_write_logged(v, a, _t)	({ \
+	int _ret; \
+	volatile void __iomem *_a = (a); \
+	void *_addr = (void __force *)(_a); \
+	_ret = uncached_logk(LOGK_WRITEL, _addr); \
+	ETB_WAYPOINT; \
+	__raw_write##_t##_no_log((v), _a); \
+	if (_ret) \
+		LOG_BARRIER; \
+	})
+
+
+#define __raw_writeb(v, a)	__raw_write_logged((v), (a), b)
+#define __raw_writew(v, a)	__raw_write_logged((v), (a), w)
+#define __raw_writel(v, a)	__raw_write_logged((v), (a), l)
+#define __raw_writeq(v, a)	__raw_write_logged((v), (a), q)
+
+#define __raw_read_logged(a, _l, _t)	({ \
+	unsigned _t __a; \
+	const volatile void __iomem *_a = (a); \
+	void *_addr = (void __force *)(_a); \
+	int _ret; \
+	_ret = uncached_logk(LOGK_READL, _addr); \
+	ETB_WAYPOINT; \
+	__a = __raw_read##_l##_no_log(_a);\
+	if (_ret) \
+		LOG_BARRIER; \
+	__a; \
+	})
+
+
+#define __raw_readb(a)	__raw_read_logged((a), b, char)
+#define __raw_readw(a)	__raw_read_logged((a), w, short)
+#define __raw_readl(a)	__raw_read_logged((a), l, int)
+#define __raw_readq(a)	__raw_read_logged((a), q, long long)
+
 /*
  * Architecture ioremap implementation.
  */
@@ -291,18 +349,32 @@
 					__raw_readw(c)); __r; })
 #define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
 					__raw_readl(c)); __r; })
-
-#define writeb_relaxed(v,c)	__raw_writeb(v,c)
-#define writew_relaxed(v,c)	__raw_writew((__force u16) cpu_to_le16(v),c)
-#define writel_relaxed(v,c)	__raw_writel((__force u32) cpu_to_le32(v),c)
+#define readq_relaxed(c) ({ u64 __r = le64_to_cpu((__force __le64) \
+					__raw_readq(c)); __r; })
+#define readb_relaxed_no_log(c) ({ u8 __r = __raw_readb_no_log(c); __r; })
+#define readl_relaxed_no_log(c) ({ u32 __r = le32_to_cpu((__force __le32) \
+					__raw_readl_no_log(c)); __r; })
+#define readq_relaxed_no_log(c) ({ u64 __r = le64_to_cpu((__force __le64) \
+					__raw_readq_no_log(c)); __r; })
+
+
+#define writeb_relaxed(v, c)	__raw_writeb(v, c)
+#define writew_relaxed(v, c)	__raw_writew((__force u16) cpu_to_le16(v), c)
+#define writel_relaxed(v, c)	__raw_writel((__force u32) cpu_to_le32(v), c)
+#define writeq_relaxed(v, c)	__raw_writeq((__force u64) cpu_to_le64(v), c)
+#define writeb_relaxed_no_log(v, c)	((void)__raw_writeb_no_log((v), (c)))
+#define writel_relaxed_no_log(v, c)	__raw_writel_no_log((__force u32) cpu_to_le32(v), c)
+#define writeq_relaxed_no_log(v, c)	__raw_writeq_no_log((__force u64) cpu_to_le64(v), c)
 
 #define readb(c)		({ u8  __v = readb_relaxed(c); __iormb(); __v; })
 #define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(); __v; })
 #define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+#define readq(c)		({ u64 __v = readq_relaxed(c); __iormb(); __v; })
 
 #define writeb(v,c)		({ __iowmb(); writeb_relaxed(v,c); })
 #define writew(v,c)		({ __iowmb(); writew_relaxed(v,c); })
 #define writel(v,c)		({ __iowmb(); writel_relaxed(v,c); })
+#define writeq(v, c)		({ __iowmb(); writeq_relaxed(v, c); })
 
 #define readsb(p,d,l)		__raw_readsb(p,d,l)
 #define readsw(p,d,l)		__raw_readsw(p,d,l)
@@ -401,6 +473,23 @@
 
 void iounmap(volatile void __iomem *iomem_cookie);
 #define iounmap iounmap
+/*
+ * io{read,write}{8,16,32,64} macros
+ */
+#ifndef ioread8
+#define ioread8(p)	({ unsigned int __v = __raw_readb(p); __iormb(); __v; })
+#define ioread16(p)	({ unsigned int __v = le16_to_cpu((__force __le16)__raw_readw(p)); __iormb(); __v; })
+#define ioread32(p)	({ unsigned int __v = le32_to_cpu((__force __le32)__raw_readl(p)); __iormb(); __v; })
+#define ioread64(p)	({ unsigned int __v = le64_to_cpu((__force __le64)__raw_readq(p)); __iormb(); __v; })
+
+#define ioread64be(p)	({ unsigned int __v = be64_to_cpu((__force __be64)__raw_readq(p)); __iormb(); __v; })
+
+#define iowrite8(v, p)	({ __iowmb(); __raw_writeb(v, p); })
+#define iowrite16(v, p)	({ __iowmb(); __raw_writew((__force __u16)cpu_to_le16(v), p); })
+#define iowrite32(v, p)	({ __iowmb(); __raw_writel((__force __u32)cpu_to_le32(v), p); })
+#define iowrite64(v, p)	({ __iowmb(); __raw_writeq((__force __u64)cpu_to_le64(v), p); })
+
+#define iowrite64be(v, p) ({ __iowmb(); __raw_writeq((__force __u64)cpu_to_be64(v), p); })
 
 /*
  * io{read,write}{16,32}be() macros
@@ -419,6 +508,7 @@
 #define ioport_unmap ioport_unmap
 extern void ioport_unmap(void __iomem *addr);
 #endif
+#endif
 
 struct pci_dev;
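
The net effect of the patch is that the standard accessors (readl(), writel_relaxed(), ioread32(), ...) now expand through __raw_read_logged()/__raw_write_logged(), which record the target address in the MSM RTB trace via uncached_logk() before issuing the access, while the new *_no_log variants keep the old direct behaviour. A minimal driver-side sketch of that distinction follows; HYP_DEV_BASE, HYP_STATUS, HYP_FIFO and hyp_drain_fifo() are made-up names for illustration only and are not part of the patch or of any real device.

/* Illustrative only: the device, its base address and register offsets
 * are hypothetical. */
#include <linux/io.h>
#include <linux/errno.h>

#define HYP_DEV_BASE	0xf9000000UL	/* hypothetical MMIO base */
#define HYP_STATUS	0x04		/* hypothetical register offsets */
#define HYP_FIFO	0x08

static int hyp_drain_fifo(void)
{
	void __iomem *base;
	u32 status;
	int i;

	base = ioremap(HYP_DEV_BASE, 0x1000);
	if (!base)
		return -ENOMEM;

	/* Normal access: expands to __raw_readl(), which logs the address
	 * with uncached_logk(LOGK_READL, ...) before the load is issued. */
	status = readl(base + HYP_STATUS);

	/* Hot path: the _relaxed_no_log variant skips the uncached_logk()/
	 * ETB_WAYPOINT bookkeeping and, being a relaxed accessor, also omits
	 * the __iormb() barrier, at the cost of not appearing in the trace. */
	for (i = 0; i < 256; i++)
		(void)readl_relaxed_no_log(base + HYP_FIFO);

	iounmap(base);

	return status ? 0 : -ENODEV;
}

When the RTB driver is not configured in, uncached_logk() is presumably a stub that returns 0, so the logged and _no_log paths should then behave the same apart from the extra macro layer.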