--- zzzz-none-000/linux-2.6.39.4/arch/arm/include/asm/dma-mapping.h	2011-08-03 19:43:28.000000000 +0000
+++ puma6-arm-6490-729/linux-2.6.39.4/arch/arm/include/asm/dma-mapping.h	2021-11-10 13:23:09.000000000 +0000
@@ -20,22 +20,22 @@
  * addresses. They must not be used by drivers.
  */
 #ifndef __arch_pfn_to_dma
-static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
+static inline dma_addr_t pfn_to_dma(struct device *dev __attribute__((unused)), unsigned long pfn)
 {
 	return (dma_addr_t)__pfn_to_bus(pfn);
 }
 
-static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
+static inline unsigned long dma_to_pfn(struct device *dev __attribute__((unused)), dma_addr_t addr)
 {
 	return __bus_to_pfn(addr);
 }
 
-static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
+static inline void *dma_to_virt(struct device *dev __attribute__((unused)), dma_addr_t addr)
 {
 	return (void *)__bus_to_virt(addr);
 }
 
-static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
+static inline dma_addr_t virt_to_dma(struct device *dev __attribute__((unused)), void *addr)
 {
 	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
 }
@@ -124,7 +124,7 @@
  * FIXME: This should really be a platform specific issue - we should
  * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
  */
-static inline int dma_supported(struct device *dev, u64 mask)
+static inline int dma_supported(struct device *dev __attribute__((unused)), u64 mask)
 {
 	if (mask < ISA_DMA_THRESHOLD)
 		return 0;
@@ -152,23 +152,23 @@
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
  */
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+static inline int dma_mapping_error(struct device *dev __attribute__((unused)), dma_addr_t dma_addr)
 {
-	return dma_addr == ~0;
+	return dma_addr == ~0U;
 }
 
 /*
  * Dummy noncoherent implementation. We don't provide a dma_cache_sync
  * function so drivers using this API are highlighted with build warnings.
  */
-static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
-		dma_addr_t *handle, gfp_t gfp)
+static inline void *dma_alloc_noncoherent(struct device *dev __attribute__((unused)), size_t size __attribute__((unused)),
+		dma_addr_t *handle __attribute__((unused)), gfp_t gfp __attribute__((unused)))
 {
 	return NULL;
 }
 
-static inline void dma_free_noncoherent(struct device *dev, size_t size,
-		void *cpu_addr, dma_addr_t handle)
+static inline void dma_free_noncoherent(struct device *dev __attribute__((unused)), size_t size __attribute__((unused)),
+		void *cpu_addr __attribute__((unused)), dma_addr_t handle __attribute__((unused)))
 {
 }
 
@@ -315,14 +315,14 @@
 int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
 		size_t, enum dma_data_direction);
 #else
-static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
-		unsigned long offset, size_t size, enum dma_data_direction dir)
+static inline int dmabounce_sync_for_cpu(struct device *d __attribute__((unused)), dma_addr_t addr __attribute__((unused)),
+		unsigned long offset __attribute__((unused)), size_t size __attribute__((unused)), enum dma_data_direction dir __attribute__((unused)))
 {
 	return 1;
 }
 
-static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
-		unsigned long offset, size_t size, enum dma_data_direction dir)
+static inline int dmabounce_sync_for_device(struct device *d __attribute__((unused)), dma_addr_t addr __attribute__((unused)),
+		unsigned long offset __attribute__((unused)), size_t size __attribute__((unused)), enum dma_data_direction dir __attribute__((unused)))
 {
 	return 1;
 }
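Note (not part of the patch above): every hunk only annotates parameters that the inline stubs never read, so builds with -Wextra / -Wunused-parameter stay warning-free while the DMA API signatures are unchanged; the ~0U change in dma_mapping_error() likewise avoids a signed/unsigned comparison warning. A minimal stand-alone sketch of the same annotation follows; the demo_to_pfn() helper is hypothetical and not taken from the kernel headers.

/* Hypothetical, self-contained example of the annotation used in the patch. */
#include <stdio.h>

/*
 * 'dev' is accepted for API symmetry but never read here, so the attribute
 * suppresses -Wunused-parameter for it without changing the signature.
 */
static inline unsigned long demo_to_pfn(void *dev __attribute__((unused)),
					unsigned long bus_addr)
{
	return bus_addr >> 12;	/* 4 KiB pages: bus address -> page frame number */
}

int main(void)
{
	printf("pfn = %lu\n", demo_to_pfn(NULL, 0x12345000UL));
	return 0;
}

For in-kernel code, <linux/compiler.h> also provides the __maybe_unused shorthand, which on GCC expands to the same attribute; this patch simply spells the attribute out.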