--- zzzz-none-000/linux-2.4.17/include/asm-mips/pci.h	2001-10-12 22:35:54.000000000 +0000
+++ sangam-fb-322/linux-2.4.17/include/asm-mips/pci.h	2004-11-24 13:21:34.000000000 +0000
@@ -7,6 +7,8 @@
 #define _ASM_PCI_H
 
 #include <linux/config.h>
+#include
+#include <asm/io.h>		/* for virt_to_bus() */
 
 #ifdef __KERNEL__
 
@@ -23,12 +25,12 @@
 #define PCIBIOS_MIN_IO		0x1000
 #define PCIBIOS_MIN_MEM		0x10000000
 
-extern inline void pcibios_set_master(struct pci_dev *dev)
+static inline void pcibios_set_master(struct pci_dev *dev)
 {
 	/* No special bus mastering setup handling */
 }
 
-extern inline void pcibios_penalize_isa_irq(int irq)
+static inline void pcibios_penalize_isa_irq(int irq)
 {
 	/* We don't do dynamic PCI IRQ allocation */
 }
@@ -84,13 +86,13 @@
  * Once the device is given the dma address, the device owns this memory
  * until either pci_unmap_single or pci_dma_sync_single is performed.
  */
-extern inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
+static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
 					size_t size, int direction)
 {
 	if (direction == PCI_DMA_NONE)
 		BUG();
 
-#ifndef CONFIG_COHERENT_IO
+#ifdef CONFIG_NONCOHERENT_IO
 	dma_cache_wback_inv((unsigned long)ptr, size);
 #endif
 
@@ -105,7 +107,7 @@
  * After this call, reads by the cpu to the buffer are guarenteed to see
  * whatever the device wrote there.
  */
-extern inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
+static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
 				    size_t size, int direction)
 {
 	if (direction == PCI_DMA_NONE)
@@ -115,6 +117,36 @@
 }
 
 /*
+ * pci_{map,unmap}_single_page maps a kernel page to a dma_addr_t. identical
+ * to pci_map_single, but takes a struct page instead of a virtual address
+ */
+static inline dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
+				      unsigned long offset, size_t size,
+				      int direction)
+{
+	unsigned long addr;
+
+	if (direction == PCI_DMA_NONE)
+		BUG();
+
+	addr = (unsigned long) page_address(page);
+	addr += offset;
+#ifdef CONFIG_NONCOHERENT_IO
+	dma_cache_wback_inv(addr, size);
+#endif
+
+	return virt_to_bus((void *)addr);
+}
+
+static inline void pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
+				  size_t size, int direction)
+{
+	if (direction == PCI_DMA_NONE)
+		BUG();
+	/* Nothing to do */
+}
+
+/*
  * Map a set of buffers described by scatterlist in streaming
  * mode for DMA.  This is the scather-gather version of the
  * above pci_map_single interface.  Here the scatter gather list
@@ -130,17 +162,17 @@
  * Device ownership issues as mentioned above for pci_map_single are
  * the same here.
  */
-extern inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
+static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
 			     int nents, int direction)
 {
-#ifndef CONFIG_COHERENT_IO
+#ifdef CONFIG_NONCOHERENT_IO
 	int i;
 #endif
 
 	if (direction == PCI_DMA_NONE)
 		BUG();
 
-#ifndef CONFIG_COHERENT_IO
+#ifdef CONFIG_NONCOHERENT_IO
 	/* Make sure that gcc doesn't leave the empty loop body.  */
 	for (i = 0; i < nents; i++, sg++)
 		dma_cache_wback_inv((unsigned long)sg->address, sg->length);
@@ -154,7 +186,7 @@
  * Again, cpu read rules concerning calls here are the same as for
  * pci_unmap_single() above.
  */
-extern inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
+static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
 			       int nents, int direction)
 {
 	if (direction == PCI_DMA_NONE)
@@ -173,14 +205,14 @@
  * next point you give the PCI dma address back to the card, the
  * device again owns the buffer.
  */
-extern inline void pci_dma_sync_single(struct pci_dev *hwdev,
+static inline void pci_dma_sync_single(struct pci_dev *hwdev,
 				       dma_addr_t dma_handle,
 				       size_t size, int direction)
 {
 	if (direction == PCI_DMA_NONE)
 		BUG();
 
-#ifndef CONFIG_COHERENT_IO
+#ifdef CONFIG_NONCOHERENT_IO
 	dma_cache_wback_inv((unsigned long)bus_to_virt(dma_handle), size);
 #endif
 }
@@ -192,11 +224,11 @@
  * The same as pci_dma_sync_single but for a scatter-gather list,
  * same rules and usage.
  */
-extern inline void pci_dma_sync_sg(struct pci_dev *hwdev,
+static inline void pci_dma_sync_sg(struct pci_dev *hwdev,
 				   struct scatterlist *sg,
 				   int nelems, int direction)
 {
-#ifndef CONFIG_COHERENT_IO
+#ifdef CONFIG_NONCOHERENT_IO
 	int i;
 #endif
 
@@ -204,7 +236,7 @@
 		BUG();
 
 	/* Make sure that gcc doesn't leave the empty loop body.  */
-#ifndef CONFIG_COHERENT_IO
+#ifdef CONFIG_NONCOHERENT_IO
 	for (i = 0; i < nelems; i++, sg++)
 		dma_cache_wback_inv((unsigned long)sg->address, sg->length);
 #endif
@@ -215,7 +247,7 @@
  * only drive the low 24-bits during PCI bus mastering, then
 * you would pass 0x00ffffff as the mask to this function.
  */
-extern inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
+static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
 {
 	/*
 	 * we fall back to GFP_DMA when the mask isn't all 1s,
@@ -228,6 +260,39 @@
 	return 1;
 }
 
+/* This is always fine. */
+/* Well ... this actually needs more thought ... */
+#define pci_dac_dma_supported(pci_dev, mask)	(0)
+
+#if 0
+static __inline__ dma64_addr_t
+pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
+{
+	return ((dma64_addr_t) page_to_bus(page) +
+		(dma64_addr_t) offset);
+}
+
+static __inline__ struct page *
+pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
+{
+	unsigned long poff = (dma_addr >> PAGE_SHIFT);
+
+	return mem_map + poff;
+}
+
+static __inline__ unsigned long
+pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
+{
+	return (dma_addr & ~PAGE_MASK);
+}
+
+static __inline__ void
+pci_dac_dma_sync_single(struct pci_dev *pdev, dma64_addr_t dma_addr,
			size_t len, int direction)
+{
+	/* Nothing to do. */
+}
+#endif
 /* Return the index of the PCI controller for device. */
 #define pci_controller_num(pdev)	(0)
 
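For context, a minimal driver-side sketch of how the streaming DMA interface patched above is meant to be used on a non-coherent (CONFIG_NONCOHERENT_IO) MIPS platform; the function name, buffer handling and comments are illustrative assumptions and are not part of this patch:

#include <linux/pci.h>

/*
 * Hypothetical example (not part of the patch): transmit one buffer.
 * On CONFIG_NONCOHERENT_IO kernels, pci_map_single() performs the
 * dma_cache_wback_inv() itself, so the driver never flushes caches by hand.
 */
static int example_tx(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t bus_addr;

	/* CPU has filled buf; hand ownership of the memory to the device. */
	bus_addr = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);

	/* ... program bus_addr into the device's DMA registers and start I/O ... */

	/* After the device signals completion, ownership returns to the CPU. */
	pci_unmap_single(pdev, bus_addr, len, PCI_DMA_TODEVICE);

	return 0;
}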