--- zzzz-none-000/linux-2.4.17/include/asm-ppc/io.h 2001-11-03 01:43:54.000000000 +0000
+++ sangam-fb-322/linux-2.4.17/include/asm-ppc/io.h 2004-11-24 13:21:50.000000000 +0000
@@ -1,12 +1,15 @@
 /*
- * BK Id: SCCS/s.io.h 1.14 10/16/01 15:58:42 trini
+ * BK Id: SCCS/s.io.h 1.29 11/28/01 13:29:57 paulus
  */
+
 #ifdef __KERNEL__
 #ifndef _PPC_IO_H
 #define _PPC_IO_H
 
 #include 
 #include 
+#include 
+
 #include 
 #include 
 
@@ -25,24 +28,25 @@
 #define PREP_PCI_DRAM_OFFSET	0x80000000
 
 #if defined(CONFIG_4xx)
-#include 
+#include 
 #elif defined(CONFIG_8xx)
 #include 
 #elif defined(CONFIG_8260)
 #include 
 #elif defined(CONFIG_APUS)
-#define _IO_BASE 0
-#define _ISA_MEM_BASE 0
+#define _IO_BASE 0
+#define _ISA_MEM_BASE 0
 #define PCI_DRAM_OFFSET 0
 #else /* Everyone else */
-extern unsigned long isa_io_base;
-extern unsigned long isa_mem_base;
-extern unsigned long pci_dram_offset;
 #define _IO_BASE	isa_io_base
 #define _ISA_MEM_BASE	isa_mem_base
 #define PCI_DRAM_OFFSET	pci_dram_offset
 #endif /* Platform-dependant I/O */
 
+extern unsigned long isa_io_base;
+extern unsigned long isa_mem_base;
+extern unsigned long pci_dram_offset;
+
 #define readb(addr)		in_8((volatile u8 *)(addr))
 #define writeb(b,addr)		out_8((volatile u8 *)(addr), (b))
 #if defined(CONFIG_APUS)
@@ -80,9 +84,22 @@
 
 #ifdef CONFIG_ALL_PPC
 /*
- * We have to handle possible machine checks here on powermacs
- * and potentially some CHRPs -- paulus.
+ * On powermacs, we will get a machine check exception if we
+ * try to read data from a non-existent I/O port.  Because the
+ * machine check is an asynchronous exception, it isn't
+ * well-defined which instruction SRR0 will point to when the
+ * exception occurs.
+ * With the sequence below (twi; isync; nop), we have found that
+ * the machine check occurs on one of the three instructions on
+ * all PPC implementations tested so far.  The twi and isync are
+ * needed on the 601 (in fact twi; sync works too), the isync and
+ * nop are needed on 604[e|r], and any of twi, sync or isync will
+ * work on 603[e], 750, 74x0.
+ * The twi creates an explicit data dependency on the returned
+ * value which seems to be needed to make the 601 wait for the
+ * load to finish.
  */
+
 #define __do_in_asm(name, op)				\
 extern __inline__ unsigned int name(unsigned int port)	\
 {							\
@@ -181,7 +198,6 @@
 #define memcpy_fromio(a,b,c)	memcpy((a),(void *)(b),(c))
 #define memcpy_toio(a,b,c)	memcpy((void *)(a),(b),(c))
 
-#ifdef __KERNEL__
 /*
  * Map in an area of physical address space, for accessing
  * I/O devices etc.
@@ -205,23 +221,26 @@
  */
 extern inline unsigned long virt_to_bus(volatile void * address)
 {
-#ifndef CONFIG_APUS
-	if (address == (void *)0)
-		return 0;
-	return (unsigned long)address - KERNELBASE + PCI_DRAM_OFFSET;
+#if defined(CONFIG_APUS) || defined(CONFIG_8xx) || defined(CONFIG_4xx) || defined(CONFIG_MENF1) || defined(CONFIG_PCORE)
+	/* I think everyone will be using this version if we start allowing
+	 * uncached pages in alternate virtual spaces.  -- Dan
+	 */
+	return (iopa((unsigned long) address) + PCI_DRAM_OFFSET);
 #else
-	return iopa ((unsigned long) address);
+	if (address == (void *)0)
+		return 0;
+	return (unsigned long)address - KERNELBASE + PCI_DRAM_OFFSET;
 #endif
 }
 
 extern inline void * bus_to_virt(unsigned long address)
 {
-#ifndef CONFIG_APUS
-	if (address == 0)
-		return 0;
-	return (void *)(address - PCI_DRAM_OFFSET + KERNELBASE);
+#if defined(CONFIG_APUS) || defined(CONFIG_8xx) || defined(CONFIG_4xx)
+	return (void*) mm_ptov (address - PCI_DRAM_OFFSET);
 #else
-	return (void*) mm_ptov (address);
+	if (address == 0)
+		return 0;
+	return (void *)(address - PCI_DRAM_OFFSET + KERNELBASE);
 #endif
 }
 
@@ -231,30 +250,28 @@
  */
 extern inline unsigned long virt_to_phys(volatile void * address)
 {
-#ifndef CONFIG_APUS
-	return (unsigned long) address - KERNELBASE;
-#else
+#if defined(CONFIG_APUS) || defined(CONFIG_8xx) || defined(CONFIG_4xx)
 	return iopa ((unsigned long) address);
+#else
+	return (unsigned long) address - KERNELBASE;
 #endif
 }
 
 extern inline void * phys_to_virt(unsigned long address)
 {
-#ifndef CONFIG_APUS
-	return (void *) (address + KERNELBASE);
-#else
+#if defined(CONFIG_APUS) || defined(CONFIG_8xx) || defined(CONFIG_4xx)
 	return (void*) mm_ptov (address);
+#else
+	return (void *) (address + KERNELBASE);
 #endif
 }
 
 /*
  * Change "struct page" to physical address.
  */
-#define page_to_phys(page)	((page - mem_map) << PAGE_SHIFT)
+#define page_to_phys(page)	(((page - mem_map) << PAGE_SHIFT) + PPC_MEMSTART)
 #define page_to_bus(page)	(page_to_phys(page) + PCI_DRAM_OFFSET)
 
-#endif /* __KERNEL__ */
-
 /*
  * Enforce In-order Execution of I/O:
  * Acts as a barrier to ensure all previous I/O accesses have
@@ -265,7 +282,7 @@
 	__asm__ __volatile__ ("eieio" : : : "memory");
 }
 
-/* Enforce in-order execution of data I/O. 
+/* Enforce in-order execution of data I/O.
  * No distinction between read/write on PPC; use eieio for all three.
  */
 #define iobarrier_rw() eieio()
@@ -367,11 +384,47 @@
 	return 0;
 }
 
-/* Nothing to do */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+
+/*
+ * DMA-consistent mapping functions for PowerPCs that don't support
+ * cache snooping.  These allocate/free a region of uncached mapped
+ * memory space for use with DMA devices.  Alternatively, you could
+ * allocate the space "normally" and use the cache management functions
+ * to ensure it is consistent.
+ */
+extern void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle);
+#ifdef CONFIG_4xx
+extern void consistent_free(void *vaddr);
+#else
+extern void consistent_free(void *vaddr, size_t size);
+#endif
+extern void consistent_sync(void *vaddr, size_t size, int rw);
+extern void consistent_sync_page(struct page *page, unsigned long offset,
+				 size_t size, int rw);
+
+#define dma_cache_inv(_start,_size) \
+	invalidate_dcache_range(_start, (_start + _size))
+#define dma_cache_wback(_start,_size) \
+	clean_dcache_range(_start, (_start + _size))
+#define dma_cache_wback_inv(_start,_size) \
+	flush_dcache_range(_start, (_start + _size))
+
+#else /* CONFIG_NOT_COHERENT_CACHE */
+
+/*
+ * Cache coherent cores.
+ */
 
 #define dma_cache_inv(_start,_size)		do { } while (0)
 #define dma_cache_wback(_start,_size)		do { } while (0)
 #define dma_cache_wback_inv(_start,_size)	do { } while (0)
 
-#endif
+#define consistent_alloc(gfp, size, handle)	NULL
+#define consistent_free(addr, size)		do { } while (0)
+#define consistent_sync(addr, size, rw)		do { } while (0)
+#define consistent_sync_page(pg, off, sz, rw)	do { } while (0)
+
+#endif /* CONFIG_NOT_COHERENT_CACHE */
+#endif /* _PPC_IO_H */
 #endif /* __KERNEL__ */
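For readers unfamiliar with the trick the new machine-check comment describes, the sketch below shows just the instruction ordering: a load from the I/O address followed by twi; isync; nop. It is only an illustration, not the kernel's __do_in_asm() macro (which also has to arrange for the machine check itself to be recovered); the function name sketch_inb and the io_base parameter are made up.

/*
 * Illustration only: the twi/isync/nop ordering described in the patch's
 * comment, outside the real __do_in_asm() macro.  Machine-check recovery
 * is not shown.
 */
static inline unsigned int sketch_inb(unsigned long port, unsigned long io_base)
{
	unsigned int val;

	__asm__ __volatile__(
		"lbz	%0,0(%1)\n"	/* load a byte from the I/O address */
		"twi	0,%0,0\n"	/* TO=0: never traps, but creates a
					 * data dependency on the loaded value */
		"isync\n"		/* wait for the twi (and hence the
					 * load) to complete */
		"nop\n"			/* third instruction the asynchronous
					 * machine check may land on */
		: "=r" (val)
		: "r" (port + io_base)
		: "memory");
	return val;
}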
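The consistent_* interface added under CONFIG_NOT_COHERENT_CACHE is easiest to see from the caller's side. The fragment below is a hypothetical driver sketch, assuming the 2.4-era GFP_KERNEL and PCI_DMA_* direction constants as the gfp and rw arguments; MY_RING_BYTES and all my_* names are invented for illustration.

#include <linux/errno.h>
#include <linux/mm.h>		/* GFP_KERNEL */
#include <linux/pci.h>		/* PCI_DMA_TODEVICE */
#include <linux/string.h>
#include <asm/io.h>		/* consistent_alloc(), consistent_sync() */

#define MY_RING_BYTES	4096	/* hypothetical descriptor ring size */

static void *my_ring;		/* uncached kernel-side mapping */
static dma_addr_t my_ring_dma;	/* bus address handed to the device */

static int my_ring_setup(void)
{
	/* Uncached, DMA-consistent memory: CPU stores through my_ring are
	 * visible to the device without explicit cache flushing. */
	my_ring = consistent_alloc(GFP_KERNEL, MY_RING_BYTES, &my_ring_dma);
	if (my_ring == NULL)
		return -ENOMEM;
	memset(my_ring, 0, MY_RING_BYTES);
	return 0;
}

static void my_tx_prepare(void *buf, size_t len)
{
	/* Ordinary cacheable buffer: write dirty cache lines back to memory
	 * before the device is told to DMA from it. */
	consistent_sync(buf, len, PCI_DMA_TODEVICE);
	/* ...program the device with the buffer's bus address and len... */
}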