/****************************************************************************** ** ** FILE NAME : amazon_s_ssc.c ** PROJECT : Amazon-S ** MODULES : SSC (Synchronous Serial Controller) ** ** DATE : 4 Jan 2008 ** AUTHOR : Lei Chuanhua ** DESCRIPTION : SCC Driver ** COPYRIGHT : Copyright (c) 2004 ** Infineon Technologies AG ** Am Campeon 1-12, 85579 Neubiberg, Germany ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU General Public License as published by ** the Free Software Foundation; either version 2 of the License, or ** (at your option) any later version. ** ** HISTORY ** $Date $Author $Comment ** 20 Nov, 07 Lei Chuanhua Remove priority mechanism ** 10 Dec, 07 Lei Chuanhua DMA support ** 20 Dec, 07 Lei Chuanhua Remove FIFO/DMA TX memory copy ** 04 Jan, 08 Lei Chuanhua Supports kernel 2.6.17 and later ** 25 Feb, 08 Lei Chuanhua TIR support in TX DMA, remove TX DMA interrupt ** 25 Mar, 08 Lei Chuanhua Remove Rx DMA memory copy ** 09 Apr, 08 Lei Chuanhua Added doxygen comment ** 17 Apr, 08 Lei Chuanhua Fixed multiple psuedo interrupts in Rx DMA ** 13 Nov, 08 Lei Chuanhua Sanity check for DMA alignment ** 25 Nov, 08 Lei Chuanhua Fixed softirq by closing dma channel after tran ** -action is done ** 24 Dec, 08 Lei Chuanhua Rewrite this driver according to new SPI spec, ** this version try to solve real-time issue ** 20 May, 09 Lei Chuanhua Move SPI CS to specific SPI client driver *******************************************************************************/ /*! \defgroup AMAZON_S_SSC \ingroup AMAZON_S_BSP \brief amazon_s ssc driver module */ /*! \file amazon_s_ssc.c \ingroup AMAZON_S_SSC \brief ssc driver file */ /*! 
\defgroup AMAZON_S_SSC_FUNCTIONS \ingroup AMAZON_S_SSC \brief amazon_s ssc driver functions */ #ifndef EXPORT_SYMTAB #define EXPORT_SYMTAB #endif #ifndef AUTOCONF_INCLUDED #include #endif /* AUTOCONF_INCLUDED */ #include #include #include #include #include #include #include #include #include #include #include #include #include #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,16) #include #else #include #endif /* Project header */ #include #include #include #include #include #include #include #define IFX_SSC_DRV_VERSION "1.5.1" /* Always report fatal error */ #define KASSERT(exp, msg) do { \ if (unlikely(!(exp))) { \ printk msg; \ BUG(); \ } \ } while (0) #define TRUE 1 #define FALSE 0 #define IFX_SSC_NAME "ifx_ssc" /* Short reg read/write for easy identification */ #define SSC_WRITE_REG(data, addr) ((*(volatile u32 *)(addr)) = (data)) #define SSC_READ_REG(addr) (*(volatile u32 *)(addr)) /* &0x1F is equal to %/32, but much faster */ #define IRQ_MASK_VALUE 0x1F /* = 32 - 1 */ /* Don't enable module level interrupt for this irq number, * otherwise something will be mixtured */ #define IFX_SSC_FAKE_IRQ_NO IFX_SSC_TIR #ifdef CONFIG_AMAZON_S #define IFX_SSC_MEM_BASE AMAZON_S_SSC1 #elif defined(CONFIG_DANUBE) #define IFX_SSC_MEM_BASE DANUBE_SSC1 #else #error "Define your platform first" #endif /** * defining SSC_ASYNCHRONOUS_SUPPORT will support synchronous and asynchronous * API at the same time. SSC context will run in tasklet. * If not define this MACRO, only synchronous API is supported, SSC context will * in kernel thread mode. * XXX, create new kernel thread is not recommended, work queue is prefered in kernel */ #define SSC_ASYNCHRONOUS_SUPPORT /** * This is the per-channel data structure containing pointers, flags * and variables for the port. isp is allocated in ifx_ssc_init() * based on the chip version. 
*/ static struct ifx_ssc_port *isp = NULL; #define IFX_SSC_DEBUG #ifdef IFX_SSC_DEBUG #define INLINE enum { SSC_MSG_TX_FIFO = 0x00000001, SSC_MSG_TX_DMA = 0x00000002, SSC_MSG_RX_FIFO = 0x00000004, SSC_MSG_RX_DMA = 0x00000008, SSC_MSG_INT = 0x00000010, /* Interrupt msg */ SSC_MSG_CFG = 0x00000020, SSC_MSG_THREAD = 0x00000040, SSC_MSG_TASKLET = 0x00000080, SSC_MSG_DEBUG = 0x00000100, SSC_MSG_ERROR = 0x00000200, SSC_MSG_INIT = 0x00000400, /* Initialization msg */ SSC_MSG_QUEUE = 0x00000800, SSC_MSG_LOCK = 0x00001000, SSC_MSG_CALLBACK = 0x00002000, SSC_MSG_ANY = 0xffffffff, /* anything */ }; static void ifx_ssc_debug(struct ifx_ssc_port *port, const char *fmt, ...); #define IFX_SSC_PRINT(_port, _m, _fmt, args...) do { \ if ((_port)->ssc_debug & (_m)) { \ ifx_ssc_debug((_port), (_fmt), ##args); \ } \ } while (0) #else #define INLINE inline #define IFX_SSC_PRINT(_port, _m, _fmt, ...) #endif /* IFX_SSC_DEBUG */ extern unsigned int amazon_s_get_fpi_hz(void); typedef struct { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19) int (*request) (unsigned int irq, irqreturn_t (*handler) (int, void *), unsigned long irqflags, const char *devname, void *dev_id); #else int (*request) (unsigned int irq, irqreturn_t (*handler) (int, void *, struct pt_regs *), unsigned long irqflags, const char *devname, void *dev_id); #endif void (*free) (unsigned int irq, void *dev_id); } ifx_int_wrapper_t; static ifx_int_wrapper_t ifx_int_wrapper = { .request = request_irq, .free = free_irq, }; /** * ICU Interrupt Request Set Register is used for test purpose. All these * interrupts don't go through real hardware, just simulate hardware interrupts * So all related hardware interrupts must be disabled. At the same tme, the * following stuff should to go ICU implementation. However, ICU * implementation doesn't include IRSR. It is really platform-specific. 
 * XXX, move IRSR to interrupt.c
 */
/* One IRSR per ICU interrupt module; indexed by (irq >> 5). */
static volatile u32 * icu_im_irsr_array[] = {
#ifdef CONFIG_AMAZON_S
    AMAZON_S_ICU_IM0_IRSR,
    AMAZON_S_ICU_IM1_IRSR,
    AMAZON_S_ICU_IM2_IRSR,
    AMAZON_S_ICU_IM3_IRSR,
    AMAZON_S_ICU_IM4_IRSR,
#elif defined CONFIG_DANUBE
    DANUBE_ICU_IM0_IRSR,
    DANUBE_ICU_IM1_IRSR,
    DANUBE_ICU_IM2_IRSR,
    DANUBE_ICU_IM3_IRSR,
    DANUBE_ICU_IM4_IRSR,
#else
#error "Define your platform first"
#endif
};

/**
 * Set ICU request register bit to generate fake interrupt
 *
 * \param irq fake interrupt irq number
 */
static INLINE void ifx_ssc_fake_irq_set(unsigned int irq)
{
    unsigned long flags;

    BUG_ON(irq > INT_NUM_IM4_IRL31);
    /* IRSR is shared with other sources; read-modify-write with irqs off */
    local_irq_save(flags);
    /* irq >> 5 selects the ICU module, irq & 0x1f the request bit in it */
    *(icu_im_irsr_array[irq >> 5]) |= (1 << (irq & IRQ_MASK_VALUE));
    local_irq_restore(flags);
}

/**
 * Clear ICU request register bit to ack fake interrupt
 *
 * \param irq fake interrupt irq number
 */
static INLINE void ifx_ssc_fake_irq_clr(unsigned int irq)
{
    unsigned long flags;

    BUG_ON(irq > INT_NUM_IM4_IRL31);
    local_irq_save(flags);
    *(icu_im_irsr_array[irq >> 5]) &= ~(1 << (irq & IRQ_MASK_VALUE));
    local_irq_restore(flags);
}

/**
 * Set ICU request register bit to generate fake interrupt, just a wrapper for debug
 *
 * \param irq fake interrupt irq number
 */
static INLINE void ifx_ssc_raise_fake_irq(unsigned int irq)
{
#ifdef IFX_SSC_DEBUG
    struct ifx_ssc_port *port = (struct ifx_ssc_port *) &isp[0];

    IFX_SSC_PRINT(port, SSC_MSG_INT, "%s irq %d triggered \n", __func__, irq);
#endif
    ifx_ssc_fake_irq_set(irq);
}

/**
 * Trigger different schedule procedures according to different context.
 * if caller is already in tasklet, it will be done in caller's tasklet
 *
 * \param port Pointer to structure #ifx_ssc_port
 * \return none
 */
static void ifx_ssc_start_tasklet(struct ifx_ssc_port *port)
{
    struct tasklet_struct *ptasklet;
    /*
     * Calls the internal process to serve the queue. This routine would
     * immediately return in case the SSC hardware is currently used to serve
     * another request.
     */
    ptasklet = &port->ssc_txrxq;
    if (in_irq()) { /* Hardware irq */
        IFX_SSC_PRINT(port, SSC_MSG_INT, "%s hardware irq schedule\n", __func__);
        tasklet_hi_schedule(ptasklet);
    }
    else if (in_softirq()){ /* Softirq or tasklet */
        IFX_SSC_PRINT(port, SSC_MSG_TASKLET, "%s softirq schedule\n", __func__);
        /* Already in softirq context: run the tasklet function directly
         * instead of rescheduling it. */
        if (tasklet_trylock(ptasklet)) { /* tasklet_trylock for SMP */
            ptasklet->func(ptasklet->data);
            tasklet_unlock(ptasklet);
        }
        else {
            IFX_SSC_PRINT(port, SSC_MSG_TASKLET, "%s should never happen\n", __func__);
        }
    }
    else { /* Process context */
        /* Raise the fake interrupt so the tasklet runs in irq context */
        IFX_SSC_PRINT(port, SSC_MSG_THREAD, "%s process schedule\n", __func__);
        ifx_ssc_raise_fake_irq(port->ssc_fake_irq);
    }
}

/**
 * Fake interrupt handler.
 * Acks the software-raised ICU request bit and kicks the tx/rx tasklet.
 *
 * \param irq fake interrupt irq number
 * \param dev_id vague type, will be converted to
 *        pointer to structure #ifx_ssc_port
 */
static irqreturn_t
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
ifx_ssc_fake_isr (int irq, void *dev_id)
#else
ifx_ssc_fake_isr (int irq, void *dev_id, struct pt_regs *regs)
#endif
{
    struct ifx_ssc_port *port = (struct ifx_ssc_port *) dev_id;

    IFX_SSC_PRINT(port, SSC_MSG_INT, "%s irq %d served\n", __func__, irq);
    ifx_ssc_fake_irq_clr(irq);
    ifx_ssc_start_tasklet(port);
    return IRQ_HANDLED;
}

#ifdef IFX_SSC_DEBUG
/**
 * Debug all kinds of level message
 *
 * \param port Pointer to structure #ifx_ssc_port
 * \param fmt debug output format
 */
static void ifx_ssc_debug(struct ifx_ssc_port *port, const char *fmt, ...)
{
    char buf[256] = {0}; /* XXX */
    va_list ap;
#ifdef SSC_ASYNCHRONOUS_SUPPORT
    static const char *p = "tasklet";
#else
    static const char *p = "kthread";
#endif

    va_start(ap, fmt);
    vsnprintf(buf, sizeof(buf), fmt, ap);
    va_end(ap);
    printk("%s %s: %s\n", p, port->name, buf);
}
#endif /* IFX_SSC_DEBUG */

/**
 * Wait for SPI bus becomes idle.
 * Busy-polls the STATE register until the BUSY bit clears; no timeout.
 *
 * \param port Pointer to structure #ifx_ssc_port
 */
static INLINE void ifx_ssc_wait_finished(struct ifx_ssc_port *port)
{
    while((SSC_READ_REG(port->mapbase + IFX_SSC_STATE) & IFX_SSC_STATE_BUSY)){
        ; /* Do nothing */
    }
}

/**
 * Get SSC clock speed.
 * Returns the current operating speed of the SSC peripheral, depending on chip
 * specific bus speed and RMC setting in CLC register.
 *
 * \param port Pointer to structure #ifx_ssc_port
 * \return >0 Peripheral speed in HZ
 * \return 0 Error
 */
static INLINE unsigned int ifx_ssc_get_kernel_clk(struct ifx_ssc_port *port)
{
    /* This function assumes that the CLC register is set with the
     * appropriate value for RMC.
     */
    unsigned int rmc;

    rmc = (SSC_READ_REG(port->mapbase + IFX_SSC_CLC) & IFX_CLC_RUN_DIVIDER_MASK)
        >> IFX_CLC_RUN_DIVIDER_OFFSET;
    if (rmc == 0) {
        printk(KERN_ERR "%s rmc==0 \n", __func__);
        return 0;
    }
    return (amazon_s_get_fpi_hz() / rmc);
}

/**
 * SSC proc file read.
 * This function creates the proc file output for the SSC driver.
 *
 * \param page Buffer to write the string to
 * \param start not used (Linux internal)
 * \param offset not used (Linux internal)
 * \param count not used (Linux internal)
 * \param eof Set to 1 when all data is stored in buffer
 * \param data not used (Linux internal)
 * \return len Length of data in buffer
 */
static int ifx_ssc_read_proc(char *page, char **start, off_t offset, int count,
    int *eof, void *data)
{
    unsigned long flags;
    int off = 0;
    int t = 0;
    struct ifx_ssc_port *port;
    ssc_device_t *dev;

    off += sprintf(page + off, "Statistics for Infineon Synchronous Serial Controller\n\n");
    for (t = 0; t < IFX_SSC_MAX_PORT_NUM; t++) {
        /* Software statistics counters */
        off += sprintf(page + off, "SSC%d\n",isp[t].port_idx);
        off += sprintf(page + off, "RX overflow errors %d\n", isp[t].stats.rxOvErr);
        off += sprintf(page + off, "RX underflow errors %d\n", isp[t].stats.rxUnErr);
        off += sprintf(page + off, "TX overflow errors %d\n", isp[t].stats.txOvErr);
        off += sprintf(page + off, "TX underflow errors %d\n", isp[t].stats.txUnErr);
        off += sprintf(page + off, "Abort errors %d\n", isp[t].stats.abortErr);
        off += sprintf(page + off, "Mode errors %d\n", isp[t].stats.modeErr);
        off += sprintf(page + off, "RX Bytes %d\n", isp[t].stats.rxBytes);
        off += sprintf(page + off, "TX Bytes %d\n", isp[t].stats.txBytes);
        off += sprintf(page + off, "TX FIFO %d TX DMA %d\n", isp[t].stats.txFifo, isp[t].stats.txDma);
        off += sprintf(page + off, "RX FIFO %d RX DMA %d\n", isp[t].stats.rxFifo, isp[t].stats.rxDma);
        port = &isp[t];
        /* Hardware register dump; irqs off so the snapshot is consistent */
        local_irq_save(flags);
        off += sprintf(page + off, "IFX_SSC_CLC 0x%08x\n", SSC_READ_REG(port->mapbase + IFX_SSC_CLC));
        off += sprintf(page + off, "IFX_SSC_ID 0x%08x\n", SSC_READ_REG(port->mapbase + IFX_SSC_ID));
        /* NOTE(review): label says MCON but the CON register is read — confirm intended */
        off += sprintf(page + off, "IFX_SSC_MCON 0x%08x\n", SSC_READ_REG(port->mapbase + IFX_SSC_CON));
        off += sprintf(page + off, "IFX_SSC_STATE 0x%08x\n", SSC_READ_REG(port->mapbase + IFX_SSC_STATE));
        off += sprintf(page + off, "IFX_SSC_TB 0x%08x\n", SSC_READ_REG(port->mapbase + IFX_SSC_TB));
        off += sprintf(page + off, "IFX_SSC_FSTAT 0x%08x\n", SSC_READ_REG(port->mapbase + IFX_SSC_FSTAT));
        off += sprintf(page + off, "IFX_SSC_RXFCON 0x%08x\n", SSC_READ_REG(port->mapbase + IFX_SSC_RXFCON));
        off += sprintf(page + off, "IFX_SSC_TXFCON 0x%08x\n", SSC_READ_REG(port->mapbase + IFX_SSC_TXFCON));
        off += sprintf(page + off, "IFX_SSC_BR 0x%08x\n", SSC_READ_REG(port->mapbase + IFX_SSC_BR));
        off += sprintf(page + off, "IFX_SSC_SFCON 0x%08x\n", SSC_READ_REG(port->mapbase + IFX_SSC_SFCON));
        off += sprintf(page + off, "IFX_SSC_SFSTAT 0x%08x\n", SSC_READ_REG(port->mapbase + IFX_SSC_SFSTAT));
        off += sprintf(page + off, "IFX_SSC_GPOCON 0x%08x\n", SSC_READ_REG(port->mapbase + IFX_SSC_GPOCON));
        off += sprintf(page + off, "IFX_SSC_GPOSTAT 0x%08x\n", SSC_READ_REG(port->mapbase + IFX_SSC_GPOSTAT));
        off += sprintf(page + off, "IFX_SSC_RXREQ 0x%08x\n", SSC_READ_REG(port->mapbase + IFX_SSC_RXREQ));
        off += sprintf(page + off, "IFX_SSC_RXCNT 0x%08x\n", SSC_READ_REG(port->mapbase + IFX_SSC_RXCNT));
        off += sprintf(page + off, "IFX_SSC_DMACON 0x%08x\n", SSC_READ_REG(port->mapbase + IFX_SSC_DMACON));
        off += sprintf(page + off, "IFX_SSC_IRN_EN 0x%08x\n", SSC_READ_REG(port->mapbase + IFX_SSC_IRN_EN));
        off += sprintf(page + off, "IFX_SSC_IRN_CR 0x%08x\n", SSC_READ_REG(port->mapbase + IFX_SSC_IRN_CR));
        off += sprintf(page + off, "IFX_SSC_IRN_ICR 0x%08x\n", SSC_READ_REG(port->mapbase + IFX_SSC_IRN_ICR));
        off += sprintf(page + off, "\n");
        local_irq_restore(flags);

        /* Per device statistics */
        SSC_SEM_LOCK(port->dev_sem);
        TAILQ_FOREACH(dev, &port->ssc_devq, dev_entry){
            off += sprintf(page + off, "Device %s:\n", dev->dev_name);
            off += sprintf(page + off, "Rx Bytes %d\n", dev->stats.rxBytes);
            off += sprintf(page + off, "Tx Bytes %d\n", dev->stats.txBytes);
        }
        SSC_SEM_UNLOCK(port->dev_sem);
    }
    *eof = 1;
    return off;
}

/**
 * Setup dma direction and enable/disable
 *
 * \param port Pointer to structure #ifx_ssc_port
 * \param dir DMA Direction, tx/rx
 * \param enabled DMA enable/disable
 * \return none
 */
static
void ifx_ssc_dma_setup(struct ifx_ssc_port *port, int dir, int enabled)
{
    u32 reg = 0;

    /* Read-modify-write of DMACON must be atomic w.r.t. SSC interrupts */
    SSC_IRQ_LOCK(port);
    reg = SSC_READ_REG(port->mapbase + IFX_SSC_DMACON);
    if (dir == IFX_SSC_DIR_RX) {/* RX */
        if (enabled) {
            reg |= IFX_SSC_DMACON_RXON;
        }
        else {
            reg &= ~IFX_SSC_DMACON_RXON;
        }
    }
    else {
        if (enabled) {
            reg |= IFX_SSC_DMACON_TXON;
        }
        else {
            reg &= ~IFX_SSC_DMACON_TXON;
        }
    }
    SSC_WRITE_REG(reg, port->mapbase + IFX_SSC_DMACON);
    SSC_IRQ_UNLOCK(port);
}

/**
 * DMA interrupt received, this function calls to reschedule or wake up sleep
 * kernel thread
 *
 * \param port Pointer to structure #ifx_ssc_port
 * \return none
 */
static INLINE void ifx_ssc_dma_irq_finished(struct ifx_ssc_port *port)
{
    /*
     * Reset the flag that we are waiting for the DMA to complete
     * This flag should be reset before the following stuff, otherwise
     * start_tasklet will stop
     */
    atomic_set(&port->dma_wait_state, 0);
#ifdef SSC_ASYNCHRONOUS_SUPPORT
    ifx_ssc_start_tasklet(port);
#else
    SSC_WAKEUP_EVENT(port->ssc_thread_wait, SSC_THREAD_EVENT, port->event_flags);
#endif
}

/**
 * Pseudo Interrupt handler for DMA.
 * This function processes DMA interrupts notified to the SSC device driver.
 * It is installed at the DMA core as interrupt handler for the SSC DMA device
 * and handles the following DMA interrupts:
 * - In case of a DMA receive interrupt the received data is passed to the
 * upper layer.
* * \param dma_dev pointer to DMA device structure * \param status type of interrupt being notified (RCV_INT: DMA receive * interrupt, TX_BUF_FULL_INT: transmit buffer full interrupt, * TRANSMIT_CPT_INT: transmission complete interrupt) * \return OK In case of successful data reception from DMA */ static int ifx_ssc_dma_int_handler( struct dma_device_info* dma_dev, int status ) { int i; struct ifx_ssc_port *port; port = (struct ifx_ssc_port *) dma_dev->priv; IFX_SSC_PRINT(port, SSC_MSG_INT, "Interrupt status %d\n", status); switch(status) { case RCV_INT: IFX_SSC_PRINT(port, SSC_MSG_INT, "RCV_INT\n"); ifx_ssc_dma_setup(port, IFX_SSC_DIR_RX, IFX_SSC_DMA_DISABLE); dma_device_clear_int(dma_dev, IFX_SSC_DIR_RX); /* XXX, where is the best place because of HW limitation * NB, for very low baudrate device such as 20KHz, this wait is * needed. */ ifx_ssc_wait_finished(port); /* Disable SPI DMA Rx channel */ (dma_dev->rx_chan[dma_dev->current_rx_chan])->close(dma_dev->rx_chan[dma_dev->current_rx_chan]); /* * HW WAR, if packet length is indivisible by 4, last 1~3 bytes, * special swapping and memcpy involved. 
*/ { char *p; int i, j, k; i = port->actual_rx_len >> 2; j = port->actual_rx_len & 0x3; if (j != 0) { /* Hit last 1~3 byte case */ p = port->dma_rxbuf + (i << 2); for (k = 0; k < j; k++) { p[k] = p[4 - j + k]; } memcpy((char *)port->rxbuf_ptr, port->dma_rxbuf, port->actual_rx_len); /* Reset these for FIFO usage */ port->rxbuf_ptr = NULL; port->actual_rx_len = 0; } } ifx_ssc_dma_irq_finished(port); break; case TX_BUF_FULL_INT: for(i = 0; i < dma_dev->max_tx_chan_num; i++) { if((dma_dev->tx_chan[i])->control == AMAZON_S_DMA_CH_ON) { dma_dev->tx_chan[i]->enable_irq(dma_dev->tx_chan[i]); } } IFX_SSC_PRINT(port, SSC_MSG_INT, "TX_BUF_FULL_INT\n"); break; case TRANSMIT_CPT_INT: for(i = 0; i< dma_dev->max_tx_chan_num; i++) { dma_dev->tx_chan[i]->disable_irq(dma_dev->tx_chan[i]); } IFX_SSC_PRINT(port, SSC_MSG_INT, "TRANSMIT_CPT_INT\n"); /* XXX, where is the best place because of HW limitation * NB, all cases should wait because it just indicates DMA has finished * transfered data to SSC FIFO, but the data could be still in SSC FIFO. */ ifx_ssc_wait_finished(port); ifx_ssc_dma_irq_finished(port); break; default: printk(KERN_ERR "%s unknow interrupt %d\n", __func__, status); break; } return 0; } /** * Allocates buffer for SSC DMA. * This function is installed as DMA callback function to be called when the DMA * needs to allocate a new buffer. * * \param len Length of packet * \param *byte_offset Pointer to byte offset * \param **opt unused * \return NULL In case of buffer allocation fails * \return buffer Pointer to allocated memory */ static u8* ifx_ssc_dma_buffer_alloc(int len, int* byte_offset, void** opt) { return NULL; } /** * Free DMA buffer. * This function frees a buffer previously allocated by the DMA. * * \param *dataptr Pointer to data buffer * \param *opt unused * \return 0 OK */ static int ifx_ssc_dma_buffer_free(u8* dataptr,void* opt) { /* SPI will be in charge of memory free if necessary */ return 0; } /** * Initialize SSC DMA device. 
 * This function initializes the passed DMA device structure for usage as
 * SSC DMA device.
 *
 * \param line SSC device (0)
 * \param *dma_dev Pointer to dma device structure to be initialized
 * \return 0 OK
 */
static int ifx_ssc_init_dma_device(int line, struct dma_device_info *dma_dev)
{
    int i;

    dma_dev->priv = &isp[line];
    dma_dev->num_tx_chan = DEFAULT_SSC_TX_CHANNEL_NUM;
    dma_dev->num_rx_chan = DEFAULT_SSC_RX_CHANNEL_NUM;
    dma_dev->tx_burst_len = DEFAULT_SSC_TX_BURST_LEN;
    dma_dev->rx_burst_len = DEFAULT_SSC_RX_BURST_LEN;
    for(i = 0; i < dma_dev->num_tx_chan; i++){
        dma_dev->tx_chan[i]->desc_len = DEFAULT_SSC_TX_CHANNEL_DESCR_NUM;
        dma_dev->tx_chan[i]->control = AMAZON_S_DMA_CH_ON;
        dma_dev->tx_chan[i]->packet_size = DEFAULT_SSC_FRAGMENT_SIZE;
    }
    for(i = 0; i < dma_dev->num_rx_chan; i++){
        dma_dev->rx_chan[i]->desc_len = DEFAULT_SSC_RX_CHANNEL_DESCR_NUM;
        dma_dev->rx_chan[i]->packet_size = DEFAULT_SSC_FRAGMENT_SIZE;
        dma_dev->rx_chan[i]->control = AMAZON_S_DMA_CH_ON;
        dma_dev->rx_chan[i]->byte_offset= 0;
    }
    dma_dev->current_tx_chan = 0;
    dma_dev->current_rx_chan = 0;
    /*
     * set DMA handler functions for rx-interrupts,
     * buffer allocation and release
     */
    dma_dev->intr_handler = ifx_ssc_dma_int_handler;
    dma_dev->buffer_alloc = ifx_ssc_dma_buffer_alloc;
    dma_dev->buffer_free = ifx_ssc_dma_buffer_free;
    dma_device_register(dma_dev);
    return 0;
}

/* GPIO pin in the global view, that is, dont care of port
 * XXX, platform-specific stuff
 */
#define IFX_SSC_SPI_DIN 16
#define IFX_SSC_SPI_DOUT 17
#define IFX_SSC_SPI_CLK 18

/**
 * GPIO Init.
 * Initialize MUX settings to enable SPI interface
 */
static INLINE void ifx_ssc_gpio_init(void)
{
    /* GPIO module will report error once conflict exists */
    IFX_SSC_PIN_RESERVE(IFX_SSC_SPI_DIN);
    IFX_SSC_PIN_RESERVE(IFX_SSC_SPI_DOUT);
    IFX_SSC_PIN_RESERVE(IFX_SSC_SPI_CLK);

    /* p1.6 SPI_CS2(SFLASH), p1.0 SPI_DIN, p1.1 SPI_DOUT, p1.2 SPI_CLK */
    /* DIN: input, alternate function 01 (ALTSEL0=1, ALTSEL1=0) */
    IFX_SSC_DIR_IN(IFX_SSC_SPI_DIN);
    IFX_SSC_ALTSEL0_SET(IFX_SSC_SPI_DIN);
    IFX_SSC_ALTSEL1_CLR(IFX_SSC_SPI_DIN);

    /* DOUT: output, alternate function 01, open drain */
    IFX_SSC_DIR_OUT(IFX_SSC_SPI_DOUT);
    IFX_SSC_ALTSEL0_SET(IFX_SSC_SPI_DOUT);
    IFX_SSC_ALTSEL1_CLR(IFX_SSC_SPI_DOUT);
    IFX_SSC_OD_SET(IFX_SSC_SPI_DOUT);

    /* CLK: output, alternate function 01, open drain */
    IFX_SSC_DIR_OUT(IFX_SSC_SPI_CLK);
    IFX_SSC_ALTSEL0_SET(IFX_SSC_SPI_CLK);
    IFX_SSC_ALTSEL1_CLR(IFX_SSC_SPI_CLK);
    IFX_SSC_OD_SET(IFX_SSC_SPI_CLK);
}

/**
 * GPIO release.
 * Release reserved gpio resource so that other module could use it
 */
static INLINE void ifx_ssc_gpio_release(void)
{
    IFX_SSC_PIN_FREE(IFX_SSC_SPI_DIN);
    IFX_SSC_PIN_FREE(IFX_SSC_SPI_DOUT);
    IFX_SSC_PIN_FREE(IFX_SSC_SPI_CLK);
}

/**
 * Rx/Tx mode set.
 * Set the transmission mode while SSC is idle
 *
 * \param port Pointer to structure #ifx_ssc_port
 * \param val Rx/Tx mode
 * \return 0 OK
 * \return -EINVAL Invalid parameters supplied
 * \return -EBUSY Transmission or reception ongoing
 */
static INLINE int ifx_ssc_rxtx_mode_set(struct ifx_ssc_port *port, unsigned int val)
{
    u32 reg;

    if (!(port) || (val & ~(IFX_SSC_MODE_MASK))) {
        return -EINVAL;
    }
    /* check BUSY and RXCNT */
    if ((SSC_READ_REG(port->mapbase + IFX_SSC_STATE) & IFX_SSC_STATE_BUSY)) {
        printk(KERN_ERR "%s state busy\n", __func__);
        return -EBUSY;
    }
    if ((SSC_READ_REG(port->mapbase + IFX_SSC_RXCNT) & IFX_SSC_RXCNT_TODO_MASK)) {
        printk(KERN_ERR "%s rx todo busy\n", __func__);
        return -EBUSY;
    }
    SSC_IRQ_LOCK(port);
    /* Clear both RX_OFF/TX_OFF bits, then apply the requested mode */
    reg = (SSC_READ_REG(port->mapbase + IFX_SSC_CON) &
        ~(IFX_SSC_CON_RX_OFF | IFX_SSC_CON_TX_OFF)) | (val);
    SSC_WRITE_REG(reg, port->mapbase + IFX_SSC_CON);
    port->opts.modeRxTx = val;
    SSC_IRQ_UNLOCK(port);
    return 0;
}

/**
 * Display SSC driver version after initilazation succeeds
 */
static INLINE void ifx_show_version(void)
{
    printk(KERN_INFO "Infineon Technologies Synchronous Serial Controller (SSC) driver version %s\n",
        IFX_SSC_DRV_VERSION );
}

/**
 * SSC set hardware options.
 * This routine initializes the SSC appropriately depending on slave/master and
 * full-/half-duplex mode. It assumes that the SSC is disabled and the fifo's
 * and buffers are flushed later on.
 *
 * \param port Pointer to structure #ifx_ssc_port
 * \return 0 OK
 * \return -EINVAL Invalid hardware options supplied
 */
static int ifx_ssc_sethwopts(struct ifx_ssc_port *port)
{
    unsigned long bits;
    struct ifx_ssc_hwopts *opts = &port->opts;

    /* sanity checks */
    if ((opts->dataWidth < IFX_SSC_MIN_DATA_WIDTH) ||
        (opts->dataWidth > IFX_SSC_MAX_DATA_WIDTH)) {
        printk(KERN_ERR "%s: sanity check failed\n", __func__);
        return -EINVAL;
    }
    /* Assemble the CON register image bit by bit from the options */
    bits = (opts->dataWidth - 1) << IFX_SSC_CON_DATA_WIDTH_OFFSET;
    bits |= IFX_SSC_CON_ENABLE_BYTE_VALID;
    if (opts->rxOvErrDetect)
        bits |= IFX_SSC_CON_RX_OFL_CHECK;
    if (opts->rxUndErrDetect)
        bits |= IFX_SSC_CON_RX_UFL_CHECK;
    if (opts->txOvErrDetect)
        bits |= IFX_SSC_CON_TX_OFL_CHECK;
    if (opts->txUndErrDetect)
        bits |= IFX_SSC_CON_TX_UFL_CHECK;
    if (opts->loopBack)
        bits |= IFX_SSC_CON_LOOPBACK_MODE;
    if (opts->echoMode)
        bits |= IFX_SSC_CON_ECHO_MODE_ON;
    if (opts->headingControl)
        bits |= IFX_SSC_CON_MSB_FIRST;
    if (opts->clockPhase)
        bits |= IFX_SSC_CON_LATCH_THEN_SHIFT;
    if (opts->clockPolarity)
        bits |= IFX_SSC_CON_CLOCK_FALL;
    switch (opts->modeRxTx) {
    case IFX_SSC_MODE_TX:
        bits |= IFX_SSC_CON_RX_OFF;
        break;
    case IFX_SSC_MODE_RX:
        bits |= IFX_SSC_CON_TX_OFF;
        break;
    }
    SSC_WRITE_REG(bits, port->mapbase + IFX_SSC_CON);
    SSC_WRITE_REG((port->opts.gpoCs << IFX_SSC_GPOCON_ISCSB0_POS) |
        (port->opts.gpoInv << IFX_SSC_GPOCON_INVOUT0_POS),
        port->mapbase + IFX_SSC_GPOCON);
    /* TODO: disable cs */
    SSC_WRITE_REG(port->opts.gpoCs << IFX_SSC_WHBGPOSTAT_SETOUT0_POS,
        port->mapbase + IFX_SSC_WHBGPOSTAT);
    if (opts->masterSelect) {
        SSC_WRITE_REG(IFX_SSC_WHBSTATE_SET_MASTER_SELECT, port->mapbase + IFX_SSC_WHBSTATE);
    }
    else {
        SSC_WRITE_REG(IFX_SSC_WHBSTATE_CLR_MASTER_SELECT, port->mapbase + IFX_SSC_WHBSTATE);
    }
    /* init serial framing */
    SSC_WRITE_REG(0, port->mapbase + IFX_SSC_SFCON);
    /* set up the port pins */
    ifx_ssc_gpio_init();
    return 0;
}

/**
 * Chip select enable.
 * This function sets the given chip select for SSC0 to low.
 *
 * \param pin Selected CS pin
 * \return 0 OK
 * \return -EINVAL Invalid GPIO pin provided
 */
INLINE int amazon_s_ssc_cs_low(u32 pin)
{
    int ret;
    /* XXX, how to determine */
    struct ifx_ssc_port *port = &isp[0];

    if (pin > IFX_SSC_MAX_GPO_OUT) {
        ret = -EINVAL;
    }
    else {
        /* WHBGPOSTAT clear-bit drives the CS output low (active) */
        SSC_WRITE_REG(1 << (pin + IFX_SSC_WHBGPOSTAT_CLROUT0_POS),
            port->mapbase + IFX_SSC_WHBGPOSTAT);
        wmb(); /* Make sure the CS change hits the bus before returning */
        ret = 0;
    }
    return ret;
}
EXPORT_SYMBOL(amazon_s_ssc_cs_low);

/**
 * Chip select disable.
 * This function sets the given chip select for SSC0 to high.
 *
 * \param pin Selected CS pin
 * \return 0 OK
 * \return -EINVAL Invalid GPIO pin provided
 */
INLINE int amazon_s_ssc_cs_high(u32 pin)
{
    int ret;
    struct ifx_ssc_port *port = &isp[0];

    if (pin > IFX_SSC_MAX_GPO_OUT) {
        ret = -EINVAL;
    }
    else {
        /* WHBGPOSTAT set-bit drives the CS output high (inactive) */
        SSC_WRITE_REG(1 << (pin + IFX_SSC_WHBGPOSTAT_SETOUT0_POS),
            port->mapbase + IFX_SSC_WHBGPOSTAT);
        wmb();
        ret = 0;
    }
    return ret;
}
EXPORT_SYMBOL(amazon_s_ssc_cs_high);

/**
 * FIFO Receive data handler.
 * This function processes received data. It will read data from the FIFO and
 * copy it to the receive buffer.
 *
 * \param port Pointer to structure #ifx_ssc_port
 * Description:
 * In Tx/Rx mode, to void memory copy, where rx data starts must be determined
 * To this end, rx_start_position will trace rx data starting position. It involves
 * several special cases
 * 1) If txsize is divisable by 4, all tx data will be skipped.
 * 2) If txsize is not divisable by 4,including less than 4 bytes. The left 1~3 bytes
 * have to do swap.
 */
static void ifx_ssc_start_rxfifo(struct ifx_ssc_port *port)
{
    int fifo_fill_lev, bytes_in_buf, i, j;
    unsigned long tmp_val;
    unsigned int rx_valid_cnt;
    unsigned long tx_dummy = 0;

    /* i = remainder of the tx length; decides the 1~3 byte swap case below */
    i = port->actual_tx_len & 0x3;
    /* number of words waiting in the RX FIFO */
    fifo_fill_lev = (SSC_READ_REG(port->mapbase + IFX_SSC_FSTAT) &
        IFX_SSC_FSTAT_RECEIVED_WORDS_MASK) >> IFX_SSC_FSTAT_RECEIVED_WORDS_OFFSET;
    // Note: There are always 32 bits in a fifo-entry except for the last
    // word of a contigous transfer block and except for not in rx-only
    // mode and CON.ENBV set. But for this case it should be a convention
    // in software which helps:
    // In tx or rx/tx mode all transfers from the buffer to the FIFO are
    // 32-bit wide, except for the last three bytes, which could be a
    // combination of 16- and 8-bit access.
    // => The whole block is received as 32-bit words as a contigous stream,
    // even if there was a gap in tx which has the fifo run out of data!
    // Just the last fifo entry *may* be partially filled (0, 1, 2 or 3 bytes)!
    bytes_in_buf = port->txrx_len;
    /* transfer with 32 bits per entry */
    while ((bytes_in_buf >= 4) && (fifo_fill_lev > 0)) {
        /* Skip the first tx parts which are divisible by 4 */
        if (port->tx_counter_for_rx > 0) {
            /* Discard echo of our own tx data */
            tx_dummy = SSC_READ_REG(port->mapbase + IFX_SSC_RB);
            port->tx_counter_for_rx--;
            port->rx_start_position += 4;
        }
        else if (i != 0) { /* last 1 ~ 3 byte belongs to rx */
            /* Boundary word: high bytes are tx echo, low (4-i) bytes are rx */
            tmp_val = SSC_READ_REG(port->mapbase + IFX_SSC_RB);
            for (j = 0; j < 4 - i; j++) {
                *port->rxbuf_ptr = (tmp_val >> (8 * ( 4 - i - j - 1))) & 0xff;
                port->rxbuf_ptr++;
            }
            port->rx_start_position += i;
        }
        else {
            /* Pure rx word: store it as-is */
            *(u32 *) port->rxbuf_ptr = SSC_READ_REG(port->mapbase + IFX_SSC_RB);
            port->rxbuf_ptr += 4;
        }
        fifo_fill_lev--;
        bytes_in_buf -= 4;
        port->txrx_len -= 4;
    }
    /* now do the rest as mentioned in STATE.RXBV */
    while ((bytes_in_buf > 0) && (fifo_fill_lev > 0)) {
        rx_valid_cnt = (SSC_READ_REG(port->mapbase + IFX_SSC_STATE) &
            IFX_SSC_STATE_RX_BYTE_VALID_MASK) >> IFX_SSC_STATE_RX_BYTE_VALID_OFFSET;
        if (rx_valid_cnt == 0)
            break;
        if (rx_valid_cnt > bytes_in_buf) {
            // ### TO DO: warning message: not block aligned data, other data
            // in this entry will be lost
            rx_valid_cnt = bytes_in_buf;
        }
        tmp_val = SSC_READ_REG(port->mapbase + IFX_SSC_RB);
        for (i = 0; i < rx_valid_cnt; i++) {
            /* Only bytes beyond the tx echo belong to the rx buffer */
            if (port->rx_start_position >= port->actual_tx_len) {
                *port->rxbuf_ptr = (tmp_val >> (8 * (rx_valid_cnt - i - 1))) & 0xff;
                port->rxbuf_ptr++;
            }
            else {
                port->rx_start_position++;
            }
            bytes_in_buf--;
            port->txrx_len--;
        }
    }
    /* In rx-only mode the master must request further rx clocks explicitly */
    if ((port->opts.modeRxTx == IFX_SSC_MODE_RX) &&
        (SSC_READ_REG(port->mapbase + IFX_SSC_RXCNT) == 0)) {
        if (port->txrx_len < IFX_SSC_RXREQ_BLOCK_SIZE) {
            SSC_WRITE_REG(port->txrx_len << IFX_SSC_RXREQ_RXCOUNT_OFFSET,
                port->mapbase +IFX_SSC_RXREQ);
        }
        else {
            SSC_WRITE_REG(IFX_SSC_RXREQ_BLOCK_SIZE << IFX_SSC_RXREQ_RXCOUNT_OFFSET,
                port->mapbase + IFX_SSC_RXREQ);
        }
    }
}

/**
 * FIFO transmit data .
* This function copies remaining data in the transmit buffer into the FIFO * * \param port Pointer to structure #ifx_ssc_port * Description: * If txsize is not equal to zero, ssc driver will generate dummy data according * to different cases. * If txsize is equal to zero, just send dummy data whose length is equal to * rxsize for clock generation. */ static void ifx_ssc_start_txfifo(struct ifx_ssc_port *port) { int fifo_space, fill, i, j; u32 tx_dummy = 0; j = port->actual_tx_len & 0x3; fifo_space = ((SSC_READ_REG(port->mapbase + IFX_SSC_ID) & IFX_SSC_PERID_TXFS_MASK) >> IFX_SSC_PERID_TXFS_OFFSET) - ((SSC_READ_REG(port->mapbase + IFX_SSC_FSTAT) & IFX_SSC_FSTAT_TRANSMIT_WORDS_MASK) >> IFX_SSC_FSTAT_TRANSMIT_WORDS_OFFSET); if (fifo_space == 0) { return; } fill = port->txbuf_end - port->txbuf_ptr; if (fill > fifo_space * 4) fill = fifo_space * 4; for (i = 0; i < fill / 4; i++) { if (port->tx_counter_for_tx > 0) { /* at first 32 bit access */ SSC_WRITE_REG(*(u32 *) port->txbuf_ptr, port->mapbase + IFX_SSC_TB); port->tx_counter_for_tx--; port->tx_end_position += 4; port->txbuf_ptr += 4; } else if (j != 0) { /* 1~3 bytes, tmp buffer to generate dummy data */ int k; u8 tbuf[4] = {0}; /* XXX, Seperate transmission doesn't work; combine data and dummy into * into one 32bit data. 
*/ for (k = 0; k < j; k++) { tbuf[k] = *(u8*)port->txbuf_ptr; port->txbuf_ptr ++; } SSC_WRITE_REG(*(u32 *) tbuf, port->mapbase + IFX_SSC_TB); port->txbuf_ptr += 4 - j; /* Totally move 4 bytes of address */ port->tx_end_position += j; } else { port->txbuf_ptr += 4; SSC_WRITE_REG(tx_dummy, port->mapbase + IFX_SSC_TB); } } fifo_space -= fill / 4; fill &= 0x3; if ((fifo_space > 0) & (fill > 1)) { if (port->tx_end_position < port->actual_tx_len) { if ((port->actual_tx_len - port->tx_end_position) >= 2) { /* trailing 16 bit access */ WRITE_PERIPHERAL_REGISTER_16(*(u16 *) port->txbuf_ptr, port->mapbase + IFX_SSC_TB); port->tx_end_position += 2; } else { u8 tbuf[2] = {0}; /* XXX, combine into 16 bit data */ tbuf[0] = *(u8 *) port->txbuf_ptr; WRITE_PERIPHERAL_REGISTER_16(*(u16*)tbuf, port->mapbase + IFX_SSC_TB); port->tx_end_position += 1; } } else { WRITE_PERIPHERAL_REGISTER_16((u16)tx_dummy, port->mapbase + IFX_SSC_TB); } port->txbuf_ptr += 2; fifo_space--; fill -= 2; } if ((fifo_space > 0) & (fill > 0)) { if (port->tx_end_position < port->actual_tx_len) { /* trailing 8 bit access */ WRITE_PERIPHERAL_REGISTER_8(*(u8 *) port->txbuf_ptr, port->mapbase + IFX_SSC_TB); port->tx_end_position += 1; } else { WRITE_PERIPHERAL_REGISTER_8((u8)tx_dummy, port->mapbase + IFX_SSC_TB); } port->txbuf_ptr++; fifo_space--; fill -= 1; } } /** * SSC Rx FIFO operation preparation. 
 *
 * \param port Pointer to structure ## struct ifx_ssc_port
 * \param rxbuf Pointer to receive buffer
 * \param rxsize Length of receive buffer
 * \return >0 Number of bytes received
 * \return -EFAULT Invalid txrx mode
 */
static ssize_t ifx_ssc_rxfifo_prepare(struct ifx_ssc_port *port, char *rxbuf, size_t rxsize)
{
    ssize_t ret_val;

    if (port->opts.modeRxTx == IFX_SSC_MODE_TX) {
        return -EFAULT;
    }
    SSC_IRQ_LOCK(port);
    port->rxbuf_ptr = rxbuf;
    port->rxbuf_end = rxbuf + rxsize;
    port->rx_start_position = 0; /* Every transaction, initialized once */
    port->tx_end_position = 0;
    /* TXRX in poll mode, rxsize and txsize determines when the transaction will end */
    while (port->txrx_len > 0) {
        /* Keep the tx FIFO fed while draining the rx FIFO */
        if (port->txbuf_ptr < port->txbuf_end) {
            ifx_ssc_start_txfifo(port);
        }
        ifx_ssc_start_rxfifo(port);
    }
    /* Bytes actually delivered to the caller's buffer */
    ret_val = port->rxbuf_ptr - rxbuf;
    SSC_IRQ_UNLOCK(port);
    return (ret_val);
}

/**
 * SSC Tx FIFO operation preparation.
 *
 * \param port Pointer to structure #ifx_ssc_port
 * \param txbuf Pointer to transmit buffer
 * \param txsize Length of transmit buffer
 * \return >0 Number of bytes accepted for transmission
 * \return -EFAULT Invalid txrx mode
 */
static ssize_t ifx_ssc_txfifo_prepare (struct ifx_ssc_port *port, const char *txbuf, size_t txsize)
{
    if (port->opts.modeRxTx == IFX_SSC_MODE_RX) {
        return -EFAULT;
    }
    SSC_IRQ_LOCK(port);
    port->txbuf_ptr = (char *)txbuf;
    port->txbuf_end = (char *)txbuf + txsize;
    /* Start the transmission */
    if (port->opts.modeRxTx == IFX_SSC_MODE_TX) {
        ifx_ssc_start_txfifo(port);
    }
    SSC_IRQ_UNLOCK(port);
    return txsize;
}

/**
 * SSC set baudrate.
 * Sets the baudrate of the corresponding port according to the passed
 * rate after reading out the current module speed.
* * \param port Pointer to structure #ifx_ssc_port * \param baudrate Desired baudrate * \return 0 OK * \return -EINVAL Could not retrieve system clock or invalid baudrate setting */ static int ifx_ssc_set_baudrate (struct ifx_ssc_port *port, unsigned int baudrate) { unsigned int ifx_ssc_clock; unsigned int br; int enabled; unsigned long flags; if (port->prev_baudrate == baudrate) return 0; ifx_ssc_clock = ifx_ssc_get_kernel_clk(port); if (ifx_ssc_clock == 0) { return -EINVAL; } local_irq_save(flags); /* have to disable the SSC to set the baudrate */ enabled = (SSC_READ_REG(port->mapbase + IFX_SSC_STATE) & IFX_SSC_STATE_IS_ENABLED) != 0; SSC_WRITE_REG(IFX_SSC_WHBSTATE_CLR_ENABLE, port->mapbase + IFX_SSC_WHBSTATE); /* compute divider */ br = (((ifx_ssc_clock >> 1) + baudrate / 2) / baudrate) - 1; asm ("SYNC"); if (br > 0xffff ||((br == 0) && ((SSC_READ_REG(port->mapbase + IFX_SSC_STATE) & IFX_SSC_STATE_IS_MASTER) == 0))) { local_irq_restore(flags); printk(KERN_ERR "%s: illegal baudrate %u br %d\n", __func__, baudrate, br); return -EINVAL; } SSC_WRITE_REG(br, port->mapbase + IFX_SSC_BR); if (enabled) { SSC_WRITE_REG(IFX_SSC_WHBSTATE_SET_ENABLE, port->mapbase + IFX_SSC_WHBSTATE); } local_irq_restore(flags); port->prev_baudrate = baudrate; return 0; } /** * SSC hardware initialization. * Initializes the SSC port hardware with the desired baudrate and transmission * options. 
 *
 * \param port Pointer to structure #ifx_ssc_port
 * \return 0       OK
 * \return -EINVAL Error during initialization
 */
static int
ifx_ssc_hwinit (struct ifx_ssc_port *port)
{
    int enabled;

    /* have to disable the SSC while reprogramming it */
    enabled = (SSC_READ_REG(port->mapbase + IFX_SSC_STATE)
               & IFX_SSC_STATE_IS_ENABLED) != 0;
    SSC_WRITE_REG(IFX_SSC_WHBSTATE_CLR_ENABLE,
                  port->mapbase + IFX_SSC_WHBSTATE);
    if (ifx_ssc_sethwopts(port) < 0) {
        /* NOTE(review): error paths return with the SSC left disabled */
        printk(KERN_ERR "%s: setting the hardware options failed\n",
               __func__);
        return -EINVAL;
    }
    if (ifx_ssc_set_baudrate(port, port->baudrate) < 0) {
        printk(KERN_ERR "%s: setting the baud rate failed\n", __func__);
        return -EINVAL;
    }
    /* TX FIFO: program interrupt trigger level, flush and enable */
    SSC_WRITE_REG((IFX_SSC_DEF_TXFIFO_FL << IFX_SSC_XFCON_ITL_OFFSET) |
                  IFX_SSC_XFCON_FIFO_FLUSH | IFX_SSC_XFCON_FIFO_ENABLE,
                  port->mapbase + IFX_SSC_TXFCON);
    /* RX FIFO: same treatment */
    SSC_WRITE_REG((IFX_SSC_DEF_RXFIFO_FL << IFX_SSC_XFCON_ITL_OFFSET) |
                  IFX_SSC_XFCON_FIFO_FLUSH | IFX_SSC_XFCON_FIFO_ENABLE,
                  port->mapbase + IFX_SSC_RXFCON);
    /* Restore the enable state found on entry */
    if (enabled) {
        SSC_WRITE_REG(IFX_SSC_WHBSTATE_SET_ENABLE,
                      port->mapbase + IFX_SSC_WHBSTATE);
    }
    return 0;
}

/**
 * Called to transmit/receive to/from SSC in one step using FIFO mode.
 * \param port   Pointer to structure #ifx_ssc_port
 * \param txbuf  Pointer to the data packet to transmit
 * \param txsize Amount of Bytes to transmit
 * \param rxbuf  Pointer to store the received data packet
 * \param rxsize Amount of Bytes to receive.
 * \return >= 0 Number of bytes received (if rxbuf != 0) or transmitted
 *         < 0  error number
 * Description:
 * 0. copy data to internal buffer
 * 1a. If SSC_SESSION_MODE_TXONLY, read txsize data
 * 2b.
If not Read back (txsize + rxsize) data */ static int ifx_ssc_txrx_fifo(struct ifx_ssc_port *port, char *txbuf, u32 txsize, char *rxbuf, u32 rxsize) { int ret = 0; int eff_size = 0; if (txbuf == NULL || txsize == 0) { if (rxbuf != NULL && rxsize != 0) { eff_size = rxsize; } } else if (rxbuf == NULL || rxsize == 0) { if (txbuf != NULL && txsize != 0) { eff_size = txsize; } } else { eff_size = txsize + rxsize; } port->actual_tx_len = txsize; port->actual_rx_len = rxsize; port->tx_counter_for_rx = txsize >> 2; port->tx_counter_for_tx = txsize >> 2; port->txrx_len = eff_size; ret = ifx_ssc_txfifo_prepare(port, txbuf, eff_size); if ( ret != eff_size ){ printk(KERN_ERR "ifx_ssc_txfifo_prepare return %d\n", ret); goto txrx_exit; } ret = ifx_ssc_rxfifo_prepare(port, rxbuf, eff_size); if ( ret != rxsize ){ printk(KERN_ERR "ifx_ssc_rxfifo_prepare return %d\n", ret); goto txrx_exit; } ret = eff_size; txrx_exit: if (ret < 0) { printk(KERN_ERR "%s failed\n", __func__); } return ret; } /** * Called to transmit data to SSC using FIFO mode . * \param port Pointer to structure #ifx_ssc_port * \param txbuf Pointer to the data packet to transmit * \param txsize Amount of Bytes to transmit * \return >= 0 Number of bytes transmitted * < 0 error number */ static INLINE int ifx_ssc_txfifo(struct ifx_ssc_port *port, char *txbuf, u32 txsize) { return ifx_ssc_txrx_fifo(port, txbuf, txsize, NULL, 0); } /** * Called to receive from SSC using FIFO mode . * \param port Pointer to structure #ifx_ssc_port * \param rxbuf Pointer to store the received data packet * \param rxsize Amount of Bytes to receive. * \return >= 0 Number of bytes received * < 0 error number */ static INLINE int ifx_ssc_rxfifo(struct ifx_ssc_port *port, char *rxbuf, u32 rxsize) { return ifx_ssc_txrx_fifo(port, NULL, 0, rxbuf, rxsize); } /** * SSC set ssc_ mode * Sets the spi mode of the corresponding device. SSC mode is per device * parameter. 
 * It is initialized during registration.
 *
 * \param dev Pointer to device
 * \return 0     OK
 * \return -EBUSY could not set ssc mode because the system is busy
 */
static INLINE int
ifx_ssc_set_spi_mode(ssc_device_t *dev)
{
    u32 reg;
    int val = 0;
    IFX_SSC_CONFIGURE_t *ssc_cfg;
    struct ifx_ssc_port *port;

    ssc_cfg = &dev->conn_id;
    port = dev->port;
    /* Mode unchanged since last call — nothing to do */
    if (port->prev_ssc_mode == ssc_cfg->ssc_mode)
        return 0;
    /* Refuse to switch CPOL/CPHA while a transfer is in flight */
    if ((SSC_READ_REG(port->mapbase + IFX_SSC_STATE) & IFX_SSC_STATE_BUSY)
        || (SSC_READ_REG(port->mapbase + IFX_SSC_RXCNT)
            & IFX_SSC_RXCNT_TODO_MASK)) {
        printk(KERN_ERR "%s failed to set spi mode\n", __func__);
        return -EBUSY;
    }
    /* Map SPI mode 0..3 onto the controller's PO/PH bits */
    switch(ssc_cfg->ssc_mode) {
        case IFX_SSC_MODE_0:
            val = IFX_SSC_CON_PH;
            break;
        case IFX_SSC_MODE_1:
            val = 0;
            break;
        case IFX_SSC_MODE_2:
            val = IFX_SSC_CON_PO | IFX_SSC_CON_PH;
            break;
        case IFX_SSC_MODE_3:
            val = IFX_SSC_CON_PO;
            break;
        default:
            /* Unknown mode falls back to val == 0 (mode 1 polarity) */
            break;
    }
    SSC_IRQ_LOCK(port);
    /* Module must be disabled while PO/PH are rewritten */
    SSC_WRITE_REG(IFX_SSC_WHBSTATE_CLR_ENABLE,
                  port->mapbase + IFX_SSC_WHBSTATE);
    reg = (SSC_READ_REG(port->mapbase + IFX_SSC_CON)
           & ~(IFX_SSC_CON_PO | IFX_SSC_CON_PH)) | (val);
    SSC_WRITE_REG(reg, port->mapbase + IFX_SSC_CON);
    SSC_WRITE_REG(IFX_SSC_WHBSTATE_SET_ENABLE,
                  port->mapbase + IFX_SSC_WHBSTATE);
    SSC_IRQ_UNLOCK(port);
    /* Cache so repeated calls with the same mode are cheap */
    port->prev_ssc_mode = ssc_cfg->ssc_mode;
    return 0;
}

/**
 * SSC set txrx direction if using FIFO mode.
 * To use FIFO for transmission and reception.
 * Firstly, rx/tx mode must be set.
 *
 * \param port Pointer to structure #ifx_ssc_port
 * \return none
 */
static INLINE void
ifx_ssc_txrx_setup( struct ifx_ssc_port *port)
{
    u32 reg;

    /* Disable DMA for TX and RX */
    SSC_IRQ_LOCK(port);
    reg = SSC_READ_REG(port->mapbase + IFX_SSC_DMACON);
    reg &= ~IFX_SSC_DMACON_TXON;
    reg &= ~IFX_SSC_DMACON_RXON;
    SSC_WRITE_REG(reg, port->mapbase + IFX_SSC_DMACON);
    SSC_IRQ_UNLOCK(port);
    /* Switch the port direction only if it is not already full duplex */
    if (port->opts.modeRxTx != IFX_SSC_MODE_RXTX) {
        ifx_ssc_rxtx_mode_set(port, IFX_SSC_MODE_RXTX);
    }
}

/**
 * SSC set Tx mode
 *
 * \param port Pointer to structure #ifx_ssc_port
 * \return none
 */
static INLINE void
ifx_ssc_tx_setup( struct ifx_ssc_port *port)
{
    if (port->opts.modeRxTx != IFX_SSC_MODE_TX) {
        ifx_ssc_rxtx_mode_set(port, IFX_SSC_MODE_TX);
    }
}

/**
 * SSC set Rx mode
 *
 * \param port Pointer to structure #ifx_ssc_port
 * \return none
 */
static INLINE void
ifx_ssc_rx_setup( struct ifx_ssc_port *port)
{
    if (port->opts.modeRxTx != IFX_SSC_MODE_RX) {
        ifx_ssc_rxtx_mode_set(port, IFX_SSC_MODE_RX);
    }
}

/**
 * SSC add queue entry to priority queue
 *
 * \param queue Pointer to structure #IFX_SSC_QUEUE_t
 * \return none
 */
static void
ifx_ssc_enqueue(IFX_SSC_QUEUE_t *queue)
{
    ssc_device_t *dev;
    struct ifx_ssc_port *port;

    KASSERT((queue != NULL), ("%s should never happen\n", __func__));
    dev = queue->dev;
    port = dev->port;
    IFX_SSC_PRINT(port, SSC_MSG_QUEUE, "%s dev %s prio %d enqueued\n",
                  __func__, dev->dev_name, dev->dev_prio);
    SSC_Q_LOCK_BH(port);
    /* Async clients share one queue; sync clients queue per priority */
    if (dev->dev_prio == IFX_SSC_PRIO_ASYNC) {
        TAILQ_INSERT_TAIL(&port->ssc_asyncq, queue, q_next);
    }
    else {
        TAILQ_INSERT_TAIL(&port->ssc_syncq[dev->dev_prio], queue, q_next);
    }
    SSC_Q_UNLOCK_BH(port);
}

/**
 * SSC remove queue entry from priority queue
 *
 * \param queue Pointer to structure #IFX_SSC_QUEUE_t
 * \return none
 */
static void
ifx_ssc_dequeue(IFX_SSC_QUEUE_t *queue)
{
    ssc_device_t *dev;
    struct ifx_ssc_port *port;

    KASSERT((queue != NULL), ("%s should never happen\n", __func__));
    dev = queue->dev;
    port = dev->port;
    IFX_SSC_PRINT(port, SSC_MSG_QUEUE, "%s dev %s prio %d dequeued\n",
                  __func__, dev->dev_name, dev->dev_prio);
    SSC_Q_LOCK_BH(port);
    /* Empty-check guards against removing an entry that was never queued */
    if (dev->dev_prio == IFX_SSC_PRIO_ASYNC) {
        if (!TAILQ_EMPTY(&port->ssc_asyncq)) {
            TAILQ_REMOVE(&port->ssc_asyncq, queue, q_next);
        }
    }
    else {
        if (!TAILQ_EMPTY(&port->ssc_syncq[dev->dev_prio])) {
            TAILQ_REMOVE(&port->ssc_syncq[dev->dev_prio], queue, q_next);
        }
    }
    SSC_Q_UNLOCK_BH(port);
}

/**
 * SSC chip select function, set spi mode, baudrate, call registered
 * device-specific cs set function.
 *
 * \param dev Pointer to structure #ssc_device_t
 * \return none
 */
static void
ifx_ssc_cs_lock(ssc_device_t *dev)
{
    IFX_SSC_CONFIGURE_t *ssc_cfg;
    struct ifx_ssc_port *port;

    ssc_cfg = &dev->conn_id;
    port = dev->port;
    IFX_SSC_PRINT(port, SSC_MSG_LOCK, "%s enter\n", __func__);
    /* Program per-device SPI mode and baudrate before asserting CS */
    ifx_ssc_set_spi_mode(dev);
    ifx_ssc_set_baudrate(port, ssc_cfg->baudrate);
    if (ssc_cfg->csset_cb != NULL) {
        ssc_cfg->csset_cb(IFX_SSC_CS_ON, ssc_cfg->cs_data);
    }
}

/**
 * SSC chip un select function, call registered device-specific cs reset
 * function.
 *
 * \param dev Pointer to structure #ssc_device_t
 * \return none
 */
static INLINE void
ifx_ssc_cs_unlock(ssc_device_t *dev)
{
    IFX_SSC_CONFIGURE_t *ssc_cfg;

    ssc_cfg = &dev->conn_id;
    IFX_SSC_PRINT(dev->port, SSC_MSG_LOCK, "%s exit\n", __func__);
    if(ssc_cfg->csset_cb != NULL) {
        ssc_cfg->csset_cb(IFX_SSC_CS_OFF, ssc_cfg->cs_data);
    }
}

/**
 *\fn int amazon_s_sscLock(IFX_SSC_HANDLE handler)
 *\brief Called to lock and reserve the whole SSC interface
 * for the given 'handler'
 *
 * The chipselect, belonging to this SSC session is already
 * activated. This means the chipselect callback is called.
 * After complete data transmission and reception, ifx_sscUnLock
 * has to be called to release the SSC interface again for
 * other clients.
* * \param handler Handle of the connection where to make the * configuration on * \return = 0 * \ingroup AMAZON_S_SSC_FUNCTIONS */ int amazon_s_sscLock(IFX_SSC_HANDLE handler) { ssc_device_t *dev; IFX_SSC_CONFIGURE_t *ssc_cfg; struct ifx_ssc_port *port; IFX_SSC_QUEUE_t *pqueue; KASSERT((handler != NULL), ("%s Invalid parameter\n", __func__)); dev = (ssc_device_t *)handler; pqueue = &dev->queue; ssc_cfg = &dev->conn_id; port = dev->port; if (port->lock_qentry == pqueue) { /* We hold the lock already -> nothing to request here! */ return (-1); } /* * Check if the queue entry of the ConnId is already queued with a request * but this request is not served yet. Every ConnId can only queue up one * request at one time. */ if (atomic_read(&pqueue->isqueued) == 1) { return (-1); } /* Place a lock request in the queue */ pqueue->request_lock = TRUE; atomic_set(&pqueue->isqueued, 1); /* Add queue entry to priority queue */ ifx_ssc_enqueue(pqueue); /* * If no async support is avaiable, trigger the SSC kernel thread and * wait pending till job is done. */ #ifdef SSC_ASYNCHRONOUS_SUPPORT IFX_SSC_PRINT(port, SSC_MSG_TASKLET, "%s raise fake interrupt\n", __func__); ifx_ssc_start_tasklet(port); #else IFX_SSC_PRINT(port, SSC_MSG_THREAD, "%s wake up ssc kernel thread\n", __func__); /* Kick off SSC kernel thread */ SSC_WAKEUP_EVENT(port->ssc_thread_wait, SSC_THREAD_EVENT, port->event_flags); #endif /* Wait till wake up from SSC kernel thread */ SSC_WAIT_EVENT(dev->dev_thread_wait, DEV_THREAD_EVENT, dev->event_flags); IFX_SSC_PRINT(port, SSC_MSG_THREAD, "%s wakeup received from ssc kernel thread\n", __func__); return 0; } EXPORT_SYMBOL (amazon_s_sscLock); /** *\fn int amazon_s_sscUnlock(IFX_SSC_HANDLE handler) * \brief This function releases the SSC lock that was placed before by calling ifx_SscLock. * * This function also inactivate the chipselect signal, which was set in ifx_SscLock. 
* * \param handler Handle of the connection where to make the * configuration on * \return = 0 * \ingroup AMAZON_S_SSC_FUNCTIONS */ int amazon_s_sscUnlock(IFX_SSC_HANDLE handler) { ssc_device_t *dev; IFX_SSC_CONFIGURE_t *ssc_cfg = NULL; struct ifx_ssc_port *port = NULL; KASSERT((handler != NULL), ("%s Invalid parameter\n", __func__)); dev = (ssc_device_t *)handler; ssc_cfg = &dev->conn_id; port = dev->port; if (port->lock_qentry != &dev->queue) { /* We do not hold the lock, therefore we can not release it! */ return -1; } /* Just forget about the lock, then the SSC driver would just take it * as a normel queue entry */ ifx_ssc_cs_unlock(dev); port->lock_qentry = NULL; return 0; } EXPORT_SYMBOL (amazon_s_sscUnlock); /** * \fn int amazon_s_sscSetBaud(IFX_SSC_HANDLE handler, unsigned int baud) * \brief Configures the Baudrate of a given connection. * * The baudrate can also be change multiple times * for a single connection. The baudrate change * will take place for the next call of ifx_SscTx, * ifx_SscRx or ifx_SscTxRx. * * \param handler Handle of the connection where to make the * configuration on * \param baud Baudrate to configure. This value can be rounded * during the calculation of the SSC clock divider * * \return = 0 OK * < 0 error number * \ingroup AMAZON_S_SSC_FUNCTIONS */ int amazon_s_sscSetBaud(IFX_SSC_HANDLE handler, unsigned int baud) { ssc_device_t *dev; IFX_SSC_CONFIGURE_t *ssc_cfg; /* Sanity check */ KASSERT((handler != NULL), ("%s Invalid parameter\n", __func__)); dev = (ssc_device_t *)handler; ssc_cfg = &dev->conn_id; /* XXX, protection because of being used in other places */ ssc_cfg->baudrate = baud; return 0; } EXPORT_SYMBOL (amazon_s_sscSetBaud); /** * Called to transmit the data using DMA mode . 
 *
 * \param port   Pointer to structure #ifx_ssc_port
 * \param txbuf  Pointer to the data packet to transmit
 * \param txsize Amount of Bytes to transmit
 * \return >= 0 Number of bytes transmitted
 *         < 0  error number
 */
static int
ifx_ssc_txdma(struct ifx_ssc_port *port, char *txbuf, int txsize)
{
    int i, j;
    int retval = 0;
    int k, m;
    char *p;
    char *pbuf;
    struct dma_device_info* dma_dev;

    ifx_ssc_dma_setup(port, IFX_SSC_DIR_TX, IFX_SSC_DMA_ENABLE);
    /*
     * HW WAR, last 1~3 byte must do swapping, but the wanted bytes could be
     * out of original tx buffer, so pre-allocated buffer used, and what's
     * more, memcpy involved.
     * XXX, actually, potential issues include data length which is a
     * multiple of DMA burst length.
     */
    i = txsize >> 2;  /* number of whole 32-bit words */
    j = txsize & 0x3; /* trailing byte count (0..3) */
    if (j != 0) {
        /* Hit last 1~3 byte case: copy into the bounce buffer and swap
         * the trailing bytes to the high end of the final word.
         */
        memcpy(port->dma_txbuf, txbuf, txsize);
        p = port->dma_txbuf + (i << 2);
        for (m = 0, k = j - 1; k >= 0; k--, m++) {
            p[4 - m - 1] = p[k]; /* Possibly out of range if no memory copy */
        }
        pbuf = port->dma_txbuf;
    }
    else {
        /* Aligned length — DMA straight from the caller's buffer */
        pbuf = txbuf;
    }
    dma_dev = port->dma_dev;
    /**
     * Set a flag that we are waiting for the DMA to complete. This flag
     * will be reset again in the DMA interrupt.
     * NB, it must be ahead of the following stuff, because once descriptor
     * is prepared, interrupt may come back immediately.
     */
    atomic_set(&port->dma_wait_state, 1);
    /**
     * Run in tasklet or kernel thread, DMA tasklet may run the same function
     * Lock must be used.
     */
    SSC_IRQ_LOCK(port);
    retval = dma_device_write(dma_dev, pbuf, txsize, NULL);
    SSC_IRQ_UNLOCK(port);
    KASSERT(retval == txsize, ("%s retval %d != txsize %d\n",
            __func__, retval, txsize));
    return retval;
}

/**
 * Called to receive the data using DMA mode .
 *
 * \param port   Pointer to structure #ifx_ssc_port
 * \param rxbuf  Pointer to the data packet to be received
 * \param rxsize Amount of Bytes to be received
 * \return >= 0 Number of bytes received
 *         < 0  error number
 */
static int
ifx_ssc_rxdma(struct ifx_ssc_port *port, char *rxbuf, int rxsize)
{
    char *pbuf;
    int dma_rxsize;
    struct dma_device_info* dma_dev;

    dma_dev = port->dma_dev;
    /*
     * Backup original buffer, so that later we can find it in dma handler
     * Borrow two variables from FIFO usage
     */
    port->rxbuf_ptr = rxbuf;
    port->actual_rx_len = rxsize;
    /*
     * HW WAR, last 1~3 byte must do swapping, but the wanted bytes could be
     * out of original rx buffer, so pre-allocated buffer used, and what's
     * more, memcpy involved.
     * XXX, actually, potential issues include data length which is a
     * multiple of DMA burst length.
     */
    if ((rxsize & 0x3)) { /* Can't be divisible by 4 */
        pbuf = port->dma_rxbuf;
        /* Round up one dword to make sure enough space */
        dma_rxsize = (rxsize & ~0x3) + 4;
        KASSERT(dma_rxsize <= DEFAULT_SSC_FRAGMENT_SIZE,
            ("%s fragment %d out of range\n", __func__, dma_rxsize));
    }
    else {
        pbuf = rxbuf;
        dma_rxsize = rxsize;
    }
    /* NB, DMA descriptor must be setup before request counter */
    dma_device_desc_setup(dma_dev, pbuf, dma_rxsize);
    ifx_ssc_dma_setup(port, IFX_SSC_DIR_RX, IFX_SSC_DMA_ENABLE);
    /* Enable SPI DMA channel */
    (dma_dev->rx_chan[dma_dev->current_rx_chan])->open(
        dma_dev->rx_chan[dma_dev->current_rx_chan]);
    /* Set a flag that we are waiting for the DMA to complete. This flag
     * will be reset again in the DMA interrupt.
     */
    atomic_set(&port->dma_wait_state, 1);
    /*
     * Set up request counter after DMA setting is ready,
     * Otherwise, receive overrun will happen.
     */
    SSC_IRQ_LOCK(port);
    if (rxsize < IFX_SSC_RXREQ_BLOCK_SIZE) {
        /* At least, rxsize will cause some issues, maybe
         * rxsize smaller than DMA burst len.
         */
        SSC_WRITE_REG(rxsize << IFX_SSC_RXREQ_RXCOUNT_OFFSET,
                      port->mapbase + IFX_SSC_RXREQ);
    }
    else {
        SSC_WRITE_REG(IFX_SSC_RXREQ_BLOCK_SIZE << IFX_SSC_RXREQ_RXCOUNT_OFFSET,
                      port->mapbase + IFX_SSC_RXREQ);
    }
    SSC_IRQ_UNLOCK(port);
    return rxsize;
}

/**
 *\fn int amazon_s_sscTxRx (IFX_SSC_HANDLE handler, char *txbuf, u32 txsize,
 *                          char *rxbuf, u32 rxsize)
 *\brief Called to transmit/receive to/from SSC in one step.
 * This means that the data transmission and reception is done in parallel.
 * No DMA is possible here. The SSC driver sets the chipselect when the
 * data transmission starts and resets it when the transmission is
 * completed. The transmit and receive buffer memory allocation and
 * de-allocation is done by the SSC client.
 *
 * \param handler Handle of the connection where to make the
 *                configuration on
 * \param txbuf   Pointer to the data packet to transmit
 * \param txsize  Amount of Bytes to transmit
 * \param rxbuf   Pointer to store the received data packet
 * \param rxsize  Amount of Bytes to receive.
 * \return >= 0 Number of bytes received (if rxbuf != 0) or transmitted
 *         < 0  error number
 * \ingroup AMAZON_S_SSC_FUNCTIONS
 */
int
amazon_s_sscTxRx(IFX_SSC_HANDLE handler, char *txbuf, u32 txsize,
                 char *rxbuf, u32 rxsize)
{
    ssc_device_t *dev;
    IFX_SSC_CONFIGURE_t *ssc_cfg;
    IFX_SSC_QUEUE_t *pqueue;
    struct ifx_ssc_port *port;

    /* Sanity check */
    KASSERT((handler != NULL), ("%s Invalid parameter\n", __func__));
    KASSERT(!((rxbuf == NULL) && (rxsize == 0)
              && (txbuf == NULL) && (txsize == 0)),
            ("%s Invalid parameter\n", __func__));
    dev = (ssc_device_t *)handler;
    port = dev->port;
    /* Synchronous API sleeps — forbid any interrupt context */
    if (in_interrupt()) {
        printk(KERN_ERR "%s can't be called in interupt context< irq, softirq, tasklet>\n", __func__);
        return 0;
    }
    ssc_cfg = &dev->conn_id;
    pqueue = &dev->queue;
    if (txsize > ssc_cfg->fragSize || rxsize > ssc_cfg->fragSize) {
        printk(KERN_ERR "%s Device driver must do its own fragmentation\n",
               __func__);
        return 0;
    }
    /*
     * Ensure that only synchronous SSC Handles could enqueue a
     * synchronous request. The parameter 'handle_type' is set during the
     * ConnId allocation process.
     */
    if (pqueue->handle_type != IFX_SSC_HANDL_TYPE_SYNC) {
        printk(KERN_ERR "%s must use sync handler\n", __func__);
        return 0;
    }
    /*
     * Check if the queue entry of the ConnId is already queued with a
     * request but this request is not served yet. Every ConnId can only
     * queue up one request at one time.
     */
    if (atomic_read(&pqueue->isqueued) == 1) {
        printk(KERN_ERR "%s same queue has queued more than once\n",
               __func__);
        return 0;
    }
    /* Add pointer and sizes to the queue entry of this SSC handle. */
    pqueue->txbuf = txbuf;
    pqueue->txsize = txsize;
    pqueue->rxbuf = rxbuf;
    pqueue->rxsize = rxsize;
    atomic_set(&pqueue->isqueued, 1);
    pqueue->exchange_bytes = 0;
    pqueue->request_lock = FALSE;
    /* No completion callback for the synchronous path */
    memset(&pqueue->callback, 0, sizeof (IFX_SSC_ASYNC_CALLBACK_t));
    /* Add queue entry to priority queue */
    ifx_ssc_enqueue(pqueue);
    /* If no async support is available, trigger the SSC kernel thread and
     * wait pending till job is done.
     */
#ifdef SSC_ASYNCHRONOUS_SUPPORT
    IFX_SSC_PRINT(port, SSC_MSG_TASKLET, "%s raise fake interrupt\n",
                  __func__);
    ifx_ssc_start_tasklet(port);
#else
    IFX_SSC_PRINT(port, SSC_MSG_THREAD, "%s wake up ssc kernel thread\n",
                  __func__);
    /* Kick off SSC kernel thread */
    SSC_WAKEUP_EVENT(port->ssc_thread_wait, SSC_THREAD_EVENT,
                     port->event_flags);
#endif
    /* Wait till wakeup from SSC kernel thread */
    SSC_WAIT_EVENT(dev->dev_thread_wait, DEV_THREAD_EVENT, dev->event_flags);
    IFX_SSC_PRINT(port, SSC_MSG_THREAD,
                  "%s wakeup event received from ssc kernel thread\n",
                  __func__);
    /* Reset queue pointer */
    pqueue->txbuf = NULL;
    pqueue->rxbuf = NULL;
    return pqueue->txsize + pqueue->rxsize;
}
EXPORT_SYMBOL(amazon_s_sscTxRx);

/**
 *\fn int amazon_s_sscTx (IFX_SSC_HANDLE handler, char *txbuf, u32 txsize)
 *\brief Called to transmit the data.
 * The SSC driver sets the chipselect when the data transmission starts and
 * resets it when the transmission is completed. Memory management of
 * the transmit buffer is done by the SSC client.
 *
 * \param handler Handle of the connection where to make the
 *                configuration on
 * \param txbuf   Pointer to the data packet to transmit
 * \param txsize  Amount of Bytes to transmit
 * \return >= 0 Number of bytes transmitted
 *         < 0  error number
 * \ingroup AMAZON_S_SSC_FUNCTIONS
 */
int
amazon_s_sscTx(IFX_SSC_HANDLE handler, char *txbuf, u32 txsize)
{
    /* TX-only convenience wrapper */
    return amazon_s_sscTxRx(handler, txbuf, txsize, NULL, 0);
}
EXPORT_SYMBOL (amazon_s_sscTx);

/**
 *\fn int amazon_s_sscRx (IFX_SSC_HANDLE handler, char *rxbuf, u32 rxsize)
 *\brief Called to receive the data.
 * The SSC driver sets the chipselect when the data reception starts and
 * resets it when the reception is completed. The memory allocation and
 * de-allocation of the receive buffer is done by the SSC client.
 *
 * \param handler Handle of the connection where to make the
 *                configuration on
 * \param rxbuf   Pointer to the data packet to be received
 * \param rxsize  Amount of Bytes to be received
 * \return >= 0 Number of bytes received
 *         < 0  error number
 * \ingroup AMAZON_S_SSC_FUNCTIONS
 */
int
amazon_s_sscRx (IFX_SSC_HANDLE handler, char *rxbuf, u32 rxsize)
{
    /* RX-only convenience wrapper */
    return amazon_s_sscTxRx(handler, NULL, 0, rxbuf, rxsize);
}
EXPORT_SYMBOL (amazon_s_sscRx);

/**
 * Called to serve every queue entry and it is a common function for
 * SSC kernel thread and tasklet.
 *
 * \param port Pointer to structure #ifx_ssc_port
 * \return 1 Continue to loop this function until return 0
 *         0 Immediately exit this function. For kernel thread,
 *           it will sleep, for tasklet, it will exit dynamically
 */
static int
ifx_ssc_serve_qentry(struct ifx_ssc_port *port)
{
    IFX_SSC_QUEUE_t *qentry = NULL;
    ssc_device_t *dev;
    IFX_SSC_CONFIGURE_t *ssc_cfg = NULL;
    struct dma_device_info *dma_dev;

    /*
     * Don't serve the current or next queue entry in case we are currently
     * waiting for the DMA interrupt to report the transmission completion.
     */
    if (atomic_read(&port->dma_wait_state) == 1) {
        /* DMA interrupt will spawn tasklet or wake up kernel thread
         * in order to continue.
         */
        return 0;
    }
    /* Identify the queue entry to serve */
    if (port->serve_qentry != NULL) {
        /* Continues serving the queue that was served before */
        qentry = port->serve_qentry;
    }
    else if (port->lock_qentry != NULL) {
        /* If one queue holds the lock, only serve this one element and
         * ignore all others.
         */
        qentry = port->lock_qentry;
        port->serve_qentry = qentry;
        ifx_ssc_dequeue(qentry);
    }
    else {
        SSC_Q_LOCK_BH(port);
        /* Async queue preempts all synchronous priorities */
        if (!TAILQ_EMPTY(&port->ssc_asyncq)) {
            qentry = TAILQ_FIRST(&port->ssc_asyncq);
        }
        else {
            int i;

            /* Choose the highest queue entry first */
            for (i = IFX_SSC_PRIO_HIGH; i >=IFX_SSC_PRIO_LOW; i--) {
                if (!TAILQ_EMPTY(&port->ssc_syncq[i])){
                    qentry = TAILQ_FIRST(&port->ssc_syncq[i]);
                    break;
                }
            }
        }
        SSC_Q_UNLOCK_BH(port);
        /* Remember that we are working on this queue now */
        port->serve_qentry = qentry;
        /* Remove entry from queue to serve, we are going to serve it now */
        if (qentry != NULL) {
            ifx_ssc_dequeue(qentry);
        }
    }
    /* No queue found that should be served */
    if (qentry == NULL) {
        return 0;
    }
    /* Get connection handle */
    dev = qentry->dev;
    ssc_cfg = &dev->conn_id;
    dma_dev = port->dma_dev;
    if (qentry->txbuf != NULL) {
        /* DMA requires the buffer start to be burst-aligned */
        int tx_dma_aligned = ((((u32)qentry->txbuf)
            & ((dma_dev->tx_burst_len << 2) - 1)) == 0) ? 1 : 0;

        if ((port->ssc_cs_locked == FALSE) && (port->lock_qentry == NULL)) {
            /* Set the chipselect active before transmission */
            port->ssc_cs_locked = TRUE;
            /* Call the Chipselect set callback of the SSC-Handle */
            ifx_ssc_cs_lock(dev);
        }
        /* If buffer not aligned on DMA burst length, fall back to FIFO */
        if ((qentry->txsize > ssc_cfg->maxFIFOSize) && (tx_dma_aligned == 1)) {
            IFX_SSC_PRINT(port, SSC_MSG_TX_DMA, "%s TX DMA enter\n",
                          __func__);
            ifx_ssc_tx_setup(port);
            ifx_ssc_txdma(port, qentry->txbuf, qentry->txsize);
            port->stats.txDma++;
            /* Reset the data pointer, because this data are done on the
             * SSC hardware.
             */
            qentry->txbuf = NULL;
            /* Count the number of transmitted bytes for this queue entry */
            qentry->exchange_bytes = qentry->txsize;
            dev->stats.txBytes += qentry->txsize;
            port->stats.txBytes += qentry->txsize;
            /* 0: wait for the DMA completion interrupt */
            return 0;
        }
        else {
            ifx_ssc_txrx_setup(port);
            ifx_ssc_txfifo(port, qentry->txbuf, qentry->txsize);
            port->stats.txFifo++;
            IFX_SSC_PRINT(port, SSC_MSG_TX_FIFO, "%s TX FIFO enter\n",
                          __func__);
            /* Reset the data pointer, because this data are done on the
             * SSC hardware.
             */
            qentry->txbuf = NULL;
            /* Count the number of transmitted bytes for this queue entry */
            qentry->exchange_bytes = qentry->txsize;
            dev->stats.txBytes += qentry->txsize;
            port->stats.txBytes += qentry->txsize;
            /* NB, Make sure data has been sent out */
            ifx_ssc_wait_finished(port);
            return 1;
        }
    }
    else if (qentry->rxbuf != NULL) {
        int rx_dma_aligned = ((((u32) qentry->rxbuf)
            & ((dma_dev->rx_burst_len << 2) - 1)) == 0) ? 1 : 0;

        if ((port->ssc_cs_locked == FALSE) && (port->lock_qentry == NULL)) {
            /* Set the chipselect active before transmission */
            port->ssc_cs_locked = TRUE;
            /* Call the Chipselect set callback of the SSC-Handle */
            ifx_ssc_cs_lock(dev);
        }
        /* If buffer not aligned on DMA burst length, fall back to FIFO */
        if ((qentry->rxsize > ssc_cfg->maxFIFOSize) && (rx_dma_aligned == 1)) {
            IFX_SSC_PRINT(port, SSC_MSG_RX_DMA, "%s RX DMA enter\n",
                          __func__);
            ifx_ssc_rx_setup(port);
            ifx_ssc_rxdma(port, qentry->rxbuf, qentry->rxsize);
            port->stats.rxDma++;
            /* Reset the data pointer, because this data are done on the
             * SSC hardware.
             */
            qentry->rxbuf = NULL;
            /* Count the number of transmitted bytes for this queue entry */
            qentry->exchange_bytes += qentry->rxsize;
            dev->stats.rxBytes += qentry->rxsize;
            port->stats.rxBytes += qentry->rxsize;
            /* 0: wait for the DMA completion interrupt */
            return 0;
        }
        else {
            ifx_ssc_txrx_setup(port);
            ifx_ssc_rxfifo(port, qentry->rxbuf, qentry->rxsize);
            port->stats.rxFifo++;
            IFX_SSC_PRINT(port, SSC_MSG_RX_FIFO, "%s RX FIFO enter\n",
                          __func__);
            /* Reset the data pointer, because this data are done on the
             * SSC hardware.
             */
            qentry->rxbuf = NULL;
            /* Count the number of received bytes for this queue entry */
            qentry->exchange_bytes += qentry->rxsize;
            dev->stats.rxBytes += qentry->rxsize;
            port->stats.rxBytes += qentry->rxsize;
            /* NB, Make sure data has been sent out */
            ifx_ssc_wait_finished(port);
            return 1;
        }
    }
    else if (qentry->request_lock) {
        /* A lock request found */
        IFX_SSC_PRINT(port, SSC_MSG_LOCK, "%s request lock enter\n",
                      __func__);
        port->lock_qentry = qentry;
        qentry->request_lock = FALSE;
        /* Serving the current queue entry is done */
        port->serve_qentry = NULL;
        /* Reset the flag in the queue element that this one is queued with
         * a request to be served.
         */
        atomic_set(&qentry->isqueued, 0);
        port->ssc_cs_locked = TRUE;
        /* Call the Chipselect set callback of the SSC-Handle */
        ifx_ssc_cs_lock(dev);
        if (qentry->callback.pFunction) {
            /* Store the callback parameter local to cleanup the queue entry
             * before calling the callback.
             */
            IFX_SSC_ASYNC_CALLBACK_t callback = qentry->callback;

            qentry->callback.pFunction = NULL;
            IFX_SSC_PRINT(port, SSC_MSG_CALLBACK,
                          "%s line%d request callback\n",
                          __func__, __LINE__);
            /* Callback to call */
            callback.pFunction(callback.functionHandle, 0);
        }
        return 1;
    }
    else if (qentry->callback.pFunction != NULL) {
        /*
         * Store the callback parameter local to cleanup the queue entry
         * before calling the callback.
         */
        IFX_SSC_ASYNC_CALLBACK_t callback = qentry->callback;

        qentry->callback.pFunction = NULL;
        /* Serving the current queue entry is done */
        port->serve_qentry = NULL;
        /* Reset the flag in the queue element that this one is queued with
         * a request to be served.
         */
        atomic_set(&qentry->isqueued, 0);
        if ((port->ssc_cs_locked == TRUE) && (port->lock_qentry == NULL)) {
            /* Set the chipselect inactive again after transmission complete */
            port->ssc_cs_locked = FALSE;
            /* Call the Chipselect reset callback of the SSC-Handle */
            ifx_ssc_cs_unlock(dev);
        }
        IFX_SSC_PRINT(port, SSC_MSG_CALLBACK, "%s line%d direct callback\n",
                      __func__, __LINE__);
        /* Callback to call */
        callback.pFunction(callback.functionHandle, qentry->exchange_bytes);
        return 1;
    }
    else if (qentry->handle_type == IFX_SSC_HANDL_TYPE_SYNC) {
        if ((port->ssc_cs_locked == TRUE) && (port->lock_qentry == NULL)) {
            /* Set the chipselect inactive again after transmission complete */
            port->ssc_cs_locked = FALSE;
            /* Call the Chipselect reset callback of the SSC-Handle */
            ifx_ssc_cs_unlock(dev);
        }
        /* Wake up the pending thread */
        SSC_WAKEUP_EVENT(dev->dev_thread_wait, DEV_THREAD_EVENT,
                         dev->event_flags);
        IFX_SSC_PRINT(port, SSC_MSG_THREAD,
                      "%s wakeup SSC client kernel thread\n", __func__);
        /* Serving the current queue entry is done */
        port->serve_qentry = NULL;
        /* Reset the flag in the queue element that this one is queued with
         * a request to be served.
         */
        atomic_set(&qentry->isqueued, 0);
        if (port->ssc_cs_locked == TRUE) {
            return 0;
        }
        else {
            return 1;
        }
    }
    return 0;
}

#ifdef SSC_ASYNCHRONOUS_SUPPORT
/**
 * Called to serve every queue entry in tasklet
 *
 * \param port Pointer to structure #ifx_ssc_port
 * \return none
 */
static INLINE void
ifx_ssc_tasklet_serve_queue(struct ifx_ssc_port *port)
{
    /*
     * Serve queue entries till no queue entry anymore to serve, we wait for
     * DMA or the lock entry is not in the queue.
     */
    while (ifx_ssc_serve_qentry(port));
}

/**
 * SSC tasklet implementation
 *
 * \param arg cast to structure #ifx_ssc_port
 * \return none
 */
static INLINE void
ifx_ssc_tasklet(unsigned long arg)
{
    struct ifx_ssc_port *port = (struct ifx_ssc_port *)arg;

    IFX_SSC_PRINT(port, SSC_MSG_TASKLET, "%s enter\n", __func__);
    ifx_ssc_tasklet_serve_queue(port);
}

/**
 * SSC tasklet initialization
 *
 * \param port Pointer to structure #ifx_ssc_port
 * \return none
 */
static INLINE void
ifx_ssc_tasklet_init(struct ifx_ssc_port *port)
{
    tasklet_init(&port->ssc_txrxq, ifx_ssc_tasklet, (unsigned long)port);
}
#else
#define IFX_SSC_THREAD_OPTIONS (CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
/**
 * SSC kernel thread implementation function
 *
 * \param arg cast to structure #ifx_ssc_port
 * \return none
 */
static int
ifx_ssc_kthread(void *arg)
{
    struct ifx_ssc_port *port = (struct ifx_ssc_port *)arg;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
    struct task_struct *kthread = current;
#endif

    /* do LINUX specific setup */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
    daemonize();
    reparent_to_init();
    /*
     * lock the kernel. A new kernel thread starts without
     * the big kernel lock, regardless of the lock state
     * of the creator (the lock level is *not* inheritated)
     */
    lock_kernel();
    /* Don't care about any signals.
     * NOTE(review): the original text here was mojibake ("¤t");
     * restored to '&current' — confirm against the pristine source.
     */
    siginitsetinv(&current->blocked, 0);
    /* set name of this process */
    strcpy(kthread->comm, port->name);
    /* let others run */
    unlock_kernel();
#else
    daemonize(port->name);
#endif
    while(1) {
        /* Serve queue entries till no queue entry anymore to serve, we wait
         * for DMA or the lock entry is not in the queue.
         */
        while (ifx_ssc_serve_qentry(port));
        /* Wait for DMA interrupt or sync queue to wake us up */
        SSC_WAIT_EVENT(port->ssc_thread_wait, SSC_THREAD_EVENT,
                       port->event_flags);
        IFX_SSC_PRINT(port, SSC_MSG_THREAD,
                      "%s DMA or sync queue event received\n", __func__);
    }
    return 0;
}

/**
 * SSC kernel thread initialization
 *
 * \param port Pointer to structure #ifx_ssc_port
 * \return none
 */
static INLINE int
ifx_ssc_thread_init(struct ifx_ssc_port *port)
{
    port->ssc_pid = kernel_thread(ifx_ssc_kthread, (void*) port,
                                  IFX_SSC_THREAD_OPTIONS);
    IFX_SSC_PRINT(port, SSC_MSG_INIT, "%s pid %d\n", __func__,
                  port->ssc_pid);
    return 0;
}
#endif /* SSC_ASYNCHRONOUS_SUPPORT */

/**
 * \fn int amazon_s_sscAsyncTxRx(IFX_SSC_HANDLE handler,
 *            IFX_SSC_ASYNC_CALLBACK_t *pCallback,
 *            char *txbuf, int txsize, char *rxbuf, int rxsize)
 * \brief Transmit/receive to/from SSC in one step. It performs the data
 * transmission and then the data reception.
 * The SSC driver sets the chipselect when the data transmission starts and
 * resets it when the transmission is completed.
 * This routine is called to start an asynchronous data transmission.
 * The provided callback routine is called after the transmission is done.
 *
 * \param handler   Handle of the connection where to make the configuration on
 * \param pCallback Function callback that is called after the request is
 *                  performed.
 * \param txbuf     Pointer to the data packet to transmit.
 * \param txsize    Amount of Bytes to transmit.
 * \param rxbuf     Pointer to store the received data packet.
 * \param rxsize    Amount of Bytes to receive.
 * \return Returns the amount of bytes that are transmitted in case of
 * successful transmission. In case of an error the function returns (-1).
 *
 * \remarks This function activates the chipselect before data transmission and
 * deactivates it after transmission (when function returns). This is done with
 * the callback function.
The chipselect is not modified in case * \ingroup AMAZON_S_SSC_FUNCTIONS */ int amazon_s_sscAsyncTxRx(IFX_SSC_HANDLE handler, IFX_SSC_ASYNC_CALLBACK_t *pCallback, char *txbuf, int txsize, char *rxbuf, int rxsize) { ssc_device_t *dev; IFX_SSC_CONFIGURE_t *ssc_cfg; IFX_SSC_QUEUE_t *pqueue; struct ifx_ssc_port *port; /* Sanity check */ KASSERT((handler != NULL), ("%s Invalid parameter\n", __func__)); KASSERT(!((rxbuf == NULL) && (rxsize == 0) && (txbuf == NULL) && (txsize == 0)), ("%s Invalid parameter\n", __func__)); dev = (ssc_device_t *)handler; port = dev->port; if (!in_interrupt()) { printk(KERN_ERR "%s must be called in interrupt context\n", __func__); return -1; } ssc_cfg = &dev->conn_id; pqueue = &dev->queue; if (txsize > ssc_cfg->fragSize || rxsize > ssc_cfg->fragSize) { printk(KERN_ERR "%s Device driver must do its own fragmentation\n", __func__); return -1; } /* * Ensure that only asynchronous SSC Handles could enqueue an * asynchronous request. The parameter 'sscHandleType' is set during the * ConnId allocation process. */ if (pqueue->handle_type != IFX_SSC_HANDL_TYPE_ASYNC) { printk(KERN_ERR "%s must use async handler\n", __func__); return -1; } /* * Check if the queue entry of the ConnId is already queued with a request * but this request is not served yet. Every ConnId can only queue up one * request at the time. */ if (atomic_read(&pqueue->isqueued) == 1) { printk(KERN_ERR "%s the same queue has been queued more than once\n", __func__); return (-1); } /* Add pointer and sizes to the queue entry of this SSC handle. */ pqueue->txbuf = txbuf; pqueue->txsize = txsize; pqueue->rxbuf = rxbuf; pqueue->rxsize = rxsize; if (pCallback != NULL) { pqueue->callback = *pCallback; } pqueue->request_lock = FALSE; pqueue->exchange_bytes = 0; atomic_set(&pqueue->isqueued, 1); /* Add queue entry to priority synchronous queue */ ifx_ssc_enqueue(pqueue); /* * Trigger schedule or tasklet or fake interrupt according to different * cases. 
*/ ifx_ssc_start_tasklet(port); return 0; } EXPORT_SYMBOL(amazon_s_sscAsyncTxRx); /** * \fn int amazon_s_sscAsyncTx(IFX_SSC_HANDLE handler, IFX_SSC_ASYNC_CALLBACK_t *pCallback, * char *txbuf, int txsize) * \brief transmit the data, located at "txbuf". The "txsize" amount of bytes is * transmitted over SSC. * The SSC driver sets the chipselect when the data transmission starts and * resets it when the transmission is completed. * This routine is called to start an asynchronous data transmission. * The provided callback routine is called after the transmission is done. * * \param handler Handle of the connection where to make the configuration on * \param pCallback Function callback that is called after the request is * performed. * \param txbuf Pointer to the data packet to transmit. * \param txsize Amount of Bytes to transmit. * \return Returns the amount of bytes that are transmitted in case of * successful transmission. In case of an error the function returns (-1). * * \remarks This function activates the chipselect before data transmission and * deactivates it after transmission (when function returns). This is done with * the callback function. The chipselect is not modified in case * \ingroup AMAZON_S_SSC_FUNCTIONS */ int amazon_s_sscAsyncTx(IFX_SSC_HANDLE handler, IFX_SSC_ASYNC_CALLBACK_t *pCallback, char *txbuf, int txsize) { return amazon_s_sscAsyncTxRx(handler, pCallback, txbuf, txsize, NULL, 0); } EXPORT_SYMBOL(amazon_s_sscAsyncTx); /** * \fn int amazon_s_sscAsyncRx(IFX_SSC_HANDLE handler, IFX_SSC_ASYNC_CALLBACK_t *pCallback, * char *rxbuf, int rxsize) * \brief Receive from SSC. The received data are stored at "rxbuf". The "rxsize" * describes the amount of bytes to receive from SSC. * The SSC driver sets the chipselect when the data reception starts and * resets it when the reception is completed. 
* * \param handler Handle of the connection where to make the configuration on * \param pCallback Function callback that is called after the request is * performed. * \param rxbuf Pointer to store the received data packet. * \param rxsize Amount of Bytes to receive. * \return Returns the amount of bytes that are received in case of successful transmission. In case of an error the function returns (-1). (-1). * * \remarks This function activates the chipselect before data transmission and * deactivates it after transmission (when function returns). This is done with * the callback function. The chipselect is not modified in case * \ingroup AMAZON_S_SSC_FUNCTIONS */ int amazon_s_sscAsyncRx(IFX_SSC_HANDLE handler, IFX_SSC_ASYNC_CALLBACK_t *pCallback, char *rxbuf, int rxsize) { return amazon_s_sscAsyncTxRx(handler, pCallback, NULL, 0, rxbuf, rxsize); } EXPORT_SYMBOL(amazon_s_sscAsyncRx); /** * \fn int amazon_s_sscAsyncLock(IFX_SSC_HANDLE handler, IFX_SSC_ASYNC_CALLBACK_t *pCallback) * * \brief This function locks and reserves the whole SSC interface for the given 'handler'. * The chipselect, belonging to this SSC session is already * activated. This means the chipselect callback is called. * After complete data transmission and reception, * This routine is called to get the lock on tasklet level. * The provided callback routine is called after the lock is set. * * \param handler Handle of the connection. * \param pCallback Function callback that is called after the request is performed. * * \return Return (0) in case of success, otherwise (-1) in case of errors. * * \remarks This function actives the SSC chipselect of this 'handler' by calling * the callback function. 
* \ingroup AMAZON_S_SSC_FUNCTIONS */ int amazon_s_sscAsyncLock(IFX_SSC_HANDLE handler, IFX_SSC_ASYNC_CALLBACK_t *pCallback) { ssc_device_t *dev; IFX_SSC_CONFIGURE_t *ssc_cfg; struct ifx_ssc_port *port; IFX_SSC_QUEUE_t *pqueue; KASSERT((handler != NULL), ("%s Invalid parameter\n", __func__)); dev = (ssc_device_t *)handler; pqueue = &dev->queue; ssc_cfg = &dev->conn_id; port = dev->port; if (port->lock_qentry == pqueue) { /* We hold the lock already -> nothing to request here! */ return (-1); } /* * Check if the queue entry of the ConnId is already queued with a request * but this request is not served yet. Every ConnId can only queue up one * request at the time. */ if (atomic_read(&pqueue->isqueued) == 1) { return (-1); } /* Place a lock request in the queue */ pqueue->request_lock = TRUE; if (pCallback != NULL) { pqueue->callback = *pCallback; } atomic_set(&pqueue->isqueued, 1); /* Add queue entry to priority synchronous queue */ ifx_ssc_enqueue(pqueue); /* * Calls the internal process to serve the queue. This routine would * immediately return in case the SSC hardware is currently used to serve * another request. */ ifx_ssc_start_tasklet(port); return 0; } EXPORT_SYMBOL(amazon_s_sscAsyncLock); /** * \fn int amazon_s_sscAsyncUnLock(IFX_SSC_HANDLE handler) * \brief This function releases the SSC lock that was placed before by calling * \ref amazon_s_sscAsyncLock. This function also inactivate the chipselect signal, which * was set in \ref amazon_s_sscAsyncLock. * \param handler Handle of the connection. * * \return Return (0) in case of success, otherwise (-1) in case of errors. 
 * \ingroup AMAZON_S_SSC_FUNCTIONS
 */
int
amazon_s_sscAsyncUnLock(IFX_SSC_HANDLE handler)
{
    ssc_device_t *dev;
    IFX_SSC_CONFIGURE_t *ssc_cfg = NULL;  /* NOTE(review): written below but never read */
    struct ifx_ssc_port *port = NULL;

    KASSERT((handler != NULL), ("%s Invalid parameter\n", __func__));
    dev = (ssc_device_t *)handler;
    ssc_cfg = &dev->conn_id;
    port = dev->port;
    if (port->lock_qentry != &dev->queue) {
        /* We do not hold the lock, therefore we can not release it! */
        return -1;
    }
    /* Just forget about the lock, then the SSC driver would just take it
     * as a normal queue entry */
    ifx_ssc_cs_unlock(dev);
    port->lock_qentry = NULL;
    return 0;
}
EXPORT_SYMBOL(amazon_s_sscAsyncUnLock);

#ifdef CONFIG_SYSCTL
/*
 * Deal with the sysctl handler api changing: before 2.6.8 the handler and
 * proc_dointvec/proc_dostring took no ppos argument.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8)
#define SSC_SYSCTL_DECL(f, ctl, write, filp, buffer, lenp, ppos) \
    f(ctl_table *ctl, int write, struct file *filp, void *buffer, \
      size_t *lenp)
#define SSC_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, lenp, ppos) \
    proc_dointvec(ctl, write, filp, buffer, lenp)
#define SSC_SYSCTL_PROC_DOSTRING(ctl, write, filp, buffer, lenp, ppos) \
    proc_dostring(ctl, write, filp, buffer, lenp)
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) */
#define SSC_SYSCTL_DECL(f, ctl, write, filp, buffer, lenp, ppos) \
    f(ctl_table *ctl, int write, struct file *filp, void *buffer,\
      size_t *lenp, loff_t *ppos)
#define SSC_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, lenp, ppos) \
    proc_dointvec(ctl, write, filp, buffer, lenp, ppos)
#define SSC_SYSCTL_PROC_DOSTRING(ctl, write, filp, buffer, lenp, ppos) \
    proc_dostring(ctl, write, filp, buffer, lenp, ppos)
#endif

/* SSC Client driver proc entry for parameter configuration */
enum {
    IFX_SSC_PRIV_FRAGMENT_SIZE = 1,
    IFX_SSC_PRIV_FIFO_SIZE     = 2,
    IFX_SSC_PRIV_BAUDRATE      = 3,
    IFX_SSC_PRIV_MODE          = 4,
};

/*
 * Per-device sysctl handler: reads/writes the connection configuration
 * (fragment size, FIFO threshold, baudrate, SPI mode) of one ssc_device_t.
 * The device pointer is carried in ctl->extra1 (set up in
 * ifx_ssc_sysctl_attach). 'val' is used as a bounce buffer so the table
 * entry needs no persistent .data.
 */
static int
SSC_SYSCTL_DECL(ssc_sysctl_private, ctl, write, filp, buffer, lenp, ppos)
{
    ssc_device_t *dev = ctl->extra1;
    IFX_SSC_CONFIGURE_t *ssc_cfg = &dev->conn_id;
    struct ifx_ssc_port *port;
    u32 val;
    int ret;

    port = dev->port;
    ctl->data = &val;
    ctl->maxlen = sizeof(val);
    if (write) {
        /* Parse the user's integer first, then validate and apply */
        ret = SSC_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, lenp, ppos);
        if (ret == 0) {
            switch (ctl->ctl_name) {
            case IFX_SSC_PRIV_FRAGMENT_SIZE:
                if (val < IFX_SSC_MIN_FRAGSIZE || val > IFX_SSC_MAX_FRAGSIZE)
                    return -EINVAL;
                ssc_cfg->fragSize = val;
                port->ssc_fragSize = val;
                break;
            case IFX_SSC_PRIV_FIFO_SIZE:
                if (val < IFX_SSC_FIFO_MIN_THRESHOULD || val > IFX_SSC_FIFO_MAX_THRESHOULD)
                    return -EINVAL;
                ssc_cfg->maxFIFOSize = val;
                break;
            case IFX_SSC_PRIV_BAUDRATE:
                /* XXX, sanity check */
                ssc_cfg->baudrate = val;
                break;
            case IFX_SSC_PRIV_MODE:
                /* SPI mode is fixed at ConnId allocation; not writable here */
                ret = -EINVAL;
                break;
            default:
                return -EINVAL;
            }
        }
    }
    else {
        /* Read path: copy the current setting into 'val' and emit it */
        switch (ctl->ctl_name) {
        case IFX_SSC_PRIV_FRAGMENT_SIZE:
            val = ssc_cfg->fragSize;
            break;
        case IFX_SSC_PRIV_FIFO_SIZE:
            val = ssc_cfg->maxFIFOSize;
            break;
        case IFX_SSC_PRIV_BAUDRATE:
            val = ssc_cfg->baudrate;
            break;
        case IFX_SSC_PRIV_MODE:
            val = ssc_cfg->ssc_mode;
            break;
        default:
            return -EINVAL;
        }
        ret = SSC_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, lenp, ppos);
    }
    return ret;
}

static const ctl_table ssc_sysctl_template[] = {
    /* NB: must be last entry before NULL */
    { .ctl_name     = IFX_SSC_PRIV_FRAGMENT_SIZE,
      .procname     = "fragment_size",
      .mode         = 0644,
      .proc_handler = ssc_sysctl_private },
    { .ctl_name     = IFX_SSC_PRIV_FIFO_SIZE,
      .procname     = "fifosize",
      .mode         = 0644,
      .proc_handler = ssc_sysctl_private },
    { .ctl_name     = IFX_SSC_PRIV_BAUDRATE,
      .procname     = "baudrate",
      .mode         = 0644,
      .proc_handler = ssc_sysctl_private },
    { .ctl_name     = IFX_SSC_PRIV_MODE,
      .procname     = "spimode",
      .mode         = 0644,
      .proc_handler = ssc_sysctl_private },
    { 0 }
};

#define CTL_AUTO -2 /* cannot be CTL_ANY or CTL_NONE */

/*
 * Build and register a per-device sysctl tree: dev/<dev_name>/<template>.
 * Table layout in the single allocation:
 *   [0] "dev" directory, [1] NULL terminator,
 *   [2] device name directory, [3] NULL terminator,
 *   [4..] copy of ssc_sysctl_template (its {0} entry terminates the list).
 */
static void
ifx_ssc_sysctl_attach(ssc_device_t *dev)
{
    int i, space;

    space = 5 * sizeof(struct ctl_table) + sizeof(ssc_sysctl_template);
    dev->ssc_sysctls = kmalloc(space, GFP_KERNEL);
    if (dev->ssc_sysctls == NULL) {
        printk("%s: no memory for sysctl table!\n", __func__);
        return;
    }
    /* setup the table */
    memset(dev->ssc_sysctls, 0, space);
    dev->ssc_sysctls[0].ctl_name = CTL_DEV;
    dev->ssc_sysctls[0].procname = "dev";
    dev->ssc_sysctls[0].mode = 0555;
    dev->ssc_sysctls[0].child = &dev->ssc_sysctls[2];
    /* [1] is NULL terminator */
    dev->ssc_sysctls[2].ctl_name = CTL_AUTO;
    dev->ssc_sysctls[2].procname = dev->dev_name;
    dev->ssc_sysctls[2].mode = 0555;
    dev->ssc_sysctls[2].child = &dev->ssc_sysctls[4];
    /* [3] is NULL terminator */
    /* copy in pre-defined data */
    memcpy(&dev->ssc_sysctls[4], ssc_sysctl_template,
        sizeof(ssc_sysctl_template));
    /* add in dynamic data references: every template entry gets the device
     * as extra1 so the shared handler can find its configuration */
    for (i = 4; dev->ssc_sysctls[i].ctl_name; i++) {
        if (dev->ssc_sysctls[i].extra1 == NULL) {
            dev->ssc_sysctls[i].extra1 = dev;
        }
    }
    /* tack on back-pointer to parent device */
    dev->ssc_sysctls[i - 1].data = dev->dev_name;
    /* and register everything */
    dev->ssc_sysctl_header = register_sysctl_table(dev->ssc_sysctls);
    if (dev->ssc_sysctl_header == NULL) {
        printk("%s: failed to register sysctls!\n", dev->dev_name);
        kfree(dev->ssc_sysctls);
        dev->ssc_sysctls = NULL;
    }
}

/* Undo ifx_ssc_sysctl_attach: unregister and free the per-device table */
static void
ifx_ssc_sysctl_detach(ssc_device_t *dev)
{
    if (dev->ssc_sysctl_header != NULL) {
        unregister_sysctl_table(dev->ssc_sysctl_header);
        dev->ssc_sysctl_header = NULL;
    }
    if (dev->ssc_sysctls != NULL) {
        kfree(dev->ssc_sysctls);
        dev->ssc_sysctls = NULL;
    }
}

/* SSC Driver itself proc support for debug and future configuration */
enum {
    IFX_SSC_PRIV_DEBUG = 1,
};

/*
 * Per-port sysctl handler: exposes the debug message mask of one
 * ifx_ssc_port. The port pointer is carried in ctl->extra1.
 */
static int
SSC_SYSCTL_DECL(port_sysctl_private, ctl, write, filp, buffer, lenp, ppos)
{
    struct ifx_ssc_port *port = ctl->extra1;
    u32 val;
    int ret;

    ctl->data = &val;
    ctl->maxlen = sizeof(val);
    if (write) {
        ret = SSC_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, lenp, ppos);
        if (ret == 0) {
            switch (ctl->ctl_name) {
            case IFX_SSC_PRIV_DEBUG:
                port->ssc_debug = val;
                break;
            default:
                return -EINVAL;
            }
        }
    }
    else {
        switch (ctl->ctl_name) {
        case IFX_SSC_PRIV_DEBUG:
            val = port->ssc_debug;
            break;
        default:
            return -EINVAL;
        }
        ret = SSC_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, lenp, ppos);
    }
    return ret;
}

static const ctl_table port_sysctl_template[] = {
    /* NB: must be last entry before NULL */
    { .ctl_name     = IFX_SSC_PRIV_DEBUG,
      .procname     = "debug",
      .mode         = 0644,
      .proc_handler = port_sysctl_private },
    { 0 }
};

/* NOTE(review): re-definition of CTL_AUTO with an identical replacement
 * list — legal C, but one definition would suffice. */
#define CTL_AUTO -2 /* cannot be CTL_ANY or CTL_NONE */

/*
 * Build and register the per-port sysctl tree: dev/<port name>/<template>.
 * Same layout scheme as ifx_ssc_sysctl_attach above.
 */
static void
ifx_ssc_port_sysctl_attach(struct ifx_ssc_port *port)
{
    int i, space;

    space = 5 * sizeof(struct ctl_table) + sizeof(port_sysctl_template);
    port->port_sysctls = kmalloc(space, GFP_KERNEL);
    if (port->port_sysctls == NULL) {
        printk("%s: no memory for sysctl table!\n", __func__);
        return;
    }
    /* setup the table */
    memset(port->port_sysctls, 0, space);
    port->port_sysctls[0].ctl_name = CTL_DEV;
    port->port_sysctls[0].procname = "dev";
    port->port_sysctls[0].mode = 0555;
    port->port_sysctls[0].child = &port->port_sysctls[2];
    /* [1] is NULL terminator */
    port->port_sysctls[2].ctl_name = CTL_AUTO;
    port->port_sysctls[2].procname = port->name;
    port->port_sysctls[2].mode = 0555;
    port->port_sysctls[2].child = &port->port_sysctls[4];
    /* [3] is NULL terminator */
    /* copy in pre-defined data */
    memcpy(&port->port_sysctls[4], port_sysctl_template,
        sizeof(port_sysctl_template));
    /* add in dynamic data references */
    for (i = 4; port->port_sysctls[i].ctl_name; i++) {
        if (port->port_sysctls[i].extra1 == NULL) {
            port->port_sysctls[i].extra1 = port;
        }
    }
    /* tack on back-pointer to parent device */
    port->port_sysctls[i - 1].data = port->name;
    /* and register everything */
    port->port_sysctl_header = register_sysctl_table(port->port_sysctls);
    if (port->port_sysctl_header == NULL) {
        printk("%s: failed to register sysctls!\n", port->name);
        kfree(port->port_sysctls);
        port->port_sysctls = NULL;
    }
}

/* Undo ifx_ssc_port_sysctl_attach: unregister and free the per-port table */
static void
ifx_ssc_port_sysctl_detach(struct ifx_ssc_port *port)
{
    if (port->port_sysctl_header != NULL) {
        unregister_sysctl_table(port->port_sysctl_header);
        port->port_sysctl_header = NULL;
    }
    if (port->port_sysctls != NULL) {
        kfree(port->port_sysctls);
        port->port_sysctls = NULL;
    }
}
#endif /* CONFIG_SYSCTL */

/**
 *\fn 
IFX_SSC_HANDLE amazon_s_sscAllocConnection (char *dev_name, IFX_SSC_CONFIGURE_t *connid) *\brief Allocate and create a Connection ID "ConnId" * * Allocate and create a Connection ID "ConnId" to communicate over SSC. * This ConnId is needed for all remaining SSC driver API calls. This * ConnId is a handle that helps the SSC driver to find the configuration * that belongs to the connection. ConnId specific parameters are e.g. * Baudrate, Priority, Chipselect Callback, etc. * * \param dev_name unique name for this connection. If null, will alloc * one unique name automatically * \param connid Connectin id * \return a handle "IFX_SSC_HANDLE" in case the allocation was successful. * In case of an error, the return handle is zero (NULL). * \ingroup AMAZON_S_SSC_FUNCTIONS */ IFX_SSC_HANDLE amazon_s_sscAllocConnection (char *dev_name, IFX_SSC_CONFIGURE_t *connid) { struct ifx_ssc_port *port; ssc_device_t *p; ssc_device_t *q; IFX_SSC_QUEUE_t *queue; char buf[IFX_SSC_MAX_DEVNAME] = {0}; char *pName; /* Sanity check first! 
*/ if (isp == NULL) { printk("%s ssc driver must be loaded first!\n", __func__); return NULL; } port = &isp[0]; /* XXX */ if (port->ssc_ndevs >= IFX_SSC_MAX_DEVICE) { IFX_SSC_PRINT(port, SSC_MSG_ERROR, "%s device number out of range\n", __func__); return NULL; } if (connid == NULL) { IFX_SSC_PRINT(port, SSC_MSG_ERROR, "%s must provide connection portrmation!\n", __func__); return NULL; } if ((connid->ssc_mode < IFX_SSC_MODE_0) || (connid->ssc_mode > IFX_SSC_MODE_3)) { IFX_SSC_PRINT(port, SSC_MSG_ERROR, "%s invalid spi mode <%d~%d>!\n", __func__, IFX_SSC_MODE_0, IFX_SSC_MODE_3); return NULL; } if (connid->ssc_prio < IFX_SSC_PRIO_LOW || (connid->ssc_prio > IFX_SSC_PRIO_MAX)) { IFX_SSC_PRINT(port, SSC_MSG_ERROR, "%s invalid priority <%d~%d>!\n", __func__, IFX_SSC_PRIO_LOW, IFX_SSC_PRIO_MAX); } if (connid->csset_cb == NULL) { IFX_SSC_PRINT(port, SSC_MSG_ERROR, "%s must provide cs function\n", __func__); return NULL; } if (connid->fragSize < IFX_SSC_MIN_FRAGSIZE || connid->fragSize > IFX_SSC_MAX_FRAGSIZE) { IFX_SSC_PRINT(port, SSC_MSG_ERROR, "%s invalid fragment size <%d~%d>!\n", __func__, IFX_SSC_MIN_FRAGSIZE, IFX_SSC_MAX_FRAGSIZE); return NULL; } if (connid->maxFIFOSize < IFX_SSC_FIFO_MIN_THRESHOULD || connid->maxFIFOSize > IFX_SSC_FIFO_MAX_THRESHOULD) { IFX_SSC_PRINT(port, SSC_MSG_ERROR, "%s invalid fifo size <%d~%d>!\n", __func__, IFX_SSC_FIFO_MIN_THRESHOULD, IFX_SSC_FIFO_MAX_THRESHOULD); return NULL; } /* If no name specified, will assign one name for identification */ if (dev_name == NULL) { sprintf(buf, "ssc%d", port->ssc_ndevs); pName = buf; } else { if (strlen(dev_name) > (IFX_SSC_MAX_DEVNAME - 1) ) { IFX_SSC_PRINT(port, SSC_MSG_ERROR, "%s device name is too long\n", __func__); return NULL; } pName = dev_name; } p = (ssc_device_t *)kmalloc(sizeof (ssc_device_t), GFP_KERNEL); if (p == NULL) { IFX_SSC_PRINT(port, SSC_MSG_ERROR,"%s failed to allocate memory\n", __func__); return NULL; } memset(p, 0, sizeof (ssc_device_t)); SSC_SEM_LOCK(port->dev_sem); 
TAILQ_FOREACH(q, &port->ssc_devq, dev_entry) { if (strcmp(q->dev_name, pName) == 0) { kfree(p); SSC_SEM_UNLOCK(port->dev_sem); IFX_SSC_PRINT(port, SSC_MSG_ERROR, "%s device registered already!\n", __func__); return NULL; } } SSC_SEM_UNLOCK(port->dev_sem); /* Follow net device driver name rule */ memcpy(p->dev_name, pName, IFX_SSC_MAX_DEVNAME); memcpy((char *)&p->conn_id, (char *)connid, sizeof (IFX_SSC_CONFIGURE_t)); queue = &p->queue; /* Queue handler type converted from priority */ if (connid->ssc_prio == IFX_SSC_PRIO_ASYNC) { queue->handle_type = IFX_SSC_HANDL_TYPE_ASYNC; } else { queue->handle_type = IFX_SSC_HANDL_TYPE_SYNC; } /* Back pointer to later usage */ queue->dev = p; atomic_set(&queue->isqueued, 0); queue->request_lock = FALSE; /* * Just for fast access, priority based on device, instead of packet * Still keep per packet priority there for future change. */ p->dev_prio = connid->ssc_prio; SSC_WAKELIST_INIT(p->dev_thread_wait); p->port = port; /* back pointer to port for easy reference later */ port->ssc_ndevs++; #ifdef CONFIG_SYSCTL ifx_ssc_sysctl_attach(p); #endif /* CONFIG_SYSCTL */ SSC_SEM_LOCK(port->dev_sem); TAILQ_INSERT_TAIL(&port->ssc_devq, p, dev_entry); SSC_SEM_UNLOCK(port->dev_sem); IFX_SSC_PRINT(port, SSC_MSG_INIT, "%s: device %s register sucessfully!\n", __func__, p->dev_name); return (IFX_SSC_HANDLE)p; } EXPORT_SYMBOL(amazon_s_sscAllocConnection); /*! *\fn int amazon_s_sscFreeConnection (IFX_SSC_HANDLE handler) *\brief Release ssc connnection * * Release a ConnId handle that was allocated by the function ifx_SscAllocConnection * before. An allocated ConnId has to be released by the client driver module * when the SSC driver is not used anymore. Note that all allocated ConnId's should * be released before the SSC driver is unloaded from the kernel. * * \param handler ConnId handle allocated by ifx_SscAllocConnection * \returns (0) in case of success, otherwise (-1) in case of errors. 
* \ingroup AMAZON_S_SSC_FUNCTIONS */ int amazon_s_sscFreeConnection (IFX_SSC_HANDLE handler) { ssc_device_t *p; struct ifx_ssc_port *port; ssc_device_t *q, *next; KASSERT((handler != NULL), ("%s Invalid parameter\n", __func__)); p = (ssc_device_t *)handler; port = p->port; SSC_SEM_LOCK(port->dev_sem); TAILQ_FOREACH_SAFE(q, &port->ssc_devq, dev_entry, next) { if (strcmp(q->dev_name, p->dev_name) == 0) { TAILQ_REMOVE(&port->ssc_devq, q, dev_entry); #ifdef CONFIG_SYSCTL ifx_ssc_sysctl_detach(q); #endif /* CONFIG_SYSCTL */ kfree(q); port->ssc_ndevs--; SSC_SEM_UNLOCK(port->dev_sem); IFX_SSC_PRINT(port, SSC_MSG_INIT, "%s device %s unregistered\n", __func__, p->dev_name); return 0; } } SSC_SEM_UNLOCK(port->dev_sem); return -1; } EXPORT_SYMBOL(amazon_s_sscFreeConnection); /** * SSC module Initialization. */ static int __init ifx_ssc_init (void) { struct ifx_ssc_port *port; int i, j, nbytes; int ret_val = -ENOMEM; static int ifx_ssc_initialized = 0; if (ifx_ssc_initialized == 1) { return 0; } else { ifx_ssc_initialized = 1; } nbytes = IFX_SSC_MAX_PORT_NUM * sizeof (struct ifx_ssc_port); isp = (struct ifx_ssc_port *) kmalloc (nbytes, GFP_KERNEL); if (isp == NULL) { printk(KERN_ERR "%s: no memory for isp\n", __func__); return (ret_val); } memset (isp, 0, nbytes); /* set default values in ifx_ssc_port */ for (i = 0; i < IFX_SSC_MAX_PORT_NUM; i++) { port = &isp[i]; port->port_idx = i; /* default values for the HwOpts */ port->opts.abortErrDetect = IFX_SSC_DEF_ABRT_ERR_DETECT; port->opts.rxOvErrDetect = IFX_SSC_DEF_RO_ERR_DETECT; port->opts.rxUndErrDetect = IFX_SSC_DEF_RU_ERR_DETECT; port->opts.txOvErrDetect = IFX_SSC_DEF_TO_ERR_DETECT; port->opts.txUndErrDetect = IFX_SSC_DEF_TU_ERR_DETECT; port->opts.loopBack = IFX_SSC_DEF_LOOP_BACK; port->opts.echoMode = IFX_SSC_DEF_ECHO_MODE; port->opts.idleValue = IFX_SSC_DEF_IDLE_DATA; port->opts.clockPolarity = IFX_SSC_DEF_CLOCK_POLARITY; port->opts.clockPhase = IFX_SSC_DEF_CLOCK_PHASE; port->opts.headingControl = 
IFX_SSC_DEF_HEADING_CONTROL; port->opts.dataWidth = IFX_SSC_DEF_DATA_WIDTH; port->opts.modeRxTx = IFX_SSC_DEF_MODE_RXTX; port->opts.gpoCs = IFX_SSC_DEF_GPO_CS; port->opts.gpoInv = IFX_SSC_DEF_GPO_INV; port->opts.masterSelect = IFX_SSC_DEF_MASTERSLAVE; port->baudrate = IFX_SSC_DEF_BAUDRATE; port->prev_baudrate = 0; port->prev_ssc_mode = IFX_SSC_MODE_UNKNOWN; port->ssc_ndevs = 0; port->ssc_fragSize = DEFAULT_SSC_FRAGMENT_SIZE; /* values specific to SSC1 */ port->mapbase = IFX_SSC_MEM_BASE; /* Register with DMA engine */ port->dma_dev = dma_device_reserve("SPI"); if (port->dma_dev == NULL) { printk(KERN_ERR "%s: Failed to reserve dma device!\n", __func__); goto errout1; } ifx_ssc_init_dma_device(i, port->dma_dev); strcpy(port->name, IFX_SSC_NAME); port->ssc_cs_locked = FALSE; #ifdef IFX_SSC_DEBUG port->ssc_debug = SSC_MSG_ERROR | SSC_MSG_INIT; #else port->ssc_debug = 0; #endif /* IFX_SSC_DEBUG */ atomic_set(&port->dma_wait_state, 0); /* Either SSC tasklet or SSC kernel thread support, not both */ #ifdef SSC_ASYNCHRONOUS_SUPPORT ifx_ssc_tasklet_init(port); #else ifx_ssc_thread_init(port); #endif /* SSC_ASYNCHRONOUS_SUPPORT */ /* The following buffer allocation for HW WAR, last 1~3 bytes in DMA * It will make sure buffer will align on dma burst length */ port->dma_orig_txbuf = kmalloc(DEFAULT_SSC_FRAGMENT_SIZE \ + ((port->dma_dev->tx_burst_len << 2) - 1), GFP_KERNEL); if (port->dma_orig_txbuf == NULL) { printk(KERN_ERR "%s: no memory for dma_orig_txbuf\n", __func__); goto errout1; } port->dma_txbuf = (char *)(((u32)( port->dma_orig_txbuf + \ ((port->dma_dev->tx_burst_len << 2) - 1))) \ & ~((port->dma_dev->tx_burst_len << 2) - 1)); port->dma_orig_rxbuf = kmalloc(DEFAULT_SSC_FRAGMENT_SIZE \ + ((port->dma_dev->rx_burst_len << 2) - 1), GFP_KERNEL); if (port->dma_orig_rxbuf == NULL) { printk(KERN_ERR "%s: no memory for dma_orig_rxbuf\n", __func__); goto errout2; } port->dma_rxbuf = (char *)(((u32)( port->dma_orig_rxbuf + \ ((port->dma_dev->rx_burst_len << 2) - 1))) \ & 
~((port->dma_dev->rx_burst_len << 2) - 1)); /* Queue initialization */ TAILQ_INIT(&port->ssc_devq); TAILQ_INIT(&port->ssc_asyncq); for (j = 0; j < IFX_SSC_PRIO_MAX; j++) { TAILQ_INIT(&port->ssc_syncq[j]); } SSC_Q_LOCK_INIT(port); SSC_SEM_INIT(port->dev_sem); SSC_WAKELIST_INIT(port->ssc_thread_wait); SSC_IRQ_LOCK_INIT(port, "ifx_ssc_lock"); /* activate SSC */ SPI_PMU_SETUP(PMU_ENABLE); /* CLC.DISS = 0 */ SSC_WRITE_REG(IFX_SSC_DEF_RMC << IFX_CLC_RUN_DIVIDER_OFFSET, port->mapbase + IFX_SSC_CLC); port->ssc_fake_irq = IFX_SSC_FAKE_IRQ_NO; ret_val = ifx_int_wrapper.request(port->ssc_fake_irq, ifx_ssc_fake_isr, IRQF_DISABLED, "ifx_ssc_tx", port); if (ret_val) { printk(KERN_ERR "%s: unable to get irq %d\n", __func__, port->ssc_fake_irq); goto errout3; } /* Disable SSC module level real hardware interrupts */ SSC_WRITE_REG(0, port->mapbase + IFX_SSC_IRN_EN); /* init serial framing register */ SSC_WRITE_REG(IFX_SSC_DEF_SFCON, port->mapbase + IFX_SSC_SFCON); if (ifx_ssc_hwinit(port) < 0) { ifx_ssc_gpio_release(); printk(KERN_ERR "%s: hardware init failed for port %d\n", __func__, i); goto errout4; } #ifdef CONFIG_SYSCTL ifx_ssc_port_sysctl_attach(port); #endif /* CONFIG_SYSCTL */ } create_proc_read_entry ("driver/ifx_ssc", 0, NULL, ifx_ssc_read_proc, NULL); ifx_show_version(); return 0; errout4: ifx_int_wrapper.free(port->ssc_fake_irq, port); errout3: kfree(port->dma_orig_rxbuf); errout2: kfree(port->dma_orig_txbuf); errout1: kfree(isp); return (ret_val); } /** * SSC Module Cleanup. * Upon removal of the SSC module this function will free all allocated * resources and unregister devices. 
*/ static void __exit ifx_ssc_exit (void) { int i; struct ifx_ssc_port *port; struct dma_device_info *dma_dev; /* free up any allocated memory */ for (i = 0; i < IFX_SSC_MAX_PORT_NUM; i++) { port = &isp[i]; /* disable the SSC */ SSC_WRITE_REG(IFX_SSC_WHBSTATE_CLR_ENABLE, port->mapbase + IFX_SSC_WHBSTATE); /* XXX, Free all connnections */ ifx_int_wrapper.free(port->ssc_fake_irq, port); #ifdef SSC_ASYNCHRONOUS_SUPPORT tasklet_kill(&port->ssc_txrxq); #endif /* SSC_ASYNCHRONOUS_SUPPORT */ SSC_IRQ_LOCK_DESTROY(port); dma_dev = port->dma_dev; if (dma_dev != NULL) { dma_device_unregister(dma_dev); dma_device_release(dma_dev); } #ifdef CONFIG_SYSCTL ifx_ssc_port_sysctl_detach(port); #endif /* CONFIG_SYSCTL */ kfree(port->dma_orig_rxbuf); kfree(port->dma_orig_txbuf); } ifx_ssc_gpio_release(); kfree(isp); remove_proc_entry ("driver/ifx_ssc", NULL); SPI_PMU_SETUP(PMU_DISABLE); } module_init (ifx_ssc_init); module_exit (ifx_ssc_exit); MODULE_LICENSE ("GPL"); MODULE_AUTHOR ("Michael Schoenenborn"); MODULE_DESCRIPTION ("IFX SSC driver"); MODULE_SUPPORTED_DEVICE ("ifx_ssc");