/****************************************************************************** ** ** FILE NAME : amazon_s_ptm.c ** PROJECT : Amazon-S ** MODULES : Second ETH Interface (MII1) ** ** DATE : 28 NOV 2005 ** AUTHOR : Xu Liang ** DESCRIPTION : Second ETH Interface (MII1) Driver ** COPYRIGHT : Copyright (c) 2006 ** Infineon Technologies AG ** Am Campeon 1-12, 85579 Neubiberg, Germany ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU General Public License as published by ** the Free Software Foundation; either version 2 of the License, or ** (at your option) any later version. ** ** HISTORY ** $Date $Author $Comment ** 28 NOV 2005 Xu Liang Initiate Version ** 23 AUG 2006 Xu Liang Add feature for D1 support: ** 1. DPLUS shared by both ETH0 and ETH2. ** 2. TX descriptors are moved to share buffer. ** 23 OCT 2006 Xu Liang Add GPL header. ** 30 May 2008 Lei Chuanhua Porting to Amazon_S and verified ** 11 Jun 2008 Lei Chuanhua Fixed FCS error in ptm firmware *******************************************************************************/ #ifndef AUTOCONF_INCLUDED #include #endif /* AUTOCONF_INCLUDED */ #include #include #include #include #include #include #include #include #include /* eth_type_trans */ #include /* ethtool_cmd */ #include #include #include #include #include #include /* Chip Specific Head File */ #include #include #include #include #include #define AMAZON_S_PTM_VERSION "1.0.1" #define AMAZON_S_PTM_NAME "ptm" #define ENABLE_PTM_DEBUG 0 #define ENABLE_PTM_ASSERT 0 #define ENABLE_DEBUG_COUNTER 0 #define DEBUG_DUMP_RX_SKB_BEFORE 0 /* before function eth_type_trans */ #define DEBUG_DUMP_RX_SKB_AFTER 0 /* after function eth_type_trans */ #define DEBUG_DUMP_TX_SKB 0 #define ENABLE_PTM_HW_FLOWCONTROL 1 #define DEBUG_MEM_PROC 1 #define ENABLE_DBG_PROC 1 #define PPE_MAILBOX_IGU1_INT INT_NUM_IM2_IRL24 #define MY_ETHADDR my_ethaddr #if (defined(DEBUG_DUMP_RX_SKB_BEFORE) && DEBUG_DUMP_RX_SKB_BEFORE) || 
(defined(DEBUG_DUMP_RX_SKB_AFTER) && DEBUG_DUMP_RX_SKB_AFTER) #define DEBUG_DUMP_RX_SKB #define ENABLE_DBG_PROC #endif #if defined(CONFIG_NET_HW_FLOWCONTROL) && (defined(ENABLE_PTM_HW_FLOWCONTROL) && ENABLE_PTM_HW_FLOWCONTROL) #define PTM_HW_FLOWCONTROL #endif #if defined(ENABLE_PTM_DEBUG) && ENABLE_PTM_DEBUG #define PTM_DBG(format, arg...) \ printk(KERN_WARNING __FILE__ ":%d:%s: " format "\n", \ __LINE__, __func__, ##arg) #define INLINE inline #else #define PTM_DBG(format, arg...) do {} while (0) #define INLINE #endif #if defined(ENABLE_PTM_ASSERT) && ENABLE_PTM_ASSERT #define PTM_ASSERT(cond, format, arg...) \ do { \ if ( !(cond) ) printk(KERN_ERR __FILE__ ":%d:%s: " format "\n", \ __LINE__, __func__, ##arg); \ } while ( 0 ) #else #define PTM_ASSERT(cond, format, arg...) \ do { } while ( 0 ) #endif static int write_desc_delay = 0x20; /* Write descriptor delay */ static int rx_max_pkt_size = 0x05EE; /* Max packet size for RX */ static int rx_min_pkt_size = 0x0040; /* Min packet size for RX */ static int tx_max_pkt_size = 0x05EE; /* Max packet size for TX */ static int tx_min_pkt_size = 0x0040; /* Min packet size for TX */ static int dma_rx_desc_len = PTM_DEFAULT_RX_DESC_NUM; static int dma_tx_desc_len = PTM_DEFAULT_TX_DESC_NUM; static int eth_efmtc_crc_cfg = 0x03100710 ; /* default: tx_eth_crc_check: 1, tx_tc_crc_check: 1, tx_tc_crc_len = 16 * rx_eth_crc_present: 1, rx_eth_crc_check: 1, rx_tc_crc_check: 1, rx_tc_crc_len = 16 */ #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,52)) MODULE_PARM(write_desc_delay, "i"); MODULE_PARM(rx_max_pkt_size, "i"); MODULE_PARM(rx_min_pkt_size, "i"); MODULE_PARM(tx_max_pkt_size, "i"); MODULE_PARM(tx_min_pkt_size, "i"); MODULE_PARM(dma_rx_desc_len, "i"); MODULE_PARM(dma_tx_desc_len, "i"); MODULE_PARM(eth_efmtc_crc_cfg, "i"); #else module_param(write_desc_delay, int, 0600); module_param(rx_max_pkt_size, int, 0600); module_param(rx_min_pkt_size, int, 0600); module_param(tx_max_pkt_size, int, 0600); module_param(tx_min_pkt_size, int, 
0600); module_param(dma_rx_desc_len, int, 0600); module_param(dma_tx_desc_len, int, 0600); module_param(eth_efmtc_crc_cfg, int, 0600); #endif MODULE_PARM_DESC(write_desc_delay, "PPE core clock cycles between descriptor write and effectiveness in external RAM"); MODULE_PARM_DESC(rx_max_pkt_size, "Max packet size in byte for downstream ethernet frames"); MODULE_PARM_DESC(rx_min_pkt_size, "Min packet size in byte for downstream ethernet frames"); MODULE_PARM_DESC(tx_max_pkt_size, "Max packet size in byte for upstream ethernet frames"); MODULE_PARM_DESC(tx_min_pkt_size, "Min packet size in byte for upstream ethernet frames"); MODULE_PARM_DESC(dma_rx_desc_len, "Number of descriptor assigned to DMA RX channel (>16)"); MODULE_PARM_DESC(dma_tx_desc_len, "Number of descriptor assigned to DMA TX channel (>16)"); MODULE_PARM_DESC(eth_efmtc_crc_cfg, "Configuration for PTM TX/RX ethernet/efm-tc CRC"); extern int (*IFX_MEI_adsl_led_flash_cb)(void); static struct ptm_drv_dev ptm_dev; static u8 my_ethaddr[MAX_ADDR_LEN] = {0x00, 0x02, 0x03, 0x04, 0xDB, 0x30, 0x00, 0x00}; static struct proc_dir_entry *ptm_proc_dir; #if defined(ENABLE_DBG_PROC) static u32 dbg_enable; static int ptm_tx_quota = -1; #endif static void do_ptm_tasklet(unsigned long); static DECLARE_TASKLET(ptm_tasklet, do_ptm_tasklet, 0); #ifdef PTM_HW_FLOWCONTROL static void ptm_xon(struct net_device *); #endif static int ptm_init(struct net_device *dev); static struct net_device ptm_net_dev[] = { { name: "ptm0", init: ptm_init, }, { name: "ptmfast0", init: ptm_init, }, }; static struct net_device_stats ptm_net_stats[ARRAY_SIZE(ptm_net_dev)] = {{0}}; extern int (*IFX_MEI_adsl_led_flash_cb)(void); #if defined (ENABLE_PTM_DEBUG) && ENABLE_PTM_DEBUG static void ptm_register_dump(void) { int i; printk("CFG_WAN_WRDES_DELAY = %d\n", *CFG_WAN_WRDES_DELAY); printk("WRX_DMACH_ON = 0x%08X\n", *CFG_WRX_DMACH_ON); printk("WTX_DMACH_ON = 0x%08X\n", *CFG_WTX_DMACH_ON); printk("CFG_WRX_LOOK_BITTH = %d\n", *CFG_WRX_LOOK_BITTH); 
printk("CFG_ETH_EFMTC_CRC = 0x%08X\n", *(u32*)CFG_ETH_EFMTC_CRC); printk("WRX_PORT_CONFIG\n"); for ( i = 0; i < RX_TOTAL_CHANNEL_USED; i++ ) { printk("%2d, 0x%08X, 0x%08X 0x%08X\n", i, (u32)WRX_PORT_CONFIG(i), *(u32*)WRX_PORT_CONFIG(i), *((u32*)WRX_PORT_CONFIG(i) + 1)); } printk("WRX_DMA_CHANNEL_CONFIG\n"); for ( i = 0; i < RX_TOTAL_CHANNEL_USED; i++ ){ printk("%2d, 0x%08X, 0x%08X 0x%08X 0x%08X\n", i, (u32)WRX_DMA_CHANNEL_CONFIG(i), *(u32*)WRX_DMA_CHANNEL_CONFIG(i), *((u32*)WRX_DMA_CHANNEL_CONFIG(i) + 1), *((u32*)WRX_DMA_CHANNEL_CONFIG(i) + 2)); } printk("WTX_PORT_CONFIG\n"); for ( i = 0; i < RX_TOTAL_CHANNEL_USED; i++ ) { printk("%2d, 0x%08X, 0x%08X\n", i, (u32)WTX_PORT_CONFIG(i), *(u32*)WTX_PORT_CONFIG(i)); } printk("WTX_DMA_CHANNEL_CONFIG\n"); for ( i = 0; i < RX_TOTAL_CHANNEL_USED; i++ ) { printk("%2d, 0x%08X, 0x%08X 0x%08X 0x%08X\n", i, (u32)WTX_DMA_CHANNEL_CONFIG(i), *(u32*)WTX_DMA_CHANNEL_CONFIG(i), *((u32*)WTX_DMA_CHANNEL_CONFIG(i) + 1), *((u32*)WTX_DMA_CHANNEL_CONFIG(i) + 2)); } } #endif static INLINE int adsl_led_flash(void) { if ( IFX_MEI_adsl_led_flash_cb ) IFX_MEI_adsl_led_flash_cb(); return 0; } /* * Description: * Implement strategy to pick up TX DMA channel to transmit packet. Not * implemented yet. * Input: * skb --- struct sk_buff *, packet waiting to be transmitted. * dev --- struct net_device *, device used to transmit packet. * Output: * int --- 0: Success * else: Error Code */ static INLINE int ptm_pickup_tx_chan(struct net_device *dev) { return dev - ptm_net_dev; } /* * Description: * Allocate a TX descriptor for DMA channel. * Input: * ch --- int, connection ID * f_full --- int *, a pointer to get descriptor full flag * 1: full, 0: not full * Output: * int --- negative value: descriptor is used up. * else: index of descriptor relative to the first one * of this channel. 
 */
static INLINE int ptm_allock_tx_desc(int ch, int *f_full)
{
    int desc_base;

    PTM_IRQ_LOCK(ch);
    /* next free slot in this channel's ring */
    desc_base = ptm_dev.tx_desc_num * ch + ptm_dev.tx_desc_alloc_pos[ch];
    if ( !ptm_dev.tx_descr_base[desc_base].own ){
        /* advance allocation cursor (wrap at ring end) */
        if ( ++ptm_dev.tx_desc_alloc_pos[ch] == ptm_dev.tx_desc_num )
            ptm_dev.tx_desc_alloc_pos[ch] = 0;
        PTM_ASSERT(f_full, "pointer \"f_full\" must be valid!");
        /* ring is full when the *next* descriptor is still owned by PP32 */
        if ( ptm_dev.tx_descr_base[ptm_dev.tx_desc_num * ch + ptm_dev.tx_desc_alloc_pos[ch]].own )
            *f_full = 1;
        else
            *f_full = 0;
    }
    else
        desc_base = -1;     /* firmware still owns this slot: ring exhausted */
    PTM_IRQ_UNLOCK(ch);

    return desc_base;
}

/*
 *  Description:
 *    Allocate a sk_buff for RX path using. The size is maximum packet size
 *    plus maximum overhead size.
 *  Input:
 *    none
 *  Output:
 *    sk_buff* --- 0:    Failed
 *                 else: Pointer to sk_buff
 */
static INLINE struct sk_buff* ptm_alloc_rx_skb(void)
{
    struct sk_buff *skb;

    /* allocate memory including trailer and padding */
    skb = dev_alloc_skb(ptm_dev.rx_buf_size + RX_HEAD_MAC_ADDR_ALIGNMENT + EMA_ALIGN);
    if ( skb ) {
        /* must be burst length alignment and reserve two more bytes for MAC address alignment */
        if ( ((u32)skb->data & (EMA_ALIGN - 1)) != 0 )
            skb_reserve(skb, ~((u32)skb->data + (EMA_ALIGN - 1)) & (EMA_ALIGN - 1));
        /* stash the skb pointer in the reserved word at "skb->data - 4"
         * so the RX handler can recover it from the descriptor address */
        *((u32*)skb->data - 1) = (u32)skb;
        /* invalidate cache over the whole buffer before DMA writes to it */
        dma_cache_inv((unsigned long)skb->head, (u32)skb->end - (u32)skb->head);
    }

    return skb;
}

/*
 *  Description:
 *    Allocate a sk_buff for TX path using.
 *  Input:
 *    size --- unsigned int, size of the buffer
 *  Output:
 *    sk_buff* --- 0:    Failed
 *                 else: Pointer to sk_buff
 */
static INLINE struct sk_buff* ptm_alloc_tx_skb(unsigned int size)
{
    struct sk_buff *skb;

    /* allocate memory including padding */
    size = (size + EMA_ALIGN - 1) & ~(EMA_ALIGN - 1);
    skb = dev_alloc_skb(size + EMA_ALIGN);
    /* must be burst length alignment */
    if ( skb && ((u32)skb->data & (EMA_ALIGN - 1)) != 0 )
        skb_reserve(skb, ~((u32)skb->data + (EMA_ALIGN - 1)) & (EMA_ALIGN - 1));
    return skb;
}

/*
 *  Description:
 *    Signal PPE firmware a TX packet ready or RX descriptor updated.
 *  Input:
 *    ch    --- unsigned int, connection ID
 *    is_tx --- int, 0 means RX, else means TX
 *  Output:
 *    none
 */
static INLINE void ptm_mailbox_signal(unsigned int ch, int is_tx)
{
    if ( is_tx ){
        /* spin until the previous TX signal has been consumed by PP32 */
#if !defined(ENABLE_DEBUG_COUNTER) || !ENABLE_DEBUG_COUNTER
        while ( MBOX_IGU3_ISR_ISR(ch + MBOX_TX_POS_BOUNDARY)){
            ;
        }
#else
        while ( MBOX_IGU3_ISR_ISR(ch + MBOX_TX_POS_BOUNDARY)){
            ptm_dev.tx_desc_update_wait_loop++;
        }
#endif
        *MBOX_IGU3_ISRS = MBOX_IGU3_ISRS_SET(ch + MBOX_TX_POS_BOUNDARY);
    }
    else {
#if !defined(ENABLE_DEBUG_COUNTER) || !ENABLE_DEBUG_COUNTER
        while ( MBOX_IGU3_ISR_ISR(ch)){
            ;
        }
#else
        while ( MBOX_IGU3_ISR_ISR(ch)) {
            ptm_dev.rx_desc_update_wait_loop++;
        }
#endif
        *MBOX_IGU3_ISRS = MBOX_IGU3_ISRS_SET(ch);
    }
}

/* Refresh and return the per-device stats from the firmware WAN MIB table. */
static struct net_device_stats * ptm_get_stats(struct net_device *dev)
{
    int ndev;

    if ( dev->priv != &ptm_dev )
        return NULL;
    ndev = dev - ptm_net_dev;
    PTM_ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(ptm_net_dev), "ndev = %d (wrong value)", ndev);

    ptm_net_stats[ndev].rx_packets = WAN_MIB_TABLE[ndev].wrx_correct_pdu;
    ptm_net_stats[ndev].rx_bytes = WAN_MIB_TABLE[ndev].wrx_correct_pdu_bytes;
    ptm_net_stats[ndev].rx_errors = WAN_MIB_TABLE[ndev].wrx_tccrc_err_pdu + WAN_MIB_TABLE[ndev].wrx_ethcrc_err_pdu;
    ptm_net_stats[ndev].rx_dropped = WAN_MIB_TABLE[ndev].wrx_nodesc_drop_pdu + WAN_MIB_TABLE[ndev].wrx_len_violation_drop_pdu + ptm_dev.rx_drop_counter;
    ptm_net_stats[ndev].tx_packets = WAN_MIB_TABLE[ndev].wtx_total_pdu;
    ptm_net_stats[ndev].tx_bytes = WAN_MIB_TABLE[ndev].wtx_total_bytes;

    return ptm_net_stats + ndev;
}

/* net_device open: register HW flow control, enable RX DMA, start queue. */
static int ptm_open(struct net_device *dev)
{
#if defined(PTM_HW_FLOWCONTROL)
    if ( (ptm_dev.fc_bit = netdev_register_fc(dev, ptm_xon)) == 0 ) {
        printk("Hardware Flow Control register fails\n");
    }
#endif
    /* enable RX DMA */
    *CFG_WRX_DMACH_ON = (1 << RX_TOTAL_CHANNEL_USED) - 1;
    netif_start_queue(dev);
    return 0;
}

/* net_device stop: disable RX DMA, unregister flow control, stop queue. */
static int ptm_stop(struct net_device *dev)
{
    /* disable RX DMA */
    *CFG_WRX_DMACH_ON = 0x00;
#if defined(PTM_HW_FLOWCONTROL)
    if (ptm_dev.fc_bit > 0) {
        netdev_unregister_fc(ptm_dev.fc_bit);
    }
#endif
    netif_stop_queue(dev);
    return 0;
}

/*
 * Transmit one packet: pick a TX DMA channel, claim a descriptor, re-align
 * the payload if needed, hand the buffer to the PPE firmware via mailbox.
 * On ring-full the queue is stopped and the per-channel TX IRQ is unmasked
 * so the queue is woken when the firmware frees a descriptor.
 */
static int ptm_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    int ret = 0;
    int ndev = dev - ptm_net_dev;
    int ch;
    int f_full;
    int desc_base;
    struct tx_descriptor reg_desc;
    struct tx_descriptor *desc;

    PTM_ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(ptm_net_dev), "ndev = %d (wrong value)", ndev);

#if defined(ENABLE_DBG_PROC)
    /* debug throttle: ptm_tx_quota == 0 drops everything, > 0 counts down */
    if(ptm_tx_quota == 0) {
        dev_kfree_skb_any(skb);
        ptm_net_stats[ndev].tx_dropped++;
        return 0;
    }
    if(ptm_tx_quota > 0)
        ptm_tx_quota --;
#endif

    ch = ptm_pickup_tx_chan(dev);

    /* allocate descriptor */
    desc_base = ptm_allock_tx_desc(ch, &f_full);
    if ( desc_base < 0 ) {
        /* ring exhausted: stop the queue and arm the TX done interrupt */
        dev->trans_start = jiffies;
        netif_stop_queue(dev);

        PTM_IRQ_LOCK(dev);
        ptm_dev.tx_irq |= 1 << ch;
        *MBOX_IGU1_ISRC = 1 << (ch + MBOX_TX_POS_BOUNDARY);
        *MBOX_IGU1_IER = (ptm_dev.tx_irq << MBOX_TX_POS_BOUNDARY) | ptm_dev.rx_irq;
        PTM_IRQ_UNLOCK(dev);
        ret = 0;
        goto ALLOC_TX_CONNECTION_FAIL;
    }
    else if ( f_full ) {
        /* this send succeeds but the ring is now full: stop + arm IRQ */
        dev->trans_start = jiffies;
        netif_stop_queue(dev);

        PTM_IRQ_LOCK(dev);
        ptm_dev.tx_irq |= 1 << ch;
        *MBOX_IGU1_ISRC = 1 << (ch + MBOX_TX_POS_BOUNDARY);
        *MBOX_IGU1_IER = (ptm_dev.tx_irq << MBOX_TX_POS_BOUNDARY) | ptm_dev.rx_irq;
        PTM_IRQ_UNLOCK(dev);
    }

    /* free the skb from the previous use of this descriptor slot */
    if ( ptm_dev.tx_skb_pointers[desc_base] )
        dev_kfree_skb_any(ptm_dev.tx_skb_pointers[desc_base]);

    desc = &ptm_dev.tx_descr_base[desc_base];
    /* load descriptor from memory */
    reg_desc = *desc;

    /* if data pointer is not aligned, allocate new sk_buff */
    if ( ((u32)skb->data & ~(EMA_ALIGN - 1)) < (u32)skb->head ){
        struct sk_buff *new_skb;

        new_skb = ptm_alloc_tx_skb(skb->len);
        if ( new_skb == NULL ) {
            ret = 0;
            ptm_dev.tx_skb_pointers[desc_base] = NULL;
            goto ALLOC_SKB_TX_FAIL;
        }
        skb_put(new_skb, skb->len);
        memcpy(new_skb->data, skb->data, skb->len);
        dev_kfree_skb_any(skb);
        skb = new_skb;
    }

    reg_desc.dataptr = (u32)skb->data >> 2;
    reg_desc.datalen = skb->len < MIN_TX_PKT_LEN ? MIN_TX_PKT_LEN : skb->len;
    reg_desc.byteoff = ((u32)skb->data) & (EMA_ALIGN - 1);
    reg_desc.own = 1;   /* hand descriptor over to PP32 */
    reg_desc.c = 1;

    /* update descriptor send pointer */
    ptm_dev.tx_skb_pointers[desc_base] = skb;

#if defined(DEBUG_DUMP_TX_SKB) && DEBUG_DUMP_TX_SKB
    dump_skb(skb, 0);
#endif
    adsl_led_flash();

    /* write descriptor to memory and write back cache */
    *desc = reg_desc;
    dma_cache_wback((unsigned long)skb->data, skb->len);
    wmb();

    /* signal firmware */
    dev->trans_start = jiffies;
    ptm_mailbox_signal(ch, MBOX_TX_DIR);
#if defined(ENABLE_DEBUG_COUNTER) && ENABLE_DEBUG_COUNTER
    ptm_dev.tx_success++;
#endif
    return 0;

ALLOC_SKB_TX_FAIL:
    PTM_DBG("ALLOC_SKB_TX_FAIL");
ALLOC_TX_CONNECTION_FAIL:
    dev_kfree_skb_any(skb);
    ptm_net_stats[ndev].tx_dropped++;
    return ret;
}

/* Change the interface MAC address (logs old and new values). */
static int ptm_set_mac_address(struct net_device *dev, void *p)
{
    struct sockaddr *addr = (struct sockaddr *)p;

    printk("%s: change MAC from %02X:%02X:%02X:%02X:%02X:%02X to %02X:%02X:%02X:%02X:%02X:%02X\n", dev->name, dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5], addr->sa_data[0], addr->sa_data[1], addr->sa_data[2], addr->sa_data[3], addr->sa_data[4], addr->sa_data[5]);
    memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

    return 0;
}

/*
 * Private ioctls: read the firmware codeword/frame MIB counters and
 * get/set the ethernet/efm-tc CRC configuration.
 */
static int ptm_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
    int ndev;

    ndev = dev - ptm_net_dev;
    PTM_ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(ptm_net_dev), "ndev = %d (wrong value)", ndev);

    switch ( cmd ) {
    case IFX_PTM_MIB_CW_GET:
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxNoIdleCodewords = WAN_MIB_TABLE[ndev].wrx_nonidle_cw;
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxIdleCodewords = WAN_MIB_TABLE[ndev].wrx_idle_cw;
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxCodingViolation = WAN_MIB_TABLE[ndev].wrx_err_cw;
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifTxNoIdleCodewords = 0;
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifTxIdleCodewords = 0;
        break;
    case IFX_PTM_MIB_FRAME_GET:
        ((PTM_FRAME_MIB_T *)ifr->ifr_data)->RxCorrect = WAN_MIB_TABLE[ndev].wrx_correct_pdu;
        ((PTM_FRAME_MIB_T *)ifr->ifr_data)->TC_CrcError = WAN_MIB_TABLE[ndev].wrx_tccrc_err_pdu;
        ((PTM_FRAME_MIB_T *)ifr->ifr_data)->RxDropped = WAN_MIB_TABLE[ndev].wrx_nodesc_drop_pdu + WAN_MIB_TABLE[ndev].wrx_len_violation_drop_pdu;
        ((PTM_FRAME_MIB_T *)ifr->ifr_data)->TxSend = WAN_MIB_TABLE[ndev].wtx_total_pdu;
        break;
    case IFX_PTM_CFG_GET:
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcPresent = CFG_ETH_EFMTC_CRC->rx_eth_crc_present;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcCheck = CFG_ETH_EFMTC_CRC->rx_eth_crc_check;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcCheck = CFG_ETH_EFMTC_CRC->rx_tc_crc_check;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen = CFG_ETH_EFMTC_CRC->rx_tc_crc_len;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxEthCrcGen = CFG_ETH_EFMTC_CRC->tx_eth_crc_gen;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcGen = CFG_ETH_EFMTC_CRC->tx_tc_crc_gen;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen = CFG_ETH_EFMTC_CRC->tx_tc_crc_len;
        break;
    case IFX_PTM_CFG_SET:
        CFG_ETH_EFMTC_CRC->rx_eth_crc_present = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcPresent ? 1 : 0;
        CFG_ETH_EFMTC_CRC->rx_eth_crc_check = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcCheck ? 1 : 0;
        /* TC CRC length is only accepted as 16 or 32; otherwise disabled */
        if ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcCheck && (((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen == 16 || ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen == 32) ) {
            CFG_ETH_EFMTC_CRC->rx_tc_crc_check = 1;
            CFG_ETH_EFMTC_CRC->rx_tc_crc_len = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen;
        }
        else {
            CFG_ETH_EFMTC_CRC->rx_tc_crc_check = 0;
            CFG_ETH_EFMTC_CRC->rx_tc_crc_len = 0;
        }
        CFG_ETH_EFMTC_CRC->tx_eth_crc_gen = ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxEthCrcGen ? 1 : 0;
        if ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcGen && (((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen == 16 || ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen == 32) ) {
            CFG_ETH_EFMTC_CRC->tx_tc_crc_gen = 1;
            CFG_ETH_EFMTC_CRC->tx_tc_crc_len = ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen;
        }
        else {
            CFG_ETH_EFMTC_CRC->tx_tc_crc_gen = 0;
            CFG_ETH_EFMTC_CRC->tx_tc_crc_len = 0;
        }
        break;
    default:
        return -EOPNOTSUPP;
    }

    return 0;
}

/* MTU change is not implemented; silently accepts any value. */
static int ptm_change_mtu(struct net_device *dev, int new_mtu)
{
    PTM_DBG("not implemented");
    /* not implemented */
    return 0;
}

/* Watchdog timeout: mask this channel's TX IRQ and wake the queue. */
static void ptm_tx_timeout(struct net_device *dev)
{
    int ndev = dev - ptm_net_dev;

    PTM_ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(ptm_net_dev), "ndev = %d (wrong value)", ndev);

    /* must restart TX channel (pending) */

    /* disable TX irq, release skb when sending new packet */
    PTM_IRQ_LOCK(dev);
    ptm_dev.tx_irq &= ~(1 << ndev);
    *MBOX_IGU1_IER = ptm_dev.tx_irq ? ((ptm_dev.tx_irq << MBOX_TX_POS_BOUNDARY) | ptm_dev.rx_irq) : ptm_dev.rx_irq;
    PTM_IRQ_UNLOCK(dev);

    /* wake up TX queue */
    netif_wake_queue(dev);
}

/*
 * net_device init hook: wire up the operations table and derive the MAC
 * address (from u-boot's ethaddr when set, otherwise a fixed fallback),
 * giving each PTM device a distinct last byte.
 */
static int ptm_init(struct net_device *dev)
{
    u64 retval;
    int ndev = dev - ptm_net_dev;
    int i;

    PTM_ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(ptm_net_dev), "ndev = %d (wrong value)", ndev);

    ether_setup(dev);

    /* hook network operations */
    dev->get_stats       = ptm_get_stats;
    dev->open            = ptm_open;
    dev->stop            = ptm_stop;
    dev->hard_start_xmit = ptm_hard_start_xmit;
    dev->set_mac_address = ptm_set_mac_address;
    dev->do_ioctl        = ptm_ioctl;
    dev->change_mtu      = ptm_change_mtu;
    dev->tx_timeout      = ptm_tx_timeout;
    dev->watchdog_timeo  = PTM_WATCHDOG_TIMEOUT;
    dev->priv = &ptm_dev;

    /* Read MAC address from the MAC table and put them into device */
    retval = 0;
    for ( i = 0; i < ETH_ADDR_LEN; i++ )
        retval += MY_ETHADDR[i];
    if ( retval == 0 ) {
        /* ethaddr not set in u-boot */
        dev->dev_addr[0] = 0x00;
        dev->dev_addr[1] = 0x20;
        dev->dev_addr[2] = 0xda;
        dev->dev_addr[3] = 0x86;
        dev->dev_addr[4] = 0x23;
        dev->dev_addr[5] = 0x74 + 1 + ndev;
    }
    else {
        for ( i = 0; i < ETH_ADDR_LEN; i++ )
            dev->dev_addr[i] = MY_ETHADDR[i];
        dev->dev_addr[5] += 1 + ndev;
    }

    return 0;
}

#if defined(PTM_HW_FLOWCONTROL)
/* Flow-control "XON": clear the xoff bit and re-enable the RX DMA channels. */
static void ptm_xon(struct net_device *dev)
{
    printk("%s: trying to recover RX_DMACH\n", __func__);

    clear_bit(ptm_dev.fc_bit, &netdev_fc_xoff);
    if ( netif_running(dev) ) {
        *CFG_WRX_DMACH_ON = (1 << RX_TOTAL_CHANNEL_USED) - 1;
    }
}
#endif

/*
 *  Description:
 *    Handle IRQ triggered by received packet (RX).
* Input: * channel --- unsigned int, channel ID which triggered IRQ * Output: * int --- 0: Success * else: Error Code (-EAGAIN, retry until owner flag set) */ static int ptm_rx_handler(unsigned int channel) { struct sk_buff *skb; register struct rx_descriptor reg_desc; struct rx_descriptor *desc; PTM_ASSERT(channel >= 0 && channel < ARRAY_SIZE(ptm_net_dev), "channel = %d (wrong value)", channel); desc = ptm_dev.rx_desc_ch_base[channel] + ptm_dev.rx_desc_read_pos[channel]; reg_desc = *desc; /* if PP32 hold descriptor or show not completed */ if ( reg_desc.own || !reg_desc.c ){ return -EAGAIN; } /* update read position */ if ( ++ptm_dev.rx_desc_read_pos[channel] == ptm_dev.rx_desc_num){ ptm_dev.rx_desc_read_pos[channel] = 0; } /* get skb address */ skb = *(struct sk_buff **)((((u32)reg_desc.dataptr << 2) | KSEG0) - 4); PTM_ASSERT((u32)skb >= 0x80000000, "ptm_rx_handler: skb = 0x%08X", (u32)skb); if ( !reg_desc.err ){ struct sk_buff *new_skb; new_skb = ptm_alloc_rx_skb(); if ( new_skb ) { skb_reserve(skb, reg_desc.byteoff); skb_put(skb, reg_desc.datalen - RX_TAIL_CRC_LENGTH); #if defined(DEBUG_DUMP_RX_SKB_BEFORE) && DEBUG_DUMP_RX_SKB_BEFORE dump_skb(skb, 1); #endif PTM_ASSERT(skb->len >= 60, "ptm_rx_handler: skb->len = %d, reg_desc.datalen = %d", skb->len, reg_desc.datalen); /* parse protocol header */ skb->dev = ptm_net_dev + channel; skb->protocol = eth_type_trans(skb, ptm_net_dev + channel); #if defined(DEBUG_DUMP_RX_SKB_AFTER) && DEBUG_DUMP_RX_SKB_AFTER dump_skb(skb, 1); #endif #if defined(PTM_HW_FLOWCONTROL) if ( netif_rx(skb) == NET_RX_DROP ) { if ( ptm_dev.fc_bit && !test_and_set_bit(ptm_dev.fc_bit, &netdev_fc_xoff) ) { printk("%s: stop RX_DMACH\n", __func__); #if defined(DEBUG_DUMP_RX_SKB_AFTER) && DEBUG_DUMP_RX_SKB_AFTER dump_skb(skb, 1); #endif *CFG_WRX_DMACH_ON = 0; } #if defined(ENABLE_DEBUG_COUNTER) && ENABLE_DEBUG_COUNTER ptm_dev.rx_fail++; #endif } else { #if defined(ENABLE_DEBUG_COUNTER) && ENABLE_DEBUG_COUNTER ptm_dev.rx_success++; #endif } #else #if 
defined(ENABLE_DEBUG_COUNTER) && ENABLE_DEBUG_COUNTER if ( netif_rx(skb) != NET_RX_DROP ) ptm_dev.rx_success++; else ptm_dev.rx_fail++; #else netif_rx(skb); #endif /* defined(ENABLE_DEBUG_COUNTER) && ENABLE_DEBUG_COUNTER */ #endif adsl_led_flash(); /* update descriptor with new sk_buff */ reg_desc.dataptr = (u32)new_skb->data >> 2; reg_desc.byteoff = RX_HEAD_MAC_ADDR_ALIGNMENT; } else{ PTM_DBG("null sk_buff"); /* no sk buffer */ ptm_dev.rx_drop_counter++; #if defined(ENABLE_DEBUG_COUNTER) && ENABLE_DEBUG_COUNTER ptm_dev.rx_driver_level_drop++; #endif } } else{ PTM_DBG("rx_error"); ptm_net_stats[channel].rx_errors++; #if defined(ENABLE_DEBUG_COUNTER) && ENABLE_DEBUG_COUNTER ptm_dev.rx_driver_level_error++; #endif } /* update descriptor */ reg_desc.datalen = ptm_dev.rx_buf_size; reg_desc.own = 1; reg_desc.c = 0; /* write descriptor to memory */ *desc = reg_desc; return 0; } static void do_ptm_tasklet(unsigned long arg) { int rx_check_loop_counter; u32 channel_mask; int channel; rx_check_loop_counter = 0; channel = RX_TOTAL_CHANNEL_USED - 1; channel_mask = 0; do{ /* Wrap up to the first channel */ if ( ++channel == RX_TOTAL_CHANNEL_USED ){ channel_mask = 1; channel = 0; } else { channel_mask <<= 1; } if ( !(*MBOX_IGU1_ISR & channel_mask) ) continue; while ( ptm_rx_handler(channel) == 0 ) { /* signal firmware that descriptor is updated */ ptm_mailbox_signal(channel, MBOX_RX_DIR); /* Give other tasklet chances to run */ if ( ++rx_check_loop_counter >= TASKLET_MAX_RX_CHECK_LOOP ) { tasklet_schedule(&ptm_tasklet); return; } } *MBOX_IGU1_ISRC = channel_mask; } while ( (*MBOX_IGU1_ISR & ((1 << RX_TOTAL_CHANNEL_USED) - 1)) ); PTM_IRQ_LOCK(arg); ptm_dev.irq_handling_flag = 0; ptm_dev.rx_irq = (1 << RX_TOTAL_CHANNEL_USED) - 1; /* enable RX interrupt */ *MBOX_IGU1_IER = ptm_dev.tx_irq ? ((ptm_dev.tx_irq << MBOX_TX_POS_BOUNDARY) | ptm_dev.rx_irq) : ptm_dev.rx_irq; PTM_IRQ_UNLOCK(arg); /* XXX, check interrupt? 
*/ } /* * Description: * Handle IRQ of mailbox and despatch to relative handler. * Input: * irq --- int, IRQ number * dev_id --- void *, argument passed when registering IRQ handler * regs --- struct pt_regs *, registers' value before jumping into handler * Output: * none */ static irqreturn_t mailbox_irq_handler(int irq, void *dev_id) { if ( !*MBOX_IGU1_ISR ){ ptm_dev.irq_handling_flag = 0; return IRQ_NONE; } /* TX */ if ( ptm_dev.tx_irq ){ u32 f_set; u32 bit; int i; f_set = ptm_dev.tx_irq & (*MBOX_IGU1_ISR >> MBOX_TX_POS_BOUNDARY); if ( f_set ){ ptm_dev.tx_irq &= ~f_set; *MBOX_IGU1_IER = (ptm_dev.tx_irq << MBOX_TX_POS_BOUNDARY) | ptm_dev.rx_irq; for ( i = 0, bit = 1; i < ARRAY_SIZE(ptm_net_dev); i++, bit <<= 1 ){ if ( (f_set & bit) ) { netif_wake_queue(ptm_net_dev + i); } } } } if ( ptm_dev.irq_handling_flag++ ){ return IRQ_HANDLED; } /* RX */ if ( ptm_dev.rx_irq ){ PTM_DBG("** fn:%s: rx_irq = %d, tasklet_schedule() called\n", __func__, ptm_dev.rx_irq); ptm_dev.rx_irq = 0; *MBOX_IGU1_IER = ptm_dev.tx_irq << MBOX_TX_POS_BOUNDARY; /* disable RX irq */ tasklet_schedule(&ptm_tasklet); return IRQ_HANDLED; } else { ptm_dev.irq_handling_flag = 0; } return IRQ_HANDLED; } /* * Description: * Check parameters passed by command "insmod" and amend them. * Input: * none * Output: * none */ static INLINE void ptm_param_check(void) { /* * There is a delay between PPE write descriptor and descriptor is * really stored in memory. Host also has this delay when writing * descriptor. So PPE will use this value to determine if the write * operation makes effect. */ if ( write_desc_delay < 0 ) { write_desc_delay = 0; } /* * Because of the limitation of length field in descriptors, the packet * size could not be larger than 64K minus overhead size. 
*/ if ( rx_max_pkt_size < ETH_MIN_FRAME_LEN ) { rx_max_pkt_size = ETH_MIN_FRAME_LEN; } else if ( rx_max_pkt_size > ETH_MAX_FRAME_LEN ){ rx_max_pkt_size = ETH_MAX_FRAME_LEN; } if ( rx_min_pkt_size < ETH_MIN_FRAME_LEN ) { rx_min_pkt_size = ETH_MIN_FRAME_LEN; } else if ( rx_min_pkt_size > rx_max_pkt_size ) { rx_min_pkt_size = rx_max_pkt_size; } if ( tx_max_pkt_size < ETH_MIN_FRAME_LEN ) { tx_max_pkt_size = ETH_MIN_FRAME_LEN; } else if ( tx_max_pkt_size > ETH_MAX_FRAME_LEN ){ tx_max_pkt_size = ETH_MAX_FRAME_LEN; } if ( tx_min_pkt_size < ETH_MIN_FRAME_LEN ){ tx_min_pkt_size = ETH_MIN_FRAME_LEN; } else if ( tx_min_pkt_size > tx_max_pkt_size ){ tx_min_pkt_size = tx_max_pkt_size; } if ( dma_rx_desc_len < PTM_MIN_RX_DESC_NUM){ dma_rx_desc_len = PTM_MIN_RX_DESC_NUM; } if ( dma_tx_desc_len < PTM_MIN_TX_DESC_NUM ) { dma_tx_desc_len = PTM_MIN_TX_DESC_NUM; } } /* * Description: * Setup variable ptm_dev and allocate memory. * Input: * none * Output: * int --- 0: Success * else: Error Code */ static INLINE int ptm_dev_init(void) { int i; int rx_desc; int tx_desc; struct rx_descriptor rx_descriptor = { .own = 1, .c = 0, .sop = 1, .eop = 1, .res1 = 0, .byteoff = RX_HEAD_MAC_ADDR_ALIGNMENT, .res2 = 0, .id = 0, .err = 0, .datalen = 0, .res3 = 0, .dataptr = 0, }; struct tx_descriptor tx_descriptor = { .own = 0, .c = 0, .sop = 1, .eop = 1, .byteoff = 0, .res1 = 0, .iscell = 0, .clp = 0, .datalen = 0, .res2 = 0, .dataptr = 0 }; memset(&ptm_dev, 0, sizeof(ptm_dev)); memset(ptm_net_stats, 0, sizeof(ptm_net_stats)); ptm_dev.rx_buf_size = (rx_max_pkt_size + EMA_ALIGN - 1) & ~(EMA_ALIGN - 1); /* descriptor number of RX DMA channel */ ptm_dev.rx_desc_num = dma_rx_desc_len; /* descriptor number of TX DMA channel */ ptm_dev.tx_desc_num = dma_tx_desc_len; /* delay on descriptor write path */ ptm_dev.write_desc_delay = write_desc_delay; /* allocate memory for RX descriptors */ ptm_dev.rx_desc_addr = kmalloc(RX_TOTAL_CHANNEL_USED * ptm_dev.rx_desc_num * sizeof(struct rx_descriptor) + EMA_ALIGN, 
GFP_KERNEL | GFP_DMA); if ( !ptm_dev.rx_desc_addr ) goto RX_DESCRIPTOR_BASE_ALLOCATE_FAIL; /* do alignment (DWORD) */ ptm_dev.rx_desc_base = (struct rx_descriptor *)(((u32)ptm_dev.rx_desc_addr + (EMA_ALIGN - 1)) & ~(EMA_ALIGN - 1)); ptm_dev.rx_desc_base = (struct rx_descriptor *)((u32)ptm_dev.rx_desc_base | KSEG1); // no cache PTM_DBG("PTM rx_desc_base = %08x\n", (u32)ptm_dev.rx_desc_base); for ( i = 0; i < RX_TOTAL_CHANNEL_USED; i++ ) { ptm_dev.rx_desc_ch_base[i] = ptm_dev.rx_desc_base + ptm_dev.rx_desc_num * i; } /* allocate memory for TX descriptors */ ptm_dev.tx_desc_addr = kmalloc(TX_TOTAL_CHANNEL_USED * ptm_dev.tx_desc_num * sizeof(struct tx_descriptor) + EMA_ALIGN, GFP_KERNEL | GFP_DMA); if ( !ptm_dev.tx_desc_addr ) goto TX_DESCRIPTOR_BASE_ALLOCATE_FAIL; /* do alignment (DWORD) */ ptm_dev.tx_descr_base = (struct tx_descriptor *)(((u32)ptm_dev.tx_desc_addr + (EMA_ALIGN - 1)) & ~(EMA_ALIGN - 1)); ptm_dev.tx_descr_base = (struct tx_descriptor *)((u32)ptm_dev.tx_descr_base | KSEG1); // no cache PTM_DBG("PTM tx_desc_base = %08x\n", (u32)ptm_dev.tx_descr_base); /* allocate pointers to TX sk_buff */ ptm_dev.tx_skb_pointers = kmalloc(TX_TOTAL_CHANNEL_USED * ptm_dev.tx_desc_num * sizeof(struct sk_buff *), GFP_KERNEL); if ( !ptm_dev.tx_skb_pointers ) goto TX_SKB_POINTER_ALLOCATE_FAIL; memset(ptm_dev.tx_skb_pointers, 0, TX_TOTAL_CHANNEL_USED * ptm_dev.tx_desc_num * sizeof(struct sk_buff *)); /* Allocate RX sk_buff and fill up RX descriptors. */ rx_descriptor.datalen = ptm_dev.rx_buf_size; for ( rx_desc = RX_TOTAL_CHANNEL_USED * ptm_dev.rx_desc_num - 1; rx_desc >= 0; rx_desc-- ) { struct sk_buff *skb; skb = ptm_alloc_rx_skb(); if ( skb == NULL ) panic("sk buffer is used up\n"); rx_descriptor.dataptr = (u32)skb->data >> 2; ptm_dev.rx_desc_base[rx_desc] = rx_descriptor; } /* Fill up TX descriptors. 
*/ for ( tx_desc = TX_TOTAL_CHANNEL_USED * ptm_dev.tx_desc_num - 1; tx_desc >= 0; tx_desc-- ) ptm_dev.tx_descr_base[tx_desc] = tx_descriptor; return 0; TX_SKB_POINTER_ALLOCATE_FAIL: kfree(ptm_dev.tx_desc_addr); TX_DESCRIPTOR_BASE_ALLOCATE_FAIL: kfree(ptm_dev.rx_desc_addr); RX_DESCRIPTOR_BASE_ALLOCATE_FAIL: return -ENOMEM; } #ifdef MODULE extern void amazon_s_sw_config(void); static INLINE void ptm_ppe_reset(void) { /* PPE core reset */ *AMAZON_S_RCU_RST_REQ |= AMAZON_S_RCU_RST_REQ_PPE; while (1) { if (((*AMAZON_S_RCU_RST_STAT) & AMAZON_S_RCU_RST_REQ_PPE)) break; } /* Reset MII/GMII settings */ amazon_s_sw_config(); } #endif static INLINE int ptm_local_var_init(void) { #if defined(ENABLE_DBG_PROC) dbg_enable = 1; #endif return 0; } /* * Description: * Fill up share buffer with 0. * Input: * none * Output: * none */ static INLINE void ptm_sb_init(void) { int i; volatile u32 *p = SB_RAM0_ADDR(0); /* write all zeros only */ for (i = 0; i < SB_TOTAL_DWLEN; i++) { *p++ = 0; } } static INLINE void ptm_hw_config(void) { *RFBI_CFG = 0x0; *SFSM_DBA0 = 0x1800; *SFSM_DBA1 = 0x1921; *SFSM_CBA0 = 0x1A42; *SFSM_CBA1 = 0x1A53; *SFSM_CFG0 = 0x14011; *SFSM_CFG1 = 0x14011; *FFSM_DBA0 = 0x1000; *FFSM_DBA1 = 0x1700; *FFSM_CFG0 = 0x3000C; *FFSM_CFG1 = 0x3000C; *FFSM_IDLE_HEAD_BC0 = 0xF0D10000; *FFSM_IDLE_HEAD_BC1 = 0xF0D10000; /* * 0. Backup port2 value to temp * 1. Disable CPU port2 in switch (link and learning) * 2. wait for a while * 3. Configure DM register and counter * 4. restore temp to CPU port2 in switch * This code will cause network to stop working if there are heavy * traffic during bootup. 
This part should be moved to switch and use * the same code as ATM */ { int i; u32 temp; temp = *(volatile u32 *)AMAZON_S_SW_P2_CTL; *(volatile u32 *)AMAZON_S_SW_P2_CTL = 0x40020000; for (i = 0; i < 200; i++) udelay(2000); *DM_RXCFG = 0x00007028; *DS_RXCFG = 0x00007028; *DM_RXDB = 0x00001100; *DS_RXDB = 0x00001100; *DM_RXCB = 0x00001600; *DS_RXCB = 0x00001600; /* * For dynamic, must reset these counters, * For once initialization, don't need to reset these counters */ *DM_RXPGCNT = 0x0; *DS_RXPGCNT = 0x0; *DM_RXPKTCNT = 0x0; *DM_RXCFG |= 0x80000000; *DS_RXCFG |= 0x8000; udelay(2000); *(volatile u32 *)AMAZON_S_SW_P2_CTL = temp; udelay(2000); } } /* * Description: * Setup RX/TX relative registers and tables, including HTU table. All * parameters are taken from ptm_dev. * Input: * none * Output: * none */ static INLINE void ptm_host_ppe_if_init(void) { int i; volatile u32 *p; struct wrx_dma_channel_config rx_config = {0}; struct wtx_dma_channel_config tx_config = {0}; struct wrx_port_cfg_status rx_port_cfg = {0}; struct wtx_port_cfg tx_port_cfg = {0}; /* CDM Block 1 */ *CDM_CFG = CDM_CFG_RAM1_SET(0x00) | CDM_CFG_RAM0_SET(0x00); p = PP32_DATA_MEMORY_RAM1_ADDR(0); for ( i = 0; i < PP32_DATA_MEMORY_RAM1_DWLEN; i++ ) { *p++ = 0; } /* General Registers */ *CFG_WAN_WRDES_DELAY = ptm_dev.write_desc_delay; *CFG_WRX_DMACH_ON = (1 << RX_TOTAL_CHANNEL_USED) - 1; *CFG_WTX_DMACH_ON = (1 << TX_TOTAL_CHANNEL_USED) - 1 ; *CFG_WRX_LOOK_BITTH = WRX_LOOK_THRESHOLD; *CFG_ETH_EFMTC_CRC = *(struct eth_efmtc_crc_cfg *)ð_efmtc_crc_cfg; /* WRX DMA Channel Configuration Table */ rx_config.deslen = ptm_dev.rx_desc_num; rx_port_cfg.mfs = WRX_PORT_RX_MFS; rx_port_cfg.local_state = 0; rx_port_cfg.partner_state = 0; for ( i = 0; i < RX_TOTAL_CHANNEL_USED; i++ ) { rx_config.desba = (((u32)ptm_dev.rx_desc_base >> 2 ) & 0x0FFFFFFF) + ptm_dev.rx_desc_num * i * (sizeof(struct rx_descriptor) >> 2); *WRX_DMA_CHANNEL_CONFIG(i) = rx_config; rx_port_cfg.dmach = i; *WRX_PORT_CONFIG(i) = rx_port_cfg; } /* WTX 
DMA Channel Configuration Table */ tx_config.deslen = ptm_dev.tx_desc_num; tx_port_cfg.tx_cwth1 = WTX_PORT_TX_CWTH1; tx_port_cfg.tx_cwth2 = WTX_PORT_TX_CWTH2; for ( i = 0; i < TX_TOTAL_CHANNEL_USED; i++ ) { tx_config.desba = (((u32)ptm_dev.tx_descr_base >> 2 ) & 0x0FFFFFFF) + ptm_dev.tx_desc_num * i * (sizeof(struct tx_descriptor) >> 2); *WTX_DMA_CHANNEL_CONFIG(i) = tx_config; *WTX_PORT_CONFIG(i) = tx_port_cfg; } } /* * Description: * Clean-up ptm_dev and release memory. * Input: * none * Output: * none */ static INLINE void ptm_dev_cleanup(void) { int desc_base; int i, j; /* Free memory allocated for RX/TX descriptors and RX sk_buff */ desc_base = 0; for ( i = 0; i < TX_TOTAL_CHANNEL_USED; i++ ) { for ( j = 0; j < ptm_dev.tx_desc_num; j++ ) { if ( ptm_dev.tx_skb_pointers[desc_base] ) { dev_kfree_skb_any(ptm_dev.tx_skb_pointers[desc_base]); } desc_base++; } } for ( i = RX_TOTAL_CHANNEL_USED * ptm_dev.rx_desc_num - 1; i >= 0; i-- ) { dev_kfree_skb_any(*(struct sk_buff **)(((ptm_dev.rx_desc_base[i].dataptr << 2) | KSEG0) - 4)); } kfree(ptm_dev.tx_skb_pointers); kfree(ptm_dev.tx_desc_addr); kfree(ptm_dev.rx_desc_addr); } static INLINE void ptm_chip_init(void) { PPE_TC_PMU_SETUP(PMU_ENABLE); PPE_EMA_PMU_SETUP(PMU_ENABLE); PPE_DPLUS_PMU_SETUP(PMU_ENABLE); /* The following two module must be enabled */ DSL_PMU_SETUP(PMU_ENABLE); AHB_PMU_SETUP(PMU_ENABLE); /* Configure share buffer master selection */ *SB_MST_PRI0 = 1; *SB_MST_PRI1 = 1; /* EMA settings */ *EMA_CMDCFG = (EMA_CMD_BUF_LEN << 16) | (EMA_CMD_BASE_ADDR >> 2); *EMA_DATACFG = (EMA_DATA_BUF_LEN << 16) | (EMA_DATA_BASE_ADDR >> 2); *EMA_IER = 0x000000FF; *EMA_CFG = EMA_READ_BURST | (EMA_WRITE_BURST << 2); /* Mailbox settings */ *MBOX_IGU1_ISRC = 0xFFFFFFFF; *MBOX_IGU1_IER = (1 << RX_TOTAL_CHANNEL_USED) - 1; /* enable RX interrupt only */ *MBOX_IGU3_ISRC = 0xFFFFFFFF; *MBOX_IGU3_IER = ((1 << RX_TOTAL_CHANNEL_USED) - 1) | (((1 << TX_TOTAL_CHANNEL_USED) - 1) << MBOX_TX_POS_BOUNDARY); } /* * Description: * Download PPE 
firmware binary code.
 * Input:
 *   src       --- u32 *, binary code buffer
 *   dword_len --- unsigned int, binary code length in DWORD (32-bit)
 * Output:
 *   int       --- 0: Success
 *                 else: Error Code
 */
static INLINE int pp32_download_code(u32 *code_src, unsigned int code_dword_len, u32 *data_src, unsigned int data_dword_len)
{
    u32 reg_old_value;
    volatile u32 *dest;

    /* both buffers must be non-NULL and DWORD aligned */
    if ( code_src == 0 || ((unsigned long)code_src & 0x03) != 0
        || data_src == 0 || ((unsigned long)data_src & 0x03) )
    {
        return -EINVAL;
    }

    /* save the old value of CDM_CFG and set PPE code memory to FPI bus access mode */
    /* NOTE(review): reg_old_value is saved but never restored here — confirm
     * whether the caller is expected to restore CDM_CFG. */
    reg_old_value = *CDM_CFG;
    /* images larger than 4096 DWORDs also need RAM1 mapped as code memory */
    if ( code_dword_len <= 4096 )
    {
        *CDM_CFG = CDM_CFG_RAM1_SET(0x00) | CDM_CFG_RAM0_SET(0x00);
    }
    else
    {
        *CDM_CFG = CDM_CFG_RAM1_SET(0x01) | CDM_CFG_RAM0_SET(0x00);
    }

    /* copy code */
    dest = CDM_CODE_MEMORY_RAM0_ADDR(0);
    while ( code_dword_len-- > 0 )
    {
        *dest++ = *code_src++;
    }

    /* copy data */
    dest = PP32_DATA_MEMORY_RAM1_ADDR(0);
    while ( data_dword_len-- > 0 )
    {
        *dest++ = *data_src++;
    }

    return 0;
}

/*
 * Description:
 *   Do PP32 specific initialization.
 * Input:
 *   data --- void *, specific parameter passed in.
 * Output:
 *   int  --- 0: Success
 *            else: Error Code
 */
static INLINE int pp32_specific_init(void *data)
{
    /* nothing firmware-specific to do for this build */
    return 0;
}

/*
 * Description:
 *   Initialize and start up PP32.
 * Input:
 *   none
 * Output:
 *   int --- 0: Success
 *           else: Error Code
 */
static INLINE int pp32_start(void)
{
    int ret;
    register int i;

    /* download firmware */
    ret = pp32_download_code(firmware_binary_code, ARRAY_SIZE(firmware_binary_code), firmware_binary_data, ARRAY_SIZE(firmware_binary_data));
    if ( ret )
        return ret;

    /* firmware specific initialization */
    ret = pp32_specific_init(NULL);
    if ( ret )
        return ret;

    /* run PP32 */
    *PP32_DBG_CTRL = DBG_CTRL_RESTART;

    /* idle for a while to let PP32 init itself */
    for ( i = 0; i < IDLE_CYCLE_NUMBER; i++ )
    {
        ;
    }

    return 0;
}

/* Placeholder for breakpoint setup; intentionally empty. */
static INLINE void ppe_bp_setup(void)
{
}

/*
 * Description:
 *   Halt PP32.
* Input:
 *   none
 * Output:
 *   none
 */
static INLINE void pp32_stop(void)
{
    /* halt PP32 */
    *PP32_DBG_CTRL = DBG_CTRL_STOP;
}

/* Case-insensitive string compare (ASCII A-Z only).
 * Returns 0 on match, otherwise the difference of the first mismatching
 * (lowercased) character pair, like strcmp(). */
static INLINE int stricmp(const char *p1, const char *p2)
{
    int c1, c2;

    while ( *p1 && *p2 )
    {
        c1 = (*p1 >= 'A' && *p1 <= 'Z') ? (*p1 + 'a' - 'A') : *p1;
        c2 = (*p2 >= 'A' && *p2 <= 'Z') ? (*p2 + 'a' - 'A') : *p2;
        if ( (c1 -= c2) )
            return c1;
        p1++;
        p2++;
    }

    /* one (or both) strings ended: compare the terminators */
    return *p1 - *p2;
}

/* Scan forward in *p1 (remaining length *len) for the next token starting
 * with a letter. On success *p1 points at the token, *p2 just past it (the
 * delimiter is overwritten with NUL in place), and *colon is set to 1 when
 * the token was terminated by ':'. Returns the token length, 0 if none. */
static INLINE int get_token(char **p1, char **p2, int *len, int *colon)
{
    int tlen = 0;

    /* skip leading non-alphabetic characters */
    while ( *len && !((**p1 >= 'A' && **p1 <= 'Z') || (**p1 >= 'a' && **p1 <= 'z')) )
    {
        (*p1)++;
        (*len)--;
    }
    if ( !*len )
        return 0;

    if ( *colon )
    {
        /* caller allows a "name:value" form: stop early at ':' */
        *colon = 0;
        *p2 = *p1;
        while ( *len && **p2 > ' ' && **p2 != ',' )
        {
            if ( **p2 == ':' )
            {
                *colon = 1;
                break;
            }
            (*p2)++;
            (*len)--;
            tlen++;
        }
        **p2 = 0;
    }
    else
    {
        *p2 = *p1;
        while ( *len && **p2 > ' ' && **p2 != ',' )
        {
            (*p2)++;
            (*len)--;
            tlen++;
        }
        **p2 = 0;
    }

    return tlen;
}

/* Parse an unsigned number at *p (remaining length *len); hexadecimal when
 * is_hex is non-zero, decimal otherwise. Advances *p / decrements *len past
 * the digits consumed and returns the value (no overflow check). */
static INLINE int get_number(char **p, int *len, int is_hex)
{
    int ret = 0;
    int n = 0;

    if ( is_hex )
    {
        while ( *len && ((**p >= '0' && **p <= '9') || (**p >= 'a' && **p <= 'f') || (**p >= 'A' && **p <= 'F')) )
        {
            if ( **p >= '0' && **p <= '9' )
                n = **p - '0';
            else if ( **p >= 'a' && **p <= 'f' )
                n = **p - 'a' + 10;
            else if ( **p >= 'A' && **p <= 'F' )
                n = **p - 'A' + 10;
            ret = (ret << 4) | n;
            (*p)++;
            (*len)--;
        }
    }
    else
    {
        while ( *len && **p >= '0' && **p <= '9' )
        {
            n = **p - '0';
            ret = ret * 10 + n;
            (*p)++;
            (*len)--;
        }
    }

    return ret;
}

/* Skip whitespace/control characters and the separators ':', '.' and ','. */
static INLINE void ignore_space(char **p, int *len)
{
    while ( *len && (**p <= ' ' || **p == ':' || **p == '.'
|| **p == ',') )
    {
        (*p)++;
        (*len)--;
    }
}

/* /proc read handler: dump the hardware WAN MIB counters for the "ptm0"
 * and "ptmfast0" interfaces (RX correct/error/idle counters, TX totals). */
static int proc_read_mib(char *page, char **start, off_t off, int count, int *eof, void *data)
{
    int len = 0;

    len += sprintf(page + off + len, "ptm0\n");
    len += sprintf(page + off + len, " RX\n");
    len += sprintf(page + off + len, " correct : %u (%u)\n", WAN_MIB_TABLE[PTM0_MIB_IDX].wrx_correct_pdu, WAN_MIB_TABLE[PTM0_MIB_IDX].wrx_correct_pdu_bytes);
    len += sprintf(page + off + len, " tccrc_err : %u (%u)\n", WAN_MIB_TABLE[PTM0_MIB_IDX].wrx_tccrc_err_pdu, WAN_MIB_TABLE[PTM0_MIB_IDX].wrx_tccrc_err_pdu_bytes);
    len += sprintf(page + off + len, " ethcrc_err : %u (%u)\n", WAN_MIB_TABLE[PTM0_MIB_IDX].wrx_ethcrc_err_pdu, WAN_MIB_TABLE[PTM0_MIB_IDX].wrx_ethcrc_err_pdu_bytes);
    len += sprintf(page + off + len, " nodesc_drop : %u\n", WAN_MIB_TABLE[PTM0_MIB_IDX].wrx_nodesc_drop_pdu);
    len += sprintf(page + off + len, " len_viol_drop: %u\n", WAN_MIB_TABLE[PTM0_MIB_IDX].wrx_len_violation_drop_pdu);
    len += sprintf(page + off + len, " idle_bytes : %u\n", WAN_MIB_TABLE[PTM0_MIB_IDX].wrx_idle_bytes);
    len += sprintf(page + off + len, " nonidle_cw : %u\n", WAN_MIB_TABLE[PTM0_MIB_IDX].wrx_nonidle_cw);
    len += sprintf(page + off + len, " idle_cw : %u\n", WAN_MIB_TABLE[PTM0_MIB_IDX].wrx_idle_cw);
    len += sprintf(page + off + len, " err_cw : %u\n", WAN_MIB_TABLE[PTM0_MIB_IDX].wrx_err_cw);
    len += sprintf(page + off + len, " TX \n");
    len += sprintf(page + off + len, " total_pdu : %u (%u)\n", WAN_MIB_TABLE[PTM0_MIB_IDX].wtx_total_pdu, WAN_MIB_TABLE[PTM0_MIB_IDX].wtx_total_bytes);

    len += sprintf(page + off + len, "ptmfast0\n");
    len += sprintf(page + off + len, " RX\n");
    len += sprintf(page + off + len, " correct : %u (%u)\n", WAN_MIB_TABLE[PTMFAST_MIB_IDX].wrx_correct_pdu, WAN_MIB_TABLE[PTMFAST_MIB_IDX].wrx_correct_pdu_bytes);
    len += sprintf(page + off + len, " tccrc_err : %u (%u)\n", WAN_MIB_TABLE[PTMFAST_MIB_IDX].wrx_tccrc_err_pdu, WAN_MIB_TABLE[PTMFAST_MIB_IDX].wrx_tccrc_err_pdu_bytes);
    len += sprintf(page + off + len, " ethcrc_err : %u (%u)\n", WAN_MIB_TABLE[PTMFAST_MIB_IDX].wrx_ethcrc_err_pdu, WAN_MIB_TABLE[PTMFAST_MIB_IDX].wrx_ethcrc_err_pdu_bytes);
    len += sprintf(page + off + len, " nodesc_drop : %u\n", WAN_MIB_TABLE[PTMFAST_MIB_IDX].wrx_nodesc_drop_pdu);
    len += sprintf(page + off + len, " len_viol_drop: %u\n", WAN_MIB_TABLE[PTMFAST_MIB_IDX].wrx_len_violation_drop_pdu);
    len += sprintf(page + off + len, " idle_bytes : %u\n", WAN_MIB_TABLE[PTMFAST_MIB_IDX].wrx_idle_bytes);
    len += sprintf(page + off + len, " nonidle_cw : %u\n", WAN_MIB_TABLE[PTMFAST_MIB_IDX].wrx_nonidle_cw);
    len += sprintf(page + off + len, " idle_cw : %u\n", WAN_MIB_TABLE[PTMFAST_MIB_IDX].wrx_idle_cw);
    len += sprintf(page + off + len, " err_cw : %u\n", WAN_MIB_TABLE[PTMFAST_MIB_IDX].wrx_err_cw);
    len += sprintf(page + off + len, " TX \n");
    len += sprintf(page + off + len, " total_pdu : %u (%u)\n", WAN_MIB_TABLE[PTMFAST_MIB_IDX].wtx_total_pdu, WAN_MIB_TABLE[PTMFAST_MIB_IDX].wtx_total_bytes);

    *eof = 1;

    return len;
}

/* /proc write handler: the commands "clear"/"clean" (optionally "... all")
 * zero the entire hardware WAN MIB table. Anything else is ignored. */
static int proc_write_mib(struct file *file, const char *buf, unsigned long count, void *data)
{
    char str[64];
    char *p;
    int len, rlen;

    /* copy at most sizeof(str) - 1 bytes from user space */
    len = count < sizeof(str) ?
count : sizeof(str) - 1;
    rlen = len - copy_from_user(str, buf, len);
    /* strip trailing whitespace / control characters */
    while ( rlen && str[rlen - 1] <= ' ' )
        rlen--;
    str[rlen] = 0;
    /* skip leading whitespace */
    for ( p = str; *p && *p <= ' '; p++, rlen-- );
    if ( !*p )
    {
        return 0;
    }

    if ( stricmp(p, "clear") == 0 || stricmp(p, "clear all") == 0
        || stricmp(p, "clean") == 0 || stricmp(p, "clean all") == 0 )
    {
        memset((void *)WAN_MIB_TABLE, 0, WAN_MIB_TABLE_SIZE);
    }

    return count;
}

/* /proc read handler: driver-level statistics. The detailed counters are
 * only compiled in when ENABLE_DEBUG_COUNTER is set. */
static int proc_read_stats(char *page, char **start, off_t off, int count, int *eof, void *data)
{
    int len = 0;

    len += sprintf(page + off + len, "PTM Stats\n");
#if defined(ENABLE_DEBUG_COUNTER) && ENABLE_DEBUG_COUNTER
    len += sprintf(page + off + len, " Total\n");
    len += sprintf(page + off + len, " rx_success = %u\n", ptm_dev.rx_success);
    len += sprintf(page + off + len, " rx_fail = %u\n", ptm_dev.rx_fail);
    len += sprintf(page + off + len, " rx_desc_read_pos = %u, %u, %u, %u\n", ptm_dev.rx_desc_read_pos[0], ptm_dev.rx_desc_read_pos[1], ptm_dev.rx_desc_read_pos[2], ptm_dev.rx_desc_read_pos[3]);
    len += sprintf(page + off + len, " tx_success = %u\n", ptm_dev.tx_success);
    len += sprintf(page + off + len, " tx_desc_alloc_pos = %u, %u, %u, %u\n", ptm_dev.tx_desc_alloc_pos[0], ptm_dev.tx_desc_alloc_pos[1], ptm_dev.tx_desc_alloc_pos[2], ptm_dev.tx_desc_alloc_pos[3]);
    len += sprintf(page + off + len, " Driver\n");
    len += sprintf(page + off + len, " rx_error = %u\n", ptm_dev.rx_driver_level_error);
    len += sprintf(page + off + len, " rx_drop = %u\n", ptm_dev.rx_driver_level_drop);
    len += sprintf(page + off + len, " tx_drop = %u\n", ptm_dev.tx_driver_level_drop);
    len += sprintf(page + off + len, " EMA\n");
    len += sprintf(page + off + len, " CMDBUF_VCNT = %u\n", *EMA_CMDCNT & 0xFF);
    len += sprintf(page + off + len, " DATABUF_UCNT = %u\n", *EMA_DATACNT & 0x3FF);
    len += sprintf(page + off + len, " Some Switches\n");
    len += sprintf(page + off + len, " netif_Q_stopped = %s\n", netif_queue_stopped(&ptm_net_dev[0]) ? "stopped" : "running");
    len += sprintf(page + off + len, " netif_running = %s\n", netif_running(&ptm_net_dev[0]) ? "running" : "stopped");
    len += sprintf(page + off + len, " IGU1_IER\n");
    len += sprintf(page + off + len, " rx_irq = %04X, IER = %04X\n", ptm_dev.rx_irq & 0xFFFF, *MBOX_IGU1_IER & 0xFFFF);
    len += sprintf(page + off + len, " tx_irq = %04X, IER = %04X\n", ptm_dev.tx_irq & 0xFFFF, *MBOX_IGU1_IER >> MBOX_TX_POS_BOUNDARY);
    len += sprintf(page + off + len, " Mailbox Signal Wait Loop\n");
    len += sprintf(page + off + len, " RX Wait Loop = %d\n", ptm_dev.rx_desc_update_wait_loop);
    len += sprintf(page + off + len, " TX Wait Loop = %d\n", ptm_dev.tx_desc_update_wait_loop);
#endif
    *eof = 1;

    return len;
}

#if defined(ENABLE_DBG_PROC)
/* /proc read handler: debug switch state, TX quota, and the CRC settings
 * currently programmed in CFG_ETH_EFMTC_CRC. */
static int proc_read_dbg(char *page, char **start, off_t off, int count, int *eof, void *data)
{
    int len = 0;

    if ( dbg_enable )
        len += sprintf(page + off + len, "debug enabled (dbg_enable = %08X)\n", dbg_enable);
    else
        len += sprintf(page + off + len, "debug disabled\n");
    len += sprintf(page + off + len, "PTM tx quota: %d\n", ptm_tx_quota);
    len += sprintf(page + off + len, "CRC settings:\n");
    len += sprintf(page + off + len, " rx_eth_crc_present: %s\n", CFG_ETH_EFMTC_CRC->rx_eth_crc_present ? "yes" : "no");
    len += sprintf(page + off + len, " rx_eth_crc_check: %s\n", CFG_ETH_EFMTC_CRC->rx_eth_crc_check ? "on" : "off");
    len += sprintf(page + off + len, " rx_tc_crc_check: %s\n", CFG_ETH_EFMTC_CRC->rx_tc_crc_check ? "on" : "off");
    len += sprintf(page + off + len, " rx_tc_crc_len: %d\n", CFG_ETH_EFMTC_CRC->rx_tc_crc_len);
    len += sprintf(page + off + len, " tx_eth_crc_gen: %s\n", CFG_ETH_EFMTC_CRC->tx_eth_crc_gen ? "on" : "off");
    len += sprintf(page + off + len, " tx_tc_crc_gen: %s\n", CFG_ETH_EFMTC_CRC->tx_tc_crc_gen ?
"on" : "off"); len += sprintf(page + off + len, " tx_tc_crc_len: %d\n", CFG_ETH_EFMTC_CRC->tx_tc_crc_len); *eof = 1; return len; } static int proc_write_dbg(struct file *file, const char *buf, unsigned long count, void *data) { char str[64]; char *p, *p1; int len, rlen; len = count < sizeof(str) ? count : sizeof(str) - 1; rlen = len - copy_from_user(str, buf, len); while ( rlen && str[rlen - 1] <= ' ' ) { rlen--; } str[rlen] = 0; for ( p = str; *p && *p <= ' '; p++, rlen-- ){ ; } if ( !*p ) { return 0; } p1 = p; while (*p1 > ' ') { *p1 ++; } if(*p1) { *p1 = 0; *p1 ++; } if ( stricmp(p, "enable") == 0 ) dbg_enable = 1; else if ( stricmp(p, "disable") == 0 ) dbg_enable = 0; else if ( stricmp(p, "notxq") == 0 ) ptm_tx_quota = -1; else if ( stricmp(p, "txq") == 0 ) { int new_q = -1; p = p1; while (*p && *p <= ' ') { p++; } while( *p >= '0' && *p <= '9') { if( new_q < 0 ){ new_q = 0; } new_q = new_q * 10 + *p - '0'; p ++; } if(new_q >= 0) { ptm_tx_quota = new_q; } } return count; } #endif #if defined(DEBUG_MEM_PROC) && DEBUG_MEM_PROC static int proc_write_mem(struct file *file, const char *buf, unsigned long count, void *data) { char *p1, *p2; int len; int colon; unsigned long *p; static char local_buf[1024]; int i, n; len = sizeof(local_buf) < count ? 
sizeof(local_buf) : count; len = len - copy_from_user(local_buf, buf, len); local_buf[len] = 0; p1 = local_buf; colon = 1; while ( get_token(&p1, &p2, &len, &colon) ){ if ( stricmp(p1, "w") == 0 || stricmp(p1, "write") == 0 || stricmp(p1, "r") == 0 || stricmp(p1, "read") == 0 ) { break; } p1 = p2; colon = 1; } if ( *p1 == 'w' ){ ignore_space(&p2, &len); p = (unsigned long *)get_number(&p2, &len, 1); if ( (u32)p >= KSEG0 ) while ( 1 ){ ignore_space(&p2, &len); if ( !len || !((*p2 >= '0' && *p2 <= '9') || (*p2 >= 'a' && *p2 <= 'f') || (*p2 >= 'A' && *p2 <= 'F')) ) { break; } *p++ = (u32)get_number(&p2, &len, 1); } } else if ( *p1 == 'r' ){ ignore_space(&p2, &len); p = (unsigned long *)get_number(&p2, &len, 1); if ( (u32)p >= KSEG0 ){ ignore_space(&p2, &len); n = (int)get_number(&p2, &len, 0); if ( n ){ n += (((int)p >> 2) & 0x03); p = (unsigned long *)((u32)p & ~0x0F); for ( i = 0; i < n; i++ ){ if ( (i & 0x03) == 0 ) printk("%08X:", (u32)p); printk(" %08X", (u32)*p++); if ( (i & 0x03) == 0x03 ) printk("\n"); } if ( (n & 0x03) != 0x00 ) printk("\n"); } } } return count; } #endif /* DEBUG_MEM_PROC */ static INLINE void proc_file_create(void) { struct proc_dir_entry *res; ptm_proc_dir = proc_mkdir(AMAZON_S_PTM_NAME, NULL); create_proc_read_entry("stats", 0, ptm_proc_dir, proc_read_stats, NULL); res = create_proc_read_entry("mib", 0, ptm_proc_dir, proc_read_mib, NULL); if ( res ) res->write_proc = proc_write_mib; #if defined(ENABLE_DBG_PROC) res = create_proc_read_entry("dbg", 0, ptm_proc_dir, proc_read_dbg, NULL); if ( res ) res->write_proc = proc_write_dbg; #endif #if defined(DEBUG_MEM_PROC) && DEBUG_MEM_PROC res = create_proc_read_entry("mem", 0, ptm_proc_dir, NULL, NULL); if ( res ) res->write_proc = proc_write_mem; #endif } static INLINE void proc_file_delete(void) { remove_proc_entry("stats", ptm_proc_dir); remove_proc_entry("mib", ptm_proc_dir); #if defined(ENABLE_DBG_PROC) remove_proc_entry("dbg", ptm_proc_dir); #endif #if defined(DEBUG_MEM_PROC) && 
DEBUG_MEM_PROC
    remove_proc_entry("mem", ptm_proc_dir);
#endif
    remove_proc_entry(AMAZON_S_PTM_NAME, NULL);
}

#if defined(DEBUG_DUMP_RX_SKB) || (defined(DEBUG_DUMP_TX_SKB) && DEBUG_DUMP_TX_SKB)
/* Hex-dump an sk_buff to the kernel log (16 bytes per row).
 * is_rx selects the "RX path" / "TX path" banner. No-op unless dbg_enable. */
static INLINE void dump_skb(struct sk_buff *skb, int is_rx)
{
    int i;

    if ( !dbg_enable )
        return;

    printk(is_rx ? "RX path --- sk_buff\n" : "TX path --- sk_buff\n");
    printk(" skb->data = %08X, skb->tail = %08X, skb->len = %d\n", (u32)skb->data, (u32)skb->tail, (int)skb->len);
    for ( i = 1; i <= skb->len; i++ )
    {
        if ( i % 16 == 1 )
            printk(" %4d:", i - 1);
        printk(" %02X", (int)(*((char*)skb->data + i - 1) & 0xFF));
        if ( i % 16 == 0 )
            printk("\n");
    }
    if ( (i - 1) % 16 != 0 )
        printk("\n");
}
#endif

/*
 * Description:
 *   Initialize global variables, PP32, communication structures, register IRQ
 *   and register device.
 * Input:
 *   none
 * Output:
 *   0    --- successful
 *   else --- failure, usually it is negative value of error code
 */
static int __init amazon_s_ptm_init(void)
{
    int ret;
    int i;

#ifdef MODULE
    ptm_ppe_reset();
#endif

    /* NOTE(review): this return value is overwritten by ptm_dev_init()
     * below without being checked — confirm ptm_local_var_init() cannot
     * fail, or add error handling. */
    ret = ptm_local_var_init();

    ptm_param_check();

    ret = ptm_dev_init();
    if ( ret )
        goto INIT_PTM_DEV_FAIL;

    ptm_sb_init();
    ptm_hw_config();
    ptm_host_ppe_if_init();

    /* create device */
    for ( i = 0; i < ARRAY_SIZE(ptm_net_dev); i++ )
    {
        ret = register_netdev(ptm_net_dev + i);
        if ( ret )
        {
            goto REGISTER_NETDEV_FAIL;
        }
    }

    /* register interrupt handler */
    ret = request_irq(PPE_MAILBOX_IGU1_INT, mailbox_irq_handler, IRQF_DISABLED, "ptm_dma_isr", NULL);
    if ( ret )
        goto REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL;

    ptm_chip_init();
    /* remember the interrupt-enable masks programmed by ptm_chip_init() */
    ptm_dev.rx_irq = *MBOX_IGU1_IER & 0x0000FFFF;
    ptm_dev.tx_irq = (*MBOX_IGU1_IER & 0xFFFF0000) >> MBOX_TX_POS_BOUNDARY;

    ppe_bp_setup();

#if ENABLE_PTM_DEBUG
    ptm_register_dump();
#endif

    ret = pp32_start();
    if ( ret )
    {
        goto PP32_START_FAIL;
    }

    /* careful, PPE firmware may set some registers, recover them here */
    *MBOX_IGU1_IER = (ptm_dev.tx_irq << MBOX_TX_POS_BOUNDARY) | ptm_dev.rx_irq;

    /* create proc file */
    proc_file_create();

    printk(KERN_INFO "%s firmware version %d.%d.%d.%d.%d.%d \n", __func__, (int)FW_VER_ID->family, (int)FW_VER_ID->fwtype, (int)FW_VER_ID->interface, (int)FW_VER_ID->fwmode, (int)FW_VER_ID->major, (int)FW_VER_ID->minor);
    printk(KERN_INFO "Infineon technologies PTM driver version %s\n", AMAZON_S_PTM_VERSION);

    return 0;

    /* unwind in reverse order of acquisition */
PP32_START_FAIL:
    free_irq(PPE_MAILBOX_IGU1_INT, NULL);
REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL:
REGISTER_NETDEV_FAIL:
    /* i is the count of successfully registered netdevs */
    while ( i-- )
        unregister_netdev(ptm_net_dev + i);
    ptm_dev_cleanup();
INIT_PTM_DEV_FAIL:
    printk("%s failed\n", __func__);
    return ret;
}

/*
 * Description:
 *   Release memory, free IRQ, and deregister device.
 * Input:
 *   none
 * Output:
 *   none
 */
static void __exit amazon_s_ptm_exit(void)
{
    int i;

    proc_file_delete();

    pp32_stop();

    free_irq(PPE_MAILBOX_IGU1_INT, NULL);

    for ( i = ARRAY_SIZE(ptm_net_dev) - 1; i >= 0; i-- )
    {
        unregister_netdev(ptm_net_dev + i);
    }

    ptm_dev_cleanup();
}

#ifndef MODULE
/* Kernel command line parser for "ethaddr=xx:xx:xx:xx:xx:xx" (built-in only):
 * fills MY_ETHADDR with the six hex bytes, zero-filling on short input. */
static int __init amazon_s_ethaddr_setup(char *line)
{
    int i;
    char *ep;

    memset(MY_ETHADDR, 0, sizeof(MY_ETHADDR));
    for ( i = 0; i < ETH_ADDR_LEN; i++ )
    {
        MY_ETHADDR[i] = line ? simple_strtoul(line, &ep, 16) : 0;
        if ( line )
            line = *ep ? ep + 1 : ep;
    }
    PTM_DBG("ptm device mac address %02X-%02X-%02X-%02X-%02X-%02X\n", MY_ETHADDR[0], MY_ETHADDR[1], MY_ETHADDR[2], MY_ETHADDR[3], MY_ETHADDR[4], MY_ETHADDR[5]);

    return 0;
}
#endif

module_init(amazon_s_ptm_init);
module_exit(amazon_s_ptm_exit);

MODULE_LICENSE ("GPL");
MODULE_AUTHOR ("XuLiang@infineon.com");
MODULE_SUPPORTED_DEVICE ("Infineon Amazon_S");
MODULE_DESCRIPTION ("Infineon PTM/EFM device driver");

#ifndef MODULE
__setup("ethaddr=", amazon_s_ethaddr_setup);
#endif