/*------------------------------------------------------------------------------------------*\
 *
 * Copyright (C) 2014 AVM GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
\*------------------------------------------------------------------------------------------*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
/*
 * The platform specific headers are needed here as well: the Puma6 ARM/ATOM
 * mailbox API (arm_atom_mbx_*() / npcpu_appcpu_mbx_*()), the remote-CPU IRQ
 * interface (trigger_remotecpuirq(), request_remotecpu_irqhandler(),
 * AVM_CPUCONN_REMOTECPU_IRQ) and the memory layout (ARM_MEM_OFFSET,
 * puma6_get_arm_ram()). Their include paths are SDK specific.
 */

#define IS_X86 defined(CONFIG_ARCH_GEN3)

#if IS_X86
# include <asm/cacheflush.h>    /* clflush_cache_range() */
#else /*--- #if IS_X86 ---*/
/* assumed: the PAL header providing PAL_sysCacheFlush()/PAL_sysCacheInvalidate() */
# include <pal.h>
#endif /*--- #else ---*/ /*--- #if IS_X86 ---*/

#include "acc_debug.h"

char acc_driver_name[] = "acc";

#define DRV_VERSION "0.1"
const char acc_driver_version[] = DRV_VERSION;
static const char acc_copyright[] = "Copyright (c) 2014 AVM GmbH";

MODULE_AUTHOR("AVM GmbH");
MODULE_DESCRIPTION("AVM CPU connection");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DESCR_MODE 1

#if IS_X86
#define REMOTE_MEM_OFFSET ARM_MEM_OFFSET
#else
#define REMOTE_MEM_OFFSET (-ARM_MEM_OFFSET)
#endif

/* Assumption: ACC_MESSAGES is of the form 2^x */
#define ACC_MESSAGES            128                     // memory queue length
#define ACC_QUOTA               (ACC_MESSAGES / 4)      // limit for processing during one poll
#define ACC_IRQ_TIME            1                       // ms before re-sending IRQ when Tx queue is full

#define ACC_MAX_MTU             2000
#define ACC_MIN_FRAME_SIZE      64
#define ACC_BUFFER_SIZE         (ACC_MAX_MTU + 4)       /* MTU + VLAN (4) */
#define ACC_SKB_HEADER_RESERVED (48 + 2)
#define ACC_TOTAL_BUFFER_SIZE   (ACC_BUFFER_SIZE + ACC_SKB_HEADER_RESERVED)

#define ACC_NEXT_IDX(idx)       (((idx) + 1) & (ACC_MESSAGES - 1))

enum acc_msg_cmd {
    acc_msg_cmd_none = 0,
    acc_msg_cmd_ping,
    acc_msg_cmd_ping_ack,
    acc_msg_cmd_tx,
    acc_msg_cmd_rx,
};
typedef enum acc_msg_cmd acc_msg_cmd_t;

#define ACC_LSTATE_CHG_LOC (1 << 0)
#define ACC_LSTATE_CHG_REM (1 << 1)

enum acc_link_state {
    acc_lstate_inval = 0,
    acc_lstate_down  = 1,
    acc_lstate_up    = 2
};

struct acc_data_ping {
    __le32 seq;
} __attribute__((packed));

enum acc_desc_state {
    acc_desc_inval = 0,
    acc_desc_ready = 1,
    acc_desc_cmplt = 2,
};

struct acc_data_desc {
    __le32 addr;
    __le32 data_len;
    __le32 state;
} __attribute__((packed));

union acc_data {
    struct acc_data_ping ping;
    struct acc_data_desc desc;
} __attribute__((packed));

struct acc_msg_struct {
    uint32_t cmd;
    union acc_data data;
} __attribute__((packed));

struct acc_msg {
    acc_msg_cmd_t cmd;
    struct sk_buff *skb;
};

/*--------------------------------------------------------------------------------*\
\*--------------------------------------------------------------------------------*/
enum queue_mode {
    queue_mode_copy = 0,
    queue_mode_actv = 1,
    queue_mode_pasv = 2,
};

struct acc_queue {
    __le32 queue_len;
    __le32 linkstate;
    __le32 rx_head;
    __le32 rx_tail;
    __le32 tx_head;
    __le32 tx_tail;
    struct acc_msg_struct tx_msgs[ACC_MESSAGES];
    struct acc_msg_struct rx_msgs[ACC_MESSAGES];
} __attribute__((packed));

/*--------------------------------------------------------------------------------*\
\*--------------------------------------------------------------------------------*/
struct acc_queue_mgmt {
    struct resource *io_region;
    dma_addr_t phys_base;
    enum queue_mode mode;
    enum acc_link_state linkstate;
    spinlock_t lock;
    struct sk_buff *rx_skb[ACC_MESSAGES];
    struct sk_buff *tx_skb[ACC_MESSAGES];
    struct acc_queue *queue;
};

enum acc_mgmt_state {
    acc_mgmt_state_error,
    acc_mgmt_state_init,
    acc_mgmt_state_addr_sent,
    acc_mgmt_state_addr_rcvd,
    acc_mgmt_state_ping_sent,
    acc_mgmt_state_ping_rcvd,
    acc_mgmt_state_running,
    acc_mgmt_state_closing,
    acc_mgmt_state_closed
};
typedef enum acc_mgmt_state acc_mgmt_state_t;

#define ACC_WORK_BIT_RUN        0
#define ACC_WORK_BIT_POLL_RX    2
#define ACC_WORK_BIT_POLL_TX    3
#define ACC_WORK_RUN            (1 << ACC_WORK_BIT_RUN)
#define ACC_WORK_POLL_RX        (1 << ACC_WORK_BIT_POLL_RX)
#define ACC_WORK_POLL_TX        (1 << ACC_WORK_BIT_POLL_TX)

/*--------------------------------------------------------------------------------*\
\*--------------------------------------------------------------------------------*/
struct acc_mgmt_struct {
    acc_mgmt_state_t state;
    struct net_device *dev;
    struct napi_struct napi;
    struct acc_queue_mgmt loc_queue;
    struct acc_queue_mgmt rem_queue;
    struct task_struct *kthread;
    wait_queue_head_t wait_queue;
    unsigned long last_irq;
    unsigned int tx_stalled;
    unsigned long triggers;
    struct sk_buff_head rx_skb_q;
    struct sk_buff_head tx_skb_q;
    struct sk_buff_head skb_pool;
};

struct acc_netdev_priv {
    struct acc_mgmt_struct *pctrl;
};

struct acc_mgmt_struct acc_mgmt;

static void acc_trigger_poll_tx(struct acc_mgmt_struct *pctrl);
static void acc_trigger_poll_rx(struct acc_mgmt_struct *pctrl);

enum flush_type {
    flush_read,
    flush_write,
    flush_read_write,
};

/*------------------------------------------------------------------------------------------*\
\*------------------------------------------------------------------------------------------*/
static void flush_cache(void *addr, size_t len, enum flush_type type __attribute__((unused)))
{
    mb();
# if IS_X86
    clflush_cache_range(addr, len);
# else /*--- #if IS_X86 ---*/
    switch(type){
    case flush_read:
        PAL_sysCacheInvalidate(PAL_OSMEM_ADDR_DAT, addr, len);
        break;
    case flush_write:
        PAL_sysCacheFlush(PAL_OSMEM_ADDR_DAT, addr, len);
        break;
    case flush_read_write:
        PAL_sysCacheFlushAndInvalidate(PAL_OSMEM_ADDR_DAT, addr, len);
        break;
    }
# endif /*--- #else ---*/ /*--- #if IS_X86 ---*/
    mb();
}

/*--------------------------------------------------------------------------------*\
\*--------------------------------------------------------------------------------*/
static int setup_localqueue(struct acc_queue_mgmt *p_queue)
{
    unsigned int i;

    p_queue->queue = dma_alloc_coherent(NULL, sizeof(struct acc_queue),
                                        &(p_queue->phys_base), GFP_KERNEL);
    if(p_queue->queue == NULL){
        DEB_ERR("[%s] Unable to allocate send queue.\n", __func__);
        return -ENOMEM;
    }
    DEB_TRC("[%s] virt: %p phys: %08x\n", __func__, p_queue->queue, p_queue->phys_base);
    DEB_TRC("[%s] queue: %p queue.tx_msgs[0]: %p, queue.rx_msgs[0]: %p\n", __func__,
            p_queue->queue, &(p_queue->queue->tx_msgs[0]), &(p_queue->queue->rx_msgs[0]));

    memset((unsigned char *)p_queue->queue, 0x0, sizeof(struct acc_queue));
    spin_lock_init(&p_queue->lock);
    p_queue->linkstate = acc_lstate_down;

    p_queue->queue->queue_len = cpu_to_le32(ACC_MESSAGES);
    p_queue->queue->linkstate =
        cpu_to_le32(acc_lstate_down);
    p_queue->queue->rx_head = 0;
    p_queue->queue->rx_tail = 0;
    p_queue->queue->tx_head = 0;
    p_queue->queue->tx_tail = 0;

#if defined(DESCR_MODE)
#if !IS_X86
    p_queue->mode = queue_mode_pasv;
#else // !IS_X86
    p_queue->mode = queue_mode_actv;
#endif // !IS_X86
    for(i = 0; i < ACC_MESSAGES; ++i){
        p_queue->queue->rx_msgs[i].cmd = cpu_to_le32(acc_msg_cmd_rx);
        p_queue->queue->rx_msgs[i].data.desc.addr = 0;
        p_queue->queue->rx_msgs[i].data.desc.data_len = 0;
        p_queue->queue->rx_msgs[i].data.desc.state = cpu_to_le32(acc_desc_inval);

        p_queue->queue->tx_msgs[i].cmd = cpu_to_le32(acc_msg_cmd_tx);
        p_queue->queue->tx_msgs[i].data.desc.addr = 0;
        p_queue->queue->tx_msgs[i].data.desc.data_len = 0;
        p_queue->queue->tx_msgs[i].data.desc.state = cpu_to_le32(acc_desc_inval);

        p_queue->rx_skb[i] = NULL;
        p_queue->tx_skb[i] = NULL;
    }
#else // defined(DESCR_MODE)
    p_queue->mode = queue_mode_copy;
#endif // defined(DESCR_MODE)

    flush_cache((void *) p_queue->queue, sizeof(struct acc_queue), flush_write);

    return 0;
}

/*--------------------------------------------------------------------------------*\
\*--------------------------------------------------------------------------------*/
static void free_localqueue(struct acc_queue_mgmt *ploc_queue) __attribute__((unused));
static void free_localqueue(struct acc_queue_mgmt *ploc_queue)
{
    struct acc_queue *queue;
    dma_addr_t phys_base;
    unsigned int i;

    queue = ploc_queue->queue;
    phys_base = ploc_queue->phys_base;
    ploc_queue->queue = NULL;
    ploc_queue->phys_base = 0;

    for(i = 0; i < ACC_MESSAGES; ++i){
        if(ploc_queue->tx_skb[i] != NULL){
            dev_kfree_skb_any(ploc_queue->tx_skb[i]);
        }
        if(ploc_queue->rx_skb[i] != NULL){
            dev_kfree_skb_any(ploc_queue->rx_skb[i]);
        }
    }

    if(queue != NULL){
        dma_free_coherent(NULL, sizeof(struct acc_queue), (void *) queue, phys_base);
    }
}

/*--------------------------------------------------------------------------------*\
\*--------------------------------------------------------------------------------*/
extern struct resource *puma6_get_arm_ram(void);

static int setup_remotequeue(uint32_t queue_addr, struct acc_queue_mgmt *p_queue)
{
    resource_size_t size;

    size = sizeof(struct acc_queue);
#if !IS_X86
    p_queue->io_region = request_mem_region((resource_size_t) queue_addr, size, "AccQueue");
#else
    p_queue->io_region = puma6_get_arm_ram();
#endif
    if(p_queue->io_region == NULL){
        DEB_ERR("[%s] Unable to request_mem_region for address 0x%0x\n", __func__, queue_addr);
        return -EFAULT;
    }
    p_queue->phys_base = (dma_addr_t) queue_addr;
    p_queue->queue = (struct acc_queue *) ioremap_nocache(queue_addr, size);
    DEB_TRC("[%s] virt: %p phys: %08x\n", __func__, p_queue->queue, queue_addr);
    if(p_queue->queue == NULL){
        DEB_ERR("[%s] Unable to set up receive queue.\n", __func__);
        return -EINVAL;
    }

    flush_cache((void *) p_queue->queue, sizeof(struct acc_queue), flush_read);
    if(ACC_MESSAGES != ioread32(&p_queue->queue->queue_len)){
        DEB_ERR("[%s] Queue size mismatch. "
                "Expected: 0x%x, found: 0x%x\n", __func__,
                ACC_MESSAGES, ioread32(&p_queue->queue->queue_len));
        return -EINVAL;
    }

    spin_lock_init(&p_queue->lock);
    p_queue->linkstate = ioread32(&p_queue->queue->linkstate);
    acc_trigger_poll_rx(&acc_mgmt);

    return 0;
}

/*--------------------------------------------------------------------------------*\
\*--------------------------------------------------------------------------------*/
static void free_remotequeue(struct acc_queue_mgmt *prem_queue) __attribute__((unused));
static void free_remotequeue(struct acc_queue_mgmt *prem_queue)
{
    if(prem_queue->queue != NULL){
        iounmap(prem_queue->queue);
#if !IS_X86
        release_mem_region((unsigned long) prem_queue->phys_base, sizeof(struct acc_queue));
#endif
        prem_queue->queue = NULL;
    }
}

/*------------------------------------------------------------------------------------------*\
 * Send IRQ to trigger remote
\*------------------------------------------------------------------------------------------*/
static void acc_trigger_remote(struct acc_mgmt_struct *pctrl)
{
    trigger_remotecpuirq(AVM_CPUCONN_REMOTECPU_IRQ);
    pctrl->last_irq = jiffies;
}

/*------------------------------------------------------------------------------------------*\
 * Set the RX poll flag and wake the work thread
\*------------------------------------------------------------------------------------------*/
static void acc_trigger_poll_rx(struct acc_mgmt_struct *pctrl)
{
    set_bit(ACC_WORK_BIT_POLL_RX, &pctrl->triggers);
    set_bit(ACC_WORK_BIT_RUN, &pctrl->triggers);
    wake_up_interruptible_sync(&pctrl->wait_queue);
}

/*------------------------------------------------------------------------------------------*\
 * Set the TX poll flag and wake the work thread
\*------------------------------------------------------------------------------------------*/
static void acc_trigger_poll_tx(struct acc_mgmt_struct *pctrl)
{
    set_bit(ACC_WORK_BIT_POLL_TX, &pctrl->triggers);
    set_bit(ACC_WORK_BIT_RUN, &pctrl->triggers);
    wake_up_interruptible_sync(&pctrl->wait_queue);
}

/*------------------------------------------------------------------------------------------*\
 * Set RX and TX poll flags and wake the work thread
\*------------------------------------------------------------------------------------------*/
static void acc_trigger_poll_rxtx(struct acc_mgmt_struct *pctrl) __attribute__((unused));
static void acc_trigger_poll_rxtx(struct acc_mgmt_struct *pctrl)
{
    set_bit(ACC_WORK_BIT_POLL_RX, &pctrl->triggers);
    set_bit(ACC_WORK_BIT_POLL_TX, &pctrl->triggers);
    set_bit(ACC_WORK_BIT_RUN, &pctrl->triggers);
    wake_up_interruptible_sync(&pctrl->wait_queue);
}

/*--------------------------------------------------------------------------------*\
\*--------------------------------------------------------------------------------*/
static int update_linkstate(struct acc_mgmt_struct *pctrl, unsigned int flush)
{
    struct acc_queue_mgmt *ploc_queue = &pctrl->loc_queue;
    struct acc_queue_mgmt *prem_queue = &pctrl->rem_queue;
    int changed = 0;

    if(flush){
        flush_cache((void *) &prem_queue->queue->linkstate,
                    sizeof(prem_queue->queue->linkstate), flush_read);
    } else {
        mb();
    }

    if(ploc_queue->queue->linkstate != cpu_to_le32(ploc_queue->linkstate)){
        ploc_queue->queue->linkstate = cpu_to_le32(ploc_queue->linkstate);
        changed |= ACC_LSTATE_CHG_LOC;
    }
    if(prem_queue->linkstate != ioread32(&prem_queue->queue->linkstate)){
        prem_queue->linkstate = ioread32(&prem_queue->queue->linkstate);
        changed |= ACC_LSTATE_CHG_REM;
    }

    if(flush && (changed & ACC_LSTATE_CHG_LOC)){
        flush_cache((void *)
                    &ploc_queue->queue->linkstate,
                    sizeof(ploc_queue->queue->linkstate), flush_write);
    } else {
        mb();
    }

    return changed;
}

/*--------------------------------------------------------------------------------*\
\*--------------------------------------------------------------------------------*/
static int send_msg(struct acc_mgmt_struct *pctrl, struct acc_msg msg)
{
    struct acc_queue_mgmt *ploc_queue = &pctrl->loc_queue;
    struct acc_queue_mgmt *prem_queue = &pctrl->rem_queue;
    struct acc_data_desc *l_desc, *r_desc;
    struct sk_buff *skb;
    dma_addr_t dma_addr;
    uint32_t l_head, l_tail, r_head, r_tail, data_len;
    uintptr_t io_addr;
    int result;
    unsigned char *buff;

    if(pctrl->state < acc_mgmt_state_addr_rcvd){
        return -ENODEV;
    }

    skb = msg.skb;
    if(skb == NULL){
        DEB_ERR("[%s] skb == NULL\n", __func__);
        return -EINVAL;
    }

    result = 0;
    flush_cache((void *) prem_queue->queue,
                ((uintptr_t) &prem_queue->queue->tx_msgs[0]) - ((uintptr_t) prem_queue->queue),
                flush_read);

    // get a snapshot of the current queue pointer positions
    r_head = ioread32(&prem_queue->queue->rx_head);
    r_tail = ioread32(&prem_queue->queue->rx_tail);
    l_head = le32_to_cpu(ploc_queue->queue->tx_head);
    l_tail = le32_to_cpu(ploc_queue->queue->tx_tail);
    mb();

    DEB_DEBUG("[%s] l_head: %03d l_tail: %03d r_head: %03d r_tail: %03d\n",
              __func__, l_head, l_tail, r_head, r_tail);

    /*
     * active side (Atom)
     */
    if(ploc_queue->mode == queue_mode_actv){
        // reset local descriptors that have been cleared on the remote side
        while(l_tail != r_tail){
            l_tail = ACC_NEXT_IDX(l_tail);
            l_desc = &ploc_queue->queue->tx_msgs[l_tail].data.desc;
            l_desc->state = cpu_to_le32(acc_desc_inval);
            l_desc->addr = 0;
            l_desc->data_len = 0;
            flush_cache((void *) l_desc, sizeof(*l_desc), flush_write);
        }
        ploc_queue->queue->tx_tail = cpu_to_le32(l_tail);

        skb = msg.skb;
        if(skb == NULL){
            result = -EINVAL;
            goto err_out;
        }

        /*
         * We have a SKB to send. Fetch the remote's next TX descriptor
         */
        l_head = ACC_NEXT_IDX(l_head);
        result = -EBUSY;
        if(l_head != ACC_NEXT_IDX(r_head)){
            l_desc = &ploc_queue->queue->tx_msgs[l_head].data.desc;
            r_desc = &prem_queue->queue->rx_msgs[l_head].data.desc;
            flush_cache((void *) r_desc, sizeof(*r_desc), flush_read);
            if(ioread32(&r_desc->state) == acc_desc_ready){
                // we have a free descriptor, do some sanity checks
                if(unlikely(ioread32(&r_desc->addr) == 0 || ioread32(&r_desc->data_len) == 0)){
                    DEB_ERR("[%s] tx descriptor %d has bogus data: addr: 0x%08x len: 0x%08x\n",
                            __func__, l_head, (uint32_t)ioread32(&r_desc->addr),
                            ioread32(&r_desc->data_len));
                    result = -EIO;
                    goto err_out;
                }
                // map the buffer pointed to by the descriptor as I/O mem
                io_addr = ioread32(&r_desc->addr) - REMOTE_MEM_OFFSET;
                data_len = ioread32(&r_desc->data_len);
                if(data_len < skb->len){
                    DEB_ERR("[%s] tx descriptor %d: data buffer too small: 0x%x\n",
                            __func__, l_head, data_len);
                    result = -EIO;
                    goto err_out;
                }
                buff = (unsigned char *) ioremap_nocache(io_addr, skb->len);
                if(buff != NULL){
                    // mapping of the buffer succeeded, copy the SKB's data into it...
                    flush_cache((void *) buff, skb->len, flush_read);
                    memcpy_toio(buff, skb->data, skb->len);
                    flush_cache((void *) buff, skb->len, flush_write);
                    iounmap(buff);

                    // and update our shadow descriptor
                    l_desc->addr = cpu_to_le32(ioread32(&r_desc->addr));
                    l_desc->data_len = cpu_to_le32(skb->len);
                    l_desc->state = cpu_to_le32(acc_desc_cmplt);
                    flush_cache((void *)l_desc, sizeof(*l_desc), flush_write);

                    // move our TX head so the remote side knows that there is new data
                    ploc_queue->queue->tx_head = cpu_to_le32(l_head);

                    // update interface stats
                    pctrl->dev->stats.tx_bytes += skb->len;
                    ++pctrl->dev->stats.tx_packets;

                    // if our SKB pool is below the high water mark, try recycling this SKB,
                    // otherwise just free it
                    if(   pctrl->skb_pool.qlen < (2 * ACC_MESSAGES)
                       && skb_recycle_check(skb, ACC_TOTAL_BUFFER_SIZE))
                    {
                        skb_queue_head(&pctrl->skb_pool, skb);
                    } else {
                        dev_kfree_skb_any(skb);
                    }
                    result = 0;
                } else {
                    DEB_ERR("[%s] ioremap failed for tx descriptor %d, addr: 0x%08x, len: 0x%x\n",
                            __func__, l_head, (unsigned int) io_addr, skb->len);
                }
            } else {
                DEB_TRC("[%s] tx descriptor %d not ready\n", __func__, l_head);
            }
        } else {
            DEB_TRC("[%s] no tx descriptors available\n", __func__);
        }
    } else {
        /*
         * passive side (ARM)
         */

        /*
         * release SKBs that have been processed by the remote side
         */
        while(l_tail != r_head){
            l_tail = ACC_NEXT_IDX(l_tail);
            l_desc = &ploc_queue->queue->tx_msgs[l_tail].data.desc;
            r_desc = &prem_queue->queue->rx_msgs[l_tail].data.desc;
            flush_cache((void *) r_desc, sizeof(*r_desc), flush_read);

            // check remote's shadow descriptor
            if(ioread32(&r_desc->state) == acc_desc_cmplt){
                // do some sanity checks, abort on failure
                if(unlikely(   ioread32(&r_desc->addr) != le32_to_cpu(l_desc->addr)
                            || ioread32(&r_desc->data_len) != le32_to_cpu(l_desc->data_len)))
                {
                    DEB_ERR("[%s] buffer data mismatch on completed remote descriptor %d\n",
                            __func__, l_tail);
                    break;
                }

                // invalidate descriptor, remember buffer's DMA address
                dma_addr = (dma_addr_t) le32_to_cpu(l_desc->addr);
                l_desc->state = cpu_to_le32(acc_desc_inval);
                l_desc->addr = 0;
                l_desc->data_len = 0;
                flush_cache((void *) l_desc, sizeof(*l_desc), flush_write);

                // unmap completed SKB's buffer
                skb = ploc_queue->tx_skb[l_tail];
                ploc_queue->tx_skb[l_tail] = NULL;
                if(likely(skb != NULL)){
                    dma_unmap_single(NULL, dma_addr, skb->len, DMA_TO_DEVICE);
                    pctrl->dev->stats.tx_bytes += skb->len;
                    ++pctrl->dev->stats.tx_packets;

                    // try recycling the SKB if our pool is below the high water mark
                    if(   pctrl->skb_pool.qlen < (2 * ACC_MESSAGES)
                       && skb_recycle_check(skb, ACC_TOTAL_BUFFER_SIZE))
                    {
                        skb_queue_head(&pctrl->skb_pool, skb);
                    } else {
                        dev_kfree_skb_any(skb);
                    }
                } else {
                    pr_emerg("[%s] empty descriptor %d to be released!\n", __func__, l_tail);
                    break;
                }
            }
            ploc_queue->queue->tx_tail = cpu_to_le32(l_tail);
        }

        /*
         * send an SKB by mapping its buffer as DMA memory and attach it to a descriptor
         * so the remote side can pick it up
         */
        skb = msg.skb;
        l_head = ACC_NEXT_IDX(l_head);
        if(l_head != r_tail){
            l_desc = &ploc_queue->queue->tx_msgs[l_head].data.desc;
            dma_addr = dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE);
            if(unlikely(dma_mapping_error(NULL, dma_addr))){
                DEB_ERR("[%s] mapping skb for tx descriptor %d failed!\n", __func__, l_head);
                result = -EIO;
                goto err_out;
            }
            mb();
            l_desc->addr = cpu_to_le32(dma_addr);
            l_desc->data_len = cpu_to_le32(skb->len);
            l_desc->state = cpu_to_le32(acc_desc_ready);
            flush_cache((void *) l_desc, sizeof(*l_desc), flush_write);
            ploc_queue->tx_skb[l_head] = skb;
            ploc_queue->queue->tx_head = cpu_to_le32(l_head);
            result = 0;
        } else {
            result = -EBUSY;
        }
    }
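
    /*
     * Both the success and the error paths fall through to err_out below: the
     * management part of the local queue (head/tail indices and link state) is
     * written back there, so the remote CPU always sees a consistent snapshot
     * of the ring indices no matter how send_msg() finished.
     */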
err_out:
    // flush out the management part of the local queue
    flush_cache((void *) ploc_queue->queue,
                ((uintptr_t) &ploc_queue->queue->tx_msgs[0]) - ((uintptr_t) ploc_queue->queue),
                flush_write);

    DEB_DEBUG("[%s] Leave: l_head: %03d l_tail: %03d r_head: %03d r_tail: %03d\n",
              __func__, l_head, l_tail, r_head, r_tail);

    return result;
}

/*--------------------------------------------------------------------------------*\
\*--------------------------------------------------------------------------------*/
static struct acc_msg receive_msg(struct acc_mgmt_struct *pctrl)
{
    struct acc_queue_mgmt *ploc_queue = &pctrl->loc_queue;
    struct acc_queue_mgmt *prem_queue = &pctrl->rem_queue;
    struct acc_data_desc *l_desc, *r_desc;
    struct sk_buff *skb, *tmp_skb;
    dma_addr_t dma_addr;
    uint32_t l_head, l_tail, r_head, l_tmp, r_tail, data_len, buff_len;
    uintptr_t io_addr;
    unsigned char *buff;
    struct acc_msg msg;

    msg.cmd = acc_msg_cmd_rx;
    msg.skb = NULL;
    skb = msg.skb;

    if(pctrl->state != acc_mgmt_state_running){
        return msg;
    }

    flush_cache((void *) prem_queue->queue,
                ((uintptr_t) &prem_queue->queue->tx_msgs[0]) - ((uintptr_t) prem_queue->queue),
                flush_read);

    // take a snapshot of the queue pointers
    r_head = ioread32(&prem_queue->queue->tx_head);
    r_tail = ioread32(&prem_queue->queue->tx_tail);
    l_head = le32_to_cpu(ploc_queue->queue->rx_head);
    l_tail = le32_to_cpu(ploc_queue->queue->rx_tail);
    mb();

    DEB_DEBUG("[%s] Enter: l_head: %03d l_tail: %03d r_head: %03d r_tail: %03d\n",
              __func__, l_head, l_tail, r_head, r_tail);

    /*
     * active side (Atom)
     */
    if(ploc_queue->mode == queue_mode_actv){
        // reset local descriptors that have been cleared on the remote side
        while(l_tail != r_tail){
            l_tail = ACC_NEXT_IDX(l_tail);
            l_desc = &ploc_queue->queue->rx_msgs[l_tail].data.desc;
            l_desc->state = cpu_to_le32(acc_desc_inval);
            l_desc->addr = 0;
            l_desc->data_len = 0;
            flush_cache((void *) l_desc, sizeof(*l_desc), flush_write);
        }
        ploc_queue->queue->rx_tail = cpu_to_le32(l_tail);

        // check if remote side has queued a new buffer
        l_head = ACC_NEXT_IDX(l_head);
        if(l_head != ACC_NEXT_IDX(r_head)){
            l_desc = &ploc_queue->queue->rx_msgs[l_head].data.desc;
            r_desc = &prem_queue->queue->tx_msgs[l_head].data.desc;
            flush_cache((void *) r_desc, sizeof(*r_desc), flush_read);
            if(likely(ioread32(&r_desc->state) == acc_desc_ready)){
                // new buffer found, do some sanity checks
                io_addr = ioread32(&r_desc->addr);
                data_len = ioread32(&r_desc->data_len);
                if(unlikely(io_addr == 0 || data_len == 0)){
                    DEB_ERR("[%s] rx descriptor %d has bogus data: addr: 0x%08x len: 0x%08x\n",
                            __func__, l_head, (unsigned int) io_addr, data_len);
                    skb = ERR_PTR(-EIO);
                    goto err_out;
                }

                // fetch and prepare an empty SKB from pool
                skb = skb_dequeue(&pctrl->skb_pool);
                if(unlikely(skb == NULL)){
                    DEB_ERR("[%s] unable to receive, skb pool empty\n", __func__);
                    skb = ERR_PTR(-EIO);
                    goto err_out;
                }
                skb_reserve(skb, ACC_SKB_HEADER_RESERVED);

                // do some sanity checks on SKB and remote buffer
                if((unsigned int)skb_tailroom(skb) < data_len){
                    DEB_ERR("[%s] rx descriptor %d: data buffer too big: 0x%x\n",
                            __func__, l_head, data_len);
                    dev_kfree_skb_any(skb);
                    skb = ERR_PTR(-EIO);
                    goto err_out;
                }

                // map remote buffer as iomem
                buff = (unsigned char *) ioremap_nocache(io_addr - REMOTE_MEM_OFFSET, data_len);
                if(buff != NULL){
                    flush_cache((void *) buff, data_len, flush_read);
                    // copy data from remote buffer to local SKB
                    memcpy_fromio(skb->data, buff, data_len);
                    mb();
                    iounmap(buff);
                    skb_put(skb, data_len);

                    // update our shadow descriptor so remote side can see that buffer has
                    // been processed
                    l_desc->addr =
                        cpu_to_le32(io_addr);
                    l_desc->data_len = cpu_to_le32(skb->len);
                    l_desc->state = cpu_to_le32(acc_desc_cmplt);
                    flush_cache((void *)l_desc, sizeof(*l_desc), flush_write);
                    ploc_queue->queue->rx_head = cpu_to_le32(l_head);
                } else {
                    DEB_ERR("[%s] ioremap failed for rx descriptor %d, addr: 0x%08x, len: 0x%x\n",
                            __func__, l_head, (unsigned int) (io_addr - REMOTE_MEM_OFFSET), data_len);
                    skb = ERR_PTR(-EIO);
                }
            } else {
                DEB_ERR("[%s] Remote descriptor at queue position %d not ready!\n", __func__, l_head);
            }
        }
    } else {
        /*
         * passive side (ARM)
         */

        /*
         * refill descriptor slots
         */
        while(ACC_NEXT_IDX(l_head) != r_tail){
            l_tmp = ACC_NEXT_IDX(l_head);
            l_desc = &ploc_queue->queue->rx_msgs[l_tmp].data.desc;
            r_desc = &prem_queue->queue->tx_msgs[l_tmp].data.desc;
            flush_cache((void *) r_desc, sizeof(*r_desc), flush_read);

            /* refill RX descriptor slots if remote side has acknowledged that the descriptor
             * is now invalid */
            if(likely(ioread32(&r_desc->state) == acc_desc_inval)){
                tmp_skb = skb_dequeue(&pctrl->skb_pool);
                if(unlikely(tmp_skb == NULL)){
                    DEB_ERR("[%s] unable to refill rx descriptor at index %d\n", __func__, l_tmp);
                    break;
                }

                // prepare SKB and map its buffer to DMA
                skb_reserve(tmp_skb, ACC_SKB_HEADER_RESERVED);
                data_len = skb_tailroom(tmp_skb);
                dma_addr = dma_map_single(NULL, tmp_skb->data, data_len, DMA_FROM_DEVICE);
                if(unlikely(dma_mapping_error(NULL, dma_addr))){
                    DEB_ERR("[%s] mapping skb for rx descriptor %d failed!\n", __func__, l_tmp);
                    dev_kfree_skb_any(tmp_skb);
                    break;
                }
                mb();

                // store pointer to SKB and update the descriptor
                ploc_queue->rx_skb[l_tmp] = tmp_skb;
                l_desc->addr = cpu_to_le32(dma_addr);
                l_desc->data_len = cpu_to_le32(data_len);
                l_desc->state = cpu_to_le32(acc_desc_ready);
                flush_cache((void *) l_desc, sizeof(*l_desc), flush_write);
            } else {
                DEB_ERR("[%s] descriptor at queue position %d not cleared on remote. "
                        "r_tail: %d\n", __func__, l_head, r_tail);
                break;
            }
            l_head = l_tmp;
        }
        ploc_queue->queue->rx_head = cpu_to_le32(l_head);

        /*
         * process SKBs that have been marked as complete by the remote side
         */
        if(l_tail != r_head){
            l_tail = ACC_NEXT_IDX(l_tail);
            l_desc = &ploc_queue->queue->rx_msgs[l_tail].data.desc;
            r_desc = &prem_queue->queue->tx_msgs[l_tail].data.desc;
            flush_cache((void *) r_desc, sizeof(*r_desc), flush_read);

            // remote side marked descriptor as completed in its shadow descriptor
            if(likely(ioread32(&r_desc->state) == acc_desc_cmplt)){
                dma_addr = le32_to_cpu(l_desc->addr);
                buff_len = le32_to_cpu(l_desc->data_len);
                data_len = ioread32(&r_desc->data_len);

                // do some sanity checks
                if(unlikely(dma_addr != ioread32(&r_desc->addr))){
                    DEB_ERR("[%s] local/remote address mismatch for descriptor %d\n",
                            __func__, l_tail);
                    skb = ERR_PTR(-EIO);
                    goto err_out;
                }

                // invalidate and flush out the local descriptor
                l_desc->state = cpu_to_le32(acc_desc_inval);
                l_desc->addr = 0;
                l_desc->data_len = 0;
                flush_cache((void *) l_desc, sizeof(*l_desc), flush_write);
                ploc_queue->queue->rx_tail = cpu_to_le32(l_tail);

                // unmap the SKB's buffer and update its length
                dma_unmap_single(NULL, dma_addr, buff_len, DMA_FROM_DEVICE);
                mb();
                skb = ploc_queue->rx_skb[l_tail];
                ploc_queue->rx_skb[l_tail] = NULL;
                if(unlikely(skb == NULL)){
                    pr_emerg("[%s] descriptor %d to release but rx skb slot empty!\n",
                             __func__, l_tail);
                    skb = ERR_PTR(-EIO);
                    goto err_out;
                }
                // invalidate buffer memory's cache and update SKB info
                flush_cache(skb->data, data_len, flush_read);
                skb_put(skb, data_len);
            } else {
                DEB_TRC("[%s] descriptor %d still incomplete, state: 0x%08x\n",
                        __func__, l_tail, ioread32(&r_desc->state));
            }
        }
    }

err_out:
    mb();
    flush_cache((void *) ploc_queue->queue,
                ((uintptr_t) &ploc_queue->queue->tx_msgs[0]) - ((uintptr_t) ploc_queue->queue),
                flush_write);

    DEB_DEBUG("[%s] Leave: l_head: %03d l_tail: %03d r_head: %03d r_tail: %03d\n",
              __func__, l_head, l_tail, r_head, r_tail);

    msg.skb = skb;
    return msg;
}

/*--------------------------------------------------------------------------------*\
\*--------------------------------------------------------------------------------*/
int acc_poll(struct acc_mgmt_struct *pctrl, unsigned int quota)
{
    struct acc_msg msg;
    struct sk_buff *skb;
    unsigned int rx_done, rx_quota, tx_done, tx_quota;
    unsigned int rx_empty, tx_complete;
    int result, link_changed;

    rx_done = 1;
    tx_done = 1;
    rx_empty = 0;
    tx_complete = 0;
    rx_quota = tx_quota = quota;

    // top up the SKB pool
    while(pctrl->skb_pool.qlen < (ACC_MESSAGES + quota)){
        skb = netdev_alloc_skb(pctrl->dev, ACC_TOTAL_BUFFER_SIZE);
        if(skb == NULL){
            break;
        }
        skb_queue_tail(&pctrl->skb_pool, skb);
    }

    link_changed = update_linkstate(pctrl, 1);
    if(link_changed & ACC_LSTATE_CHG_REM){
        switch(pctrl->rem_queue.linkstate){
        case acc_lstate_up:
            netif_carrier_on(pctrl->dev);
            break;
        case acc_lstate_down:
            netif_carrier_off(pctrl->dev);
            break;
        default:
            DEB_ERR("[%s] invalid link state on remote queue: 0x%x\n",
                    __func__, pctrl->rem_queue.linkstate);
            break;
        }
    }

    // enable rx processing only if the POLL_RX bit has been set
    if(test_and_clear_bit(ACC_WORK_BIT_POLL_RX, &pctrl->triggers)){
        rx_done = 0;
    }
    // same for tx
    if(test_and_clear_bit(ACC_WORK_BIT_POLL_TX, &pctrl->triggers)){
        if(pctrl->tx_skb_q.qlen > 0){
            tx_done = 0;
        }
        pctrl->tx_stalled = 0;
    }

    /* loop until the tx/rx work quota is reached, there are no more packets to process
     * or the memory queues are empty/full. Always alternate between receiving and sending.
     */
    while((rx_done == 0 && rx_quota > 0) || (tx_done == 0 && tx_quota > 0)){
        DEB_DEBUG("[%s] rx_quota: %03d rx_done: %d tx_quota: %03d tx_done: %d\n",
                  __func__, rx_quota, rx_done, tx_quota, tx_done);

        // try receiving a new SKB and append it to our RX queue
        if(rx_done == 0 && rx_quota > 0){
            msg = receive_msg(pctrl);
            if(msg.skb == NULL){
                // nothing left to receive, disable rx for the rest of this invocation
                rx_done = 1;
                rx_empty = 1;
            } else {
                if(IS_ERR(msg.skb)){
                    // there was an error processing the current descriptor, but there might
                    // still be valid data left
                    DEB_ERR("[%s] Invalid frame received\n", __func__);
                } else {
                    // SKB received. Queue it for later processing
                    skb_queue_tail(&pctrl->rx_skb_q, msg.skb);
                }
                --rx_quota;
            }
        }

        // try sending SKB from our TX queue
        if(tx_done == 0 && tx_quota > 0){
            msg.cmd = acc_msg_cmd_tx;
            msg.skb = skb_dequeue(&pctrl->tx_skb_q);
            if(msg.skb != NULL){
                result = send_msg(pctrl, msg);
                switch(result){
                case -EBUSY:
                    // we ran out of tx descriptors. Re-queue the SKB at the head of
                    // the tx queue and disable tx for this invocation
                    skb_queue_head(&pctrl->tx_skb_q, msg.skb);
                    tx_done = 1;
                    pctrl->tx_stalled = 1;
                    break;
                case 0:
                    // SKB successfully sent
                    break;
                default:
                    // something went wrong...
                    DEB_ERR("[%s] unexpected return code from send_msg: %d\n", __func__, result);
                }
                --tx_quota;
            } else {
                // tx queue is empty, we are done for this invocation
                tx_done = 1;
                tx_complete = 1;
            }
        }
    }

    // re-enable tx queueing if backlog has been reduced somewhat
    if(pctrl->tx_skb_q.qlen < ACC_MESSAGES && netif_queue_stopped(pctrl->dev)){
        netif_wake_queue(pctrl->dev);
    }

    // trigger remote if we made changes on the descriptor queues
    if(   (rx_empty && rx_quota < quota)    // there were SKBs in the Rx queue and we emptied it
       || tx_complete                       // there is nothing more to send
       // Tx descriptor queue is full and we sent the last IRQ more than ACC_IRQ_TIME ms ago
       || (pctrl->tx_stalled && time_is_before_jiffies(pctrl->last_irq + msecs_to_jiffies(ACC_IRQ_TIME))))
    {
        acc_trigger_remote(pctrl);
    }

    // hand the SKBs we received to the network layer
    while((skb = skb_dequeue(&pctrl->rx_skb_q)) != NULL){
        skb->ip_summed = CHECKSUM_NONE;
        skb->protocol = eth_type_trans(skb, pctrl->dev);
        ++pctrl->dev->stats.rx_packets;
        pctrl->dev->stats.rx_bytes += skb->len;
        /*
         * FIXME: since we are not in (soft)-IRQ context, we should use netif_rx_ni(),
         * but this slows down things considerably because of rescheduling after each packet
         */
        netif_rx(skb);
    }

    // release surplus SKBs that might have accumulated in our pool
    while(pctrl->skb_pool.qlen > (2 * ACC_MESSAGES)){
        skb = skb_dequeue_tail(&pctrl->skb_pool);
        if(skb == NULL){
            break;
        }
        dev_kfree_skb_any(skb);
    }

    // schedule another run if there is still work to do
    if(rx_quota == 0){
        acc_trigger_poll_rx(pctrl);
    }
    if(tx_quota == 0){
        acc_trigger_poll_tx(pctrl);
    }

    return 0;
}

/*--------------------------------------------------------------------------------*\
\*--------------------------------------------------------------------------------*/
static inline char *state_to_str(acc_mgmt_state_t state)
{
    return state == acc_mgmt_state_error     ? "ERROR"     :
           state == acc_mgmt_state_init      ? "INIT"      :
           state == acc_mgmt_state_addr_sent ? "ADDR_SENT" :
           state == acc_mgmt_state_addr_rcvd ? "ADDR_RCVD" :
           state == acc_mgmt_state_ping_sent ? "PING_SENT" :
           state == acc_mgmt_state_ping_rcvd ? "PING_RCVD" :
           state == acc_mgmt_state_running   ? "RUNNING"   :
           state == acc_mgmt_state_closing   ? "CLOSING"   :
           state == acc_mgmt_state_closed    ?
"CLOSED" : "UNKNOWN"; } /*--------------------------------------------------------------------------------*\ \*--------------------------------------------------------------------------------*/ static void change_state(struct acc_mgmt_struct *pctrl, acc_mgmt_state_t state) { DEB_TRC("[%s] %s -> %s\n", __func__, state_to_str(pctrl->state), state_to_str(state)); pctrl->state = state; } /*--------------------------------------------------------------------------------*\ \*--------------------------------------------------------------------------------*/ acc_mgmt_state_t acc_action(struct acc_mgmt_struct *pctrl) { int result; uint32_t addr; DEB_TRC("[%s] %s\n", __func__, state_to_str(pctrl->state)); cpu_relax(); switch(pctrl->state){ case acc_mgmt_state_init: addr = cpu_to_be32(pctrl->loc_queue.phys_base); DEB_TRC("[%s] sending address: 0x%08x\n", __func__, addr); #if IS_X86 result = npcpu_appcpu_mbx_send_notification(APPCPU_EVENT_AVM_CPU_CONNECT, &addr); #else result = arm_atom_mbx_send_notification(ARM11_EVENT_AVM_CPU_CONNECT, &addr); #endif if(result == 0){ change_state(pctrl, acc_mgmt_state_addr_sent); }else{ change_state(pctrl, acc_mgmt_state_error); } break; case acc_mgmt_state_addr_sent: #if IS_X86 result = npcpu_appcpu_mbx_check_event_notification(NPCPU_EVENT_AVM_CPU_CONNECT, &addr); #else result = arm_atom_mbx_receive_event_notification_noblock(ATOM_EVENT_AVM_CPU_CONNECT, &addr); #endif addr -= REMOTE_MEM_OFFSET; if(result == 0){ DEB_TRC("[%s] received address: 0x%08x\n", __func__, addr); result = setup_remotequeue((resource_size_t) addr, &pctrl->rem_queue); if(result == 0){ change_state(pctrl, acc_mgmt_state_addr_rcvd); } }else if(result != -EAGAIN){ DEB_ERR("[%s] unable to use receive queue!\n", __func__); change_state(pctrl, acc_mgmt_state_error); } break; case acc_mgmt_state_addr_rcvd: #if 0 result = send_ping(atomic_read(&pctrl->node.nonce), pctrl); if(result == 0){ change_state(&pctrl->node, NODE_PING_SENT); pctrl->last_ping = jiffies; } else { DEB_ERR("[%s] Can't send ping!\n", __func__); change_state(&pctrl->node, NODE_ERROR); } #endif change_state(pctrl, acc_mgmt_state_ping_rcvd); break; case acc_mgmt_state_ping_rcvd: change_state(pctrl, acc_mgmt_state_running); DEB_INFO("[%s] AVM CPU connection established\n", __func__); case acc_mgmt_state_ping_sent: case acc_mgmt_state_running: result = acc_poll(pctrl, ACC_QUOTA); if(result != 0){ change_state(pctrl, acc_mgmt_state_error); break; } #if 0 if(time_is_before_jiffies(pctrl->last_ping + 10 * HZ)){ result = send_ping(atomic_read(&pctrl->node.nonce), pctrl); if(result != 0){ change_state(&pctrl->node, NODE_ERROR); } pctrl->last_ping = jiffies; } #endif break; default: DEB_ERR("[%s] Invalid node state: 0x%x\n", __func__, pctrl->state); change_state(pctrl, acc_mgmt_state_error); } return pctrl->state; } /*------------------------------------------------------------------------------------------*\ * Called via interrupt from remote side to tell us there is something to do \*------------------------------------------------------------------------------------------*/ static int avm_cpu_connect_trigger(int irq __maybe_unused, void *ref) { struct acc_mgmt_struct *pacc_mgmt = (struct acc_mgmt_struct *)ref; if(pacc_mgmt->state > acc_mgmt_state_error) { if(pacc_mgmt->state == acc_mgmt_state_running){ set_bit(ACC_WORK_BIT_POLL_RX, &pacc_mgmt->triggers); set_bit(ACC_WORK_BIT_POLL_TX, &pacc_mgmt->triggers); } set_bit(ACC_WORK_BIT_RUN, &pacc_mgmt->triggers); wake_up_interruptible_sync(&pacc_mgmt->wait_queue); } return 0; } 
/*------------------------------------------------------------------------------------------*\
\*------------------------------------------------------------------------------------------*/
static int acc_open(struct net_device *netdev)
{
    struct acc_netdev_priv *priv;

    DEB_TRC("[%s] %s\n", __func__, netdev->name);

    priv = netdev_priv(netdev);
    priv->pctrl->loc_queue.linkstate = acc_lstate_up;
    netif_start_queue(netdev);
    acc_trigger_remote(priv->pctrl);

    set_bit(ACC_WORK_BIT_RUN, &priv->pctrl->triggers);
    wake_up_interruptible(&priv->pctrl->wait_queue);

    return 0;
}

/*------------------------------------------------------------------------------------------*\
\*------------------------------------------------------------------------------------------*/
static int acc_close(struct net_device *netdev)
{
    struct acc_netdev_priv *priv;

    DEB_TRC("[%s] %s\n", __func__, netdev->name);

    priv = netdev_priv(netdev);
    netif_stop_queue(netdev);
    priv->pctrl->loc_queue.linkstate = acc_lstate_down;
    acc_trigger_remote(priv->pctrl);

    set_bit(ACC_WORK_BIT_RUN, &priv->pctrl->triggers);
    wake_up_interruptible(&priv->pctrl->wait_queue);

    return 0;
}

/*------------------------------------------------------------------------------------------*\
\*------------------------------------------------------------------------------------------*/
static netdev_tx_t acc_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
    struct acc_netdev_priv *priv;

    assert(skb->len <= ACC_BUFFER_SIZE);

    priv = netdev_priv(netdev);

    // append SKB to our tx queue
    skb_queue_tail(&priv->pctrl->tx_skb_q, skb);

    // stop queueing if the backlog gets too big
    if(priv->pctrl->tx_skb_q.qlen >= (2 * ACC_MESSAGES)){
        netif_stop_queue(netdev);
    }

    // make sure the work thread is running and tx processing is enabled
    acc_trigger_poll_tx(priv->pctrl);

    return NETDEV_TX_OK;
}

/*------------------------------------------------------------------------------------------*\
\*------------------------------------------------------------------------------------------*/
static int acc_set_mac(struct net_device *netdev, void *p)
{
    struct sockaddr *addr = p;
    unsigned char *mac;

    DEB_TRC("[%s] %s\n", __func__, netdev->name);

    if (!is_valid_ether_addr(addr->sa_data)){
        return -EADDRNOTAVAIL;
    }

    mac = (unsigned char *) addr->sa_data;
    DEB_INFO("[%s] Setting MAC to %02X:%02X:%02X:%02X:%02X:%02X on device %s\n", __func__,
             mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], netdev->name);
    memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

    return 0;
}

/*------------------------------------------------------------------------------------------*\
\*------------------------------------------------------------------------------------------*/
static void acc_tx_timeout(struct net_device *netdev)
{
    struct acc_netdev_priv *priv;

    DEB_DEBUG("[%s] %s\n", __func__, netdev->name);

    priv = netdev_priv(netdev);
    acc_trigger_poll_tx(priv->pctrl);

    return;
}

/*------------------------------------------------------------------------------------------*\
\*------------------------------------------------------------------------------------------*/
static struct net_device_stats *acc_get_stats(struct net_device *netdev)
{
    DEB_TRC("[%s] %s\n", __func__, netdev->name);

    /* only return the current stats */
    return &netdev->stats;
}

/*------------------------------------------------------------------------------------------*\
\*------------------------------------------------------------------------------------------*/
static int acc_change_mtu(struct net_device *netdev, int new_mtu)
{
    DEB_TRC("[%s] %s\n", __func__, netdev->name);

    if(new_mtu > ACC_MAX_MTU || new_mtu < ACC_MIN_FRAME_SIZE){
        return -EINVAL;
    }
    netdev->mtu = new_mtu;

    return 0;
}

/*------------------------------------------------------------------------------------------*\
\*------------------------------------------------------------------------------------------*/
static int acc_ioctl(struct net_device *netdev __attribute__((unused)),
                     struct ifreq *ifr __attribute__((unused)),
                     int cmd __attribute__((unused)))
{
    return -EOPNOTSUPP;
}

/*------------------------------------------------------------------------------------------*\
\*------------------------------------------------------------------------------------------*/
static void acc_set_rx_mode(struct net_device *dev __attribute__ ((unused)))
{
    return;
}

/*--------------------------------------------------------------------------------*\
\*--------------------------------------------------------------------------------*/
static int acc_thread(void *data)
{
    struct acc_mgmt_struct *pctrl;
    acc_mgmt_state_t state;
    int timeout;
    unsigned long max_wait;

#if 0
    struct sched_param PARAM = {.sched_priority = MAX_RT_PRIO };
    PARAM.sched_priority = 50;
    sched_setscheduler(current, SCHED_FIFO, &PARAM);    // <-- unknown symbol??
#endif

    pctrl = (struct acc_mgmt_struct *) data;

    while(!kthread_should_stop()){
        state = acc_action(pctrl);
        if(state == acc_mgmt_state_error){
            DEB_ERR("[%s] Entered error state, thread stopped.\n", __func__);
            break;
        }
        max_wait = pctrl->tx_stalled ? msecs_to_jiffies(ACC_IRQ_TIME) : HZ;
        timeout = wait_event_interruptible_timeout(
                        pctrl->wait_queue,
                        test_and_clear_bit(ACC_WORK_BIT_RUN, &pctrl->triggers),
                        max_wait);
        if(timeout == -ERESTARTSYS){
            /* interrupted by signal -> exit */
            break;
        }
    }

    DEB_INFO("[%s] done\n", __func__);
    pctrl->kthread = NULL;

    return 0;
}

/*------------------------------------------------------------------------------------------*\
\*------------------------------------------------------------------------------------------*/
static const struct net_device_ops acc_netdev_ops = {
    .ndo_open            = acc_open,
    .ndo_stop            = acc_close,
    .ndo_start_xmit      = acc_xmit_frame,
    .ndo_get_stats       = acc_get_stats,
    .ndo_set_rx_mode     = acc_set_rx_mode,
    .ndo_set_mac_address = acc_set_mac,
    .ndo_tx_timeout      = acc_tx_timeout,
    .ndo_change_mtu      = acc_change_mtu,
    .ndo_do_ioctl        = acc_ioctl,
    .ndo_validate_addr   = eth_validate_addr,
};

/*------------------------------------------------------------------------------------------*\
 * Initialize AVM CPU connect device
\*------------------------------------------------------------------------------------------*/
static int __init avm_cpu_connect_probe(void)
{
    struct acc_netdev_priv *priv;
    int result;
    unsigned int registered;

    DEB_INFO("[%s - %u]\n", __func__, __LINE__);

    /* FIXME */
    memset(&acc_mgmt, 0x0, sizeof(acc_mgmt));
    registered = 0;

    skb_queue_head_init(&acc_mgmt.rx_skb_q);
    skb_queue_head_init(&acc_mgmt.tx_skb_q);
    skb_queue_head_init(&acc_mgmt.skb_pool);
    init_waitqueue_head(&acc_mgmt.wait_queue);

    result = setup_localqueue(&acc_mgmt.loc_queue);
    if(result != 0){
        goto err_out;
    }

    acc_mgmt.dev = alloc_netdev(sizeof(struct acc_netdev_priv), "acc0", ether_setup);
    if(!acc_mgmt.dev){
        DEB_ERR("[%s] Could not allocate device acc0\n", __func__);
        result = -ENOMEM;
        goto err_out;
    }

    priv = (struct acc_netdev_priv *)netdev_priv(acc_mgmt.dev);
    priv->pctrl =
            &acc_mgmt;

    /* Assign random MAC address. Carefully ensure that ATOM and ARM get different ones
     * by setting (or clearing) the lsb */
    eth_hw_addr_random(acc_mgmt.dev);
#if IS_X86
    acc_mgmt.dev->dev_addr[5] |= 1;
#else
    acc_mgmt.dev->dev_addr[5] &= ~1;
#endif

    acc_mgmt.dev->netdev_ops = &acc_netdev_ops;
    /*--- acc_set_ethtool_ops(acc_mgmt.dev); ---*/
    acc_mgmt.dev->watchdog_timeo = 5 * HZ;
    acc_mgmt.dev->tx_queue_len = 1000;
    acc_mgmt.last_irq = jiffies - msecs_to_jiffies(ACC_IRQ_TIME);

    result = register_netdev(acc_mgmt.dev);
    if(result != 0){
        goto err_out;
    }
    registered = 1;

    /* carrier off reporting is important to ethtool even BEFORE open */
    netif_carrier_off(acc_mgmt.dev);

    acc_mgmt.state = acc_mgmt_state_init;
    acc_mgmt.kthread = kthread_run(acc_thread, &acc_mgmt, "ACC work thread");
    if(IS_ERR(acc_mgmt.kthread)){
        DEB_ERR("[%s] Could not start worker thread!\n", __func__);
        result = PTR_ERR(acc_mgmt.kthread);
        acc_mgmt.kthread = NULL;
        goto err_out;
    }

    request_remotecpu_irqhandler(AVM_CPUCONN_REMOTECPU_IRQ, avm_cpu_connect_trigger, &acc_mgmt);

    set_bit(ACC_WORK_BIT_RUN, &acc_mgmt.triggers);
    wake_up_interruptible_sync(&acc_mgmt.wait_queue);

    return 0;

err_out:
    if(acc_mgmt.kthread != NULL){
        kthread_stop(acc_mgmt.kthread);
    }
    if(registered != 0){
        unregister_netdev(acc_mgmt.dev);
    }
    if(acc_mgmt.dev != NULL){
        free_netdev(acc_mgmt.dev);
    }
    return result;
}
device_initcall(avm_cpu_connect_probe);
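
/*
 * Teardown sketch: an illustration of how the otherwise unused
 * free_remotequeue()/free_localqueue() helpers and the worker thread could be
 * torn down if the driver were ever built as an unloadable module (the probe
 * path above registers via device_initcall() and never unwinds). The ordering
 * below (stop the worker, detach the net_device, then release the queues and
 * pooled SKBs) is an assumption, not taken from AVM code; the remote-CPU IRQ
 * handler would also have to be released, but no API for that is used in this
 * file.
 */
static void __exit avm_cpu_connect_remove(void)
{
    if(acc_mgmt.kthread != NULL){
        kthread_stop(acc_mgmt.kthread);
    }
    if(acc_mgmt.dev != NULL){
        unregister_netdev(acc_mgmt.dev);
    }

    /* release the shared descriptor rings */
    free_remotequeue(&acc_mgmt.rem_queue);
    free_localqueue(&acc_mgmt.loc_queue);

    /* drop any SKBs still sitting in the software queues and the recycling pool */
    skb_queue_purge(&acc_mgmt.rx_skb_q);
    skb_queue_purge(&acc_mgmt.tx_skb_q);
    skb_queue_purge(&acc_mgmt.skb_pool);

    if(acc_mgmt.dev != NULL){
        free_netdev(acc_mgmt.dev);
    }
}
module_exit(avm_cpu_connect_remove);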