--- zzzz-none-000/linux-3.10.107/drivers/net/xen-netback/netback.c 2017-06-27 09:49:32.000000000 +0000 +++ scorpion-7490-727/linux-3.10.107/drivers/net/xen-netback/netback.c 2021-02-04 17:41:59.000000000 +0000 @@ -37,225 +37,103 @@ #include #include #include +#include #include #include #include #include +#include #include -#include -/* - * This is the maximum slots a skb can have. If a guest sends a skb - * which exceeds this limit it is considered malicious. +/* Provide an option to disable split event channels at load time as + * event channels are limited resource. Split event channels are + * enabled by default. */ -#define FATAL_SKB_SLOTS_DEFAULT 20 -static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT; -module_param(fatal_skb_slots, uint, 0444); +bool separate_tx_rx_irq = true; +module_param(separate_tx_rx_irq, bool, 0644); -/* - * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating - * the maximum slots a valid packet can use. Now this value is defined - * to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by - * all backend. +/* The time that packets can stay on the guest Rx internal queue + * before they are dropped. */ -#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN - -typedef unsigned int pending_ring_idx_t; -#define INVALID_PENDING_RING_IDX (~0U) - -struct pending_tx_info { - struct xen_netif_tx_request req; /* coalesced tx request */ - struct xenvif *vif; - pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX - * if it is head of one or more tx - * reqs - */ -}; - -struct netbk_rx_meta { - int id; - int size; - int gso_size; -}; - -#define MAX_PENDING_REQS 256 - -/* Discriminate from any valid pending_idx value. */ -#define INVALID_PENDING_IDX 0xFFFF - -#define MAX_BUFFER_OFFSET PAGE_SIZE - -/* extra field used in struct page */ -union page_ext { - struct { -#if BITS_PER_LONG < 64 -#define IDX_WIDTH 8 -#define GROUP_WIDTH (BITS_PER_LONG - IDX_WIDTH) - unsigned int group:GROUP_WIDTH; - unsigned int idx:IDX_WIDTH; -#else - unsigned int group, idx; -#endif - } e; - void *mapping; -}; - -struct xen_netbk { - wait_queue_head_t wq; - struct task_struct *task; +unsigned int rx_drain_timeout_msecs = 10000; +module_param(rx_drain_timeout_msecs, uint, 0444); - struct sk_buff_head rx_queue; - struct sk_buff_head tx_queue; - - struct timer_list net_timer; - - struct page *mmap_pages[MAX_PENDING_REQS]; - - pending_ring_idx_t pending_prod; - pending_ring_idx_t pending_cons; - struct list_head net_schedule_list; - - /* Protect the net_schedule_list in netif. */ - spinlock_t net_schedule_list_lock; - - atomic_t netfront_count; - - struct pending_tx_info pending_tx_info[MAX_PENDING_REQS]; - /* Coalescing tx requests before copying makes number of grant - * copy ops greater or equal to number of slots required. In - * worst case a tx request consumes 2 gnttab_copy. - */ - struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS]; - - u16 pending_ring[MAX_PENDING_REQS]; - - /* - * Given MAX_BUFFER_OFFSET of 4096 the worst case is that each - * head/fragment page uses 2 copy operations because it - * straddles two buffers in the frontend. - */ - struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE]; - struct netbk_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE]; -}; +/* The length of time before the frontend is considered unresponsive + * because it isn't providing Rx slots. 
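
The tunables above use the standard loadable-module parameter machinery. As a rough, hedged sketch (not part of this patch), the same pattern is shown below on an invented example_timeout_msecs knob, together with the usual way a millisecond parameter becomes a jiffies deadline, much as rx_drain_timeout_msecs bounds how long an skb may sit on the guest Rx queue. Kernel-module build context is assumed and every name here is hypothetical.

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/jiffies.h>

/* Writable via /sys/module/<module>/parameters/example_timeout_msecs;
 * a 0444 mode (as used for rx_drain_timeout_msecs) would make it read-only.
 */
static unsigned int example_timeout_msecs = 10000;
module_param(example_timeout_msecs, uint, 0644);
MODULE_PARM_DESC(example_timeout_msecs,
                 "Illustrative timeout in milliseconds");

static unsigned long example_deadline(void)
{
        /* Convert the millisecond knob into an absolute jiffies deadline. */
        return jiffies + msecs_to_jiffies(example_timeout_msecs);
}
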
+ */ +unsigned int rx_stall_timeout_msecs = 60000; +module_param(rx_stall_timeout_msecs, uint, 0444); -static struct xen_netbk *xen_netbk; -static int xen_netbk_group_nr; +unsigned int xenvif_max_queues; +module_param_named(max_queues, xenvif_max_queues, uint, 0644); +MODULE_PARM_DESC(max_queues, + "Maximum number of queues per virtual interface"); /* - * If head != INVALID_PENDING_RING_IDX, it means this tx request is head of - * one or more merged tx requests, otherwise it is the continuation of - * previous tx request. + * This is the maximum slots a skb can have. If a guest sends a skb + * which exceeds this limit it is considered malicious. */ -static inline int pending_tx_is_head(struct xen_netbk *netbk, RING_IDX idx) -{ - return netbk->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX; -} - -void xen_netbk_add_xenvif(struct xenvif *vif) -{ - int i; - int min_netfront_count; - int min_group = 0; - struct xen_netbk *netbk; - - min_netfront_count = atomic_read(&xen_netbk[0].netfront_count); - for (i = 0; i < xen_netbk_group_nr; i++) { - int netfront_count = atomic_read(&xen_netbk[i].netfront_count); - if (netfront_count < min_netfront_count) { - min_group = i; - min_netfront_count = netfront_count; - } - } +#define FATAL_SKB_SLOTS_DEFAULT 20 +static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT; +module_param(fatal_skb_slots, uint, 0444); - netbk = &xen_netbk[min_group]; +/* The amount to copy out of the first guest Tx slot into the skb's + * linear area. If the first slot has more data, it will be mapped + * and put into the first frag. + * + * This is sized to avoid pulling headers from the frags for most + * TCP/IP packets. + */ +#define XEN_NETBACK_TX_COPY_LEN 128 - vif->netbk = netbk; - atomic_inc(&netbk->netfront_count); -} -void xen_netbk_remove_xenvif(struct xenvif *vif) -{ - struct xen_netbk *netbk = vif->netbk; - vif->netbk = NULL; - atomic_dec(&netbk->netfront_count); -} +static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, + u8 status); -static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx, - u8 status); -static void make_tx_response(struct xenvif *vif, +static void make_tx_response(struct xenvif_queue *queue, struct xen_netif_tx_request *txp, s8 st); -static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, +static void push_tx_responses(struct xenvif_queue *queue); + +static inline int tx_work_todo(struct xenvif_queue *queue); + +static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue, u16 id, s8 st, u16 offset, u16 size, u16 flags); -static inline unsigned long idx_to_pfn(struct xen_netbk *netbk, +static inline unsigned long idx_to_pfn(struct xenvif_queue *queue, u16 idx) { - return page_to_pfn(netbk->mmap_pages[idx]); + return page_to_pfn(queue->mmap_pages[idx]); } -static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk, +static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue, u16 idx) { - return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx)); + return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx)); } -/* extra field used in struct page */ -static inline void set_page_ext(struct page *pg, struct xen_netbk *netbk, - unsigned int idx) -{ - unsigned int group = netbk - xen_netbk; - union page_ext ext = { .e = { .group = group + 1, .idx = idx } }; - - BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping)); - pg->mapping = ext.mapping; -} +#define callback_param(vif, pending_idx) \ + (vif->pending_tx_info[pending_idx].callback_struct) -static int 
get_page_ext(struct page *pg, - unsigned int *pgroup, unsigned int *pidx) +/* Find the containing VIF's structure from a pointer in pending_tx_info array + */ +static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf) { - union page_ext ext = { .mapping = pg->mapping }; - struct xen_netbk *netbk; - unsigned int group, idx; - - group = ext.e.group - 1; - - if (group < 0 || group >= xen_netbk_group_nr) - return 0; - - netbk = &xen_netbk[group]; - - idx = ext.e.idx; - - if ((idx < 0) || (idx >= MAX_PENDING_REQS)) - return 0; - - if (netbk->mmap_pages[idx] != pg) - return 0; - - *pgroup = group; - *pidx = idx; - - return 1; + u16 pending_idx = ubuf->desc; + struct pending_tx_info *temp = + container_of(ubuf, struct pending_tx_info, callback_struct); + return container_of(temp - pending_idx, + struct xenvif_queue, + pending_tx_info[0]); } -/* - * This is the amount of packet we copy rather than map, so that the - * guest can't fiddle with the contents of the headers while we do - * packet processing on them (netfilter, routing, etc). - */ -#define PKT_PROT_LEN (ETH_HLEN + \ - VLAN_HLEN + \ - sizeof(struct iphdr) + MAX_IPOPTLEN + \ - sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE) - static u16 frag_get_pending_idx(skb_frag_t *frag) { return (u16)frag->page_offset; @@ -271,194 +149,121 @@ return i & (MAX_PENDING_REQS-1); } -static inline pending_ring_idx_t nr_pending_reqs(struct xen_netbk *netbk) -{ - return MAX_PENDING_REQS - - netbk->pending_prod + netbk->pending_cons; -} - -static void xen_netbk_kick_thread(struct xen_netbk *netbk) +static int xenvif_rx_ring_slots_needed(struct xenvif *vif) { - wake_up(&netbk->wq); -} - -static int max_required_rx_slots(struct xenvif *vif) -{ - int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE); - - /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */ - if (vif->can_sg || vif->gso || vif->gso_prefix) - max += MAX_SKB_FRAGS + 1; /* extra_info + frags */ - - return max; + if (vif->gso_mask) + return DIV_ROUND_UP(vif->dev->gso_max_size, XEN_PAGE_SIZE) + 1; + else + return DIV_ROUND_UP(vif->dev->mtu, XEN_PAGE_SIZE); } -int xen_netbk_rx_ring_full(struct xenvif *vif) +static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) { - RING_IDX peek = vif->rx_req_cons_peek; - RING_IDX needed = max_required_rx_slots(vif); - - return ((vif->rx.sring->req_prod - peek) < needed) || - ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed); -} + RING_IDX prod, cons; + int needed; -int xen_netbk_must_stop_queue(struct xenvif *vif) -{ - if (!xen_netbk_rx_ring_full(vif)) - return 0; + needed = xenvif_rx_ring_slots_needed(queue->vif); - vif->rx.sring->req_event = vif->rx_req_cons_peek + - max_required_rx_slots(vif); - mb(); /* request notification /then/ check the queue */ + do { + prod = queue->rx.sring->req_prod; + cons = queue->rx.req_cons; - return xen_netbk_rx_ring_full(vif); -} + if (prod - cons >= needed) + return true; -/* - * Returns true if we should start a new receive buffer instead of - * adding 'size' bytes to a buffer which currently contains 'offset' - * bytes. - */ -static bool start_new_rx_buffer(int offset, unsigned long size, int head) -{ - /* simple case: we have completely filled the current buffer. 
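
xenvif_rx_ring_slots_available() above uses the classic check / publish-interest / barrier / re-check sequence to avoid sleeping through a notification the frontend posts in the race window. A hedged, standalone C11 sketch of that sequence on a toy single-producer ring follows; the toy_ring type and helper names are invented, and the real code operates on the Xen shared ring and uses mb() rather than a C11 fence.

#include <stdatomic.h>
#include <stdbool.h>

struct toy_ring {
        _Atomic unsigned int req_prod;   /* advanced by the frontend        */
        unsigned int         req_cons;   /* private to the backend          */
        _Atomic unsigned int req_event;  /* "notify me at this index"       */
};

static bool toy_slots_available(struct toy_ring *r, unsigned int needed)
{
        unsigned int prod, cons;

        do {
                prod = atomic_load(&r->req_prod);
                cons = r->req_cons;

                if (prod - cons >= needed)
                        return true;

                /* Ask the producer to notify us once more requests arrive... */
                atomic_store(&r->req_event, prod + 1);

                /* ...and make that visible before re-reading req_prod,
                 * otherwise a request posted in between could be missed.
                 */
                atomic_thread_fence(memory_order_seq_cst);
        } while (atomic_load(&r->req_prod) != prod);

        return false;
}
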
*/ - if (offset == MAX_BUFFER_OFFSET) - return true; + queue->rx.sring->req_event = prod + 1; - /* - * complex case: start a fresh buffer if the current frag - * would overflow the current buffer but only if: - * (i) this frag would fit completely in the next buffer - * and (ii) there is already some data in the current buffer - * and (iii) this is not the head buffer. - * - * Where: - * - (i) stops us splitting a frag into two copies - * unless the frag is too large for a single buffer. - * - (ii) stops us from leaving a buffer pointlessly empty. - * - (iii) stops us leaving the first buffer - * empty. Strictly speaking this is already covered - * by (ii) but is explicitly checked because - * netfront relies on the first buffer being - * non-empty and can crash otherwise. - * - * This means we will effectively linearise small - * frags but do not needlessly split large buffers - * into multiple copies tend to give large frags their - * own buffers as before. - */ - BUG_ON(size > MAX_BUFFER_OFFSET); - if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head) - return true; + /* Make sure event is visible before we check prod + * again. + */ + mb(); + } while (queue->rx.sring->req_prod != prod); return false; } -struct xenvif_count_slot_state { - unsigned long copy_off; - bool head; -}; - -unsigned int xenvif_count_frag_slots(struct xenvif *vif, - unsigned long offset, unsigned long size, - struct xenvif_count_slot_state *state) +void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb) { - unsigned count = 0; - - offset &= ~PAGE_MASK; + unsigned long flags; - while (size > 0) { - unsigned long bytes; + spin_lock_irqsave(&queue->rx_queue.lock, flags); - bytes = PAGE_SIZE - offset; + __skb_queue_tail(&queue->rx_queue, skb); - if (bytes > size) - bytes = size; + queue->rx_queue_len += skb->len; + if (queue->rx_queue_len > queue->rx_queue_max) + netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id)); - if (start_new_rx_buffer(state->copy_off, bytes, state->head)) { - count++; - state->copy_off = 0; - } + spin_unlock_irqrestore(&queue->rx_queue.lock, flags); +} - if (state->copy_off + bytes > MAX_BUFFER_OFFSET) - bytes = MAX_BUFFER_OFFSET - state->copy_off; +static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue) +{ + struct sk_buff *skb; - state->copy_off += bytes; + spin_lock_irq(&queue->rx_queue.lock); - offset += bytes; - size -= bytes; + skb = __skb_dequeue(&queue->rx_queue); + if (skb) + queue->rx_queue_len -= skb->len; - if (offset == PAGE_SIZE) - offset = 0; + spin_unlock_irq(&queue->rx_queue.lock); - state->head = false; - } - - return count; + return skb; } -/* - * Figure out how many ring slots we're going to need to send @skb to - * the guest. This function is essentially a dry run of - * netbk_gop_frag_copy. - */ -unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb) +static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue) { - struct xenvif_count_slot_state state; - unsigned int count; - unsigned char *data; - unsigned i; + spin_lock_irq(&queue->rx_queue.lock); - state.head = true; - state.copy_off = 0; - - /* Slot for the first (partial) page of data. */ - count = 1; - - /* Need a slot for the GSO prefix for GSO extra data? 
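
xenvif_rx_queue_tail() and xenvif_rx_queue_maybe_wake() above apply simple byte-count backpressure to the internal guest Rx queue: stop the netdev Tx queue once rx_queue_len exceeds rx_queue_max, wake it again when the backlog drains below the limit. A deliberately simplified, standalone sketch of that rule follows; it drops the spinlock and the netdev calls, and the field and function names are invented.

#include <stdbool.h>
#include <stddef.h>

struct toy_rx_queue {
        size_t queued_bytes;    /* counterpart of queue->rx_queue_len      */
        size_t max_bytes;       /* counterpart of queue->rx_queue_max      */
        bool   tx_stopped;      /* stand-in for the netdev Tx queue state  */
};

static void toy_rx_enqueue(struct toy_rx_queue *q, size_t skb_len)
{
        q->queued_bytes += skb_len;
        if (q->queued_bytes > q->max_bytes)
                q->tx_stopped = true;           /* netif_tx_stop_queue()  */
}

static void toy_rx_maybe_wake(struct toy_rx_queue *q)
{
        if (q->queued_bytes < q->max_bytes)
                q->tx_stopped = false;          /* netif_tx_wake_queue()  */
}
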
*/ - if (skb_shinfo(skb)->gso_size) - count++; - - data = skb->data; - while (data < skb_tail_pointer(skb)) { - unsigned long offset = offset_in_page(data); - unsigned long size = PAGE_SIZE - offset; + if (queue->rx_queue_len < queue->rx_queue_max) + netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id)); - if (data + size > skb_tail_pointer(skb)) - size = skb_tail_pointer(skb) - data; + spin_unlock_irq(&queue->rx_queue.lock); +} - count += xenvif_count_frag_slots(vif, offset, size, &state); - data += size; - } +static void xenvif_rx_queue_purge(struct xenvif_queue *queue) +{ + struct sk_buff *skb; + while ((skb = xenvif_rx_dequeue(queue)) != NULL) + kfree_skb(skb); +} - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]); - unsigned long offset = skb_shinfo(skb)->frags[i].page_offset; +static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue) +{ + struct sk_buff *skb; - count += xenvif_count_frag_slots(vif, offset, size, &state); + for(;;) { + skb = skb_peek(&queue->rx_queue); + if (!skb) + break; + if (time_before(jiffies, XENVIF_RX_CB(skb)->expires)) + break; + xenvif_rx_dequeue(queue); + kfree_skb(skb); } - return count; } struct netrx_pending_operations { unsigned copy_prod, copy_cons; unsigned meta_prod, meta_cons; struct gnttab_copy *copy; - struct netbk_rx_meta *meta; + struct xenvif_rx_meta *meta; int copy_off; grant_ref_t copy_gref; }; -static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif, - struct netrx_pending_operations *npo) +static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue, + struct netrx_pending_operations *npo) { - struct netbk_rx_meta *meta; + struct xenvif_rx_meta *meta; struct xen_netif_rx_request req; - RING_COPY_REQUEST(&vif->rx, vif->rx.req_cons++, &req); + RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req); meta = npo->meta + npo->meta_prod++; + meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; meta->gso_size = 0; meta->size = 0; meta->id = req.id; @@ -469,29 +274,108 @@ return meta; } +struct gop_frag_copy { + struct xenvif_queue *queue; + struct netrx_pending_operations *npo; + struct xenvif_rx_meta *meta; + int head; + int gso_type; + + struct page *page; +}; + +static void xenvif_setup_copy_gop(unsigned long gfn, + unsigned int offset, + unsigned int *len, + struct gop_frag_copy *info) +{ + struct gnttab_copy *copy_gop; + struct xen_page_foreign *foreign; + /* Convenient aliases */ + struct xenvif_queue *queue = info->queue; + struct netrx_pending_operations *npo = info->npo; + struct page *page = info->page; + + BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET); + + if (npo->copy_off == MAX_BUFFER_OFFSET) + info->meta = get_next_rx_buffer(queue, npo); + + if (npo->copy_off + *len > MAX_BUFFER_OFFSET) + *len = MAX_BUFFER_OFFSET - npo->copy_off; + + copy_gop = npo->copy + npo->copy_prod++; + copy_gop->flags = GNTCOPY_dest_gref; + copy_gop->len = *len; + + foreign = xen_page_foreign(page); + if (foreign) { + copy_gop->source.domid = foreign->domid; + copy_gop->source.u.ref = foreign->gref; + copy_gop->flags |= GNTCOPY_source_gref; + } else { + copy_gop->source.domid = DOMID_SELF; + copy_gop->source.u.gmfn = gfn; + } + copy_gop->source.offset = offset; + + copy_gop->dest.domid = queue->vif->domid; + copy_gop->dest.offset = npo->copy_off; + copy_gop->dest.u.ref = npo->copy_gref; + + npo->copy_off += *len; + info->meta->size += *len; + + /* Leave a gap for the GSO descriptor. 
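
xenvif_setup_copy_gop() above is mostly bookkeeping around filling one struct gnttab_copy. A reduced, hedged sketch of just that filling step follows: the source is either a locally owned frame (referenced by frame number) or a foreign page granted to us (referenced by grant), while the destination is always a grant supplied by the guest. Kernel/Xen build context is assumed; the helper name and parameter list are invented for illustration.

#include <linux/types.h>
#include <xen/interface/xen.h>
#include <xen/interface/grant_table.h>

static void toy_fill_copy_to_guest(struct gnttab_copy *op,
                                   bool src_is_foreign,
                                   domid_t src_domid, grant_ref_t src_gref,
                                   unsigned long src_gfn, unsigned int src_offset,
                                   domid_t dst_domid, grant_ref_t dst_gref,
                                   unsigned int dst_offset, unsigned int len)
{
        op->flags = GNTCOPY_dest_gref;
        op->len = len;

        if (src_is_foreign) {
                /* Source was granted to us by another domain (e.g. a mapped
                 * guest Tx page): copy grant-to-grant.
                 */
                op->source.domid = src_domid;
                op->source.u.ref = src_gref;
                op->flags |= GNTCOPY_source_gref;
        } else {
                /* Locally owned page: reference it by frame number. */
                op->source.domid = DOMID_SELF;
                op->source.u.gmfn = src_gfn;
        }
        op->source.offset = src_offset;

        op->dest.domid = dst_domid;
        op->dest.u.ref = dst_gref;
        op->dest.offset = dst_offset;
}
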
*/ + if (info->head && ((1 << info->gso_type) & queue->vif->gso_mask)) + queue->rx.req_cons++; + + info->head = 0; /* There must be something in this buffer now */ +} + +static void xenvif_gop_frag_copy_grant(unsigned long gfn, + unsigned offset, + unsigned int len, + void *data) +{ + unsigned int bytes; + + while (len) { + bytes = len; + xenvif_setup_copy_gop(gfn, offset, &bytes, data); + offset += bytes; + len -= bytes; + } +} + /* * Set up the grant operations for this fragment. If it's a flipping * interface, we also set up the unmap request from here. */ -static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, - struct netrx_pending_operations *npo, - struct page *page, unsigned long size, - unsigned long offset, int *head) -{ - struct gnttab_copy *copy_gop; - struct netbk_rx_meta *meta; - /* - * These variables are used iff get_page_ext returns true, - * in which case they are guaranteed to be initialized. - */ - unsigned int uninitialized_var(group), uninitialized_var(idx); - int foreign = get_page_ext(page, &group, &idx); +static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb, + struct netrx_pending_operations *npo, + struct page *page, unsigned long size, + unsigned long offset, int *head) +{ + struct gop_frag_copy info = { + .queue = queue, + .npo = npo, + .head = *head, + .gso_type = XEN_NETIF_GSO_TYPE_NONE, + }; unsigned long bytes; + if (skb_is_gso(skb)) { + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) + info.gso_type = XEN_NETIF_GSO_TYPE_TCPV4; + else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) + info.gso_type = XEN_NETIF_GSO_TYPE_TCPV6; + } + /* Data must not cross a page boundary. */ BUG_ON(size + offset > PAGE_SIZE<meta + npo->meta_prod - 1; + info.meta = npo->meta + npo->meta_prod - 1; /* Skip unused frames from start of page */ page += offset >> PAGE_SHIFT; @@ -499,69 +383,26 @@ while (size > 0) { BUG_ON(offset >= PAGE_SIZE); - BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET); bytes = PAGE_SIZE - offset; - if (bytes > size) bytes = size; - if (start_new_rx_buffer(npo->copy_off, bytes, *head)) { - /* - * Netfront requires there to be some data in the head - * buffer. - */ - BUG_ON(*head); - - meta = get_next_rx_buffer(vif, npo); - } - - if (npo->copy_off + bytes > MAX_BUFFER_OFFSET) - bytes = MAX_BUFFER_OFFSET - npo->copy_off; - - copy_gop = npo->copy + npo->copy_prod++; - copy_gop->flags = GNTCOPY_dest_gref; - if (foreign) { - struct xen_netbk *netbk = &xen_netbk[group]; - struct pending_tx_info *src_pend; - - src_pend = &netbk->pending_tx_info[idx]; - - copy_gop->source.domid = src_pend->vif->domid; - copy_gop->source.u.ref = src_pend->req.gref; - copy_gop->flags |= GNTCOPY_source_gref; - } else { - void *vaddr = page_address(page); - copy_gop->source.domid = DOMID_SELF; - copy_gop->source.u.gmfn = virt_to_mfn(vaddr); - } - copy_gop->source.offset = offset; - copy_gop->dest.domid = vif->domid; - - copy_gop->dest.offset = npo->copy_off; - copy_gop->dest.u.ref = npo->copy_gref; - copy_gop->len = bytes; - - npo->copy_off += bytes; - meta->size += bytes; - - offset += bytes; + info.page = page; + gnttab_foreach_grant_in_range(page, offset, bytes, + xenvif_gop_frag_copy_grant, + &info); size -= bytes; + offset = 0; - /* Next frame */ - if (offset == PAGE_SIZE && size) { + /* Next page */ + if (size) { BUG_ON(!PageCompound(page)); page++; - offset = 0; } - - /* Leave a gap for the GSO descriptor. 
*/ - if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix) - vif->rx.req_cons++; - - *head = 0; /* There must be something in this buffer now. */ - } + + *head = info.head; } /* @@ -576,36 +417,50 @@ * zero GSO descriptors (for non-GSO packets) or one descriptor (for * frontend-side LRO). */ -static int netbk_gop_skb(struct sk_buff *skb, - struct netrx_pending_operations *npo) +static int xenvif_gop_skb(struct sk_buff *skb, + struct netrx_pending_operations *npo, + struct xenvif_queue *queue) { struct xenvif *vif = netdev_priv(skb->dev); int nr_frags = skb_shinfo(skb)->nr_frags; int i; struct xen_netif_rx_request req; - struct netbk_rx_meta *meta; + struct xenvif_rx_meta *meta; unsigned char *data; int head = 1; int old_meta_prod; + int gso_type; old_meta_prod = npo->meta_prod; + gso_type = XEN_NETIF_GSO_TYPE_NONE; + if (skb_is_gso(skb)) { + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) + gso_type = XEN_NETIF_GSO_TYPE_TCPV4; + else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) + gso_type = XEN_NETIF_GSO_TYPE_TCPV6; + } + /* Set up a GSO prefix descriptor, if necessary */ - if (skb_shinfo(skb)->gso_size && vif->gso_prefix) { - RING_COPY_REQUEST(&vif->rx, vif->rx.req_cons++, &req); + if ((1 << gso_type) & vif->gso_prefix_mask) { + RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req); meta = npo->meta + npo->meta_prod++; + meta->gso_type = gso_type; meta->gso_size = skb_shinfo(skb)->gso_size; meta->size = 0; meta->id = req.id; } - RING_COPY_REQUEST(&vif->rx, vif->rx.req_cons++, &req); + RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req); meta = npo->meta + npo->meta_prod++; - if (!vif->gso_prefix) + if ((1 << gso_type) & vif->gso_mask) { + meta->gso_type = gso_type; meta->gso_size = skb_shinfo(skb)->gso_size; - else + } else { + meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; meta->gso_size = 0; + } meta->size = 0; meta->id = req.id; @@ -620,30 +475,30 @@ if (data + len > skb_tail_pointer(skb)) len = skb_tail_pointer(skb) - data; - netbk_gop_frag_copy(vif, skb, npo, - virt_to_page(data), len, offset, &head); + xenvif_gop_frag_copy(queue, skb, npo, + virt_to_page(data), len, offset, &head); data += len; } for (i = 0; i < nr_frags; i++) { - netbk_gop_frag_copy(vif, skb, npo, - skb_frag_page(&skb_shinfo(skb)->frags[i]), - skb_frag_size(&skb_shinfo(skb)->frags[i]), - skb_shinfo(skb)->frags[i].page_offset, - &head); + xenvif_gop_frag_copy(queue, skb, npo, + skb_frag_page(&skb_shinfo(skb)->frags[i]), + skb_frag_size(&skb_shinfo(skb)->frags[i]), + skb_shinfo(skb)->frags[i].page_offset, + &head); } return npo->meta_prod - old_meta_prod; } /* - * This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was + * This is a twin to xenvif_gop_skb. Assume that xenvif_gop_skb was * used to set up the operations on the top of * netrx_pending_operations, which have since been done. Check that * they didn't give any errors and advance over them. 
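
xenvif_gop_frag_copy() above walks a fragment that may span several (possibly compound) pages and hands each piece that fits within one page to gnttab_foreach_grant_in_range(), which may split it further per grant. A hedged, standalone sketch of the outer chunking loop follows; TOY_PAGE_SIZE, the callback type and the helper name are all invented.

#include <stddef.h>

#define TOY_PAGE_SIZE 4096u

typedef void (*chunk_fn)(size_t page_index, size_t offset, size_t len,
                         void *data);

static void for_each_page_chunk(size_t offset, size_t size,
                                chunk_fn cb, void *data)
{
        /* Skip whole pages covered by the starting offset. */
        size_t page = offset / TOY_PAGE_SIZE;

        offset %= TOY_PAGE_SIZE;

        while (size > 0) {
                size_t bytes = TOY_PAGE_SIZE - offset;

                if (bytes > size)
                        bytes = size;

                cb(page, offset, bytes, data);

                size -= bytes;
                offset = 0;     /* subsequent chunks start at a page start */
                page++;
        }
}
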
*/ -static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots, - struct netrx_pending_operations *npo) +static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots, + struct netrx_pending_operations *npo) { struct gnttab_copy *copy_op; int status = XEN_NETIF_RSP_OKAY; @@ -662,9 +517,9 @@ return status; } -static void netbk_add_frag_responses(struct xenvif *vif, int status, - struct netbk_rx_meta *meta, - int nr_meta_slots) +static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status, + struct xenvif_rx_meta *meta, + int nr_meta_slots) { int i; unsigned long offset; @@ -683,18 +538,18 @@ flags = XEN_NETRXF_more_data; offset = 0; - make_rx_response(vif, meta[i].id, status, offset, + make_rx_response(queue, meta[i].id, status, offset, meta[i].size, flags); } } -struct skb_cb_overlay { - int meta_slots_used; -}; +void xenvif_kick_thread(struct xenvif_queue *queue) +{ + wake_up(&queue->wq); +} -static void xen_netbk_rx_action(struct xen_netbk *netbk) +static void xenvif_rx_action(struct xenvif_queue *queue) { - struct xenvif *vif = NULL, *tmp; s8 status; u16 flags; struct xen_netif_rx_response *resp; @@ -702,71 +557,59 @@ struct sk_buff *skb; LIST_HEAD(notify); int ret; - int nr_frags; - int count; unsigned long offset; - struct skb_cb_overlay *sco; + bool need_to_notify = false; struct netrx_pending_operations npo = { - .copy = netbk->grant_copy_op, - .meta = netbk->meta, + .copy = queue->grant_copy_op, + .meta = queue->meta, }; skb_queue_head_init(&rxq); - count = 0; - - while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) { - vif = netdev_priv(skb->dev); - nr_frags = skb_shinfo(skb)->nr_frags; - - sco = (struct skb_cb_overlay *)skb->cb; - sco->meta_slots_used = netbk_gop_skb(skb, &npo); + while (xenvif_rx_ring_slots_available(queue) + && (skb = xenvif_rx_dequeue(queue)) != NULL) { + queue->last_rx_time = jiffies; - count += nr_frags + 1; + XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue); __skb_queue_tail(&rxq, skb); - - /* Filled the batch queue? 
*/ - /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */ - if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE) - break; } - BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta)); + BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta)); if (!npo.copy_prod) - return; + goto done; - BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op)); - gnttab_batch_copy(netbk->grant_copy_op, npo.copy_prod); + BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS); + gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod); while ((skb = __skb_dequeue(&rxq)) != NULL) { - sco = (struct skb_cb_overlay *)skb->cb; - - vif = netdev_priv(skb->dev); - if (netbk->meta[npo.meta_cons].gso_size && vif->gso_prefix) { - resp = RING_GET_RESPONSE(&vif->rx, - vif->rx.rsp_prod_pvt++); + if ((1 << queue->meta[npo.meta_cons].gso_type) & + queue->vif->gso_prefix_mask) { + resp = RING_GET_RESPONSE(&queue->rx, + queue->rx.rsp_prod_pvt++); resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data; - resp->offset = netbk->meta[npo.meta_cons].gso_size; - resp->id = netbk->meta[npo.meta_cons].id; - resp->status = sco->meta_slots_used; + resp->offset = queue->meta[npo.meta_cons].gso_size; + resp->id = queue->meta[npo.meta_cons].id; + resp->status = XENVIF_RX_CB(skb)->meta_slots_used; npo.meta_cons++; - sco->meta_slots_used--; + XENVIF_RX_CB(skb)->meta_slots_used--; } - vif->dev->stats.tx_bytes += skb->len; - vif->dev->stats.tx_packets++; + queue->stats.tx_bytes += skb->len; + queue->stats.tx_packets++; - status = netbk_check_gop(vif, sco->meta_slots_used, &npo); + status = xenvif_check_gop(queue->vif, + XENVIF_RX_CB(skb)->meta_slots_used, + &npo); - if (sco->meta_slots_used == 1) + if (XENVIF_RX_CB(skb)->meta_slots_used == 1) flags = 0; else flags = XEN_NETRXF_more_data; @@ -778,21 +621,22 @@ flags |= XEN_NETRXF_data_validated; offset = 0; - resp = make_rx_response(vif, netbk->meta[npo.meta_cons].id, + resp = make_rx_response(queue, queue->meta[npo.meta_cons].id, status, offset, - netbk->meta[npo.meta_cons].size, + queue->meta[npo.meta_cons].size, flags); - if (netbk->meta[npo.meta_cons].gso_size && !vif->gso_prefix) { + if ((1 << queue->meta[npo.meta_cons].gso_type) & + queue->vif->gso_mask) { struct xen_netif_extra_info *gso = (struct xen_netif_extra_info *) - RING_GET_RESPONSE(&vif->rx, - vif->rx.rsp_prod_pvt++); + RING_GET_RESPONSE(&queue->rx, + queue->rx.rsp_prod_pvt++); resp->flags |= XEN_NETRXF_extra_info; - gso->u.gso.size = netbk->meta[npo.meta_cons].gso_size; - gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; + gso->u.gso.type = queue->meta[npo.meta_cons].gso_type; + gso->u.gso.size = queue->meta[npo.meta_cons].gso_size; gso->u.gso.pad = 0; gso->u.gso.features = 0; @@ -800,127 +644,34 @@ gso->flags = 0; } - netbk_add_frag_responses(vif, status, - netbk->meta + npo.meta_cons + 1, - sco->meta_slots_used); + xenvif_add_frag_responses(queue, status, + queue->meta + npo.meta_cons + 1, + XENVIF_RX_CB(skb)->meta_slots_used); - RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret); + RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret); - xenvif_notify_tx_completion(vif); + need_to_notify |= !!ret; - if (ret && list_empty(&vif->notify_list)) - list_add_tail(&vif->notify_list, ¬ify); - else - xenvif_put(vif); - npo.meta_cons += sco->meta_slots_used; + npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used; dev_kfree_skb(skb); - xenvif_put_rings(vif); - } - - list_for_each_entry_safe(vif, tmp, ¬ify, notify_list) { - notify_remote_via_irq(vif->irq); - list_del_init(&vif->notify_list); - xenvif_put(vif); } - /* More work to do? 
*/ - if (!skb_queue_empty(&netbk->rx_queue) && - !timer_pending(&netbk->net_timer)) - xen_netbk_kick_thread(netbk); +done: + if (need_to_notify) + notify_remote_via_irq(queue->rx_irq); } -void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb) -{ - struct xen_netbk *netbk = vif->netbk; - - skb_queue_tail(&netbk->rx_queue, skb); - - xen_netbk_kick_thread(netbk); -} - -static void xen_netbk_alarm(unsigned long data) -{ - struct xen_netbk *netbk = (struct xen_netbk *)data; - xen_netbk_kick_thread(netbk); -} - -static int __on_net_schedule_list(struct xenvif *vif) -{ - return !list_empty(&vif->schedule_list); -} - -/* Must be called with net_schedule_list_lock held */ -static void remove_from_net_schedule_list(struct xenvif *vif) -{ - if (likely(__on_net_schedule_list(vif))) { - list_del_init(&vif->schedule_list); - xenvif_put(vif); - } -} - -static struct xenvif *poll_net_schedule_list(struct xen_netbk *netbk) -{ - struct xenvif *vif = NULL; - - spin_lock_irq(&netbk->net_schedule_list_lock); - if (list_empty(&netbk->net_schedule_list)) - goto out; - - vif = list_first_entry(&netbk->net_schedule_list, - struct xenvif, schedule_list); - if (!vif) - goto out; - - xenvif_get(vif); - - remove_from_net_schedule_list(vif); -out: - spin_unlock_irq(&netbk->net_schedule_list_lock); - return vif; -} - -void xen_netbk_schedule_xenvif(struct xenvif *vif) -{ - unsigned long flags; - struct xen_netbk *netbk = vif->netbk; - - if (__on_net_schedule_list(vif)) - goto kick; - - spin_lock_irqsave(&netbk->net_schedule_list_lock, flags); - if (!__on_net_schedule_list(vif) && - likely(xenvif_schedulable(vif))) { - list_add_tail(&vif->schedule_list, &netbk->net_schedule_list); - xenvif_get(vif); - } - spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags); - -kick: - smp_mb(); - if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) && - !list_empty(&netbk->net_schedule_list)) - xen_netbk_kick_thread(netbk); -} - -void xen_netbk_deschedule_xenvif(struct xenvif *vif) -{ - struct xen_netbk *netbk = vif->netbk; - spin_lock_irq(&netbk->net_schedule_list_lock); - remove_from_net_schedule_list(vif); - spin_unlock_irq(&netbk->net_schedule_list_lock); -} - -void xen_netbk_check_rx_xenvif(struct xenvif *vif) +void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue) { int more_to_do; - RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do); + RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do); if (more_to_do) - xen_netbk_schedule_xenvif(vif); + napi_schedule(&queue->napi); } -static void tx_add_credit(struct xenvif *vif) +static void tx_add_credit(struct xenvif_queue *queue) { unsigned long max_burst, max_credit; @@ -928,52 +679,56 @@ * Allow a burst big enough to transmit a jumbo packet of up to 128kB. * Otherwise the interface can seize up due to insufficient credit. */ - max_burst = max(131072UL, vif->credit_bytes); + max_burst = max(131072UL, queue->credit_bytes); /* Take care that adding a new chunk of credit doesn't wrap to zero. 
*/ - max_credit = vif->remaining_credit + vif->credit_bytes; - if (max_credit < vif->remaining_credit) + max_credit = queue->remaining_credit + queue->credit_bytes; + if (max_credit < queue->remaining_credit) max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */ - vif->remaining_credit = min(max_credit, max_burst); + queue->remaining_credit = min(max_credit, max_burst); } -static void tx_credit_callback(unsigned long data) +void xenvif_tx_credit_callback(unsigned long data) { - struct xenvif *vif = (struct xenvif *)data; - tx_add_credit(vif); - xen_netbk_check_rx_xenvif(vif); + struct xenvif_queue *queue = (struct xenvif_queue *)data; + tx_add_credit(queue); + xenvif_napi_schedule_or_enable_events(queue); } -static void netbk_tx_err(struct xenvif *vif, - struct xen_netif_tx_request *txp, RING_IDX end) +static void xenvif_tx_err(struct xenvif_queue *queue, + struct xen_netif_tx_request *txp, RING_IDX end) { - RING_IDX cons = vif->tx.req_cons; + RING_IDX cons = queue->tx.req_cons; + unsigned long flags; do { - make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); + spin_lock_irqsave(&queue->response_lock, flags); + make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR); + push_tx_responses(queue); + spin_unlock_irqrestore(&queue->response_lock, flags); if (cons == end) break; - RING_COPY_REQUEST(&vif->tx, cons++, txp); + RING_COPY_REQUEST(&queue->tx, cons++, txp); } while (1); - vif->tx.req_cons = cons; - xen_netbk_check_rx_xenvif(vif); - xenvif_put(vif); + queue->tx.req_cons = cons; } -static void netbk_fatal_tx_err(struct xenvif *vif) +static void xenvif_fatal_tx_err(struct xenvif *vif) { netdev_err(vif->dev, "fatal error; disabling device\n"); - xenvif_carrier_off(vif); - xenvif_put(vif); + vif->disabled = true; + /* Disable the vif from queue 0's kthread */ + if (vif->queues) + xenvif_kick_thread(&vif->queues[0]); } -static int netbk_count_requests(struct xenvif *vif, - struct xen_netif_tx_request *first, - struct xen_netif_tx_request *txp, - int work_to_do) +static int xenvif_count_requests(struct xenvif_queue *queue, + struct xen_netif_tx_request *first, + struct xen_netif_tx_request *txp, + int work_to_do) { - RING_IDX cons = vif->tx.req_cons; + RING_IDX cons = queue->tx.req_cons; int slots = 0; int drop_err = 0; int more_data; @@ -985,10 +740,10 @@ struct xen_netif_tx_request dropped_tx = { 0 }; if (slots >= work_to_do) { - netdev_err(vif->dev, + netdev_err(queue->vif->dev, "Asked for %d slots but exceeds this limit\n", work_to_do); - netbk_fatal_tx_err(vif); + xenvif_fatal_tx_err(queue->vif); return -ENODATA; } @@ -996,10 +751,10 @@ * considered malicious. 
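
tx_add_credit() above tops the transmit credit up once per credit window. A hedged, standalone sketch of just that arithmetic follows: the 128 KiB floor keeps a jumbo-sized burst possible even when credit_bytes is configured smaller, and the overflow check clamps to ULONG_MAX rather than letting the sum wrap to a tiny value. Names are invented and the kernel's max()/min() helpers are replaced by plain conditionals.

#include <limits.h>

static unsigned long toy_add_credit(unsigned long remaining_credit,
                                    unsigned long credit_bytes)
{
        unsigned long max_burst, max_credit;

        /* Allow a burst big enough for a ~128 kB packet. */
        max_burst = credit_bytes > 131072UL ? credit_bytes : 131072UL;

        max_credit = remaining_credit + credit_bytes;
        if (max_credit < remaining_credit)      /* wrapped: clamp */
                max_credit = ULONG_MAX;

        return max_credit < max_burst ? max_credit : max_burst;
}
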
*/ if (unlikely(slots >= fatal_skb_slots)) { - netdev_err(vif->dev, + netdev_err(queue->vif->dev, "Malicious frontend using %d slots, threshold %u\n", slots, fatal_skb_slots); - netbk_fatal_tx_err(vif); + xenvif_fatal_tx_err(queue->vif); return -E2BIG; } @@ -1012,7 +767,7 @@ */ if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) { if (net_ratelimit()) - netdev_dbg(vif->dev, + netdev_dbg(queue->vif->dev, "Too many slots (%d) exceeding limit (%d), dropping packet\n", slots, XEN_NETBK_LEGACY_SLOTS_MAX); drop_err = -E2BIG; @@ -1021,7 +776,7 @@ if (drop_err) txp = &dropped_tx; - RING_COPY_REQUEST(&vif->tx, cons + slots, txp); + RING_COPY_REQUEST(&queue->tx, cons + slots, txp); /* If the guest submitted a frame >= 64 KiB then * first->size overflowed and following slots will @@ -1034,7 +789,7 @@ */ if (!drop_err && txp->size > first->size) { if (net_ratelimit()) - netdev_dbg(vif->dev, + netdev_dbg(queue->vif->dev, "Invalid tx request, slot size %u > remaining size %u\n", txp->size, first->size); drop_err = -EIO; @@ -1043,10 +798,10 @@ first->size -= txp->size; slots++; - if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) { - netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n", + if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) { + netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n", txp->offset, txp->size); - netbk_fatal_tx_err(vif); + xenvif_fatal_tx_err(queue->vif); return -EINVAL; } @@ -1058,221 +813,256 @@ } while (more_data); if (drop_err) { - netbk_tx_err(vif, first, cons + slots); + xenvif_tx_err(queue, first, cons + slots); return drop_err; } return slots; } -static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk, - u16 pending_idx) + +struct xenvif_tx_cb { + u16 pending_idx; +}; + +#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb) + +static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue, + u16 pending_idx, + struct xen_netif_tx_request *txp, + struct gnttab_map_grant_ref *mop) { - struct page *page; - page = alloc_page(GFP_KERNEL|__GFP_COLD); - if (!page) + queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx]; + gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx), + GNTMAP_host_map | GNTMAP_readonly, + txp->gref, queue->vif->domid); + + memcpy(&queue->pending_tx_info[pending_idx].req, txp, + sizeof(*txp)); +} + +static inline struct sk_buff *xenvif_alloc_skb(unsigned int size) +{ + struct sk_buff *skb = + alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(skb == NULL)) return NULL; - set_page_ext(page, netbk, pending_idx); - netbk->mmap_pages[pending_idx] = page; - return page; + + /* Packets passed to netif_rx() must have some headroom. 
*/ + skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); + + /* Initialize it here to avoid later surprises */ + skb_shinfo(skb)->destructor_arg = NULL; + + return skb; } -static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk, - struct xenvif *vif, - struct sk_buff *skb, - struct xen_netif_tx_request *txp, - struct gnttab_copy *gop) +static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue, + struct sk_buff *skb, + struct xen_netif_tx_request *txp, + struct gnttab_map_grant_ref *gop, + unsigned int frag_overflow, + struct sk_buff *nskb) { struct skb_shared_info *shinfo = skb_shinfo(skb); skb_frag_t *frags = shinfo->frags; - u16 pending_idx = *((u16 *)skb->data); - u16 head_idx = 0; - int slot, start; - struct page *page; - pending_ring_idx_t index, start_idx = 0; - uint16_t dst_offset; + u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx; + int start; + pending_ring_idx_t index; unsigned int nr_slots; - struct pending_tx_info *first = NULL; - /* At this point shinfo->nr_frags is in fact the number of - * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX. - */ nr_slots = shinfo->nr_frags; /* Skip first skb fragment if it is on same page as header fragment. */ start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); - /* Coalesce tx requests, at this point the packet passed in - * should be <= 64K. Any packets larger than 64K have been - * handled in netbk_count_requests(). - */ - for (shinfo->nr_frags = slot = start; slot < nr_slots; - shinfo->nr_frags++) { - struct pending_tx_info *pending_tx_info = - netbk->pending_tx_info; - - page = alloc_page(GFP_KERNEL|__GFP_COLD); - if (!page) - goto err; - - dst_offset = 0; - first = NULL; - while (dst_offset < PAGE_SIZE && slot < nr_slots) { - gop->flags = GNTCOPY_source_gref; - - gop->source.u.ref = txp->gref; - gop->source.domid = vif->domid; - gop->source.offset = txp->offset; - - gop->dest.domid = DOMID_SELF; - - gop->dest.offset = dst_offset; - gop->dest.u.gmfn = virt_to_mfn(page_address(page)); - - if (dst_offset + txp->size > PAGE_SIZE) { - /* This page can only merge a portion - * of tx request. Do not increment any - * pointer / counter here. The txp - * will be dealt with in future - * rounds, eventually hitting the - * `else` branch. - */ - gop->len = PAGE_SIZE - dst_offset; - txp->offset += gop->len; - txp->size -= gop->len; - dst_offset += gop->len; /* quit loop */ - } else { - /* This tx request can be merged in the page */ - gop->len = txp->size; - dst_offset += gop->len; - - index = pending_index(netbk->pending_cons++); - - pending_idx = netbk->pending_ring[index]; - - memcpy(&pending_tx_info[pending_idx].req, txp, - sizeof(*txp)); - xenvif_get(vif); - - pending_tx_info[pending_idx].vif = vif; - - /* Poison these fields, corresponding - * fields for head tx req will be set - * to correct values after the loop. 
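
The pending_cons++/pending_ring[] pattern used in xenvif_get_requests() above (with the matching pending_prod++ on release) is a small free-list implemented as a ring of slot ids: free-running producer/consumer counters are masked by a power-of-two size, which is exactly what pending_index() computes. A hedged, standalone toy version follows; all names are invented and the occupancy checks the driver performs elsewhere are omitted.

#include <stdint.h>

#define TOY_PENDING_REQS 256u   /* must be a power of two */

struct toy_pending_ring {
        uint16_t ring[TOY_PENDING_REQS];
        unsigned int prod;      /* counts slots returned to the ring */
        unsigned int cons;      /* counts slots handed out           */
};

static unsigned int toy_pending_index(unsigned int i)
{
        return i & (TOY_PENDING_REQS - 1);
}

static uint16_t toy_get_slot(struct toy_pending_ring *r)
{
        /* Caller must ensure a free slot exists. */
        return r->ring[toy_pending_index(r->cons++)];
}

static void toy_put_slot(struct toy_pending_ring *r, uint16_t idx)
{
        r->ring[toy_pending_index(r->prod++)] = idx;
}
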
- */ - netbk->mmap_pages[pending_idx] = (void *)(~0UL); - pending_tx_info[pending_idx].head = - INVALID_PENDING_RING_IDX; - - if (!first) { - first = &pending_tx_info[pending_idx]; - start_idx = index; - head_idx = pending_idx; - } - - txp++; - slot++; - } - - gop++; + for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots; + shinfo->nr_frags++, txp++, gop++) { + index = pending_index(queue->pending_cons++); + pending_idx = queue->pending_ring[index]; + xenvif_tx_create_map_op(queue, pending_idx, txp, gop); + frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx); + } + + if (frag_overflow) { + + shinfo = skb_shinfo(nskb); + frags = shinfo->frags; + + for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow; + shinfo->nr_frags++, txp++, gop++) { + index = pending_index(queue->pending_cons++); + pending_idx = queue->pending_ring[index]; + xenvif_tx_create_map_op(queue, pending_idx, txp, gop); + frag_set_pending_idx(&frags[shinfo->nr_frags], + pending_idx); } - first->req.offset = 0; - first->req.size = dst_offset; - first->head = start_idx; - set_page_ext(page, netbk, head_idx); - netbk->mmap_pages[head_idx] = page; - frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx); + skb_shinfo(skb)->frag_list = nskb; } - BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS); - return gop; -err: - /* Unwind, freeing all pages and sending error responses. */ - while (shinfo->nr_frags-- > start) { - xen_netbk_idx_release(netbk, - frag_get_pending_idx(&frags[shinfo->nr_frags]), - XEN_NETIF_RSP_ERROR); - } - /* The head too, if necessary. */ - if (start) - xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); - - return NULL; } -static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, - struct sk_buff *skb, - struct gnttab_copy **gopp) -{ - struct gnttab_copy *gop = *gopp; - u16 pending_idx = *((u16 *)skb->data); +static inline void xenvif_grant_handle_set(struct xenvif_queue *queue, + u16 pending_idx, + grant_handle_t handle) +{ + if (unlikely(queue->grant_tx_handle[pending_idx] != + NETBACK_INVALID_HANDLE)) { + netdev_err(queue->vif->dev, + "Trying to overwrite active handle! pending_idx: 0x%x\n", + pending_idx); + BUG(); + } + queue->grant_tx_handle[pending_idx] = handle; +} + +static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue, + u16 pending_idx) +{ + if (unlikely(queue->grant_tx_handle[pending_idx] == + NETBACK_INVALID_HANDLE)) { + netdev_err(queue->vif->dev, + "Trying to unmap invalid handle! pending_idx: 0x%x\n", + pending_idx); + BUG(); + } + queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE; +} + +static int xenvif_tx_check_gop(struct xenvif_queue *queue, + struct sk_buff *skb, + struct gnttab_map_grant_ref **gopp_map, + struct gnttab_copy **gopp_copy) +{ + struct gnttab_map_grant_ref *gop_map = *gopp_map; + u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx; + /* This always points to the shinfo of the skb being checked, which + * could be either the first or the one on the frag_list + */ struct skb_shared_info *shinfo = skb_shinfo(skb); - struct pending_tx_info *tx_info; + /* If this is non-NULL, we are currently checking the frag_list skb, and + * this points to the shinfo of the first one + */ + struct skb_shared_info *first_shinfo = NULL; int nr_frags = shinfo->nr_frags; - int i, err, start; - u16 peek; /* peek into next tx request */ + const bool sharedslot = nr_frags && + frag_get_pending_idx(&shinfo->frags[0]) == pending_idx; + int i, err; /* Check status of header. 
*/ - err = gop->status; - if (unlikely(err)) - xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); - - /* Skip first skb fragment if it is on same page as header fragment. */ - start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx); + err = (*gopp_copy)->status; + if (unlikely(err)) { + if (net_ratelimit()) + netdev_dbg(queue->vif->dev, + "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n", + (*gopp_copy)->status, + pending_idx, + (*gopp_copy)->source.u.ref); + /* The first frag might still have this slot mapped */ + if (!sharedslot) + xenvif_idx_release(queue, pending_idx, + XEN_NETIF_RSP_ERROR); + } + (*gopp_copy)++; - for (i = start; i < nr_frags; i++) { +check_frags: + for (i = 0; i < nr_frags; i++, gop_map++) { int j, newerr; - pending_ring_idx_t head; pending_idx = frag_get_pending_idx(&shinfo->frags[i]); - tx_info = &netbk->pending_tx_info[pending_idx]; - head = tx_info->head; /* Check error status: if okay then remember grant handle. */ - do { - newerr = (++gop)->status; - if (newerr) - break; - peek = netbk->pending_ring[pending_index(++head)]; - } while (!pending_tx_is_head(netbk, peek)); + newerr = gop_map->status; if (likely(!newerr)) { + xenvif_grant_handle_set(queue, + pending_idx, + gop_map->handle); /* Had a previous error? Invalidate this fragment. */ - if (unlikely(err)) - xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); + if (unlikely(err)) { + xenvif_idx_unmap(queue, pending_idx); + /* If the mapping of the first frag was OK, but + * the header's copy failed, and they are + * sharing a slot, send an error + */ + if (i == 0 && sharedslot) + xenvif_idx_release(queue, pending_idx, + XEN_NETIF_RSP_ERROR); + else + xenvif_idx_release(queue, pending_idx, + XEN_NETIF_RSP_OKAY); + } continue; } /* Error on this fragment: respond to client with an error. */ - xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR); + if (net_ratelimit()) + netdev_dbg(queue->vif->dev, + "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n", + i, + gop_map->status, + pending_idx, + gop_map->ref); + + xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR); /* Not the first error? Preceding frags already invalidated. */ if (err) continue; - /* First error: invalidate header and preceding fragments. */ - pending_idx = *((u16 *)skb->data); - xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); - for (j = start; j < i; j++) { + /* First error: if the header haven't shared a slot with the + * first frag, release it as well. + */ + if (!sharedslot) + xenvif_idx_release(queue, + XENVIF_TX_CB(skb)->pending_idx, + XEN_NETIF_RSP_OKAY); + + /* Invalidate preceding fragments of this skb. */ + for (j = 0; j < i; j++) { pending_idx = frag_get_pending_idx(&shinfo->frags[j]); - xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); + xenvif_idx_unmap(queue, pending_idx); + xenvif_idx_release(queue, pending_idx, + XEN_NETIF_RSP_OKAY); + } + + /* And if we found the error while checking the frag_list, unmap + * the first skb's frags + */ + if (first_shinfo) { + for (j = 0; j < first_shinfo->nr_frags; j++) { + pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]); + xenvif_idx_unmap(queue, pending_idx); + xenvif_idx_release(queue, pending_idx, + XEN_NETIF_RSP_OKAY); + } } /* Remember the error: invalidate all subsequent fragments. 
*/ err = newerr; } - *gopp = gop + 1; + if (skb_has_frag_list(skb) && !first_shinfo) { + first_shinfo = skb_shinfo(skb); + shinfo = skb_shinfo(skb_shinfo(skb)->frag_list); + nr_frags = shinfo->nr_frags; + + goto check_frags; + } + + *gopp_map = gop_map; return err; } -static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb) +static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb) { struct skb_shared_info *shinfo = skb_shinfo(skb); int nr_frags = shinfo->nr_frags; int i; + u16 prev_pending_idx = INVALID_PENDING_IDX; for (i = 0; i < nr_frags; i++) { skb_frag_t *frag = shinfo->frags + i; @@ -1282,167 +1072,134 @@ pending_idx = frag_get_pending_idx(frag); - txp = &netbk->pending_tx_info[pending_idx].req; - page = virt_to_page(idx_to_kaddr(netbk, pending_idx)); + /* If this is not the first frag, chain it to the previous*/ + if (prev_pending_idx == INVALID_PENDING_IDX) + skb_shinfo(skb)->destructor_arg = + &callback_param(queue, pending_idx); + else + callback_param(queue, prev_pending_idx).ctx = + &callback_param(queue, pending_idx); + + callback_param(queue, pending_idx).ctx = NULL; + prev_pending_idx = pending_idx; + + txp = &queue->pending_tx_info[pending_idx].req; + page = virt_to_page(idx_to_kaddr(queue, pending_idx)); __skb_fill_page_desc(skb, i, page, txp->offset, txp->size); skb->len += txp->size; skb->data_len += txp->size; skb->truesize += txp->size; - /* Take an extra reference to offset xen_netbk_idx_release */ - get_page(netbk->mmap_pages[pending_idx]); - xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); + /* Take an extra reference to offset network stack's put_page */ + get_page(queue->mmap_pages[pending_idx]); } } -static int xen_netbk_get_extras(struct xenvif *vif, +static int xenvif_get_extras(struct xenvif_queue *queue, struct xen_netif_extra_info *extras, int work_to_do) { struct xen_netif_extra_info extra; - RING_IDX cons = vif->tx.req_cons; + RING_IDX cons = queue->tx.req_cons; do { if (unlikely(work_to_do-- <= 0)) { - netdev_err(vif->dev, "Missing extra info\n"); - netbk_fatal_tx_err(vif); + netdev_err(queue->vif->dev, "Missing extra info\n"); + xenvif_fatal_tx_err(queue->vif); return -EBADR; } - RING_COPY_REQUEST(&vif->tx, cons, &extra); + RING_COPY_REQUEST(&queue->tx, cons, &extra); if (unlikely(!extra.type || extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { - vif->tx.req_cons = ++cons; - netdev_err(vif->dev, + queue->tx.req_cons = ++cons; + netdev_err(queue->vif->dev, "Invalid extra type: %d\n", extra.type); - netbk_fatal_tx_err(vif); + xenvif_fatal_tx_err(queue->vif); return -EINVAL; } memcpy(&extras[extra.type - 1], &extra, sizeof(extra)); - vif->tx.req_cons = ++cons; + queue->tx.req_cons = ++cons; } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE); return work_to_do; } -static int netbk_set_skb_gso(struct xenvif *vif, - struct sk_buff *skb, - struct xen_netif_extra_info *gso) +static int xenvif_set_skb_gso(struct xenvif *vif, + struct sk_buff *skb, + struct xen_netif_extra_info *gso) { if (!gso->u.gso.size) { netdev_err(vif->dev, "GSO size must not be zero.\n"); - netbk_fatal_tx_err(vif); + xenvif_fatal_tx_err(vif); return -EINVAL; } - /* Currently only TCPv4 S.O. is supported. 
*/ - if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { + switch (gso->u.gso.type) { + case XEN_NETIF_GSO_TYPE_TCPV4: + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; + break; + case XEN_NETIF_GSO_TYPE_TCPV6: + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; + break; + default: netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type); - netbk_fatal_tx_err(vif); + xenvif_fatal_tx_err(vif); return -EINVAL; } skb_shinfo(skb)->gso_size = gso->u.gso.size; - skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; - - /* Header must be checked, and gso_segs computed. */ - skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; - skb_shinfo(skb)->gso_segs = 0; + /* gso_segs will be calculated later */ return 0; } -static int checksum_setup(struct xenvif *vif, struct sk_buff *skb) +static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb) { - struct iphdr *iph; - int err = -EPROTO; - int recalculate_partial_csum = 0; + bool recalculate_partial_csum = false; - /* - * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy + /* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy * peers can fail to set NETRXF_csum_blank when sending a GSO * frame. In this case force the SKB to CHECKSUM_PARTIAL and * recalculate the partial checksum. */ if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) { - vif->rx_gso_checksum_fixup++; + queue->stats.rx_gso_checksum_fixup++; skb->ip_summed = CHECKSUM_PARTIAL; - recalculate_partial_csum = 1; + recalculate_partial_csum = true; } /* A non-CHECKSUM_PARTIAL SKB does not require setup. */ if (skb->ip_summed != CHECKSUM_PARTIAL) return 0; - if (skb->protocol != htons(ETH_P_IP)) - goto out; - - iph = (void *)skb->data; - switch (iph->protocol) { - case IPPROTO_TCP: - if (!skb_partial_csum_set(skb, 4 * iph->ihl, - offsetof(struct tcphdr, check))) - goto out; - - if (recalculate_partial_csum) { - struct tcphdr *tcph = tcp_hdr(skb); - tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, - skb->len - iph->ihl*4, - IPPROTO_TCP, 0); - } - break; - case IPPROTO_UDP: - if (!skb_partial_csum_set(skb, 4 * iph->ihl, - offsetof(struct udphdr, check))) - goto out; - - if (recalculate_partial_csum) { - struct udphdr *udph = udp_hdr(skb); - udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, - skb->len - iph->ihl*4, - IPPROTO_UDP, 0); - } - break; - default: - if (net_ratelimit()) - netdev_err(vif->dev, - "Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n", - iph->protocol); - goto out; - } - - err = 0; - -out: - return err; + return skb_checksum_setup(skb, recalculate_partial_csum); } -static bool tx_credit_exceeded(struct xenvif *vif, unsigned size) +static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size) { u64 now = get_jiffies_64(); - u64 next_credit = vif->credit_window_start + - msecs_to_jiffies(vif->credit_usec / 1000); + u64 next_credit = queue->credit_window_start + + msecs_to_jiffies(queue->credit_usec / 1000); /* Timer could already be pending in rare cases. */ - if (timer_pending(&vif->credit_timeout)) + if (timer_pending(&queue->credit_timeout)) return true; /* Passed the point where we can replenish credit? */ if (time_after_eq64(now, next_credit)) { - vif->credit_window_start = now; - tx_add_credit(vif); + queue->credit_window_start = now; + tx_add_credit(queue); } /* Still too big to send right now? Set a callback. 
*/ - if (size > vif->remaining_credit) { - vif->credit_timeout.data = - (unsigned long)vif; - vif->credit_timeout.function = - tx_credit_callback; - mod_timer(&vif->credit_timeout, + if (size > queue->remaining_credit) { + queue->credit_timeout.data = + (unsigned long)queue; + mod_timer(&queue->credit_timeout, next_credit); - vif->credit_window_start = next_credit; + queue->credit_window_start = next_credit; return true; } @@ -1450,19 +1207,93 @@ return false; } -static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) +/* No locking is required in xenvif_mcast_add/del() as they are + * only ever invoked from NAPI poll. An RCU list is used because + * xenvif_mcast_match() is called asynchronously, during start_xmit. + */ + +static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr) { - struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop; - struct sk_buff *skb; + struct xenvif_mcast_addr *mcast; + + if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) { + if (net_ratelimit()) + netdev_err(vif->dev, + "Too many multicast addresses\n"); + return -ENOSPC; + } + + mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC); + if (!mcast) + return -ENOMEM; + + ether_addr_copy(mcast->addr, addr); + list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr); + vif->fe_mcast_count++; + + return 0; +} + +static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr) +{ + struct xenvif_mcast_addr *mcast; + + list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) { + if (ether_addr_equal(addr, mcast->addr)) { + --vif->fe_mcast_count; + list_del_rcu(&mcast->entry); + kfree_rcu(mcast, rcu); + break; + } + } +} + +bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr) +{ + struct xenvif_mcast_addr *mcast; + + rcu_read_lock(); + list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) { + if (ether_addr_equal(addr, mcast->addr)) { + rcu_read_unlock(); + return true; + } + } + rcu_read_unlock(); + + return false; +} + +void xenvif_mcast_addr_list_free(struct xenvif *vif) +{ + /* No need for locking or RCU here. NAPI poll and TX queue + * are stopped. + */ + while (!list_empty(&vif->fe_mcast_addr)) { + struct xenvif_mcast_addr *mcast; + + mcast = list_first_entry(&vif->fe_mcast_addr, + struct xenvif_mcast_addr, + entry); + --vif->fe_mcast_count; + list_del(&mcast->entry); + kfree(mcast); + } +} + +static void xenvif_tx_build_gops(struct xenvif_queue *queue, + int budget, + unsigned *copy_ops, + unsigned *map_ops) +{ + struct gnttab_map_grant_ref *gop = queue->tx_map_ops; + struct sk_buff *skb, *nskb; int ret; + unsigned int frag_overflow; - while ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX - < MAX_PENDING_REQS) && - !list_empty(&netbk->net_schedule_list)) { - struct xenvif *vif; + while (skb_queue_len(&queue->tx_queue) < budget) { struct xen_netif_tx_request txreq; struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX]; - struct page *page; struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1]; u16 pending_idx; RING_IDX idx; @@ -1470,207 +1301,301 @@ unsigned int data_len; pending_ring_idx_t index; - /* Get a netif from the list with work to do. */ - vif = poll_net_schedule_list(netbk); - /* This can sometimes happen because the test of - * list_empty(net_schedule_list) at the top of the - * loop is unlocked. Just go back and have another - * look. 
- */ - if (!vif) - continue; - - if (vif->tx.sring->req_prod - vif->tx.req_cons > + if (queue->tx.sring->req_prod - queue->tx.req_cons > XEN_NETIF_TX_RING_SIZE) { - netdev_err(vif->dev, + netdev_err(queue->vif->dev, "Impossible number of requests. " "req_prod %d, req_cons %d, size %ld\n", - vif->tx.sring->req_prod, vif->tx.req_cons, + queue->tx.sring->req_prod, queue->tx.req_cons, XEN_NETIF_TX_RING_SIZE); - netbk_fatal_tx_err(vif); - continue; + xenvif_fatal_tx_err(queue->vif); + break; } - RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do); - if (!work_to_do) { - xenvif_put(vif); - continue; - } + work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx); + if (!work_to_do) + break; - idx = vif->tx.req_cons; + idx = queue->tx.req_cons; rmb(); /* Ensure that we see the request before we copy it. */ - RING_COPY_REQUEST(&vif->tx, idx, &txreq); + RING_COPY_REQUEST(&queue->tx, idx, &txreq); /* Credit-based scheduling. */ - if (txreq.size > vif->remaining_credit && - tx_credit_exceeded(vif, txreq.size)) { - xenvif_put(vif); - continue; - } + if (txreq.size > queue->remaining_credit && + tx_credit_exceeded(queue, txreq.size)) + break; - vif->remaining_credit -= txreq.size; + queue->remaining_credit -= txreq.size; work_to_do--; - vif->tx.req_cons = ++idx; + queue->tx.req_cons = ++idx; memset(extras, 0, sizeof(extras)); if (txreq.flags & XEN_NETTXF_extra_info) { - work_to_do = xen_netbk_get_extras(vif, extras, - work_to_do); - idx = vif->tx.req_cons; + work_to_do = xenvif_get_extras(queue, extras, + work_to_do); + idx = queue->tx.req_cons; if (unlikely(work_to_do < 0)) - continue; + break; } - ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do); - if (unlikely(ret < 0)) + if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) { + struct xen_netif_extra_info *extra; + + extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1]; + ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr); + + make_tx_response(queue, &txreq, + (ret == 0) ? + XEN_NETIF_RSP_OKAY : + XEN_NETIF_RSP_ERROR); + push_tx_responses(queue); + continue; + } + + if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) { + struct xen_netif_extra_info *extra; + + extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1]; + xenvif_mcast_del(queue->vif, extra->u.mcast.addr); + + make_tx_response(queue, &txreq, XEN_NETIF_RSP_OKAY); + push_tx_responses(queue); continue; + } + + ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do); + if (unlikely(ret < 0)) + break; idx += ret; if (unlikely(txreq.size < ETH_HLEN)) { - netdev_dbg(vif->dev, + netdev_dbg(queue->vif->dev, "Bad packet size: %d\n", txreq.size); - netbk_tx_err(vif, &txreq, idx); - continue; + xenvif_tx_err(queue, &txreq, idx); + break; } /* No crossing a page as the payload mustn't fragment. */ - if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) { - netdev_err(vif->dev, - "txreq.offset: %x, size: %u, end: %lu\n", + if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) { + netdev_err(queue->vif->dev, + "txreq.offset: %u, size: %u, end: %lu\n", txreq.offset, txreq.size, - (txreq.offset&~PAGE_MASK) + txreq.size); - netbk_fatal_tx_err(vif); - continue; + (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size); + xenvif_fatal_tx_err(queue->vif); + break; } - index = pending_index(netbk->pending_cons); - pending_idx = netbk->pending_ring[index]; + index = pending_index(queue->pending_cons); + pending_idx = queue->pending_ring[index]; - data_len = (txreq.size > PKT_PROT_LEN && + data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN && ret < XEN_NETBK_LEGACY_SLOTS_MAX) ? 
- PKT_PROT_LEN : txreq.size; + XEN_NETBACK_TX_COPY_LEN : txreq.size; - skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN, - GFP_ATOMIC | __GFP_NOWARN); + skb = xenvif_alloc_skb(data_len); if (unlikely(skb == NULL)) { - netdev_dbg(vif->dev, + netdev_dbg(queue->vif->dev, "Can't allocate a skb in start_xmit.\n"); - netbk_tx_err(vif, &txreq, idx); + xenvif_tx_err(queue, &txreq, idx); break; } - /* Packets passed to netif_rx() must have some headroom. */ - skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); + skb_shinfo(skb)->nr_frags = ret; + if (data_len < txreq.size) + skb_shinfo(skb)->nr_frags++; + /* At this point shinfo->nr_frags is in fact the number of + * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX. + */ + frag_overflow = 0; + nskb = NULL; + if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) { + frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS; + BUG_ON(frag_overflow > MAX_SKB_FRAGS); + skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS; + nskb = xenvif_alloc_skb(0); + if (unlikely(nskb == NULL)) { + kfree_skb(skb); + xenvif_tx_err(queue, &txreq, idx); + if (net_ratelimit()) + netdev_err(queue->vif->dev, + "Can't allocate the frag_list skb.\n"); + break; + } + } if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { struct xen_netif_extra_info *gso; gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; - if (netbk_set_skb_gso(vif, skb, gso)) { - /* Failure in netbk_set_skb_gso is fatal. */ + if (xenvif_set_skb_gso(queue->vif, skb, gso)) { + /* Failure in xenvif_set_skb_gso is fatal. */ kfree_skb(skb); - continue; + kfree_skb(nskb); + break; } } - /* XXX could copy straight to head */ - page = xen_netbk_alloc_page(netbk, pending_idx); - if (!page) { - kfree_skb(skb); - netbk_tx_err(vif, &txreq, idx); - continue; - } - - gop->source.u.ref = txreq.gref; - gop->source.domid = vif->domid; - gop->source.offset = txreq.offset; - - gop->dest.u.gmfn = virt_to_mfn(page_address(page)); - gop->dest.domid = DOMID_SELF; - gop->dest.offset = txreq.offset; - - gop->len = txreq.size; - gop->flags = GNTCOPY_source_gref; - - gop++; - - memcpy(&netbk->pending_tx_info[pending_idx].req, - &txreq, sizeof(txreq)); - netbk->pending_tx_info[pending_idx].vif = vif; - netbk->pending_tx_info[pending_idx].head = index; - *((u16 *)skb->data) = pending_idx; + XENVIF_TX_CB(skb)->pending_idx = pending_idx; __skb_put(skb, data_len); + queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref; + queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid; + queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset; + + queue->tx_copy_ops[*copy_ops].dest.u.gmfn = + virt_to_gfn(skb->data); + queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF; + queue->tx_copy_ops[*copy_ops].dest.offset = + offset_in_page(skb->data) & ~XEN_PAGE_MASK; + + queue->tx_copy_ops[*copy_ops].len = data_len; + queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref; + + (*copy_ops)++; - skb_shinfo(skb)->nr_frags = ret; if (data_len < txreq.size) { - skb_shinfo(skb)->nr_frags++; frag_set_pending_idx(&skb_shinfo(skb)->frags[0], pending_idx); + xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop); + gop++; } else { frag_set_pending_idx(&skb_shinfo(skb)->frags[0], INVALID_PENDING_IDX); + memcpy(&queue->pending_tx_info[pending_idx].req, &txreq, + sizeof(txreq)); } - netbk->pending_cons++; + queue->pending_cons++; - request_gop = xen_netbk_get_requests(netbk, vif, - skb, txfrags, gop); - if (request_gop == NULL) { - kfree_skb(skb); - netbk_tx_err(vif, &txreq, idx); - continue; - } - gop = request_gop; + gop = xenvif_get_requests(queue, skb, txfrags, 
gop, + frag_overflow, nskb); - __skb_queue_tail(&netbk->tx_queue, skb); + __skb_queue_tail(&queue->tx_queue, skb); - vif->tx.req_cons = idx; - xen_netbk_check_rx_xenvif(vif); + queue->tx.req_cons = idx; - if ((gop-netbk->tx_copy_ops) >= ARRAY_SIZE(netbk->tx_copy_ops)) + if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) || + (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops))) break; } - return gop - netbk->tx_copy_ops; + (*map_ops) = gop - queue->tx_map_ops; + return; } -static void xen_netbk_tx_submit(struct xen_netbk *netbk) +/* Consolidate skb with a frag_list into a brand new one with local pages on + * frags. Returns 0 or -ENOMEM if can't allocate new pages. + */ +static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb) { - struct gnttab_copy *gop = netbk->tx_copy_ops; + unsigned int offset = skb_headlen(skb); + skb_frag_t frags[MAX_SKB_FRAGS]; + int i, f; + struct ubuf_info *uarg; + struct sk_buff *nskb = skb_shinfo(skb)->frag_list; + + queue->stats.tx_zerocopy_sent += 2; + queue->stats.tx_frag_overflow++; + + xenvif_fill_frags(queue, nskb); + /* Subtract frags size, we will correct it later */ + skb->truesize -= skb->data_len; + skb->len += nskb->len; + skb->data_len += nskb->len; + + /* create a brand new frags array and coalesce there */ + for (i = 0; offset < skb->len; i++) { + struct page *page; + unsigned int len; + + BUG_ON(i >= MAX_SKB_FRAGS); + page = alloc_page(GFP_ATOMIC); + if (!page) { + int j; + skb->truesize += skb->data_len; + for (j = 0; j < i; j++) + put_page(frags[j].page.p); + return -ENOMEM; + } + + if (offset + PAGE_SIZE < skb->len) + len = PAGE_SIZE; + else + len = skb->len - offset; + if (skb_copy_bits(skb, offset, page_address(page), len)) + BUG(); + + offset += len; + frags[i].page.p = page; + frags[i].page_offset = 0; + skb_frag_size_set(&frags[i], len); + } + + /* Copied all the bits from the frag list -- free it. */ + skb_frag_list_init(skb); + xenvif_skb_zerocopy_prepare(queue, nskb); + kfree_skb(nskb); + + /* Release all the original (foreign) frags. */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + skb_frag_unref(skb, f); + uarg = skb_shinfo(skb)->destructor_arg; + /* increase inflight counter to offset decrement in callback */ + atomic_inc(&queue->inflight_packets); + uarg->callback(uarg, true); + skb_shinfo(skb)->destructor_arg = NULL; + + /* Fill the skb with the new (local) frags. */ + memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t)); + skb_shinfo(skb)->nr_frags = i; + skb->truesize += i * PAGE_SIZE; + + return 0; +} + +static int xenvif_tx_submit(struct xenvif_queue *queue) +{ + struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops; + struct gnttab_copy *gop_copy = queue->tx_copy_ops; struct sk_buff *skb; + int work_done = 0; - while ((skb = __skb_dequeue(&netbk->tx_queue)) != NULL) { + while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) { struct xen_netif_tx_request *txp; - struct xenvif *vif; u16 pending_idx; unsigned data_len; - pending_idx = *((u16 *)skb->data); - vif = netbk->pending_tx_info[pending_idx].vif; - txp = &netbk->pending_tx_info[pending_idx].req; + pending_idx = XENVIF_TX_CB(skb)->pending_idx; + txp = &queue->pending_tx_info[pending_idx].req; /* Check the remap error code. 
*/ - if (unlikely(xen_netbk_tx_check_gop(netbk, skb, &gop))) { - netdev_dbg(vif->dev, "netback grant failed.\n"); + if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) { + /* If there was an error, xenvif_tx_check_gop is + * expected to release all the frags which were mapped, + * so kfree_skb shouldn't do it again + */ skb_shinfo(skb)->nr_frags = 0; + if (skb_has_frag_list(skb)) { + struct sk_buff *nskb = + skb_shinfo(skb)->frag_list; + skb_shinfo(nskb)->nr_frags = 0; + } kfree_skb(skb); continue; } data_len = skb->len; - memcpy(skb->data, - (void *)(idx_to_kaddr(netbk, pending_idx)|txp->offset), - data_len); + callback_param(queue, pending_idx).ctx = NULL; if (data_len < txp->size) { /* Append the packet payload as a fragment. */ txp->offset += data_len; txp->size -= data_len; } else { /* Schedule a response immediately. */ - xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY); + xenvif_idx_release(queue, pending_idx, + XEN_NETIF_RSP_OKAY); } if (txp->flags & XEN_NETTXF_csum_blank) @@ -1678,136 +1603,260 @@ else if (txp->flags & XEN_NETTXF_data_validated) skb->ip_summed = CHECKSUM_UNNECESSARY; - xen_netbk_fill_frags(netbk, skb); + xenvif_fill_frags(queue, skb); - /* - * If the initial fragment was < PKT_PROT_LEN then - * pull through some bytes from the other fragments to - * increase the linear region to PKT_PROT_LEN bytes. - */ - if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) { - int target = min_t(int, skb->len, PKT_PROT_LEN); - __pskb_pull_tail(skb, target - skb_headlen(skb)); + if (unlikely(skb_has_frag_list(skb))) { + if (xenvif_handle_frag_list(queue, skb)) { + if (net_ratelimit()) + netdev_err(queue->vif->dev, + "Not enough memory to consolidate frag_list!\n"); + xenvif_skb_zerocopy_prepare(queue, skb); + kfree_skb(skb); + continue; + } } - skb->dev = vif->dev; + skb->dev = queue->vif->dev; skb->protocol = eth_type_trans(skb, skb->dev); skb_reset_network_header(skb); - if (checksum_setup(vif, skb)) { - netdev_dbg(vif->dev, + if (checksum_setup(queue, skb)) { + netdev_dbg(queue->vif->dev, "Can't setup checksum in net_tx_action\n"); + /* We have to set this flag to trigger the callback */ + if (skb_shinfo(skb)->destructor_arg) + xenvif_skb_zerocopy_prepare(queue, skb); kfree_skb(skb); continue; } skb_probe_transport_header(skb, 0); - vif->dev->stats.rx_bytes += skb->len; - vif->dev->stats.rx_packets++; + /* If the packet is GSO then we will have just set up the + * transport header offset in checksum_setup so it's now + * straightforward to calculate gso_segs. + */ + if (skb_is_gso(skb)) { + int mss = skb_shinfo(skb)->gso_size; + int hdrlen = skb_transport_header(skb) - + skb_mac_header(skb) + + tcp_hdrlen(skb); + + skb_shinfo(skb)->gso_segs = + DIV_ROUND_UP(skb->len - hdrlen, mss); + } + + queue->stats.rx_bytes += skb->len; + queue->stats.rx_packets++; + + work_done++; - xenvif_receive_skb(vif, skb); + /* Set this flag right before netif_receive_skb, otherwise + * someone might think this packet already left netback, and + * do a skb_copy_ubufs while we are still in control of the + * skb. E.g. the __pskb_pull_tail earlier can do such thing. 
+ */ + if (skb_shinfo(skb)->destructor_arg) { + xenvif_skb_zerocopy_prepare(queue, skb); + queue->stats.tx_zerocopy_sent++; + } + + netif_receive_skb(skb); } + + return work_done; } -/* Called after netfront has transmitted */ -static void xen_netbk_tx_action(struct xen_netbk *netbk) +void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success) { - unsigned nr_gops; + unsigned long flags; + pending_ring_idx_t index; + struct xenvif_queue *queue = ubuf_to_queue(ubuf); - nr_gops = xen_netbk_tx_build_gops(netbk); + /* This is the only place where we grab this lock, to protect callbacks + * from each other. + */ + spin_lock_irqsave(&queue->callback_lock, flags); + do { + u16 pending_idx = ubuf->desc; + ubuf = (struct ubuf_info *) ubuf->ctx; + BUG_ON(queue->dealloc_prod - queue->dealloc_cons >= + MAX_PENDING_REQS); + index = pending_index(queue->dealloc_prod); + queue->dealloc_ring[index] = pending_idx; + /* Sync with xenvif_tx_dealloc_action: + * insert idx then incr producer. + */ + smp_wmb(); + queue->dealloc_prod++; + } while (ubuf); + spin_unlock_irqrestore(&queue->callback_lock, flags); - if (nr_gops == 0) - return; + if (likely(zerocopy_success)) + queue->stats.tx_zerocopy_success++; + else + queue->stats.tx_zerocopy_fail++; + xenvif_skb_zerocopy_complete(queue); +} - gnttab_batch_copy(netbk->tx_copy_ops, nr_gops); +static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue) +{ + struct gnttab_unmap_grant_ref *gop; + pending_ring_idx_t dc, dp; + u16 pending_idx, pending_idx_release[MAX_PENDING_REQS]; + unsigned int i = 0; + + dc = queue->dealloc_cons; + gop = queue->tx_unmap_ops; + + /* Free up any grants we have finished using */ + do { + dp = queue->dealloc_prod; + + /* Ensure we see all indices enqueued by all + * xenvif_zerocopy_callback(). 
+ */
+ smp_rmb();
+
+ while (dc != dp) {
+ BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
+ pending_idx =
+ queue->dealloc_ring[pending_index(dc++)];
+
+ pending_idx_release[gop - queue->tx_unmap_ops] =
+ pending_idx;
+ queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
+ queue->mmap_pages[pending_idx];
+ gnttab_set_unmap_op(gop,
+ idx_to_kaddr(queue, pending_idx),
+ GNTMAP_host_map,
+ queue->grant_tx_handle[pending_idx]);
+ xenvif_grant_handle_reset(queue, pending_idx);
+ ++gop;
+ }
+
+ } while (dp != queue->dealloc_prod);
+
+ queue->dealloc_cons = dc;
+
+ if (gop - queue->tx_unmap_ops > 0) {
+ int ret;
+ ret = gnttab_unmap_refs(queue->tx_unmap_ops,
+ NULL,
+ queue->pages_to_unmap,
+ gop - queue->tx_unmap_ops);
+ if (ret) {
+ netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
+ gop - queue->tx_unmap_ops, ret);
+ for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
+ if (gop[i].status != GNTST_okay)
+ netdev_err(queue->vif->dev,
+ " host_addr: 0x%llx handle: 0x%x status: %d\n",
+ gop[i].host_addr,
+ gop[i].handle,
+ gop[i].status);
+ }
+ BUG();
+ }
+ }
- xen_netbk_tx_submit(netbk);
+ for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
+ xenvif_idx_release(queue, pending_idx_release[i],
+ XEN_NETIF_RSP_OKAY);
 }
-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
- u8 status)
+
+/* Called after netfront has transmitted */
+int xenvif_tx_action(struct xenvif_queue *queue, int budget)
 {
- struct xenvif *vif;
- struct pending_tx_info *pending_tx_info;
- pending_ring_idx_t head;
- u16 peek; /* peek into next tx request */
+ unsigned nr_mops, nr_cops = 0;
+ int work_done, ret;
- BUG_ON(netbk->mmap_pages[pending_idx] == (void *)(~0UL));
+ if (unlikely(!tx_work_todo(queue)))
+ return 0;
- /* Already complete? */
- if (netbk->mmap_pages[pending_idx] == NULL)
- return;
+ xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);
- pending_tx_info = &netbk->pending_tx_info[pending_idx];
+ if (nr_cops == 0)
+ return 0;
- vif = pending_tx_info->vif;
- head = pending_tx_info->head;
+ gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
+ if (nr_mops != 0) {
+ ret = gnttab_map_refs(queue->tx_map_ops,
+ NULL,
+ queue->pages_to_map,
+ nr_mops);
+ BUG_ON(ret);
+ }
- BUG_ON(!pending_tx_is_head(netbk, head));
- BUG_ON(netbk->pending_ring[pending_index(head)] != pending_idx);
+ work_done = xenvif_tx_submit(queue);
- do {
- pending_ring_idx_t index;
- pending_ring_idx_t idx = pending_index(head);
- u16 info_idx = netbk->pending_ring[idx];
+ return work_done;
+}
- pending_tx_info = &netbk->pending_tx_info[info_idx];
- make_tx_response(vif, &pending_tx_info->req, status);
+static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
+ u8 status)
+{
+ struct pending_tx_info *pending_tx_info;
+ pending_ring_idx_t index;
+ unsigned long flags;
- /* Setting any number other than
- * INVALID_PENDING_RING_IDX indicates this slot is
- * starting a new packet / ending a previous packet.
- */
- pending_tx_info->head = 0;
+ pending_tx_info = &queue->pending_tx_info[pending_idx];
- index = pending_index(netbk->pending_prod++);
- netbk->pending_ring[index] = netbk->pending_ring[info_idx];
+ spin_lock_irqsave(&queue->response_lock, flags);
- xenvif_put(vif);
+ make_tx_response(queue, &pending_tx_info->req, status);
- peek = netbk->pending_ring[pending_index(++head)];
+ /* Release the pending index before pushing the Tx response so
+ * it's available before a new Tx request is pushed by the
+ * frontend.
+ */ + index = pending_index(queue->pending_prod++); + queue->pending_ring[index] = pending_idx; - } while (!pending_tx_is_head(netbk, peek)); + push_tx_responses(queue); - netbk->mmap_pages[pending_idx]->mapping = 0; - put_page(netbk->mmap_pages[pending_idx]); - netbk->mmap_pages[pending_idx] = NULL; + spin_unlock_irqrestore(&queue->response_lock, flags); } -static void make_tx_response(struct xenvif *vif, +static void make_tx_response(struct xenvif_queue *queue, struct xen_netif_tx_request *txp, s8 st) { - RING_IDX i = vif->tx.rsp_prod_pvt; + RING_IDX i = queue->tx.rsp_prod_pvt; struct xen_netif_tx_response *resp; - int notify; - resp = RING_GET_RESPONSE(&vif->tx, i); + resp = RING_GET_RESPONSE(&queue->tx, i); resp->id = txp->id; resp->status = st; if (txp->flags & XEN_NETTXF_extra_info) - RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL; + RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL; + + queue->tx.rsp_prod_pvt = ++i; +} - vif->tx.rsp_prod_pvt = ++i; - RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify); +static void push_tx_responses(struct xenvif_queue *queue) +{ + int notify; + + RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); if (notify) - notify_remote_via_irq(vif->irq); + notify_remote_via_irq(queue->tx_irq); } -static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, +static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue, u16 id, s8 st, u16 offset, u16 size, u16 flags) { - RING_IDX i = vif->rx.rsp_prod_pvt; + RING_IDX i = queue->rx.rsp_prod_pvt; struct xen_netif_rx_response *resp; - resp = RING_GET_RESPONSE(&vif->rx, i); + resp = RING_GET_RESPONSE(&queue->rx, i); resp->offset = offset; resp->flags = flags; resp->id = id; @@ -1815,66 +1864,62 @@ if (st < 0) resp->status = (s16)st; - vif->rx.rsp_prod_pvt = ++i; + queue->rx.rsp_prod_pvt = ++i; return resp; } -static inline int rx_work_todo(struct xen_netbk *netbk) +void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx) { - return !skb_queue_empty(&netbk->rx_queue); + int ret; + struct gnttab_unmap_grant_ref tx_unmap_op; + + gnttab_set_unmap_op(&tx_unmap_op, + idx_to_kaddr(queue, pending_idx), + GNTMAP_host_map, + queue->grant_tx_handle[pending_idx]); + xenvif_grant_handle_reset(queue, pending_idx); + + ret = gnttab_unmap_refs(&tx_unmap_op, NULL, + &queue->mmap_pages[pending_idx], 1); + if (ret) { + netdev_err(queue->vif->dev, + "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n", + ret, + pending_idx, + tx_unmap_op.host_addr, + tx_unmap_op.handle, + tx_unmap_op.status); + BUG(); + } } -static inline int tx_work_todo(struct xen_netbk *netbk) +static inline int tx_work_todo(struct xenvif_queue *queue) { - - if ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX - < MAX_PENDING_REQS) && - !list_empty(&netbk->net_schedule_list)) + if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))) return 1; return 0; } -static int xen_netbk_kthread(void *data) +static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue) { - struct xen_netbk *netbk = data; - while (!kthread_should_stop()) { - wait_event_interruptible(netbk->wq, - rx_work_todo(netbk) || - tx_work_todo(netbk) || - kthread_should_stop()); - cond_resched(); - - if (kthread_should_stop()) - break; - - if (rx_work_todo(netbk)) - xen_netbk_rx_action(netbk); - - if (tx_work_todo(netbk)) - xen_netbk_tx_action(netbk); - } - - return 0; + return queue->dealloc_cons != queue->dealloc_prod; } -void xen_netbk_unmap_frontend_rings(struct xenvif *vif) +void 
xenvif_unmap_frontend_rings(struct xenvif_queue *queue) { - atomic_dec(&vif->ring_refcnt); - wait_event(vif->waiting_to_unmap, atomic_read(&vif->ring_refcnt) == 0); + if (queue->tx.sring) + xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif), + queue->tx.sring); + if (queue->rx.sring) + xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif), + queue->rx.sring); +} - if (vif->tx.sring) - xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif), - vif->tx.sring); - if (vif->rx.sring) - xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif), - vif->rx.sring); -} - -int xen_netbk_map_frontend_rings(struct xenvif *vif, - grant_ref_t tx_ring_ref, - grant_ref_t rx_ring_ref) +int xenvif_map_frontend_rings(struct xenvif_queue *queue, + grant_ref_t tx_ring_ref, + grant_ref_t rx_ring_ref) { void *addr; struct xen_netif_tx_sring *txs; @@ -1882,113 +1927,272 @@ int err = -ENOMEM; - atomic_set(&vif->ring_refcnt, 1); - - err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif), - tx_ring_ref, &addr); + err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif), + &tx_ring_ref, 1, &addr); if (err) goto err; txs = (struct xen_netif_tx_sring *)addr; - BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE); + BACK_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE); - err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif), - rx_ring_ref, &addr); + err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif), + &rx_ring_ref, 1, &addr); if (err) goto err; rxs = (struct xen_netif_rx_sring *)addr; - BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE); - - vif->rx_req_cons_peek = 0; + BACK_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE); return 0; err: - xen_netbk_unmap_frontend_rings(vif); + xenvif_unmap_frontend_rings(queue); return err; } -static int __init netback_init(void) +static void xenvif_queue_carrier_off(struct xenvif_queue *queue) { - int i; - int rc = 0; - int group; + struct xenvif *vif = queue->vif; - if (!xen_domain()) - return -ENODEV; + queue->stalled = true; - if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) { - printk(KERN_INFO - "xen-netback: fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n", - fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX); - fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX; + /* At least one queue has stalled? Disable the carrier. */ + spin_lock(&vif->lock); + if (vif->stalled_queues++ == 0) { + netdev_info(vif->dev, "Guest Rx stalled"); + netif_carrier_off(vif->dev); } + spin_unlock(&vif->lock); +} - xen_netbk_group_nr = num_online_cpus(); - xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr); - if (!xen_netbk) - return -ENOMEM; +static void xenvif_queue_carrier_on(struct xenvif_queue *queue) +{ + struct xenvif *vif = queue->vif; - for (group = 0; group < xen_netbk_group_nr; group++) { - struct xen_netbk *netbk = &xen_netbk[group]; - skb_queue_head_init(&netbk->rx_queue); - skb_queue_head_init(&netbk->tx_queue); + queue->last_rx_time = jiffies; /* Reset Rx stall detection. */ + queue->stalled = false; - init_timer(&netbk->net_timer); - netbk->net_timer.data = (unsigned long)netbk; - netbk->net_timer.function = xen_netbk_alarm; + /* All queues are ready? Enable the carrier. 
*/ + spin_lock(&vif->lock); + if (--vif->stalled_queues == 0) { + netdev_info(vif->dev, "Guest Rx ready"); + netif_carrier_on(vif->dev); + } + spin_unlock(&vif->lock); +} - netbk->pending_cons = 0; - netbk->pending_prod = MAX_PENDING_REQS; - for (i = 0; i < MAX_PENDING_REQS; i++) - netbk->pending_ring[i] = i; +static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue) +{ + RING_IDX prod, cons; + + prod = queue->rx.sring->req_prod; + cons = queue->rx.req_cons; - init_waitqueue_head(&netbk->wq); - netbk->task = kthread_create(xen_netbk_kthread, - (void *)netbk, - "netback/%u", group); + return !queue->stalled && prod - cons < 1 + && time_after(jiffies, + queue->last_rx_time + queue->vif->stall_timeout); +} - if (IS_ERR(netbk->task)) { - printk(KERN_ALERT "kthread_create() fails at netback\n"); - del_timer(&netbk->net_timer); - rc = PTR_ERR(netbk->task); - goto failed_init; +static bool xenvif_rx_queue_ready(struct xenvif_queue *queue) +{ + RING_IDX prod, cons; + + prod = queue->rx.sring->req_prod; + cons = queue->rx.req_cons; + + return queue->stalled && prod - cons >= 1; +} + +static bool xenvif_have_rx_work(struct xenvif_queue *queue) +{ + return (!skb_queue_empty(&queue->rx_queue) + && xenvif_rx_ring_slots_available(queue)) + || (queue->vif->stall_timeout && + (xenvif_rx_queue_stalled(queue) + || xenvif_rx_queue_ready(queue))) + || kthread_should_stop() + || queue->vif->disabled; +} + +static long xenvif_rx_queue_timeout(struct xenvif_queue *queue) +{ + struct sk_buff *skb; + long timeout; + + skb = skb_peek(&queue->rx_queue); + if (!skb) + return MAX_SCHEDULE_TIMEOUT; + + timeout = XENVIF_RX_CB(skb)->expires - jiffies; + return timeout < 0 ? 0 : timeout; +} + +/* Wait until the guest Rx thread has work. + * + * The timeout needs to be adjusted based on the current head of the + * queue (and not just the head at the beginning). In particular, if + * the queue is initially empty an infinite timeout is used and this + * needs to be reduced when a skb is queued. + * + * This cannot be done with wait_event_timeout() because it only + * calculates the timeout once. + */ +static void xenvif_wait_for_rx_work(struct xenvif_queue *queue) +{ + DEFINE_WAIT(wait); + + if (xenvif_have_rx_work(queue)) + return; + + for (;;) { + long ret; + + prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE); + if (xenvif_have_rx_work(queue)) + break; + ret = schedule_timeout(xenvif_rx_queue_timeout(queue)); + if (!ret) + break; + } + finish_wait(&queue->wq, &wait); +} + +int xenvif_kthread_guest_rx(void *data) +{ + struct xenvif_queue *queue = data; + struct xenvif *vif = queue->vif; + + if (!vif->stall_timeout) + xenvif_queue_carrier_on(queue); + + for (;;) { + xenvif_wait_for_rx_work(queue); + + if (kthread_should_stop()) + break; + + /* This frontend is found to be rogue, disable it in + * kthread context. Currently this is only set when + * netback finds out frontend sends malformed packet, + * but we cannot disable the interface in softirq + * context so we defer it here, if this thread is + * associated with queue 0. + */ + if (unlikely(vif->disabled && queue->id == 0)) { + xenvif_carrier_off(vif); + break; } - kthread_bind(netbk->task, group); + if (!skb_queue_empty(&queue->rx_queue)) + xenvif_rx_action(queue); + + /* If the guest hasn't provided any Rx slots for a + * while it's probably not responsive, drop the + * carrier so packets are dropped earlier. 
+ */ + if (vif->stall_timeout) { + if (xenvif_rx_queue_stalled(queue)) + xenvif_queue_carrier_off(queue); + else if (xenvif_rx_queue_ready(queue)) + xenvif_queue_carrier_on(queue); + } + + /* Queued packets may have foreign pages from other + * domains. These cannot be queued indefinitely as + * this would starve guests of grant refs and transmit + * slots. + */ + xenvif_rx_queue_drop_expired(queue); + + xenvif_rx_queue_maybe_wake(queue); + + cond_resched(); + } + + /* Bin any remaining skbs */ + xenvif_rx_queue_purge(queue); - INIT_LIST_HEAD(&netbk->net_schedule_list); + return 0; +} - spin_lock_init(&netbk->net_schedule_list_lock); +static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue) +{ + /* Dealloc thread must remain running until all inflight + * packets complete. + */ + return kthread_should_stop() && + !atomic_read(&queue->inflight_packets); +} + +int xenvif_dealloc_kthread(void *data) +{ + struct xenvif_queue *queue = data; - atomic_set(&netbk->netfront_count, 0); + for (;;) { + wait_event_interruptible(queue->dealloc_wq, + tx_dealloc_work_todo(queue) || + xenvif_dealloc_kthread_should_stop(queue)); + if (xenvif_dealloc_kthread_should_stop(queue)) + break; - wake_up_process(netbk->task); + xenvif_tx_dealloc_action(queue); + cond_resched(); + } + + /* Unmap anything remaining*/ + if (tx_dealloc_work_todo(queue)) + xenvif_tx_dealloc_action(queue); + + return 0; +} + +static int __init netback_init(void) +{ + int rc = 0; + + if (!xen_domain()) + return -ENODEV; + + /* Allow as many queues as there are CPUs if user has not + * specified a value. + */ + if (xenvif_max_queues == 0) + xenvif_max_queues = num_online_cpus(); + + if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) { + pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n", + fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX); + fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX; } rc = xenvif_xenbus_init(); if (rc) goto failed_init; +#ifdef CONFIG_DEBUG_FS + xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL); + if (IS_ERR_OR_NULL(xen_netback_dbg_root)) + pr_warn("Init of debugfs returned %ld!\n", + PTR_ERR(xen_netback_dbg_root)); +#endif /* CONFIG_DEBUG_FS */ + return 0; failed_init: - while (--group >= 0) { - struct xen_netbk *netbk = &xen_netbk[group]; - for (i = 0; i < MAX_PENDING_REQS; i++) { - if (netbk->mmap_pages[i]) - __free_page(netbk->mmap_pages[i]); - } - del_timer(&netbk->net_timer); - kthread_stop(netbk->task); - } - vfree(xen_netbk); return rc; - } module_init(netback_init); +static void __exit netback_fini(void) +{ +#ifdef CONFIG_DEBUG_FS + if (!IS_ERR_OR_NULL(xen_netback_dbg_root)) + debugfs_remove_recursive(xen_netback_dbg_root); +#endif /* CONFIG_DEBUG_FS */ + xenvif_xenbus_fini(); +} +module_exit(netback_fini); + MODULE_LICENSE("Dual BSD/GPL"); MODULE_ALIAS("xen-backend:vif");
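A note on the RCU usage in xenvif_mcast_add()/xenvif_mcast_del()/xenvif_mcast_match() in the hunks above: the writers only ever run from NAPI poll, so they need no lock against each other, while the reader in start_xmit walks the list under rcu_read_lock() and the writers defer the free with kfree_rcu() until every such reader has finished. The following is a minimal, self-contained sketch of that pattern; the demo_* names and the standalone list head are invented for illustration and are not part of netback.

/* Illustrative sketch only -- not netback code. Writers are assumed to
 * be serialised by their caller (e.g. they all run in one NAPI context).
 */
#include <linux/etherdevice.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_mcast_addr {
	struct list_head entry;
	struct rcu_head rcu;
	u8 addr[ETH_ALEN];
};

static LIST_HEAD(demo_mcast_list);

/* Writer: publish a new entry; concurrent readers may see it immediately. */
static int demo_mcast_add(const u8 *addr)
{
	struct demo_mcast_addr *mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);

	if (!mcast)
		return -ENOMEM;
	ether_addr_copy(mcast->addr, addr);
	list_add_tail_rcu(&mcast->entry, &demo_mcast_list);
	return 0;
}

/* Writer: unlink the entry now, free it only after a grace period. */
static void demo_mcast_del(const u8 *addr)
{
	struct demo_mcast_addr *mcast;

	list_for_each_entry(mcast, &demo_mcast_list, entry) {
		if (ether_addr_equal(mcast->addr, addr)) {
			list_del_rcu(&mcast->entry);
			kfree_rcu(mcast, rcu);
			break;
		}
	}
}

/* Reader: may run concurrently with add/del; needs only rcu_read_lock(). */
static bool demo_mcast_match(const u8 *addr)
{
	struct demo_mcast_addr *mcast;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(mcast, &demo_mcast_list, entry) {
		if (ether_addr_equal(mcast->addr, addr)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

The kfree_rcu() in the delete path is what makes the lockless lookup safe: the memory is reclaimed only after all readers that could still hold a pointer to the entry have dropped out of their RCU read-side critical sections.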
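A worked example for the gso_segs calculation in xenvif_tx_submit() above (numbers chosen purely for illustration): for a GSO packet with skb->len = 64066 bytes, a 66-byte Ethernet + IP + TCP header (hdrlen = 66) and gso_size = 1448, gso_segs = DIV_ROUND_UP(64066 - 66, 1448) = DIV_ROUND_UP(64000, 1448) = 45, i.e. 44 full 1448-byte segments plus one 288-byte tail segment.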
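The dealloc ring shared by xenvif_zerocopy_callback() and xenvif_tx_dealloc_action() above is a single-producer/single-consumer index ring: the producer stores a pending index into the ring and only then advances dealloc_prod behind an smp_wmb(), while the consumer snapshots dealloc_prod and issues an smp_rmb() before reading the slots. Below is a stripped-down sketch of just that barrier pairing; the demo_* names, the ring size and the omitted overflow check are illustrative simplifications, not netback code.

/* Illustrative sketch only: the barrier pairing of an SPSC index ring. */
#include <asm/barrier.h>
#include <linux/types.h>

#define DEMO_RING_SIZE 256	/* power of two, like MAX_PENDING_REQS */

static u16 demo_ring[DEMO_RING_SIZE];
static unsigned int demo_prod;	/* written by the producer only */
static unsigned int demo_cons;	/* written by the consumer only */

/* Producer side (cf. xenvif_zerocopy_callback): slot first, index second. */
static void demo_produce(u16 pending_idx)
{
	demo_ring[demo_prod & (DEMO_RING_SIZE - 1)] = pending_idx;
	smp_wmb();	/* entry must be visible before the producer index */
	demo_prod++;
}

/* Consumer side (cf. xenvif_tx_dealloc_action): index first, slots second. */
static void demo_consume(void (*handle)(u16 pending_idx))
{
	unsigned int prod = demo_prod;

	smp_rmb();	/* see every entry published up to 'prod' */
	while (demo_cons != prod) {
		handle(demo_ring[demo_cons & (DEMO_RING_SIZE - 1)]);
		demo_cons++;
	}
}

The real code additionally BUG()s in the producer if the ring would ever hold more than MAX_PENDING_REQS entries, and the consumer re-reads dealloc_prod in an outer loop so that completions arriving while it is draining the ring are picked up before the dealloc thread goes back to sleep.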