Lines Matching +full:tx +full:-slots

2 * Back-end of the driver for virtual network devices. This portion of the
3 * driver exports a 'unified' network-device interface that can be accessed
5 * reference front-end implementation can be found in:
6 * drivers/net/xen-netfront.c
8 * Copyright (c) 2002-2005, K A Fraser
66 * because it isn't providing Rx slots.
78 * This is the maximum slots a skb can have. If a guest sends a skb
85 /* The amount to copy out of the first guest Tx slot into the skb's
101 * for xen-netfront with the XDP_PACKET_HEADROOM offset
122 return page_to_pfn(queue->mmap_pages[idx]); in idx_to_pfn()
132 (vif->pending_tx_info[pending_idx].callback_struct)
138 u16 pending_idx = ubuf->desc; in ubuf_to_queue()
141 return container_of(temp - pending_idx, in ubuf_to_queue()
158 return i & (MAX_PENDING_REQS-1); in pending_index()
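
The pending_index() helper above relies on MAX_PENDING_REQS being a power of two, so the modulo reduces to a single AND. A minimal standalone C sketch of that masking idea; the value 256 and the surrounding helper are illustrative assumptions, not taken from this listing:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_PENDING_REQS 256U   /* assumed power of two; only then does the mask work */

/* Map a free-running counter onto a slot in the pending ring. */
static uint16_t pending_index(uint16_t i)
{
    return i & (MAX_PENDING_REQS - 1);
}

int main(void)
{
    assert(pending_index(0) == 0);
    assert(pending_index(MAX_PENDING_REQS) == 0);            /* wraps back to slot 0 */
    assert(pending_index(MAX_PENDING_REQS + 5) == 5);
    printf("index for 65535 -> %u\n", pending_index(65535)); /* 255 */
    return 0;
}
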
163 wake_up(&queue->wq); in xenvif_kick_thread()
170 RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do); in xenvif_napi_schedule_or_enable_events()
173 napi_schedule(&queue->napi); in xenvif_napi_schedule_or_enable_events()
175 &queue->eoi_pending) & in xenvif_napi_schedule_or_enable_events()
177 xen_irq_lateeoi(queue->tx_irq, 0); in xenvif_napi_schedule_or_enable_events()
188 max_burst = max(131072UL, queue->credit_bytes); in tx_add_credit()
191 max_credit = queue->remaining_credit + queue->credit_bytes; in tx_add_credit()
192 if (max_credit < queue->remaining_credit) in tx_add_credit()
195 queue->remaining_credit = min(max_credit, max_burst); in tx_add_credit()
196 queue->rate_limited = false; in tx_add_credit()
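
tx_add_credit() tops the queue's credit back up while guarding against wrap-around and capping the burst. The listing elides the saturation step between lines 192 and 195, so the sketch below assumes it clamps to ULONG_MAX; the struct and field names are stand-ins for the per-queue state:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the per-queue credit state visible in the listing. */
struct queue_credit {
    unsigned long credit_bytes;      /* bytes granted per replenish period */
    unsigned long remaining_credit;  /* bytes still spendable right now    */
    bool rate_limited;
};

static void tx_add_credit(struct queue_credit *q)
{
    unsigned long max_burst, max_credit;

    /* Always allow at least one large burst (the 131072UL floor above). */
    max_burst = q->credit_bytes > 131072UL ? q->credit_bytes : 131072UL;

    /* Top the credit up, saturating instead of wrapping on overflow. */
    max_credit = q->remaining_credit + q->credit_bytes;
    if (max_credit < q->remaining_credit)
        max_credit = ULONG_MAX;          /* assumed saturation value */

    q->remaining_credit = max_credit < max_burst ? max_credit : max_burst;
    q->rate_limited = false;
}

int main(void)
{
    struct queue_credit q = {
        .credit_bytes = 100000, .remaining_credit = 20000, .rate_limited = true,
    };

    tx_add_credit(&q);
    printf("remaining=%lu rate_limited=%d\n", q.remaining_credit, q.rate_limited);
    return 0;
}
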
210 RING_IDX cons = queue->tx.req_cons; in xenvif_tx_err()
216 RING_COPY_REQUEST(&queue->tx, cons++, txp); in xenvif_tx_err()
219 queue->tx.req_cons = cons; in xenvif_tx_err()
224 netdev_err(vif->dev, "fatal error; disabling device\n"); in xenvif_fatal_tx_err()
225 vif->disabled = true; in xenvif_fatal_tx_err()
227 if (vif->num_queues) in xenvif_fatal_tx_err()
228 xenvif_kick_thread(&vif->queues[0]); in xenvif_fatal_tx_err()
237 RING_IDX cons = queue->tx.req_cons; in xenvif_count_requests()
238 int slots = 0; in xenvif_count_requests() local
242 if (!(first->flags & XEN_NETTXF_more_data)) in xenvif_count_requests()
248 if (slots >= work_to_do) { in xenvif_count_requests()
249 netdev_err(queue->vif->dev, in xenvif_count_requests()
250 "Asked for %d slots but exceeds this limit\n", in xenvif_count_requests()
252 xenvif_fatal_tx_err(queue->vif); in xenvif_count_requests()
253 return -ENODATA; in xenvif_count_requests()
256 /* This guest is really using too many slots and in xenvif_count_requests()
259 if (unlikely(slots >= fatal_skb_slots)) { in xenvif_count_requests()
260 netdev_err(queue->vif->dev, in xenvif_count_requests()
261 "Malicious frontend using %d slots, threshold %u\n", in xenvif_count_requests()
262 slots, fatal_skb_slots); in xenvif_count_requests()
263 xenvif_fatal_tx_err(queue->vif); in xenvif_count_requests()
264 return -E2BIG; in xenvif_count_requests()
271 * 18 slots but less than fatal_skb_slots slots is in xenvif_count_requests()
274 if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) { in xenvif_count_requests()
276 netdev_dbg(queue->vif->dev, in xenvif_count_requests()
277 "Too many slots (%d) exceeding limit (%d), dropping packet\n", in xenvif_count_requests()
278 slots, XEN_NETBK_LEGACY_SLOTS_MAX); in xenvif_count_requests()
279 drop_err = -E2BIG; in xenvif_count_requests()
285 RING_COPY_REQUEST(&queue->tx, cons + slots, txp); in xenvif_count_requests()
288 * first->size overflowed and following slots will in xenvif_count_requests()
294 * Consume all slots and drop the packet. in xenvif_count_requests()
296 if (!drop_err && txp->size > first->size) { in xenvif_count_requests()
298 netdev_dbg(queue->vif->dev, in xenvif_count_requests()
299 "Invalid tx request, slot size %u > remaining size %u\n", in xenvif_count_requests()
300 txp->size, first->size); in xenvif_count_requests()
301 drop_err = -EIO; in xenvif_count_requests()
304 first->size -= txp->size; in xenvif_count_requests()
305 slots++; in xenvif_count_requests()
307 if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) { in xenvif_count_requests()
308 netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n", in xenvif_count_requests()
309 txp->offset, txp->size); in xenvif_count_requests()
310 xenvif_fatal_tx_err(queue->vif); in xenvif_count_requests()
311 return -EINVAL; in xenvif_count_requests()
314 more_data = txp->flags & XEN_NETTXF_more_data; in xenvif_count_requests()
322 xenvif_tx_err(queue, first, extra_count, cons + slots); in xenvif_count_requests()
326 return slots; in xenvif_count_requests()
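
xenvif_count_requests() walks a chain of XEN_NETTXF_more_data slots with two thresholds: a soft one (consume the slots but drop the packet) and a hard one (treat the frontend as malicious). A hedged user-space sketch of that control flow; the threshold values, struct layout and return codes are illustrative only:

#include <stdio.h>

/* Illustrative thresholds; the real driver derives them from the ring protocol. */
#define XEN_NETBK_LEGACY_SLOTS_MAX 18
#define FATAL_SKB_SLOTS            20

struct tx_req {
    unsigned int size;
    int more_data;        /* stand-in for the XEN_NETTXF_more_data flag */
};

/*
 * Count the follow-on slots of a chained request the way the listing's loop
 * does: a chain that is merely too long is consumed and then dropped, a
 * grossly oversized one is treated as a fatal/malicious frontend.
 * reqs[0] is the first request; work_to_do is how many further requests
 * are available to inspect.
 */
static int count_slots(const struct tx_req *reqs, int work_to_do)
{
    int slots = 0, drop_err = 0;

    if (!reqs[0].more_data)
        return 0;

    do {
        if (slots >= work_to_do)
            return -1;                      /* frontend asked for too much  */
        if (slots >= FATAL_SKB_SLOTS)
            return -2;                      /* malicious: disable the vif   */
        if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX)
            drop_err = 1;                   /* keep consuming, drop later   */
        slots++;
    } while (reqs[slots].more_data);

    return drop_err ? -3 : slots;
}

int main(void)
{
    struct tx_req chain[] = { {1000, 1}, {500, 1}, {200, 0} };
    printf("follow-on slots = %d\n", count_slots(chain, 2));   /* 2 */
    return 0;
}
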
336 #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
337 #define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i])
338 #define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count)
346 queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx]; in xenvif_tx_create_map_op()
349 txp->gref, queue->vif->domid); in xenvif_tx_create_map_op()
351 memcpy(&queue->pending_tx_info[pending_idx].req, txp, in xenvif_tx_create_map_op()
353 queue->pending_tx_info[pending_idx].extra_count = extra_count; in xenvif_tx_create_map_op()
362 BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb)); in xenvif_alloc_skb()
370 skb_shinfo(skb)->destructor_arg = NULL; in xenvif_alloc_skb()
387 skb_frag_t *frags = shinfo->frags; in xenvif_get_requests()
391 struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops; in xenvif_get_requests()
392 struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops; in xenvif_get_requests()
395 nr_slots = shinfo->nr_frags + frag_overflow + 1; in xenvif_get_requests()
398 XENVIF_TX_CB(skb)->split_mask = 0; in xenvif_get_requests()
403 int amount = data_len > txp->size ? txp->size : data_len; in xenvif_get_requests()
406 cop->source.u.ref = txp->gref; in xenvif_get_requests()
407 cop->source.domid = queue->vif->domid; in xenvif_get_requests()
408 cop->source.offset = txp->offset; in xenvif_get_requests()
410 cop->dest.domid = DOMID_SELF; in xenvif_get_requests()
411 cop->dest.offset = (offset_in_page(skb->data + in xenvif_get_requests()
412 skb_headlen(skb) - in xenvif_get_requests()
414 cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb) in xenvif_get_requests()
415 - data_len); in xenvif_get_requests()
418 if (cop->dest.offset + amount > XEN_PAGE_SIZE) { in xenvif_get_requests()
419 amount = XEN_PAGE_SIZE - cop->dest.offset; in xenvif_get_requests()
420 XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb); in xenvif_get_requests()
424 cop->len = amount; in xenvif_get_requests()
425 cop->flags = GNTCOPY_source_gref; in xenvif_get_requests()
427 index = pending_index(queue->pending_cons); in xenvif_get_requests()
428 pending_idx = queue->pending_ring[index]; in xenvif_get_requests()
435 data_len -= amount; in xenvif_get_requests()
437 if (amount == txp->size) { in xenvif_get_requests()
440 memcpy(&queue->pending_tx_info[pending_idx].req, in xenvif_get_requests()
442 queue->pending_tx_info[pending_idx].extra_count = in xenvif_get_requests()
449 queue->pending_cons++; in xenvif_get_requests()
450 nr_slots--; in xenvif_get_requests()
456 txp->offset += amount; in xenvif_get_requests()
457 txp->size -= amount; in xenvif_get_requests()
461 for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS; in xenvif_get_requests()
462 nr_slots--) { in xenvif_get_requests()
463 if (unlikely(!txp->size)) { in xenvif_get_requests()
469 index = pending_index(queue->pending_cons++); in xenvif_get_requests()
470 pending_idx = queue->pending_ring[index]; in xenvif_get_requests()
473 frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx); in xenvif_get_requests()
474 ++shinfo->nr_frags; in xenvif_get_requests()
486 frags = shinfo->frags; in xenvif_get_requests()
488 for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) { in xenvif_get_requests()
489 if (unlikely(!txp->size)) { in xenvif_get_requests()
495 index = pending_index(queue->pending_cons++); in xenvif_get_requests()
496 pending_idx = queue->pending_ring[index]; in xenvif_get_requests()
499 frag_set_pending_idx(&frags[shinfo->nr_frags], in xenvif_get_requests()
501 ++shinfo->nr_frags; in xenvif_get_requests()
505 if (shinfo->nr_frags) { in xenvif_get_requests()
506 skb_shinfo(skb)->frag_list = nskb; in xenvif_get_requests()
513 * because enough slots were converted to copy ops above or some in xenvif_get_requests()
519 (*copy_ops) = cop - queue->tx_copy_ops; in xenvif_get_requests()
520 (*map_ops) = gop - queue->tx_map_ops; in xenvif_get_requests()
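
Within xenvif_get_requests(), the copy of the packet head is clamped so the destination never crosses a XEN_PAGE_SIZE boundary, and the split is recorded in split_mask so response handling knows two copy ops belong to one slot (source lines 418-420 above). A small sketch of that clamp; the function and parameter names are stand-ins:

#include <stdint.h>
#include <stdio.h>

#define XEN_PAGE_SIZE 4096U

/*
 * Clamp a copy so the destination never crosses a page boundary, recording
 * the split in a bitmask the way XENVIF_TX_CB(skb)->split_mask does in the
 * listing. Returns the length of the first chunk.
 */
static unsigned int clamp_copy(unsigned int dest_offset, unsigned int amount,
                               uint32_t *split_mask, unsigned int copy_idx)
{
    if (dest_offset + amount > XEN_PAGE_SIZE) {
        amount = XEN_PAGE_SIZE - dest_offset;
        *split_mask |= 1U << copy_idx;   /* remember: this slot needs a 2nd op */
    }
    return amount;
}

int main(void)
{
    uint32_t mask = 0;
    unsigned int len = clamp_copy(4000, 300, &mask, 0);

    printf("first chunk=%u bytes, split_mask=0x%x\n", len, mask); /* 96, 0x1 */
    return 0;
}
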
527 if (unlikely(queue->grant_tx_handle[pending_idx] != in xenvif_grant_handle_set()
529 netdev_err(queue->vif->dev, in xenvif_grant_handle_set()
534 queue->grant_tx_handle[pending_idx] = handle; in xenvif_grant_handle_set()
540 if (unlikely(queue->grant_tx_handle[pending_idx] == in xenvif_grant_handle_reset()
542 netdev_err(queue->vif->dev, in xenvif_grant_handle_reset()
547 queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE; in xenvif_grant_handle_reset()
561 /* If this is non-NULL, we are currently checking the frag_list skb, and in xenvif_tx_check_gop()
565 int nr_frags = shinfo->nr_frags; in xenvif_tx_check_gop()
567 frag_get_pending_idx(&shinfo->frags[0]) == in xenvif_tx_check_gop()
568 copy_pending_idx(skb, copy_count(skb) - 1); in xenvif_tx_check_gop()
577 newerr = (*gopp_copy)->status; in xenvif_tx_check_gop()
580 if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) { in xenvif_tx_check_gop()
583 newerr = (*gopp_copy)->status; in xenvif_tx_check_gop()
587 if (i < copy_count(skb) - 1 || !sharedslot) in xenvif_tx_check_gop()
593 netdev_dbg(queue->vif->dev, in xenvif_tx_check_gop()
595 (*gopp_copy)->status, in xenvif_tx_check_gop()
597 (*gopp_copy)->source.u.ref); in xenvif_tx_check_gop()
599 if (i < copy_count(skb) - 1 || !sharedslot) in xenvif_tx_check_gop()
610 pending_idx = frag_get_pending_idx(&shinfo->frags[i]); in xenvif_tx_check_gop()
613 newerr = gop_map->status; in xenvif_tx_check_gop()
618 gop_map->handle); in xenvif_tx_check_gop()
638 netdev_dbg(queue->vif->dev, in xenvif_tx_check_gop()
641 gop_map->status, in xenvif_tx_check_gop()
643 gop_map->ref); in xenvif_tx_check_gop()
653 pending_idx = frag_get_pending_idx(&shinfo->frags[j]); in xenvif_tx_check_gop()
663 for (j = 0; j < first_shinfo->nr_frags; j++) { in xenvif_tx_check_gop()
664 pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]); in xenvif_tx_check_gop()
677 shinfo = skb_shinfo(shinfo->frag_list); in xenvif_tx_check_gop()
678 nr_frags = shinfo->nr_frags; in xenvif_tx_check_gop()
690 int nr_frags = shinfo->nr_frags; in xenvif_fill_frags()
695 skb_frag_t *frag = shinfo->frags + i; in xenvif_fill_frags()
704 skb_shinfo(skb)->destructor_arg = in xenvif_fill_frags()
713 txp = &queue->pending_tx_info[pending_idx].req; in xenvif_fill_frags()
715 __skb_fill_page_desc(skb, i, page, txp->offset, txp->size); in xenvif_fill_frags()
716 skb->len += txp->size; in xenvif_fill_frags()
717 skb->data_len += txp->size; in xenvif_fill_frags()
718 skb->truesize += txp->size; in xenvif_fill_frags()
721 get_page(queue->mmap_pages[pending_idx]); in xenvif_fill_frags()
731 RING_IDX cons = queue->tx.req_cons; in xenvif_get_extras()
734 if (unlikely(work_to_do-- <= 0)) { in xenvif_get_extras()
735 netdev_err(queue->vif->dev, "Missing extra info\n"); in xenvif_get_extras()
736 xenvif_fatal_tx_err(queue->vif); in xenvif_get_extras()
737 return -EBADR; in xenvif_get_extras()
740 RING_COPY_REQUEST(&queue->tx, cons, &extra); in xenvif_get_extras()
742 queue->tx.req_cons = ++cons; in xenvif_get_extras()
747 netdev_err(queue->vif->dev, in xenvif_get_extras()
749 xenvif_fatal_tx_err(queue->vif); in xenvif_get_extras()
750 return -EINVAL; in xenvif_get_extras()
753 memcpy(&extras[extra.type - 1], &extra, sizeof(extra)); in xenvif_get_extras()
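
xenvif_get_extras() stores each extra-info segment at extras[type - 1], so later code can simply test extras[TYPE - 1].type to see whether a given extra was supplied (as the GSO, hash and multicast handling further down does). A minimal sketch of that indexing scheme with stand-in enum and struct names:

#include <stdio.h>
#include <string.h>

/* Illustrative subset of the extra-info types seen in the listing. */
enum { EXTRA_TYPE_NONE, EXTRA_TYPE_GSO, EXTRA_TYPE_MCAST_ADD, EXTRA_TYPE_MAX };

struct extra_info { unsigned char type; unsigned int value; };

/*
 * Copy an extra into extras[type - 1]; a zero .type in that slot later
 * means "this extra was not present".
 */
static void store_extra(struct extra_info *extras, const struct extra_info *e)
{
    if (e->type > EXTRA_TYPE_NONE && e->type < EXTRA_TYPE_MAX)
        memcpy(&extras[e->type - 1], e, sizeof(*e));
}

int main(void)
{
    struct extra_info extras[EXTRA_TYPE_MAX - 1] = { 0 };
    struct extra_info gso = { .type = EXTRA_TYPE_GSO, .value = 1448 };

    store_extra(extras, &gso);
    if (extras[EXTRA_TYPE_GSO - 1].type)
        printf("GSO extra present, size %u\n", extras[EXTRA_TYPE_GSO - 1].value);
    return 0;
}
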
763 if (!gso->u.gso.size) { in xenvif_set_skb_gso()
764 netdev_err(vif->dev, "GSO size must not be zero.\n"); in xenvif_set_skb_gso()
766 return -EINVAL; in xenvif_set_skb_gso()
769 switch (gso->u.gso.type) { in xenvif_set_skb_gso()
771 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; in xenvif_set_skb_gso()
774 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; in xenvif_set_skb_gso()
777 netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type); in xenvif_set_skb_gso()
779 return -EINVAL; in xenvif_set_skb_gso()
782 skb_shinfo(skb)->gso_size = gso->u.gso.size; in xenvif_set_skb_gso()
797 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) { in checksum_setup()
798 queue->stats.rx_gso_checksum_fixup++; in checksum_setup()
799 skb->ip_summed = CHECKSUM_PARTIAL; in checksum_setup()
803 /* A non-CHECKSUM_PARTIAL SKB does not require setup. */ in checksum_setup()
804 if (skb->ip_summed != CHECKSUM_PARTIAL) in checksum_setup()
813 u64 next_credit = queue->credit_window_start + in tx_credit_exceeded()
814 msecs_to_jiffies(queue->credit_usec / 1000); in tx_credit_exceeded()
817 if (timer_pending(&queue->credit_timeout)) { in tx_credit_exceeded()
818 queue->rate_limited = true; in tx_credit_exceeded()
824 queue->credit_window_start = now; in tx_credit_exceeded()
829 if (size > queue->remaining_credit) { in tx_credit_exceeded()
830 mod_timer(&queue->credit_timeout, in tx_credit_exceeded()
832 queue->credit_window_start = next_credit; in tx_credit_exceeded()
833 queue->rate_limited = true; in tx_credit_exceeded()
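
tx_credit_exceeded() implements the credit window: once the window has elapsed the credit is replenished, otherwise an oversized request pushes the window forward, arms the credit timer and marks the queue rate-limited. A user-space sketch of that decision, assuming a jiffies-like monotonic counter; the real code additionally checks timer_pending() and lets the caller subtract the packet size on success (source line 957):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the per-queue credit window in the listing. */
struct credit_state {
    unsigned long window_start;     /* start of current window ("jiffies") */
    unsigned long period;           /* replenish interval                  */
    unsigned long credit_bytes;     /* bytes allowed per period            */
    unsigned long remaining_credit;
    bool rate_limited;
};

/* Returns true if the packet must wait for the next credit window. */
static bool tx_credit_exceeded(struct credit_state *c, unsigned long now,
                               unsigned long size)
{
    unsigned long next_credit = c->window_start + c->period;

    if ((long)(now - next_credit) >= 0) {   /* window elapsed: replenish    */
        c->window_start = now;
        c->remaining_credit = c->credit_bytes;
    }

    if (size > c->remaining_credit) {       /* still short: defer to window */
        c->window_start = next_credit;      /* real code also arms a timer  */
        c->rate_limited = true;
        return true;
    }
    return false;                           /* caller subtracts size        */
}

int main(void)
{
    struct credit_state c = { .window_start = 0, .period = 100,
                              .credit_bytes = 1500, .remaining_credit = 100 };

    printf("t=50:  exceeded=%d\n", tx_credit_exceeded(&c, 50, 1000));  /* 1 */
    printf("t=250: exceeded=%d\n", tx_credit_exceeded(&c, 250, 1000)); /* 0 */
    return 0;
}
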
850 if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) { in xenvif_mcast_add()
852 netdev_err(vif->dev, in xenvif_mcast_add()
854 return -ENOSPC; in xenvif_mcast_add()
859 return -ENOMEM; in xenvif_mcast_add()
861 ether_addr_copy(mcast->addr, addr); in xenvif_mcast_add()
862 list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr); in xenvif_mcast_add()
863 vif->fe_mcast_count++; in xenvif_mcast_add()
872 list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) { in xenvif_mcast_del()
873 if (ether_addr_equal(addr, mcast->addr)) { in xenvif_mcast_del()
874 --vif->fe_mcast_count; in xenvif_mcast_del()
875 list_del_rcu(&mcast->entry); in xenvif_mcast_del()
887 list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) { in xenvif_mcast_match()
888 if (ether_addr_equal(addr, mcast->addr)) { in xenvif_mcast_match()
900 /* No need for locking or RCU here. NAPI poll and TX queue in xenvif_mcast_addr_list_free()
903 while (!list_empty(&vif->fe_mcast_addr)) { in xenvif_mcast_addr_list_free()
906 mcast = list_first_entry(&vif->fe_mcast_addr, in xenvif_mcast_addr_list_free()
909 --vif->fe_mcast_count; in xenvif_mcast_addr_list_free()
910 list_del(&mcast->entry); in xenvif_mcast_addr_list_free()
924 while (skb_queue_len(&queue->tx_queue) < budget) { in xenvif_tx_build_gops()
927 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1]; in xenvif_tx_build_gops()
933 if (queue->tx.sring->req_prod - queue->tx.req_cons > in xenvif_tx_build_gops()
935 netdev_err(queue->vif->dev, in xenvif_tx_build_gops()
938 queue->tx.sring->req_prod, queue->tx.req_cons, in xenvif_tx_build_gops()
940 xenvif_fatal_tx_err(queue->vif); in xenvif_tx_build_gops()
944 work_to_do = XEN_RING_NR_UNCONSUMED_REQUESTS(&queue->tx); in xenvif_tx_build_gops()
948 idx = queue->tx.req_cons; in xenvif_tx_build_gops()
950 RING_COPY_REQUEST(&queue->tx, idx, &txreq); in xenvif_tx_build_gops()
952 /* Credit-based scheduling. */ in xenvif_tx_build_gops()
953 if (txreq.size > queue->remaining_credit && in xenvif_tx_build_gops()
957 queue->remaining_credit -= txreq.size; in xenvif_tx_build_gops()
959 work_to_do--; in xenvif_tx_build_gops()
960 queue->tx.req_cons = ++idx; in xenvif_tx_build_gops()
968 idx = queue->tx.req_cons; in xenvif_tx_build_gops()
973 if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) { in xenvif_tx_build_gops()
976 extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1]; in xenvif_tx_build_gops()
977 ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr); in xenvif_tx_build_gops()
986 if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) { in xenvif_tx_build_gops()
989 extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1]; in xenvif_tx_build_gops()
990 xenvif_mcast_del(queue->vif, extra->u.mcast.addr); in xenvif_tx_build_gops()
1009 netdev_dbg(queue->vif->dev, in xenvif_tx_build_gops()
1017 netdev_err(queue->vif->dev, "Cross page boundary, txreq.offset: %u, size: %u\n", in xenvif_tx_build_gops()
1019 xenvif_fatal_tx_err(queue->vif); in xenvif_tx_build_gops()
1023 if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size) in xenvif_tx_build_gops()
1028 netdev_dbg(queue->vif->dev, in xenvif_tx_build_gops()
1034 skb_shinfo(skb)->nr_frags = ret; in xenvif_tx_build_gops()
1035 /* At this point shinfo->nr_frags is in fact the number of in xenvif_tx_build_gops()
1036 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX. in xenvif_tx_build_gops()
1040 if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) { in xenvif_tx_build_gops()
1041 frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS; in xenvif_tx_build_gops()
1043 skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS; in xenvif_tx_build_gops()
1046 skb_shinfo(skb)->nr_frags = 0; in xenvif_tx_build_gops()
1050 netdev_err(queue->vif->dev, in xenvif_tx_build_gops()
1056 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { in xenvif_tx_build_gops()
1058 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; in xenvif_tx_build_gops()
1060 if (xenvif_set_skb_gso(queue->vif, skb, gso)) { in xenvif_tx_build_gops()
1062 skb_shinfo(skb)->nr_frags = 0; in xenvif_tx_build_gops()
1069 if (extras[XEN_NETIF_EXTRA_TYPE_HASH - 1].type) { in xenvif_tx_build_gops()
1073 extra = &extras[XEN_NETIF_EXTRA_TYPE_HASH - 1]; in xenvif_tx_build_gops()
1075 switch (extra->u.hash.type) { in xenvif_tx_build_gops()
1092 *(u32 *)extra->u.hash.value, in xenvif_tx_build_gops()
1100 __skb_queue_tail(&queue->tx_queue, skb); in xenvif_tx_build_gops()
1102 queue->tx.req_cons = idx; in xenvif_tx_build_gops()
1109 * frags. Returns 0 or -ENOMEM if it can't allocate new pages.
1117 struct sk_buff *nskb = skb_shinfo(skb)->frag_list; in xenvif_handle_frag_list()
1119 queue->stats.tx_zerocopy_sent += 2; in xenvif_handle_frag_list()
1120 queue->stats.tx_frag_overflow++; in xenvif_handle_frag_list()
1124 skb->truesize -= skb->data_len; in xenvif_handle_frag_list()
1125 skb->len += nskb->len; in xenvif_handle_frag_list()
1126 skb->data_len += nskb->len; in xenvif_handle_frag_list()
1129 for (i = 0; offset < skb->len; i++) { in xenvif_handle_frag_list()
1137 skb->truesize += skb->data_len; in xenvif_handle_frag_list()
1140 return -ENOMEM; in xenvif_handle_frag_list()
1143 if (offset + PAGE_SIZE < skb->len) in xenvif_handle_frag_list()
1146 len = skb->len - offset; in xenvif_handle_frag_list()
1155 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) in xenvif_handle_frag_list()
1157 uarg = skb_shinfo(skb)->destructor_arg; in xenvif_handle_frag_list()
1159 atomic_inc(&queue->inflight_packets); in xenvif_handle_frag_list()
1160 uarg->ops->complete(NULL, uarg, true); in xenvif_handle_frag_list()
1161 skb_shinfo(skb)->destructor_arg = NULL; in xenvif_handle_frag_list()
1164 memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t)); in xenvif_handle_frag_list()
1165 skb_shinfo(skb)->nr_frags = i; in xenvif_handle_frag_list()
1166 skb->truesize += i * PAGE_SIZE; in xenvif_handle_frag_list()
1173 struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops; in xenvif_tx_submit()
1174 struct gnttab_copy *gop_copy = queue->tx_copy_ops; in xenvif_tx_submit()
1178 while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) { in xenvif_tx_submit()
1183 txp = &queue->pending_tx_info[pending_idx].req; in xenvif_tx_submit()
1191 skb_shinfo(skb)->nr_frags = 0; in xenvif_tx_submit()
1194 skb_shinfo(skb)->frag_list; in xenvif_tx_submit()
1195 skb_shinfo(nskb)->nr_frags = 0; in xenvif_tx_submit()
1201 if (txp->flags & XEN_NETTXF_csum_blank) in xenvif_tx_submit()
1202 skb->ip_summed = CHECKSUM_PARTIAL; in xenvif_tx_submit()
1203 else if (txp->flags & XEN_NETTXF_data_validated) in xenvif_tx_submit()
1204 skb->ip_summed = CHECKSUM_UNNECESSARY; in xenvif_tx_submit()
1209 struct sk_buff *nskb = skb_shinfo(skb)->frag_list; in xenvif_tx_submit()
1213 netdev_err(queue->vif->dev, in xenvif_tx_submit()
1219 /* Copied all the bits from the frag list -- free it. */ in xenvif_tx_submit()
1224 skb->dev = queue->vif->dev; in xenvif_tx_submit()
1225 skb->protocol = eth_type_trans(skb, skb->dev); in xenvif_tx_submit()
1229 netdev_dbg(queue->vif->dev, in xenvif_tx_submit()
1232 if (skb_shinfo(skb)->destructor_arg) in xenvif_tx_submit()
1254 mss = skb_shinfo(skb)->gso_size; in xenvif_tx_submit()
1257 skb_shinfo(skb)->gso_segs = in xenvif_tx_submit()
1258 DIV_ROUND_UP(skb->len - hdrlen, mss); in xenvif_tx_submit()
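
The gso_segs computation at source lines 1257-1258 is plain round-up division of the payload by the MSS. A worked standalone example with assumed frame, header and MSS sizes:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    /* Assumed example: a 65226-byte TSO frame, 54 bytes of headers, MSS 1448. */
    unsigned int len = 65226, hdrlen = 54, mss = 1448;

    /* Same computation as the listing: payload bytes / MSS, rounded up. */
    unsigned int gso_segs = DIV_ROUND_UP(len - hdrlen, mss);

    printf("payload=%u mss=%u -> gso_segs=%u\n", len - hdrlen, mss, gso_segs);
    /* 65172 bytes = 45 full segments plus a 12-byte tail -> 46 */
    return 0;
}
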
1261 queue->stats.rx_bytes += skb->len; in xenvif_tx_submit()
1262 queue->stats.rx_packets++; in xenvif_tx_submit()
1271 if (skb_shinfo(skb)->destructor_arg) { in xenvif_tx_submit()
1273 queue->stats.tx_zerocopy_sent++; in xenvif_tx_submit()
1294 spin_lock_irqsave(&queue->callback_lock, flags); in xenvif_zerocopy_callback()
1296 u16 pending_idx = ubuf->desc; in xenvif_zerocopy_callback()
1297 ubuf = (struct ubuf_info_msgzc *) ubuf->ctx; in xenvif_zerocopy_callback()
1298 BUG_ON(queue->dealloc_prod - queue->dealloc_cons >= in xenvif_zerocopy_callback()
1300 index = pending_index(queue->dealloc_prod); in xenvif_zerocopy_callback()
1301 queue->dealloc_ring[index] = pending_idx; in xenvif_zerocopy_callback()
1306 queue->dealloc_prod++; in xenvif_zerocopy_callback()
1308 spin_unlock_irqrestore(&queue->callback_lock, flags); in xenvif_zerocopy_callback()
1311 queue->stats.tx_zerocopy_success++; in xenvif_zerocopy_callback()
1313 queue->stats.tx_zerocopy_fail++; in xenvif_zerocopy_callback()
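
The zerocopy callback walks the chain of ubuf_info structures linked through ctx, queuing each pending_idx on the dealloc ring for the dealloc kthread to unmap later. A simplified sketch of that walk, without the spinlock, memory barrier and kthread kick the real code uses; the node type and ring here are stand-ins:

#include <stddef.h>
#include <stdio.h>

/* Stand-in for the chained ubuf_info the zerocopy callback walks. */
struct ubuf_node {
    unsigned short desc;       /* pending_idx for this slot */
    struct ubuf_node *ctx;     /* next node in the chain    */
};

#define MAX_PENDING_REQS 256U

static unsigned short dealloc_ring[MAX_PENDING_REQS];
static unsigned int dealloc_prod;

/* Queue every pending_idx on the chain for a later batched unmap. */
static void zerocopy_done(struct ubuf_node *ubuf)
{
    while (ubuf) {
        dealloc_ring[dealloc_prod & (MAX_PENDING_REQS - 1)] = ubuf->desc;
        dealloc_prod++;            /* real code: barrier, then wake the kthread */
        ubuf = ubuf->ctx;
    }
}

int main(void)
{
    struct ubuf_node c = { .desc = 12, .ctx = NULL };
    struct ubuf_node b = { .desc = 7,  .ctx = &c };
    struct ubuf_node a = { .desc = 3,  .ctx = &b };

    zerocopy_done(&a);
    printf("queued %u slots: %u %u %u\n", dealloc_prod,
           dealloc_ring[0], dealloc_ring[1], dealloc_ring[2]);
    return 0;
}
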
1328 dc = queue->dealloc_cons; in xenvif_tx_dealloc_action()
1329 gop = queue->tx_unmap_ops; in xenvif_tx_dealloc_action()
1333 dp = queue->dealloc_prod; in xenvif_tx_dealloc_action()
1341 BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS); in xenvif_tx_dealloc_action()
1343 queue->dealloc_ring[pending_index(dc++)]; in xenvif_tx_dealloc_action()
1345 pending_idx_release[gop - queue->tx_unmap_ops] = in xenvif_tx_dealloc_action()
1347 queue->pages_to_unmap[gop - queue->tx_unmap_ops] = in xenvif_tx_dealloc_action()
1348 queue->mmap_pages[pending_idx]; in xenvif_tx_dealloc_action()
1352 queue->grant_tx_handle[pending_idx]); in xenvif_tx_dealloc_action()
1357 } while (dp != queue->dealloc_prod); in xenvif_tx_dealloc_action()
1359 queue->dealloc_cons = dc; in xenvif_tx_dealloc_action()
1361 if (gop - queue->tx_unmap_ops > 0) { in xenvif_tx_dealloc_action()
1363 ret = gnttab_unmap_refs(queue->tx_unmap_ops, in xenvif_tx_dealloc_action()
1365 queue->pages_to_unmap, in xenvif_tx_dealloc_action()
1366 gop - queue->tx_unmap_ops); in xenvif_tx_dealloc_action()
1368 netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n", in xenvif_tx_dealloc_action()
1369 gop - queue->tx_unmap_ops, ret); in xenvif_tx_dealloc_action()
1370 for (i = 0; i < gop - queue->tx_unmap_ops; ++i) { in xenvif_tx_dealloc_action()
1372 netdev_err(queue->vif->dev, in xenvif_tx_dealloc_action()
1382 for (i = 0; i < gop - queue->tx_unmap_ops; ++i) in xenvif_tx_dealloc_action()
1402 gnttab_batch_copy(queue->tx_copy_ops, nr_cops); in xenvif_tx_action()
1404 ret = gnttab_map_refs(queue->tx_map_ops, in xenvif_tx_action()
1406 queue->pages_to_map, in xenvif_tx_action()
1411 netdev_err(queue->vif->dev, "Map fail: nr %u ret %d\n", in xenvif_tx_action()
1414 WARN_ON_ONCE(queue->tx_map_ops[i].status == in xenvif_tx_action()
1429 RING_IDX i = queue->tx.rsp_prod_pvt; in _make_tx_response()
1432 resp = RING_GET_RESPONSE(&queue->tx, i); in _make_tx_response()
1433 resp->id = txp->id; in _make_tx_response()
1434 resp->status = status; in _make_tx_response()
1436 while (extra_count-- != 0) in _make_tx_response()
1437 RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL; in _make_tx_response()
1439 queue->tx.rsp_prod_pvt = ++i; in _make_tx_response()
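
_make_tx_response() writes one response at the private producer index and pads a NULL response for every extra-info slot the request consumed, only then bumping rsp_prod_pvt; push_tx_responses() later publishes it and notifies the frontend. A compact sketch of that producer-side pattern; the ring layout and the RSP_NULL value are stand-ins:

#include <stdio.h>

#define RING_SIZE  8U
#define RSP_NULL  (-3)     /* stand-in for XEN_NETIF_RSP_NULL */

struct tx_response { unsigned short id; short status; };

/* Stand-in for the producer side of the shared Tx ring. */
struct tx_ring {
    struct tx_response ring[RING_SIZE];
    unsigned int rsp_prod_pvt;   /* private producer index, pushed later */
};

/*
 * One real response for the request, plus a NULL response for every
 * extra-info slot it consumed, mirroring the listing's _make_tx_response().
 */
static void make_tx_response(struct tx_ring *r, unsigned short id, short status,
                             unsigned int extra_count)
{
    unsigned int i = r->rsp_prod_pvt;

    r->ring[i % RING_SIZE] = (struct tx_response){ .id = id, .status = status };
    while (extra_count-- != 0)
        r->ring[++i % RING_SIZE].status = RSP_NULL;

    r->rsp_prod_pvt = ++i;      /* made visible by a later "push responses" */
}

int main(void)
{
    struct tx_ring r = { .rsp_prod_pvt = 0 };

    make_tx_response(&r, 7, 0 /* OKAY */, 1 /* one extra slot */);
    printf("rsp_prod_pvt=%u (one response + one NULL)\n", r.rsp_prod_pvt);
    return 0;
}
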
1446 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); in push_tx_responses()
1448 notify_remote_via_irq(queue->tx_irq); in push_tx_responses()
1458 pending_tx_info = &queue->pending_tx_info[pending_idx]; in xenvif_idx_release()
1460 spin_lock_irqsave(&queue->response_lock, flags); in xenvif_idx_release()
1462 _make_tx_response(queue, &pending_tx_info->req, in xenvif_idx_release()
1463 pending_tx_info->extra_count, status); in xenvif_idx_release()
1465 * Release the pending index before pushing the Tx response so in xenvif_idx_release()
1466 * it's available before a new Tx request is pushed by the in xenvif_idx_release()
1469 index = pending_index(queue->pending_prod++); in xenvif_idx_release()
1470 queue->pending_ring[index] = pending_idx; in xenvif_idx_release()
1474 spin_unlock_irqrestore(&queue->response_lock, flags); in xenvif_idx_release()
1484 spin_lock_irqsave(&queue->response_lock, flags); in make_tx_response()
1489 spin_unlock_irqrestore(&queue->response_lock, flags); in make_tx_response()
1500 queue->grant_tx_handle[pending_idx]); in xenvif_idx_unmap()
1504 &queue->mmap_pages[pending_idx], 1); in xenvif_idx_unmap()
1506 netdev_err(queue->vif->dev, in xenvif_idx_unmap()
1519 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))) in tx_work_todo()
1527 return queue->dealloc_cons != queue->dealloc_prod; in tx_dealloc_work_todo()
1532 if (queue->tx.sring) in xenvif_unmap_frontend_data_rings()
1533 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif), in xenvif_unmap_frontend_data_rings()
1534 queue->tx.sring); in xenvif_unmap_frontend_data_rings()
1535 if (queue->rx.sring) in xenvif_unmap_frontend_data_rings()
1536 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif), in xenvif_unmap_frontend_data_rings()
1537 queue->rx.sring); in xenvif_unmap_frontend_data_rings()
1550 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif), in xenvif_map_frontend_data_rings()
1556 rsp_prod = READ_ONCE(txs->rsp_prod); in xenvif_map_frontend_data_rings()
1557 req_prod = READ_ONCE(txs->req_prod); in xenvif_map_frontend_data_rings()
1559 BACK_RING_ATTACH(&queue->tx, txs, rsp_prod, XEN_PAGE_SIZE); in xenvif_map_frontend_data_rings()
1561 err = -EIO; in xenvif_map_frontend_data_rings()
1562 if (req_prod - rsp_prod > RING_SIZE(&queue->tx)) in xenvif_map_frontend_data_rings()
1565 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif), in xenvif_map_frontend_data_rings()
1571 rsp_prod = READ_ONCE(rxs->rsp_prod); in xenvif_map_frontend_data_rings()
1572 req_prod = READ_ONCE(rxs->req_prod); in xenvif_map_frontend_data_rings()
1574 BACK_RING_ATTACH(&queue->rx, rxs, rsp_prod, XEN_PAGE_SIZE); in xenvif_map_frontend_data_rings()
1576 err = -EIO; in xenvif_map_frontend_data_rings()
1577 if (req_prod - rsp_prod > RING_SIZE(&queue->rx)) in xenvif_map_frontend_data_rings()
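
Both ring attaches reject indices that claim more outstanding requests than the ring can hold; the unsigned subtraction keeps the test valid across index wrap-around. A tiny sketch of the check, with an assumed ring size:

#include <stdio.h>

#define RING_SIZE 256U

/*
 * Reject a ring whose indices claim more in-flight requests than fit in the
 * ring. Unsigned subtraction stays correct even after the indices wrap.
 */
static int ring_indices_sane(unsigned int req_prod, unsigned int rsp_prod)
{
    return (req_prod - rsp_prod) <= RING_SIZE;
}

int main(void)
{
    printf("%d\n", ring_indices_sane(300, 100));               /* 1: 200 in flight */
    printf("%d\n", ring_indices_sane(100, 300));               /* 0: bogus indices */
    printf("%d\n", ring_indices_sane(0x00000010, 0xfffffff0)); /* 1: wrapped       */
    return 0;
}
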
1593 !atomic_read(&queue->inflight_packets); in xenvif_dealloc_kthread_should_stop()
1601 wait_event_interruptible(queue->dealloc_wq, in xenvif_dealloc_kthread()
1622 RING_IDX idx = vif->ctrl.rsp_prod_pvt; in make_ctrl_response()
1624 .id = req->id, in make_ctrl_response()
1625 .type = req->type, in make_ctrl_response()
1630 *RING_GET_RESPONSE(&vif->ctrl, idx) = rsp; in make_ctrl_response()
1631 vif->ctrl.rsp_prod_pvt = ++idx; in make_ctrl_response()
1638 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify); in push_ctrl_response()
1640 notify_remote_via_irq(vif->ctrl_irq); in push_ctrl_response()
1649 switch (req->type) { in process_ctrl_request()
1651 status = xenvif_set_hash_alg(vif, req->data[0]); in process_ctrl_request()
1659 status = xenvif_set_hash_flags(vif, req->data[0]); in process_ctrl_request()
1663 status = xenvif_set_hash_key(vif, req->data[0], in process_ctrl_request()
1664 req->data[1]); in process_ctrl_request()
1674 req->data[0]); in process_ctrl_request()
1678 status = xenvif_set_hash_mapping(vif, req->data[0], in process_ctrl_request()
1679 req->data[1], in process_ctrl_request()
1680 req->data[2]); in process_ctrl_request()
1696 req_prod = vif->ctrl.sring->req_prod; in xenvif_ctrl_action()
1697 req_cons = vif->ctrl.req_cons; in xenvif_ctrl_action()
1708 RING_COPY_REQUEST(&vif->ctrl, req_cons, &req); in xenvif_ctrl_action()
1714 vif->ctrl.req_cons = req_cons; in xenvif_ctrl_action()
1715 vif->ctrl.sring->req_event = req_cons + 1; in xenvif_ctrl_action()
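
Setting req_event = req_cons + 1 asks the frontend to notify only for requests beyond what has already been consumed; re-checking req_prod afterwards closes the race where a request slipped in before the event index was published. A stand-in sketch of that pattern (the real indices live in the shared ring and need memory barriers):

#include <stdio.h>

/* Stand-ins for the shared control ring indices from the listing. */
struct ctrl_ring {
    unsigned int req_prod;    /* written by the frontend                  */
    unsigned int req_event;   /* "notify me at/after this request index"  */
    unsigned int req_cons;    /* backend consumer index                   */
};

/*
 * After draining requests, publish the event index and then re-read
 * req_prod: if more work raced in, process it now instead of waiting for
 * a notification that may never come.
 */
static int more_work(struct ctrl_ring *r)
{
    r->req_event = r->req_cons + 1;
    return r->req_prod != r->req_cons;   /* the re-check closes the race */
}

int main(void)
{
    struct ctrl_ring r = { .req_prod = 5, .req_cons = 5 };

    printf("more=%d\n", more_work(&r));   /* 0: fully drained      */
    r.req_prod = 6;                       /* a request raced in    */
    printf("more=%d\n", more_work(&r));   /* 1: process it now     */
    return 0;
}
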
1721 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl))) in xenvif_ctrl_work_todo()
1747 return -ENODEV; in netback_init()
1767 xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL); in netback_init()
1789 MODULE_ALIAS("xen-backend:vif");