Lines matching "+full:num +full:- +full:txq" in drivers/net/xen-netback/rx.c

/* Copyright (c) 2002-2005, K A Fraser */
In xenvif_update_needed_slots():

	needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
	if (skb->sw_hash)
		needed++;

	WRITE_ONCE(queue->rx_slots_needed, needed);
In xenvif_rx_ring_slots_available():

	needed = READ_ONCE(queue->rx_slots_needed);

	do {
		prod = queue->rx.sring->req_prod;
		cons = queue->rx.req_cons;

		if (prod - cons >= needed)
			return true;

		queue->rx.sring->req_event = prod + 1;

		/* Barrier, then re-check whether new requests arrived. */
	} while (queue->rx.sring->req_prod != prod);
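The prod - cons >= needed test above relies on free-running unsigned ring indices: both counters increase forever and are never masked, so the unsigned subtraction yields the number of outstanding requests even after either index wraps. A minimal standalone sketch of that idiom (ring_idx_t and the values are illustrative, not from the driver):

	#include <stdio.h>
	#include <stdint.h>

	typedef uint32_t ring_idx_t;	/* free-running, never masked */

	/* Requests the frontend has posted that we have not consumed. */
	static ring_idx_t slots_queued(ring_idx_t prod, ring_idx_t cons)
	{
		return prod - cons;	/* correct even after prod wraps past 0 */
	}

	int main(void)
	{
		/* prod has wrapped past UINT32_MAX; cons has not yet. */
		ring_idx_t prod = 3, cons = 0xfffffffe;

		printf("queued: %u\n", slots_queued(prod, cons));	/* 5 */
		return 0;
	}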
In xenvif_rx_queue_tail():

	spin_lock_irqsave(&queue->rx_queue.lock, flags);

	if (queue->rx_queue_len >= queue->rx_queue_max) {
		struct net_device *dev = queue->vif->dev;

		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
		/* ... drop the skb and count it ... */
	} else {
		if (skb_queue_empty(&queue->rx_queue))
			xenvif_update_needed_slots(queue, skb);

		__skb_queue_tail(&queue->rx_queue, skb);

		queue->rx_queue_len += skb->len;
	}

	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
In xenvif_rx_dequeue():

	spin_lock_irq(&queue->rx_queue.lock);

	skb = __skb_dequeue(&queue->rx_queue);
	if (skb) {
		xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue));

		queue->rx_queue_len -= skb->len;
		if (queue->rx_queue_len < queue->rx_queue_max) {
			struct netdev_queue *txq;

			txq = netdev_get_tx_queue(queue->vif->dev, queue->id);
			netif_tx_wake_queue(txq);
		}
	}

	spin_unlock_irq(&queue->rx_queue.lock);
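Together, xenvif_rx_queue_tail() and xenvif_rx_dequeue() form a byte-accounted backpressure pair: the TX queue feeding this path is stopped once rx_queue_len reaches rx_queue_max and woken as soon as dequeuing brings it back below the limit. A minimal userspace sketch of that accounting under one lock (struct byte_queue and the function names are illustrative):

	#include <stdbool.h>
	#include <stddef.h>
	#include <pthread.h>

	struct byte_queue {
		pthread_mutex_t lock;
		size_t len;	/* bytes currently queued */
		size_t max;	/* watermark: stop producer at/above this */
		bool stopped;	/* producer-side flow-control flag */
	};

	/* Returns false (and drops) when the queue is full, mirroring the
	 * stop-and-drop behaviour in xenvif_rx_queue_tail(). */
	static bool byte_queue_add(struct byte_queue *q, size_t nbytes)
	{
		bool ok;

		pthread_mutex_lock(&q->lock);
		if (q->len >= q->max) {
			q->stopped = true;	/* ~ netif_tx_stop_queue() */
			ok = false;
		} else {
			q->len += nbytes;
			ok = true;
		}
		pthread_mutex_unlock(&q->lock);
		return ok;
	}

	static void byte_queue_remove(struct byte_queue *q, size_t nbytes)
	{
		pthread_mutex_lock(&q->lock);
		q->len -= nbytes;
		if (q->len < q->max)
			q->stopped = false;	/* ~ netif_tx_wake_queue() */
		pthread_mutex_unlock(&q->lock);
	}

	int main(void)
	{
		struct byte_queue q = {
			.lock = PTHREAD_MUTEX_INITIALIZER,
			.len = 0, .max = 1000, .stopped = false,
		};

		byte_queue_add(&q, 600);	/* fits */
		byte_queue_add(&q, 600);	/* fits: 600 < max at entry */
		byte_queue_add(&q, 1);		/* full: dropped, stopped = true */
		byte_queue_remove(&q, 600);	/* below max: stopped = false */
		return 0;
	}

As in the driver, the check happens before adding, so the total may overshoot the watermark by one packet; the producer is only stopped on the next attempt.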
In xenvif_rx_queue_drop_expired():

	skb = skb_peek(&queue->rx_queue);
	if (!skb)
		break;
	if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
		break;
	xenvif_rx_dequeue(queue);
	kfree_skb(skb);
	queue->vif->dev->stats.rx_dropped++;
In xenvif_rx_copy_flush():

	gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);

	for (i = 0; i < queue->rx_copy.num; i++) {
		struct gnttab_copy *op;

		op = &queue->rx_copy.op[i];

		/* If the copy failed, overwrite the status field in
		 * the corresponding response. */
		if (unlikely(op->status != GNTST_okay)) {
			struct xen_netif_rx_response *rsp;

			rsp = RING_GET_RESPONSE(&queue->rx,
						queue->rx_copy.idx[i]);
			rsp->status = op->status;
		}
	}

	queue->rx_copy.num = 0;

	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
	if (notify)
		notify_remote_via_irq(queue->rx_irq);

	__skb_queue_purge(queue->rx_copy.completed);
In xenvif_rx_copy_add():

	if (queue->rx_copy.num == COPY_BATCH_SIZE)
		xenvif_rx_copy_flush(queue);

	op = &queue->rx_copy.op[queue->rx_copy.num];

	op->flags = GNTCOPY_dest_gref;

	foreign = xen_page_foreign(page);
	if (foreign) {
		op->source.domid = foreign->domid;
		op->source.u.ref = foreign->gref;
		op->flags |= GNTCOPY_source_gref;
	} else {
		op->source.u.gmfn = virt_to_gfn(data);
		op->source.domid = DOMID_SELF;
	}

	op->source.offset = xen_offset_in_page(data);
	op->dest.u.ref = req->gref;
	op->dest.domid = queue->vif->domid;
	op->dest.offset = offset;
	op->len = len;

	queue->rx_copy.idx[queue->rx_copy.num] = queue->rx.req_cons;
	queue->rx_copy.num++;
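xenvif_rx_copy_add() shows the standard fill-then-flush batching idiom: grant-copy operations accumulate in a fixed array and are handed to the hypervisor in one gnttab_batch_copy() call, either when the array is full (COPY_BATCH_SIZE) or at the end of an RX pass. A generic sketch of the same shape (batch_add, batch_flush and the size are illustrative):

	#include <stdio.h>

	#define BATCH_SIZE 4

	struct batch {
		int op[BATCH_SIZE];
		unsigned int num;
	};

	/* Submit everything accumulated so far in one call, then reset. */
	static void batch_flush(struct batch *b)
	{
		if (b->num == 0)
			return;
		printf("submitting %u ops\n", b->num);	/* ~ gnttab_batch_copy() */
		b->num = 0;
	}

	/* Queue one op, flushing first if the fixed-size array is full. */
	static void batch_add(struct batch *b, int op)
	{
		if (b->num == BATCH_SIZE)
			batch_flush(b);
		b->op[b->num++] = op;
	}

	int main(void)
	{
		struct batch b = { .num = 0 };

		for (int i = 0; i < 10; i++)
			batch_add(&b, i);
		batch_flush(&b);	/* final partial batch, as at end of an RX pass */
		return 0;
	}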
In xenvif_gso_type():

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		return XEN_NETIF_GSO_TYPE_TCPV4;
In struct xenvif_pkt_state:

	int frag; /* frag == -1 => frag_iter->head */
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
In xenvif_rx_next_skb():

	queue->stats.tx_bytes += skb->len;
	queue->stats.tx_packets++;

	pkt->skb = skb;
	pkt->frag_iter = skb;
	pkt->remaining_len = skb->len;
	pkt->frag = -1;

	gso_type = xenvif_gso_type(skb);
	if ((1 << gso_type) & queue->vif->gso_mask) {
		struct xen_netif_extra_info *extra;

		extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

		extra->u.gso.type = gso_type;
		extra->u.gso.size = skb_shinfo(skb)->gso_size;
		extra->u.gso.pad = 0;
		extra->u.gso.features = 0;
		extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
		extra->flags = 0;

		pkt->extra_count++;
	}

	if (queue->vif->xdp_headroom) {
		struct xen_netif_extra_info *extra;

		extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];

		extra->u.xdp.headroom = queue->vif->xdp_headroom;
		extra->type = XEN_NETIF_EXTRA_TYPE_XDP;
		extra->flags = 0;

		pkt->extra_count++;
	}

	if (skb->sw_hash) {
		struct xen_netif_extra_info *extra;

		extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];

		extra->u.hash.algorithm =
			XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;

		if (skb->l4_hash)
			extra->u.hash.type =
				skb->protocol == htons(ETH_P_IP) ?
				_XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
				_XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
		else
			extra->u.hash.type =
				skb->protocol == htons(ETH_P_IP) ?
				_XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
				_XEN_NETIF_CTRL_HASH_TYPE_IPV6;

		*(uint32_t *)extra->u.hash.value = skb_get_hash_raw(skb);

		extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
		extra->flags = 0;

		pkt->extra_count++;
	}
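Note the extras layout: each extra type gets exactly one slot in pkt->extras[], indexed by type - 1 (type 0 is the "none" type and needs no slot), and a zero type field marks a slot empty, which is what xenvif_rx_extra_slot() scans for later. A small sketch of that sparse-by-type layout (the enum values are illustrative, loosely modelled on the netif ABI):

	#include <stdio.h>
	#include <string.h>

	enum extra_type { EXTRA_NONE, EXTRA_GSO, EXTRA_HASH, EXTRA_MAX };

	struct extra { enum extra_type type; int payload; };

	int main(void)
	{
		/* One slot per non-NONE type, indexed by type - 1. */
		struct extra extras[EXTRA_MAX - 1];

		memset(extras, 0, sizeof(extras));	/* type == 0 means "unused" */

		extras[EXTRA_HASH - 1] = (struct extra){ .type = EXTRA_HASH,
							 .payload = 42 };

		/* Consumer scans for occupied slots, as the driver does. */
		for (int i = 0; i < EXTRA_MAX - 1; i++)
			if (extras[i].type)
				printf("extra type %d, payload %d\n",
				       extras[i].type, extras[i].payload);
		return 0;
	}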
In xenvif_rx_complete():

	/* All responses are ready to be pushed. */
	queue->rx.rsp_prod_pvt = queue->rx.req_cons;

	__skb_queue_tail(queue->rx_copy.completed, pkt->skb);
In xenvif_rx_next_frag():

	struct sk_buff *frag_iter = pkt->frag_iter;
	unsigned int nr_frags = skb_shinfo(frag_iter)->nr_frags;

	pkt->frag++;
	pkt->frag_offset = 0;

	if (pkt->frag >= nr_frags) {
		if (frag_iter == pkt->skb)
			pkt->frag_iter = skb_shinfo(frag_iter)->frag_list;
		else
			pkt->frag_iter = frag_iter->next;

		pkt->frag = -1;
	}
In xenvif_rx_next_chunk():

	struct sk_buff *frag_iter = pkt->frag_iter;

	if (pkt->frag == -1) {
		frag_data = frag_iter->data;
		frag_len = skb_headlen(frag_iter);
	} else {
		skb_frag_t *frag = &skb_shinfo(frag_iter)->frags[pkt->frag];

		frag_data = skb_frag_address(frag);
		frag_len = skb_frag_size(frag);
	}

	frag_data += pkt->frag_offset;
	frag_len -= pkt->frag_offset;

	chunk_len = min_t(size_t, frag_len, XEN_PAGE_SIZE - offset);
	chunk_len = min_t(size_t, chunk_len, XEN_PAGE_SIZE -
					     xen_offset_in_page(frag_data));

	pkt->frag_offset += chunk_len;
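The two min_t() clamps above bound each copy chunk by three limits at once: bytes left in the current fragment, space left in the destination ring slot (a Xen page), and bytes left before the source address crosses its own page boundary, since a grant copy cannot cross a page. A userspace sketch of the same clamping (the PAGE_SIZE value and names are illustrative):

	#include <stdio.h>
	#include <stdint.h>
	#include <stddef.h>

	#define PAGE_SIZE 4096UL

	static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

	/* Largest chunk copyable from src given the slot fill level. */
	static size_t next_chunk_len(uintptr_t src, size_t frag_left,
				     size_t slot_offset)
	{
		size_t len = frag_left;

		len = min_sz(len, PAGE_SIZE - slot_offset);		/* dest space */
		len = min_sz(len, PAGE_SIZE - (src & (PAGE_SIZE - 1)));	/* src page */
		return len;
	}

	int main(void)
	{
		/* Source starts 100 bytes before a page boundary, so the
		 * first chunk is limited to 100 bytes even though the
		 * destination slot has more room. */
		uintptr_t src = 2 * PAGE_SIZE - 100;

		printf("%zu\n", next_chunk_len(src, 1000, 0));		/* 100 */
		printf("%zu\n", next_chunk_len(src + 100, 900, 100));	/* 900 */
		return 0;
	}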
In xenvif_rx_data_slot():

	unsigned int offset = queue->vif->xdp_headroom;

	do {
		void *data;
		size_t len;

		xenvif_rx_next_chunk(queue, pkt, offset, &data, &len);
		xenvif_rx_copy_add(queue, req, offset, data, len);

		offset += len;
		pkt->remaining_len -= len;
	} while (offset < XEN_PAGE_SIZE && pkt->remaining_len > 0);

	if (pkt->remaining_len > 0)
		flags = XEN_NETRXF_more_data;
	else
		flags = 0;

	if (pkt->slot == 0) {
		struct sk_buff *skb = pkt->skb;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			flags |= XEN_NETRXF_csum_blank |
				 XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			flags |= XEN_NETRXF_data_validated;

		if (pkt->extra_count != 0)
			flags |= XEN_NETRXF_extra_info;
	}

	rsp->offset = 0;
	rsp->flags = flags;
	rsp->id = req->id;
	rsp->status = (s16)offset;
In xenvif_rx_extra_slot():

	pkt->extra_count--;

	for (i = 0; i < ARRAY_SIZE(pkt->extras); i++) {
		if (pkt->extras[i].type) {
			*extra = pkt->extras[i];

			if (pkt->extra_count != 0)
				extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;

			pkt->extras[i].type = 0;
			return;
		}
	}
In xenvif_rx_skb():

	queue->last_rx_time = jiffies;

	req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons);
	rsp = RING_GET_RESPONSE(&queue->rx, queue->rx.req_cons);

	/* ... fill rsp as a data or extra-info slot ... */

	queue->rx.req_cons++;
In xenvif_rx_action():

	queue->rx_copy.completed = &completed_skbs;

	while (xenvif_rx_ring_slots_available(queue) &&
	       !skb_queue_empty(&queue->rx_queue) &&
	       work_done < RX_BATCH_SIZE) {
		xenvif_rx_skb(queue);
		work_done++;
	}
In xenvif_rx_queue_slots():

	prod = queue->rx.sring->req_prod;
	cons = queue->rx.req_cons;

	return prod - cons;
In xenvif_rx_queue_stalled():

	unsigned int needed = READ_ONCE(queue->rx_slots_needed);

	return !queue->stalled &&
		xenvif_rx_queue_slots(queue) < needed &&
		time_after(jiffies,
			   queue->last_rx_time + queue->vif->stall_timeout);
In xenvif_rx_queue_ready():

	unsigned int needed = READ_ONCE(queue->rx_slots_needed);

	return queue->stalled && xenvif_rx_queue_slots(queue) >= needed;
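xenvif_rx_queue_stalled() and xenvif_rx_queue_ready() implement hysteresis: a queue is declared stalled only after the frontend has posted fewer slots than needed for longer than stall_timeout, and declared ready again as soon as enough slots appear. A compact standalone sketch of that state machine (the struct, the now parameter, and the tick values are illustrative):

	#include <stdbool.h>

	struct rx_state {
		bool stalled;
		unsigned long last_rx_time;
		unsigned long stall_timeout;
	};

	/* Enter the stalled state only after a sustained slot shortage. */
	static bool check_stalled(const struct rx_state *s, unsigned int slots,
				  unsigned int needed, unsigned long now)
	{
		return !s->stalled && slots < needed &&
		       (long)(now - (s->last_rx_time + s->stall_timeout)) > 0;
	}

	/* Leave the stalled state as soon as the shortage clears. */
	static bool check_ready(const struct rx_state *s, unsigned int slots,
				unsigned int needed)
	{
		return s->stalled && slots >= needed;
	}

	int main(void)
	{
		struct rx_state s = { .stalled = false, .last_rx_time = 0,
				      .stall_timeout = 100 };

		if (check_stalled(&s, 2, 8, 150))	/* 50 ticks past deadline */
			s.stalled = true;
		if (check_ready(&s, 8, 8))		/* slots recovered */
			s.stalled = false;
		return 0;
	}

The signed comparison against the difference of free-running counters is the same wrap-safe pattern the kernel's time_after() uses.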
In xenvif_have_rx_work():

	return xenvif_rx_ring_slots_available(queue) ||
		(queue->vif->stall_timeout &&
		 (xenvif_rx_queue_stalled(queue) ||
		  xenvif_rx_queue_ready(queue))) ||
		(test_kthread && kthread_should_stop()) ||
		queue->vif->disabled;
In xenvif_rx_queue_timeout():

	skb = skb_peek(&queue->rx_queue);
	if (!skb)
		timeout = MAX_SCHEDULE_TIMEOUT;
	else
		timeout = XENVIF_RX_CB(skb)->expires - jiffies;
In xenvif_wait_for_rx_work():

	prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
	if (xenvif_have_rx_work(queue, true))
		break;
	if (atomic_fetch_andnot(NETBK_RX_EOI | NETBK_COMMON_EOI,
				&queue->eoi_pending) &
	    (NETBK_RX_EOI | NETBK_COMMON_EOI))
		xen_irq_lateeoi(queue->rx_irq, 0);
	...
	finish_wait(&queue->wq, &wait);
In xenvif_queue_carrier_off():

	struct xenvif *vif = queue->vif;

	queue->stalled = true;

	/* At least one queue has stalled? Disable the carrier. */
	spin_lock(&vif->lock);
	if (vif->stalled_queues++ == 0) {
		netdev_info(vif->dev, "Guest Rx stalled");
		netif_carrier_off(vif->dev);
	}
	spin_unlock(&vif->lock);
In xenvif_queue_carrier_on():

	struct xenvif *vif = queue->vif;

	queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
	queue->stalled = false;

	/* All queues are ready? Enable the carrier. */
	spin_lock(&vif->lock);
	if (--vif->stalled_queues == 0) {
		netdev_info(vif->dev, "Guest Rx ready");
		netif_carrier_on(vif->dev);
	}
	spin_unlock(&vif->lock);
In xenvif_kthread_guest_rx():

	struct xenvif *vif = queue->vif;

	if (!vif->stall_timeout)
		xenvif_queue_carrier_on(queue);
	...
	if (unlikely(vif->disabled && queue->id == 0)) {
		xenvif_carrier_off(vif);
		break;
	}

	if (!skb_queue_empty(&queue->rx_queue))
		xenvif_rx_action(queue);
	...
	if (vif->stall_timeout) {
		if (xenvif_rx_queue_stalled(queue))
			xenvif_queue_carrier_off(queue);
		else if (xenvif_rx_queue_ready(queue))
			xenvif_queue_carrier_on(queue);
	}