Lines Matching refs:buff_

490 struct aq_ring_buff_s *buff_ = buff; in aq_add_rx_fragment() local
500 buff_ = &ring->buff_ring[buff_->next]; in aq_add_rx_fragment()
502 buff_->rxdata.daddr, in aq_add_rx_fragment()
503 buff_->rxdata.pg_off, in aq_add_rx_fragment()
504 buff_->len, in aq_add_rx_fragment()
506 sinfo->xdp_frags_size += buff_->len; in aq_add_rx_fragment()
507 skb_frag_fill_page_desc(frag, buff_->rxdata.page, in aq_add_rx_fragment()
508 buff_->rxdata.pg_off, in aq_add_rx_fragment()
509 buff_->len); in aq_add_rx_fragment()
511 buff_->is_cleaned = 1; in aq_add_rx_fragment()
513 buff->is_ip_cso &= buff_->is_ip_cso; in aq_add_rx_fragment()
514 buff->is_udp_cso &= buff_->is_udp_cso; in aq_add_rx_fragment()
515 buff->is_tcp_cso &= buff_->is_tcp_cso; in aq_add_rx_fragment()
516 buff->is_cso_err |= buff_->is_cso_err; in aq_add_rx_fragment()
518 if (page_is_pfmemalloc(buff_->rxdata.page)) in aq_add_rx_fragment()
521 } while (!buff_->is_eop); in aq_add_rx_fragment()
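
The aq_add_rx_fragment() matches above all belong to one loop: starting from the head descriptor, the code follows buff_->next through the ring until it reaches a descriptor with is_eop set, adding each fragment's length and page to the packet, marking the slot cleaned, and folding the fragment's checksum status back into the head buffer (AND for the per-protocol "checksum ok" bits, OR for the error bit). Below is a minimal userspace model of that walk; the field names come from the listing, while the simplified types and the harness are assumptions for illustration only. The real function also syncs each fragment's DMA address for CPU access and fills skb_shared_info frags, which the sketch omits.

#include <stdbool.h>
#include <stdio.h>

struct model_rx_buff {
        unsigned int next;      /* index of the next descriptor in the chain */
        unsigned int len;       /* bytes carried by this descriptor */
        bool is_eop;            /* set on the last descriptor of a packet */
        bool is_cleaned;
        bool is_ip_cso, is_udp_cso, is_tcp_cso, is_cso_err;
};

/* Walk head..EOP, returning the total fragment payload (cf. xdp_frags_size). */
static unsigned int gather_fragments(struct model_rx_buff *buff_ring,
                                     struct model_rx_buff *buff)
{
        struct model_rx_buff *buff_ = buff;
        unsigned int total = 0;

        do {
                buff_ = &buff_ring[buff_->next];  /* hop to the chained descriptor */
                total += buff_->len;
                buff_->is_cleaned = true;         /* slot may be refilled later */

                /* packet-level checksum status is the AND of all fragments */
                buff->is_ip_cso &= buff_->is_ip_cso;
                buff->is_udp_cso &= buff_->is_udp_cso;
                buff->is_tcp_cso &= buff_->is_tcp_cso;
                buff->is_cso_err |= buff_->is_cso_err;
        } while (!buff_->is_eop);

        return total;
}

int main(void)
{
        struct model_rx_buff ring[3] = {
                { .next = 1, .is_ip_cso = true, .is_tcp_cso = true },
                { .next = 2, .len = 2048, .is_ip_cso = true, .is_tcp_cso = true },
                { .len = 512, .is_ip_cso = true, .is_tcp_cso = true, .is_eop = true },
        };

        printf("fragment bytes: %u\n", gather_fragments(ring, &ring[0]));
        return 0;
}
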
539 struct aq_ring_buff_s *buff_ = NULL; in __aq_ring_rx_clean() local
550 buff_ = buff; in __aq_ring_rx_clean()
554 if (buff_->next >= self->size) { in __aq_ring_rx_clean()
560 next_ = buff_->next; in __aq_ring_rx_clean()
561 buff_ = &self->buff_ring[next_]; in __aq_ring_rx_clean()
573 buff->is_error |= buff_->is_error; in __aq_ring_rx_clean()
574 buff->is_cso_err |= buff_->is_cso_err; in __aq_ring_rx_clean()
576 } while (!buff_->is_eop); in __aq_ring_rx_clean()
580 buff_ = buff; in __aq_ring_rx_clean()
582 if (buff_->next >= self->size) { in __aq_ring_rx_clean()
586 next_ = buff_->next; in __aq_ring_rx_clean()
587 buff_ = &self->buff_ring[next_]; in __aq_ring_rx_clean()
589 buff_->is_cleaned = true; in __aq_ring_rx_clean()
590 } while (!buff_->is_eop); in __aq_ring_rx_clean()
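
In __aq_ring_rx_clean() the first two groups of matches form a pair of walks over the same chain: a pre-scan that validates each next index against the ring size and ORs every fragment's is_error/is_cso_err into the head descriptor, and, when the packet has to be dropped, a discard walk that only marks every chained slot cleaned so it can be reused. A sketch of that pair follows, under the same simplified model as above; the model_ring type and the -1 error return are assumptions, and any additional per-fragment handling the real function does between these matched lines is omitted.

#include <stdbool.h>

struct model_rx_buff {
        unsigned int next;
        bool is_eop;
        bool is_cleaned;
        bool is_error;
        bool is_cso_err;
};

struct model_ring {
        struct model_rx_buff *buff_ring;
        unsigned int size;
};

/* Pre-scan: fold every fragment's error bits into the head; -1 on a corrupt chain. */
static int scan_chain(struct model_ring *self, struct model_rx_buff *buff)
{
        struct model_rx_buff *buff_ = buff;
        unsigned int next_;

        do {
                if (buff_->next >= self->size)  /* descriptor points outside the ring */
                        return -1;

                next_ = buff_->next;
                buff_ = &self->buff_ring[next_];

                buff->is_error |= buff_->is_error;
                buff->is_cso_err |= buff_->is_cso_err;
        } while (!buff_->is_eop);

        return 0;
}

/* Discard: the packet is bad, so only mark every chained slot reusable. */
static int discard_chain(struct model_ring *self, struct model_rx_buff *buff)
{
        struct model_rx_buff *buff_ = buff;
        unsigned int next_;

        do {
                if (buff_->next >= self->size)
                        return -1;

                next_ = buff_->next;
                buff_ = &self->buff_ring[next_];
                buff_->is_cleaned = true;
        } while (!buff_->is_eop);

        return 0;
}
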
643 buff_ = buff; in __aq_ring_rx_clean()
645 next_ = buff_->next; in __aq_ring_rx_clean()
646 buff_ = &self->buff_ring[next_]; in __aq_ring_rx_clean()
649 buff_->rxdata.daddr, in __aq_ring_rx_clean()
650 buff_->rxdata.pg_off, in __aq_ring_rx_clean()
651 buff_->len, in __aq_ring_rx_clean()
654 buff_->rxdata.page, in __aq_ring_rx_clean()
655 buff_->rxdata.pg_off, in __aq_ring_rx_clean()
656 buff_->len, in __aq_ring_rx_clean()
658 page_ref_inc(buff_->rxdata.page); in __aq_ring_rx_clean()
659 buff_->is_cleaned = 1; in __aq_ring_rx_clean()
661 buff->is_ip_cso &= buff_->is_ip_cso; in __aq_ring_rx_clean()
662 buff->is_udp_cso &= buff_->is_udp_cso; in __aq_ring_rx_clean()
663 buff->is_tcp_cso &= buff_->is_tcp_cso; in __aq_ring_rx_clean()
664 buff->is_cso_err |= buff_->is_cso_err; in __aq_ring_rx_clean()
666 } while (!buff_->is_eop); in __aq_ring_rx_clean()
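
The last group of __aq_ring_rx_clean() matches is the fragment-attach walk for the non-XDP path: each chained descriptor's page is handed to the packet at rxdata.pg_off for buff_->len bytes, its page reference count is bumped (page_ref_inc()) so the page outlives the ring slot, the slot is marked cleaned, and the checksum bits are folded into the head just as in the first sketch. The call that takes rxdata.daddr/pg_off/len at lines 649-651 is presumably a DMA sync for CPU access; that call and the skb frag API are not shown in the listing, so the sketch below replaces them with a plain refcount and an output array, which are assumptions for illustration.

#include <stdbool.h>

struct model_page {
        int refcount;
};

struct model_rxdata {
        struct model_page *page;
        unsigned int pg_off;
};

struct model_rx_buff {
        struct model_rxdata rxdata;
        unsigned int next;
        unsigned int len;
        bool is_eop;
        bool is_cleaned;
};

struct model_frag {
        struct model_page *page;
        unsigned int off;
        unsigned int len;
};

/* Attach each chained fragment's page to the packet; returns the fragment count. */
static unsigned int attach_fragments(struct model_rx_buff *buff_ring,
                                     struct model_rx_buff *buff,
                                     struct model_frag *frags)
{
        struct model_rx_buff *buff_ = buff;
        unsigned int n = 0, next_;

        do {
                next_ = buff_->next;
                buff_ = &buff_ring[next_];

                /* the driver syncs the fragment's DMA address for CPU access here */
                frags[n].page = buff_->rxdata.page;
                frags[n].off = buff_->rxdata.pg_off;
                frags[n].len = buff_->len;
                n++;

                buff_->rxdata.page->refcount++; /* stands in for page_ref_inc() */
                buff_->is_cleaned = true;       /* slot may be refilled now */

                /* checksum bits fold into the head exactly as in the first sketch */
        } while (!buff_->is_eop);

        return n;
}
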
715 struct aq_ring_buff_s *buff_ = NULL; in __aq_ring_xdp_clean() local
727 buff_ = buff; in __aq_ring_xdp_clean()
729 if (buff_->next >= rx_ring->size) { in __aq_ring_xdp_clean()
733 next_ = buff_->next; in __aq_ring_xdp_clean()
734 buff_ = &rx_ring->buff_ring[next_]; in __aq_ring_xdp_clean()
743 buff->is_error |= buff_->is_error; in __aq_ring_xdp_clean()
744 buff->is_cso_err |= buff_->is_cso_err; in __aq_ring_xdp_clean()
745 } while (!buff_->is_eop); in __aq_ring_xdp_clean()
753 buff_ = buff; in __aq_ring_xdp_clean()
755 if (buff_->next >= rx_ring->size) { in __aq_ring_xdp_clean()
759 next_ = buff_->next; in __aq_ring_xdp_clean()
760 buff_ = &rx_ring->buff_ring[next_]; in __aq_ring_xdp_clean()
762 buff_->is_cleaned = true; in __aq_ring_xdp_clean()
763 } while (!buff_->is_eop); in __aq_ring_xdp_clean()
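
The __aq_ring_xdp_clean() matches repeat the same pre-scan and discard walks as __aq_ring_rx_clean(), only against rx_ring instead of self. The driver keeps the two copies separate; purely to illustrate the shared shape (no such helper exists in the driver, this is a hypothetical refactor sketch), the walk can be parametrized over the ring and a per-descriptor action:

#include <stdbool.h>

struct model_rx_buff {
        unsigned int next;
        bool is_eop;
        bool is_cleaned;
        bool is_error;
        bool is_cso_err;
};

struct model_ring {
        struct model_rx_buff *buff_ring;
        unsigned int size;
};

/* Walk head..EOP, applying cb to every chained descriptor; -1 on a corrupt chain. */
static int walk_chain(struct model_ring *ring, struct model_rx_buff *head,
                      void (*cb)(struct model_rx_buff *head,
                                 struct model_rx_buff *frag))
{
        struct model_rx_buff *buff_ = head;

        do {
                if (buff_->next >= ring->size)
                        return -1;

                buff_ = &ring->buff_ring[buff_->next];
                cb(head, buff_);
        } while (!buff_->is_eop);

        return 0;
}

/* pre-scan action: walk_chain(ring, head, fold_errors) */
static void fold_errors(struct model_rx_buff *head, struct model_rx_buff *frag)
{
        head->is_error |= frag->is_error;
        head->is_cso_err |= frag->is_cso_err;
}

/* discard action: walk_chain(ring, head, mark_cleaned) */
static void mark_cleaned(struct model_rx_buff *head, struct model_rx_buff *frag)
{
        (void)head;
        frag->is_cleaned = true;
}
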