Lines matching refs: bufq
398 static void idpf_rx_hdr_buf_rel_all(struct idpf_buf_queue *bufq) in idpf_rx_hdr_buf_rel_all() argument
401 .fqes = bufq->hdr_buf, in idpf_rx_hdr_buf_rel_all()
402 .pp = bufq->hdr_pp, in idpf_rx_hdr_buf_rel_all()
405 for (u32 i = 0; i < bufq->desc_count; i++) in idpf_rx_hdr_buf_rel_all()
406 idpf_rx_page_rel(&bufq->hdr_buf[i]); in idpf_rx_hdr_buf_rel_all()
409 bufq->hdr_buf = NULL; in idpf_rx_hdr_buf_rel_all()
410 bufq->hdr_pp = NULL; in idpf_rx_hdr_buf_rel_all()
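
Lines 398-410 above are the bufq-matching slice of the header-buffer teardown. Filling the gaps from the libeth fill-queue API (the libeth_rx_fq_destroy() call and the local declarations are inferred, since the match filter only shows lines containing bufq), the function plausibly reads:

    static void idpf_rx_hdr_buf_rel_all(struct idpf_buf_queue *bufq)
    {
            struct libeth_fq fq = {
                    .fqes   = bufq->hdr_buf,
                    .pp     = bufq->hdr_pp,
            };

            /* release every header page handed to the queue */
            for (u32 i = 0; i < bufq->desc_count; i++)
                    idpf_rx_page_rel(&bufq->hdr_buf[i]);

            /* assumed: tear down the libeth fill queue / page pool */
            libeth_rx_fq_destroy(&fq);
            bufq->hdr_buf = NULL;
            bufq->hdr_pp = NULL;
    }
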
417 static void idpf_rx_buf_rel_bufq(struct idpf_buf_queue *bufq) in idpf_rx_buf_rel_bufq() argument
420 .fqes = bufq->buf, in idpf_rx_buf_rel_bufq()
421 .pp = bufq->pp, in idpf_rx_buf_rel_bufq()
425 if (!bufq->buf) in idpf_rx_buf_rel_bufq()
429 for (u32 i = 0; i < bufq->desc_count; i++) in idpf_rx_buf_rel_bufq()
430 idpf_rx_page_rel(&bufq->buf[i]); in idpf_rx_buf_rel_bufq()
432 if (idpf_queue_has(HSPLIT_EN, bufq)) in idpf_rx_buf_rel_bufq()
433 idpf_rx_hdr_buf_rel_all(bufq); in idpf_rx_buf_rel_bufq()
436 bufq->buf = NULL; in idpf_rx_buf_rel_bufq()
437 bufq->pp = NULL; in idpf_rx_buf_rel_bufq()
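
Lines 417-437 show the payload-buffer counterpart: bail out early when the buffer array was never allocated, release each page, and, when header split is enabled on the queue, drop the header buffers as well. A hedged reconstruction along the same lines:

    static void idpf_rx_buf_rel_bufq(struct idpf_buf_queue *bufq)
    {
            struct libeth_fq fq = {
                    .fqes   = bufq->buf,
                    .pp     = bufq->pp,
            };

            /* nothing was ever allocated for this queue */
            if (!bufq->buf)
                    return;

            for (u32 i = 0; i < bufq->desc_count; i++)
                    idpf_rx_page_rel(&bufq->buf[i]);

            if (idpf_queue_has(HSPLIT_EN, bufq))
                    idpf_rx_hdr_buf_rel_all(bufq);

            /* assumed, mirroring the header path above */
            libeth_rx_fq_destroy(&fq);
            bufq->buf = NULL;
            bufq->pp = NULL;
    }
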
499 static void idpf_rx_desc_rel_bufq(struct idpf_buf_queue *bufq, in idpf_rx_desc_rel_bufq() argument
502 if (!bufq) in idpf_rx_desc_rel_bufq()
505 idpf_rx_buf_rel_bufq(bufq); in idpf_rx_desc_rel_bufq()
507 bufq->next_to_alloc = 0; in idpf_rx_desc_rel_bufq()
508 bufq->next_to_clean = 0; in idpf_rx_desc_rel_bufq()
509 bufq->next_to_use = 0; in idpf_rx_desc_rel_bufq()
511 if (!bufq->split_buf) in idpf_rx_desc_rel_bufq()
514 dma_free_coherent(dev, bufq->size, bufq->split_buf, bufq->dma); in idpf_rx_desc_rel_bufq()
515 bufq->split_buf = NULL; in idpf_rx_desc_rel_bufq()
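
Lines 499-515 tie buffer release to descriptor-ring teardown: free the buffers, reset the three ring indices, then return the DMA-coherent descriptor ring if one was allocated. Nearly the whole body is in the matches; the struct device *dev parameter is confirmed by the call site at line 556, and only the early returns are inferred:

    static void idpf_rx_desc_rel_bufq(struct idpf_buf_queue *bufq,
                                      struct device *dev)
    {
            if (!bufq)
                    return;

            idpf_rx_buf_rel_bufq(bufq);

            bufq->next_to_alloc = 0;
            bufq->next_to_clean = 0;
            bufq->next_to_use = 0;

            if (!bufq->split_buf)
                    return;

            dma_free_coherent(dev, bufq->size, bufq->split_buf, bufq->dma);
            bufq->split_buf = NULL;
    }
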
556 idpf_rx_desc_rel_bufq(&bufq_set->bufq, dev); in idpf_rx_desc_rel_all()
566 static void idpf_rx_buf_hw_update(struct idpf_buf_queue *bufq, u32 val) in idpf_rx_buf_hw_update() argument
568 bufq->next_to_use = val; in idpf_rx_buf_hw_update()
570 if (unlikely(!bufq->tail)) in idpf_rx_buf_hw_update()
574 writel(val, bufq->tail); in idpf_rx_buf_hw_update()
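
Lines 566-574 are almost the entire tail-bump helper: record the new next_to_use and, if the queue has a mapped tail register, ring the doorbell. Only the early return is hidden by the match filter; in full it is roughly:

    static void idpf_rx_buf_hw_update(struct idpf_buf_queue *bufq, u32 val)
    {
            bufq->next_to_use = val;

            /* tail may not be mapped yet; skip the doorbell */
            if (unlikely(!bufq->tail))
                    return;

            /* writel() provides the ordering needed before the doorbell */
            writel(val, bufq->tail);
    }
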
583 static int idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq) in idpf_rx_hdr_buf_alloc_all() argument
586 .count = bufq->desc_count, in idpf_rx_hdr_buf_alloc_all()
588 .nid = idpf_q_vector_to_mem(bufq->q_vector), in idpf_rx_hdr_buf_alloc_all()
592 ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi); in idpf_rx_hdr_buf_alloc_all()
596 bufq->hdr_pp = fq.pp; in idpf_rx_hdr_buf_alloc_all()
597 bufq->hdr_buf = fq.fqes; in idpf_rx_hdr_buf_alloc_all()
598 bufq->hdr_truesize = fq.truesize; in idpf_rx_hdr_buf_alloc_all()
599 bufq->rx_hbuf_size = fq.buf_len; in idpf_rx_hdr_buf_alloc_all()
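
Lines 583-599 create the header fill queue through libeth and cache the resulting page pool, buffer array, truesize, and buffer length on the queue. The .type initializer and the return paths are assumptions based on the libeth_rx_fq_create() API:

    static int idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq)
    {
            struct libeth_fq fq = {
                    .count  = bufq->desc_count,
                    .type   = LIBETH_FQE_HDR,       /* assumed */
                    .nid    = idpf_q_vector_to_mem(bufq->q_vector),
            };
            int ret;

            ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi);
            if (ret)
                    return ret;

            bufq->hdr_pp = fq.pp;
            bufq->hdr_buf = fq.fqes;
            bufq->hdr_truesize = fq.truesize;
            bufq->rx_hbuf_size = fq.buf_len;

            return 0;
    }
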
634 static bool idpf_rx_post_buf_desc(struct idpf_buf_queue *bufq, u16 buf_id) in idpf_rx_post_buf_desc() argument
638 .count = bufq->desc_count, in idpf_rx_post_buf_desc()
640 u16 nta = bufq->next_to_alloc; in idpf_rx_post_buf_desc()
643 splitq_rx_desc = &bufq->split_buf[nta]; in idpf_rx_post_buf_desc()
645 if (idpf_queue_has(HSPLIT_EN, bufq)) { in idpf_rx_post_buf_desc()
646 fq.pp = bufq->hdr_pp; in idpf_rx_post_buf_desc()
647 fq.fqes = bufq->hdr_buf; in idpf_rx_post_buf_desc()
648 fq.truesize = bufq->hdr_truesize; in idpf_rx_post_buf_desc()
657 fq.pp = bufq->pp; in idpf_rx_post_buf_desc()
658 fq.fqes = bufq->buf; in idpf_rx_post_buf_desc()
659 fq.truesize = bufq->truesize; in idpf_rx_post_buf_desc()
669 if (unlikely(nta == bufq->desc_count)) in idpf_rx_post_buf_desc()
671 bufq->next_to_alloc = nta; in idpf_rx_post_buf_desc()
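
Lines 634-671 post one payload buffer (and, with header split, one header buffer) into the split-queue buffer descriptor at next_to_alloc, then advance the index with wraparound. The descriptor type and field names (hdr_addr, pkt_addr, buf_id) and the libeth_rx_alloc()/DMA_MAPPING_ERROR handling are inferred from the virtchnl2 split-queue layout and libeth, so treat this as a sketch:

    static bool idpf_rx_post_buf_desc(struct idpf_buf_queue *bufq, u16 buf_id)
    {
            struct virtchnl2_splitq_rx_buf_desc *splitq_rx_desc;
            struct libeth_fq_fp fq = {
                    .count  = bufq->desc_count,
            };
            u16 nta = bufq->next_to_alloc;
            dma_addr_t addr;

            splitq_rx_desc = &bufq->split_buf[nta];

            if (idpf_queue_has(HSPLIT_EN, bufq)) {
                    fq.pp = bufq->hdr_pp;
                    fq.fqes = bufq->hdr_buf;
                    fq.truesize = bufq->hdr_truesize;

                    addr = libeth_rx_alloc(&fq, buf_id);
                    if (addr == DMA_MAPPING_ERROR)
                            return false;

                    splitq_rx_desc->hdr_addr = cpu_to_le64(addr);
            }

            fq.pp = bufq->pp;
            fq.fqes = bufq->buf;
            fq.truesize = bufq->truesize;

            addr = libeth_rx_alloc(&fq, buf_id);
            if (addr == DMA_MAPPING_ERROR)
                    return false;

            splitq_rx_desc->pkt_addr = cpu_to_le64(addr);
            splitq_rx_desc->qword0.buf_id = cpu_to_le16(buf_id);

            nta++;
            if (unlikely(nta == bufq->desc_count))
                    nta = 0;
            bufq->next_to_alloc = nta;

            return true;
    }
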
683 static bool idpf_rx_post_init_bufs(struct idpf_buf_queue *bufq, in idpf_rx_post_init_bufs() argument
689 if (!idpf_rx_post_buf_desc(bufq, i)) in idpf_rx_post_init_bufs()
693 idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq->next_to_alloc, in idpf_rx_post_init_bufs()
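
Lines 683-693 seed the ring at init time: post a working set of descriptors (buffer IDs 0..working_set-1) and publish them with one stride-aligned tail write rather than one doorbell per buffer. The second parameter name and the stride constant are assumptions:

    static bool idpf_rx_post_init_bufs(struct idpf_buf_queue *bufq,
                                       u16 working_set)
    {
            for (int i = 0; i < working_set; i++) {
                    if (!idpf_rx_post_buf_desc(bufq, i))
                            return false;
            }

            /* one aligned doorbell for the whole batch */
            idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq->next_to_alloc,
                                                   IDPF_RX_BUF_STRIDE));

            return true;
    }
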
779 static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq, in idpf_rx_bufs_init() argument
783 .truesize = bufq->truesize, in idpf_rx_bufs_init()
784 .count = bufq->desc_count, in idpf_rx_bufs_init()
786 .hsplit = idpf_queue_has(HSPLIT_EN, bufq), in idpf_rx_bufs_init()
787 .nid = idpf_q_vector_to_mem(bufq->q_vector), in idpf_rx_bufs_init()
791 ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi); in idpf_rx_bufs_init()
795 bufq->pp = fq.pp; in idpf_rx_bufs_init()
796 bufq->buf = fq.fqes; in idpf_rx_bufs_init()
797 bufq->truesize = fq.truesize; in idpf_rx_bufs_init()
798 bufq->rx_buf_size = fq.buf_len; in idpf_rx_bufs_init()
800 return idpf_rx_buf_alloc_all(bufq); in idpf_rx_bufs_init()
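
Lines 779-800 are the payload analogue of the header allocation above: build a libeth_fq descriptor from the queue's geometry (including the hsplit flag and the NUMA node derived from the vector), create the fill queue, cache the results, and hand off to idpf_rx_buf_alloc_all() to fill it. Sketch, with the enum parameter name assumed:

    static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq,
                                 enum libeth_fqe_type type)
    {
            struct libeth_fq fq = {
                    .truesize = bufq->truesize,
                    .count    = bufq->desc_count,
                    .type     = type,           /* assumed */
                    .hsplit   = idpf_queue_has(HSPLIT_EN, bufq),
                    .nid      = idpf_q_vector_to_mem(bufq->q_vector),
            };
            int ret;

            ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi);
            if (ret)
                    return ret;

            bufq->pp = fq.pp;
            bufq->buf = fq.fqes;
            bufq->truesize = fq.truesize;
            bufq->rx_buf_size = fq.buf_len;

            return idpf_rx_buf_alloc_all(bufq);
    }
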
839 q = &rx_qgrp->splitq.bufq_sets[j].bufq; in idpf_rx_bufs_init_all()
895 struct idpf_buf_queue *bufq) in idpf_bufq_desc_alloc() argument
899 bufq->size = array_size(bufq->desc_count, sizeof(*bufq->split_buf)); in idpf_bufq_desc_alloc()
901 bufq->split_buf = dma_alloc_coherent(dev, bufq->size, &bufq->dma, in idpf_bufq_desc_alloc()
903 if (!bufq->split_buf) in idpf_bufq_desc_alloc()
906 bufq->next_to_alloc = 0; in idpf_bufq_desc_alloc()
907 bufq->next_to_clean = 0; in idpf_bufq_desc_alloc()
908 bufq->next_to_use = 0; in idpf_bufq_desc_alloc()
910 idpf_queue_set(GEN_CHK, bufq); in idpf_bufq_desc_alloc()
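
Lines 895-910 cover descriptor-ring allocation: size the ring with array_size() (the overflow-safe multiply), grab DMA-coherent memory, zero the indices, and arm the generation-check flag so the first pass over the ring is recognized. Reconstruction, with the device lookup and return values assumed:

    static int idpf_bufq_desc_alloc(const struct idpf_vport *vport,
                                    struct idpf_buf_queue *bufq)
    {
            struct device *dev = &vport->adapter->pdev->dev;    /* assumed */

            bufq->size = array_size(bufq->desc_count,
                                    sizeof(*bufq->split_buf));

            bufq->split_buf = dma_alloc_coherent(dev, bufq->size, &bufq->dma,
                                                 GFP_KERNEL);
            if (!bufq->split_buf)
                    return -ENOMEM;

            bufq->next_to_alloc = 0;
            bufq->next_to_clean = 0;
            bufq->next_to_use = 0;

            idpf_queue_set(GEN_CHK, bufq);

            return 0;
    }
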
957 q = &rx_qgrp->splitq.bufq_sets[j].bufq; in idpf_rx_desc_alloc_all()
1499 q = &rx_qgrp->splitq.bufq_sets[j].bufq; in idpf_rxq_group_alloc()
3263 rx_bufq = &rxq->bufq_sets[bufq_id].bufq; in idpf_rx_splitq_clean()
3375 static int idpf_rx_update_bufq_desc(struct idpf_buf_queue *bufq, u32 buf_id, in idpf_rx_update_bufq_desc() argument
3379 .pp = bufq->pp, in idpf_rx_update_bufq_desc()
3380 .fqes = bufq->buf, in idpf_rx_update_bufq_desc()
3381 .truesize = bufq->truesize, in idpf_rx_update_bufq_desc()
3382 .count = bufq->desc_count, in idpf_rx_update_bufq_desc()
3393 if (!idpf_queue_has(HSPLIT_EN, bufq)) in idpf_rx_update_bufq_desc()
3396 fq.pp = bufq->hdr_pp; in idpf_rx_update_bufq_desc()
3397 fq.fqes = bufq->hdr_buf; in idpf_rx_update_bufq_desc()
3398 fq.truesize = bufq->hdr_truesize; in idpf_rx_update_bufq_desc()
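
Lines 3375-3398 are the hot-path variant of descriptor posting used during refill: fill a caller-provided descriptor, allocate and map the payload buffer, and only touch the header pool when header split is on. Hedged sketch, with the field names and error handling inferred as in idpf_rx_post_buf_desc() above:

    static int idpf_rx_update_bufq_desc(struct idpf_buf_queue *bufq, u32 buf_id,
                                        struct virtchnl2_splitq_rx_buf_desc *buf_desc)
    {
            struct libeth_fq_fp fq = {
                    .pp       = bufq->pp,
                    .fqes     = bufq->buf,
                    .truesize = bufq->truesize,
                    .count    = bufq->desc_count,
            };
            dma_addr_t addr;

            addr = libeth_rx_alloc(&fq, buf_id);
            if (addr == DMA_MAPPING_ERROR)
                    return -ENOMEM;

            buf_desc->pkt_addr = cpu_to_le64(addr);
            buf_desc->qword0.buf_id = cpu_to_le16(buf_id);

            if (!idpf_queue_has(HSPLIT_EN, bufq))
                    return 0;

            fq.pp = bufq->hdr_pp;
            fq.fqes = bufq->hdr_buf;
            fq.truesize = bufq->hdr_truesize;

            addr = libeth_rx_alloc(&fq, buf_id);
            if (addr == DMA_MAPPING_ERROR)
                    return -ENOMEM;

            buf_desc->hdr_addr = cpu_to_le64(addr);

            return 0;
    }
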
3416 static void idpf_rx_clean_refillq(struct idpf_buf_queue *bufq, in idpf_rx_clean_refillq() argument
3420 u16 bufq_nta = bufq->next_to_alloc; in idpf_rx_clean_refillq()
3424 buf_desc = &bufq->split_buf[bufq_nta]; in idpf_rx_clean_refillq()
3436 failure = idpf_rx_update_bufq_desc(bufq, buf_id, buf_desc); in idpf_rx_clean_refillq()
3445 if (unlikely(++bufq_nta == bufq->desc_count)) { in idpf_rx_clean_refillq()
3446 buf_desc = &bufq->split_buf[0]; in idpf_rx_clean_refillq()
3462 if (((bufq->next_to_use <= bufq_nta ? 0 : bufq->desc_count) + in idpf_rx_clean_refillq()
3463 bufq_nta - bufq->next_to_use) >= IDPF_RX_BUF_POST_STRIDE) in idpf_rx_clean_refillq()
3464 idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq_nta, in idpf_rx_clean_refillq()
3469 bufq->next_to_alloc = bufq_nta; in idpf_rx_clean_refillq()
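
Lines 3416-3469 drain a software refill queue back into the hardware buffer queue, and two details are worth spelling out. First, the wraparound at lines 3445-3446 resets both the running descriptor pointer and the index. Second, the condition at lines 3462-3463 computes how many descriptors have been refilled since the last doorbell, modulo the ring size: (next_to_use <= bufq_nta ? 0 : desc_count) + bufq_nta - next_to_use adds desc_count only when bufq_nta has wrapped past next_to_use, and the tail is written only once that distance reaches IDPF_RX_BUF_POST_STRIDE, batching MMIO writes. A trimmed, hedged sketch of the loop (the refill-queue entry format, the generation-bit macros, and several field names are assumptions):

    static void idpf_rx_clean_refillq(struct idpf_buf_queue *bufq,
                                      struct idpf_sw_queue *refillq)
    {
            struct virtchnl2_splitq_rx_buf_desc *buf_desc;
            u16 bufq_nta = bufq->next_to_alloc;
            u16 ntc = refillq->next_to_clean;
            int cleaned = 0;

            buf_desc = &bufq->split_buf[bufq_nta];

            while (likely(cleaned < refillq->desc_count)) {
                    u32 refill_desc = refillq->ring[ntc];
                    u32 buf_id;

                    /* stop at entries the producer has not written yet */
                    if (idpf_queue_has(RFL_GEN_CHK, refillq) !=
                        !!(refill_desc & IDPF_RX_BI_GEN_M))
                            break;

                    buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc);
                    if (idpf_rx_update_bufq_desc(bufq, buf_id, buf_desc))
                            break;

                    if (unlikely(++ntc == refillq->desc_count)) {
                            idpf_queue_change(RFL_GEN_CHK, refillq);
                            ntc = 0;
                    }

                    if (unlikely(++bufq_nta == bufq->desc_count)) {
                            buf_desc = &bufq->split_buf[0];
                            bufq_nta = 0;
                    } else {
                            buf_desc++;
                    }

                    cleaned++;
            }

            if (!cleaned)
                    return;

            /* batch doorbells: only ring once a full stride accumulates */
            if (((bufq->next_to_use <= bufq_nta ? 0 : bufq->desc_count) +
                 bufq_nta - bufq->next_to_use) >= IDPF_RX_BUF_POST_STRIDE)
                    idpf_rx_buf_hw_update(bufq,
                                          ALIGN_DOWN(bufq_nta,
                                                     IDPF_RX_BUF_POST_STRIDE));

            refillq->next_to_clean = ntc;
            bufq->next_to_alloc = bufq_nta;
    }
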
3481 static void idpf_rx_clean_refillq_all(struct idpf_buf_queue *bufq, int nid) in idpf_rx_clean_refillq_all() argument
3486 page_pool_nid_changed(bufq->pp, nid); in idpf_rx_clean_refillq_all()
3487 if (bufq->hdr_pp) in idpf_rx_clean_refillq_all()
3488 page_pool_nid_changed(bufq->hdr_pp, nid); in idpf_rx_clean_refillq_all()
3490 bufq_set = container_of(bufq, struct idpf_bufq_set, bufq); in idpf_rx_clean_refillq_all()
3492 idpf_rx_clean_refillq(bufq, &bufq_set->refillqs[i]); in idpf_rx_clean_refillq_all()
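
Lines 3481-3492 run once per poll: notify both page pools if the servicing CPU's NUMA node changed (page_pool_nid_changed() lets the pool adjust its recycling), then walk every refill queue in the owning idpf_bufq_set, which is recovered from the embedded bufq via container_of(). Sketch, with the loop bound's field name assumed:

    static void idpf_rx_clean_refillq_all(struct idpf_buf_queue *bufq, int nid)
    {
            struct idpf_bufq_set *bufq_set;

            page_pool_nid_changed(bufq->pp, nid);
            if (bufq->hdr_pp)
                    page_pool_nid_changed(bufq->hdr_pp, nid);

            bufq_set = container_of(bufq, struct idpf_bufq_set, bufq);
            for (int i = 0; i < bufq_set->num_refillqs; i++) /* bound assumed */
                    idpf_rx_clean_refillq(bufq, &bufq_set->refillqs[i]);
    }
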
3550 kfree(q_vector->bufq); in idpf_vport_intr_rel()
3551 q_vector->bufq = NULL; in idpf_vport_intr_rel()
3993 idpf_rx_clean_refillq_all(q_vec->bufq[i], nid); in idpf_rx_splitq_clean_all()
4091 struct idpf_buf_queue *bufq; in idpf_vport_intr_map_vector_to_qs() local
4093 bufq = &rx_qgrp->splitq.bufq_sets[j].bufq; in idpf_vport_intr_map_vector_to_qs()
4094 bufq->q_vector = &vport->q_vectors[qv_idx]; in idpf_vport_intr_map_vector_to_qs()
4095 q_index = bufq->q_vector->num_bufq; in idpf_vport_intr_map_vector_to_qs()
4096 bufq->q_vector->bufq[q_index] = bufq; in idpf_vport_intr_map_vector_to_qs()
4097 bufq->q_vector->num_bufq++; in idpf_vport_intr_map_vector_to_qs()
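
Lines 4091-4097 wire each buffer queue to its interrupt vector: the queue stores a back-pointer to the vector, and the vector appends the queue to its bufq[] array while bumping num_bufq. The surrounding loop is not in the matches, so its shape here is assumed:

    for (u32 j = 0; j < num_bufqs; j++) {   /* loop context assumed */
            struct idpf_buf_queue *bufq;
            u16 q_index;

            bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
            bufq->q_vector = &vport->q_vectors[qv_idx];
            q_index = bufq->q_vector->num_bufq;
            bufq->q_vector->bufq[q_index] = bufq;
            bufq->q_vector->num_bufq++;
    }
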
4251 q_vector->bufq = kcalloc(bufqs_per_vector, in idpf_vport_intr_alloc()
4252 sizeof(*q_vector->bufq), in idpf_vport_intr_alloc()
4254 if (!q_vector->bufq) in idpf_vport_intr_alloc()
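
Lines 4251-4254 allocate that per-vector pointer array (kcalloc() zeroes it, so the num_bufq-based appends above start from a clean slate), and lines 3550-3551 free it and clear the pointer on teardown. The error-unwind label is assumed:

    /* idpf_vport_intr_alloc() */
    q_vector->bufq = kcalloc(bufqs_per_vector,
                             sizeof(*q_vector->bufq),
                             GFP_KERNEL);
    if (!q_vector->bufq)
            goto error;     /* unwind path assumed */

    /* idpf_vport_intr_rel() */
    kfree(q_vector->bufq);
    q_vector->bufq = NULL;
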