Searched refs:complq (Results 1 – 7 of 7) sorted by relevance
226 rx->dqo.complq.num_free_slots = completion_queue_slots; in gve_rx_init_ring_state_dqo()
227 rx->dqo.complq.mask = completion_queue_slots - 1; in gve_rx_init_ring_state_dqo()
228 rx->dqo.complq.cur_gen_bit = 0; in gve_rx_init_ring_state_dqo()
229 rx->dqo.complq.head = 0; in gve_rx_init_ring_state_dqo()
266 if (rx->dqo.complq.desc_ring) { in gve_rx_reset_ring_dqo()
267 size = sizeof(rx->dqo.complq.desc_ring[0]) * in gve_rx_reset_ring_dqo()
269 memset(rx->dqo.complq.desc_ring, 0, size); in gve_rx_reset_ring_dqo()
313 completion_queue_slots = rx->dqo.complq.mask + 1; in gve_rx_free_ring_dqo()
342 if (rx->dqo.complq.desc_ring) { in gve_rx_free_ring_dqo()
343 size = sizeof(rx->dqo.complq.desc_ring[0]) * in gve_rx_free_ring_dqo()
[all …]
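
The gve_rx_init_ring_state_dqo() hits above set up a power-of-two completion ring tracked with a mask and a generation bit. A minimal standalone sketch of that ring-state pattern follows; the type and field names are illustrative, not the gve structures themselves:

    /* Sketch only: power-of-two completion ring state with a
     * generation bit, in the spirit of the hits above.
     */
    struct compl_ring_state {
            unsigned int num_free_slots;    /* slots not yet owned by hardware */
            unsigned int mask;              /* slot count minus one (power of two) */
            unsigned int head;              /* next descriptor to inspect */
            unsigned int cur_gen_bit;       /* expected generation of valid entries */
    };

    static void compl_ring_state_init(struct compl_ring_state *cq,
                                      unsigned int slots)
    {
            /* slots must be a power of two for the mask trick to work */
            cq->num_free_slots = slots;
            cq->mask = slots - 1;
            cq->cur_gen_bit = 0;
            cq->head = 0;
    }

    static void compl_ring_advance(struct compl_ring_state *cq)
    {
            /* Wrap with the mask; flip the expected generation on
             * wraparound so stale descriptors left over from the
             * previous pass are ignored.
             */
            cq->head = (cq->head + 1) & cq->mask;
            if (cq->head == 0)
                    cq->cur_gen_bit ^= 1;
    }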
235 struct gve_rx_compl_queue_dqo complq; member
756 cpu_to_be64(rx->dqo.complq.bus); in gve_adminq_get_create_rx_queue_cmd()
153 static void idpf_compl_desc_rel(struct idpf_compl_queue *complq) in idpf_compl_desc_rel() argument
155 if (!complq->comp) in idpf_compl_desc_rel()
158 dma_free_coherent(complq->netdev->dev.parent, complq->size, in idpf_compl_desc_rel()
159 complq->comp, complq->dma); in idpf_compl_desc_rel()
160 complq->comp = NULL; in idpf_compl_desc_rel()
161 complq->next_to_use = 0; in idpf_compl_desc_rel()
162 complq->next_to_clean = 0; in idpf_compl_desc_rel()
185 idpf_compl_desc_rel(txq_grp->complq); in idpf_tx_desc_rel_all()
285 struct idpf_compl_queue *complq) in idpf_compl_desc_alloc() argument
287 complq->size = array_size(complq->desc_count, sizeof(*complq->comp)); in idpf_compl_desc_alloc()
[all …]
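
The idpf_compl_desc_alloc()/idpf_compl_desc_rel() hits above pair dma_alloc_coherent() with dma_free_coherent() for the completion descriptor ring. A hedged sketch of that alloc/release pairing, assuming a simplified queue struct (the struct layout and function names are illustrative, not the driver's own):

    #include <linux/dma-mapping.h>
    #include <linux/overflow.h>
    #include <linux/types.h>

    struct demo_compl_desc {
            __le64 raw;                     /* placeholder descriptor layout */
    };

    struct demo_compl_queue {
            struct demo_compl_desc *comp;   /* CPU pointer to the ring */
            dma_addr_t dma;                 /* bus address handed to the device */
            size_t size;                    /* ring size in bytes */
            u16 desc_count;
            u16 next_to_use;
            u16 next_to_clean;
    };

    static int demo_compl_desc_alloc(struct device *dev,
                                     struct demo_compl_queue *cq)
    {
            /* array_size() saturates on overflow instead of wrapping */
            cq->size = array_size(cq->desc_count, sizeof(*cq->comp));
            cq->comp = dma_alloc_coherent(dev, cq->size, &cq->dma, GFP_KERNEL);
            if (!cq->comp)
                    return -ENOMEM;

            cq->next_to_use = 0;
            cq->next_to_clean = 0;
            return 0;
    }

    static void demo_compl_desc_rel(struct device *dev,
                                    struct demo_compl_queue *cq)
    {
            if (!cq->comp)
                    return;

            /* Free with the same size and bus address used at alloc time */
            dma_free_coherent(dev, cq->size, cq->comp, cq->dma);
            cq->comp = NULL;
            cq->next_to_use = 0;
            cq->next_to_clean = 0;
    }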
129 (((txq)->num_completions_pending >= (txq)->complq->num_completions ? \
131 (txq)->num_completions_pending - (txq)->complq->num_completions)
413 struct idpf_compl_queue **complq; member
924 struct idpf_compl_queue *complq; member
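
Lines 129 and 131 above are two pieces of a macro that counts completions still outstanding for a TX queue (line 130 is not shown in these hits). A sketch of the underlying idea, with invented names, assuming both counters are free-running u32 values:

    #include <linux/types.h>

    /* Illustration only, not the driver's macro: the number of TX
     * completions still outstanding is the distance between a
     * "completions requested" counter and the completion queue's
     * "completions seen" counter.
     */
    static inline u32 demo_completions_outstanding(u32 pending, u32 done)
    {
            /* With both counters held in u32, unsigned subtraction
             * already yields the correct distance across a counter wrap.
             */
            return pending - done;
    }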
1469 cpu_to_le16(tx_qgrp->complq->q_id); in idpf_send_config_tx_queues_msg()
1487 qi[k].queue_id = cpu_to_le32(tx_qgrp->complq->q_id); in idpf_send_config_tx_queues_msg()
1490 qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count); in idpf_send_config_tx_queues_msg()
1491 qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma); in idpf_send_config_tx_queues_msg()
1493 if (idpf_queue_has(FLOW_SCH_EN, tx_qgrp->complq)) in idpf_send_config_tx_queues_msg()
1745 qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id); in idpf_send_ena_dis_queues_msg()
1880 cpu_to_le16(tx_qgrp->complq->q_vector->v_idx); in idpf_send_map_unmap_queue_vector_msg()
1882 cpu_to_le32(tx_qgrp->complq->q_vector->tx_itr_idx); in idpf_send_map_unmap_queue_vector_msg()
3321 tx_qgrp->complq->q_id = qids[k]; in __idpf_vport_queue_ids_init()
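
The idpf_send_config_tx_queues_msg() hits above fill a queue-config message with cpu_to_le*() conversions of the completion queue's id, ring length, and DMA ring address before it is sent to the control plane. A sketch of that pattern with an invented message layout (not the virtchnl2 structures):

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct demo_txq_cfg_msg {
            __le32 queue_id;
            __le16 ring_len;
            __le64 dma_ring_addr;
    };

    static void demo_fill_txq_cfg(struct demo_txq_cfg_msg *qi,
                                  u32 q_id, u16 desc_count, dma_addr_t dma)
    {
            /* Every on-the-wire field is converted to little endian,
             * regardless of host byte order.
             */
            qi->queue_id = cpu_to_le32(q_id);
            qi->ring_len = cpu_to_le16(desc_count);
            /* DMA addresses travel as 64-bit values on the wire */
            qi->dma_ring_addr = cpu_to_le64(dma);
    }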
991 return vport->txq_grps[q_grp].complq->q_vector; in idpf_find_txq_vec()