Lines matching refs: io_sq
35 static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq) in get_sq_desc_regular_queue() argument
40 tail_masked = io_sq->tail & (io_sq->q_depth - 1); in get_sq_desc_regular_queue()
42 offset = tail_masked * io_sq->desc_entry_size; in get_sq_desc_regular_queue()
44 return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset); in get_sq_desc_regular_queue()
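The regular (host-memory) queue path above converts the free-running tail into an array offset by masking with q_depth - 1, which only works when q_depth is a power of two. A minimal sketch of that indexing scheme, with illustrative names rather than the driver's own:

#include <stdint.h>
#include <stddef.h>

/* Toy submission queue: a flat descriptor array plus a free-running tail. */
struct toy_sq {
	uint8_t  *desc_base;       /* virtual base of the descriptor array */
	uint16_t  tail;            /* free-running producer index, never reset */
	uint16_t  q_depth;         /* must be a power of two for the mask trick */
	size_t    desc_entry_size; /* size of one descriptor in bytes */
};

static void *toy_get_sq_desc(struct toy_sq *sq)
{
	/* Mask instead of modulo: valid only because q_depth is a power of two. */
	uint16_t tail_masked = sq->tail & (sq->q_depth - 1);

	return sq->desc_base + (size_t)tail_masked * sq->desc_entry_size;
}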
47 static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq, in ena_com_write_bounce_buffer_to_dev() argument
50 struct ena_com_llq_info *llq_info = &io_sq->llq_info; in ena_com_write_bounce_buffer_to_dev()
55 dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1); in ena_com_write_bounce_buffer_to_dev()
58 if (is_llq_max_tx_burst_exists(io_sq)) { in ena_com_write_bounce_buffer_to_dev()
59 if (unlikely(!io_sq->entries_in_tx_burst_left)) { in ena_com_write_bounce_buffer_to_dev()
60 netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, in ena_com_write_bounce_buffer_to_dev()
65 io_sq->entries_in_tx_burst_left--; in ena_com_write_bounce_buffer_to_dev()
66 netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device, in ena_com_write_bounce_buffer_to_dev()
67 "Decreasing entries_in_tx_burst_left of queue %d to %d\n", io_sq->qid, in ena_com_write_bounce_buffer_to_dev()
68 io_sq->entries_in_tx_burst_left); in ena_com_write_bounce_buffer_to_dev()
77 __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset, bounce_buffer, in ena_com_write_bounce_buffer_to_dev()
80 io_sq->tail++; in ena_com_write_bounce_buffer_to_dev()
83 if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0)) in ena_com_write_bounce_buffer_to_dev()
84 io_sq->phase ^= 1; in ena_com_write_bounce_buffer_to_dev()
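The tail increment and phase flip at the end of ena_com_write_bounce_buffer_to_dev() (mirrored later in ena_com_sq_update_tail()) follow the usual phase-bit convention for rings whose producer index never resets: the phase toggles each time the masked tail wraps to zero, so entries written on the previous lap are recognizably stale. A hedged sketch with illustrative names:

#include <stdint.h>

struct toy_ring {
	uint16_t tail;    /* free-running producer index */
	uint16_t q_depth; /* power of two */
	uint8_t  phase;   /* expected-phase bit, flipped on every wrap */
};

static void toy_ring_advance_tail(struct toy_ring *r)
{
	r->tail++;

	/* Masked tail hitting zero means the ring just wrapped:
	 * flip the phase so entries from the previous lap no longer match. */
	if ((r->tail & (r->q_depth - 1)) == 0)
		r->phase ^= 1;
}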
89 static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq, in ena_com_write_header_to_bounce() argument
93 struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl; in ena_com_write_header_to_bounce()
94 struct ena_com_llq_info *llq_info = &io_sq->llq_info; in ena_com_write_header_to_bounce()
98 if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)) in ena_com_write_header_to_bounce()
102 llq_info->descs_num_before_header * io_sq->desc_entry_size; in ena_com_write_header_to_bounce()
105 netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, in ena_com_write_header_to_bounce()
111 netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n"); in ena_com_write_header_to_bounce()
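ena_com_write_header_to_bounce() copies the packet header into the current bounce line right after the first descs_num_before_header descriptors and fails if the header would spill past the line. A simplified sketch of that placement and bounds check, using hypothetical names:

#include <stdint.h>
#include <string.h>
#include <errno.h>

static int toy_write_header_to_bounce(uint8_t *bounce_buf,
				      uint32_t desc_list_entry_size,
				      uint32_t descs_num_before_header,
				      uint32_t desc_entry_size,
				      const void *header, uint16_t header_len)
{
	/* The header starts right after the reserved leading descriptors. */
	uint32_t header_offset = descs_num_before_header * desc_entry_size;

	/* Refuse headers that would overrun the LLQ line. */
	if (header_offset + header_len > desc_list_entry_size)
		return -EFAULT;

	memcpy(bounce_buf + header_offset, header, header_len);
	return 0;
}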
120 static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq) in get_sq_desc_llq() argument
122 struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl; in get_sq_desc_llq()
129 netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n"); in get_sq_desc_llq()
133 sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size; in get_sq_desc_llq()
140 static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq) in ena_com_close_bounce_buffer() argument
142 struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl; in ena_com_close_bounce_buffer()
143 struct ena_com_llq_info *llq_info = &io_sq->llq_info; in ena_com_close_bounce_buffer()
146 if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)) in ena_com_close_bounce_buffer()
151 rc = ena_com_write_bounce_buffer_to_dev(io_sq, in ena_com_close_bounce_buffer()
154 netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, in ena_com_close_bounce_buffer()
160 ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl); in ena_com_close_bounce_buffer()
161 memset(io_sq->llq_buf_ctrl.curr_bounce_buf, in ena_com_close_bounce_buffer()
170 static void *get_sq_desc(struct ena_com_io_sq *io_sq) in get_sq_desc() argument
172 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) in get_sq_desc()
173 return get_sq_desc_llq(io_sq); in get_sq_desc()
175 return get_sq_desc_regular_queue(io_sq); in get_sq_desc()
178 static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq) in ena_com_sq_update_llq_tail() argument
180 struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl; in ena_com_sq_update_llq_tail()
181 struct ena_com_llq_info *llq_info = &io_sq->llq_info; in ena_com_sq_update_llq_tail()
185 rc = ena_com_write_bounce_buffer_to_dev(io_sq, in ena_com_sq_update_llq_tail()
188 netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, in ena_com_sq_update_llq_tail()
194 ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl); in ena_com_sq_update_llq_tail()
195 memset(io_sq->llq_buf_ctrl.curr_bounce_buf, in ena_com_sq_update_llq_tail()
203 llq_info->desc_list_entry_size / io_sq->desc_entry_size; in ena_com_sq_update_llq_tail()
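In the LLQ path the driver stages descriptors in a host bounce buffer and flushes it to device memory; once a line has been written out, a fresh one is started and can hold desc_list_entry_size / desc_entry_size descriptors. A rough sketch of that bookkeeping, with assumed field names:

#include <stdint.h>
#include <string.h>

/* Toy bounce-buffer control: tracks where the next descriptor goes
 * inside the current bounce line and how many slots remain. */
struct toy_llq_ctrl {
	uint8_t  *curr_bounce_buf;     /* host staging area for one LLQ entry */
	uint16_t  idx;                 /* next free descriptor slot in the line */
	uint16_t  descs_left_in_line;  /* free slots remaining in this line */
};

static void toy_llq_start_new_line(struct toy_llq_ctrl *ctrl,
				   uint32_t desc_list_entry_size,
				   uint32_t desc_entry_size,
				   uint8_t *next_bounce_buf)
{
	/* The flushed line is done; begin filling a fresh, zeroed one. */
	ctrl->curr_bounce_buf = next_bounce_buf;
	memset(ctrl->curr_bounce_buf, 0, desc_list_entry_size);

	ctrl->idx = 0;
	ctrl->descs_left_in_line = desc_list_entry_size / desc_entry_size;
}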
209 static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq) in ena_com_sq_update_tail() argument
211 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) in ena_com_sq_update_tail()
212 return ena_com_sq_update_llq_tail(io_sq); in ena_com_sq_update_tail()
214 io_sq->tail++; in ena_com_sq_update_tail()
217 if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0)) in ena_com_sq_update_tail()
218 io_sq->phase ^= 1; in ena_com_sq_update_tail()
283 static int ena_com_create_meta(struct ena_com_io_sq *io_sq, in ena_com_create_meta() argument
288 meta_desc = get_sq_desc(io_sq); in ena_com_create_meta()
309 meta_desc->len_ctrl |= ((u32)io_sq->phase << in ena_com_create_meta()
326 return ena_com_sq_update_tail(io_sq); in ena_com_create_meta()
329 static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq, in ena_com_create_and_store_tx_meta_desc() argument
338 if (io_sq->disable_meta_caching) { in ena_com_create_and_store_tx_meta_desc()
340 return ena_com_create_meta(io_sq, ena_meta); in ena_com_create_and_store_tx_meta_desc()
343 if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) { in ena_com_create_and_store_tx_meta_desc()
346 memcpy(&io_sq->cached_tx_meta, ena_meta, in ena_com_create_and_store_tx_meta_desc()
348 return ena_com_create_meta(io_sq, ena_meta); in ena_com_create_and_store_tx_meta_desc()
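ena_com_create_and_store_tx_meta_desc() emits a TX metadata descriptor either unconditionally (when meta caching is disabled) or only when the metadata differs from the cached copy, which it then refreshes. A small sketch of that compare-and-cache pattern with illustrative types:

#include <stdbool.h>
#include <string.h>

struct toy_tx_meta {
	unsigned int mss;
	unsigned int l3_hdr_len;
	unsigned int l3_hdr_offset;
	unsigned int l4_hdr_len;
};

/* Returns true when a metadata descriptor should be written. */
static bool toy_store_tx_meta(struct toy_tx_meta *cached,
			      const struct toy_tx_meta *new_meta,
			      bool disable_meta_caching)
{
	/* Without caching, every packet carries its own metadata descriptor. */
	if (disable_meta_caching)
		return true;

	/* With caching, only emit a descriptor when the metadata changed,
	 * and remember it for the packets that follow. */
	if (memcmp(cached, new_meta, sizeof(*cached)) != 0) {
		memcpy(cached, new_meta, sizeof(*cached));
		return true;
	}

	return false;
}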
388 int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, in ena_com_prepare_tx() argument
397 u16 start_tail = io_sq->tail; in ena_com_prepare_tx()
402 WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type"); in ena_com_prepare_tx()
405 if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) { in ena_com_prepare_tx()
406 netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device, in ena_com_prepare_tx()
411 if (unlikely(header_len > io_sq->tx_max_header_size)) { in ena_com_prepare_tx()
412 netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, in ena_com_prepare_tx()
414 io_sq->tx_max_header_size); in ena_com_prepare_tx()
418 if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && !buffer_to_push)) { in ena_com_prepare_tx()
419 netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, in ena_com_prepare_tx()
424 rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len); in ena_com_prepare_tx()
428 rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta); in ena_com_prepare_tx()
430 netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, in ena_com_prepare_tx()
437 rc = ena_com_close_bounce_buffer(io_sq); in ena_com_prepare_tx()
439 netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, in ena_com_prepare_tx()
441 *nb_hw_desc = io_sq->tail - start_tail; in ena_com_prepare_tx()
445 desc = get_sq_desc(io_sq); in ena_com_prepare_tx()
457 desc->len_ctrl |= ((u32)io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) & in ena_com_prepare_tx()
499 rc = ena_com_sq_update_tail(io_sq); in ena_com_prepare_tx()
501 netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, in ena_com_prepare_tx()
506 desc = get_sq_desc(io_sq); in ena_com_prepare_tx()
512 desc->len_ctrl |= ((u32)io_sq->phase << in ena_com_prepare_tx()
521 GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32); in ena_com_prepare_tx()
532 rc = ena_com_sq_update_tail(io_sq); in ena_com_prepare_tx()
534 netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, in ena_com_prepare_tx()
539 rc = ena_com_close_bounce_buffer(io_sq); in ena_com_prepare_tx()
541 *nb_hw_desc = io_sq->tail - start_tail; in ena_com_prepare_tx()
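Both exit paths of ena_com_prepare_tx() report the number of hardware descriptors consumed as io_sq->tail - start_tail. Since the tail is a free-running u16 counter, the unsigned subtraction stays correct even across a counter wrap, for example:

#include <stdint.h>
#include <assert.h>

int main(void)
{
	/* Tail captured before the packet was queued, just below the wrap point. */
	uint16_t start_tail = 0xFFFE;

	/* Tail after queuing three descriptors; the 16-bit counter wrapped. */
	uint16_t tail = (uint16_t)(start_tail + 3); /* == 0x0001 */

	/* Unsigned wrap-around makes the difference come out right anyway. */
	uint16_t nb_hw_desc = (uint16_t)(tail - start_tail);
	assert(nb_hw_desc == 3);

	return 0;
}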
546 struct ena_com_io_sq *io_sq, in ena_com_rx_pkt() argument
594 io_sq->next_to_comp += nb_hw_desc; in ena_com_rx_pkt()
597 "[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid, in ena_com_rx_pkt()
598 io_sq->next_to_comp); in ena_com_rx_pkt()
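After a completed RX packet, ena_com_rx_pkt() hands the consumed descriptors back by advancing next_to_comp; free space on the submission side can then be derived from the gap between the free-running tail and next_to_comp. A simplified sketch of that accounting, not the driver's exact bookkeeping:

#include <stdint.h>
#include <stdbool.h>

/* Free entries in a ring tracked by free-running producer (tail) and
 * consumer (next_to_comp) counters; one slot is kept unused so a full
 * ring is distinguishable from an empty one. */
static uint16_t toy_free_sq_entries(uint16_t tail, uint16_t next_to_comp,
				    uint16_t q_depth)
{
	uint16_t in_flight = (uint16_t)(tail - next_to_comp);

	return q_depth - 1 - in_flight;
}

static bool toy_sq_has_space(uint16_t tail, uint16_t next_to_comp,
			     uint16_t q_depth, uint16_t required)
{
	return toy_free_sq_entries(tail, next_to_comp, q_depth) >= required;
}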
608 int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, in ena_com_add_single_rx_desc() argument
614 WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type"); in ena_com_add_single_rx_desc()
616 if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1))) in ena_com_add_single_rx_desc()
619 desc = get_sq_desc(io_sq); in ena_com_add_single_rx_desc()
630 (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK); in ena_com_add_single_rx_desc()
634 netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device, in ena_com_add_single_rx_desc()
635 "[%s] Adding single RX desc, Queue: %u, req_id: %u\n", __func__, io_sq->qid, in ena_com_add_single_rx_desc()
640 ((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32); in ena_com_add_single_rx_desc()
642 return ena_com_sq_update_tail(io_sq); in ena_com_add_single_rx_desc()
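ena_com_add_single_rx_desc(), like the TX path at the GENMASK_ULL line above, splits each buffer's DMA address into a low 32-bit part and a high part limited to the device's advertised dma_addr_bits. A sketch of that split, assuming a 48-bit-capable device for the example:

#include <stdint.h>
#include <stdio.h>

/* Build a mask covering bits [hi:lo], similar in spirit to GENMASK_ULL(). */
static uint64_t mask_bits(unsigned int hi, unsigned int lo)
{
	return (~0ULL >> (63 - hi)) & ~((1ULL << lo) - 1);
}

int main(void)
{
	unsigned int dma_addr_bits = 48;        /* example: device addresses 48 bits */
	uint64_t paddr = 0x0000123456789ABCULL; /* example buffer DMA address */

	uint32_t addr_lo = (uint32_t)(paddr & 0xFFFFFFFFULL);
	/* Upper address bits (bits 32..dma_addr_bits-1), shifted down. */
	uint32_t addr_hi = (uint32_t)((paddr & mask_bits(dma_addr_bits - 1, 32)) >> 32);

	printf("lo=0x%08x hi=0x%04x\n", addr_lo, addr_hi);
	return 0;
}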