// SPDX-License-Identifier: GPL-2.0-only
* enum idpf_vc_xn_state - Virtchnl transaction status
* @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used
* return context; a callback may be provided to handle
* struct idpf_vc_xn - Data structure representing virtchnl transactions
* @completed: virtchnl event loop uses that to signal when a reply is
* truncated on its way to the receiver thread according to
* @reply: Reference to the buffer(s) where the reply data should be written
* to. May be 0-length (then NULL address permitted) if the reply data
* @async_handler: if sent asynchronously, a callback can be provided to handle
* @salt: changed every message to make unique, used for cookie
* struct idpf_vc_xn_params - Parameters for executing transaction
* @timeout_ms: timeout to wait for reply
* @vc_op: virtchnl op to send
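/*
 * Editor's illustrative sketch (not part of the driver listing): a typical
 * caller fills struct idpf_vc_xn_params on the stack and hands it to
 * idpf_vc_xn_exec(), which returns the reply size or a negative errno.
 * The opcode and timeout constants are assumed from the wider driver, not
 * from the lines quoted here.
 */
static int example_enable_vport(struct idpf_vport *vport)
{
	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_vport v_id = {};
	ssize_t reply_sz;

	v_id.vport_id = cpu_to_le32(vport->vport_id);
	xn_params.vc_op = VIRTCHNL2_OP_ENABLE_VPORT;		/* assumed opcode */
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;	/* assumed constant */
	xn_params.send_buf.iov_base = &v_id;
	xn_params.send_buf.iov_len = sizeof(v_id);

	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);

	return reply_sz < 0 ? reply_sz : 0;
}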
* struct idpf_vc_xn_manager - Manager for tracking transactions
* @salt: used to make cookie unique every message
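/*
 * Editor's illustrative sketch (not part of the driver listing): the mailbox
 * sw_cookie carries both the ring index and the per-message salt.  The send
 * path composes it with FIELD_PREP() (quoted below in idpf_vc_xn_exec()); the
 * FIELD_GET() decomposition here shows how the receive path can locate and
 * validate the matching transaction, and is an assumption for illustration.
 */
static void example_parse_cookie(u16 cookie, u16 *xn_idx, u16 *salt)
{
	*xn_idx = FIELD_GET(IDPF_VC_XN_IDX_M, cookie);
	*salt = FIELD_GET(IDPF_VC_XN_SALT_M, cookie);
}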
* idpf_vid_to_vport - Translate vport id to vport pointer
* @v_id: vport id to translate
if (adapter->vport_ids[i] == v_id)
return adapter->vports[i];
* idpf_handle_event_link - Handle link event message
vport = idpf_vid_to_vport(adapter, le32_to_cpu(v2e->vport_id));
dev_err_ratelimited(&adapter->pdev->dev, "Failed to find vport_id %d for link event\n",
v2e->vport_id);
np = netdev_priv(vport->netdev);
np->link_speed_mbps = le32_to_cpu(v2e->link_speed);
if (vport->link_up == v2e->link_status)
vport->link_up = v2e->link_status;
if (np->state != __IDPF_VPORT_UP)
if (vport->link_up) {
netif_tx_start_all_queues(vport->netdev);
netif_carrier_on(vport->netdev);
netif_tx_stop_all_queues(vport->netdev);
netif_carrier_off(vport->netdev);
* idpf_recv_event_msg - Receive virtchnl event message
* @ctlq_msg: message to copy from
int payload_size = ctlq_msg->ctx.indirect.payload->size;
dev_err_ratelimited(&adapter->pdev->dev, "Failed to receive valid payload for event msg (op %d len…
ctlq_msg->cookie.mbx.chnl_opcode,
v2e = (struct virtchnl2_event *)ctlq_msg->ctx.indirect.payload->va;
event = le32_to_cpu(v2e->event);
dev_err(&adapter->pdev->dev,
* idpf_mb_clean - Reclaim the send mailbox queue entries
* Reclaim the send mailbox queue entries to be used to send further messages
return -ENOMEM;
err = idpf_ctlq_clean_sq(adapter->hw.asq, &num_q_msg, q_msg);
dma_mem = q_msg[i]->ctx.indirect.payload;
dma_free_coherent(&adapter->pdev->dev, dma_mem->size,
dma_mem->va, dma_mem->pa);
* idpf_send_mb_msg - Send message over mailbox
* @msg: pointer to buffer holding the payload
* done. This thread should silently abort and is expected to
return -ENOMEM;
err = -ENOMEM;
ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_cp;
ctlq_msg->func_id = 0;
ctlq_msg->data_len = msg_size;
ctlq_msg->cookie.mbx.chnl_opcode = op;
ctlq_msg->cookie.mbx.chnl_retval = 0;
dma_mem->size = IDPF_CTLQ_MAX_BUF_LEN;
dma_mem->va = dma_alloc_coherent(&adapter->pdev->dev, dma_mem->size,
&dma_mem->pa, GFP_ATOMIC);
if (!dma_mem->va) {
err = -ENOMEM;
memcpy(dma_mem->va, msg, msg_size);
ctlq_msg->ctx.indirect.payload = dma_mem;
ctlq_msg->ctx.sw_cookie.data = cookie;
err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);
dma_free_coherent(&adapter->pdev->dev, dma_mem->size, dma_mem->va,
dma_mem->pa);
* We are reusing the completion lock to serialize the accesses to the
* idpf_vc_xn_lock - Request exclusive access to vc transaction
* @xn: struct idpf_vc_xn* to access
raw_spin_lock(&(xn)->completed.wait.lock)
* idpf_vc_xn_unlock - Release exclusive access to vc transaction
* @xn: struct idpf_vc_xn* to access
raw_spin_unlock(&(xn)->completed.wait.lock)
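/*
 * Editor's illustrative sketch (not part of the driver listing): the lock
 * macros above guard transaction state transitions, e.g. claiming an idle
 * transaction before a send.  The state names follow the lines quoted in
 * this listing; the helper itself and its -EBUSY return are hypothetical.
 */
static int example_claim_xn(struct idpf_vc_xn *xn)
{
	idpf_vc_xn_lock(xn);
	if (xn->state != IDPF_VC_XN_IDLE) {
		/* busy or shut down; give up while still holding the lock */
		idpf_vc_xn_unlock(xn);
		return -EBUSY;
	}
	xn->state = IDPF_VC_XN_WAITING;
	idpf_vc_xn_unlock(xn);

	return 0;
}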
* idpf_vc_xn_release_bufs - Release reference to reply buffer(s) and
* @xn: struct idpf_vc_xn to update
xn->reply.iov_base = NULL;
xn->reply.iov_len = 0;
if (xn->state != IDPF_VC_XN_SHUTDOWN)
xn->state = IDPF_VC_XN_IDLE;
* idpf_vc_xn_init - Initialize virtchnl transaction object
* @vcxn_mngr: pointer to vc transaction manager struct
spin_lock_init(&vcxn_mngr->xn_bm_lock);
for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];
xn->state = IDPF_VC_XN_IDLE;
xn->idx = i;
init_completion(&xn->completed);
bitmap_fill(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
* idpf_vc_xn_shutdown - Uninitialize virtchnl transaction object
* @vcxn_mngr: pointer to vc transaction manager struct
* All waiting threads will be woken up and their transactions aborted. Further
spin_lock_bh(&vcxn_mngr->xn_bm_lock);
bitmap_zero(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
spin_unlock_bh(&vcxn_mngr->xn_bm_lock);
for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];
xn->state = IDPF_VC_XN_SHUTDOWN;
complete_all(&xn->completed);
* idpf_vc_xn_pop_free - Pop a free transaction from free list
* @vcxn_mngr: transaction manager to pop from
spin_lock_bh(&vcxn_mngr->xn_bm_lock);
free_idx = find_first_bit(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
clear_bit(free_idx, vcxn_mngr->free_xn_bm);
xn = &vcxn_mngr->ring[free_idx];
xn->salt = vcxn_mngr->salt++;
spin_unlock_bh(&vcxn_mngr->xn_bm_lock);
* idpf_vc_xn_push_free - Push a free transaction to free list
* @vcxn_mngr: transaction manager to push to
* @xn: transaction to push
set_bit(xn->idx, vcxn_mngr->free_xn_bm);
* idpf_vc_xn_exec - Perform a send/recv virtchnl transaction
* -vc_op: virtchannel operation to send
* -send_buf: kvec iov for send buf and len
* -recv_buf: kvec iov for recv buf and len (ignored if NULL)
* -timeout_ms: timeout waiting for a reply (milliseconds)
* -async: don't wait for message reply, will lose caller context
* -async_handler: callback to handle async replies
const struct kvec *send_buf = &params->send_buf;
xn = idpf_vc_xn_pop_free(adapter->vcxn_mngr);
return -ENOSPC;
if (xn->state == IDPF_VC_XN_SHUTDOWN) {
retval = -ENXIO;
} else if (xn->state != IDPF_VC_XN_IDLE) {
/* We're just going to clobber this transaction even though
* eventually leak all the free transactions and not be able to
* send any messages. At least this way we make an attempt to
* happening that's corrupting what was supposed to be free
xn->idx, xn->vc_op);
xn->reply = params->recv_buf;
xn->reply_sz = 0;
xn->state = params->async ? IDPF_VC_XN_ASYNC : IDPF_VC_XN_WAITING;
xn->vc_op = params->vc_op;
xn->async_handler = params->async_handler;
if (!params->async)
reinit_completion(&xn->completed);
cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) |
FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx);
retval = idpf_send_mb_msg(adapter, params->vc_op,
send_buf->iov_len, send_buf->iov_base,
if (params->async)
wait_for_completion_timeout(&xn->completed,
msecs_to_jiffies(params->timeout_ms));
/* No need to check the return value; we check the final state of the
* wait_for_completion_timeout returns. This should be a non-issue
switch (xn->state) {
retval = -ENXIO;
dev_notice_ratelimited(&adapter->pdev->dev, "Transaction timed-out (op %d, %dms)\n",
params->vc_op, params->timeout_ms);
retval = -ETIME;
retval = xn->reply_sz;
dev_notice_ratelimited(&adapter->pdev->dev, "Transaction failed (op %d)\n",
params->vc_op);
retval = -EIO;
retval = -EIO;
idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);
* idpf_vc_xn_forward_async - Handle async reply receives
* @xn: transaction to handle
* For async sends we're going to lose the caller's context so, if an
if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
dev_err_ratelimited(&adapter->pdev->dev, "Async message opcode does not match transaction opcode (…
ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
xn->reply_sz = 0;
err = -EINVAL;
if (xn->async_handler) {
err = xn->async_handler(adapter, xn, ctlq_msg);
if (ctlq_msg->cookie.mbx.chnl_retval) {
xn->reply_sz = 0;
dev_err_ratelimited(&adapter->pdev->dev, "Async message failure (op %d)\n",
ctlq_msg->cookie.mbx.chnl_opcode);
err = -EINVAL;
idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);
* idpf_vc_xn_forward_reply - copy a reply back to receiving thread
* @ctlq_msg: controlq message to send back to receiving thread
msg_info = ctlq_msg->ctx.sw_cookie.data;
if (xn_idx >= ARRAY_SIZE(adapter->vcxn_mngr->ring)) {
dev_err_ratelimited(&adapter->pdev->dev, "Out of bounds cookie received: %02x\n",
return -EINVAL;
xn = &adapter->vcxn_mngr->ring[xn_idx];
if (xn->salt != salt) {
dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (%02x != %02x)\n",
xn->salt, salt);
return -EINVAL;
switch (xn->state) {
dev_err_ratelimited(&adapter->pdev->dev, "Unexpected or belated VC reply (op %d)\n",
ctlq_msg->cookie.mbx.chnl_opcode);
err = -EINVAL;
* know if it should stop trying to clean the ring if we lost
* the virtchnl. We need to stop playing with registers and
err = -ENXIO;
dev_err_ratelimited(&adapter->pdev->dev, "Overwriting VC reply (op %d)\n",
ctlq_msg->cookie.mbx.chnl_opcode);
err = -EBUSY;
if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
dev_err_ratelimited(&adapter->pdev->dev, "Message opcode does not match transaction opcode (msg: %…
ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
xn->reply_sz = 0;
xn->state = IDPF_VC_XN_COMPLETED_FAILED;
err = -EINVAL;
if (ctlq_msg->cookie.mbx.chnl_retval) {
xn->reply_sz = 0;
xn->state = IDPF_VC_XN_COMPLETED_FAILED;
err = -EINVAL;
if (ctlq_msg->data_len) {
payload = ctlq_msg->ctx.indirect.payload->va;
payload_size = ctlq_msg->data_len;
xn->reply_sz = payload_size;
xn->state = IDPF_VC_XN_COMPLETED_SUCCESS;
if (xn->reply.iov_base && xn->reply.iov_len && payload_size)
memcpy(xn->reply.iov_base, payload,
min_t(size_t, xn->reply.iov_len, payload_size));
complete(&xn->completed);
* idpf_recv_mb_msg - Receive message over mailbox
err = idpf_ctlq_recv(adapter->hw.arq, &num_recv, &ctlq_msg);
post_err = idpf_ctlq_post_rx_buffs(&adapter->hw,
adapter->hw.arq,
dmam_free_coherent(&adapter->pdev->dev,
dma_mem->size, dma_mem->va,
dma_mem->pa);
/* virtchnl trying to shutdown, stop cleaning */
if (err == -ENXIO)
* idpf_wait_for_marker_event - wait for software marker response
for (i = 0; i < vport->num_txq; i++)
idpf_queue_set(SW_MARKER, vport->txqs[i]);
event = wait_event_timeout(vport->sw_marker_wq,
vport->flags),
for (i = 0; i < vport->num_txq; i++)
idpf_queue_clear(POLL_MODE, vport->txqs[i]);
dev_warn(&vport->adapter->pdev->dev, "Failed to receive marker packets\n");
return -ETIMEDOUT;
* idpf_send_ver_msg - send virtchnl version message
if (adapter->virt_ver_maj) {
vvi.major = cpu_to_le32(adapter->virt_ver_maj);
vvi.minor = cpu_to_le32(adapter->virt_ver_min);
return -EIO;
dev_warn(&adapter->pdev->dev, "Virtchnl major version greater than supported\n");
return -EINVAL;
dev_warn(&adapter->pdev->dev, "Virtchnl minor version didn't match\n");
/* If we have a mismatch, resend version to update receiver on what
if (!adapter->virt_ver_maj &&
err = -EAGAIN;
adapter->virt_ver_maj = major;
adapter->virt_ver_min = minor;
* idpf_send_get_caps_msg - Send virtchnl get capabilities message
xn_params.recv_buf.iov_base = &adapter->caps;
xn_params.recv_buf.iov_len = sizeof(adapter->caps);
if (reply_sz < sizeof(adapter->caps))
return -EIO;
* idpf_vport_alloc_max_qs - Allocate max queues for a vport
struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
struct virtchnl2_get_capabilities *caps = &adapter->caps;
mutex_lock(&adapter->queue_lock);
max_rx_q = le16_to_cpu(caps->max_rx_q) / default_vports;
max_tx_q = le16_to_cpu(caps->max_tx_q) / default_vports;
if (adapter->num_alloc_vports < default_vports) {
max_q->max_rxq = min_t(u16, max_rx_q, IDPF_MAX_Q);
max_q->max_txq = min_t(u16, max_tx_q, IDPF_MAX_Q);
max_q->max_rxq = IDPF_MIN_Q;
max_q->max_txq = IDPF_MIN_Q;
max_q->max_bufq = max_q->max_rxq * IDPF_MAX_BUFQS_PER_RXQ_GRP;
max_q->max_complq = max_q->max_txq;
if (avail_queues->avail_rxq < max_q->max_rxq ||
avail_queues->avail_txq < max_q->max_txq ||
avail_queues->avail_bufq < max_q->max_bufq ||
avail_queues->avail_complq < max_q->max_complq) {
mutex_unlock(&adapter->queue_lock);
return -EINVAL;
avail_queues->avail_rxq -= max_q->max_rxq;
avail_queues->avail_txq -= max_q->max_txq;
avail_queues->avail_bufq -= max_q->max_bufq;
avail_queues->avail_complq -= max_q->max_complq;
mutex_unlock(&adapter->queue_lock);
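/*
 * Editor's worked example (illustration only, numbers assumed): with
 * caps->max_rx_q = caps->max_tx_q = 64 and two default vports, each of the
 * first two vports may claim up to min(64 / 2, IDPF_MAX_Q) RX and TX queues,
 * while any later vport falls back to IDPF_MIN_Q.  Buffer queues scale as
 * max_rxq * IDPF_MAX_BUFQS_PER_RXQ_GRP, one completion queue is reserved per
 * TX queue, and the claim only succeeds if avail_queues still covers all
 * four totals.
 */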
* idpf_vport_dealloc_max_qs - Deallocate max queues of a vport
mutex_lock(&adapter->queue_lock);
avail_queues = &adapter->avail_queues;
avail_queues->avail_rxq += max_q->max_rxq;
avail_queues->avail_txq += max_q->max_txq;
avail_queues->avail_bufq += max_q->max_bufq;
avail_queues->avail_complq += max_q->max_complq;
mutex_unlock(&adapter->queue_lock);
* idpf_init_avail_queues - Initialize available queues on the device
struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
struct virtchnl2_get_capabilities *caps = &adapter->caps;
avail_queues->avail_rxq = le16_to_cpu(caps->max_rx_q);
avail_queues->avail_txq = le16_to_cpu(caps->max_tx_q);
avail_queues->avail_bufq = le16_to_cpu(caps->max_rx_bufq);
avail_queues->avail_complq = le16_to_cpu(caps->max_tx_complq);
* idpf_get_reg_intr_vecs - Get vector queue register offset
* @reg_vals: Register offsets to store in
int num_regs = 0, i, j;
chunks = &vport->adapter->req_vec_chunks->vchunks;
num_vchunks = le16_to_cpu(chunks->num_vchunks);
for (j = 0; j < num_vchunks; j++) {
chunk = &chunks->vchunks[j];
num_vec = le16_to_cpu(chunk->num_vectors);
reg_val.dyn_ctl_reg = le32_to_cpu(chunk->dynctl_reg_start);
reg_val.itrn_reg = le32_to_cpu(chunk->itrn_reg_start);
reg_val.itrn_index_spacing = le32_to_cpu(chunk->itrn_index_spacing);
dynctl_reg_spacing = le32_to_cpu(chunk->dynctl_reg_spacing);
itrn_reg_spacing = le32_to_cpu(chunk->itrn_reg_spacing);
* idpf_vport_get_q_reg - Get the queue registers for the vport
* @reg_vals: register values needing to be set
* @num_regs: amount we expect to fill
u16 num_chunks = le16_to_cpu(chunks->num_chunks);
while (num_chunks--) {
chunk = &chunks->chunks[num_chunks];
if (le32_to_cpu(chunk->type) != q_type)
num_q = le32_to_cpu(chunk->num_queues);
reg_val = le64_to_cpu(chunk->qtail_reg_start);
reg_val += le32_to_cpu(chunk->qtail_reg_spacing);
* __idpf_queue_reg_init - initialize queue registers
struct idpf_adapter *adapter = vport->adapter;
int i, j, k = 0;
for (i = 0; i < vport->num_txq_grp; i++) {
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
for (j = 0; j < tx_qgrp->num_txq && k < num_regs; j++, k++)
tx_qgrp->txqs[j]->tail =
idpf_get_reg_addr(adapter, reg_vals[k]);
for (i = 0; i < vport->num_rxq_grp; i++) {
struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
u16 num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq && k < num_regs; j++, k++) {
q = rx_qgrp->singleq.rxqs[j];
q->tail = idpf_get_reg_addr(adapter,
reg_vals[k]);
for (i = 0; i < vport->num_rxq_grp; i++) {
struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
u8 num_bufqs = vport->num_bufqs_per_qgrp;
for (j = 0; j < num_bufqs && k < num_regs; j++, k++) {
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
q->tail = idpf_get_reg_addr(adapter,
reg_vals[k]);
return k;
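/*
 * Editor's note (illustration only): the helper returns how many tail
 * registers it actually consumed, so idpf_queue_reg_init() below can compare
 * that count against vport->num_txq / num_rxq / num_bufq and fail with
 * -EINVAL when the device provided fewer queue chunks than expected.
 */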
* idpf_queue_reg_init - initialize queue registers
u16 vport_idx = vport->idx;
return -ENOMEM;
vport_config = vport->adapter->vport_config[vport_idx];
if (vport_config->req_qs_chunks) {
(struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
chunks = &vc_aq->chunks;
vport_params = vport->adapter->vport_params_recvd[vport_idx];
chunks = &vport_params->chunks;
if (num_regs < vport->num_txq) {
ret = -EINVAL;
if (num_regs < vport->num_txq) {
ret = -EINVAL;
if (idpf_is_queue_model_split(vport->rxq_model)) {
if (num_regs < vport->num_bufq) {
ret = -EINVAL;
if (num_regs < vport->num_bufq) {
ret = -EINVAL;
if (num_regs < vport->num_rxq) {
ret = -EINVAL;
if (num_regs < vport->num_rxq) {
ret = -EINVAL;
* idpf_send_create_vport_msg - Send virtchnl create vport message
u16 idx = adapter->next_vport;
if (!adapter->vport_params_reqd[idx]) {
adapter->vport_params_reqd[idx] = kzalloc(buf_size,
if (!adapter->vport_params_reqd[idx])
return -ENOMEM;
vport_msg = adapter->vport_params_reqd[idx];
vport_msg->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
vport_msg->vport_index = cpu_to_le16(idx);
if (adapter->req_tx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ))
vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
if (adapter->req_rx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ))
vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
dev_err(&adapter->pdev->dev, "Not enough queues are available");
if (!adapter->vport_params_recvd[idx]) {
adapter->vport_params_recvd[idx] = kzalloc(IDPF_CTLQ_MAX_BUF_LEN,
if (!adapter->vport_params_recvd[idx]) {
err = -ENOMEM;
xn_params.recv_buf.iov_base = adapter->vport_params_recvd[idx];
kfree(adapter->vport_params_recvd[idx]);
adapter->vport_params_recvd[idx] = NULL;
kfree(adapter->vport_params_reqd[idx]);
adapter->vport_params_reqd[idx] = NULL;
* idpf_check_supported_desc_ids - Verify we have required descriptor support
struct idpf_adapter *adapter = vport->adapter;
vport_msg = adapter->vport_params_recvd[vport->idx];
(vport_msg->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE ||
vport_msg->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)) {
pci_err(adapter->pdev, "singleq mode requested, but not compiled-in\n");
return -EOPNOTSUPP;
rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids);
tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids);
if (idpf_is_queue_model_split(vport->rxq_model)) {
dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n");
vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
vport->base_rxd = true;
if (!idpf_is_queue_model_split(vport->txq_model))
dev_info(&adapter->pdev->dev, "Minimum TX descriptor support not provided, using the default\n");
vport_msg->tx_desc_ids = cpu_to_le64(MIN_SUPPORT_TXDID);
* idpf_send_destroy_vport_msg - Send virtchnl destroy vport message
v_id.vport_id = cpu_to_le32(vport->vport_id);
reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
* idpf_send_enable_vport_msg - Send virtchnl enable vport message
v_id.vport_id = cpu_to_le32(vport->vport_id);
reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
* idpf_send_disable_vport_msg - Send virtchnl disable vport message
v_id.vport_id = cpu_to_le32(vport->vport_id);
reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
* idpf_send_config_tx_queues_msg - Send virtchnl config tx queues message
int i, k = 0;
totqs = vport->num_txq + vport->num_complq;
return -ENOMEM;
for (i = 0; i < vport->num_txq_grp; i++) {
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
int j, sched_mode;
for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
qi[k].queue_id =
cpu_to_le32(tx_qgrp->txqs[j]->q_id);
qi[k].model =
cpu_to_le16(vport->txq_model);
qi[k].type =
qi[k].ring_len =
cpu_to_le16(tx_qgrp->txqs[j]->desc_count);
qi[k].dma_ring_addr =
cpu_to_le64(tx_qgrp->txqs[j]->dma);
if (idpf_is_queue_model_split(vport->txq_model)) {
struct idpf_tx_queue *q = tx_qgrp->txqs[j];
qi[k].tx_compl_queue_id =
cpu_to_le16(tx_qgrp->complq->q_id);
qi[k].relative_queue_id = cpu_to_le16(j);
qi[k].sched_mode =
qi[k].sched_mode =
qi[k].sched_mode =
if (!idpf_is_queue_model_split(vport->txq_model))
qi[k].queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
qi[k].model = cpu_to_le16(vport->txq_model);
qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count);
qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma);
if (idpf_queue_has(FLOW_SCH_EN, tx_qgrp->complq))
qi[k].sched_mode = cpu_to_le16(sched_mode);
k++;
if (k != totqs)
return -EINVAL;
/* Chunk up the queue contexts into multiple messages to avoid
return -ENOMEM;
for (i = 0, k = 0; i < num_msgs; i++) {
ctq->vport_id = cpu_to_le32(vport->vport_id);
ctq->num_qinfo = cpu_to_le16(num_chunks);
memcpy(ctq->qinfo, &qi[k], chunk_sz * num_chunks);
reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
k += num_chunks;
totqs -= num_chunks;
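/*
 * Editor's note (illustration only, numbers assumed): the chunking above
 * keeps each mailbox message within IDPF_CTLQ_MAX_BUF_LEN.  For example,
 * with 40 queue contexts and room for 16 per buffer, the loop sends
 * DIV_ROUND_UP(40, 16) = 3 messages carrying 16, 16 and 8 contexts,
 * advancing k and shrinking totqs by num_chunks on every iteration.
 */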
* idpf_send_config_rx_queues_msg - Send virtchnl config rx queues message
int i, k = 0;
totqs = vport->num_rxq + vport->num_bufq;
return -ENOMEM;
for (i = 0; i < vport->num_rxq_grp; i++) {
struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
int j;
if (!idpf_is_queue_model_split(vport->rxq_model))
for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
&rx_qgrp->splitq.bufq_sets[j].bufq;
qi[k].queue_id = cpu_to_le32(bufq->q_id);
qi[k].model = cpu_to_le16(vport->rxq_model);
qi[k].type =
qi[k].desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
qi[k].ring_len = cpu_to_le16(bufq->desc_count);
qi[k].dma_ring_addr = cpu_to_le64(bufq->dma);
qi[k].data_buffer_size = cpu_to_le32(bufq->rx_buf_size);
qi[k].buffer_notif_stride = IDPF_RX_BUF_STRIDE;
qi[k].rx_buffer_low_watermark =
cpu_to_le16(bufq->rx_buffer_low_watermark);
qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
if (idpf_is_queue_model_split(vport->rxq_model))
num_rxq = rx_qgrp->splitq.num_rxq_sets;
num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq; j++, k++) {
if (!idpf_is_queue_model_split(vport->rxq_model)) {
rxq = rx_qgrp->singleq.rxqs[j];
rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
sets = rxq->bufq_sets;
* set to that of the first buffer queue
rxq->rx_buf_size = sets[0].bufq.rx_buf_size;
qi[k].rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id);
if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) {
qi[k].bufq2_ena = IDPF_BUFQ2_ENA;
qi[k].rx_bufq2_id =
qi[k].rx_buffer_low_watermark =
cpu_to_le16(rxq->rx_buffer_low_watermark);
qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
rxq->rx_hbuf_size = sets[0].bufq.rx_hbuf_size;
qi[k].qflags |=
qi[k].hdr_buffer_size =
cpu_to_le16(rxq->rx_hbuf_size);
qi[k].queue_id = cpu_to_le32(rxq->q_id);
qi[k].model = cpu_to_le16(vport->rxq_model);
qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
qi[k].ring_len = cpu_to_le16(rxq->desc_count);
qi[k].dma_ring_addr = cpu_to_le64(rxq->dma);
qi[k].max_pkt_size = cpu_to_le32(rxq->rx_max_pkt_size);
qi[k].data_buffer_size = cpu_to_le32(rxq->rx_buf_size);
qi[k].qflags |=
qi[k].desc_ids = cpu_to_le64(rxq->rxdids);
if (k != totqs)
return -EINVAL;
/* Chunk up the queue contexts into multiple messages to avoid
return -ENOMEM;
for (i = 0, k = 0; i < num_msgs; i++) {
crq->vport_id = cpu_to_le32(vport->vport_id);
crq->num_qinfo = cpu_to_le16(num_chunks);
memcpy(crq->qinfo, &qi[k], chunk_sz * num_chunks);
reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
k += num_chunks;
totqs -= num_chunks;
* idpf_send_ena_dis_queues_msg - Send virtchnl enable or disable
int i, j, k = 0;
num_txq = vport->num_txq + vport->num_complq;
num_rxq = vport->num_rxq + vport->num_bufq;
return -ENOMEM;
for (i = 0; i < vport->num_txq_grp; i++) {
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
qc[k].start_queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
if (vport->num_txq != k)
return -EINVAL;
if (!idpf_is_queue_model_split(vport->txq_model))
for (i = 0; i < vport->num_txq_grp; i++, k++) {
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
if (vport->num_complq != (k - vport->num_txq))
return -EINVAL;
for (i = 0; i < vport->num_rxq_grp; i++) {
struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
if (idpf_is_queue_model_split(vport->rxq_model))
num_rxq = rx_qgrp->splitq.num_rxq_sets;
num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq; j++, k++) {
if (idpf_is_queue_model_split(vport->rxq_model)) {
qc[k].start_queue_id =
cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_id);
qc[k].type =
qc[k].start_queue_id =
cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_id);
qc[k].type =
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
if (vport->num_rxq != k - (vport->num_txq + vport->num_complq))
return -EINVAL;
if (!idpf_is_queue_model_split(vport->rxq_model))
for (i = 0; i < vport->num_rxq_grp; i++) {
struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
qc[k].type =
qc[k].start_queue_id = cpu_to_le32(q->q_id);
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
if (vport->num_bufq != k - (vport->num_txq +
vport->num_complq +
vport->num_rxq))
return -EINVAL;
return -ENOMEM;
for (i = 0, k = 0; i < num_msgs; i++) {
eq->vport_id = cpu_to_le32(vport->vport_id);
eq->chunks.num_chunks = cpu_to_le16(num_chunks);
qcs = &eq->chunks;
memcpy(qcs->chunks, &qc[k], chunk_sz * num_chunks);
reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
k += num_chunks;
num_q -= num_chunks;
* idpf_send_map_unmap_queue_vector_msg - Send virtchnl map or unmap queue
int i, j, k = 0;
num_q = vport->num_txq + vport->num_rxq;
return -ENOMEM;
for (i = 0; i < vport->num_txq_grp; i++) {
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
vqv[k].queue_type =
vqv[k].queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
if (idpf_is_queue_model_split(vport->txq_model)) {
vqv[k].vector_id =
cpu_to_le16(tx_qgrp->complq->q_vector->v_idx);
vqv[k].itr_idx =
cpu_to_le32(tx_qgrp->complq->q_vector->tx_itr_idx);
vqv[k].vector_id =
cpu_to_le16(tx_qgrp->txqs[j]->q_vector->v_idx);
vqv[k].itr_idx =
cpu_to_le32(tx_qgrp->txqs[j]->q_vector->tx_itr_idx);
if (vport->num_txq != k)
return -EINVAL;
for (i = 0; i < vport->num_rxq_grp; i++) {
struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
if (idpf_is_queue_model_split(vport->rxq_model))
num_rxq = rx_qgrp->splitq.num_rxq_sets;
num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq; j++, k++) {
if (idpf_is_queue_model_split(vport->rxq_model))
rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
rxq = rx_qgrp->singleq.rxqs[j];
vqv[k].queue_type =
vqv[k].queue_id = cpu_to_le32(rxq->q_id);
vqv[k].vector_id = cpu_to_le16(rxq->q_vector->v_idx);
vqv[k].itr_idx = cpu_to_le32(rxq->q_vector->rx_itr_idx);
if (idpf_is_queue_model_split(vport->txq_model)) {
if (vport->num_rxq != k - vport->num_complq)
return -EINVAL;
if (vport->num_rxq != k - vport->num_txq)
return -EINVAL;
return -ENOMEM;
for (i = 0, k = 0; i < num_msgs; i++) {
vqvm->vport_id = cpu_to_le32(vport->vport_id);
vqvm->num_qv_maps = cpu_to_le16(num_chunks);
memcpy(vqvm->qv_maps, &vqv[k], chunk_sz * num_chunks);
reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
k += num_chunks;
num_q -= num_chunks;
* idpf_send_enable_queues_msg - send enable queues virtchnl message
* idpf_send_disable_queues_msg - send disable queues virtchnl message
/* switch to poll mode as interrupts will be disabled after disable
for (i = 0; i < vport->num_txq; i++)
idpf_queue_set(POLL_MODE, vport->txqs[i]);
/* schedule the napi to receive all the marker packets */
for (i = 0; i < vport->num_q_vectors; i++)
napi_schedule(&vport->q_vectors[i].napi);
* idpf_convert_reg_to_queue_chunks - Copy queue chunk information to the right
* @dchunks: Destination chunks to store data to
* @schunks: Source chunks to copy data from
* @num_chunks: number of chunks to copy
* idpf_send_delete_queues_msg - send delete queues virtchnl message
u16 vport_idx = vport->idx;
vport_config = vport->adapter->vport_config[vport_idx];
if (vport_config->req_qs_chunks) {
chunks = &vport_config->req_qs_chunks->chunks;
vport_params = vport->adapter->vport_params_recvd[vport_idx];
chunks = &vport_params->chunks;
num_chunks = le16_to_cpu(chunks->num_chunks);
return -ENOMEM;
eq->vport_id = cpu_to_le32(vport->vport_id);
eq->chunks.num_chunks = cpu_to_le16(num_chunks);
idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->chunks,
reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
* idpf_send_config_queues_msg - Send config queues virtchnl message
* idpf_send_add_queues_msg - Send virtchnl add queues message
u16 vport_idx = vport->idx;
return -ENOMEM;
vport_config = vport->adapter->vport_config[vport_idx];
kfree(vport_config->req_qs_chunks);
vport_config->req_qs_chunks = NULL;
aq.vport_id = cpu_to_le32(vport->vport_id);
reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
if (le16_to_cpu(vc_msg->num_tx_q) != num_tx_q ||
le16_to_cpu(vc_msg->num_rx_q) != num_rx_q ||
le16_to_cpu(vc_msg->num_tx_complq) != num_complq ||
le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq)
return -EINVAL;
le16_to_cpu(vc_msg->chunks.num_chunks));
return -EIO;
vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL);
if (!vport_config->req_qs_chunks)
return -ENOMEM;
* idpf_send_alloc_vectors_msg - Send virtchnl alloc vectors message
* @num_vectors: number of vectors to be allocated
return -ENOMEM;
num_vchunks = le16_to_cpu(rcvd_vec->vchunks.num_vchunks);
return -EIO;
return -EINVAL;
kfree(adapter->req_vec_chunks);
adapter->req_vec_chunks = kmemdup(rcvd_vec, size, GFP_KERNEL);
if (!adapter->req_vec_chunks)
return -ENOMEM;
if (le16_to_cpu(adapter->req_vec_chunks->num_vectors) < num_vectors) {
kfree(adapter->req_vec_chunks);
adapter->req_vec_chunks = NULL;
return -EINVAL;
* idpf_send_dealloc_vectors_msg - Send virtchnl deallocate vectors message
struct virtchnl2_alloc_vectors *ac = adapter->req_vec_chunks;
struct virtchnl2_vector_chunks *vcs = &ac->vchunks;
buf_size = struct_size(vcs, vchunks, le16_to_cpu(vcs->num_vchunks));
kfree(adapter->req_vec_chunks);
adapter->req_vec_chunks = NULL;
* idpf_get_max_vfs - Get max number of vfs supported
return le16_to_cpu(adapter->caps.max_sriov_vfs);
* idpf_send_set_sriov_vfs_msg - Send virtchnl set sriov vfs message
* @num_vfs: number of virtual functions to be created
* idpf_send_get_stats_msg - Send virtchnl get statistics message
* @vport: vport to get stats for
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
struct rtnl_link_stats64 *netstats = &np->netstats;
if (np->state <= __IDPF_VPORT_DOWN)
stats_msg.vport_id = cpu_to_le32(vport->vport_id);
reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
return -EIO;
spin_lock_bh(&np->stats_lock);
netstats->rx_packets = le64_to_cpu(stats_msg.rx_unicast) +
netstats->tx_packets = le64_to_cpu(stats_msg.tx_unicast) +
netstats->rx_bytes = le64_to_cpu(stats_msg.rx_bytes);
netstats->tx_bytes = le64_to_cpu(stats_msg.tx_bytes);
netstats->rx_errors = le64_to_cpu(stats_msg.rx_errors);
netstats->tx_errors = le64_to_cpu(stats_msg.tx_errors);
netstats->rx_dropped = le64_to_cpu(stats_msg.rx_discards);
netstats->tx_dropped = le64_to_cpu(stats_msg.tx_discards);
vport->port_stats.vport_stats = stats_msg;
spin_unlock_bh(&np->stats_lock);
* idpf_send_get_set_rss_lut_msg - Send virtchnl get or set rss lut message
* @get: flag to set or get rss look up table
&vport->adapter->vport_config[vport->idx]->user_config.rss_data;
buf_size = struct_size(rl, lut, rss_data->rss_lut_size);
return -ENOMEM;
rl->vport_id = cpu_to_le32(vport->vport_id);
return -ENOMEM;
rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size);
for (i = 0; i < rss_data->rss_lut_size; i++)
rl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]);
reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
return -EIO;
lut_buf_size = le16_to_cpu(recv_rl->lut_entries) * sizeof(u32);
return -EIO;
if (rss_data->rss_lut_size == le16_to_cpu(recv_rl->lut_entries))
rss_data->rss_lut_size = le16_to_cpu(recv_rl->lut_entries);
kfree(rss_data->rss_lut);
rss_data->rss_lut = kzalloc(lut_buf_size, GFP_KERNEL);
if (!rss_data->rss_lut) {
rss_data->rss_lut_size = 0;
return -ENOMEM;
memcpy(rss_data->rss_lut, recv_rl->lut, rss_data->rss_lut_size);
* idpf_send_get_set_rss_key_msg - Send virtchnl get or set rss key message
* @get: flag to set or get the RSS hash key
2431 &vport->adapter->vport_config[vport->idx]->user_config.rss_data; in idpf_send_get_set_rss_key_msg()
2432 buf_size = struct_size(rk, key_flex, rss_data->rss_key_size); in idpf_send_get_set_rss_key_msg()
2435 return -ENOMEM; in idpf_send_get_set_rss_key_msg()
2437 rk->vport_id = cpu_to_le32(vport->vport_id); in idpf_send_get_set_rss_key_msg()
2444 return -ENOMEM; in idpf_send_get_set_rss_key_msg()
2450 rk->key_len = cpu_to_le16(rss_data->rss_key_size); in idpf_send_get_set_rss_key_msg()
2451 for (i = 0; i < rss_data->rss_key_size; i++) in idpf_send_get_set_rss_key_msg()
2452 rk->key_flex[i] = rss_data->rss_key[i]; in idpf_send_get_set_rss_key_msg()
2457 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_get_set_rss_key_msg()
2463 return -EIO; in idpf_send_get_set_rss_key_msg()
2466 le16_to_cpu(recv_rk->key_len)); in idpf_send_get_set_rss_key_msg()
2468 return -EIO; in idpf_send_get_set_rss_key_msg()
2471 if (rss_data->rss_key_size == key_size) in idpf_send_get_set_rss_key_msg()
2474 rss_data->rss_key_size = key_size; in idpf_send_get_set_rss_key_msg()
2475 kfree(rss_data->rss_key); in idpf_send_get_set_rss_key_msg()
2476 rss_data->rss_key = kzalloc(key_size, GFP_KERNEL); in idpf_send_get_set_rss_key_msg()
2477 if (!rss_data->rss_key) { in idpf_send_get_set_rss_key_msg()
2478 rss_data->rss_key_size = 0; in idpf_send_get_set_rss_key_msg()
2479 return -ENOMEM; in idpf_send_get_set_rss_key_msg()
2483 memcpy(rss_data->rss_key, recv_rk->key_flex, rss_data->rss_key_size); in idpf_send_get_set_rss_key_msg()
2489 * idpf_fill_ptype_lookup - Fill L3 specific fields in ptype lookup table
2500 if (!pstate->outer_ip || !pstate->outer_frag) { in idpf_fill_ptype_lookup()
2501 pstate->outer_ip = true; in idpf_fill_ptype_lookup()
2504 ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV4; in idpf_fill_ptype_lookup()
2506 ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV6; in idpf_fill_ptype_lookup()
2509 ptype->outer_frag = LIBETH_RX_PT_FRAG; in idpf_fill_ptype_lookup()
2510 pstate->outer_frag = true; in idpf_fill_ptype_lookup()
2513 ptype->tunnel_type = LIBETH_RX_PT_TUNNEL_IP_IP; in idpf_fill_ptype_lookup()
2514 pstate->tunnel_state = IDPF_PTYPE_TUNNEL_IP; in idpf_fill_ptype_lookup()
2517 ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV4; in idpf_fill_ptype_lookup()
2519 ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV6; in idpf_fill_ptype_lookup()
2522 ptype->tunnel_end_frag = LIBETH_RX_PT_FRAG; in idpf_fill_ptype_lookup()
2528 if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 && in idpf_finalize_ptype_lookup()
2529 ptype->inner_prot) in idpf_finalize_ptype_lookup()
2530 ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L4; in idpf_finalize_ptype_lookup()
2531 else if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 && in idpf_finalize_ptype_lookup()
2532 ptype->outer_ip) in idpf_finalize_ptype_lookup()
2533 ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L3; in idpf_finalize_ptype_lookup()
2534 else if (ptype->outer_ip == LIBETH_RX_PT_OUTER_L2) in idpf_finalize_ptype_lookup()
2535 ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L2; in idpf_finalize_ptype_lookup()
2537 ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_NONE; in idpf_finalize_ptype_lookup()
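/*
 * A standalone sketch of the payload-layer demotion logic above: a ptype
 * provisionally marked as L2 payload is promoted to L4 if an inner protocol
 * was parsed, to L3 if only an outer IP header was parsed, kept at L2 for
 * plain L2 outer types, and cleared otherwise. Enum names are illustrative
 * placeholders rather than the libeth definitions.
 */
enum sketch_payload_layer {
	SKETCH_PAYLOAD_NONE,
	SKETCH_PAYLOAD_L2,
	SKETCH_PAYLOAD_L3,
	SKETCH_PAYLOAD_L4,
};

static enum sketch_payload_layer
sketch_finalize_payload(int provisional_l2, int has_inner_prot,
			int has_outer_ip, int outer_is_plain_l2)
{
	if (provisional_l2 && has_inner_prot)
		return SKETCH_PAYLOAD_L4;
	if (provisional_l2 && has_outer_ip)
		return SKETCH_PAYLOAD_L3;
	if (outer_is_plain_l2)
		return SKETCH_PAYLOAD_L2;
	return SKETCH_PAYLOAD_NONE;
}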
2543 * idpf_send_get_rx_ptype_msg - Send virtchnl get rx ptype info message in idpf_send_get_rx_ptype_msg()
2554 struct idpf_adapter *adapter = vport->adapter; in idpf_send_get_rx_ptype_msg()
2558 int i, j, k; in idpf_send_get_rx_ptype_msg() local
2560 if (vport->rx_ptype_lkup) in idpf_send_get_rx_ptype_msg()
2563 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_send_get_rx_ptype_msg()
2570 return -ENOMEM; in idpf_send_get_rx_ptype_msg()
2574 return -ENOMEM; in idpf_send_get_rx_ptype_msg()
2578 return -ENOMEM; in idpf_send_get_rx_ptype_msg()
2588 get_ptype_info->start_ptype_id = cpu_to_le16(next_ptype_id); in idpf_send_get_rx_ptype_msg()
2591 get_ptype_info->num_ptypes = in idpf_send_get_rx_ptype_msg()
2592 cpu_to_le16(max_ptype - next_ptype_id); in idpf_send_get_rx_ptype_msg()
2594 get_ptype_info->num_ptypes = in idpf_send_get_rx_ptype_msg()
2601 ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes); in idpf_send_get_rx_ptype_msg()
2603 return -EINVAL; in idpf_send_get_rx_ptype_msg()
2605 next_ptype_id = le16_to_cpu(get_ptype_info->start_ptype_id) + in idpf_send_get_rx_ptype_msg()
2606 le16_to_cpu(get_ptype_info->num_ptypes); in idpf_send_get_rx_ptype_msg()
2610 for (i = 0; i < le16_to_cpu(ptype_info->num_ptypes); i++) { in idpf_send_get_rx_ptype_msg()
2620 return -EINVAL; in idpf_send_get_rx_ptype_msg()
2623 if (le16_to_cpu(ptype->ptype_id_10) == in idpf_send_get_rx_ptype_msg()
2627 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_send_get_rx_ptype_msg()
2628 k = le16_to_cpu(ptype->ptype_id_10); in idpf_send_get_rx_ptype_msg()
2630 k = ptype->ptype_id_8; in idpf_send_get_rx_ptype_msg()
2632 for (j = 0; j < ptype->proto_id_count; j++) { in idpf_send_get_rx_ptype_msg()
2633 id = le16_to_cpu(ptype->proto_id[j]); in idpf_send_get_rx_ptype_msg()
2638 ptype_lkup[k].tunnel_type = in idpf_send_get_rx_ptype_msg()
2645 ptype_lkup[k].outer_ip = in idpf_send_get_rx_ptype_msg()
2649 ptype_lkup[k].tunnel_type = in idpf_send_get_rx_ptype_msg()
2656 idpf_fill_ptype_lookup(&ptype_lkup[k], in idpf_send_get_rx_ptype_msg()
2661 idpf_fill_ptype_lookup(&ptype_lkup[k], in idpf_send_get_rx_ptype_msg()
2666 idpf_fill_ptype_lookup(&ptype_lkup[k], in idpf_send_get_rx_ptype_msg()
2671 idpf_fill_ptype_lookup(&ptype_lkup[k], in idpf_send_get_rx_ptype_msg()
2676 ptype_lkup[k].inner_prot = in idpf_send_get_rx_ptype_msg()
2680 ptype_lkup[k].inner_prot = in idpf_send_get_rx_ptype_msg()
2684 ptype_lkup[k].inner_prot = in idpf_send_get_rx_ptype_msg()
2688 ptype_lkup[k].inner_prot = in idpf_send_get_rx_ptype_msg()
2692 ptype_lkup[k].payload_layer = in idpf_send_get_rx_ptype_msg()
2748 idpf_finalize_ptype_lookup(&ptype_lkup[k]); in idpf_send_get_rx_ptype_msg()
2753 vport->rx_ptype_lkup = no_free_ptr(ptype_lkup); in idpf_send_get_rx_ptype_msg()
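/*
 * A minimal sketch of the request pagination driven above: ptype descriptors
 * are fetched in fixed-size windows until max_ptype is reached, and the
 * running total of received entries is sanity-checked against the maximum.
 * The window size and callback are placeholders, not the driver's values.
 */
#include <stdint.h>

#define SKETCH_PTYPES_PER_BUF	64	/* illustrative window size */

/* request() returns how many entries the device reported for the window */
static int sketch_fetch_all_ptypes(uint16_t max_ptype,
				   uint16_t (*request)(uint16_t start,
						       uint16_t count))
{
	uint16_t next_id = 0, received = 0;

	while (next_id < max_ptype) {
		uint16_t count = max_ptype - next_id;

		if (count > SKETCH_PTYPES_PER_BUF)
			count = SKETCH_PTYPES_PER_BUF;

		received += request(next_id, count);
		if (received > max_ptype)
			return -1;	/* device reported more than exists */

		next_id += count;
	}

	return 0;
}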
2759 * idpf_send_ena_dis_loopback_msg - Send virtchnl enable/disable loopback
2771 loopback.vport_id = cpu_to_le32(vport->vport_id); in idpf_send_ena_dis_loopback_msg()
2778 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_ena_dis_loopback_msg()
2784 * idpf_find_ctlq - Given a type and id, find ctlq info
2786 * @type: type of ctlq to find in idpf_find_ctlq()
2787 * @id: ctlq id to find
2789 * Returns pointer to found ctlq info struct, NULL otherwise.
2796 list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list) in idpf_find_ctlq()
2797 if (cq->q_id == id && cq->cq_type == type) in idpf_find_ctlq()
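/*
 * A self-contained sketch of the control-queue lookup pattern above: walk a
 * linked list of queue descriptors and return the first one whose (type, id)
 * pair matches, as done when resolving the TX and RX mailbox queues after
 * controlq init. The list node and enum below are simplified stand-ins.
 */
#include <stddef.h>

enum sketch_ctlq_type { SKETCH_CTLQ_TX, SKETCH_CTLQ_RX };

struct sketch_ctlq_info {
	enum sketch_ctlq_type cq_type;
	int q_id;
	struct sketch_ctlq_info *next;
};

static struct sketch_ctlq_info *
sketch_find_ctlq(struct sketch_ctlq_info *head, enum sketch_ctlq_type type,
		 int id)
{
	for (struct sketch_ctlq_info *cq = head; cq; cq = cq->next)
		if (cq->q_id == id && cq->cq_type == type)
			return cq;

	return NULL;
}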
2804 * idpf_init_dflt_mbx - Setup default mailbox parameters and make request
2825 struct idpf_hw *hw = &adapter->hw; in idpf_init_dflt_mbx()
2828 adapter->dev_ops.reg_ops.ctlq_reg_init(ctlq_info); in idpf_init_dflt_mbx()
2834 hw->asq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_TX, in idpf_init_dflt_mbx()
2836 hw->arq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_RX, in idpf_init_dflt_mbx()
2839 if (!hw->asq || !hw->arq) { in idpf_init_dflt_mbx()
2842 return -ENOENT; in idpf_init_dflt_mbx()
2845 adapter->state = __IDPF_VER_CHECK; in idpf_init_dflt_mbx()
2851 * idpf_deinit_dflt_mbx - Free up the default mailbox ctlqs
2856 if (adapter->hw.arq && adapter->hw.asq) { in idpf_deinit_dflt_mbx()
2858 idpf_ctlq_deinit(&adapter->hw); in idpf_deinit_dflt_mbx()
2860 adapter->hw.arq = NULL; in idpf_deinit_dflt_mbx()
2861 adapter->hw.asq = NULL; in idpf_deinit_dflt_mbx()
2865 * idpf_vport_params_buf_rel - Release memory for MailBox resources
2868 * Will release the memory holding the vport parameters received over the MailBox
2872 kfree(adapter->vport_params_recvd); in idpf_vport_params_buf_rel()
2873 adapter->vport_params_recvd = NULL; in idpf_vport_params_buf_rel()
2874 kfree(adapter->vport_params_reqd); in idpf_vport_params_buf_rel()
2875 adapter->vport_params_reqd = NULL; in idpf_vport_params_buf_rel()
2876 kfree(adapter->vport_ids); in idpf_vport_params_buf_rel()
2877 adapter->vport_ids = NULL; in idpf_vport_params_buf_rel()
2881 * idpf_vport_params_buf_alloc - Allocate memory for MailBox resources
2884 * Will allocate memory to hold the vport parameters received over the MailBox
2890 adapter->vport_params_reqd = kcalloc(num_max_vports, in idpf_vport_params_buf_alloc()
2891 sizeof(*adapter->vport_params_reqd), in idpf_vport_params_buf_alloc()
2893 if (!adapter->vport_params_reqd) in idpf_vport_params_buf_alloc()
2894 return -ENOMEM; in idpf_vport_params_buf_alloc()
2896 adapter->vport_params_recvd = kcalloc(num_max_vports, in idpf_vport_params_buf_alloc()
2897 sizeof(*adapter->vport_params_recvd), in idpf_vport_params_buf_alloc()
2899 if (!adapter->vport_params_recvd) in idpf_vport_params_buf_alloc()
2902 adapter->vport_ids = kcalloc(num_max_vports, sizeof(u32), GFP_KERNEL); in idpf_vport_params_buf_alloc()
2903 if (!adapter->vport_ids) in idpf_vport_params_buf_alloc()
2906 if (adapter->vport_config) in idpf_vport_params_buf_alloc()
2909 adapter->vport_config = kcalloc(num_max_vports, in idpf_vport_params_buf_alloc()
2910 sizeof(*adapter->vport_config), in idpf_vport_params_buf_alloc()
2912 if (!adapter->vport_config) in idpf_vport_params_buf_alloc()
2920 return -ENOMEM; in idpf_vport_params_buf_alloc()
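/*
 * A compact userspace sketch of the staged allocation/unwind pattern used
 * above: each per-vport array is allocated in turn, and any failure releases
 * everything already allocated so the caller sees all-or-nothing state. The
 * struct below is a simplified stand-in for the adapter fields.
 */
#include <stdint.h>
#include <stdlib.h>

struct sketch_vport_bufs {
	void **params_reqd;
	void **params_recvd;
	uint32_t *ids;
};

static void sketch_vport_bufs_rel(struct sketch_vport_bufs *b)
{
	free(b->params_recvd);
	b->params_recvd = NULL;
	free(b->params_reqd);
	b->params_reqd = NULL;
	free(b->ids);
	b->ids = NULL;
}

static int sketch_vport_bufs_alloc(struct sketch_vport_bufs *b, size_t n)
{
	b->params_reqd = calloc(n, sizeof(*b->params_reqd));
	if (!b->params_reqd)
		return -1;

	b->params_recvd = calloc(n, sizeof(*b->params_recvd));
	if (!b->params_recvd)
		goto err_mem;

	b->ids = calloc(n, sizeof(*b->ids));
	if (!b->ids)
		goto err_mem;

	return 0;

err_mem:
	sketch_vport_bufs_rel(b);
	return -1;
}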
2924 * idpf_vc_core_init - Initialize state machine and get driver specific
2930 * initialized, allocate memory to store vport specific information and also
2933 * Returns 0 on success, -EAGAIN if the function will get called again,
2942 if (!adapter->vcxn_mngr) { in idpf_vc_core_init()
2943 adapter->vcxn_mngr = kzalloc(sizeof(*adapter->vcxn_mngr), GFP_KERNEL); in idpf_vc_core_init()
2944 if (!adapter->vcxn_mngr) { in idpf_vc_core_init()
2945 err = -ENOMEM; in idpf_vc_core_init()
2949 idpf_vc_xn_init(adapter->vcxn_mngr); in idpf_vc_core_init()
2951 while (adapter->state != __IDPF_INIT_SW) { in idpf_vc_core_init()
2952 switch (adapter->state) { in idpf_vc_core_init()
2958 adapter->state = __IDPF_GET_CAPS; in idpf_vc_core_init()
2960 case -EAGAIN: in idpf_vc_core_init()
2972 adapter->state = __IDPF_INIT_SW; in idpf_vc_core_init()
2975 dev_err(&adapter->pdev->dev, "Device is in bad state: %d\n", in idpf_vc_core_init()
2976 adapter->state); in idpf_vc_core_init()
2977 err = -EINVAL; in idpf_vc_core_init()
2988 pci_sriov_set_totalvfs(adapter->pdev, idpf_get_max_vfs(adapter)); in idpf_vc_core_init()
2990 adapter->max_vports = num_max_vports; in idpf_vc_core_init()
2991 adapter->vports = kcalloc(num_max_vports, sizeof(*adapter->vports), in idpf_vc_core_init()
2993 if (!adapter->vports) in idpf_vc_core_init()
2994 return -ENOMEM; in idpf_vc_core_init()
2996 if (!adapter->netdevs) { in idpf_vc_core_init()
2997 adapter->netdevs = kcalloc(num_max_vports, in idpf_vc_core_init()
3000 if (!adapter->netdevs) { in idpf_vc_core_init()
3001 err = -ENOMEM; in idpf_vc_core_init()
3008 dev_err(&adapter->pdev->dev, "Failed to alloc vport params buffer: %d\n", in idpf_vc_core_init()
3016 queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0); in idpf_vc_core_init()
3018 queue_delayed_work(adapter->serv_wq, &adapter->serv_task, in idpf_vc_core_init()
3019 msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07))); in idpf_vc_core_init()
3023 dev_err(&adapter->pdev->dev, "failed to enable interrupt vectors: %d\n", in idpf_vc_core_init()
3031 * to prevent every function from making the same call simultaneously. in idpf_vc_core_init()
3033 queue_delayed_work(adapter->init_wq, &adapter->init_task, in idpf_vc_core_init()
3034 msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07))); in idpf_vc_core_init()
3036 set_bit(IDPF_VC_CORE_INIT, adapter->flags); in idpf_vc_core_init()
3041 cancel_delayed_work_sync(&adapter->serv_task); in idpf_vc_core_init()
3042 cancel_delayed_work_sync(&adapter->mbx_task); in idpf_vc_core_init()
3045 kfree(adapter->vports); in idpf_vc_core_init()
3046 adapter->vports = NULL; in idpf_vc_core_init()
3050 /* Don't retry if we're trying to go down, just bail. */ in idpf_vc_core_init()
3051 if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) in idpf_vc_core_init()
3054 if (++adapter->mb_wait_count > IDPF_MB_MAX_ERR) { in idpf_vc_core_init()
3055 dev_err(&adapter->pdev->dev, "Failed to establish mailbox communications with hardware\n"); in idpf_vc_core_init()
3057 return -EFAULT; in idpf_vc_core_init()
3060 * register writes might not have taken effect. Retry to initialize in idpf_vc_core_init()
3063 adapter->state = __IDPF_VER_CHECK; in idpf_vc_core_init()
3064 if (adapter->vcxn_mngr) in idpf_vc_core_init()
3065 idpf_vc_xn_shutdown(adapter->vcxn_mngr); in idpf_vc_core_init()
3066 set_bit(IDPF_HR_DRV_LOAD, adapter->flags); in idpf_vc_core_init()
3067 queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task, in idpf_vc_core_init()
3070 return -EAGAIN; in idpf_vc_core_init()
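/*
 * A minimal sketch of the bring-up state machine driven above: the driver
 * steps VER_CHECK -> GET_CAPS -> INIT_SW, restarts the whole sequence on a
 * transient failure, and gives up after a bounded number of attempts. In the
 * driver the restart is deferred to the vc_event work item, and per-function
 * work is staggered by a few milliseconds based on the PCI devfn so sibling
 * functions do not hammer the device simultaneously. The states, step
 * callback and retry limit below are illustrative placeholders.
 */
enum sketch_state { SK_VER_CHECK, SK_GET_CAPS, SK_INIT_SW, SK_FAILED };

#define SK_MAX_RETRIES	5	/* stand-in for the driver's retry limit */

/* step() returns 0 on success, negative on failure for the given state */
static enum sketch_state sketch_core_init(int (*step)(enum sketch_state))
{
	enum sketch_state state = SK_VER_CHECK;
	int retries = 0;

	while (state != SK_INIT_SW) {
		if (step(state) == 0) {
			/* advance to the next stage on success */
			state = (state == SK_VER_CHECK) ? SK_GET_CAPS
							: SK_INIT_SW;
			continue;
		}

		/* transient failure: restart the sequence, bounded retries */
		if (++retries > SK_MAX_RETRIES)
			return SK_FAILED;
		state = SK_VER_CHECK;
	}

	return SK_INIT_SW;
}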
3074 * idpf_vc_core_deinit - Device deinit routine
3080 if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags)) in idpf_vc_core_deinit()
3085 idpf_vc_xn_shutdown(adapter->vcxn_mngr); in idpf_vc_core_deinit()
3087 cancel_delayed_work_sync(&adapter->serv_task); in idpf_vc_core_deinit()
3088 cancel_delayed_work_sync(&adapter->mbx_task); in idpf_vc_core_deinit()
3092 kfree(adapter->vports); in idpf_vc_core_deinit()
3093 adapter->vports = NULL; in idpf_vc_core_deinit()
3095 clear_bit(IDPF_VC_CORE_INIT, adapter->flags); in idpf_vc_core_deinit()
3099 * idpf_vport_alloc_vec_indexes - Get relative vector indexes
3113 vec_info.num_curr_vecs = vport->num_q_vectors; in idpf_vport_alloc_vec_indexes()
3114 vec_info.num_req_vecs = max(vport->num_txq, vport->num_rxq); in idpf_vport_alloc_vec_indexes()
3115 vec_info.default_vport = vport->default_vport; in idpf_vport_alloc_vec_indexes()
3116 vec_info.index = vport->idx; in idpf_vport_alloc_vec_indexes()
3118 num_alloc_vecs = idpf_req_rel_vector_indexes(vport->adapter, in idpf_vport_alloc_vec_indexes()
3119 vport->q_vector_idxs, in idpf_vport_alloc_vec_indexes()
3122 dev_err(&vport->adapter->pdev->dev, "Vector distribution failed: %d\n", in idpf_vport_alloc_vec_indexes()
3124 return -EINVAL; in idpf_vport_alloc_vec_indexes()
3127 vport->num_q_vectors = num_alloc_vecs; in idpf_vport_alloc_vec_indexes()
3133 * idpf_vport_init - Initialize virtual port
3134 * @vport: virtual port to be initialized
3141 struct idpf_adapter *adapter = vport->adapter; in idpf_vport_init()
3147 u16 idx = vport->idx; in idpf_vport_init()
3149 vport_config = adapter->vport_config[idx]; in idpf_vport_init()
3150 rss_data = &vport_config->user_config.rss_data; in idpf_vport_init()
3151 vport_msg = adapter->vport_params_recvd[idx]; in idpf_vport_init()
3153 vport_config->max_q.max_txq = max_q->max_txq; in idpf_vport_init()
3154 vport_config->max_q.max_rxq = max_q->max_rxq; in idpf_vport_init()
3155 vport_config->max_q.max_complq = max_q->max_complq; in idpf_vport_init()
3156 vport_config->max_q.max_bufq = max_q->max_bufq; in idpf_vport_init()
3158 vport->txq_model = le16_to_cpu(vport_msg->txq_model); in idpf_vport_init()
3159 vport->rxq_model = le16_to_cpu(vport_msg->rxq_model); in idpf_vport_init()
3160 vport->vport_type = le16_to_cpu(vport_msg->vport_type); in idpf_vport_init()
3161 vport->vport_id = le32_to_cpu(vport_msg->vport_id); in idpf_vport_init()
3163 rss_data->rss_key_size = min_t(u16, NETDEV_RSS_KEY_LEN, in idpf_vport_init()
3164 le16_to_cpu(vport_msg->rss_key_size)); in idpf_vport_init()
3165 rss_data->rss_lut_size = le16_to_cpu(vport_msg->rss_lut_size); in idpf_vport_init()
3167 ether_addr_copy(vport->default_mac_addr, vport_msg->default_mac_addr); in idpf_vport_init()
3168 vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - LIBETH_RX_LL_LEN; in idpf_vport_init()
3171 memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS); in idpf_vport_init()
3172 memcpy(vport->tx_itr_profile, tx_itr, IDPF_DIM_PROFILE_SLOTS); in idpf_vport_init()
3181 vport->crc_enable = adapter->crc_enable; in idpf_vport_init()
3185 * idpf_get_vec_ids - Initialize vector ids from Mailbox parameters
3186 * @adapter: adapter structure to get the mailbox vector id
3200 u16 num_chunks = le16_to_cpu(chunks->num_vchunks); in idpf_get_vec_ids()
3202 int i, j; in idpf_get_vec_ids() local
3204 vecids[num_vecid_filled] = adapter->mb_vector.v_idx; in idpf_get_vec_ids()
3207 for (j = 0; j < num_chunks; j++) { in idpf_get_vec_ids()
3211 chunk = &chunks->vchunks[j]; in idpf_get_vec_ids()
3212 num_vec = le16_to_cpu(chunk->num_vectors); in idpf_get_vec_ids()
3213 start_vecid = le16_to_cpu(chunk->start_vector_id); in idpf_get_vec_ids()
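/*
 * A standalone sketch of the vector-chunk expansion above: the device hands
 * back (start_vector_id, num_vectors) ranges, and the driver flattens them
 * into an array, keeping slot 0 for the mailbox vector. The struct and names
 * are simplified stand-ins for the virtchnl2 layout.
 */
#include <stddef.h>
#include <stdint.h>

struct sketch_vec_chunk {
	uint16_t start_vector_id;
	uint16_t num_vectors;
};

static int sketch_expand_vec_chunks(uint16_t *vecids, int num_vecids,
				    uint16_t mbx_vec_idx,
				    const struct sketch_vec_chunk *chunks,
				    size_t num_chunks)
{
	int filled = 0;

	if (num_vecids < 1)
		return 0;

	vecids[filled++] = mbx_vec_idx;	/* slot 0: mailbox interrupt */

	for (size_t j = 0; j < num_chunks; j++) {
		for (uint16_t i = 0; i < chunks[j].num_vectors; i++) {
			if (filled >= num_vecids)
				return filled;
			vecids[filled++] = chunks[j].start_vector_id + i;
		}
	}

	return filled;	/* caller compares this against what it needs */
}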
3230 * idpf_vport_get_queue_ids - Initialize queue ids from Mailbox parameters
3242 u16 num_chunks = le16_to_cpu(chunks->num_chunks); in idpf_vport_get_queue_ids()
3246 while (num_chunks--) { in idpf_vport_get_queue_ids()
3249 chunk = &chunks->chunks[num_chunks]; in idpf_vport_get_queue_ids()
3250 if (le32_to_cpu(chunk->type) != q_type) in idpf_vport_get_queue_ids()
3253 num_q = le32_to_cpu(chunk->num_queues); in idpf_vport_get_queue_ids()
3254 start_q_id = le32_to_cpu(chunk->start_queue_id); in idpf_vport_get_queue_ids()
3271 * __idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
3285 int i, j, k = 0; in __idpf_vport_queue_ids_init() local
3289 for (i = 0; i < vport->num_txq_grp; i++) { in __idpf_vport_queue_ids_init()
3290 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; in __idpf_vport_queue_ids_init()
3292 for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++) in __idpf_vport_queue_ids_init()
3293 tx_qgrp->txqs[j]->q_id = qids[k]; in __idpf_vport_queue_ids_init()
3297 for (i = 0; i < vport->num_rxq_grp; i++) { in __idpf_vport_queue_ids_init()
3298 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in __idpf_vport_queue_ids_init()
3301 if (idpf_is_queue_model_split(vport->rxq_model)) in __idpf_vport_queue_ids_init()
3302 num_rxq = rx_qgrp->splitq.num_rxq_sets; in __idpf_vport_queue_ids_init()
3304 num_rxq = rx_qgrp->singleq.num_rxq; in __idpf_vport_queue_ids_init()
3306 for (j = 0; j < num_rxq && k < num_qids; j++, k++) { in __idpf_vport_queue_ids_init()
3309 if (idpf_is_queue_model_split(vport->rxq_model)) in __idpf_vport_queue_ids_init()
3310 q = &rx_qgrp->splitq.rxq_sets[j]->rxq; in __idpf_vport_queue_ids_init()
3312 q = rx_qgrp->singleq.rxqs[j]; in __idpf_vport_queue_ids_init()
3313 q->q_id = qids[k]; in __idpf_vport_queue_ids_init()
3318 for (i = 0; i < vport->num_txq_grp && k < num_qids; i++, k++) { in __idpf_vport_queue_ids_init()
3319 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; in __idpf_vport_queue_ids_init()
3321 tx_qgrp->complq->q_id = qids[k]; in __idpf_vport_queue_ids_init()
3325 for (i = 0; i < vport->num_rxq_grp; i++) { in __idpf_vport_queue_ids_init()
3326 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in __idpf_vport_queue_ids_init()
3327 u8 num_bufqs = vport->num_bufqs_per_qgrp; in __idpf_vport_queue_ids_init()
3329 for (j = 0; j < num_bufqs && k < num_qids; j++, k++) { in __idpf_vport_queue_ids_init()
3332 q = &rx_qgrp->splitq.bufq_sets[j].bufq; in __idpf_vport_queue_ids_init()
3333 q->q_id = qids[k]; in __idpf_vport_queue_ids_init()
3341 return k; in __idpf_vport_queue_ids_init()
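/*
 * A small sketch of the id-distribution bookkeeping above: a flat array of
 * queue ids returned by the device is handed out sequentially to the queues
 * of each group, and the function reports how many ids it consumed so the
 * caller can verify it got at least as many as it expected. Types here are
 * simplified placeholders for the driver's queue groups.
 */
#include <stddef.h>
#include <stdint.h>

struct sketch_queue { uint32_t q_id; };

struct sketch_queue_group {
	struct sketch_queue *queues;
	size_t num_queues;
};

static size_t sketch_assign_qids(const uint32_t *qids, size_t num_qids,
				 struct sketch_queue_group *groups,
				 size_t num_groups)
{
	size_t k = 0;

	for (size_t i = 0; i < num_groups; i++)
		for (size_t j = 0; j < groups[i].num_queues && k < num_qids;
		     j++, k++)
			groups[i].queues[j].q_id = qids[k];

	return k;	/* caller checks k >= number of queues it configured */
}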
3345 * idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
3356 u16 vport_idx = vport->idx; in idpf_vport_queue_ids_init()
3361 vport_config = vport->adapter->vport_config[vport_idx]; in idpf_vport_queue_ids_init()
3362 if (vport_config->req_qs_chunks) { in idpf_vport_queue_ids_init()
3364 (struct virtchnl2_add_queues *)vport_config->req_qs_chunks; in idpf_vport_queue_ids_init()
3365 chunks = &vc_aq->chunks; in idpf_vport_queue_ids_init()
3367 vport_params = vport->adapter->vport_params_recvd[vport_idx]; in idpf_vport_queue_ids_init()
3368 chunks = &vport_params->chunks; in idpf_vport_queue_ids_init()
3373 return -ENOMEM; in idpf_vport_queue_ids_init()
3378 if (num_ids < vport->num_txq) { in idpf_vport_queue_ids_init()
3379 err = -EINVAL; in idpf_vport_queue_ids_init()
3384 if (num_ids < vport->num_txq) { in idpf_vport_queue_ids_init()
3385 err = -EINVAL; in idpf_vport_queue_ids_init()
3392 if (num_ids < vport->num_rxq) { in idpf_vport_queue_ids_init()
3393 err = -EINVAL; in idpf_vport_queue_ids_init()
3398 if (num_ids < vport->num_rxq) { in idpf_vport_queue_ids_init()
3399 err = -EINVAL; in idpf_vport_queue_ids_init()
3403 if (!idpf_is_queue_model_split(vport->txq_model)) in idpf_vport_queue_ids_init()
3408 if (num_ids < vport->num_complq) { in idpf_vport_queue_ids_init()
3409 err = -EINVAL; in idpf_vport_queue_ids_init()
3413 if (num_ids < vport->num_complq) { in idpf_vport_queue_ids_init()
3414 err = -EINVAL; in idpf_vport_queue_ids_init()
3419 if (!idpf_is_queue_model_split(vport->rxq_model)) in idpf_vport_queue_ids_init()
3424 if (num_ids < vport->num_bufq) { in idpf_vport_queue_ids_init()
3425 err = -EINVAL; in idpf_vport_queue_ids_init()
3429 if (num_ids < vport->num_bufq) in idpf_vport_queue_ids_init()
3430 err = -EINVAL; in idpf_vport_queue_ids_init()
3439 * idpf_vport_adjust_qs - Adjust to new requested queues
3449 vport_msg.txq_model = cpu_to_le16(vport->txq_model); in idpf_vport_adjust_qs()
3450 vport_msg.rxq_model = cpu_to_le16(vport->rxq_model); in idpf_vport_adjust_qs()
3451 err = idpf_vport_calc_total_qs(vport->adapter, vport->idx, &vport_msg, in idpf_vport_adjust_qs()
3463 * idpf_is_capability_ena - Default implementation of capability checking
3466 * @field: caps field to check for flags
3467 * @flag: flag to check
3474 u8 *caps = (u8 *)&adapter->caps; in idpf_is_capability_ena()
3501 vport_msg = vport->adapter->vport_params_recvd[vport->idx]; in idpf_get_vport_id()
3503 return le32_to_cpu(vport_msg->vport_id); in idpf_get_vport_id()
3507 * idpf_mac_filter_async_handler - Async callback for mac filters
3514 * situation to deal with errors returned on the reply. The best we can
3532 if (!ctlq_msg->cookie.mbx.chnl_retval) in idpf_mac_filter_async_handler()
3536 if (xn->reply_sz < sizeof(*ma_list)) in idpf_mac_filter_async_handler()
3539 ma_list = ctlq_msg->ctx.indirect.payload->va; in idpf_mac_filter_async_handler()
3540 mac_addr = ma_list->mac_addr_list; in idpf_mac_filter_async_handler()
3541 num_entries = le16_to_cpu(ma_list->num_mac_addr); in idpf_mac_filter_async_handler()
3543 if (xn->reply_sz < struct_size(ma_list, mac_addr_list, num_entries)) in idpf_mac_filter_async_handler()
3546 vport = idpf_vid_to_vport(adapter, le32_to_cpu(ma_list->vport_id)); in idpf_mac_filter_async_handler()
3550 vport_config = adapter->vport_config[le32_to_cpu(ma_list->vport_id)]; in idpf_mac_filter_async_handler()
3551 ma_list_head = &vport_config->user_config.mac_filter_list; in idpf_mac_filter_async_handler()
3553 /* We can't do much to reconcile bad filters at this point, however we in idpf_mac_filter_async_handler()
3557 spin_lock_bh(&vport_config->mac_filter_list_lock); in idpf_mac_filter_async_handler()
3560 if (ether_addr_equal(mac_addr[i].addr, f->macaddr)) in idpf_mac_filter_async_handler()
3561 list_del(&f->list); in idpf_mac_filter_async_handler()
3562 spin_unlock_bh(&vport_config->mac_filter_list_lock); in idpf_mac_filter_async_handler()
3563 dev_err_ratelimited(&adapter->pdev->dev, "Received error sending MAC filter request (op %d)\n", in idpf_mac_filter_async_handler()
3564 xn->vc_op); in idpf_mac_filter_async_handler()
3569 dev_err_ratelimited(&adapter->pdev->dev, "Received invalid MAC filter payload (op %d) (len %zd)\n", in idpf_mac_filter_async_handler()
3570 xn->vc_op, xn->reply_sz); in idpf_mac_filter_async_handler()
3572 return -EINVAL; in idpf_mac_filter_async_handler()
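/*
 * A simplified sketch of the error reconciliation done above: when the device
 * rejects a MAC filter message, the addresses carried in the reply are removed
 * from the locally tracked filter list so they are not retried forever. The
 * heap-allocated singly linked list below is a userspace stand-in for the
 * driver's spinlock-protected list.
 */
#include <stdlib.h>
#include <string.h>

#define SK_ETH_ALEN 6

struct sk_mac_filter {
	unsigned char addr[SK_ETH_ALEN];
	struct sk_mac_filter *next;
};

static void sk_drop_rejected_filters(struct sk_mac_filter **head,
				     unsigned char (*rejected)[SK_ETH_ALEN],
				     int num_rejected)
{
	for (int i = 0; i < num_rejected; i++) {
		for (struct sk_mac_filter **pp = head; *pp; ) {
			if (!memcmp((*pp)->addr, rejected[i], SK_ETH_ALEN)) {
				struct sk_mac_filter *victim = *pp;

				*pp = victim->next;	/* unlink and drop */
				free(victim);
			} else {
				pp = &(*pp)->next;
			}
		}
	}
}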
3576 * idpf_add_del_mac_filters - Add/del mac filters
3590 struct idpf_adapter *adapter = np->adapter; in idpf_add_del_mac_filters()
3596 int i = 0, k; in idpf_add_del_mac_filters() local
3604 vport_config = adapter->vport_config[np->vport_idx]; in idpf_add_del_mac_filters()
3605 spin_lock_bh(&vport_config->mac_filter_list_lock); in idpf_add_del_mac_filters()
3608 list_for_each_entry(f, &vport_config->user_config.mac_filter_list, in idpf_add_del_mac_filters()
3610 if (add && f->add) in idpf_add_del_mac_filters()
3612 else if (!add && f->remove) in idpf_add_del_mac_filters()
3617 spin_unlock_bh(&vport_config->mac_filter_list_lock); in idpf_add_del_mac_filters()
3626 spin_unlock_bh(&vport_config->mac_filter_list_lock); in idpf_add_del_mac_filters()
3628 return -ENOMEM; in idpf_add_del_mac_filters()
3631 list_for_each_entry(f, &vport_config->user_config.mac_filter_list, in idpf_add_del_mac_filters()
3633 if (add && f->add) { in idpf_add_del_mac_filters()
3634 ether_addr_copy(mac_addr[i].addr, f->macaddr); in idpf_add_del_mac_filters()
3636 f->add = false; in idpf_add_del_mac_filters()
3640 if (!add && f->remove) { in idpf_add_del_mac_filters()
3641 ether_addr_copy(mac_addr[i].addr, f->macaddr); in idpf_add_del_mac_filters()
3643 f->remove = false; in idpf_add_del_mac_filters()
3649 spin_unlock_bh(&vport_config->mac_filter_list_lock); in idpf_add_del_mac_filters()
3651 /* Chunk up the filters into multiple messages to avoid in idpf_add_del_mac_filters()
3656 for (i = 0, k = 0; i < num_msgs; i++) { in idpf_add_del_mac_filters()
3668 return -ENOMEM; in idpf_add_del_mac_filters()
3673 ma_list->vport_id = cpu_to_le32(np->vport_id); in idpf_add_del_mac_filters()
3674 ma_list->num_mac_addr = cpu_to_le16(num_entries); in idpf_add_del_mac_filters()
3675 memcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size); in idpf_add_del_mac_filters()
3683 k += num_entries; in idpf_add_del_mac_filters()
3684 total_filters -= num_entries; in idpf_add_del_mac_filters()
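/*
 * A sketch of the chunking arithmetic above: a large batch of filters is split
 * into messages of at most FILTERS_PER_MSG entries so no single mailbox buffer
 * grows too large. With, say, 300 filters and 64 per message, this produces
 * five messages of 64, 64, 64, 64 and 44 entries. The constant and callback
 * are illustrative, not the driver's values.
 */
#include <stddef.h>

#define SKETCH_FILTERS_PER_MSG	64

static void sketch_send_in_chunks(size_t total_filters,
				  void (*send)(size_t first, size_t count))
{
	size_t num_msgs = (total_filters + SKETCH_FILTERS_PER_MSG - 1) /
			  SKETCH_FILTERS_PER_MSG;
	size_t k = 0;	/* index of the first filter in the next message */

	for (size_t i = 0; i < num_msgs; i++) {
		size_t count = total_filters < SKETCH_FILTERS_PER_MSG ?
			       total_filters : SKETCH_FILTERS_PER_MSG;

		send(k, count);

		k += count;
		total_filters -= count;
	}
}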
3691 * idpf_set_promiscuous - set promiscuous and send message to mailbox
3696 * Request to enable promiscuous mode for the vport. Message is sent
3709 if (test_bit(__IDPF_PROMISC_UC, config_data->user_flags)) in idpf_set_promiscuous()
3711 if (test_bit(__IDPF_PROMISC_MC, config_data->user_flags)) in idpf_set_promiscuous()