Lines matching refs:mvq — each entry shows the source line number, the matching line, and the enclosing function; "argument" or "local" marks how mvq is bound at that site.
163 static int resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq);
374 struct mlx5_vdpa_virtqueue *mvq, u32 num_ent) in qp_prepare() argument
380 vqp = fw ? &mvq->fwqp : &mvq->vqqp; in qp_prepare()
400 MLX5_SET(qpc, qpc, cqn_rcv, mvq->cq.mcq.cqn); in qp_prepare()
419 static int qp_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, in qp_create() argument
430 vqp = &mvq->vqqp; in qp_create()
431 err = rq_buf_alloc(ndev, vqp, mvq->num_ent); in qp_create()
447 qp_prepare(ndev, vqp->fw, in, mvq, mvq->num_ent); in qp_create()
465 rx_post(vqp, mvq->num_ent); in qp_create()
511 static void mlx5_vdpa_handle_completions(struct mlx5_vdpa_virtqueue *mvq, int num) in mlx5_vdpa_handle_completions() argument
513 struct mlx5_vdpa_net *ndev = mvq->ndev; in mlx5_vdpa_handle_completions()
516 event_cb = &ndev->event_cbs[mvq->index]; in mlx5_vdpa_handle_completions()
517 mlx5_cq_set_ci(&mvq->cq.mcq); in mlx5_vdpa_handle_completions()
523 rx_post(&mvq->vqqp, num); in mlx5_vdpa_handle_completions()
530 struct mlx5_vdpa_virtqueue *mvq = container_of(mcq, struct mlx5_vdpa_virtqueue, cq.mcq); in mlx5_vdpa_cq_comp() local
531 struct mlx5_vdpa_net *ndev = mvq->ndev; in mlx5_vdpa_cq_comp()
535 while (!mlx5_vdpa_poll_one(&mvq->cq)) { in mlx5_vdpa_cq_comp()
537 if (num > mvq->num_ent / 2) { in mlx5_vdpa_cq_comp()
544 mlx5_vdpa_handle_completions(mvq, num); in mlx5_vdpa_cq_comp()
550 mlx5_vdpa_handle_completions(mvq, num); in mlx5_vdpa_cq_comp()
552 mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index); in mlx5_vdpa_cq_comp()
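Read together, lines 530-552 give the completion-side algorithm: poll CQEs one by one, batch-acknowledge once more than half the ring has drained (update the consumer index, fire the event callback, repost receive buffers; see lines 511-523), then re-arm the CQ. A sketch of that control flow, using only names from the excerpts and assuming the standard mlx5 completion-callback signature and that uar_page comes from the device's UAR resources:

    /* Sketch of mlx5_vdpa_cq_comp() as implied by lines 530-552. */
    static void cq_comp_sketch(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
    {
            struct mlx5_vdpa_virtqueue *mvq =
                    container_of(mcq, struct mlx5_vdpa_virtqueue, cq.mcq);
            struct mlx5_vdpa_net *ndev = mvq->ndev;
            void __iomem *uar_page = ndev->mvdev.res.uar->map;  /* assumed source */
            int num = 0;

            while (!mlx5_vdpa_poll_one(&mvq->cq)) {
                    num++;
                    if (num > mvq->num_ent / 2) {
                            /* Batch-ack mid-ring so receive buffers are
                             * reposted before the RQ runs dry. */
                            mlx5_vdpa_handle_completions(mvq, num);
                            num = 0;
                    }
            }
            if (num)
                    mlx5_vdpa_handle_completions(mvq, num);

            /* Re-arm so the next CQE raises another event. */
            mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page,
                        mvq->cq.mcq.cons_index);
    }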
557 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in cq_create() local
561 struct mlx5_vdpa_cq *vcq = &mvq->cq; in cq_create()
619 mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index); in cq_create()
634 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in cq_destroy() local
636 struct mlx5_vdpa_cq *vcq = &mvq->cq; in cq_destroy()
686 static void set_umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num, in set_umem_size() argument
696 *umemp = &mvq->umem1; in set_umem_size()
701 *umemp = &mvq->umem2; in set_umem_size()
706 *umemp = &mvq->umem3; in set_umem_size()
710 (*umemp)->size = p_a * mvq->num_ent + p_b; in set_umem_size()
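Line 710 carries the only arithmetic in this helper: each of the three umems scales linearly with the queue depth, size = p_a * num_ent + p_b, with one (p_a, p_b) pair per umem slot selected at lines 696-706 (the coefficients themselves are presumably read from device capabilities; they do not appear in this listing). A worked sketch:

    /* Sketch: per-umem size is linear in queue depth. The p_a/p_b
     * coefficients are an assumption here, standing in for whatever
     * the device reports for umem 1, 2 or 3. */
    static u32 umem_size(u32 p_a, u32 p_b, u32 num_ent)
    {
            return p_a * num_ent + p_b;
    }
    /* e.g. p_a = 128, p_b = 4096, num_ent = 256 -> 128 * 256 + 4096 = 36864 */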
718 static int create_umem(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num) in create_umem() argument
728 set_umem_size(ndev, mvq, num, &umem); in create_umem()
768 static void umem_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num) in umem_destroy() argument
776 umem = &mvq->umem1; in umem_destroy()
779 umem = &mvq->umem2; in umem_destroy()
782 umem = &mvq->umem3; in umem_destroy()
794 static int umems_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in umems_create() argument
800 err = create_umem(ndev, mvq, num); in umems_create()
808 umem_destroy(ndev, mvq, num); in umems_create()
813 static void umems_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in umems_destroy() argument
818 umem_destroy(ndev, mvq, num); in umems_destroy()
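Lines 794-818 thread the same num argument (1, 2 or 3, matching umem1..umem3 above) through creation and destruction, implying the usual create-all-or-unwind loop. A sketch under that assumption:

    /* Sketch of umems_create()/umems_destroy() as implied by lines 794-818:
     * create umems 1..3 in order, and on failure tear down in reverse. */
    static int umems_create_sketch(struct mlx5_vdpa_net *ndev,
                                   struct mlx5_vdpa_virtqueue *mvq)
    {
            int num, err;

            for (num = 1; num <= 3; num++) {
                    err = create_umem(ndev, mvq, num);
                    if (err)
                            goto err_umem;
            }
            return 0;

    err_umem:
            while (--num > 0)
                    umem_destroy(ndev, mvq, num);
            return err;
    }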
879 struct mlx5_vdpa_virtqueue *mvq, in create_virtqueue() argument
895 err = umems_create(ndev, mvq); in create_virtqueue()
920 if (vq_is_tx(mvq->index)) in create_virtqueue()
923 if (mvq->map.virq) { in create_virtqueue()
925 MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->map.index); in create_virtqueue()
928 MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->fwqp.mqp.qpn); in create_virtqueue()
931 MLX5_SET(virtio_q, vq_ctx, queue_index, mvq->index); in create_virtqueue()
932 MLX5_SET(virtio_q, vq_ctx, queue_size, mvq->num_ent); in create_virtqueue()
937 MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx); in create_virtqueue()
938 MLX5_SET(virtio_net_q_object, obj_context, hw_used_index, mvq->used_idx); in create_virtqueue()
940 MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr); in create_virtqueue()
941 MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr); in create_virtqueue()
942 MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr); in create_virtqueue()
958 mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY; in create_virtqueue()
962 mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY; in create_virtqueue()
965 MLX5_SET(virtio_q, vq_ctx, umem_1_id, mvq->umem1.id); in create_virtqueue()
966 MLX5_SET(virtio_q, vq_ctx, umem_1_size, mvq->umem1.size); in create_virtqueue()
967 MLX5_SET(virtio_q, vq_ctx, umem_2_id, mvq->umem2.id); in create_virtqueue()
968 MLX5_SET(virtio_q, vq_ctx, umem_2_size, mvq->umem2.size); in create_virtqueue()
969 MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id); in create_virtqueue()
970 MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size); in create_virtqueue()
973 MLX5_SET(virtio_q, vq_ctx, counter_set_id, mvq->counter_set_id); in create_virtqueue()
979 mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT; in create_virtqueue()
981 mvq->virtq_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); in create_virtqueue()
985 mvq->vq_mr = vq_mr; in create_virtqueue()
990 mvq->desc_mr = vq_desc_mr; in create_virtqueue()
999 umems_destroy(ndev, mvq); in create_virtqueue()
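Lines 923-928 show create_virtqueue() choosing between two event-delivery modes: if alloc_vector() mapped a dedicated MSI-X vector for this queue (mvq->map.virq != 0), completions are steered straight to it; otherwise they are relayed through the firmware-side QP created earlier. A sketch of the branch; the event_mode enum values are assumed from the mlx5 interface headers, everything else is in the excerpts:

    if (mvq->map.virq) {
            /* Direct MSI-X delivery to the vector mapped in alloc_vector(). */
            MLX5_SET(virtio_q, vq_ctx, event_mode, MLX5_VIRTIO_Q_EVENT_MODE_MSIX_MODE);
            MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->map.index);
    } else {
            /* Fall back to relaying events through the firmware QP pair. */
            MLX5_SET(virtio_q, vq_ctx, event_mode, MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE);
            MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->fwqp.mqp.qpn);
    }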
1003 static void destroy_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in destroy_virtqueue() argument
1010 MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.obj_id, mvq->virtq_id); in destroy_virtqueue()
1015 mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id); in destroy_virtqueue()
1018 mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_NONE; in destroy_virtqueue()
1019 umems_destroy(ndev, mvq); in destroy_virtqueue()
1021 mlx5_vdpa_put_mr(&ndev->mvdev, mvq->vq_mr); in destroy_virtqueue()
1022 mvq->vq_mr = NULL; in destroy_virtqueue()
1024 mlx5_vdpa_put_mr(&ndev->mvdev, mvq->desc_mr); in destroy_virtqueue()
1025 mvq->desc_mr = NULL; in destroy_virtqueue()
1028 static u32 get_rqpn(struct mlx5_vdpa_virtqueue *mvq, bool fw) in get_rqpn() argument
1030 return fw ? mvq->vqqp.mqp.qpn : mvq->fwqp.mqp.qpn; in get_rqpn()
1033 static u32 get_qpn(struct mlx5_vdpa_virtqueue *mvq, bool fw) in get_qpn() argument
1035 return fw ? mvq->fwqp.mqp.qpn : mvq->vqqp.mqp.qpn; in get_qpn()
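The two helpers at lines 1028-1035 are deliberate mirror images: for any value of fw, get_qpn() returns one half of the QP pair and get_rqpn() the other. That is what lets the single modify_qp() routine below (line 1141) program each QP with its partner as the remote peer:

    /* fw == true  : modify the firmware QP, remote peer = driver QP
     * fw == false : modify the driver QP,   remote peer = firmware QP */
    alloc_inout(ndev, cmd, &in, &inlen, &out, &outlen,
                get_qpn(mvq, fw), get_rqpn(mvq, fw));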
1133 static int modify_qp(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, bool fw, int cmd) in modify_qp() argument
1141 alloc_inout(ndev, cmd, &in, &inlen, &out, &outlen, get_qpn(mvq, fw), get_rqpn(mvq, fw)); in modify_qp()
1150 static int connect_qps(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in connect_qps() argument
1154 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_2RST_QP); in connect_qps()
1158 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_2RST_QP); in connect_qps()
1162 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_RST2INIT_QP); in connect_qps()
1166 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_RST2INIT_QP); in connect_qps()
1170 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_INIT2RTR_QP); in connect_qps()
1174 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_INIT2RTR_QP); in connect_qps()
1178 return modify_qp(ndev, mvq, true, MLX5_CMD_OP_RTR2RTS_QP); in connect_qps()
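connect_qps() (lines 1150-1178) walks both QPs through the classic RST -> INIT -> RTR ladder, alternating sides so each transition can reference the peer's current state, and finally moves only the firmware-side QP to RTS (presumably because only that side transmits). The sequence written out as data:

    /* Transition order implied by lines 1154-1178. */
    static const struct { bool fw; int cmd; } qp_ladder[] = {
            { true,  MLX5_CMD_OP_2RST_QP     },
            { false, MLX5_CMD_OP_2RST_QP     },
            { true,  MLX5_CMD_OP_RST2INIT_QP },
            { false, MLX5_CMD_OP_RST2INIT_QP },
            { true,  MLX5_CMD_OP_INIT2RTR_QP },
            { false, MLX5_CMD_OP_INIT2RTR_QP },
            { true,  MLX5_CMD_OP_RTR2RTS_QP  },  /* firmware side only */
    };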
1198 struct mlx5_vdpa_virtqueue *mvq, in fill_query_virtqueue_cmd() argument
1205 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id); in fill_query_virtqueue_cmd()
1296 static bool modifiable_virtqueue_fields(struct mlx5_vdpa_virtqueue *mvq) in modifiable_virtqueue_fields() argument
1299 if (mvq->modified_fields & ~MLX5_VIRTQ_MODIFY_MASK_STATE) in modifiable_virtqueue_fields()
1300 return mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT || in modifiable_virtqueue_fields()
1301 mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND; in modifiable_virtqueue_fields()
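Lines 1296-1301 compress to a single rule: a state change is always permitted, but any other dirty field may only be pushed while the firmware object sits in INIT or SUSPEND. A sketch of the whole predicate, assuming the fall-through returns true:

    static bool modifiable_virtqueue_fields_sketch(struct mlx5_vdpa_virtqueue *mvq)
    {
            /* Only the state transition itself is legal in every fw state. */
            if (mvq->modified_fields & ~MLX5_VIRTQ_MODIFY_MASK_STATE)
                    return mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT ||
                           mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND;
            return true;  /* assumed tail: state-only changes always pass */
    }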
1307 struct mlx5_vdpa_virtqueue *mvq, in fill_modify_virtqueue_cmd() argument
1322 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id); in fill_modify_virtqueue_cmd()
1328 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE) in fill_modify_virtqueue_cmd()
1331 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS) { in fill_modify_virtqueue_cmd()
1332 MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr); in fill_modify_virtqueue_cmd()
1333 MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr); in fill_modify_virtqueue_cmd()
1334 MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr); in fill_modify_virtqueue_cmd()
1337 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_AVAIL_IDX) in fill_modify_virtqueue_cmd()
1338 MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx); in fill_modify_virtqueue_cmd()
1340 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_USED_IDX) in fill_modify_virtqueue_cmd()
1341 MLX5_SET(virtio_net_q_object, obj_context, hw_used_index, mvq->used_idx); in fill_modify_virtqueue_cmd()
1343 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_QUEUE_VIRTIO_VERSION) in fill_modify_virtqueue_cmd()
1347 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_QUEUE_FEATURES) { in fill_modify_virtqueue_cmd()
1356 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) { in fill_modify_virtqueue_cmd()
1362 mvq->modified_fields &= ~MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY; in fill_modify_virtqueue_cmd()
1365 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) { in fill_modify_virtqueue_cmd()
1371 mvq->modified_fields &= ~MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY; in fill_modify_virtqueue_cmd()
1374 MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select, mvq->modified_fields); in fill_modify_virtqueue_cmd()
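fill_modify_virtqueue_cmd() (lines 1307-1374) applies one pattern throughout: stage a field into the command only when its bit is set in mvq->modified_fields, then hand the accumulated mask to firmware as modify_field_select so the device applies exactly the dirty fields and nothing else. Note the asymmetry at lines 1362 and 1371: the two mkey bits can be cleared here (apparently when there is no new mapping to program), while every other bit is cleared later in modify_virtqueue_end(). The pattern in miniature:

    /* Stage a field iff its dirty bit is set ... */
    if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_AVAIL_IDX)
            MLX5_SET(virtio_net_q_object, obj_context,
                     hw_available_index, mvq->avail_idx);

    /* ... then tell firmware which staged fields to actually apply. */
    MLX5_SET64(virtio_net_q_object, obj_context,
               modify_field_select, mvq->modified_fields);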
1378 struct mlx5_vdpa_virtqueue *mvq, in modify_virtqueue_end() argument
1383 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) { in modify_virtqueue_end()
1387 mlx5_vdpa_put_mr(mvdev, mvq->vq_mr); in modify_virtqueue_end()
1389 mvq->vq_mr = vq_mr; in modify_virtqueue_end()
1392 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) { in modify_virtqueue_end()
1396 mlx5_vdpa_put_mr(mvdev, mvq->desc_mr); in modify_virtqueue_end()
1398 mvq->desc_mr = desc_mr; in modify_virtqueue_end()
1401 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE) in modify_virtqueue_end()
1402 mvq->fw_state = state; in modify_virtqueue_end()
1404 mvq->modified_fields = 0; in modify_virtqueue_end()
1407 static int counter_set_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in counter_set_alloc() argument
1427 mvq->counter_set_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); in counter_set_alloc()
1432 static void counter_set_dealloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in counter_set_dealloc() argument
1441 MLX5_SET(destroy_virtio_q_counters_in, in, hdr.obj_id, mvq->counter_set_id); in counter_set_dealloc()
1445 mlx5_vdpa_warn(&ndev->mvdev, "dealloc counter set 0x%x\n", mvq->counter_set_id); in counter_set_dealloc()
1459 struct mlx5_vdpa_virtqueue *mvq) in alloc_vector() argument
1470 dev_name(&ndev->mvdev.vdev.dev), mvq->index); in alloc_vector()
1471 ent->dev_id = &ndev->event_cbs[mvq->index]; in alloc_vector()
1478 mvq->map = ent->map; in alloc_vector()
1485 struct mlx5_vdpa_virtqueue *mvq) in dealloc_vector() argument
1491 if (mvq->map.virq == irqp->entries[i].map.virq) { in dealloc_vector()
1492 free_irq(mvq->map.virq, irqp->entries[i].dev_id); in dealloc_vector()
1499 struct mlx5_vdpa_virtqueue *mvq, in setup_vq() argument
1502 u16 idx = mvq->index; in setup_vq()
1505 if (mvq->initialized) in setup_vq()
1508 err = cq_create(ndev, idx, mvq->num_ent); in setup_vq()
1512 err = qp_create(ndev, mvq, &mvq->fwqp); in setup_vq()
1516 err = qp_create(ndev, mvq, &mvq->vqqp); in setup_vq()
1520 err = connect_qps(ndev, mvq); in setup_vq()
1524 err = counter_set_alloc(ndev, mvq); in setup_vq()
1528 alloc_vector(ndev, mvq); in setup_vq()
1529 err = create_virtqueue(ndev, mvq, filled); in setup_vq()
1533 mvq->initialized = true; in setup_vq()
1535 if (mvq->ready) { in setup_vq()
1536 err = resume_vq(ndev, mvq); in setup_vq()
1544 destroy_virtqueue(ndev, mvq); in setup_vq()
1546 dealloc_vector(ndev, mvq); in setup_vq()
1547 counter_set_dealloc(ndev, mvq); in setup_vq()
1549 qp_destroy(ndev, &mvq->vqqp); in setup_vq()
1551 qp_destroy(ndev, &mvq->fwqp); in setup_vq()
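setup_vq() (lines 1499-1551) builds a queue bottom-up, and its error labels (lines 1544-1551) unwind in exactly the reverse order. The construction sequence, with error handling elided:

    /* Order implied by lines 1508-1536 (each err is checked in the real code). */
    err = cq_create(ndev, idx, mvq->num_ent);    /* 1. completion queue    */
    err = qp_create(ndev, mvq, &mvq->fwqp);      /* 2. firmware-side QP    */
    err = qp_create(ndev, mvq, &mvq->vqqp);      /* 3. driver-side QP      */
    err = connect_qps(ndev, mvq);                /* 4. wire the pair       */
    err = counter_set_alloc(ndev, mvq);          /* 5. statistics object   */
    alloc_vector(ndev, mvq);                     /* 6. MSI-X, best effort  */
    err = create_virtqueue(ndev, mvq, filled);   /* 7. the VIRTQ object    */
    mvq->initialized = true;
    if (mvq->ready)
            err = resume_vq(ndev, mvq);          /* 8. drive it to RDY     */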
1576 struct mlx5_vdpa_virtqueue *mvq; in modify_virtqueues() local
1579 mvq = &ndev->vqs[vq_idx]; in modify_virtqueues()
1581 if (!modifiable_virtqueue_fields(mvq)) { in modify_virtqueues()
1586 if (mvq->fw_state != state) { in modify_virtqueues()
1587 if (!is_valid_state_change(mvq->fw_state, state, is_resumable(ndev))) { in modify_virtqueues()
1592 mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_STATE; in modify_virtqueues()
1599 fill_modify_virtqueue_cmd(ndev, mvq, state, &cmd_mem[i]); in modify_virtqueues()
1611 struct mlx5_vdpa_virtqueue *mvq; in modify_virtqueues() local
1614 mvq = &ndev->vqs[vq_idx]; in modify_virtqueues()
1618 vq_idx, mvq->fw_state, state, err); in modify_virtqueues()
1624 modify_virtqueue_end(ndev, mvq, state); in modify_virtqueues()
1635 struct mlx5_vdpa_virtqueue *mvq; in suspend_vqs() local
1643 mvq = &ndev->vqs[start_vq]; in suspend_vqs()
1644 if (!mvq->initialized) in suspend_vqs()
1647 if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) in suspend_vqs()
1663 mvq = &ndev->vqs[vq_idx]; in suspend_vqs()
1664 mvq->avail_idx = attrs[i].available_index; in suspend_vqs()
1665 mvq->used_idx = attrs[i].used_index; in suspend_vqs()
1673 static int suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in suspend_vq() argument
1675 return suspend_vqs(ndev, mvq->index, 1); in suspend_vq()
1680 struct mlx5_vdpa_virtqueue *mvq; in resume_vqs() local
1686 mvq = &ndev->vqs[start_vq]; in resume_vqs()
1687 if (!mvq->initialized) in resume_vqs()
1690 if (mvq->index >= ndev->cur_num_vqs) in resume_vqs()
1693 switch (mvq->fw_state) { in resume_vqs()
1698 err = modify_virtqueues(ndev, start_vq, num_vqs, mvq->fw_state); in resume_vqs()
1704 mlx5_vdpa_warn(&ndev->mvdev, "vq %d is not resumable\n", mvq->index); in resume_vqs()
1712 mvq->index, mvq->fw_state); in resume_vqs()
1719 static int resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in resume_vq() argument
1721 return resume_vqs(ndev, mvq->index, 1); in resume_vq()
1724 static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in teardown_vq() argument
1726 if (!mvq->initialized) in teardown_vq()
1729 suspend_vq(ndev, mvq); in teardown_vq()
1730 mvq->modified_fields = 0; in teardown_vq()
1731 destroy_virtqueue(ndev, mvq); in teardown_vq()
1732 dealloc_vector(ndev, mvq); in teardown_vq()
1733 counter_set_dealloc(ndev, mvq); in teardown_vq()
1734 qp_destroy(ndev, &mvq->vqqp); in teardown_vq()
1735 qp_destroy(ndev, &mvq->fwqp); in teardown_vq()
1736 cq_destroy(ndev, mvq->index); in teardown_vq()
1737 mvq->initialized = false; in teardown_vq()
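teardown_vq() (lines 1724-1737) mirrors that sequence: quiesce firmware first, drop any pending dirty bits so they cannot leak into a later modify, then destroy in reverse creation order (the QP-to-QP connection needs no separate undo; it goes away with the QPs). Condensed, with the setup_vq() step each call undoes:

    suspend_vq(ndev, mvq);            /* stop firmware touching the rings */
    mvq->modified_fields = 0;         /* forget stale dirty bits          */
    destroy_virtqueue(ndev, mvq);     /* undoes step 7                    */
    dealloc_vector(ndev, mvq);        /* undoes step 6                    */
    counter_set_dealloc(ndev, mvq);   /* undoes step 5                    */
    qp_destroy(ndev, &mvq->vqqp);     /* undoes step 3                    */
    qp_destroy(ndev, &mvq->fwqp);     /* undoes step 2                    */
    cq_destroy(ndev, mvq->index);     /* undoes step 1                    */
    mvq->initialized = false;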
2432 struct mlx5_vdpa_virtqueue *mvq; in mlx5_vdpa_kick_vq() local
2445 mvq = &ndev->vqs[idx]; in mlx5_vdpa_kick_vq()
2446 if (unlikely(!mvq->ready)) in mlx5_vdpa_kick_vq()
2457 struct mlx5_vdpa_virtqueue *mvq; in mlx5_vdpa_set_vq_address() local
2469 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_address()
2470 mvq->desc_addr = desc_area; in mlx5_vdpa_set_vq_address()
2471 mvq->device_addr = device_area; in mlx5_vdpa_set_vq_address()
2472 mvq->driver_addr = driver_area; in mlx5_vdpa_set_vq_address()
2473 mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS; in mlx5_vdpa_set_vq_address()
2481 struct mlx5_vdpa_virtqueue *mvq; in mlx5_vdpa_set_vq_num() local
2493 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_num()
2494 ndev->needs_teardown = num != mvq->num_ent; in mlx5_vdpa_set_vq_num()
2495 mvq->num_ent = num; in mlx5_vdpa_set_vq_num()
2533 struct mlx5_vdpa_virtqueue *mvq; in mlx5_vdpa_set_vq_ready() local
2546 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_ready()
2548 suspend_vq(ndev, mvq); in mlx5_vdpa_set_vq_ready()
2550 if (resume_vq(ndev, mvq)) in mlx5_vdpa_set_vq_ready()
2554 mvq->ready = ready; in mlx5_vdpa_set_vq_ready()
2576 struct mlx5_vdpa_virtqueue *mvq; in mlx5_vdpa_set_vq_state() local
2586 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_state()
2587 if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) { in mlx5_vdpa_set_vq_state()
2592 mvq->used_idx = state->split.avail_index; in mlx5_vdpa_set_vq_state()
2593 mvq->avail_idx = state->split.avail_index; in mlx5_vdpa_set_vq_state()
2594 mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_AVAIL_IDX | in mlx5_vdpa_set_vq_state()
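Lines 2592-2593 seed both hardware indices from the one saved available index. For a split virtqueue on a quiescent device this is sound: with no descriptors in flight, the used index has caught up with the available index, so a single value captures the whole ring state. The continuation of line 2594 contains no "mvq" and so is missing from this listing; given the USED_IDX mask used at line 1341, it presumably reads:

    mvq->used_idx  = state->split.avail_index;
    mvq->avail_idx = state->split.avail_index;
    mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_AVAIL_IDX |
                            MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_USED_IDX;  /* assumed */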
2603 struct mlx5_vdpa_virtqueue *mvq; in mlx5_vdpa_get_vq_state() local
2615 mvq = &ndev->vqs[idx]; in mlx5_vdpa_get_vq_state()
2620 if (!mvq->initialized) { in mlx5_vdpa_get_vq_state()
2625 state->split.avail_index = mvq->used_idx; in mlx5_vdpa_get_vq_state()
2629 err = query_virtqueues(ndev, mvq->index, 1, &attr); in mlx5_vdpa_get_vq_state()
2922 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[i]; in mlx5_vdpa_set_driver_features() local
2924 mvq->modified_fields |= ( in mlx5_vdpa_set_driver_features()
2976 static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in save_channel_info() argument
2978 struct mlx5_vq_restore_info *ri = &mvq->ri; in save_channel_info()
2982 if (mvq->initialized) { in save_channel_info()
2983 err = query_virtqueues(ndev, mvq->index, 1, &attr); in save_channel_info()
2990 ri->ready = mvq->ready; in save_channel_info()
2991 ri->num_ent = mvq->num_ent; in save_channel_info()
2992 ri->desc_addr = mvq->desc_addr; in save_channel_info()
2993 ri->device_addr = mvq->device_addr; in save_channel_info()
2994 ri->driver_addr = mvq->driver_addr; in save_channel_info()
2995 ri->map = mvq->map; in save_channel_info()
3021 struct mlx5_vdpa_virtqueue *mvq; in restore_channels_info() local
3028 mvq = &ndev->vqs[i]; in restore_channels_info()
3029 ri = &mvq->ri; in restore_channels_info()
3033 mvq->avail_idx = ri->avail_index; in restore_channels_info()
3034 mvq->used_idx = ri->used_index; in restore_channels_info()
3035 mvq->ready = ri->ready; in restore_channels_info()
3036 mvq->num_ent = ri->num_ent; in restore_channels_info()
3037 mvq->desc_addr = ri->desc_addr; in restore_channels_info()
3038 mvq->device_addr = ri->device_addr; in restore_channels_info()
3039 mvq->driver_addr = ri->driver_addr; in restore_channels_info()
3040 mvq->map = ri->map; in restore_channels_info()
3244 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[0]; in needs_vqs_reset() local
3249 if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT) in needs_vqs_reset()
3252 return mvq->modified_fields & ( in needs_vqs_reset()
3478 struct mlx5_vdpa_virtqueue *mvq; in mlx5_get_vq_irq() local
3486 mvq = &ndev->vqs[idx]; in mlx5_get_vq_irq()
3487 if (!mvq->map.virq) in mlx5_get_vq_irq()
3490 return mvq->map.virq; in mlx5_get_vq_irq()
3500 static int counter_set_query(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, in counter_set_query() argument
3512 if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) in counter_set_query()
3520 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->counter_set_id); in counter_set_query()
3538 struct mlx5_vdpa_virtqueue *mvq; in mlx5_vdpa_get_vendor_vq_stats() local
3558 mvq = &ndev->vqs[idx]; in mlx5_vdpa_get_vendor_vq_stats()
3559 err = counter_set_query(ndev, mvq, &received_desc, &completed_desc); in mlx5_vdpa_get_vendor_vq_stats()
3747 struct mlx5_vdpa_virtqueue *mvq; in mvqs_set_defaults() local
3751 mvq = &ndev->vqs[i]; in mvqs_set_defaults()
3752 memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri)); in mvqs_set_defaults()
3753 mvq->index = i; in mvqs_set_defaults()
3754 mvq->ndev = ndev; in mvqs_set_defaults()
3755 mvq->fwqp.fw = true; in mvqs_set_defaults()
3756 mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_NONE; in mvqs_set_defaults()
3757 mvq->num_ent = MLX5V_DEFAULT_VQ_SIZE; in mvqs_set_defaults()
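The memset at line 3752 is the subtle line here: it zeroes every member up to, but not including, the restore-info field ri, apparently so defaults can be reapplied across a reset without wiping what save_channel_info() (line 2978) stashed for restore_channels_info() (line 3029). This only works while ri stays at the tail of struct mlx5_vdpa_virtqueue. Condensed:

    memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri));  /* keep ri intact */
    mvq->index = i;
    mvq->ndev = ndev;
    mvq->fwqp.fw = true;                        /* mark the firmware-side QP */
    mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_NONE;
    mvq->num_ent = MLX5V_DEFAULT_VQ_SIZE;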