
Searched refs:cvq (Results 1 – 6 of 6) sorted by relevance

/linux-6.12.1/drivers/vdpa/vdpa_sim/
vdpa_sim_net.c
109 struct vdpasim_virtqueue *cvq = &vdpasim->vqs[2]; in vdpasim_handle_ctrl_mac() local
115 read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->in_iov, in vdpasim_handle_ctrl_mac()
129 struct vdpasim_virtqueue *cvq = &vdpasim->vqs[2]; in vdpasim_handle_cvq() local
140 if (!cvq->ready) in vdpasim_handle_cvq()
144 err = vringh_getdesc_iotlb(&cvq->vring, &cvq->in_iov, in vdpasim_handle_cvq()
145 &cvq->out_iov, in vdpasim_handle_cvq()
146 &cvq->head, GFP_ATOMIC); in vdpasim_handle_cvq()
151 read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->in_iov, &ctrl, in vdpasim_handle_cvq()
174 write = vringh_iov_push_iotlb(&cvq->vring, &cvq->out_iov, in vdpasim_handle_cvq()
176 vringh_complete_iotlb(&cvq->vring, cvq->head, write); in vdpasim_handle_cvq()
[all …]
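
The vdpa_sim hits above outline the device-side service loop for the control virtqueue: vdpasim_handle_cvq() pops a descriptor chain with vringh_getdesc_iotlb(), reads the struct virtio_net_ctrl_hdr from the readable iov, dispatches the command (vdpasim_handle_ctrl_mac() pulls the MAC payload the same way), then pushes a virtio_net_ctrl_ack and completes the chain. A minimal sketch of that pattern, with demo_* names standing in for the simulator's types and error handling trimmed:

#include <linux/vringh.h>
#include <linux/virtio_net.h>

struct demo_cvq {
	struct vringh vring;		/* vring accessed through an IOTLB */
	struct vringh_kiov in_iov;	/* device-readable part of the chain */
	struct vringh_kiov out_iov;	/* device-writable part (status byte) */
	u16 head;
	bool ready;
};

static void demo_handle_cvq(struct demo_cvq *cvq)
{
	struct virtio_net_ctrl_hdr ctrl;
	ssize_t read, write;
	int err;

	if (!cvq->ready)
		return;

	for (;;) {
		virtio_net_ctrl_ack status = VIRTIO_NET_ERR;

		/* 1: got a chain, 0: queue empty, <0: error */
		err = vringh_getdesc_iotlb(&cvq->vring, &cvq->in_iov,
					   &cvq->out_iov, &cvq->head,
					   GFP_ATOMIC);
		if (err <= 0)
			break;

		read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->in_iov,
					     &ctrl, sizeof(ctrl));
		if (read == sizeof(ctrl)) {
			/* dispatch on ctrl.class/ctrl.cmd here; e.g. a MAC
			 * set pulls ETH_ALEN more bytes from in_iov, as in
			 * vdpasim_handle_ctrl_mac(). This sketch just acks. */
			status = VIRTIO_NET_OK;
		}

		/* write the status back and mark the chain used */
		write = vringh_iov_push_iotlb(&cvq->vring, &cvq->out_iov,
					      &status, sizeof(status));
		if (write < 0)
			write = 0;
		vringh_complete_iotlb(&cvq->vring, cvq->head, write);
	}
}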
/linux-6.12.1/drivers/vdpa/mlx5/net/
mlx5_vnet.c
2133 struct mlx5_control_vq *cvq = &mvdev->cvq; in handle_ctrl_mac() local
2142 read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, (void *)mac, ETH_ALEN); in handle_ctrl_mac()
2272 struct mlx5_control_vq *cvq = &mvdev->cvq; in handle_ctrl_mq() local
2291 read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, (void *)&mq, sizeof(mq)); in handle_ctrl_mq()
2320 struct mlx5_control_vq *cvq = &mvdev->cvq; in handle_ctrl_vlan() local
2330 read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &vlan, sizeof(vlan)); in handle_ctrl_vlan()
2341 read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &vlan, sizeof(vlan)); in handle_ctrl_vlan()
2362 struct mlx5_control_vq *cvq; in mlx5_cvq_kick_handler() local
2370 cvq = &mvdev->cvq; in mlx5_cvq_kick_handler()
2380 if (!cvq->ready) in mlx5_cvq_kick_handler()
[all …]
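
The mlx5 handlers follow the same shape, but each command handler pulls its own fixed-size payload from the riov of struct mlx5_control_vq: a MAC in handle_ctrl_mac(), a struct virtio_net_ctrl_mq in handle_ctrl_mq(), a VLAN id in handle_ctrl_vlan(), all driven from mlx5_cvq_kick_handler(). A hedged sketch of one such payload read; demo_ctrl_vq and dev_mac are stand-ins for the driver's real structures, and the hardware-specific work is only noted in a comment:

#include <linux/if_ether.h>
#include <linux/string.h>
#include <linux/vringh.h>
#include <linux/virtio_net.h>

struct demo_ctrl_vq {
	struct vringh vring;
	struct vringh_kiov riov;	/* device-readable iov of the chain */
};

static virtio_net_ctrl_ack demo_ctrl_mac_set(struct demo_ctrl_vq *cvq,
					     u8 *dev_mac)
{
	u8 mac[ETH_ALEN];
	ssize_t read;

	/* pull exactly the payload this command carries */
	read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, mac, ETH_ALEN);
	if (read != ETH_ALEN)
		return VIRTIO_NET_ERR;

	/* the real handler also reprograms steering/firmware state here */
	memcpy(dev_mac, mac, ETH_ALEN);
	return VIRTIO_NET_OK;
}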
/linux-6.12.1/drivers/vdpa/mlx5/core/
resources.c
232 mvdev->cvq.iotlb = vhost_iotlb_alloc(0, 0); in init_ctrl_vq()
233 if (!mvdev->cvq.iotlb) in init_ctrl_vq()
236 spin_lock_init(&mvdev->cvq.iommu_lock); in init_ctrl_vq()
237 vringh_set_iotlb(&mvdev->cvq.vring, mvdev->cvq.iotlb, &mvdev->cvq.iommu_lock); in init_ctrl_vq()
244 vhost_iotlb_free(mvdev->cvq.iotlb); in cleanup_ctrl_vq()
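
resources.c shows where the cvq's address translation comes from: init_ctrl_vq() gives the control vq a private software IOTLB and points the vringh at it, and cleanup_ctrl_vq() frees it again. A sketch of that pairing, assuming only the three fields visible in the hits (vring, iotlb, iommu_lock) and a hypothetical demo_cvq_res container:

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/vhost_iotlb.h>
#include <linux/vringh.h>

struct demo_cvq_res {
	struct vringh vring;
	struct vhost_iotlb *iotlb;
	spinlock_t iommu_lock;
};

static int demo_init_ctrl_vq(struct demo_cvq_res *cvq)
{
	/* limit 0, flags 0: an unbounded software IOTLB, as in the hit */
	cvq->iotlb = vhost_iotlb_alloc(0, 0);
	if (!cvq->iotlb)
		return -ENOMEM;

	spin_lock_init(&cvq->iommu_lock);
	/* make the vringh translate cvq addresses through this IOTLB */
	vringh_set_iotlb(&cvq->vring, cvq->iotlb, &cvq->iommu_lock);
	return 0;
}

static void demo_cleanup_ctrl_vq(struct demo_cvq_res *cvq)
{
	vhost_iotlb_free(cvq->iotlb);
}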
mr.c
768 prune_iotlb(mvdev->cvq.iotlb); in mlx5_vdpa_clean_mrs()
848 spin_lock(&mvdev->cvq.iommu_lock); in mlx5_vdpa_update_cvq_iotlb()
850 prune_iotlb(mvdev->cvq.iotlb); in mlx5_vdpa_update_cvq_iotlb()
851 err = dup_iotlb(mvdev->cvq.iotlb, iotlb); in mlx5_vdpa_update_cvq_iotlb()
853 spin_unlock(&mvdev->cvq.iommu_lock); in mlx5_vdpa_update_cvq_iotlb()
mlx5_vdpa.h
116 struct mlx5_control_vq cvq; member
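
mr.c keeps that private IOTLB in sync: mlx5_vdpa_update_cvq_iotlb() takes cvq.iommu_lock, prunes the old mappings and duplicates the caller-supplied iotlb into the cvq's copy. prune_iotlb() and dup_iotlb() are helpers internal to the mlx5 driver; the sketch below only approximates them with the generic vhost_iotlb API (reset, then copy every mapping), so treat it as an illustration of the locking pattern rather than the driver's exact code:

#include <linux/limits.h>
#include <linux/spinlock.h>
#include <linux/vhost_iotlb.h>

static int demo_update_cvq_iotlb(struct vhost_iotlb *dst, spinlock_t *lock,
				 struct vhost_iotlb *src)
{
	struct vhost_iotlb_map *map;
	int err = 0;

	spin_lock(lock);
	/* "prune": drop whatever the cvq IOTLB held before */
	vhost_iotlb_reset(dst);

	/* "dup": copy every mapping of the new iotlb into the cvq's copy */
	for (map = vhost_iotlb_itree_first(src, 0, ULLONG_MAX); map;
	     map = vhost_iotlb_itree_next(map, 0, ULLONG_MAX)) {
		err = vhost_iotlb_add_range(dst, map->start, map->last,
					    map->addr, map->perm);
		if (err)
			break;
	}
	spin_unlock(lock);

	return err;
}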
/linux-6.12.1/drivers/net/
virtio_net.c
391 struct virtqueue *cvq; member
3271 ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC); in virtnet_send_command_reply()
3279 if (unlikely(!virtqueue_kick(vi->cvq))) in virtnet_send_command_reply()
3285 while (!virtqueue_get_buf(vi->cvq, &tmp) && in virtnet_send_command_reply()
3286 !virtqueue_is_broken(vi->cvq)) { in virtnet_send_command_reply()
6095 vi->cvq = vqs[total_vqs - 1]; in virtnet_find_vqs()
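
On the guest driver side (virtio_net.c) the cvq is an ordinary struct virtqueue, found as the last vq in virtnet_find_vqs(): virtnet_send_command_reply() queues the command header and any argument as device-readable sgs plus a status byte as a device-writable sg, kicks the queue, then polls virtqueue_get_buf() until the device consumes the buffer or the queue is broken. A trimmed sketch of that sequence; the demo_* names are hypothetical, teardown on failure is omitted, and the buffer is kmalloc'ed because sg entries must not point into a vmalloc'ed (VMAP_STACK) kernel stack, which is also why the real driver keeps these fields in a dedicated control buffer:

#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>

struct demo_ctrl_buf {
	struct virtio_net_ctrl_hdr hdr;	/* device-readable command header */
	virtio_net_ctrl_ack status;	/* device-writable status byte */
};

static bool demo_send_ctrl_cmd(struct virtqueue *cvq, u8 class, u8 cmd)
{
	struct demo_ctrl_buf *buf;
	struct scatterlist hdr_sg, stat_sg, *sgs[2];
	unsigned int len;
	bool ok = false;

	buf = kzalloc(sizeof(*buf), GFP_ATOMIC);
	if (!buf)
		return false;

	buf->hdr.class = class;
	buf->hdr.cmd = cmd;
	buf->status = VIRTIO_NET_ERR;

	sg_init_one(&hdr_sg, &buf->hdr, sizeof(buf->hdr));
	sg_init_one(&stat_sg, &buf->status, sizeof(buf->status));
	sgs[0] = &hdr_sg;	/* out (readable by the device) */
	sgs[1] = &stat_sg;	/* in  (written by the device)  */

	if (virtqueue_add_sgs(cvq, sgs, 1, 1, buf, GFP_ATOMIC) < 0)
		goto out;
	if (unlikely(!virtqueue_kick(cvq)))
		goto out;

	/* poll for the used buffer, as virtnet_send_command_reply() does */
	while (!virtqueue_get_buf(cvq, &len) && !virtqueue_is_broken(cvq))
		cpu_relax();

	ok = (buf->status == VIRTIO_NET_OK);
out:
	kfree(buf);
	return ok;
}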