Lines matching "virtio-iommu" in drivers/virtio/virtio_ring.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Virtio ring implementation.
6 #include <linux/virtio.h>
13 #include <linux/dma-mapping.h>
22 dev_err(&(_vq)->vq.vdev->dev, \
23 "%s:"fmt, (_vq)->vq.name, ##args); \
29 if ((_vq)->in_use) \
31 (_vq)->vq.name, (_vq)->in_use); \
32 (_vq)->in_use = __LINE__; \
35 do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
41 if ((_vq)->last_add_time_valid) \
43 (_vq)->last_add_time)) > 100); \
44 (_vq)->last_add_time = now; \
45 (_vq)->last_add_time_valid = true; \
49 if ((_vq)->last_add_time_valid) { \
51 (_vq)->last_add_time)) > 100); \
55 ((_vq)->last_add_time_valid = false)
59 dev_err(&_vq->vq.vdev->dev, \
60 "%s:"fmt, (_vq)->vq.name, ##args); \
61 (_vq)->broken = true; \
93 /* Last written value to avail->flags */
97 * Last written value to avail->idx in
102 /* Per-descriptor state. */
137 * Last written value to driver->flags in
142 /* Per-descriptor state. */
251 return (vq->indirect && total_sg > 1 && vq->vq.num_free); in virtqueue_use_indirect()
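The matched line is the entire policy: go indirect only when the feature was negotiated, the request spans more than one element, and the ring still has free slots. A sketch of the enclosing helper, matching its known upstream shape:

static bool virtqueue_use_indirect(const struct vring_virtqueue *vq,
				   unsigned int total_sg)
{
	/*
	 * If the host supports indirect descriptor tables, and we have
	 * multiple buffers, then go indirect (the threshold is untuned).
	 */
	return (vq->indirect && total_sg > 1 && vq->vq.num_free);
}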
255 * Modern virtio devices have feature bits to specify whether they need a
256 * quirk and bypass the IOMMU. If not there, just use the DMA API.
258 * If there, the interaction between virtio and DMA API is messy.
260 * On most systems with virtio, physical addresses match bus addresses,
264 * that speaks virtio behind a physical IOMMU, we must use the DMA API
265 * for virtio DMA to work at all.
267 * On other systems, including SPARC and PPC64, virtio-pci devices are
268 * enumerated as though they are behind an IOMMU, but the virtio host
269 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
275 * TODO: install a per-device DMA ops structure that does the right thing
287 * In theory, it's possible to have a buggy QEMU-supplied in vring_use_dma_api()
288 * emulated Q35 IOMMU and Xen enabled at the same time. On in vring_use_dma_api()
289 * such a configuration, virtio has never worked and will in vring_use_dma_api()
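Assembled from the fragments above, the decision reduces to two checks. A sketch assuming the real virtio_has_dma_quirk() and xen_domain() predicates from <linux/virtio_config.h> and <xen/xen.h>:

static bool vring_use_dma_api(const struct virtio_device *vdev)
{
	/* No bypass quirk (VIRTIO_F_ACCESS_PLATFORM negotiated): DMA API. */
	if (!virtio_has_dma_quirk(vdev))
		return true;

	/*
	 * Otherwise we are left to guess. Enabling the DMA API for Xen
	 * guests at least lets the sensible Xen configurations work.
	 */
	if (xen_domain())
		return true;

	return false;
}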
305 max_segment_size = dma_max_mapping_size(vdev->dev.parent); in virtio_max_dma_size()
328 * have 64-bit phys_addr_t but 32-bit dma_addr_t in vring_alloc_queue()
329 * are certain non-highmem MIPS and x86 in vring_alloc_queue()
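These two comment lines sit in the non-DMA-API branch of the ring allocator, where the bus address is simply the physical address and may not fit into dma_addr_t. A condensed sketch of that helper:

static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			       dma_addr_t *dma_handle, gfp_t flag,
			       struct device *dma_dev)
{
	void *queue;
	phys_addr_t phys_addr;

	if (vring_use_dma_api(vdev))
		return dma_alloc_coherent(dma_dev, size, dma_handle, flag);

	/* Non-DMA-API path: the bus address is just the physical address. */
	queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
	if (!queue)
		return NULL;

	phys_addr = virt_to_phys(queue);
	*dma_handle = (dma_addr_t)phys_addr;

	/* Bail out if dma_addr_t truncated the physical address. */
	if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
		free_pages_exact(queue, PAGE_ALIGN(size));
		return NULL;
	}
	return queue;
}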
362 return vq->dma_dev; in vring_dma_dev()
369 if (vq->premapped) { in vring_map_one_sg()
374 if (!vq->use_dma_api) { in vring_map_one_sg()
380 kmsan_handle_dma(sg_page(sg), sg->offset, sg->length, direction); in vring_map_one_sg()
391 sg_page(sg), sg->offset, sg->length, in vring_map_one_sg()
395 return -ENOMEM; in vring_map_one_sg()
404 if (!vq->use_dma_api) in vring_map_single()
414 if (!vq->use_dma_api) in vring_mapping_error()
422 vq->vq.num_free = num; in virtqueue_init()
424 if (vq->packed_ring) in virtqueue_init()
425 vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR); in virtqueue_init()
427 vq->last_used_idx = 0; in virtqueue_init()
429 vq->event_triggered = false; in virtqueue_init()
430 vq->num_added = 0; in virtqueue_init()
433 vq->in_use = false; in virtqueue_init()
434 vq->last_add_time_valid = false; in virtqueue_init()
440 * Split ring specific functions - *_split().
448 if (!vq->do_unmap) in vring_unmap_one_split_indirect()
451 flags = virtio16_to_cpu(vq->vq.vdev, desc->flags); in vring_unmap_one_split_indirect()
454 virtio64_to_cpu(vq->vq.vdev, desc->addr), in vring_unmap_one_split_indirect()
455 virtio32_to_cpu(vq->vq.vdev, desc->len), in vring_unmap_one_split_indirect()
463 struct vring_desc_extra *extra = vq->split.desc_extra; in vring_unmap_one_split()
469 if (!vq->use_dma_api) in vring_unmap_one_split()
478 if (!vq->do_unmap) in vring_unmap_one_split()
511 desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1); in alloc_indirect_split()
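alloc_indirect_split() chains every entry to its successor up front, which is why the matched line runs over the whole table. A sketch matching its known upstream shape:

static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
					       unsigned int total_sg, gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because otherwise
	 * virt_to_phys will give us bogus addresses in the virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	/* Pre-link each descriptor to the next one in the table. */
	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}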
524 struct vring_desc_extra *extra = vring->split.desc_extra; in virtqueue_add_desc_split()
527 desc[i].flags = cpu_to_virtio16(vq->vdev, flags); in virtqueue_add_desc_split()
528 desc[i].addr = cpu_to_virtio64(vq->vdev, addr); in virtqueue_add_desc_split()
529 desc[i].len = cpu_to_virtio32(vq->vdev, len); in virtqueue_add_desc_split()
533 desc[i].next = cpu_to_virtio16(vq->vdev, next); in virtqueue_add_desc_split()
539 next = virtio16_to_cpu(vq->vdev, desc[i].next); in virtqueue_add_desc_split()
563 BUG_ON(ctx && vq->indirect); in virtqueue_add_split()
565 if (unlikely(vq->broken)) { in virtqueue_add_split()
567 return -EIO; in virtqueue_add_split()
574 head = vq->free_head; in virtqueue_add_split()
580 WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect); in virtqueue_add_split()
591 desc = vq->split.vring.desc; in virtqueue_add_split()
596 if (unlikely(vq->vq.num_free < descs_used)) { in virtqueue_add_split()
597 pr_debug("Can't add buf len %i - avail = %i\n", in virtqueue_add_split()
598 descs_used, vq->vq.num_free); in virtqueue_add_split()
603 vq->notify(&vq->vq); in virtqueue_add_split()
607 return -ENOSPC; in virtqueue_add_split()
621 i = virtqueue_add_desc_split(_vq, desc, i, addr, sg->length, in virtqueue_add_split()
638 sg->length, in virtqueue_add_split()
645 desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT); in virtqueue_add_split()
646 if (!indirect && vq->do_unmap) in virtqueue_add_split()
647 vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &= in virtqueue_add_split()
656 if (vq->premapped) in virtqueue_add_split()
662 virtqueue_add_desc_split(_vq, vq->split.vring.desc, in virtqueue_add_split()
670 vq->vq.num_free -= descs_used; in virtqueue_add_split()
674 vq->free_head = vq->split.desc_extra[head].next; in virtqueue_add_split()
676 vq->free_head = i; in virtqueue_add_split()
679 vq->split.desc_state[head].data = data; in virtqueue_add_split()
681 vq->split.desc_state[head].indir_desc = desc; in virtqueue_add_split()
683 vq->split.desc_state[head].indir_desc = ctx; in virtqueue_add_split()
685 /* Put entry in available array (but don't update avail->idx until they in virtqueue_add_split()
687 avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1); in virtqueue_add_split()
688 vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head); in virtqueue_add_split()
692 virtio_wmb(vq->weak_barriers); in virtqueue_add_split()
693 vq->split.avail_idx_shadow++; in virtqueue_add_split()
694 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev, in virtqueue_add_split()
695 vq->split.avail_idx_shadow); in virtqueue_add_split()
696 vq->num_added++; in virtqueue_add_split()
703 if (unlikely(vq->num_added == (1 << 16) - 1)) in virtqueue_add_split()
721 i = virtio16_to_cpu(_vq->vdev, desc[i].next); in virtqueue_add_split()
731 return -ENOMEM; in virtqueue_add_split()
743 virtio_mb(vq->weak_barriers); in virtqueue_kick_prepare_split()
745 old = vq->split.avail_idx_shadow - vq->num_added; in virtqueue_kick_prepare_split()
746 new = vq->split.avail_idx_shadow; in virtqueue_kick_prepare_split()
747 vq->num_added = 0; in virtqueue_kick_prepare_split()
752 if (vq->event) { in virtqueue_kick_prepare_split()
753 needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, in virtqueue_kick_prepare_split()
754 vring_avail_event(&vq->split.vring)), in virtqueue_kick_prepare_split()
757 needs_kick = !(vq->split.vring.used->flags & in virtqueue_kick_prepare_split()
758 cpu_to_virtio16(_vq->vdev, in virtqueue_kick_prepare_split()
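With VIRTIO_RING_F_EVENT_IDX, the "needs kick" test above is vring_need_event() from include/uapi/linux/virtio_ring.h, which uses wrapping 16-bit arithmetic to ask whether the device's requested event index fell inside the window of buffers added since the last kick:

/* Kick iff event_idx is in the half-open window (old, new]. */
static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
{
	return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
}

For example, old = 10, new = 12, event_idx = 11 gives 0 < 2, so the device is notified; event_idx = 13 gives 65534 < 2, which is false, so the kick is suppressed.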
769 __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT); in detach_buf_split()
772 vq->split.desc_state[head].data = NULL; in detach_buf_split()
774 /* Put back on free list: unmap first-level descriptors and find end */ in detach_buf_split()
777 while (vq->split.vring.desc[i].flags & nextflag) { in detach_buf_split()
779 i = vq->split.desc_extra[i].next; in detach_buf_split()
780 vq->vq.num_free++; in detach_buf_split()
784 vq->split.desc_extra[i].next = vq->free_head; in detach_buf_split()
785 vq->free_head = head; in detach_buf_split()
788 vq->vq.num_free++; in detach_buf_split()
790 if (vq->indirect) { in detach_buf_split()
792 vq->split.desc_state[head].indir_desc; in detach_buf_split()
799 len = vq->split.desc_extra[head].len; in detach_buf_split()
801 BUG_ON(!(vq->split.desc_extra[head].flags & in detach_buf_split()
805 if (vq->do_unmap) { in detach_buf_split()
811 vq->split.desc_state[head].indir_desc = NULL; in detach_buf_split()
813 *ctx = vq->split.desc_state[head].indir_desc; in detach_buf_split()
819 return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, in more_used_split()
820 vq->split.vring.used->idx); in more_used_split()
834 if (unlikely(vq->broken)) { in virtqueue_get_buf_ctx_split()
846 virtio_rmb(vq->weak_barriers); in virtqueue_get_buf_ctx_split()
848 last_used = (vq->last_used_idx & (vq->split.vring.num - 1)); in virtqueue_get_buf_ctx_split()
849 i = virtio32_to_cpu(_vq->vdev, in virtqueue_get_buf_ctx_split()
850 vq->split.vring.used->ring[last_used].id); in virtqueue_get_buf_ctx_split()
851 *len = virtio32_to_cpu(_vq->vdev, in virtqueue_get_buf_ctx_split()
852 vq->split.vring.used->ring[last_used].len); in virtqueue_get_buf_ctx_split()
854 if (unlikely(i >= vq->split.vring.num)) { in virtqueue_get_buf_ctx_split()
858 if (unlikely(!vq->split.desc_state[i].data)) { in virtqueue_get_buf_ctx_split()
864 ret = vq->split.desc_state[i].data; in virtqueue_get_buf_ctx_split()
866 vq->last_used_idx++; in virtqueue_get_buf_ctx_split()
870 if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) in virtqueue_get_buf_ctx_split()
871 virtio_store_mb(vq->weak_barriers, in virtqueue_get_buf_ctx_split()
872 &vring_used_event(&vq->split.vring), in virtqueue_get_buf_ctx_split()
873 cpu_to_virtio16(_vq->vdev, vq->last_used_idx)); in virtqueue_get_buf_ctx_split()
885 if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) { in virtqueue_disable_cb_split()
886 vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; in virtqueue_disable_cb_split()
892 if (vq->event_triggered) in virtqueue_disable_cb_split()
895 if (vq->event) in virtqueue_disable_cb_split()
897 vring_used_event(&vq->split.vring) = 0x0; in virtqueue_disable_cb_split()
899 vq->split.vring.avail->flags = in virtqueue_disable_cb_split()
900 cpu_to_virtio16(_vq->vdev, in virtqueue_disable_cb_split()
901 vq->split.avail_flags_shadow); in virtqueue_disable_cb_split()
917 if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { in virtqueue_enable_cb_prepare_split()
918 vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; in virtqueue_enable_cb_prepare_split()
919 if (!vq->event) in virtqueue_enable_cb_prepare_split()
920 vq->split.vring.avail->flags = in virtqueue_enable_cb_prepare_split()
921 cpu_to_virtio16(_vq->vdev, in virtqueue_enable_cb_prepare_split()
922 vq->split.avail_flags_shadow); in virtqueue_enable_cb_prepare_split()
924 vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev, in virtqueue_enable_cb_prepare_split()
925 last_used_idx = vq->last_used_idx); in virtqueue_enable_cb_prepare_split()
934 return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, in virtqueue_poll_split()
935 vq->split.vring.used->idx); in virtqueue_poll_split()
950 if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { in virtqueue_enable_cb_delayed_split()
951 vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; in virtqueue_enable_cb_delayed_split()
952 if (!vq->event) in virtqueue_enable_cb_delayed_split()
953 vq->split.vring.avail->flags = in virtqueue_enable_cb_delayed_split()
954 cpu_to_virtio16(_vq->vdev, in virtqueue_enable_cb_delayed_split()
955 vq->split.avail_flags_shadow); in virtqueue_enable_cb_delayed_split()
958 bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4; in virtqueue_enable_cb_delayed_split()
960 virtio_store_mb(vq->weak_barriers, in virtqueue_enable_cb_delayed_split()
961 &vring_used_event(&vq->split.vring), in virtqueue_enable_cb_delayed_split()
962 cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs)); in virtqueue_enable_cb_delayed_split()
964 if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx) in virtqueue_enable_cb_delayed_split()
965 - vq->last_used_idx) > bufs)) { in virtqueue_enable_cb_delayed_split()
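Worked example for the 3/4 heuristic above: with avail_idx_shadow - last_used_idx = 16 buffers outstanding, bufs = 12, so the used-event index is parked 12 entries ahead of last_used_idx and roughly a dozen completions can accumulate before the device raises a single interrupt.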
982 for (i = 0; i < vq->split.vring.num; i++) { in virtqueue_detach_unused_buf_split()
983 if (!vq->split.desc_state[i].data) in virtqueue_detach_unused_buf_split()
986 buf = vq->split.desc_state[i].data; in virtqueue_detach_unused_buf_split()
988 vq->split.avail_idx_shadow--; in virtqueue_detach_unused_buf_split()
989 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev, in virtqueue_detach_unused_buf_split()
990 vq->split.avail_idx_shadow); in virtqueue_detach_unused_buf_split()
995 BUG_ON(vq->vq.num_free != vq->split.vring.num); in virtqueue_detach_unused_buf_split()
1006 vdev = vq->vq.vdev; in virtqueue_vring_init_split()
1008 vring_split->avail_flags_shadow = 0; in virtqueue_vring_init_split()
1009 vring_split->avail_idx_shadow = 0; in virtqueue_vring_init_split()
1012 if (!vq->vq.callback) { in virtqueue_vring_init_split()
1013 vring_split->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; in virtqueue_vring_init_split()
1014 if (!vq->event) in virtqueue_vring_init_split()
1015 vring_split->vring.avail->flags = cpu_to_virtio16(vdev, in virtqueue_vring_init_split()
1016 vring_split->avail_flags_shadow); in virtqueue_vring_init_split()
1024 num = vq->split.vring.num; in virtqueue_reinit_split()
1026 vq->split.vring.avail->flags = 0; in virtqueue_reinit_split()
1027 vq->split.vring.avail->idx = 0; in virtqueue_reinit_split()
1030 vq->split.vring.avail->ring[num] = 0; in virtqueue_reinit_split()
1032 vq->split.vring.used->flags = 0; in virtqueue_reinit_split()
1033 vq->split.vring.used->idx = 0; in virtqueue_reinit_split()
1036 *(__virtio16 *)&(vq->split.vring.used->ring[num]) = 0; in virtqueue_reinit_split()
1040 virtqueue_vring_init_split(&vq->split, vq); in virtqueue_reinit_split()
1046 vq->split = *vring_split; in virtqueue_vring_attach_split()
1049 vq->free_head = 0; in virtqueue_vring_attach_split()
1056 u32 num = vring_split->vring.num; in vring_alloc_state_extra_split()
1068 vring_split->desc_state = state; in vring_alloc_state_extra_split()
1069 vring_split->desc_extra = extra; in vring_alloc_state_extra_split()
1075 return -ENOMEM; in vring_alloc_state_extra_split()
1081 vring_free_queue(vdev, vring_split->queue_size_in_bytes, in vring_free_split()
1082 vring_split->vring.desc, in vring_free_split()
1083 vring_split->queue_dma_addr, in vring_free_split()
1086 kfree(vring_split->desc_state); in vring_free_split()
1087 kfree(vring_split->desc_extra); in vring_free_split()
1102 dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num); in vring_alloc_queue_split()
1103 return -EINVAL; in vring_alloc_queue_split()
1115 return -ENOMEM; in vring_alloc_queue_split()
1119 return -ENOMEM; in vring_alloc_queue_split()
1128 return -ENOMEM; in vring_alloc_queue_split()
1130 vring_init(&vring_split->vring, num, queue, vring_align); in vring_alloc_queue_split()
1132 vring_split->queue_dma_addr = dma_addr; in vring_alloc_queue_split()
1133 vring_split->queue_size_in_bytes = vring_size(num, vring_align); in vring_alloc_queue_split()
1135 vring_split->vring_align = vring_align; in vring_alloc_queue_split()
1136 vring_split->may_reduce_num = may_reduce_num; in vring_alloc_queue_split()
1170 to_vvq(vq)->we_own_ring = true; in vring_create_virtqueue_split()
1179 struct virtio_device *vdev = _vq->vdev; in virtqueue_resize_split()
1183 vq->split.vring_align, in virtqueue_resize_split()
1184 vq->split.may_reduce_num, in virtqueue_resize_split()
1193 vring_free(&vq->vq); in virtqueue_resize_split()
1206 return -ENOMEM; in virtqueue_resize_split()
1211 * Packed ring specific functions - *_packed().
1220 return last_used_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR)); in packed_last_used()
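On the packed ring, last_used_idx carries the used-side wrap counter in bit VRING_PACKED_EVENT_F_WRAP_CTR (bit 15), so index and counter can be read or written together with one READ_ONCE()/WRITE_ONCE(). The matched helper strips the counter; its counterpart, in the same upstream shape, extracts it:

static inline bool packed_used_wrap_counter(u16 last_used_idx)
{
	return !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR));
}

static inline u16 packed_last_used(u16 last_used_idx)
{
	/* ~(-(1 << 15)) == 0x7fff: mask off the wrap-counter bit. */
	return last_used_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR));
}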
1228 flags = extra->flags; in vring_unmap_extra_packed()
1231 if (!vq->use_dma_api) in vring_unmap_extra_packed()
1235 extra->addr, extra->len, in vring_unmap_extra_packed()
1239 if (!vq->do_unmap) in vring_unmap_extra_packed()
1243 extra->addr, extra->len, in vring_unmap_extra_packed()
1254 if (!vq->do_unmap) in vring_unmap_desc_packed()
1257 flags = le16_to_cpu(desc->flags); in vring_unmap_desc_packed()
1260 le64_to_cpu(desc->addr), in vring_unmap_desc_packed()
1261 le32_to_cpu(desc->len), in vring_unmap_desc_packed()
1297 head = vq->packed.next_avail_idx; in virtqueue_add_indirect_packed()
1300 return -ENOMEM; in virtqueue_add_indirect_packed()
1302 if (unlikely(vq->vq.num_free < 1)) { in virtqueue_add_indirect_packed()
1303 pr_debug("Can't add buf len 1 - avail = 0\n"); in virtqueue_add_indirect_packed()
1306 return -ENOSPC; in virtqueue_add_indirect_packed()
1310 id = vq->free_head; in virtqueue_add_indirect_packed()
1311 BUG_ON(id == vq->packed.vring.num); in virtqueue_add_indirect_packed()
1322 desc[i].len = cpu_to_le32(sg->length); in virtqueue_add_indirect_packed()
1332 if (vq->premapped) in virtqueue_add_indirect_packed()
1338 vq->packed.vring.desc[head].addr = cpu_to_le64(addr); in virtqueue_add_indirect_packed()
1339 vq->packed.vring.desc[head].len = cpu_to_le32(total_sg * in virtqueue_add_indirect_packed()
1341 vq->packed.vring.desc[head].id = cpu_to_le16(id); in virtqueue_add_indirect_packed()
1343 if (vq->use_dma_api) { in virtqueue_add_indirect_packed()
1344 vq->packed.desc_extra[id].addr = addr; in virtqueue_add_indirect_packed()
1345 vq->packed.desc_extra[id].len = total_sg * in virtqueue_add_indirect_packed()
1347 vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT | in virtqueue_add_indirect_packed()
1348 vq->packed.avail_used_flags; in virtqueue_add_indirect_packed()
1356 virtio_wmb(vq->weak_barriers); in virtqueue_add_indirect_packed()
1357 vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT | in virtqueue_add_indirect_packed()
1358 vq->packed.avail_used_flags); in virtqueue_add_indirect_packed()
1361 vq->vq.num_free -= 1; in virtqueue_add_indirect_packed()
1365 if (n >= vq->packed.vring.num) { in virtqueue_add_indirect_packed()
1367 vq->packed.avail_wrap_counter ^= 1; in virtqueue_add_indirect_packed()
1368 vq->packed.avail_used_flags ^= in virtqueue_add_indirect_packed()
1372 vq->packed.next_avail_idx = n; in virtqueue_add_indirect_packed()
1373 vq->free_head = vq->packed.desc_extra[id].next; in virtqueue_add_indirect_packed()
1376 vq->packed.desc_state[id].num = 1; in virtqueue_add_indirect_packed()
1377 vq->packed.desc_state[id].data = data; in virtqueue_add_indirect_packed()
1378 vq->packed.desc_state[id].indir_desc = desc; in virtqueue_add_indirect_packed()
1379 vq->packed.desc_state[id].last = id; in virtqueue_add_indirect_packed()
1381 vq->num_added += 1; in virtqueue_add_indirect_packed()
1398 return -ENOMEM; in virtqueue_add_indirect_packed()
1421 BUG_ON(ctx && vq->indirect); in virtqueue_add_packed()
1423 if (unlikely(vq->broken)) { in virtqueue_add_packed()
1425 return -EIO; in virtqueue_add_packed()
1435 if (err != -ENOMEM) { in virtqueue_add_packed()
1443 head = vq->packed.next_avail_idx; in virtqueue_add_packed()
1444 avail_used_flags = vq->packed.avail_used_flags; in virtqueue_add_packed()
1446 WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect); in virtqueue_add_packed()
1448 desc = vq->packed.vring.desc; in virtqueue_add_packed()
1452 if (unlikely(vq->vq.num_free < descs_used)) { in virtqueue_add_packed()
1453 pr_debug("Can't add buf len %i - avail = %i\n", in virtqueue_add_packed()
1454 descs_used, vq->vq.num_free); in virtqueue_add_packed()
1456 return -ENOSPC; in virtqueue_add_packed()
1459 id = vq->free_head; in virtqueue_add_packed()
1460 BUG_ON(id == vq->packed.vring.num); in virtqueue_add_packed()
1472 flags = cpu_to_le16(vq->packed.avail_used_flags | in virtqueue_add_packed()
1481 desc[i].len = cpu_to_le32(sg->length); in virtqueue_add_packed()
1484 if (unlikely(vq->use_dma_api)) { in virtqueue_add_packed()
1485 vq->packed.desc_extra[curr].addr = addr; in virtqueue_add_packed()
1486 vq->packed.desc_extra[curr].len = sg->length; in virtqueue_add_packed()
1487 vq->packed.desc_extra[curr].flags = in virtqueue_add_packed()
1491 curr = vq->packed.desc_extra[curr].next; in virtqueue_add_packed()
1493 if ((unlikely(++i >= vq->packed.vring.num))) { in virtqueue_add_packed()
1495 vq->packed.avail_used_flags ^= in virtqueue_add_packed()
1503 vq->packed.avail_wrap_counter ^= 1; in virtqueue_add_packed()
1506 vq->vq.num_free -= descs_used; in virtqueue_add_packed()
1509 vq->packed.next_avail_idx = i; in virtqueue_add_packed()
1510 vq->free_head = curr; in virtqueue_add_packed()
1513 vq->packed.desc_state[id].num = descs_used; in virtqueue_add_packed()
1514 vq->packed.desc_state[id].data = data; in virtqueue_add_packed()
1515 vq->packed.desc_state[id].indir_desc = ctx; in virtqueue_add_packed()
1516 vq->packed.desc_state[id].last = prev; in virtqueue_add_packed()
1523 virtio_wmb(vq->weak_barriers); in virtqueue_add_packed()
1524 vq->packed.vring.desc[head].flags = head_flags; in virtqueue_add_packed()
1525 vq->num_added += descs_used; in virtqueue_add_packed()
1535 curr = vq->free_head; in virtqueue_add_packed()
1537 vq->packed.avail_used_flags = avail_used_flags; in virtqueue_add_packed()
1542 vring_unmap_extra_packed(vq, &vq->packed.desc_extra[curr]); in virtqueue_add_packed()
1543 curr = vq->packed.desc_extra[curr].next; in virtqueue_add_packed()
1545 if (i >= vq->packed.vring.num) in virtqueue_add_packed()
1550 return -EIO; in virtqueue_add_packed()
1572 virtio_mb(vq->weak_barriers); in virtqueue_kick_prepare_packed()
1574 old = vq->packed.next_avail_idx - vq->num_added; in virtqueue_kick_prepare_packed()
1575 new = vq->packed.next_avail_idx; in virtqueue_kick_prepare_packed()
1576 vq->num_added = 0; in virtqueue_kick_prepare_packed()
1578 snapshot.u32 = *(u32 *)vq->packed.vring.device; in virtqueue_kick_prepare_packed()
1593 if (wrap_counter != vq->packed.avail_wrap_counter) in virtqueue_kick_prepare_packed()
1594 event_idx -= vq->packed.vring.num; in virtqueue_kick_prepare_packed()
1609 state = &vq->packed.desc_state[id]; in detach_buf_packed()
1612 state->data = NULL; in detach_buf_packed()
1614 vq->packed.desc_extra[state->last].next = vq->free_head; in detach_buf_packed()
1615 vq->free_head = id; in detach_buf_packed()
1616 vq->vq.num_free += state->num; in detach_buf_packed()
1618 if (unlikely(vq->use_dma_api)) { in detach_buf_packed()
1620 for (i = 0; i < state->num; i++) { in detach_buf_packed()
1622 &vq->packed.desc_extra[curr]); in detach_buf_packed()
1623 curr = vq->packed.desc_extra[curr].next; in detach_buf_packed()
1627 if (vq->indirect) { in detach_buf_packed()
1631 desc = state->indir_desc; in detach_buf_packed()
1635 if (vq->do_unmap) { in detach_buf_packed()
1636 len = vq->packed.desc_extra[id].len; in detach_buf_packed()
1642 state->indir_desc = NULL; in detach_buf_packed()
1644 *ctx = state->indir_desc; in detach_buf_packed()
1654 flags = le16_to_cpu(vq->packed.vring.desc[idx].flags); in is_used_desc_packed()
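A packed-ring descriptor counts as used once its AVAIL and USED flag bits agree with each other and with the wrap counter the driver expects. The check around the matched line reads, in sketch form:

static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
				       u16 idx, bool used_wrap_counter)
{
	bool avail, used;
	u16 flags;

	flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
	avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
	used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));

	/* Both bits flip in step with the driver's wrap counter. */
	return avail == used && used == used_wrap_counter;
}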
1667 last_used_idx = READ_ONCE(vq->last_used_idx); in more_used_packed()
1684 if (unlikely(vq->broken)) { in virtqueue_get_buf_ctx_packed()
1696 virtio_rmb(vq->weak_barriers); in virtqueue_get_buf_ctx_packed()
1698 last_used_idx = READ_ONCE(vq->last_used_idx); in virtqueue_get_buf_ctx_packed()
1701 id = le16_to_cpu(vq->packed.vring.desc[last_used].id); in virtqueue_get_buf_ctx_packed()
1702 *len = le32_to_cpu(vq->packed.vring.desc[last_used].len); in virtqueue_get_buf_ctx_packed()
1704 if (unlikely(id >= vq->packed.vring.num)) { in virtqueue_get_buf_ctx_packed()
1708 if (unlikely(!vq->packed.desc_state[id].data)) { in virtqueue_get_buf_ctx_packed()
1714 ret = vq->packed.desc_state[id].data; in virtqueue_get_buf_ctx_packed()
1717 last_used += vq->packed.desc_state[id].num; in virtqueue_get_buf_ctx_packed()
1718 if (unlikely(last_used >= vq->packed.vring.num)) { in virtqueue_get_buf_ctx_packed()
1719 last_used -= vq->packed.vring.num; in virtqueue_get_buf_ctx_packed()
1724 WRITE_ONCE(vq->last_used_idx, last_used); in virtqueue_get_buf_ctx_packed()
1731 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC) in virtqueue_get_buf_ctx_packed()
1732 virtio_store_mb(vq->weak_barriers, in virtqueue_get_buf_ctx_packed()
1733 &vq->packed.vring.driver->off_wrap, in virtqueue_get_buf_ctx_packed()
1734 cpu_to_le16(vq->last_used_idx)); in virtqueue_get_buf_ctx_packed()
1746 if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) { in virtqueue_disable_cb_packed()
1747 vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE; in virtqueue_disable_cb_packed()
1753 if (vq->event_triggered) in virtqueue_disable_cb_packed()
1756 vq->packed.vring.driver->flags = in virtqueue_disable_cb_packed()
1757 cpu_to_le16(vq->packed.event_flags_shadow); in virtqueue_disable_cb_packed()
1772 if (vq->event) { in virtqueue_enable_cb_prepare_packed()
1773 vq->packed.vring.driver->off_wrap = in virtqueue_enable_cb_prepare_packed()
1774 cpu_to_le16(vq->last_used_idx); in virtqueue_enable_cb_prepare_packed()
1779 virtio_wmb(vq->weak_barriers); in virtqueue_enable_cb_prepare_packed()
1782 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) { in virtqueue_enable_cb_prepare_packed()
1783 vq->packed.event_flags_shadow = vq->event ? in virtqueue_enable_cb_prepare_packed()
1786 vq->packed.vring.driver->flags = in virtqueue_enable_cb_prepare_packed()
1787 cpu_to_le16(vq->packed.event_flags_shadow); in virtqueue_enable_cb_prepare_packed()
1791 return vq->last_used_idx; in virtqueue_enable_cb_prepare_packed()
1819 if (vq->event) { in virtqueue_enable_cb_delayed_packed()
1821 bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4; in virtqueue_enable_cb_delayed_packed()
1822 last_used_idx = READ_ONCE(vq->last_used_idx); in virtqueue_enable_cb_delayed_packed()
1826 if (used_idx >= vq->packed.vring.num) { in virtqueue_enable_cb_delayed_packed()
1827 used_idx -= vq->packed.vring.num; in virtqueue_enable_cb_delayed_packed()
1831 vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx | in virtqueue_enable_cb_delayed_packed()
1838 virtio_wmb(vq->weak_barriers); in virtqueue_enable_cb_delayed_packed()
1841 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) { in virtqueue_enable_cb_delayed_packed()
1842 vq->packed.event_flags_shadow = vq->event ? in virtqueue_enable_cb_delayed_packed()
1845 vq->packed.vring.driver->flags = in virtqueue_enable_cb_delayed_packed()
1846 cpu_to_le16(vq->packed.event_flags_shadow); in virtqueue_enable_cb_delayed_packed()
1851 * before re-checking for more used buffers. in virtqueue_enable_cb_delayed_packed()
1853 virtio_mb(vq->weak_barriers); in virtqueue_enable_cb_delayed_packed()
1855 last_used_idx = READ_ONCE(vq->last_used_idx); in virtqueue_enable_cb_delayed_packed()
1875 for (i = 0; i < vq->packed.vring.num; i++) { in virtqueue_detach_unused_buf_packed()
1876 if (!vq->packed.desc_state[i].data) in virtqueue_detach_unused_buf_packed()
1879 buf = vq->packed.desc_state[i].data; in virtqueue_detach_unused_buf_packed()
1885 BUG_ON(vq->vq.num_free != vq->packed.vring.num); in virtqueue_detach_unused_buf_packed()
1903 for (i = 0; i < num - 1; i++) in vring_alloc_desc_extra()
1913 if (vring_packed->vring.desc) in vring_free_packed()
1914 vring_free_queue(vdev, vring_packed->ring_size_in_bytes, in vring_free_packed()
1915 vring_packed->vring.desc, in vring_free_packed()
1916 vring_packed->ring_dma_addr, in vring_free_packed()
1919 if (vring_packed->vring.driver) in vring_free_packed()
1920 vring_free_queue(vdev, vring_packed->event_size_in_bytes, in vring_free_packed()
1921 vring_packed->vring.driver, in vring_free_packed()
1922 vring_packed->driver_event_dma_addr, in vring_free_packed()
1925 if (vring_packed->vring.device) in vring_free_packed()
1926 vring_free_queue(vdev, vring_packed->event_size_in_bytes, in vring_free_packed()
1927 vring_packed->vring.device, in vring_free_packed()
1928 vring_packed->device_event_dma_addr, in vring_free_packed()
1931 kfree(vring_packed->desc_state); in vring_free_packed()
1932 kfree(vring_packed->desc_extra); in vring_free_packed()
1953 vring_packed->vring.desc = ring; in vring_alloc_queue_packed()
1954 vring_packed->ring_dma_addr = ring_dma_addr; in vring_alloc_queue_packed()
1955 vring_packed->ring_size_in_bytes = ring_size_in_bytes; in vring_alloc_queue_packed()
1966 vring_packed->vring.driver = driver; in vring_alloc_queue_packed()
1967 vring_packed->event_size_in_bytes = event_size_in_bytes; in vring_alloc_queue_packed()
1968 vring_packed->driver_event_dma_addr = driver_event_dma_addr; in vring_alloc_queue_packed()
1977 vring_packed->vring.device = device; in vring_alloc_queue_packed()
1978 vring_packed->device_event_dma_addr = device_event_dma_addr; in vring_alloc_queue_packed()
1980 vring_packed->vring.num = num; in vring_alloc_queue_packed()
1986 return -ENOMEM; in vring_alloc_queue_packed()
1993 u32 num = vring_packed->vring.num; in vring_alloc_state_extra_packed()
2005 vring_packed->desc_state = state; in vring_alloc_state_extra_packed()
2006 vring_packed->desc_extra = extra; in vring_alloc_state_extra_packed()
2013 return -ENOMEM; in vring_alloc_state_extra_packed()
2019 vring_packed->next_avail_idx = 0; in virtqueue_vring_init_packed()
2020 vring_packed->avail_wrap_counter = 1; in virtqueue_vring_init_packed()
2021 vring_packed->event_flags_shadow = 0; in virtqueue_vring_init_packed()
2022 vring_packed->avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL; in virtqueue_vring_init_packed()
2026 vring_packed->event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE; in virtqueue_vring_init_packed()
2027 vring_packed->vring.driver->flags = in virtqueue_vring_init_packed()
2028 cpu_to_le16(vring_packed->event_flags_shadow); in virtqueue_vring_init_packed()
2035 vq->packed = *vring_packed; in virtqueue_vring_attach_packed()
2038 vq->free_head = 0; in virtqueue_vring_attach_packed()
2043 memset(vq->packed.vring.device, 0, vq->packed.event_size_in_bytes); in virtqueue_reinit_packed()
2044 memset(vq->packed.vring.driver, 0, vq->packed.event_size_in_bytes); in virtqueue_reinit_packed()
2047 memset(vq->packed.vring.desc, 0, vq->packed.ring_size_in_bytes); in virtqueue_reinit_packed()
2049 virtqueue_init(vq, vq->packed.vring.num); in virtqueue_reinit_packed()
2050 virtqueue_vring_init_packed(&vq->packed, !!vq->vq.callback); in virtqueue_reinit_packed()
2077 vq->vq.callback = callback; in vring_create_virtqueue_packed()
2078 vq->vq.vdev = vdev; in vring_create_virtqueue_packed()
2079 vq->vq.name = name; in vring_create_virtqueue_packed()
2080 vq->vq.index = index; in vring_create_virtqueue_packed()
2081 vq->vq.reset = false; in vring_create_virtqueue_packed()
2082 vq->we_own_ring = true; in vring_create_virtqueue_packed()
2083 vq->notify = notify; in vring_create_virtqueue_packed()
2084 vq->weak_barriers = weak_barriers; in vring_create_virtqueue_packed()
2086 vq->broken = true; in vring_create_virtqueue_packed()
2088 vq->broken = false; in vring_create_virtqueue_packed()
2090 vq->packed_ring = true; in vring_create_virtqueue_packed()
2091 vq->dma_dev = dma_dev; in vring_create_virtqueue_packed()
2092 vq->use_dma_api = vring_use_dma_api(vdev); in vring_create_virtqueue_packed()
2093 vq->premapped = false; in vring_create_virtqueue_packed()
2094 vq->do_unmap = vq->use_dma_api; in vring_create_virtqueue_packed()
2096 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) && in vring_create_virtqueue_packed()
2098 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); in vring_create_virtqueue_packed()
2101 vq->weak_barriers = false; in vring_create_virtqueue_packed()
2112 spin_lock(&vdev->vqs_list_lock); in vring_create_virtqueue_packed()
2113 list_add_tail(&vq->vq.list, &vdev->vqs); in vring_create_virtqueue_packed()
2114 spin_unlock(&vdev->vqs_list_lock); in vring_create_virtqueue_packed()
2115 return &vq->vq; in vring_create_virtqueue_packed()
2129 struct virtio_device *vdev = _vq->vdev; in virtqueue_resize_packed()
2139 vring_free(&vq->vq); in virtqueue_resize_packed()
2141 virtqueue_vring_init_packed(&vring_packed, !!vq->vq.callback); in virtqueue_resize_packed()
2152 return -ENOMEM; in virtqueue_resize_packed()
2159 struct virtio_device *vdev = vq->vq.vdev; in virtqueue_disable_and_recycle()
2163 if (!vq->we_own_ring) in virtqueue_disable_and_recycle()
2164 return -EPERM; in virtqueue_disable_and_recycle()
2166 if (!vdev->config->disable_vq_and_reset) in virtqueue_disable_and_recycle()
2167 return -ENOENT; in virtqueue_disable_and_recycle()
2169 if (!vdev->config->enable_vq_after_reset) in virtqueue_disable_and_recycle()
2170 return -ENOENT; in virtqueue_disable_and_recycle()
2172 err = vdev->config->disable_vq_and_reset(_vq); in virtqueue_disable_and_recycle()
2185 struct virtio_device *vdev = vq->vq.vdev; in virtqueue_enable_after_reset()
2187 if (vdev->config->enable_vq_after_reset(_vq)) in virtqueue_enable_after_reset()
2188 return -EBUSY; in virtqueue_enable_after_reset()
2208 return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg, in virtqueue_add()
2215 * virtqueue_add_sgs - expose buffers to other end
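A typical caller builds one scatterlist per buffer and groups all device-readable entries ahead of the device-writable ones. A hedged driver-side sketch, where struct my_req and its hdr/resp fields are placeholder driver objects:

static int queue_request(struct virtqueue *vq, struct my_req *req)
{
	struct scatterlist hdr, status, *sgs[2];
	int err;

	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));	/* out: device reads */
	sg_init_one(&status, &req->resp, sizeof(req->resp)); /* in: device writes */
	sgs[0] = &hdr;
	sgs[1] = &status;

	err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
	if (err)
		return err;	/* e.g. -ENOSPC when the ring is full */
	virtqueue_kick(vq);
	return 0;
}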
2250 * virtqueue_add_outbuf - expose output buffers to other end
2252 * @sg: scatterlist (must be well-formed and terminated!)
2272 * virtqueue_add_inbuf - expose input buffers to other end
2274 * @sg: scatterlist (must be well-formed and terminated!)
2294 * virtqueue_add_inbuf_ctx - expose input buffers to other end
2296 * @sg: scatterlist (must be well-formed and terminated!)
2318 * virtqueue_dma_dev - get the dma dev
2327 if (vq->use_dma_api) in virtqueue_dma_dev()
2335 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
2349 return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) : in virtqueue_kick_prepare()
2355 * virtqueue_notify - second half of split virtqueue_kick call.
2366 if (unlikely(vq->broken)) in virtqueue_notify()
2370 if (!vq->notify(_vq)) { in virtqueue_notify()
2371 vq->broken = true; in virtqueue_notify()
2379 * virtqueue_kick - update after add_buf
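virtqueue_kick() is essentially the two halves above composed; the split API exists so a driver can run the prepare step under its own lock and the potentially expensive notify outside it:

bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}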
2399 * virtqueue_get_buf_ctx - get the next used buffer
2420 return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) : in virtqueue_get_buf_ctx()
2431 * virtqueue_disable_cb - disable callbacks
2443 if (vq->packed_ring) in virtqueue_disable_cb()
2451 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
2454 * This re-enables callbacks; it returns current queue state
2466 if (vq->event_triggered) in virtqueue_enable_cb_prepare()
2467 vq->event_triggered = false; in virtqueue_enable_cb_prepare()
2469 return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) : in virtqueue_enable_cb_prepare()
2475 * virtqueue_poll - query pending used buffers
2487 if (unlikely(vq->broken)) in virtqueue_poll()
2490 virtio_mb(vq->weak_barriers); in virtqueue_poll()
2491 return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) : in virtqueue_poll()
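Paired with virtqueue_enable_cb_prepare(), this supports the race-free re-enable pattern NAPI-style drivers use. A minimal sketch, where process_buf() is a placeholder for driver work:

static bool poll_vq(struct virtqueue *vq)
{
	void *buf;
	unsigned int len;
	unsigned int opaque = virtqueue_enable_cb_prepare(vq);

	if (!virtqueue_poll(vq, opaque))
		return false;	/* callbacks re-enabled, nothing pending */

	/* A buffer slipped in after re-enabling: go back to polling. */
	virtqueue_disable_cb(vq);
	while ((buf = virtqueue_get_buf(vq, &len)))
		process_buf(buf, len);	/* placeholder */
	return true;
}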
2497 * virtqueue_enable_cb - restart callbacks after disable_cb.
2500 * This re-enables callbacks; it returns "false" if there are pending
2516 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
2519 * This re-enables callbacks but hints to the other side to delay
2532 if (vq->event_triggered) in virtqueue_enable_cb_delayed()
2533 vq->event_triggered = false; in virtqueue_enable_cb_delayed()
2535 return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) : in virtqueue_enable_cb_delayed()
2541 * virtqueue_detach_unused_buf - detach first unused buffer
2552 return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) : in virtqueue_detach_unused_buf()
2559 return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq); in more_used()
2563 * vring_interrupt - notify a virtqueue on an interrupt
2579 if (unlikely(vq->broken)) { in vring_interrupt()
2581 dev_warn_once(&vq->vq.vdev->dev, in vring_interrupt()
2582 "virtio vring IRQ raised before DRIVER_OK"); in vring_interrupt()
2590 if (vq->event) in vring_interrupt()
2591 data_race(vq->event_triggered = true); in vring_interrupt()
2593 pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback); in vring_interrupt()
2594 if (vq->vq.callback) in vring_interrupt()
2595 vq->vq.callback(&vq->vq); in vring_interrupt()
2622 vq->packed_ring = false; in __vring_new_virtqueue()
2623 vq->vq.callback = callback; in __vring_new_virtqueue()
2624 vq->vq.vdev = vdev; in __vring_new_virtqueue()
2625 vq->vq.name = name; in __vring_new_virtqueue()
2626 vq->vq.index = index; in __vring_new_virtqueue()
2627 vq->vq.reset = false; in __vring_new_virtqueue()
2628 vq->we_own_ring = false; in __vring_new_virtqueue()
2629 vq->notify = notify; in __vring_new_virtqueue()
2630 vq->weak_barriers = weak_barriers; in __vring_new_virtqueue()
2632 vq->broken = true; in __vring_new_virtqueue()
2634 vq->broken = false; in __vring_new_virtqueue()
2636 vq->dma_dev = dma_dev; in __vring_new_virtqueue()
2637 vq->use_dma_api = vring_use_dma_api(vdev); in __vring_new_virtqueue()
2638 vq->premapped = false; in __vring_new_virtqueue()
2639 vq->do_unmap = vq->use_dma_api; in __vring_new_virtqueue()
2641 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) && in __vring_new_virtqueue()
2643 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); in __vring_new_virtqueue()
2646 vq->weak_barriers = false; in __vring_new_virtqueue()
2656 virtqueue_init(vq, vring_split->vring.num); in __vring_new_virtqueue()
2659 spin_lock(&vdev->vqs_list_lock); in __vring_new_virtqueue()
2660 list_add_tail(&vq->vq.list, &vdev->vqs); in __vring_new_virtqueue()
2661 spin_unlock(&vdev->vqs_list_lock); in __vring_new_virtqueue()
2662 return &vq->vq; in __vring_new_virtqueue()
2681 context, notify, callback, name, vdev->dev.parent); in vring_create_virtqueue()
2685 context, notify, callback, name, vdev->dev.parent); in vring_create_virtqueue()
2715 * virtqueue_resize - resize the vring of vq
2730 * -ENOMEM: Failed to allocate a new ring, fall back to the original ring size.
2732 * -EBUSY: Failed to sync with device, vq may not work properly
2733 * -ENOENT: Transport or device not supported
2734 * -E2BIG/-EINVAL: num is invalid
2735 * -EPERM: Operation not permitted
2744 if (num > vq->vq.num_max) in virtqueue_resize()
2745 return -E2BIG; in virtqueue_resize()
2748 return -EINVAL; in virtqueue_resize()
2750 if ((vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num) == num) in virtqueue_resize()
2757 if (vq->packed_ring) in virtqueue_resize()
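The recycle callback receives every still-queued buffer detached during the resize. A hedged usage sketch, assuming the three-argument signature used by this version, with free_rx_buf() as a placeholder:

static void recycle_rx_buf(struct virtqueue *vq, void *buf)
{
	free_rx_buf(buf);	/* placeholder: driver-specific teardown */
}

static int change_ring_size(struct virtqueue *rx_vq, u32 new_num)
{
	/* e.g. called from an ethtool set_ringparam handler */
	int err = virtqueue_resize(rx_vq, new_num, recycle_rx_buf);

	if (err == -ENOMEM)
		pr_warn("vq resize failed, keeping the old ring\n");
	return err;
}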
2767 * virtqueue_set_dma_premapped - set the vring premapped mode
2785 * -EINVAL: too late to enable premapped mode, the vq already contains buffers.
2794 num = vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num; in virtqueue_set_dma_premapped()
2796 if (num != vq->vq.num_free) { in virtqueue_set_dma_premapped()
2798 return -EINVAL; in virtqueue_set_dma_premapped()
2801 vq->premapped = true; in virtqueue_set_dma_premapped()
2802 vq->do_unmap = false; in virtqueue_set_dma_premapped()
2811 * virtqueue_reset - detach and recycle all unused buffers
2820 * -EBUSY: Failed to sync with device, vq may not work properly
2821 * -ENOENT: Transport or device not supported
2822 * -EPERM: Operation not permitted
2834 if (vq->packed_ring) in virtqueue_reset()
2863 vdev->dev.parent); in vring_new_virtqueue()
2871 if (vq->we_own_ring) { in vring_free()
2872 if (vq->packed_ring) { in vring_free()
2873 vring_free_queue(vq->vq.vdev, in vring_free()
2874 vq->packed.ring_size_in_bytes, in vring_free()
2875 vq->packed.vring.desc, in vring_free()
2876 vq->packed.ring_dma_addr, in vring_free()
2879 vring_free_queue(vq->vq.vdev, in vring_free()
2880 vq->packed.event_size_in_bytes, in vring_free()
2881 vq->packed.vring.driver, in vring_free()
2882 vq->packed.driver_event_dma_addr, in vring_free()
2885 vring_free_queue(vq->vq.vdev, in vring_free()
2886 vq->packed.event_size_in_bytes, in vring_free()
2887 vq->packed.vring.device, in vring_free()
2888 vq->packed.device_event_dma_addr, in vring_free()
2891 kfree(vq->packed.desc_state); in vring_free()
2892 kfree(vq->packed.desc_extra); in vring_free()
2894 vring_free_queue(vq->vq.vdev, in vring_free()
2895 vq->split.queue_size_in_bytes, in vring_free()
2896 vq->split.vring.desc, in vring_free()
2897 vq->split.queue_dma_addr, in vring_free()
2901 if (!vq->packed_ring) { in vring_free()
2902 kfree(vq->split.desc_state); in vring_free()
2903 kfree(vq->split.desc_extra); in vring_free()
2911 spin_lock(&vq->vq.vdev->vqs_list_lock); in vring_del_virtqueue()
2912 list_del(&_vq->list); in vring_del_virtqueue()
2913 spin_unlock(&vq->vq.vdev->vqs_list_lock); in vring_del_virtqueue()
2926 if (vq->packed_ring) in vring_notification_data()
2927 next = (vq->packed.next_avail_idx & in vring_notification_data()
2928 ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR))) | in vring_notification_data()
2929 vq->packed.avail_wrap_counter << in vring_notification_data()
2932 next = vq->split.avail_idx_shadow; in vring_notification_data()
2934 return next << 16 | _vq->index; in vring_notification_data()
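With VIRTIO_F_NOTIFICATION_DATA the driver writes this 32-bit value instead of the bare queue index: vq index in the low half, next position in the high half, and (for packed rings) the wrap counter folded into the top bit. A decode sketch from the device's point of view, where data is the value built above:

static void decode_notification(u32 data)
{
	u16 vqn   = data & 0xffff;		/* virtqueue index */
	u16 next  = (data >> 16) & 0x7fff;	/* next avail offset */
	bool wrap = data >> 31;			/* wrap counter (packed only) */

	/* ... device-model bookkeeping with vqn/next/wrap ... */
}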
2938 /* Manipulates transport-specific feature bits. */
2968 * virtqueue_get_vring_size - return the size of the virtqueue's vring
2979 return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num; in virtqueue_get_vring_size()
2991 WRITE_ONCE(vq->broken, true); in __virtqueue_break()
3003 WRITE_ONCE(vq->broken, false); in __virtqueue_unbreak()
3011 return READ_ONCE(vq->broken); in virtqueue_is_broken()
3023 spin_lock(&dev->vqs_list_lock); in virtio_break_device()
3024 list_for_each_entry(_vq, &dev->vqs, list) { in virtio_break_device()
3028 WRITE_ONCE(vq->broken, true); in virtio_break_device()
3030 spin_unlock(&dev->vqs_list_lock); in virtio_break_device()
3037 * vq->broken. This should only be used in some specific case e.g
3045 spin_lock(&dev->vqs_list_lock); in __virtio_unbreak_device()
3046 list_for_each_entry(_vq, &dev->vqs, list) { in __virtio_unbreak_device()
3050 WRITE_ONCE(vq->broken, false); in __virtio_unbreak_device()
3052 spin_unlock(&dev->vqs_list_lock); in __virtio_unbreak_device()
3060 BUG_ON(!vq->we_own_ring); in virtqueue_get_desc_addr()
3062 if (vq->packed_ring) in virtqueue_get_desc_addr()
3063 return vq->packed.ring_dma_addr; in virtqueue_get_desc_addr()
3065 return vq->split.queue_dma_addr; in virtqueue_get_desc_addr()
3073 BUG_ON(!vq->we_own_ring); in virtqueue_get_avail_addr()
3075 if (vq->packed_ring) in virtqueue_get_avail_addr()
3076 return vq->packed.driver_event_dma_addr; in virtqueue_get_avail_addr()
3078 return vq->split.queue_dma_addr + in virtqueue_get_avail_addr()
3079 ((char *)vq->split.vring.avail - (char *)vq->split.vring.desc); in virtqueue_get_avail_addr()
3087 BUG_ON(!vq->we_own_ring); in virtqueue_get_used_addr()
3089 if (vq->packed_ring) in virtqueue_get_used_addr()
3090 return vq->packed.device_event_dma_addr; in virtqueue_get_used_addr()
3092 return vq->split.queue_dma_addr + in virtqueue_get_used_addr()
3093 ((char *)vq->split.vring.used - (char *)vq->split.vring.desc); in virtqueue_get_used_addr()
3100 return &to_vvq(vq)->split.vring; in virtqueue_get_vring()
3105 * virtqueue_dma_map_single_attrs - map DMA for _vq
3113 * passed to this _vq when it is in pre-mapped mode.
3124 if (!vq->use_dma_api) { in virtqueue_dma_map_single_attrs()
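In pre-mapped mode the driver owns every mapping. A sketch of the round trip using the helpers documented here, with the buffer and direction supplied by the driver:

static int map_rx_buf(struct virtqueue *vq, void *buf, size_t len,
		      dma_addr_t *addr)
{
	/* Valid whether or not the core itself uses the DMA API. */
	*addr = virtqueue_dma_map_single_attrs(vq, buf, len,
					       DMA_FROM_DEVICE, 0);
	if (virtqueue_dma_mapping_error(vq, *addr))
		return -ENOMEM;
	return 0;
}

static void unmap_rx_buf(struct virtqueue *vq, dma_addr_t addr, size_t len)
{
	/* After the device has used the buffer. */
	virtqueue_dma_unmap_single_attrs(vq, addr, len, DMA_FROM_DEVICE, 0);
}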
3134 * virtqueue_dma_unmap_single_attrs - unmap DMA for _vq
3150 if (!vq->use_dma_api) in virtqueue_dma_unmap_single_attrs()
3158 * virtqueue_dma_mapping_error - check dma address
3168 if (!vq->use_dma_api) in virtqueue_dma_mapping_error()
3176 * virtqueue_dma_need_sync - check a dma address needs sync
3189 if (!vq->use_dma_api) in virtqueue_dma_need_sync()
3197 * virtqueue_dma_sync_single_range_for_cpu - dma sync for cpu
3216 if (!vq->use_dma_api) in virtqueue_dma_sync_single_range_for_cpu()
3224 * virtqueue_dma_sync_single_range_for_device - dma sync for device
3242 if (!vq->use_dma_api) in virtqueue_dma_sync_single_range_for_device()
3249 MODULE_DESCRIPTION("Virtio ring implementation");