Lines Matching +full:scatter +full:- +full:gather

1 // SPDX-License-Identifier: GPL-2.0-only
46 * struct virtio_chan - per-instance transport information
55 * @sg: scatter gather list which is used to pack a request (protected by @lock — both visible packers take chan->lock first; confirm no lockless user)
58 * We keep all per-channel information in a structure.
59 * This structure is allocated within the devices dev->mem space.
93 return PAGE_SIZE - offset_in_page(data); in rest_of_page()
97 * p9_virtio_close - reclaim resources of a channel
107 struct virtio_chan *chan = client->trans; in p9_virtio_close()
111 chan->inuse = false; in p9_virtio_close()
116 * req_done - callback which signals activity from the server
120 * on the virtio channel - most likely a response to request we
130 struct virtio_chan *chan = vq->vdev->priv; in req_done()
138 spin_lock_irqsave(&chan->lock, flags); in req_done()
139 while ((req = virtqueue_get_buf(chan->vq, &len)) != NULL) { in req_done()
140 if (!chan->ring_bufs_avail) { in req_done()
141 chan->ring_bufs_avail = 1; in req_done()
146 req->rc.size = len; in req_done()
147 p9_client_cb(chan->client, req, REQ_STATUS_RCVD); in req_done()
150 spin_unlock_irqrestore(&chan->lock, flags); in req_done()
153 wake_up(chan->vc_wq); in req_done()
157 * pack_sg_list - pack a scatter gather list from a linear buffer
158 * @sg: scatter/gather list to pack into
161 * @data: data to pack into scatter/gather list
162 * @count: amount of data to pack into the scatter/gather list
165 * arbitrary data into an existing scatter gather list, segmenting the
184 count -= s; in pack_sg_list()
187 if (index-start) in pack_sg_list()
188 sg_mark_end(&sg[index - 1]); in pack_sg_list()
189 return index-start; in pack_sg_list()
206 * pack_sg_list_p - Just like pack_sg_list. Instead of taking a buffer,
208 * @sg: scatter/gather list to pack into
212 * @nr_pages: number of pages to pack into the scatter/gather list
214 * @count: amount of data to pack into the scatter/gather list
224 BUG_ON(nr_pages > (limit - start)); in pack_sg_list_p()
230 s = PAGE_SIZE - data_off; in pack_sg_list_p()
238 count -= s; in pack_sg_list_p()
239 nr_pages--; in pack_sg_list_p()
242 if (index-start) in pack_sg_list_p()
243 sg_mark_end(&sg[index - 1]); in pack_sg_list_p()
244 return index - start; in pack_sg_list_p()
248 * p9_virtio_request - issue a request
260 struct virtio_chan *chan = client->trans; in p9_virtio_request()
265 WRITE_ONCE(req->status, REQ_STATUS_SENT); in p9_virtio_request()
267 spin_lock_irqsave(&chan->lock, flags); in p9_virtio_request()
271 out = pack_sg_list(chan->sg, 0, in p9_virtio_request()
272 VIRTQUEUE_NUM, req->tc.sdata, req->tc.size); in p9_virtio_request()
274 sgs[out_sgs++] = chan->sg; in p9_virtio_request()
276 in = pack_sg_list(chan->sg, out, in p9_virtio_request()
277 VIRTQUEUE_NUM, req->rc.sdata, req->rc.capacity); in p9_virtio_request()
279 sgs[out_sgs + in_sgs++] = chan->sg + out; in p9_virtio_request()
281 err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req, in p9_virtio_request()
284 if (err == -ENOSPC) { in p9_virtio_request()
285 chan->ring_bufs_avail = 0; in p9_virtio_request()
286 spin_unlock_irqrestore(&chan->lock, flags); in p9_virtio_request()
287 err = wait_event_killable(*chan->vc_wq, in p9_virtio_request()
288 chan->ring_bufs_avail); in p9_virtio_request()
289 if (err == -ERESTARTSYS) in p9_virtio_request()
295 spin_unlock_irqrestore(&chan->lock, flags); in p9_virtio_request()
298 return -EIO; in p9_virtio_request()
301 virtqueue_kick(chan->vq); in p9_virtio_request()
302 spin_unlock_irqrestore(&chan->lock, flags); in p9_virtio_request()
327 if (atomic_read(&vp_pinned) >= chan->p9_max_pages) { in p9_get_mapped_pages()
329 (atomic_read(&vp_pinned) < chan->p9_max_pages)); in p9_get_mapped_pages()
330 if (err == -ERESTARTSYS) in p9_get_mapped_pages()
346 /* we'd already checked that it's non-empty */ in p9_get_mapped_pages()
350 p = data->kvec->iov_base + data->iov_offset; in p9_get_mapped_pages()
358 nr_pages = DIV_ROUND_UP((unsigned long)p + len, PAGE_SIZE) - in p9_get_mapped_pages()
364 return -ENOMEM; in p9_get_mapped_pages()
367 p -= (*offs = offset_in_page(p)); in p9_get_mapped_pages()
384 void *to = req->rc.sdata + in_hdr_len; in handle_rerror()
387 if (req->rc.size < in_hdr_len || !pages) in handle_rerror()
394 if (unlikely(req->rc.size > P9_ZC_HDR_SZ)) in handle_rerror()
395 req->rc.size = P9_ZC_HDR_SZ; in handle_rerror()
398 size = req->rc.size - in_hdr_len; in handle_rerror()
399 n = PAGE_SIZE - offs; in handle_rerror()
404 size -= n; in handle_rerror()
410 * p9_virtio_zc_request - issue a zero copy request
429 struct virtio_chan *chan = client->trans; in p9_virtio_zc_request()
448 memcpy(&req->tc.sdata[req->tc.size - 4], &v, 4); in p9_virtio_zc_request()
455 sz = cpu_to_le32(req->tc.size + outlen); in p9_virtio_zc_request()
456 memcpy(&req->tc.sdata[0], &sz, sizeof(sz)); in p9_virtio_zc_request()
467 memcpy(&req->tc.sdata[req->tc.size - 4], &v, 4); in p9_virtio_zc_request()
471 WRITE_ONCE(req->status, REQ_STATUS_SENT); in p9_virtio_zc_request()
473 spin_lock_irqsave(&chan->lock, flags); in p9_virtio_zc_request()
478 out = pack_sg_list(chan->sg, 0, in p9_virtio_zc_request()
479 VIRTQUEUE_NUM, req->tc.sdata, req->tc.size); in p9_virtio_zc_request()
482 sgs[out_sgs++] = chan->sg; in p9_virtio_zc_request()
485 sgs[out_sgs++] = chan->sg + out; in p9_virtio_zc_request()
486 out += pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM, in p9_virtio_zc_request()
497 in = pack_sg_list(chan->sg, out, in p9_virtio_zc_request()
498 VIRTQUEUE_NUM, req->rc.sdata, in_hdr_len); in p9_virtio_zc_request()
500 sgs[out_sgs + in_sgs++] = chan->sg + out; in p9_virtio_zc_request()
503 sgs[out_sgs + in_sgs++] = chan->sg + out + in; in p9_virtio_zc_request()
504 pack_sg_list_p(chan->sg, out + in, VIRTQUEUE_NUM, in p9_virtio_zc_request()
509 err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req, in p9_virtio_zc_request()
512 if (err == -ENOSPC) { in p9_virtio_zc_request()
513 chan->ring_bufs_avail = 0; in p9_virtio_zc_request()
514 spin_unlock_irqrestore(&chan->lock, flags); in p9_virtio_zc_request()
515 err = wait_event_killable(*chan->vc_wq, in p9_virtio_zc_request()
516 chan->ring_bufs_avail); in p9_virtio_zc_request()
517 if (err == -ERESTARTSYS) in p9_virtio_zc_request()
523 spin_unlock_irqrestore(&chan->lock, flags); in p9_virtio_zc_request()
526 err = -EIO; in p9_virtio_zc_request()
530 virtqueue_kick(chan->vq); in p9_virtio_zc_request()
531 spin_unlock_irqrestore(&chan->lock, flags); in p9_virtio_zc_request()
534 err = wait_event_killable(req->wq, in p9_virtio_zc_request()
535 READ_ONCE(req->status) >= REQ_STATUS_RCVD); in p9_virtio_zc_request()
537 if (READ_ONCE(req->status) == REQ_STATUS_RCVD && in p9_virtio_zc_request()
538 unlikely(req->rc.sdata[4] == P9_RERROR)) in p9_virtio_zc_request()
574 chan = vdev->priv; in p9_mount_tag_show()
575 tag_len = strlen(chan->tag); in p9_mount_tag_show()
577 memcpy(buf, chan->tag, tag_len + 1); in p9_mount_tag_show()
585 * p9_virtio_probe - probe for existence of 9P virtio channels
599 if (!vdev->config->get) { in p9_virtio_probe()
600 dev_err(&vdev->dev, "%s failure: config access disabled\n", in p9_virtio_probe()
602 return -EINVAL; in p9_virtio_probe()
608 err = -ENOMEM; in p9_virtio_probe()
612 chan->vdev = vdev; in p9_virtio_probe()
615 chan->vq = virtio_find_single_vq(vdev, req_done, "requests"); in p9_virtio_probe()
616 if (IS_ERR(chan->vq)) { in p9_virtio_probe()
617 err = PTR_ERR(chan->vq); in p9_virtio_probe()
620 chan->vq->vdev->priv = chan; in p9_virtio_probe()
621 spin_lock_init(&chan->lock); in p9_virtio_probe()
623 sg_init_table(chan->sg, VIRTQUEUE_NUM); in p9_virtio_probe()
625 chan->inuse = false; in p9_virtio_probe()
629 err = -EINVAL; in p9_virtio_probe()
634 err = -ENOMEM; in p9_virtio_probe()
640 chan->tag = tag; in p9_virtio_probe()
641 err = sysfs_create_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr); in p9_virtio_probe()
645 chan->vc_wq = kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL); in p9_virtio_probe()
646 if (!chan->vc_wq) { in p9_virtio_probe()
647 err = -ENOMEM; in p9_virtio_probe()
650 init_waitqueue_head(chan->vc_wq); in p9_virtio_probe()
651 chan->ring_bufs_avail = 1; in p9_virtio_probe()
653 chan->p9_max_pages = nr_free_buffer_pages()/4; in p9_virtio_probe()
658 list_add_tail(&chan->chan_list, &virtio_chan_list); in p9_virtio_probe()
662 kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE); in p9_virtio_probe()
667 sysfs_remove_file(&vdev->dev.kobj, &dev_attr_mount_tag.attr); in p9_virtio_probe()
671 vdev->config->del_vqs(vdev); in p9_virtio_probe()
680 * p9_virtio_create - allocate a new virtio channel
683 * @args: args passed from sys_mount() for per-transport options (unused)
697 int ret = -ENOENT; in p9_virtio_create()
701 return -EINVAL; in p9_virtio_create()
705 if (!strcmp(devname, chan->tag)) { in p9_virtio_create()
706 if (!chan->inuse) { in p9_virtio_create()
707 chan->inuse = true; in p9_virtio_create()
711 ret = -EBUSY; in p9_virtio_create()
721 client->trans = (void *)chan; in p9_virtio_create()
722 client->status = Connected; in p9_virtio_create()
723 chan->client = client; in p9_virtio_create()
729 * p9_virtio_remove - clean up resources associated with a virtio device
736 struct virtio_chan *chan = vdev->priv; in p9_virtio_remove()
742 list_del(&chan->chan_list); in p9_virtio_remove()
746 while (chan->inuse) { in p9_virtio_remove()
750 dev_emerg(&vdev->dev, in p9_virtio_remove()
760 vdev->config->del_vqs(vdev); in p9_virtio_remove()
762 sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr); in p9_virtio_remove()
763 kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE); in p9_virtio_remove()
764 kfree(chan->tag); in p9_virtio_remove()
765 kfree(chan->vc_wq); in p9_virtio_remove()
803 .maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3),