Lines Matching +full:config +full:- +full:cond

1 // SPDX-License-Identifier: GPL-2.0-only
15 #include <linux/blk-mq.h>
16 #include <linux/blk-mq-virtio.h>
59 * virtblk_remove() sets vblk->vdev to NULL.
61 * blk-mq, virtqueue processing, and sysfs attribute code paths are
62 * shut down before vblk->vdev is set to NULL and therefore do not need
74 /* Process context for config space updates */
77 /* Ida index - used to track minor number allocations. */
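
The lifetime comment above is the rule that the vdev_mutex checks in virtblk_getgeo() and virtblk_report_zones() further down implement: code paths that can race with removal take the mutex and re-check vblk->vdev. A minimal sketch of that pattern (the helper name here is made up for illustration):

static int virtblk_example_vdev_access(struct virtio_blk *vblk)
{
	int ret = 0;

	mutex_lock(&vblk->vdev_mutex);
	if (!vblk->vdev) {
		/* virtblk_remove() already ran; the device is gone. */
		ret = -ENXIO;
		goto out;
	}
	/* vblk->vdev is safe to use until vdev_mutex is dropped. */
out:
	mutex_unlock(&vblk->vdev_mutex);
	return ret;
}
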
134 struct virtio_blk *vblk = hctx->queue->queuedata; in get_virtio_blk_vq()
135 struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num]; in get_virtio_blk_vq()
145 sg_init_one(&out_hdr, &vbr->out_hdr, sizeof(vbr->out_hdr)); in virtblk_add_req()
148 if (vbr->sg_table.nents) { in virtblk_add_req()
149 if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT)) in virtblk_add_req()
150 sgs[num_out++] = vbr->sg_table.sgl; in virtblk_add_req()
152 sgs[num_out + num_in++] = vbr->sg_table.sgl; in virtblk_add_req()
155 sg_init_one(&in_hdr, &vbr->in_hdr.status, vbr->in_hdr_len); in virtblk_add_req()
174 return -ENOMEM; in virtblk_setup_discard_write_zeroes_erase()
177 * Single max discard segment means multi-range discard isn't in virtblk_setup_discard_write_zeroes_erase()
182 if (queue_max_discard_segments(req->q) == 1) { in virtblk_setup_discard_write_zeroes_erase()
189 u64 sector = bio->bi_iter.bi_sector; in virtblk_setup_discard_write_zeroes_erase()
190 u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT; in virtblk_setup_discard_write_zeroes_erase()
201 bvec_set_virt(&req->special_vec, range, sizeof(*range) * segments); in virtblk_setup_discard_write_zeroes_erase()
202 req->rq_flags |= RQF_SPECIAL_PAYLOAD; in virtblk_setup_discard_write_zeroes_erase()
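
The lines above build the discard/write-zeroes payload that is attached to the request via RQF_SPECIAL_PAYLOAD. As a minimal sketch (the helper and variable names are illustrative), one element of that range array is filled from a bio using struct virtio_blk_discard_write_zeroes from include/uapi/linux/virtio_blk.h:

static void example_fill_range(struct virtio_blk_discard_write_zeroes *range,
			       struct bio *bio, u32 flags)
{
	range->flags = cpu_to_le32(flags);
	range->num_sectors = cpu_to_le32(bio->bi_iter.bi_size >> SECTOR_SHIFT);
	range->sector = cpu_to_le64(bio->bi_iter.bi_sector);
}
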
210 sg_free_table_chained(&vbr->sg_table, in virtblk_unmap_data()
222 vbr->sg_table.sgl = vbr->sg; in virtblk_map_data()
223 err = sg_alloc_table_chained(&vbr->sg_table, in virtblk_map_data()
225 vbr->sg_table.sgl, in virtblk_map_data()
228 return -ENOMEM; in virtblk_map_data()
230 return blk_rq_map_sg(hctx->queue, req, vbr->sg_table.sgl); in virtblk_map_data()
235 if (req->rq_flags & RQF_SPECIAL_PAYLOAD) in virtblk_cleanup_cmd()
236 kfree(bvec_virt(&req->special_vec)); in virtblk_cleanup_cmd()
243 size_t in_hdr_len = sizeof(vbr->in_hdr.status); in virtblk_setup_cmd()
252 vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req)); in virtblk_setup_cmd()
271 unmap = !(req->cmd_flags & REQ_NOUNMAP); in virtblk_setup_cmd()
291 in_hdr_len = sizeof(vbr->in_hdr.zone_append); in virtblk_setup_cmd()
311 /* Set fields for non-REQ_OP_DRV_IN request types */ in virtblk_setup_cmd()
312 vbr->in_hdr_len = in_hdr_len; in virtblk_setup_cmd()
313 vbr->out_hdr.type = cpu_to_virtio32(vdev, type); in virtblk_setup_cmd()
314 vbr->out_hdr.sector = cpu_to_virtio64(vdev, sector); in virtblk_setup_cmd()
327 * in-header. This helper fetches its value for all in-header formats
332 return *((u8 *)&vbr->in_hdr + vbr->in_hdr_len - 1); in virtblk_vbr_status()
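
The offset arithmetic above assumes the status byte is the last byte of every in-header variant. A rough sketch of the union shape that implies, with member names taken from other lines in this listing (the exact layout is an assumption and should be checked against the driver source):

union example_in_hdr {
	u8 status;			/* regular requests: 1-byte status */
	struct {
		__virtio64 sector;	/* zone append result sector */
		u8 status;		/* status remains the last byte */
	} zone_append;
};
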
339 struct virtio_blk *vblk = req->mq_hctx->queue->queuedata; in virtblk_request_done()
345 req->__sector = virtio64_to_cpu(vblk->vdev, in virtblk_request_done()
346 vbr->in_hdr.zone_append.sector); in virtblk_request_done()
353 struct virtio_blk *vblk = vq->vdev->priv; in virtblk_done()
355 int qid = vq->index; in virtblk_done()
360 spin_lock_irqsave(&vblk->vqs[qid].lock, flags); in virtblk_done()
363 while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { in virtblk_done()
366 if (likely(!blk_should_fake_timeout(req->q))) in virtblk_done()
374 blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); in virtblk_done()
375 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); in virtblk_done()
380 struct virtio_blk *vblk = hctx->queue->queuedata; in virtio_commit_rqs()
381 struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num]; in virtio_commit_rqs()
384 spin_lock_irq(&vq->lock); in virtio_commit_rqs()
385 kick = virtqueue_kick_prepare(vq->vq); in virtio_commit_rqs()
386 spin_unlock_irq(&vq->lock); in virtio_commit_rqs()
389 virtqueue_notify(vq->vq); in virtio_commit_rqs()
396 case -ENOSPC: in virtblk_fail_to_queue()
398 case -ENOMEM: in virtblk_fail_to_queue()
413 status = virtblk_setup_cmd(vblk->vdev, req, vbr); in virtblk_prep_rq()
419 return virtblk_fail_to_queue(req, -ENOMEM); in virtblk_prep_rq()
420 vbr->sg_table.nents = num; in virtblk_prep_rq()
430 struct virtio_blk *vblk = hctx->queue->queuedata; in virtio_queue_rq()
431 struct request *req = bd->rq; in virtio_queue_rq()
434 int qid = hctx->queue_num; in virtio_queue_rq()
443 spin_lock_irqsave(&vblk->vqs[qid].lock, flags); in virtio_queue_rq()
444 err = virtblk_add_req(vblk->vqs[qid].vq, vbr); in virtio_queue_rq()
446 virtqueue_kick(vblk->vqs[qid].vq); in virtio_queue_rq()
447 /* Don't stop the queue if -ENOMEM: we may have failed to in virtio_queue_rq()
450 if (err == -ENOSPC) in virtio_queue_rq()
452 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); in virtio_queue_rq()
457 if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq)) in virtio_queue_rq()
459 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); in virtio_queue_rq()
462 virtqueue_notify(vblk->vqs[qid].vq); in virtio_queue_rq()
468 struct virtio_blk *vblk = req->mq_hctx->queue->queuedata; in virtblk_prep_rq_batch()
471 return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK; in virtblk_prep_rq_batch()
481 spin_lock_irqsave(&vq->lock, flags); in virtblk_add_req_batch()
487 err = virtblk_add_req(vq->vq, vbr); in virtblk_add_req_batch()
495 kick = virtqueue_kick_prepare(vq->vq); in virtblk_add_req_batch()
496 spin_unlock_irqrestore(&vq->lock, flags); in virtblk_add_req_batch()
507 struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx); in virtio_queue_rqs()
517 if (!next || req->mq_hctx != next->mq_hctx) { in virtio_queue_rqs()
518 req->rq_next = NULL; in virtio_queue_rqs()
521 virtqueue_notify(vq->vq); in virtio_queue_rqs()
537 struct request_queue *q = vblk->disk->queue; in virtblk_alloc_report_buffer()
542 get_capacity(vblk->disk) >> ilog2(vblk->zone_sectors)); in virtblk_alloc_report_buffer()
566 struct request_queue *q = vblk->disk->queue; in virtblk_submit_zone_report()
576 vbr->in_hdr_len = sizeof(vbr->in_hdr.status); in virtblk_submit_zone_report()
577 vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_ZONE_REPORT); in virtblk_submit_zone_report()
578 vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, sector); in virtblk_submit_zone_report()
585 err = blk_status_to_errno(virtblk_result(vbr->in_hdr.status)); in virtblk_submit_zone_report()
597 zone.start = virtio64_to_cpu(vblk->vdev, entry->z_start); in virtblk_parse_zone()
598 if (zone.start + vblk->zone_sectors <= get_capacity(vblk->disk)) in virtblk_parse_zone()
599 zone.len = vblk->zone_sectors; in virtblk_parse_zone()
601 zone.len = get_capacity(vblk->disk) - zone.start; in virtblk_parse_zone()
602 zone.capacity = virtio64_to_cpu(vblk->vdev, entry->z_cap); in virtblk_parse_zone()
603 zone.wp = virtio64_to_cpu(vblk->vdev, entry->z_wp); in virtblk_parse_zone()
605 switch (entry->z_type) { in virtblk_parse_zone()
616 dev_err(&vblk->vdev->dev, "zone %llu: invalid type %#x\n", in virtblk_parse_zone()
617 zone.start, entry->z_type); in virtblk_parse_zone()
618 return -EIO; in virtblk_parse_zone()
621 switch (entry->z_state) { in virtblk_parse_zone()
623 zone.cond = BLK_ZONE_COND_EMPTY; in virtblk_parse_zone()
626 zone.cond = BLK_ZONE_COND_CLOSED; in virtblk_parse_zone()
629 zone.cond = BLK_ZONE_COND_FULL; in virtblk_parse_zone()
633 zone.cond = BLK_ZONE_COND_EXP_OPEN; in virtblk_parse_zone()
636 zone.cond = BLK_ZONE_COND_IMP_OPEN; in virtblk_parse_zone()
639 zone.cond = BLK_ZONE_COND_NOT_WP; in virtblk_parse_zone()
642 zone.cond = BLK_ZONE_COND_READONLY; in virtblk_parse_zone()
646 zone.cond = BLK_ZONE_COND_OFFLINE; in virtblk_parse_zone()
650 dev_err(&vblk->vdev->dev, "zone %llu: invalid condition %#x\n", in virtblk_parse_zone()
651 zone.start, entry->z_state); in virtblk_parse_zone()
652 return -EIO; in virtblk_parse_zone()
666 struct virtio_blk *vblk = disk->private_data; in virtblk_report_zones()
673 if (WARN_ON_ONCE(!vblk->zone_sectors)) in virtblk_report_zones()
674 return -EOPNOTSUPP; in virtblk_report_zones()
678 return -ENOMEM; in virtblk_report_zones()
680 mutex_lock(&vblk->vdev_mutex); in virtblk_report_zones()
682 if (!vblk->vdev) { in virtblk_report_zones()
683 ret = -ENXIO; in virtblk_report_zones()
687 while (zone_idx < nr_zones && sector < get_capacity(vblk->disk)) { in virtblk_report_zones()
695 nz = min_t(u64, virtio64_to_cpu(vblk->vdev, report->nr_zones), in virtblk_report_zones()
701 ret = virtblk_parse_zone(vblk, &report->zones[i], in virtblk_report_zones()
706 sector = virtio64_to_cpu(vblk->vdev, in virtblk_report_zones()
707 report->zones[i].z_start) + in virtblk_report_zones()
708 vblk->zone_sectors; in virtblk_report_zones()
716 ret = -EINVAL; in virtblk_report_zones()
718 mutex_unlock(&vblk->vdev_mutex); in virtblk_report_zones()
726 struct virtio_device *vdev = vblk->vdev; in virtblk_read_zoned_limits()
729 dev_dbg(&vdev->dev, "probing host-managed zoned device\n"); in virtblk_read_zoned_limits()
731 lim->features |= BLK_FEAT_ZONED; in virtblk_read_zoned_limits()
735 lim->max_open_zones = v; in virtblk_read_zoned_limits()
736 dev_dbg(&vdev->dev, "max open zones = %u\n", v); in virtblk_read_zoned_limits()
740 lim->max_active_zones = v; in virtblk_read_zoned_limits()
741 dev_dbg(&vdev->dev, "max active zones = %u\n", v); in virtblk_read_zoned_limits()
746 dev_warn(&vdev->dev, "zero write granularity reported\n"); in virtblk_read_zoned_limits()
747 return -ENODEV; in virtblk_read_zoned_limits()
749 lim->physical_block_size = wg; in virtblk_read_zoned_limits()
750 lim->io_min = wg; in virtblk_read_zoned_limits()
752 dev_dbg(&vdev->dev, "write granularity = %u\n", wg); in virtblk_read_zoned_limits()
759 &vblk->zone_sectors); in virtblk_read_zoned_limits()
760 if (vblk->zone_sectors == 0 || !is_power_of_2(vblk->zone_sectors)) { in virtblk_read_zoned_limits()
761 dev_err(&vdev->dev, in virtblk_read_zoned_limits()
763 vblk->zone_sectors); in virtblk_read_zoned_limits()
764 return -ENODEV; in virtblk_read_zoned_limits()
766 lim->chunk_sectors = vblk->zone_sectors; in virtblk_read_zoned_limits()
767 dev_dbg(&vdev->dev, "zone sectors = %u\n", vblk->zone_sectors); in virtblk_read_zoned_limits()
770 dev_warn(&vblk->vdev->dev, in virtblk_read_zoned_limits()
772 lim->max_hw_discard_sectors = 0; in virtblk_read_zoned_limits()
778 dev_warn(&vdev->dev, "zero max_append_sectors reported\n"); in virtblk_read_zoned_limits()
779 return -ENODEV; in virtblk_read_zoned_limits()
782 dev_err(&vdev->dev, in virtblk_read_zoned_limits()
785 return -ENODEV; in virtblk_read_zoned_limits()
787 lim->max_zone_append_sectors = v; in virtblk_read_zoned_limits()
788 dev_dbg(&vdev->dev, "max append sectors = %u\n", v); in virtblk_read_zoned_limits()
794 * Zoned block device support is not configured in this kernel, host-managed
801 dev_err(&vblk->vdev->dev, in virtblk_read_zoned_limits()
803 return -EOPNOTSUPP; in virtblk_read_zoned_limits()
811 struct virtio_blk *vblk = disk->private_data; in virtblk_get_id()
812 struct request_queue *q = vblk->disk->queue; in virtblk_get_id()
822 vbr->in_hdr_len = sizeof(vbr->in_hdr.status); in virtblk_get_id()
823 vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID); in virtblk_get_id()
824 vbr->out_hdr.sector = 0; in virtblk_get_id()
831 err = blk_status_to_errno(virtblk_result(vbr->in_hdr.status)); in virtblk_get_id()
840 struct virtio_blk *vblk = bd->bd_disk->private_data; in virtblk_getgeo()
843 mutex_lock(&vblk->vdev_mutex); in virtblk_getgeo()
845 if (!vblk->vdev) { in virtblk_getgeo()
846 ret = -ENXIO; in virtblk_getgeo()
850 /* see if the host passed in geometry config */ in virtblk_getgeo()
851 if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) { in virtblk_getgeo()
852 virtio_cread(vblk->vdev, struct virtio_blk_config, in virtblk_getgeo()
853 geometry.cylinders, &geo->cylinders); in virtblk_getgeo()
854 virtio_cread(vblk->vdev, struct virtio_blk_config, in virtblk_getgeo()
855 geometry.heads, &geo->heads); in virtblk_getgeo()
856 virtio_cread(vblk->vdev, struct virtio_blk_config, in virtblk_getgeo()
857 geometry.sectors, &geo->sectors); in virtblk_getgeo()
860 geo->heads = 1 << 6; in virtblk_getgeo()
861 geo->sectors = 1 << 5; in virtblk_getgeo()
862 geo->cylinders = get_capacity(bd->bd_disk) >> 11; in virtblk_getgeo()
865 mutex_unlock(&vblk->vdev_mutex); in virtblk_getgeo()
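
A short arithmetic note on the fallback geometry above (illustrative comment only):

/* 1 << 6 = 64 heads and 1 << 5 = 32 sectors per track give
 * 64 * 32 = 2048 = 1 << 11 sectors per cylinder, hence
 * cylinders = capacity >> 11.
 */
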
871 struct virtio_blk *vblk = disk->private_data; in virtblk_free_disk()
873 ida_free(&vd_index_ida, vblk->index); in virtblk_free_disk()
874 mutex_destroy(&vblk->vdev_mutex); in virtblk_free_disk()
909 if (err == -EIO) /* Unsupported? Make it empty. */ in serial_show()
920 struct virtio_device *vdev = vblk->vdev; in virtblk_update_capacity()
921 struct request_queue *q = vblk->disk->queue; in virtblk_update_capacity()
936 dev_notice(&vdev->dev, in virtblk_update_capacity()
937 "[%s] %s%llu %d-byte logical blocks (%s/%s)\n", in virtblk_update_capacity()
938 vblk->disk->disk_name, in virtblk_update_capacity()
945 set_capacity_and_notify(vblk->disk, capacity); in virtblk_update_capacity()
958 struct virtio_blk *vblk = vdev->priv; in virtblk_config_changed()
960 queue_work(virtblk_wq, &vblk->config_work); in virtblk_config_changed()
971 struct virtio_device *vdev = vblk->vdev; in init_vq()
981 dev_err(&vdev->dev, "MQ advertised but zero queues reported\n"); in init_vq()
982 return -EINVAL; in init_vq()
989 num_poll_vqs = min_t(unsigned int, poll_queues, num_vqs - 1); in init_vq()
991 vblk->io_queues[HCTX_TYPE_DEFAULT] = num_vqs - num_poll_vqs; in init_vq()
992 vblk->io_queues[HCTX_TYPE_READ] = 0; in init_vq()
993 vblk->io_queues[HCTX_TYPE_POLL] = num_poll_vqs; in init_vq()
995 dev_info(&vdev->dev, "%d/%d/%d default/read/poll queues\n", in init_vq()
996 vblk->io_queues[HCTX_TYPE_DEFAULT], in init_vq()
997 vblk->io_queues[HCTX_TYPE_READ], in init_vq()
998 vblk->io_queues[HCTX_TYPE_POLL]); in init_vq()
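
A worked example of the split computed above, with hypothetical numbers:

/* With num_vqs = 4 and poll_queues = 2:
 *	num_poll_vqs = min(2, 4 - 1) = 2
 *	io_queues[HCTX_TYPE_DEFAULT] = 4 - 2 = 2
 *	io_queues[HCTX_TYPE_READ]    = 0
 *	io_queues[HCTX_TYPE_POLL]    = 2
 * so the message above would read "2/0/2 default/read/poll queues".
 */
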
1000 vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL); in init_vq()
1001 if (!vblk->vqs) in init_vq()
1002 return -ENOMEM; in init_vq()
1007 err = -ENOMEM; in init_vq()
1011 for (i = 0; i < num_vqs - num_poll_vqs; i++) { in init_vq()
1013 snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%u", i); in init_vq()
1014 vqs_info[i].name = vblk->vqs[i].name; in init_vq()
1018 snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%u", i); in init_vq()
1019 vqs_info[i].name = vblk->vqs[i].name; in init_vq()
1028 spin_lock_init(&vblk->vqs[i].lock); in init_vq()
1029 vblk->vqs[i].vq = vqs[i]; in init_vq()
1031 vblk->num_vqs = num_vqs; in init_vq()
1037 kfree(vblk->vqs); in init_vq()
1047 const int base = 'z' - 'a' + 1; in virtblk_name_format()
1053 p = end - 1; in virtblk_name_format()
1058 return -EINVAL; in virtblk_name_format()
1059 *--p = 'a' + (index % unit); in virtblk_name_format()
1060 index = (index / unit) - 1; in virtblk_name_format()
1063 memmove(begin, p, end - p); in virtblk_name_format()
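
For illustration only, a standalone userspace re-implementation of the same bijective base-26 encoding (not the driver function), with a few worked values:

#include <stdio.h>

/* index 0 -> "vda", 25 -> "vdz", 26 -> "vdaa", 27 -> "vdab",
 * 701 -> "vdzz", 702 -> "vdaaa"
 */
static void vd_name(int index, char *buf, size_t len)
{
	char tmp[16];
	int pos = sizeof(tmp) - 1;

	tmp[pos] = '\0';
	do {
		tmp[--pos] = 'a' + (index % 26);
		index = index / 26 - 1;
	} while (index >= 0 && pos > 0);

	snprintf(buf, len, "vd%s", &tmp[pos]);
}
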
1097 struct virtio_blk *vblk = disk->private_data; in cache_type_store()
1098 struct virtio_device *vdev = vblk->vdev; in cache_type_store()
1102 BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE)); in cache_type_store()
1109 lim = queue_limits_start_update(disk->queue); in cache_type_store()
1114 blk_mq_freeze_queue(disk->queue); in cache_type_store()
1115 i = queue_limits_commit_update(disk->queue, &lim); in cache_type_store()
1116 blk_mq_unfreeze_queue(disk->queue); in cache_type_store()
1126 struct virtio_blk *vblk = disk->private_data; in cache_type_show()
1127 u8 writeback = virtblk_get_cache_mode(vblk->vdev); in cache_type_show()
1146 struct virtio_blk *vblk = disk->private_data; in virtblk_attrs_are_visible()
1147 struct virtio_device *vdev = vblk->vdev; in virtblk_attrs_are_visible()
1153 return a->mode; in virtblk_attrs_are_visible()
1168 struct virtio_blk *vblk = set->driver_data; in virtblk_map_queues()
1171 for (i = 0, qoff = 0; i < set->nr_maps; i++) { in virtblk_map_queues()
1172 struct blk_mq_queue_map *map = &set->map[i]; in virtblk_map_queues()
1174 map->nr_queues = vblk->io_queues[i]; in virtblk_map_queues()
1175 map->queue_offset = qoff; in virtblk_map_queues()
1176 qoff += map->nr_queues; in virtblk_map_queues()
1178 if (map->nr_queues == 0) in virtblk_map_queues()
1187 blk_mq_map_queues(&set->map[i]); in virtblk_map_queues()
1189 blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0); in virtblk_map_queues()
1197 rq_list_for_each(&iob->req_list, req) { in virtblk_complete_batch()
1206 struct virtio_blk *vblk = hctx->queue->queuedata; in virtblk_poll()
1213 spin_lock_irqsave(&vq->lock, flags); in virtblk_poll()
1215 while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) { in virtblk_poll()
1226 blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); in virtblk_poll()
1228 spin_unlock_irqrestore(&vq->lock, flags); in virtblk_poll()
1248 struct virtio_device *vdev = vblk->vdev; in virtblk_read_limits()
1267 sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2); in virtblk_read_limits()
1270 lim->max_segments = sg_elems; in virtblk_read_limits()
1273 lim->max_hw_sectors = UINT_MAX; in virtblk_read_limits()
1285 lim->max_segment_size = max_size; in virtblk_read_limits()
1290 &lim->logical_block_size); in virtblk_read_limits()
1297 lim->physical_block_size = in virtblk_read_limits()
1298 lim->logical_block_size * (1 << physical_block_exp); in virtblk_read_limits()
1304 lim->alignment_offset = in virtblk_read_limits()
1305 lim->logical_block_size * alignment_offset; in virtblk_read_limits()
1311 lim->io_min = lim->logical_block_size * min_io_size; in virtblk_read_limits()
1317 lim->io_opt = lim->logical_block_size * opt_io_size; in virtblk_read_limits()
1325 lim->max_hw_discard_sectors = v ? v : UINT_MAX; in virtblk_read_limits()
1334 lim->max_write_zeroes_sectors = v ? v : UINT_MAX; in virtblk_read_limits()
1358 dev_err(&vdev->dev, in virtblk_read_limits()
1360 return -EINVAL; in virtblk_read_limits()
1372 dev_err(&vdev->dev, in virtblk_read_limits()
1374 return -EINVAL; in virtblk_read_limits()
1377 lim->max_secure_erase_sectors = v; in virtblk_read_limits()
1386 dev_err(&vdev->dev, in virtblk_read_limits()
1388 return -EINVAL; in virtblk_read_limits()
1398 * config are 0 and VIRTIO_BLK_F_SECURE_ERASE feature is not negotiated. in virtblk_read_limits()
1404 lim->max_discard_segments = in virtblk_read_limits()
1408 lim->discard_granularity = in virtblk_read_limits()
1411 lim->discard_granularity = lim->logical_block_size; in virtblk_read_limits()
1421 /* treat host-aware devices as non-zoned */ in virtblk_read_limits()
1429 dev_err(&vdev->dev, "unsupported zone model %d\n", model); in virtblk_read_limits()
1430 return -EINVAL; in virtblk_read_limits()
1447 if (!vdev->config->get) { in virtblk_probe()
1448 dev_err(&vdev->dev, "%s failure: config access disabled\n", in virtblk_probe()
1450 return -EINVAL; in virtblk_probe()
1454 minor_to_index(1 << MINORBITS) - 1, GFP_KERNEL); in virtblk_probe()
1459 vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL); in virtblk_probe()
1461 err = -ENOMEM; in virtblk_probe()
1465 mutex_init(&vblk->vdev_mutex); in virtblk_probe()
1467 vblk->vdev = vdev; in virtblk_probe()
1469 INIT_WORK(&vblk->config_work, virtblk_config_changed_work); in virtblk_probe()
1477 queue_depth = vblk->vqs[0].vq->num_free; in virtblk_probe()
1485 memset(&vblk->tag_set, 0, sizeof(vblk->tag_set)); in virtblk_probe()
1486 vblk->tag_set.ops = &virtio_mq_ops; in virtblk_probe()
1487 vblk->tag_set.queue_depth = queue_depth; in virtblk_probe()
1488 vblk->tag_set.numa_node = NUMA_NO_NODE; in virtblk_probe()
1489 vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; in virtblk_probe()
1490 vblk->tag_set.cmd_size = in virtblk_probe()
1493 vblk->tag_set.driver_data = vblk; in virtblk_probe()
1494 vblk->tag_set.nr_hw_queues = vblk->num_vqs; in virtblk_probe()
1495 vblk->tag_set.nr_maps = 1; in virtblk_probe()
1496 if (vblk->io_queues[HCTX_TYPE_POLL]) in virtblk_probe()
1497 vblk->tag_set.nr_maps = 3; in virtblk_probe()
1499 err = blk_mq_alloc_tag_set(&vblk->tag_set); in virtblk_probe()
1510 vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, &lim, vblk); in virtblk_probe()
1511 if (IS_ERR(vblk->disk)) { in virtblk_probe()
1512 err = PTR_ERR(vblk->disk); in virtblk_probe()
1516 virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN); in virtblk_probe()
1518 vblk->disk->major = major; in virtblk_probe()
1519 vblk->disk->first_minor = index_to_minor(index); in virtblk_probe()
1520 vblk->disk->minors = 1 << PART_BITS; in virtblk_probe()
1521 vblk->disk->private_data = vblk; in virtblk_probe()
1522 vblk->disk->fops = &virtblk_fops; in virtblk_probe()
1523 vblk->index = index; in virtblk_probe()
1525 /* If disk is read-only in the host, the guest should obey */ in virtblk_probe()
1527 set_disk_ro(vblk->disk, 1); in virtblk_probe()
1538 err = blk_revalidate_disk_zones(vblk->disk); in virtblk_probe()
1543 err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups); in virtblk_probe()
1550 put_disk(vblk->disk); in virtblk_probe()
1552 blk_mq_free_tag_set(&vblk->tag_set); in virtblk_probe()
1554 vdev->config->del_vqs(vdev); in virtblk_probe()
1555 kfree(vblk->vqs); in virtblk_probe()
1566 struct virtio_blk *vblk = vdev->priv; in virtblk_remove()
1569 flush_work(&vblk->config_work); in virtblk_remove()
1571 del_gendisk(vblk->disk); in virtblk_remove()
1572 blk_mq_free_tag_set(&vblk->tag_set); in virtblk_remove()
1574 mutex_lock(&vblk->vdev_mutex); in virtblk_remove()
1579 /* Virtqueues are stopped, nothing can use vblk->vdev anymore. */ in virtblk_remove()
1580 vblk->vdev = NULL; in virtblk_remove()
1582 vdev->config->del_vqs(vdev); in virtblk_remove()
1583 kfree(vblk->vqs); in virtblk_remove()
1585 mutex_unlock(&vblk->vdev_mutex); in virtblk_remove()
1587 put_disk(vblk->disk); in virtblk_remove()
1593 struct virtio_blk *vblk = vdev->priv; in virtblk_freeze()
1596 blk_mq_freeze_queue(vblk->disk->queue); in virtblk_freeze()
1602 flush_work(&vblk->config_work); in virtblk_freeze()
1604 vdev->config->del_vqs(vdev); in virtblk_freeze()
1605 kfree(vblk->vqs); in virtblk_freeze()
1612 struct virtio_blk *vblk = vdev->priv; in virtblk_restore()
1615 ret = init_vq(vdev->priv); in virtblk_restore()
1621 blk_mq_unfreeze_queue(vblk->disk->queue); in virtblk_restore()
1667 virtblk_wq = alloc_workqueue("virtio-blk", 0, 0); in virtio_blk_init()
1669 return -ENOMEM; in virtio_blk_init()