Lines matching full:req (drivers/nvme/target/zns.c)
69 void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req) in nvmet_execute_identify_ctrl_zns() argument
71 u8 zasl = req->sq->ctrl->subsys->zasl; in nvmet_execute_identify_ctrl_zns()
72 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_execute_identify_ctrl_zns()
87 status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); in nvmet_execute_identify_ctrl_zns()
91 nvmet_req_complete(req, status); in nvmet_execute_identify_ctrl_zns()
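Context for the zasl field above (not part of the listing, and hedged): in the ZNS spec the Zone Append Size Limit is a power-of-two exponent in units of the controller's minimum memory page size (CAP.MPSMIN, commonly 4 KiB), with 0 meaning the limit is whatever MDTS allows. A minimal sketch under those assumptions, with a hypothetical helper name zasl_to_bytes():

/* Hedged sketch, assuming <linux/types.h> and <linux/sizes.h>.
 * zasl is a power-of-two exponent in CAP.MPSMIN units (4 KiB assumed
 * here); zasl == 0 defers to the MDTS-derived limit.
 */
static inline u64 zasl_to_bytes(u8 zasl, u64 mdts_bytes)
{
	if (!zasl)
		return mdts_bytes;		/* ZASL == 0: limit is MDTS */
	return (u64)SZ_4K << zasl;		/* (1 << zasl) minimum pages */
}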
94 void nvmet_execute_identify_ns_zns(struct nvmet_req *req) in nvmet_execute_identify_ns_zns() argument
101 if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) { in nvmet_execute_identify_ns_zns()
102 req->error_loc = offsetof(struct nvme_identify, nsid); in nvmet_execute_identify_ns_zns()
113 status = nvmet_req_find_ns(req); in nvmet_execute_identify_ns_zns()
117 if (nvmet_ns_revalidate(req->ns)) { in nvmet_execute_identify_ns_zns()
118 mutex_lock(&req->ns->subsys->lock); in nvmet_execute_identify_ns_zns()
119 nvmet_ns_changed(req->ns->subsys, req->ns->nsid); in nvmet_execute_identify_ns_zns()
120 mutex_unlock(&req->ns->subsys->lock); in nvmet_execute_identify_ns_zns()
123 if (!bdev_is_zoned(req->ns->bdev)) { in nvmet_execute_identify_ns_zns()
125 req->error_loc = offsetof(struct nvme_identify, nsid); in nvmet_execute_identify_ns_zns()
129 zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >> in nvmet_execute_identify_ns_zns()
130 req->ns->blksize_shift; in nvmet_execute_identify_ns_zns()
133 mor = bdev_max_open_zones(req->ns->bdev); in nvmet_execute_identify_ns_zns()
140 mar = bdev_max_active_zones(req->ns->bdev); in nvmet_execute_identify_ns_zns()
148 status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns)); in nvmet_execute_identify_ns_zns()
151 nvmet_req_complete(req, status); in nvmet_execute_identify_ns_zns()
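Worked example for the zsze computation above (hypothetical values, assuming bdev_zone_sectors() reports the zone size in 512-byte sectors, which is what the << 9 implies):

/*
 * Example: a 256 MiB zone is 524288 sectors of 512 bytes; with a
 * 4 KiB LBA format (blksize_shift == 12):
 *
 *	zsze = (524288 << 9) >> 12 = 65536 logical blocks
 */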
154 static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req) in nvmet_bdev_validate_zone_mgmt_recv() argument
156 sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba); in nvmet_bdev_validate_zone_mgmt_recv()
157 u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2; in nvmet_bdev_validate_zone_mgmt_recv()
159 if (sect >= get_capacity(req->ns->bdev->bd_disk)) { in nvmet_bdev_validate_zone_mgmt_recv()
160 req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, slba); in nvmet_bdev_validate_zone_mgmt_recv()
165 req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, numd); in nvmet_bdev_validate_zone_mgmt_recv()
169 if (req->cmd->zmr.zra != NVME_ZRA_ZONE_REPORT) { in nvmet_bdev_validate_zone_mgmt_recv()
170 req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zra); in nvmet_bdev_validate_zone_mgmt_recv()
174 switch (req->cmd->zmr.pr) { in nvmet_bdev_validate_zone_mgmt_recv()
179 req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, pr); in nvmet_bdev_validate_zone_mgmt_recv()
183 switch (req->cmd->zmr.zrasf) { in nvmet_bdev_validate_zone_mgmt_recv()
194 req->error_loc = in nvmet_bdev_validate_zone_mgmt_recv()
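Reading of the out_bufsize math above (my interpretation of the NVMe convention, not stated in the listing): numd is a 0's-based count of dwords, so the host buffer is (NUMD + 1) * 4 bytes.

/*
 * Example (hypothetical value): numd == 1023 means 1024 dwords, i.e.
 * out_bufsize = (1023 + 1) << 2 = 4096 bytes of report space.
 */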
203 struct nvmet_req *req; member
231 zdesc.zcap = nvmet_sect_to_lba(rz->req->ns, z->capacity); in nvmet_bdev_report_zone_cb()
232 zdesc.zslba = nvmet_sect_to_lba(rz->req->ns, z->start); in nvmet_bdev_report_zone_cb()
233 zdesc.wp = nvmet_sect_to_lba(rz->req->ns, z->wp); in nvmet_bdev_report_zone_cb()
238 status = nvmet_copy_to_sgl(rz->req, rz->out_buf_offset, &zdesc, in nvmet_bdev_report_zone_cb()
251 static unsigned long nvmet_req_nr_zones_from_slba(struct nvmet_req *req) in nvmet_req_nr_zones_from_slba() argument
253 unsigned int sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba); in nvmet_req_nr_zones_from_slba()
255 return bdev_nr_zones(req->ns->bdev) - bdev_zone_no(req->ns->bdev, sect); in nvmet_req_nr_zones_from_slba()
258 static unsigned long get_nr_zones_from_buf(struct nvmet_req *req, u32 bufsize) in get_nr_zones_from_buf() argument
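Rough sizing behind get_nr_zones_from_buf(), assuming the 64-byte struct nvme_zone_report header and 64-byte struct nvme_zone_descriptor layouts from the ZNS spec (an assumption; the helper body is not in the listing):

/*
 * Hypothetical example: a 4096-byte buffer fits the 64-byte report
 * header plus (4096 - 64) / 64 = 63 zone descriptors.
 */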
269 struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work); in nvmet_bdev_zone_zmgmt_recv_work() local
270 sector_t start_sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba); in nvmet_bdev_zone_zmgmt_recv_work()
271 unsigned long req_slba_nr_zones = nvmet_req_nr_zones_from_slba(req); in nvmet_bdev_zone_zmgmt_recv_work()
272 u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2; in nvmet_bdev_zone_zmgmt_recv_work()
277 .out_nr_zones = get_nr_zones_from_buf(req, out_bufsize), in nvmet_bdev_zone_zmgmt_recv_work()
280 .zrasf = req->cmd->zmr.zrasf, in nvmet_bdev_zone_zmgmt_recv_work()
282 .req = req, in nvmet_bdev_zone_zmgmt_recv_work()
285 status = nvmet_bdev_validate_zone_mgmt_recv(req); in nvmet_bdev_zone_zmgmt_recv_work()
294 ret = blkdev_report_zones(req->ns->bdev, start_sect, req_slba_nr_zones, in nvmet_bdev_zone_zmgmt_recv_work()
305 if (req->cmd->zmr.pr) in nvmet_bdev_zone_zmgmt_recv_work()
309 status = nvmet_copy_to_sgl(req, 0, &nr_zones, sizeof(nr_zones)); in nvmet_bdev_zone_zmgmt_recv_work()
312 nvmet_req_complete(req, status); in nvmet_bdev_zone_zmgmt_recv_work()
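The receive work above walks zones with blkdev_report_zones() and the nvmet_bdev_report_zone_cb/nvmet_report_zone_data pair shown earlier. A self-contained sketch of that callback pattern with hypothetical names (my_report_data, my_zone_cb, my_report), assuming the in-tree report_zones_cb signature:

#include <linux/blkdev.h>

struct my_report_data {			/* hypothetical cursor, mirroring nvmet_report_zone_data */
	u64 nr_zones;			/* zones actually emitted */
};

/* report_zones_cb: called once per zone; a non-zero return stops the walk */
static int my_zone_cb(struct blk_zone *z, unsigned int idx, void *data)
{
	struct my_report_data *d = data;

	d->nr_zones++;			/* e.g. filter on z->cond or copy a descriptor here */
	return 0;
}

static int my_report(struct block_device *bdev, sector_t start, unsigned int nr)
{
	struct my_report_data d = {};
	int ret = blkdev_report_zones(bdev, start, nr, my_zone_cb, &d);

	return ret < 0 ? ret : 0;	/* >= 0 is the number of zones reported */
}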
315 void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req) in nvmet_bdev_execute_zone_mgmt_recv() argument
317 INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zone_zmgmt_recv_work); in nvmet_bdev_execute_zone_mgmt_recv()
318 queue_work(zbd_wq, &req->z.zmgmt_work); in nvmet_bdev_execute_zone_mgmt_recv()
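nvmet_bdev_execute_zone_mgmt_recv() only queues work because blkdev_report_zones() can sleep; the worker recovers the request with container_of(). A minimal sketch of that offload pattern with hypothetical names (my_req, my_work_fn, my_queue):

#include <linux/workqueue.h>

struct my_req {
	struct work_struct work;	/* embedded, like req->z.zmgmt_work */
	/* ... command state ... */
};

static void my_work_fn(struct work_struct *w)
{
	/* recover the request from the embedded work item */
	struct my_req *r = container_of(w, struct my_req, work);

	/* sleeping work (e.g. blkdev_report_zones()) happens here */
	(void)r;
}

static void my_queue(struct workqueue_struct *wq, struct my_req *r)
{
	INIT_WORK(&r->work, my_work_fn);
	queue_work(wq, &r->work);	/* completion is reported from my_work_fn */
}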
352 struct nvmet_req *req; member
359 switch (zsa_req_op(data->req->cmd->zms.zsa)) { in zmgmt_send_scan_cb()
396 static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req) in nvmet_bdev_zone_mgmt_emulate_all() argument
398 struct block_device *bdev = req->ns->bdev; in nvmet_bdev_zone_mgmt_emulate_all()
404 .req = req, in nvmet_bdev_zone_mgmt_emulate_all()
428 zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC, in nvmet_bdev_zone_mgmt_emulate_all()
448 static u16 nvmet_bdev_execute_zmgmt_send_all(struct nvmet_req *req) in nvmet_bdev_execute_zmgmt_send_all() argument
452 switch (zsa_req_op(req->cmd->zms.zsa)) { in nvmet_bdev_execute_zmgmt_send_all()
454 ret = blkdev_zone_mgmt(req->ns->bdev, REQ_OP_ZONE_RESET, 0, in nvmet_bdev_execute_zmgmt_send_all()
455 get_capacity(req->ns->bdev->bd_disk)); in nvmet_bdev_execute_zmgmt_send_all()
462 return nvmet_bdev_zone_mgmt_emulate_all(req); in nvmet_bdev_execute_zmgmt_send_all()
465 req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa); in nvmet_bdev_execute_zmgmt_send_all()
474 struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work); in nvmet_bdev_zmgmt_send_work() local
475 sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zms.slba); in nvmet_bdev_zmgmt_send_work()
476 enum req_op op = zsa_req_op(req->cmd->zms.zsa); in nvmet_bdev_zmgmt_send_work()
477 struct block_device *bdev = req->ns->bdev; in nvmet_bdev_zmgmt_send_work()
483 req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa); in nvmet_bdev_zmgmt_send_work()
489 if (req->cmd->zms.select_all) { in nvmet_bdev_zmgmt_send_work()
490 status = nvmet_bdev_execute_zmgmt_send_all(req); in nvmet_bdev_zmgmt_send_work()
495 req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba); in nvmet_bdev_zmgmt_send_work()
501 req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba); in nvmet_bdev_zmgmt_send_work()
511 nvmet_req_complete(req, status); in nvmet_bdev_zmgmt_send_work()
514 void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req) in nvmet_bdev_execute_zone_mgmt_send() argument
516 INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zmgmt_send_work); in nvmet_bdev_execute_zone_mgmt_send()
517 queue_work(zbd_wq, &req->z.zmgmt_work); in nvmet_bdev_execute_zone_mgmt_send()
522 struct nvmet_req *req = bio->bi_private; in nvmet_bdev_zone_append_bio_done() local
525 req->cqe->result.u64 = in nvmet_bdev_zone_append_bio_done()
526 nvmet_sect_to_lba(req->ns, bio->bi_iter.bi_sector); in nvmet_bdev_zone_append_bio_done()
529 nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status)); in nvmet_bdev_zone_append_bio_done()
530 nvmet_req_bio_put(req, bio); in nvmet_bdev_zone_append_bio_done()
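The completion handler above relies on the block layer updating bi_iter.bi_sector of a successful REQ_OP_ZONE_APPEND bio to the sector the data actually landed at, which is then translated back to an LBA for cqe->result. A short end_io sketch under that assumption, with hypothetical names (my_req, my_zone_append_done):

/* Hypothetical request type and end_io for a zone-append bio. */
struct my_req {
	sector_t written_sect;
};

static void my_zone_append_done(struct bio *bio)
{
	struct my_req *r = bio->bi_private;

	/* on success, bi_sector now holds the location the append wrote to */
	if (!bio->bi_status)
		r->written_sect = bio->bi_iter.bi_sector;

	bio_put(bio);	/* the real handler also completes the NVMe request here */
}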
533 void nvmet_bdev_execute_zone_append(struct nvmet_req *req) in nvmet_bdev_execute_zone_append() argument
535 sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba); in nvmet_bdev_execute_zone_append()
544 if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req))) in nvmet_bdev_execute_zone_append()
547 if (!req->sg_cnt) { in nvmet_bdev_execute_zone_append()
548 nvmet_req_complete(req, 0); in nvmet_bdev_execute_zone_append()
552 if (sect >= get_capacity(req->ns->bdev->bd_disk)) { in nvmet_bdev_execute_zone_append()
553 req->error_loc = offsetof(struct nvme_rw_command, slba); in nvmet_bdev_execute_zone_append()
558 if (sect & (bdev_zone_sectors(req->ns->bdev) - 1)) { in nvmet_bdev_execute_zone_append()
559 req->error_loc = offsetof(struct nvme_rw_command, slba); in nvmet_bdev_execute_zone_append()
564 if (nvmet_use_inline_bvec(req)) { in nvmet_bdev_execute_zone_append()
565 bio = &req->z.inline_bio; in nvmet_bdev_execute_zone_append()
566 bio_init(bio, req->ns->bdev, req->inline_bvec, in nvmet_bdev_execute_zone_append()
567 ARRAY_SIZE(req->inline_bvec), opf); in nvmet_bdev_execute_zone_append()
569 bio = bio_alloc(req->ns->bdev, req->sg_cnt, opf, GFP_KERNEL); in nvmet_bdev_execute_zone_append()
574 bio->bi_private = req; in nvmet_bdev_execute_zone_append()
575 if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA)) in nvmet_bdev_execute_zone_append()
578 for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) { in nvmet_bdev_execute_zone_append()
592 if (total_len != nvmet_rw_data_len(req)) { in nvmet_bdev_execute_zone_append()
601 nvmet_req_bio_put(req, bio); in nvmet_bdev_execute_zone_append()
603 nvmet_req_complete(req, status); in nvmet_bdev_execute_zone_append()
606 u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req) in nvmet_bdev_zns_parse_io_cmd() argument
608 struct nvme_command *cmd = req->cmd; in nvmet_bdev_zns_parse_io_cmd()
612 req->execute = nvmet_bdev_execute_zone_append; in nvmet_bdev_zns_parse_io_cmd()
615 req->execute = nvmet_bdev_execute_zone_mgmt_recv; in nvmet_bdev_zns_parse_io_cmd()
618 req->execute = nvmet_bdev_execute_zone_mgmt_send; in nvmet_bdev_zns_parse_io_cmd()
621 return nvmet_bdev_parse_io_cmd(req); in nvmet_bdev_zns_parse_io_cmd()
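The parse routine above is an opcode dispatch; the elided lines presumably switch on cmd->common.opcode using the ZNS opcodes from include/linux/nvme.h. A sketch of that presumed shape (my_zns_parse_io_cmd is a hypothetical stand-in for the real function body):

u16 my_zns_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	switch (cmd->common.opcode) {
	case nvme_cmd_zone_append:
		req->execute = nvmet_bdev_execute_zone_append;
		return 0;
	case nvme_cmd_zone_mgmt_recv:
		req->execute = nvmet_bdev_execute_zone_mgmt_recv;
		return 0;
	case nvme_cmd_zone_mgmt_send:
		req->execute = nvmet_bdev_execute_zone_mgmt_send;
		return 0;
	default:
		/* not a ZNS opcode: fall back to the generic bdev parser */
		return nvmet_bdev_parse_io_cmd(req);
	}
}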