Lines matching "req" (full-word matches) in drivers/nvme/target/admin-cmd.c
28 static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10) in nvmet_feat_data_len() argument
32 return sizeof(req->sq->ctrl->hostid); in nvmet_feat_data_len()
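The match shows only the signature and the Host Identifier branch; lines that don't contain "req" are filtered out of this listing, so the rest of the helper is a sketch (the switch on the feature ID in cdw10 is inferred):

static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
{
	switch (cdw10 & 0xff) {
	case NVME_FEAT_HOST_ID:
		/* Get Features - Host Identifier carries a data payload */
		return sizeof(req->sq->ctrl->hostid);
	default:
		/* all other supported features return data in the CQE only */
		return 0;
	}
}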
43 static void nvmet_execute_get_log_page_noop(struct nvmet_req *req) in nvmet_execute_get_log_page_noop() argument
45 nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len)); in nvmet_execute_get_log_page_noop()
48 static void nvmet_execute_get_log_page_error(struct nvmet_req *req) in nvmet_execute_get_log_page_error() argument
50 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_execute_get_log_page_error()
60 if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot], in nvmet_execute_get_log_page_error()
71 nvmet_req_complete(req, 0); in nvmet_execute_get_log_page_error()
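The fragments above outline a walk over the controller's error-log ring, copying one nvme_error_slot per iteration into the host buffer. A sketch with the filtered-out bookkeeping restored (NVMET_ERROR_LOG_SLOTS and the newest-first ring walk are recalled from the target code, not shown in the matches):

	spin_lock_irqsave(&ctrl->error_lock, flags);
	slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;

	/* emit the newest entry first, walking the ring backwards */
	for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
		if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
				sizeof(struct nvme_error_slot)))
			break;

		if (slot == 0)
			slot = NVMET_ERROR_LOG_SLOTS - 1;
		else
			slot--;
		offset += sizeof(struct nvme_error_slot);
	}
	spin_unlock_irqrestore(&ctrl->error_lock, flags);

	nvmet_req_complete(req, 0);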
74 static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req, in nvmet_get_smart_log_nsid() argument
80 status = nvmet_req_find_ns(req); in nvmet_get_smart_log_nsid()
85 if (!req->ns->bdev) in nvmet_get_smart_log_nsid()
88 host_reads = part_stat_read(req->ns->bdev, ios[READ]); in nvmet_get_smart_log_nsid()
90 DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000); in nvmet_get_smart_log_nsid()
91 host_writes = part_stat_read(req->ns->bdev, ios[WRITE]); in nvmet_get_smart_log_nsid()
93 DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000); in nvmet_get_smart_log_nsid()
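Per-namespace SMART data comes straight from block-layer accounting: I/O counts from part_stat ios[] and "data units" from sectors[] divided by 1000 (the spec counts in units of 1000 512-byte blocks). The stores into the log don't mention req and so don't appear above; they presumably look like:

	/* slog is the caller-supplied struct nvme_smart_log */
	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);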
103 static u16 nvmet_get_smart_log_all(struct nvmet_req *req, in nvmet_get_smart_log_all() argument
112 ctrl = req->sq->ctrl; in nvmet_get_smart_log_all()
133 static void nvmet_execute_get_log_page_smart(struct nvmet_req *req) in nvmet_execute_get_log_page_smart() argument
139 if (req->transfer_len != sizeof(*log)) in nvmet_execute_get_log_page_smart()
146 if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL)) in nvmet_execute_get_log_page_smart()
147 status = nvmet_get_smart_log_all(req, log); in nvmet_execute_get_log_page_smart()
149 status = nvmet_get_smart_log_nsid(req, log); in nvmet_execute_get_log_page_smart()
153 spin_lock_irqsave(&req->sq->ctrl->error_lock, flags); in nvmet_execute_get_log_page_smart()
154 put_unaligned_le64(req->sq->ctrl->err_counter, in nvmet_execute_get_log_page_smart()
156 spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags); in nvmet_execute_get_log_page_smart()
158 status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log)); in nvmet_execute_get_log_page_smart()
162 nvmet_req_complete(req, status); in nvmet_execute_get_log_page_smart()
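Putting the fragments together, the handler presumably allocates a zeroed log, fills it for one namespace or all of them, patches in the error-log entry count under error_lock, and copies the result out. A sketch with the filtered lines restored:

	struct nvme_smart_log *log;
	u16 status = NVME_SC_INTERNAL;
	unsigned long flags;

	if (req->transfer_len != sizeof(*log))
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
		status = nvmet_get_smart_log_all(req, log);
	else
		status = nvmet_get_smart_log_nsid(req, log);
	if (status)
		goto out_free_log;

	spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
	put_unaligned_le64(req->sq->ctrl->err_counter,
			   &log->num_err_log_entries);
	spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
	kfree(log);
out:
	nvmet_req_complete(req, status);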
194 static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req) in nvmet_execute_get_log_cmd_effects_ns() argument
205 switch (req->cmd->get_log_page.csi) { in nvmet_execute_get_log_cmd_effects_ns()
222 status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log)); in nvmet_execute_get_log_cmd_effects_ns()
226 nvmet_req_complete(req, status); in nvmet_execute_get_log_cmd_effects_ns()
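The CSI switch selects which command-set effects get reported; a plausible shape (the helper names nvmet_get_cmd_effects_nvm/zns and the zoned-build gating are recalled from the target code, not matched above):

	switch (req->cmd->get_log_page.csi) {
	case NVME_CSI_NVM:
		nvmet_get_cmd_effects_nvm(log);
		break;
	case NVME_CSI_ZNS:
		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
			status = NVME_SC_INVALID_IO_CMD_SET;
			goto free;
		}
		nvmet_get_cmd_effects_nvm(log);
		nvmet_get_cmd_effects_zns(log);
		break;
	default:
		status = NVME_SC_INVALID_LOG_PAGE;
		goto free;
	}

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));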
229 static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req) in nvmet_execute_get_log_changed_ns() argument
231 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_execute_get_log_changed_ns()
235 if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32)) in nvmet_execute_get_log_changed_ns()
243 status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len); in nvmet_execute_get_log_changed_ns()
245 status = nvmet_zero_sgl(req, len, req->transfer_len - len); in nvmet_execute_get_log_changed_ns()
247 nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR); in nvmet_execute_get_log_changed_ns()
250 nvmet_req_complete(req, status); in nvmet_execute_get_log_changed_ns()
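Between the copy and the zero-fill sits the length computation, done under ctrl->lock; a sketch of the middle of the function (U32_MAX marks an overflowed changed-NS list, reported as a single 0xffffffff entry):

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns == U32_MAX)
		len = sizeof(__le32);
	else
		len = ctrl->nr_changed_ns * sizeof(__le32);

	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
	if (!status)
		status = nvmet_zero_sgl(req, len, req->transfer_len - len);
	ctrl->nr_changed_ns = 0;
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
	mutex_unlock(&ctrl->lock);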
253 static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid, in nvmet_format_ana_group() argument
256 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_format_ana_group()
261 if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) { in nvmet_format_ana_group()
270 desc->state = req->port->ana_state[grpid]; in nvmet_format_ana_group()
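Unless the host asked for Return Groups Only (RGO), the descriptor lists every NSID in the group. Reconstructed, with the xarray walk and descriptor fill that the filter hides (nvmet_ana_chgcnt is assumed from the target's ANA bookkeeping):

	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		xa_for_each(&ctrl->subsys->namespaces, idx, ns)
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(count);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	return struct_size(desc, nsids, count);	/* bytes actually used */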
275 static void nvmet_execute_get_log_page_ana(struct nvmet_req *req) in nvmet_execute_get_log_page_ana() argument
295 len = nvmet_format_ana_group(req, grpid, desc); in nvmet_execute_get_log_page_ana()
296 status = nvmet_copy_to_sgl(req, offset, desc, len); in nvmet_execute_get_log_page_ana()
309 nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE); in nvmet_execute_get_log_page_ana()
315 status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr)); in nvmet_execute_get_log_page_ana()
317 nvmet_req_complete(req, status); in nvmet_execute_get_log_page_ana()
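The loop formats one descriptor per enabled ANA group, appending each at a running offset; the header is copied last, once the group count is final. A trimmed sketch (nvmet_ana_sem, nvmet_ana_group_enabled[] and NVMET_MAX_ANAGRPS are recalled, not matched above):

	down_read(&nvmet_ana_sem);
	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (!nvmet_ana_group_enabled[grpid])
			continue;
		len = nvmet_format_ana_group(req, grpid, desc);
		status = nvmet_copy_to_sgl(req, offset, desc, len);
		if (status)
			break;
		offset += len;
		ngrps++;
	}

	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	hdr.ngrps = cpu_to_le16(ngrps);
	nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
	up_read(&nvmet_ana_sem);

	/* header last, now that ngrps is known */
	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));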
320 static void nvmet_execute_get_log_page(struct nvmet_req *req) in nvmet_execute_get_log_page() argument
322 if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd))) in nvmet_execute_get_log_page()
325 switch (req->cmd->get_log_page.lid) { in nvmet_execute_get_log_page()
327 return nvmet_execute_get_log_page_error(req); in nvmet_execute_get_log_page()
329 return nvmet_execute_get_log_page_smart(req); in nvmet_execute_get_log_page()
336 return nvmet_execute_get_log_page_noop(req); in nvmet_execute_get_log_page()
338 return nvmet_execute_get_log_changed_ns(req); in nvmet_execute_get_log_page()
340 return nvmet_execute_get_log_cmd_effects_ns(req); in nvmet_execute_get_log_page()
342 return nvmet_execute_get_log_page_ana(req); in nvmet_execute_get_log_page()
345 req->cmd->get_log_page.lid, req->sq->qid); in nvmet_execute_get_log_page()
346 req->error_loc = offsetof(struct nvme_get_log_page_command, lid); in nvmet_execute_get_log_page()
347 nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR); in nvmet_execute_get_log_page()
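With the case labels restored (they were filtered out because they don't contain "req"), the lid dispatch plausibly reads:

	switch (req->cmd->get_log_page.lid) {
	case NVME_LOG_ERROR:
		return nvmet_execute_get_log_page_error(req);
	case NVME_LOG_SMART:
		return nvmet_execute_get_log_page_smart(req);
	case NVME_LOG_FW_SLOT:
		/* single, always-active firmware slot: all-zeroes is valid */
		return nvmet_execute_get_log_page_noop(req);
	case NVME_LOG_CHANGED_NS:
		return nvmet_execute_get_log_changed_ns(req);
	case NVME_LOG_CMD_EFFECTS:
		return nvmet_execute_get_log_cmd_effects_ns(req);
	case NVME_LOG_ANA:
		return nvmet_execute_get_log_page_ana(req);
	}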
350 static void nvmet_execute_identify_ctrl(struct nvmet_req *req) in nvmet_execute_identify_ctrl() argument
352 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_execute_identify_ctrl()
451 if (req->port->inline_data_size) in nvmet_execute_identify_ctrl()
462 cmd_capsule_size += req->port->inline_data_size; in nvmet_execute_identify_ctrl()
485 status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); in nvmet_execute_identify_ctrl()
489 nvmet_req_complete(req, status); in nvmet_execute_identify_ctrl()
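The two inline_data_size fragments belong to the fabrics-specific part of Identify Controller: advertising in-capsule data in SGLS and sizing IOCCSZ. The surrounding assignments are recalled from the target code, so treat this as a sketch:

	id->sgls = cpu_to_le32(1 << 0);	/* SGLs always supported */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);	/* in-capsule data */

	/* max command capsule = SQE plus in-capsule data, in 16-byte units */
	cmd_capsule_size = sizeof(struct nvme_command);
	if (req->port->inline_data_size)
		cmd_capsule_size += req->port->inline_data_size;
	id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);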
492 static void nvmet_execute_identify_ns(struct nvmet_req *req) in nvmet_execute_identify_ns() argument
497 if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) { in nvmet_execute_identify_ns()
498 req->error_loc = offsetof(struct nvme_identify, nsid); in nvmet_execute_identify_ns()
510 status = nvmet_req_find_ns(req); in nvmet_execute_identify_ns()
516 if (nvmet_ns_revalidate(req->ns)) { in nvmet_execute_identify_ns()
517 mutex_lock(&req->ns->subsys->lock); in nvmet_execute_identify_ns()
518 nvmet_ns_changed(req->ns->subsys, req->ns->nsid); in nvmet_execute_identify_ns()
519 mutex_unlock(&req->ns->subsys->lock); in nvmet_execute_identify_ns()
527 cpu_to_le64(req->ns->size >> req->ns->blksize_shift); in nvmet_execute_identify_ns()
528 switch (req->port->ana_state[req->ns->anagrpid]) { in nvmet_execute_identify_ns()
537 if (req->ns->bdev) in nvmet_execute_identify_ns()
538 nvmet_bdev_set_limits(req->ns->bdev, id); in nvmet_execute_identify_ns()
552 id->anagrpid = cpu_to_le32(req->ns->anagrpid); in nvmet_execute_identify_ns()
554 memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid)); in nvmet_execute_identify_ns()
556 id->lbaf[0].ds = req->ns->blksize_shift; in nvmet_execute_identify_ns()
558 if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) { in nvmet_execute_identify_ns()
563 id->dps = req->ns->pi_type; in nvmet_execute_identify_ns()
565 id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size); in nvmet_execute_identify_ns()
568 if (req->ns->readonly) in nvmet_execute_identify_ns()
572 status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); in nvmet_execute_identify_ns()
576 nvmet_req_complete(req, status); in nvmet_execute_identify_ns()
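The ANA-state switch decides whether NUSE may be reported through this path; reconstructed with its case labels:

	id->ncap = id->nsze =
		cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
	switch (req->port->ana_state[req->ns->anagrpid]) {
	case NVME_ANA_INACCESSIBLE:
	case NVME_ANA_PERSISTENT_LOSS:
		/* no I/O possible through this path, leave nuse at zero */
		break;
	default:
		id->nuse = id->nsze;
		break;
	}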
579 static void nvmet_execute_identify_nslist(struct nvmet_req *req) in nvmet_execute_identify_nslist() argument
582 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_execute_identify_nslist()
585 u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid); in nvmet_execute_identify_nslist()
595 req->error_loc = offsetof(struct nvme_identify, nsid); in nvmet_execute_identify_nslist()
614 status = nvmet_copy_to_sgl(req, 0, list, buf_size); in nvmet_execute_identify_nslist()
618 nvmet_req_complete(req, status); in nvmet_execute_identify_nslist()
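Between the min_nsid check and the final copy, the active-namespace walk is filtered out of this list; it presumably iterates the subsystem's namespace xarray, collecting NSIDs greater than the one requested:

	xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}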
621 static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len, in nvmet_copy_ns_identifier() argument
630 status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc)); in nvmet_copy_ns_identifier()
635 status = nvmet_copy_to_sgl(req, *off, id, len); in nvmet_copy_ns_identifier()
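This helper emits one Namespace Identification Descriptor: a fixed header (type and length) followed by the identifier bytes, advancing *off past both. Filling in the lines the filter dropped:

static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
				    void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}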
643 static void nvmet_execute_identify_desclist(struct nvmet_req *req) in nvmet_execute_identify_desclist() argument
648 status = nvmet_req_find_ns(req); in nvmet_execute_identify_desclist()
652 if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) { in nvmet_execute_identify_desclist()
653 status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID, in nvmet_execute_identify_desclist()
655 &req->ns->uuid, &off); in nvmet_execute_identify_desclist()
659 if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) { in nvmet_execute_identify_desclist()
660 status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID, in nvmet_execute_identify_desclist()
662 &req->ns->nguid, &off); in nvmet_execute_identify_desclist()
667 status = nvmet_copy_ns_identifier(req, NVME_NIDT_CSI, in nvmet_execute_identify_desclist()
669 &req->ns->csi, &off); in nvmet_execute_identify_desclist()
673 if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off, in nvmet_execute_identify_desclist()
678 nvmet_req_complete(req, status); in nvmet_execute_identify_desclist()
681 static void nvmet_execute_identify_ctrl_nvm(struct nvmet_req *req) in nvmet_execute_identify_ctrl_nvm() argument
684 nvmet_req_complete(req, in nvmet_execute_identify_ctrl_nvm()
685 nvmet_zero_sgl(req, 0, sizeof(struct nvme_id_ctrl_nvm))); in nvmet_execute_identify_ctrl_nvm()
688 static void nvmet_execute_identify(struct nvmet_req *req) in nvmet_execute_identify() argument
690 if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE)) in nvmet_execute_identify()
693 switch (req->cmd->identify.cns) { in nvmet_execute_identify()
695 nvmet_execute_identify_ns(req); in nvmet_execute_identify()
698 nvmet_execute_identify_ctrl(req); in nvmet_execute_identify()
701 nvmet_execute_identify_nslist(req); in nvmet_execute_identify()
704 nvmet_execute_identify_desclist(req); in nvmet_execute_identify()
707 switch (req->cmd->identify.csi) { in nvmet_execute_identify()
713 nvmet_execute_identify_ns_zns(req); in nvmet_execute_identify()
720 switch (req->cmd->identify.csi) { in nvmet_execute_identify()
722 nvmet_execute_identify_ctrl_nvm(req); in nvmet_execute_identify()
726 nvmet_execute_identify_ctrl_zns(req); in nvmet_execute_identify()
735 req->cmd->identify.cns, req->sq->qid); in nvmet_execute_identify()
736 req->error_loc = offsetof(struct nvme_identify, cns); in nvmet_execute_identify()
737 nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR); in nvmet_execute_identify()
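With the case labels restored, the CNS dispatch plausibly reads (constant names as in include/linux/nvme.h):

	switch (req->cmd->identify.cns) {
	case NVME_ID_CNS_NS:
		nvmet_execute_identify_ns(req);
		return;
	case NVME_ID_CNS_CTRL:
		nvmet_execute_identify_ctrl(req);
		return;
	case NVME_ID_CNS_NS_ACTIVE_LIST:
		nvmet_execute_identify_nslist(req);
		return;
	case NVME_ID_CNS_NS_DESC_LIST:
		nvmet_execute_identify_desclist(req);
		return;
	case NVME_ID_CNS_CS_NS:
		switch (req->cmd->identify.csi) {
		case NVME_CSI_ZNS:
			if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
				nvmet_execute_identify_ns_zns(req);
				return;
			}
			break;
		}
		break;
	case NVME_ID_CNS_CS_CTRL:
		switch (req->cmd->identify.csi) {
		case NVME_CSI_NVM:
			nvmet_execute_identify_ctrl_nvm(req);
			return;
		case NVME_CSI_ZNS:
			if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
				nvmet_execute_identify_ctrl_zns(req);
				return;
			}
			break;
		}
		break;
	}
	/* anything unhandled falls through to the invalid-field completion */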
747 static void nvmet_execute_abort(struct nvmet_req *req) in nvmet_execute_abort() argument
749 if (!nvmet_check_transfer_len(req, 0)) in nvmet_execute_abort()
751 nvmet_set_result(req, 1); in nvmet_execute_abort()
752 nvmet_req_complete(req, 0); in nvmet_execute_abort()
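These three lines are nearly the whole handler: the target never aborts anything, so it sets bit 0 of the result dword ("command not aborted") and completes with success. The complete function, with only the early return restored:

static void nvmet_execute_abort(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	nvmet_set_result(req, 1);	/* bit 0: command was not aborted */
	nvmet_req_complete(req, 0);
}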
755 static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req) in nvmet_write_protect_flush_sync() argument
759 if (req->ns->file) in nvmet_write_protect_flush_sync()
760 status = nvmet_file_flush(req); in nvmet_write_protect_flush_sync()
762 status = nvmet_bdev_flush(req); in nvmet_write_protect_flush_sync()
765 pr_err("write protect flush failed nsid: %u\n", req->ns->nsid); in nvmet_write_protect_flush_sync()
769 static u16 nvmet_set_feat_write_protect(struct nvmet_req *req) in nvmet_set_feat_write_protect() argument
771 u32 write_protect = le32_to_cpu(req->cmd->common.cdw11); in nvmet_set_feat_write_protect()
772 struct nvmet_subsys *subsys = nvmet_req_subsys(req); in nvmet_set_feat_write_protect()
775 status = nvmet_req_find_ns(req); in nvmet_set_feat_write_protect()
782 req->ns->readonly = true; in nvmet_set_feat_write_protect()
783 status = nvmet_write_protect_flush_sync(req); in nvmet_set_feat_write_protect()
785 req->ns->readonly = false; in nvmet_set_feat_write_protect()
788 req->ns->readonly = false; in nvmet_set_feat_write_protect()
796 nvmet_ns_changed(subsys, req->ns->nsid); in nvmet_set_feat_write_protect()
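The three readonly assignments belong to a switch on the requested write-protect state, run under the subsystem lock; a sketch (the flush-then-undo ordering follows from nvmet_write_protect_flush_sync above):

	mutex_lock(&subsys->lock);
	switch (write_protect) {
	case NVME_NS_WRITE_PROTECT:
		req->ns->readonly = true;
		status = nvmet_write_protect_flush_sync(req);
		if (status)
			req->ns->readonly = false;	/* flush failed, undo */
		break;
	case NVME_NS_NO_WRITE_PROTECT:
		req->ns->readonly = false;
		status = 0;
		break;
	default:
		break;
	}

	if (!status)
		nvmet_ns_changed(subsys, req->ns->nsid);
	mutex_unlock(&subsys->lock);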
801 u16 nvmet_set_feat_kato(struct nvmet_req *req) in nvmet_set_feat_kato() argument
803 u32 val32 = le32_to_cpu(req->cmd->common.cdw11); in nvmet_set_feat_kato()
805 nvmet_stop_keep_alive_timer(req->sq->ctrl); in nvmet_set_feat_kato()
806 req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000); in nvmet_set_feat_kato()
807 nvmet_start_keep_alive_timer(req->sq->ctrl); in nvmet_set_feat_kato()
809 nvmet_set_result(req, req->sq->ctrl->kato); in nvmet_set_feat_kato()
814 u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask) in nvmet_set_feat_async_event() argument
816 u32 val32 = le32_to_cpu(req->cmd->common.cdw11); in nvmet_set_feat_async_event()
819 req->error_loc = offsetof(struct nvme_common_command, cdw11); in nvmet_set_feat_async_event()
823 WRITE_ONCE(req->sq->ctrl->aen_enabled, val32); in nvmet_set_feat_async_event()
824 nvmet_set_result(req, val32); in nvmet_set_feat_async_event()
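Between reading cdw11 and storing it, the handler presumably rejects any bits outside the supported AEN mask; the check the filter hides:

	if (val32 & ~mask) {
		req->error_loc = offsetof(struct nvme_common_command, cdw11);
		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
	}

	WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
	nvmet_set_result(req, val32);
	return 0;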
829 void nvmet_execute_set_features(struct nvmet_req *req) in nvmet_execute_set_features() argument
831 struct nvmet_subsys *subsys = nvmet_req_subsys(req); in nvmet_execute_set_features()
832 u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); in nvmet_execute_set_features()
833 u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11); in nvmet_execute_set_features()
838 if (!nvmet_check_data_len_lte(req, 0)) in nvmet_execute_set_features()
849 nvmet_set_result(req, in nvmet_execute_set_features()
853 status = nvmet_set_feat_kato(req); in nvmet_execute_set_features()
856 status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL); in nvmet_execute_set_features()
862 status = nvmet_set_feat_write_protect(req); in nvmet_execute_set_features()
865 req->error_loc = offsetof(struct nvme_common_command, cdw10); in nvmet_execute_set_features()
870 nvmet_req_complete(req, status); in nvmet_execute_set_features()
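Restoring the case labels, the feature switch plausibly reads (the Number of Queues result packs max_qid - 1 into both halves; Host Identifier may only be set at connect time, hence the sequence error):

	switch (cdw10 & 0xff) {
	case NVME_FEAT_NUM_QUEUES:
		ncqr = (cdw11 >> 16) & 0xffff;
		nsqr = cdw11 & 0xffff;
		if (ncqr == 0xffff || nsqr == 0xffff) {
			status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
			break;
		}
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		status = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
		break;
	case NVME_FEAT_HOST_ID:
		status = NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_set_feat_write_protect(req);
		break;
	default:
		req->error_loc = offsetof(struct nvme_common_command, cdw10);
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		break;
	}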
873 static u16 nvmet_get_feat_write_protect(struct nvmet_req *req) in nvmet_get_feat_write_protect() argument
875 struct nvmet_subsys *subsys = nvmet_req_subsys(req); in nvmet_get_feat_write_protect()
878 result = nvmet_req_find_ns(req); in nvmet_get_feat_write_protect()
883 if (req->ns->readonly == true) in nvmet_get_feat_write_protect()
887 nvmet_set_result(req, result); in nvmet_get_feat_write_protect()
893 void nvmet_get_feat_kato(struct nvmet_req *req) in nvmet_get_feat_kato() argument
895 nvmet_set_result(req, req->sq->ctrl->kato * 1000); in nvmet_get_feat_kato()
898 void nvmet_get_feat_async_event(struct nvmet_req *req) in nvmet_get_feat_async_event() argument
900 nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled)); in nvmet_get_feat_async_event()
903 void nvmet_execute_get_features(struct nvmet_req *req) in nvmet_execute_get_features() argument
905 struct nvmet_subsys *subsys = nvmet_req_subsys(req); in nvmet_execute_get_features()
906 u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); in nvmet_execute_get_features()
909 if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10))) in nvmet_execute_get_features()
935 nvmet_get_feat_async_event(req); in nvmet_execute_get_features()
938 nvmet_set_result(req, 1); in nvmet_execute_get_features()
941 nvmet_set_result(req, in nvmet_execute_get_features()
945 nvmet_get_feat_kato(req); in nvmet_execute_get_features()
949 if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) { in nvmet_execute_get_features()
950 req->error_loc = in nvmet_execute_get_features()
956 status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid, in nvmet_execute_get_features()
957 sizeof(req->sq->ctrl->hostid)); in nvmet_execute_get_features()
960 status = nvmet_get_feat_write_protect(req); in nvmet_execute_get_features()
963 req->error_loc = in nvmet_execute_get_features()
969 nvmet_req_complete(req, status); in nvmet_execute_get_features()
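The Host Identifier fragments fit a case that insists on the 128-bit identifier flag (cdw11 bit 0) before copying the stored hostid out; reconstructed:

	case NVME_FEAT_HOST_ID:
		/* only the 128-bit host identifier is supported */
		if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
			req->error_loc =
				offsetof(struct nvme_common_command, cdw11);
			status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
			break;
		}

		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
					   sizeof(req->sq->ctrl->hostid));
		break;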
972 void nvmet_execute_async_event(struct nvmet_req *req) in nvmet_execute_async_event() argument
974 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_execute_async_event()
976 if (!nvmet_check_transfer_len(req, 0)) in nvmet_execute_async_event()
982 nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_STATUS_DNR); in nvmet_execute_async_event()
985 ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req; in nvmet_execute_async_event()
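An AER is parked rather than completed: the request is stashed in async_event_cmds under ctrl->lock until an event fires, and is rejected only once the slots are full. A sketch of the whole handler (NVMET_ASYNC_EVENTS and the async_event_work kick are recalled, not matched above):

void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_STATUS_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	/* deliver any event that was queued before this AER arrived */
	queue_work(nvmet_wq, &ctrl->async_event_work);
}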
991 void nvmet_execute_keep_alive(struct nvmet_req *req) in nvmet_execute_keep_alive() argument
993 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_execute_keep_alive()
996 if (!nvmet_check_transfer_len(req, 0)) in nvmet_execute_keep_alive()
1008 nvmet_req_complete(req, status); in nvmet_execute_keep_alive()
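Keep Alive simply re-arms the controller's ka_work timer; a sketch of the body between the length check and the completion (the kato == 0 rejection and its status name are recalled from the target code, not matched above):

	if (!ctrl->kato) {
		status = NVME_SC_KA_TIMEOUT_INVALID;
		goto out;
	}

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		 ctrl->cntlid, ctrl->kato);
	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
out:
	nvmet_req_complete(req, status);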
1011 u16 nvmet_parse_admin_cmd(struct nvmet_req *req) in nvmet_parse_admin_cmd() argument
1013 struct nvme_command *cmd = req->cmd; in nvmet_parse_admin_cmd()
1017 return nvmet_parse_fabrics_admin_cmd(req); in nvmet_parse_admin_cmd()
1018 if (nvmet_is_disc_subsys(nvmet_req_subsys(req))) in nvmet_parse_admin_cmd()
1019 return nvmet_parse_discovery_cmd(req); in nvmet_parse_admin_cmd()
1021 ret = nvmet_check_ctrl_status(req); in nvmet_parse_admin_cmd()
1025 if (nvmet_is_passthru_req(req)) in nvmet_parse_admin_cmd()
1026 return nvmet_parse_passthru_admin_cmd(req); in nvmet_parse_admin_cmd()
1030 req->execute = nvmet_execute_get_log_page; in nvmet_parse_admin_cmd()
1033 req->execute = nvmet_execute_identify; in nvmet_parse_admin_cmd()
1036 req->execute = nvmet_execute_abort; in nvmet_parse_admin_cmd()
1039 req->execute = nvmet_execute_set_features; in nvmet_parse_admin_cmd()
1042 req->execute = nvmet_execute_get_features; in nvmet_parse_admin_cmd()
1045 req->execute = nvmet_execute_async_event; in nvmet_parse_admin_cmd()
1048 req->execute = nvmet_execute_keep_alive; in nvmet_parse_admin_cmd()
1051 return nvmet_report_invalid_opcode(req); in nvmet_parse_admin_cmd()
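The opcode dispatch, which the filter reduces to bare req->execute assignments, plausibly reads:

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_identify;
		return 0;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	default:
		return nvmet_report_invalid_opcode(req);
	}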