Lines Matching +full:ns +full:- +full:firmware
1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (c) 2011-2014, Intel Corporation.
9 #include <linux/blk-mq.h>
10 #include <linux/blk-integrity.h>
17 #include <linux/backing-dev.h>
29 #include <linux/nvme-auth.h>
106 * nvme_wq - hosts nvme related works that are not reset or delete
107 * nvme_reset_wq - hosts nvme reset works
108 * nvme_delete_wq - hosts nvme delete works
111 * keep-alive, periodic reconnects etc. nvme_reset_wq
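A hedged, kernel-style sketch of the split this comment describes (not the driver's actual init path): dedicated reclaim-capable workqueues, so reset work never has to wait behind ordinary maintenance work such as scans, keep-alive and reconnects. All names below are illustrative.

	#include <linux/workqueue.h>
	#include <linux/errno.h>

	/* Illustrative names only; the real driver uses nvme_wq/nvme_reset_wq/nvme_delete_wq. */
	static struct workqueue_struct *example_wq;		/* scans, keep-alive, reconnects */
	static struct workqueue_struct *example_reset_wq;	/* controller resets only */

	static int example_create_wqs(void)
	{
		example_wq = alloc_workqueue("example-wq",
				WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
		if (!example_wq)
			return -ENOMEM;

		example_reset_wq = alloc_workqueue("example-reset-wq",
				WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
		if (!example_reset_wq) {
			destroy_workqueue(example_wq);
			return -ENOMEM;
		}
		return 0;
	}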
137 .name = "nvme-subsystem",
143 .name = "nvme-generic",
157 if (nvme_ctrl_state(ctrl) == NVME_CTRL_LIVE && ctrl->tagset) in nvme_queue_scan()
158 queue_work(nvme_wq, &ctrl->scan_work); in nvme_queue_scan()
170 return -EBUSY; in nvme_try_sched_reset()
171 if (!queue_work(nvme_reset_wq, &ctrl->reset_work)) in nvme_try_sched_reset()
172 return -EBUSY; in nvme_try_sched_reset()
185 set_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags); in nvme_failfast_work()
186 dev_info(ctrl->device, "failfast expired\n"); in nvme_failfast_work()
192 if (!ctrl->opts || ctrl->opts->fast_io_fail_tmo == -1) in nvme_start_failfast_work()
195 schedule_delayed_work(&ctrl->failfast_work, in nvme_start_failfast_work()
196 ctrl->opts->fast_io_fail_tmo * HZ); in nvme_start_failfast_work()
201 if (!ctrl->opts) in nvme_stop_failfast_work()
204 cancel_delayed_work_sync(&ctrl->failfast_work); in nvme_stop_failfast_work()
205 clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags); in nvme_stop_failfast_work()
212 return -EBUSY; in nvme_reset_ctrl()
213 if (!queue_work(nvme_reset_wq, &ctrl->reset_work)) in nvme_reset_ctrl()
214 return -EBUSY; in nvme_reset_ctrl()
225 flush_work(&ctrl->reset_work); in nvme_reset_ctrl_sync()
227 ret = -ENETRESET; in nvme_reset_ctrl_sync()
235 dev_info(ctrl->device, in nvme_do_delete_ctrl()
238 flush_work(&ctrl->reset_work); in nvme_do_delete_ctrl()
241 ctrl->ops->delete_ctrl(ctrl); in nvme_do_delete_ctrl()
256 return -EBUSY; in nvme_delete_ctrl()
257 if (!queue_work(nvme_delete_wq, &ctrl->delete_work)) in nvme_delete_ctrl()
258 return -EBUSY; in nvme_delete_ctrl()
267 * since ->delete_ctrl can free the controller. in nvme_delete_ctrl_sync()
323 crd = (nvme_req(req)->status & NVME_STATUS_CRD) >> 11; in nvme_retry_req()
325 delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100; in nvme_retry_req()
327 nvme_req(req)->retries++; in nvme_retry_req()
329 blk_mq_delay_kick_requeue_list(req->q, delay); in nvme_retry_req()
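The CRD field (bits 12:11 of the completion status) picks one of the controller's three Command Retry Delay Times from Identify Controller, each given in units of 100 ms. A standalone sketch of that arithmetic (not kernel code), with made-up CRDT values:

	#include <stdio.h>
	#include <stdint.h>

	#define EX_STATUS_CRD	0x1800		/* bits 12:11 of the status word */

	int main(void)
	{
		uint16_t status = 0x1000;		/* assume CRD field == 2 */
		uint16_t crdt[3] = { 1, 5, 20 };	/* hypothetical CRDT1..3, in 100 ms units */
		unsigned int crd = (status & EX_STATUS_CRD) >> 11;
		unsigned long delay_ms = crd ? crdt[crd - 1] * 100 : 0;

		printf("CRD=%u -> requeue delay %lu ms\n", crd, delay_ms);	/* CRD=2 -> 500 ms */
		return 0;
	}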
334 struct nvme_ns *ns = req->q->queuedata; in nvme_log_error() local
337 if (ns) { in nvme_log_error()
339 ns->disk ? ns->disk->disk_name : "?", in nvme_log_error()
340 nvme_get_opcode_str(nr->cmd->common.opcode), in nvme_log_error()
341 nr->cmd->common.opcode, in nvme_log_error()
342 nvme_sect_to_lba(ns->head, blk_rq_pos(req)), in nvme_log_error()
343 blk_rq_bytes(req) >> ns->head->lba_shift, in nvme_log_error()
344 nvme_get_error_status_str(nr->status), in nvme_log_error()
345 NVME_SCT(nr->status), /* Status Code Type */ in nvme_log_error()
346 nr->status & NVME_SC_MASK, /* Status Code */ in nvme_log_error()
347 nr->status & NVME_STATUS_MORE ? "MORE " : "", in nvme_log_error()
348 nr->status & NVME_STATUS_DNR ? "DNR " : ""); in nvme_log_error()
353 dev_name(nr->ctrl->device), in nvme_log_error()
354 nvme_get_admin_opcode_str(nr->cmd->common.opcode), in nvme_log_error()
355 nr->cmd->common.opcode, in nvme_log_error()
356 nvme_get_error_status_str(nr->status), in nvme_log_error()
357 NVME_SCT(nr->status), /* Status Code Type */ in nvme_log_error()
358 nr->status & NVME_SC_MASK, /* Status Code */ in nvme_log_error()
359 nr->status & NVME_STATUS_MORE ? "MORE " : "", in nvme_log_error()
360 nr->status & NVME_STATUS_DNR ? "DNR " : ""); in nvme_log_error()
365 struct nvme_ns *ns = req->q->queuedata; in nvme_log_err_passthru() local
370 ns ? ns->disk->disk_name : dev_name(nr->ctrl->device), in nvme_log_err_passthru()
371 ns ? nvme_get_opcode_str(nr->cmd->common.opcode) : in nvme_log_err_passthru()
372 nvme_get_admin_opcode_str(nr->cmd->common.opcode), in nvme_log_err_passthru()
373 nr->cmd->common.opcode, in nvme_log_err_passthru()
374 nvme_get_error_status_str(nr->status), in nvme_log_err_passthru()
375 NVME_SCT(nr->status), /* Status Code Type */ in nvme_log_err_passthru()
376 nr->status & NVME_SC_MASK, /* Status Code */ in nvme_log_err_passthru()
377 nr->status & NVME_STATUS_MORE ? "MORE " : "", in nvme_log_err_passthru()
378 nr->status & NVME_STATUS_DNR ? "DNR " : "", in nvme_log_err_passthru()
379 nr->cmd->common.cdw10, in nvme_log_err_passthru()
380 nr->cmd->common.cdw11, in nvme_log_err_passthru()
381 nr->cmd->common.cdw12, in nvme_log_err_passthru()
382 nr->cmd->common.cdw13, in nvme_log_err_passthru()
383 nr->cmd->common.cdw14, in nvme_log_err_passthru()
384 nr->cmd->common.cdw15); in nvme_log_err_passthru()
396 if (likely(nvme_req(req)->status == 0)) in nvme_decide_disposition()
400 (nvme_req(req)->status & NVME_STATUS_DNR) || in nvme_decide_disposition()
401 nvme_req(req)->retries >= nvme_max_retries) in nvme_decide_disposition()
404 if ((nvme_req(req)->status & NVME_SCT_SC_MASK) == NVME_SC_AUTH_REQUIRED) in nvme_decide_disposition()
407 if (req->cmd_flags & REQ_NVME_MPATH) { in nvme_decide_disposition()
408 if (nvme_is_path_error(nvme_req(req)->status) || in nvme_decide_disposition()
409 blk_queue_dying(req->q)) in nvme_decide_disposition()
412 if (blk_queue_dying(req->q)) in nvme_decide_disposition()
423 struct nvme_ns *ns = req->q->queuedata; in nvme_end_req_zoned() local
425 req->__sector = nvme_lba_to_sect(ns->head, in nvme_end_req_zoned()
426 le64_to_cpu(nvme_req(req)->result.u64)); in nvme_end_req_zoned()
434 if (req->cmd_flags & REQ_NVME_MPATH) in __nvme_end_req()
440 blk_status_t status = nvme_error_status(nvme_req(req)->status); in nvme_end_req()
442 if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) { in nvme_end_req()
454 struct nvme_ctrl *ctrl = nvme_req(req)->ctrl; in nvme_complete_rq()
460 * Completions of long-running commands should not be able to in nvme_complete_rq()
464 * req->deadline - req->timeout is the command submission time in nvme_complete_rq()
467 if (ctrl->kas && in nvme_complete_rq()
468 req->deadline - req->timeout >= ctrl->ka_last_check_time) in nvme_complete_rq()
469 ctrl->comp_seen = true; in nvme_complete_rq()
483 queue_work(nvme_wq, &ctrl->dhchap_auth_work); in nvme_complete_rq()
502 * Called to unwind from ->queue_rq on a failed command submission so that the
509 nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR; in nvme_host_path_error()
518 dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device, in nvme_cancel_request()
519 "Cancelling I/O %d", req->tag); in nvme_cancel_request()
525 nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD; in nvme_cancel_request()
526 nvme_req(req)->flags |= NVME_REQ_CANCELLED; in nvme_cancel_request()
534 if (ctrl->tagset) { in nvme_cancel_tagset()
535 blk_mq_tagset_busy_iter(ctrl->tagset, in nvme_cancel_tagset()
537 blk_mq_tagset_wait_completed_request(ctrl->tagset); in nvme_cancel_tagset()
544 if (ctrl->admin_tagset) { in nvme_cancel_admin_tagset()
545 blk_mq_tagset_busy_iter(ctrl->admin_tagset, in nvme_cancel_admin_tagset()
547 blk_mq_tagset_wait_completed_request(ctrl->admin_tagset); in nvme_cancel_admin_tagset()
559 spin_lock_irqsave(&ctrl->lock, flags); in nvme_change_ctrl_state()
629 WRITE_ONCE(ctrl->state, new_state); in nvme_change_ctrl_state()
630 wake_up_all(&ctrl->state_wq); in nvme_change_ctrl_state()
633 spin_unlock_irqrestore(&ctrl->lock, flags); in nvme_change_ctrl_state()
655 wait_event(ctrl->state_wq, in nvme_wait_reset()
668 ida_free(&head->subsys->ns_ida, head->instance); in nvme_free_ns_head()
669 cleanup_srcu_struct(&head->srcu); in nvme_free_ns_head()
670 nvme_put_subsystem(head->subsys); in nvme_free_ns_head()
676 return kref_get_unless_zero(&head->ref); in nvme_tryget_ns_head()
681 kref_put(&head->ref, nvme_free_ns_head); in nvme_put_ns_head()
686 struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref); in nvme_free_ns() local
688 put_disk(ns->disk); in nvme_free_ns()
689 nvme_put_ns_head(ns->head); in nvme_free_ns()
690 nvme_put_ctrl(ns->ctrl); in nvme_free_ns()
691 kfree(ns); in nvme_free_ns()
694 bool nvme_get_ns(struct nvme_ns *ns) in nvme_get_ns() argument
696 return kref_get_unless_zero(&ns->kref); in nvme_get_ns()
699 void nvme_put_ns(struct nvme_ns *ns) in nvme_put_ns() argument
701 kref_put(&ns->kref, nvme_free_ns); in nvme_put_ns()
707 nvme_req(req)->status = 0; in nvme_clear_nvme_request()
708 nvme_req(req)->retries = 0; in nvme_clear_nvme_request()
709 nvme_req(req)->flags = 0; in nvme_clear_nvme_request()
710 req->rq_flags |= RQF_DONTPREP; in nvme_clear_nvme_request()
719 if (req->q->queuedata) { in nvme_init_request()
720 struct nvme_ns *ns = req->q->disk->private_data; in nvme_init_request() local
722 logging_enabled = ns->head->passthru_err_log_enabled; in nvme_init_request()
723 req->timeout = NVME_IO_TIMEOUT; in nvme_init_request()
725 logging_enabled = nr->ctrl->passthru_err_log_enabled; in nvme_init_request()
726 req->timeout = NVME_ADMIN_TIMEOUT; in nvme_init_request()
730 req->rq_flags |= RQF_QUIET; in nvme_init_request()
733 cmd->common.flags &= ~NVME_CMD_SGL_ALL; in nvme_init_request()
735 req->cmd_flags |= REQ_FAILFAST_DRIVER; in nvme_init_request()
736 if (req->mq_hctx->type == HCTX_TYPE_POLL) in nvme_init_request()
737 req->cmd_flags |= REQ_POLLED; in nvme_init_request()
739 memcpy(nr->cmd, cmd, sizeof(*cmd)); in nvme_init_request()
760 !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) && in nvme_fail_nonready_command()
761 !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH)) in nvme_fail_nonready_command()
780 if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD)) in __nvme_check_ready()
783 if (ctrl->ops->flags & NVME_F_FABRICS) { in __nvme_check_ready()
791 if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) && in __nvme_check_ready()
792 (req->cmd->fabrics.fctype == nvme_fabrics_type_connect || in __nvme_check_ready()
793 req->cmd->fabrics.fctype == nvme_fabrics_type_auth_send || in __nvme_check_ready()
794 req->cmd->fabrics.fctype == nvme_fabrics_type_auth_receive)) in __nvme_check_ready()
808 static inline void nvme_setup_flush(struct nvme_ns *ns, in nvme_setup_flush() argument
812 cmnd->common.opcode = nvme_cmd_flush; in nvme_setup_flush()
813 cmnd->common.nsid = cpu_to_le32(ns->head->ns_id); in nvme_setup_flush()
816 static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req, in nvme_setup_discard() argument
837 if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy)) in nvme_setup_discard()
840 range = page_address(ns->ctrl->discard_page); in nvme_setup_discard()
843 if (queue_max_discard_segments(req->q) == 1) { in nvme_setup_discard()
844 u64 slba = nvme_sect_to_lba(ns->head, blk_rq_pos(req)); in nvme_setup_discard()
845 u32 nlb = blk_rq_sectors(req) >> (ns->head->lba_shift - 9); in nvme_setup_discard()
853 u64 slba = nvme_sect_to_lba(ns->head, in nvme_setup_discard()
854 bio->bi_iter.bi_sector); in nvme_setup_discard()
855 u32 nlb = bio->bi_iter.bi_size >> ns->head->lba_shift; in nvme_setup_discard()
867 if (virt_to_page(range) == ns->ctrl->discard_page) in nvme_setup_discard()
868 clear_bit_unlock(0, &ns->ctrl->discard_page_busy); in nvme_setup_discard()
875 cmnd->dsm.opcode = nvme_cmd_dsm; in nvme_setup_discard()
876 cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id); in nvme_setup_discard()
877 cmnd->dsm.nr = cpu_to_le32(segments - 1); in nvme_setup_discard()
878 cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); in nvme_setup_discard()
880 bvec_set_virt(&req->special_vec, range, alloc_size); in nvme_setup_discard()
881 req->rq_flags |= RQF_SPECIAL_PAYLOAD; in nvme_setup_discard()
886 static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd, in nvme_set_ref_tag() argument
893 switch (ns->head->guard_type) { in nvme_set_ref_tag()
895 cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req)); in nvme_set_ref_tag()
902 cmnd->rw.reftag = cpu_to_le32(lower); in nvme_set_ref_tag()
903 cmnd->rw.cdw3 = cpu_to_le32(upper); in nvme_set_ref_tag()
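With the 64-bit guard protection format the reference tag is 48 bits wide and no longer fits the 32-bit reftag field, so it is split between reftag (low 32 bits) and CDW3 (upper bits), as above. A standalone sketch (not kernel code) with a made-up tag value:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t ref48 = 0x123456789abcULL;		/* hypothetical 48-bit ref tag */
		unsigned int lower = (unsigned int)(ref48 & 0xffffffffu);
		unsigned int upper = (unsigned int)(ref48 >> 32);	/* only low 16 bits are meaningful */

		printf("reftag=0x%08x cdw3=0x%08x\n", lower, upper);
		return 0;
	}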
910 static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns, in nvme_setup_write_zeroes() argument
915 if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) in nvme_setup_write_zeroes()
916 return nvme_setup_discard(ns, req, cmnd); in nvme_setup_write_zeroes()
918 cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes; in nvme_setup_write_zeroes()
919 cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id); in nvme_setup_write_zeroes()
920 cmnd->write_zeroes.slba = in nvme_setup_write_zeroes()
921 cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req))); in nvme_setup_write_zeroes()
922 cmnd->write_zeroes.length = in nvme_setup_write_zeroes()
923 cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1); in nvme_setup_write_zeroes()
925 if (!(req->cmd_flags & REQ_NOUNMAP) && in nvme_setup_write_zeroes()
926 (ns->head->features & NVME_NS_DEAC)) in nvme_setup_write_zeroes()
927 cmnd->write_zeroes.control |= cpu_to_le16(NVME_WZ_DEAC); in nvme_setup_write_zeroes()
929 if (nvme_ns_has_pi(ns->head)) { in nvme_setup_write_zeroes()
930 cmnd->write_zeroes.control |= cpu_to_le16(NVME_RW_PRINFO_PRACT); in nvme_setup_write_zeroes()
932 switch (ns->head->pi_type) { in nvme_setup_write_zeroes()
935 nvme_set_ref_tag(ns, cmnd, req); in nvme_setup_write_zeroes()
946 * non-atomically. The request issuer should ensure that the write is within
951 struct request_queue *q = req->q; in nvme_valid_atomic_write()
958 u64 mask = boundary_bytes - 1, imask = ~mask; in nvme_valid_atomic_write()
960 u64 end = start + blk_rq_bytes(req) - 1; in nvme_valid_atomic_write()
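The mask arithmetic above checks whether the request straddles an atomic write boundary: with a power-of-two boundary, the first and last byte must land in the same boundary-aligned window. A standalone sketch (not kernel code) with worked numbers:

	#include <stdio.h>
	#include <stdbool.h>
	#include <stdint.h>

	static bool crosses_boundary(uint64_t start, uint64_t len, uint64_t boundary_bytes)
	{
		uint64_t mask = boundary_bytes - 1, imask = ~mask;
		uint64_t end = start + len - 1;

		return (start & imask) != (end & imask);
	}

	int main(void)
	{
		/* 16 KiB boundary: a 4 KiB write at offset 12 KiB stays inside ... */
		printf("%d\n", crosses_boundary(12 * 1024, 4096, 16 * 1024));	/* 0: ends at 16K - 1 */
		/* ... but the same write at offset 14 KiB straddles the 16 KiB line. */
		printf("%d\n", crosses_boundary(14 * 1024, 4096, 16 * 1024));	/* 1 */
		return 0;
	}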
973 static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, in nvme_setup_rw() argument
980 if (req->cmd_flags & REQ_FUA) in nvme_setup_rw()
982 if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD)) in nvme_setup_rw()
985 if (req->cmd_flags & REQ_RAHEAD) in nvme_setup_rw()
988 if (req->cmd_flags & REQ_ATOMIC && !nvme_valid_atomic_write(req)) in nvme_setup_rw()
991 cmnd->rw.opcode = op; in nvme_setup_rw()
992 cmnd->rw.flags = 0; in nvme_setup_rw()
993 cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id); in nvme_setup_rw()
994 cmnd->rw.cdw2 = 0; in nvme_setup_rw()
995 cmnd->rw.cdw3 = 0; in nvme_setup_rw()
996 cmnd->rw.metadata = 0; in nvme_setup_rw()
997 cmnd->rw.slba = in nvme_setup_rw()
998 cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req))); in nvme_setup_rw()
999 cmnd->rw.length = in nvme_setup_rw()
1000 cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1); in nvme_setup_rw()
1001 cmnd->rw.reftag = 0; in nvme_setup_rw()
1002 cmnd->rw.lbat = 0; in nvme_setup_rw()
1003 cmnd->rw.lbatm = 0; in nvme_setup_rw()
1005 if (ns->head->ms) { in nvme_setup_rw()
1013 if (WARN_ON_ONCE(!nvme_ns_has_pi(ns->head))) in nvme_setup_rw()
1018 switch (ns->head->pi_type) { in nvme_setup_rw()
1028 nvme_set_ref_tag(ns, cmnd, req); in nvme_setup_rw()
1033 cmnd->rw.control = cpu_to_le16(control); in nvme_setup_rw()
1034 cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt); in nvme_setup_rw()
1040 if (req->rq_flags & RQF_SPECIAL_PAYLOAD) { in nvme_cleanup_cmd()
1041 struct nvme_ctrl *ctrl = nvme_req(req)->ctrl; in nvme_cleanup_cmd()
1043 if (req->special_vec.bv_page == ctrl->discard_page) in nvme_cleanup_cmd()
1044 clear_bit_unlock(0, &ctrl->discard_page_busy); in nvme_cleanup_cmd()
1046 kfree(bvec_virt(&req->special_vec)); in nvme_cleanup_cmd()
1047 req->rq_flags &= ~RQF_SPECIAL_PAYLOAD; in nvme_cleanup_cmd()
1052 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req) in nvme_setup_cmd() argument
1054 struct nvme_command *cmd = nvme_req(req)->cmd; in nvme_setup_cmd()
1057 if (!(req->rq_flags & RQF_DONTPREP)) in nvme_setup_cmd()
1066 nvme_setup_flush(ns, cmd); in nvme_setup_cmd()
1070 ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET); in nvme_setup_cmd()
1073 ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN); in nvme_setup_cmd()
1076 ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE); in nvme_setup_cmd()
1079 ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH); in nvme_setup_cmd()
1082 ret = nvme_setup_write_zeroes(ns, req, cmd); in nvme_setup_cmd()
1085 ret = nvme_setup_discard(ns, req, cmd); in nvme_setup_cmd()
1088 ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read); in nvme_setup_cmd()
1091 ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write); in nvme_setup_cmd()
1094 ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append); in nvme_setup_cmd()
1101 cmd->common.command_id = nvme_cid(req); in nvme_setup_cmd()
1118 if (nvme_req(rq)->flags & NVME_REQ_CANCELLED) in nvme_execute_rq()
1119 return -EINTR; in nvme_execute_rq()
1120 if (nvme_req(rq)->status) in nvme_execute_rq()
1121 return nvme_req(rq)->status; in nvme_execute_rq()
1146 qid - 1); in __nvme_submit_sync_cmd()
1152 req->cmd_flags &= ~REQ_FAILFAST_DRIVER; in __nvme_submit_sync_cmd()
1162 *result = nvme_req(req)->result; in __nvme_submit_sync_cmd()
1177 u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode) in nvme_command_effects() argument
1181 if (ns) { in nvme_command_effects()
1182 effects = le32_to_cpu(ns->head->effects->iocs[opcode]); in nvme_command_effects()
1184 dev_warn_once(ctrl->device, in nvme_command_effects()
1195 effects = le32_to_cpu(ctrl->effects->acs[opcode]); in nvme_command_effects()
1206 u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode) in nvme_passthru_start() argument
1208 u32 effects = nvme_command_effects(ctrl, ns, opcode); in nvme_passthru_start()
1215 mutex_lock(&ctrl->scan_lock); in nvme_passthru_start()
1216 mutex_lock(&ctrl->subsys->lock); in nvme_passthru_start()
1217 nvme_mpath_start_freeze(ctrl->subsys); in nvme_passthru_start()
1218 nvme_mpath_wait_freeze(ctrl->subsys); in nvme_passthru_start()
1226 void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects, in nvme_passthru_end() argument
1231 nvme_mpath_unfreeze(ctrl->subsys); in nvme_passthru_end()
1232 mutex_unlock(&ctrl->subsys->lock); in nvme_passthru_end()
1233 mutex_unlock(&ctrl->scan_lock); in nvme_passthru_end()
1237 &ctrl->flags)) { in nvme_passthru_end()
1238 dev_info(ctrl->device, in nvme_passthru_end()
1244 flush_work(&ctrl->scan_work); in nvme_passthru_end()
1246 if (ns) in nvme_passthru_end()
1249 switch (cmd->common.opcode) { in nvme_passthru_end()
1251 switch (le32_to_cpu(cmd->common.cdw10) & 0xFF) { in nvme_passthru_end()
1279 unsigned long delay = ctrl->kato * HZ / 2; in nvme_keep_alive_work_period()
1287 if (ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) in nvme_keep_alive_work_period()
1296 unsigned long ka_next_check_tm = ctrl->ka_last_check_time + delay; in nvme_queue_keep_alive_work()
1301 delay = ka_next_check_tm - now; in nvme_queue_keep_alive_work()
1303 queue_delayed_work(nvme_wq, &ctrl->ka_work, delay); in nvme_queue_keep_alive_work()
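KATO is specified in seconds and the keep-alive worker runs at roughly half that period; with traffic-based keep-alive the period is commonly halved again (an assumption in this sketch) so a completion observed just after a check cannot push the next keep-alive past the controller's timeout. A standalone sketch (not kernel code) of the arithmetic:

	#include <stdio.h>
	#include <stdbool.h>

	static unsigned int keep_alive_period_ms(unsigned int kato_sec, bool tbkas)
	{
		unsigned int delay = kato_sec * 1000 / 2;

		if (tbkas)		/* assumed: run twice as often with TBKAS */
			delay /= 2;
		return delay;
	}

	int main(void)
	{
		printf("kato=5s         -> run every %u ms\n", keep_alive_period_ms(5, false)); /* 2500 */
		printf("kato=5s (TBKAS) -> run every %u ms\n", keep_alive_period_ms(5, true));  /* 1250 */
		return 0;
	}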
1309 unsigned long rtt = jiffies - (rq->deadline - rq->timeout); in nvme_keep_alive_finish()
1318 delay -= rtt; in nvme_keep_alive_finish()
1320 dev_warn(ctrl->device, "long keepalive RTT (%u ms)\n", in nvme_keep_alive_finish()
1326 dev_err(ctrl->device, in nvme_keep_alive_finish()
1332 ctrl->ka_last_check_time = jiffies; in nvme_keep_alive_finish()
1333 ctrl->comp_seen = false; in nvme_keep_alive_finish()
1335 queue_delayed_work(nvme_wq, &ctrl->ka_work, delay); in nvme_keep_alive_finish()
1342 bool comp_seen = ctrl->comp_seen; in nvme_keep_alive_work()
1346 ctrl->ka_last_check_time = jiffies; in nvme_keep_alive_work()
1348 if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) { in nvme_keep_alive_work()
1349 dev_dbg(ctrl->device, in nvme_keep_alive_work()
1350 "reschedule traffic based keep-alive timer\n"); in nvme_keep_alive_work()
1351 ctrl->comp_seen = false; in nvme_keep_alive_work()
1356 rq = blk_mq_alloc_request(ctrl->admin_q, nvme_req_op(&ctrl->ka_cmd), in nvme_keep_alive_work()
1360 dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq)); in nvme_keep_alive_work()
1364 nvme_init_request(rq, &ctrl->ka_cmd); in nvme_keep_alive_work()
1366 rq->timeout = ctrl->kato * HZ; in nvme_keep_alive_work()
1374 if (unlikely(ctrl->kato == 0)) in nvme_start_keep_alive()
1382 if (unlikely(ctrl->kato == 0)) in nvme_stop_keep_alive()
1385 cancel_delayed_work_sync(&ctrl->ka_work); in nvme_stop_keep_alive()
1393 DIV_ROUND_UP(le32_to_cpu(cmd->common.cdw11), 1000); in nvme_update_keep_alive()
1395 dev_info(ctrl->device, in nvme_update_keep_alive()
1397 ctrl->kato * 1000 / 2, new_kato * 1000 / 2); in nvme_update_keep_alive()
1400 ctrl->kato = new_kato; in nvme_update_keep_alive()
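The Keep Alive Timer feature value carried in CDW11 is in milliseconds while the driver tracks kato in whole seconds, hence the round-up division above. A standalone sketch (not kernel code) with a hypothetical value:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int cdw11_ms = 2500;			/* hypothetical KATO of 2.5 s */
		unsigned int new_kato = DIV_ROUND_UP(cdw11_ms, 1000);

		printf("cdw11=%u ms -> kato=%u s, worker period %u ms\n",
		       cdw11_ms, new_kato, new_kato * 1000 / 2);	/* 3 s, 1500 ms */
		return 0;
	}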
1409 if (ctrl->vs >= NVME_VS(1, 2, 0)) in nvme_id_cns_ok()
1420 if (ctrl->vs >= NVME_VS(1, 1, 0) && in nvme_id_cns_ok()
1421 !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) in nvme_id_cns_ok()
1435 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */ in nvme_identify_ctrl()
1441 return -ENOMEM; in nvme_identify_ctrl()
1443 error = nvme_submit_sync_cmd(dev->admin_q, &c, *id, in nvme_identify_ctrl()
1458 switch (cur->nidt) { in nvme_process_ns_desc()
1460 if (cur->nidl != NVME_NIDT_EUI64_LEN) { in nvme_process_ns_desc()
1461 dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n", in nvme_process_ns_desc()
1462 warn_str, cur->nidl); in nvme_process_ns_desc()
1463 return -1; in nvme_process_ns_desc()
1465 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) in nvme_process_ns_desc()
1467 memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN); in nvme_process_ns_desc()
1470 if (cur->nidl != NVME_NIDT_NGUID_LEN) { in nvme_process_ns_desc()
1471 dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n", in nvme_process_ns_desc()
1472 warn_str, cur->nidl); in nvme_process_ns_desc()
1473 return -1; in nvme_process_ns_desc()
1475 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) in nvme_process_ns_desc()
1477 memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN); in nvme_process_ns_desc()
1480 if (cur->nidl != NVME_NIDT_UUID_LEN) { in nvme_process_ns_desc()
1481 dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n", in nvme_process_ns_desc()
1482 warn_str, cur->nidl); in nvme_process_ns_desc()
1483 return -1; in nvme_process_ns_desc()
1485 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) in nvme_process_ns_desc()
1487 uuid_copy(&ids->uuid, data + sizeof(*cur)); in nvme_process_ns_desc()
1490 if (cur->nidl != NVME_NIDT_CSI_LEN) { in nvme_process_ns_desc()
1491 dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n", in nvme_process_ns_desc()
1492 warn_str, cur->nidl); in nvme_process_ns_desc()
1493 return -1; in nvme_process_ns_desc()
1495 memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN); in nvme_process_ns_desc()
1500 return cur->nidl; in nvme_process_ns_desc()
1512 if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl)) in nvme_identify_ns_descs()
1514 if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST) in nvme_identify_ns_descs()
1518 c.identify.nsid = cpu_to_le32(info->nsid); in nvme_identify_ns_descs()
1523 return -ENOMEM; in nvme_identify_ns_descs()
1525 status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data, in nvme_identify_ns_descs()
1528 dev_warn(ctrl->device, in nvme_identify_ns_descs()
1530 info->nsid, status); in nvme_identify_ns_descs()
1537 if (cur->nidl == 0) in nvme_identify_ns_descs()
1540 len = nvme_process_ns_desc(ctrl, &info->ids, cur, &csi_seen); in nvme_identify_ns_descs()
1548 dev_warn(ctrl->device, "Command set not reported for nsid:%d\n", in nvme_identify_ns_descs()
1549 info->nsid); in nvme_identify_ns_descs()
1550 status = -EINVAL; in nvme_identify_ns_descs()
1564 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */ in nvme_identify_ns()
1571 return -ENOMEM; in nvme_identify_ns()
1573 error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id)); in nvme_identify_ns()
1575 dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error); in nvme_identify_ns()
1585 struct nvme_ns_ids *ids = &info->ids; in nvme_ns_info_from_identify()
1589 ret = nvme_identify_ns(ctrl, info->nsid, &id); in nvme_ns_info_from_identify()
1593 if (id->ncap == 0) { in nvme_ns_info_from_identify()
1595 info->is_removed = true; in nvme_ns_info_from_identify()
1596 ret = -ENODEV; in nvme_ns_info_from_identify()
1600 info->anagrpid = id->anagrpid; in nvme_ns_info_from_identify()
1601 info->is_shared = id->nmic & NVME_NS_NMIC_SHARED; in nvme_ns_info_from_identify()
1602 info->is_readonly = id->nsattr & NVME_NS_ATTR_RO; in nvme_ns_info_from_identify()
1603 info->is_ready = true; in nvme_ns_info_from_identify()
1604 if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) { in nvme_ns_info_from_identify()
1605 dev_info(ctrl->device, in nvme_ns_info_from_identify()
1608 if (ctrl->vs >= NVME_VS(1, 1, 0) && in nvme_ns_info_from_identify()
1609 !memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) in nvme_ns_info_from_identify()
1610 memcpy(ids->eui64, id->eui64, sizeof(ids->eui64)); in nvme_ns_info_from_identify()
1611 if (ctrl->vs >= NVME_VS(1, 2, 0) && in nvme_ns_info_from_identify()
1612 !memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) in nvme_ns_info_from_identify()
1613 memcpy(ids->nguid, id->nguid, sizeof(ids->nguid)); in nvme_ns_info_from_identify()
1627 .identify.nsid = cpu_to_le32(info->nsid), in nvme_ns_info_from_id_cs_indep()
1634 return -ENOMEM; in nvme_ns_info_from_id_cs_indep()
1636 ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id)); in nvme_ns_info_from_id_cs_indep()
1638 info->anagrpid = id->anagrpid; in nvme_ns_info_from_id_cs_indep()
1639 info->is_shared = id->nmic & NVME_NS_NMIC_SHARED; in nvme_ns_info_from_id_cs_indep()
1640 info->is_readonly = id->nsattr & NVME_NS_ATTR_RO; in nvme_ns_info_from_id_cs_indep()
1641 info->is_ready = id->nstat & NVME_NSTAT_NRDY; in nvme_ns_info_from_id_cs_indep()
1658 ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, in nvme_features()
1685 u32 q_count = (*count - 1) | ((*count - 1) << 16); in nvme_set_queue_count()
1700 dev_err(ctrl->device, "Could not set queue count (%d)\n", status); in nvme_set_queue_count()
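The Number of Queues feature packs the requested submission and completion queue counts, both zero-based, into the low and high halves of the 32-bit feature value; the completion result is typically decoded the same way (an assumption in this sketch), and only the smaller of the two grants plus one is usable. A standalone sketch (not kernel code) with hypothetical counts:

	#include <stdio.h>

	int main(void)
	{
		unsigned int want = 32;					/* I/O queues we ask for */
		unsigned int q_count = (want - 1) | ((want - 1) << 16);	/* 0x001f001f */

		unsigned int result = 0x000f0007;	/* assumed grant: 16 CQs / 8 SQs, 0-based */
		unsigned int granted = ((result & 0xffff) < (result >> 16) ?
					(result & 0xffff) : (result >> 16)) + 1;

		printf("cdw11=0x%08x -> usable I/O queues: %u\n", q_count, granted);	/* 8 */
		return 0;
	}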
1717 u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED; in nvme_enable_aen()
1726 dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n", in nvme_enable_aen()
1729 queue_work(nvme_wq, &ctrl->async_event_work); in nvme_enable_aen()
1732 static int nvme_ns_open(struct nvme_ns *ns) in nvme_ns_open() argument
1736 if (WARN_ON_ONCE(nvme_ns_head_multipath(ns->head))) in nvme_ns_open()
1738 if (!nvme_get_ns(ns)) in nvme_ns_open()
1740 if (!try_module_get(ns->ctrl->ops->module)) in nvme_ns_open()
1746 nvme_put_ns(ns); in nvme_ns_open()
1748 return -ENXIO; in nvme_ns_open()
1751 static void nvme_ns_release(struct nvme_ns *ns) in nvme_ns_release() argument
1754 module_put(ns->ctrl->ops->module); in nvme_ns_release()
1755 nvme_put_ns(ns); in nvme_ns_release()
1760 return nvme_ns_open(disk->private_data); in nvme_open()
1765 nvme_ns_release(disk->private_data); in nvme_release()
1771 geo->heads = 1 << 6; in nvme_getgeo()
1772 geo->sectors = 1 << 5; in nvme_getgeo()
1773 geo->cylinders = get_capacity(bdev->bd_disk) >> 11; in nvme_getgeo()
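The synthetic CHS geometry uses 64 heads and 32 sectors per track, i.e. 2048 sectors per cylinder, so the cylinder count is simply the 512-byte-sector capacity shifted right by 11. A standalone sketch (not kernel code) with a hypothetical capacity:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t capacity_sectors = 1953525168ULL;	/* hypothetical ~931.5 GiB disk */
		unsigned int heads = 1 << 6, sectors = 1 << 5;
		uint64_t cylinders = capacity_sectors >> 11;

		printf("C/H/S = %llu/%u/%u\n",
		       (unsigned long long)cylinders, heads, sectors);	/* 953869/64/32 */
		return 0;
	}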
1780 struct blk_integrity *bi = &lim->integrity; in nvme_init_integrity()
1784 if (!head->ms) in nvme_init_integrity()
1792 !(head->features & NVME_NS_METADATA_SUPPORTED)) in nvme_init_integrity()
1795 switch (head->pi_type) { in nvme_init_integrity()
1797 switch (head->guard_type) { in nvme_init_integrity()
1799 bi->csum_type = BLK_INTEGRITY_CSUM_CRC; in nvme_init_integrity()
1800 bi->tag_size = sizeof(u16) + sizeof(u32); in nvme_init_integrity()
1801 bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE; in nvme_init_integrity()
1804 bi->csum_type = BLK_INTEGRITY_CSUM_CRC64; in nvme_init_integrity()
1805 bi->tag_size = sizeof(u16) + 6; in nvme_init_integrity()
1806 bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE; in nvme_init_integrity()
1814 switch (head->guard_type) { in nvme_init_integrity()
1816 bi->csum_type = BLK_INTEGRITY_CSUM_CRC; in nvme_init_integrity()
1817 bi->tag_size = sizeof(u16); in nvme_init_integrity()
1818 bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE | in nvme_init_integrity()
1822 bi->csum_type = BLK_INTEGRITY_CSUM_CRC64; in nvme_init_integrity()
1823 bi->tag_size = sizeof(u16); in nvme_init_integrity()
1824 bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE | in nvme_init_integrity()
1835 bi->tuple_size = head->ms; in nvme_init_integrity()
1836 bi->pi_offset = info->pi_offset; in nvme_init_integrity()
1840 static void nvme_config_discard(struct nvme_ns *ns, struct queue_limits *lim) in nvme_config_discard() argument
1842 struct nvme_ctrl *ctrl = ns->ctrl; in nvme_config_discard()
1844 if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns->head, UINT_MAX)) in nvme_config_discard()
1845 lim->max_hw_discard_sectors = in nvme_config_discard()
1846 nvme_lba_to_sect(ns->head, ctrl->dmrsl); in nvme_config_discard()
1847 else if (ctrl->oncs & NVME_CTRL_ONCS_DSM) in nvme_config_discard()
1848 lim->max_hw_discard_sectors = UINT_MAX; in nvme_config_discard()
1850 lim->max_hw_discard_sectors = 0; in nvme_config_discard()
1852 lim->discard_granularity = lim->logical_block_size; in nvme_config_discard()
1854 if (ctrl->dmrl) in nvme_config_discard()
1855 lim->max_discard_segments = ctrl->dmrl; in nvme_config_discard()
1857 lim->max_discard_segments = NVME_DSM_MAX_RANGES; in nvme_config_discard()
1862 return uuid_equal(&a->uuid, &b->uuid) && in nvme_ns_ids_equal()
1863 memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 && in nvme_ns_ids_equal()
1864 memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 && in nvme_ns_ids_equal()
1865 a->csi == b->csi; in nvme_ns_ids_equal()
1882 return -ENOMEM; in nvme_identify_ns_nvm()
1884 ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, nvm, sizeof(*nvm)); in nvme_identify_ns_nvm()
1895 u32 elbaf = le32_to_cpu(nvm->elbaf[nvme_lbaf_index(id->flbas)]); in nvme_configure_pi_elbas()
1903 if ((nvm->pic & NVME_ID_NS_NVM_QPIFS) && in nvme_configure_pi_elbas()
1907 head->guard_type = guard_type; in nvme_configure_pi_elbas()
1908 switch (head->guard_type) { in nvme_configure_pi_elbas()
1910 head->pi_size = sizeof(struct crc64_pi_tuple); in nvme_configure_pi_elbas()
1913 head->pi_size = sizeof(struct t10_pi_tuple); in nvme_configure_pi_elbas()
1924 head->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS); in nvme_configure_metadata()
1925 head->pi_type = 0; in nvme_configure_metadata()
1926 head->pi_size = 0; in nvme_configure_metadata()
1927 head->ms = le16_to_cpu(id->lbaf[nvme_lbaf_index(id->flbas)].ms); in nvme_configure_metadata()
1928 if (!head->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) in nvme_configure_metadata()
1931 if (nvm && (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) { in nvme_configure_metadata()
1934 head->pi_size = sizeof(struct t10_pi_tuple); in nvme_configure_metadata()
1935 head->guard_type = NVME_NVM_NS_16B_GUARD; in nvme_configure_metadata()
1938 if (head->pi_size && head->ms >= head->pi_size) in nvme_configure_metadata()
1939 head->pi_type = id->dps & NVME_NS_DPS_PI_MASK; in nvme_configure_metadata()
1940 if (!(id->dps & NVME_NS_DPS_PI_FIRST)) { in nvme_configure_metadata()
1942 head->pi_type = 0; in nvme_configure_metadata()
1944 info->pi_offset = head->ms - head->pi_size; in nvme_configure_metadata()
1947 if (ctrl->ops->flags & NVME_F_FABRICS) { in nvme_configure_metadata()
1953 if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT))) in nvme_configure_metadata()
1956 head->features |= NVME_NS_EXT_LBAS; in nvme_configure_metadata()
1967 if (ctrl->max_integrity_segments && nvme_ns_has_pi(head)) in nvme_configure_metadata()
1968 head->features |= NVME_NS_METADATA_SUPPORTED; in nvme_configure_metadata()
1976 if (id->flbas & NVME_NS_FLBAS_META_EXT) in nvme_configure_metadata()
1977 head->features |= NVME_NS_EXT_LBAS; in nvme_configure_metadata()
1979 head->features |= NVME_NS_METADATA_SUPPORTED; in nvme_configure_metadata()
1984 static void nvme_update_atomic_write_disk_info(struct nvme_ns *ns, in nvme_update_atomic_write_disk_info() argument
1990 if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf) { in nvme_update_atomic_write_disk_info()
1991 if (le16_to_cpu(id->nabspf)) in nvme_update_atomic_write_disk_info()
1992 boundary = (le16_to_cpu(id->nabspf) + 1) * bs; in nvme_update_atomic_write_disk_info()
1994 lim->atomic_write_hw_max = atomic_bs; in nvme_update_atomic_write_disk_info()
1995 lim->atomic_write_hw_boundary = boundary; in nvme_update_atomic_write_disk_info()
1996 lim->atomic_write_hw_unit_min = bs; in nvme_update_atomic_write_disk_info()
1997 lim->atomic_write_hw_unit_max = rounddown_pow_of_two(atomic_bs); in nvme_update_atomic_write_disk_info()
2002 return ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> SECTOR_SHIFT) + 1; in nvme_max_drv_segments()
2008 lim->max_hw_sectors = ctrl->max_hw_sectors; in nvme_set_ctrl_limits()
2009 lim->max_segments = min_t(u32, USHRT_MAX, in nvme_set_ctrl_limits()
2010 min_not_zero(nvme_max_drv_segments(ctrl), ctrl->max_segments)); in nvme_set_ctrl_limits()
2011 lim->max_integrity_segments = ctrl->max_integrity_segments; in nvme_set_ctrl_limits()
2012 lim->virt_boundary_mask = NVME_CTRL_PAGE_SIZE - 1; in nvme_set_ctrl_limits()
2013 lim->max_segment_size = UINT_MAX; in nvme_set_ctrl_limits()
2014 lim->dma_alignment = 3; in nvme_set_ctrl_limits()
2017 static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id, in nvme_update_disk_info() argument
2020 struct nvme_ns_head *head = ns->head; in nvme_update_disk_info()
2021 u32 bs = 1U << head->lba_shift; in nvme_update_disk_info()
2030 if (head->lba_shift > PAGE_SHIFT || head->lba_shift < SECTOR_SHIFT) { in nvme_update_disk_info()
2036 if (id->nabo == 0) { in nvme_update_disk_info()
2042 if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf) in nvme_update_disk_info()
2043 atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs; in nvme_update_disk_info()
2045 atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs; in nvme_update_disk_info()
2047 nvme_update_atomic_write_disk_info(ns, id, lim, bs, atomic_bs); in nvme_update_disk_info()
2050 if (id->nsfeat & NVME_NS_FEAT_IO_OPT) { in nvme_update_disk_info()
2052 phys_bs = bs * (1 + le16_to_cpu(id->npwg)); in nvme_update_disk_info()
2054 if (id->nows) in nvme_update_disk_info()
2055 io_opt = bs * (1 + le16_to_cpu(id->nows)); in nvme_update_disk_info()
2063 lim->logical_block_size = bs; in nvme_update_disk_info()
2064 lim->physical_block_size = min(phys_bs, atomic_bs); in nvme_update_disk_info()
2065 lim->io_min = phys_bs; in nvme_update_disk_info()
2066 lim->io_opt = io_opt; in nvme_update_disk_info()
2067 if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) in nvme_update_disk_info()
2068 lim->max_write_zeroes_sectors = UINT_MAX; in nvme_update_disk_info()
2070 lim->max_write_zeroes_sectors = ns->ctrl->max_zeroes_sectors; in nvme_update_disk_info()
2074 static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info) in nvme_ns_is_readonly() argument
2076 return info->is_readonly || test_bit(NVME_NS_FORCE_RO, &ns->flags); in nvme_ns_is_readonly()
2085 static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id, in nvme_set_chunk_sectors() argument
2088 struct nvme_ctrl *ctrl = ns->ctrl; in nvme_set_chunk_sectors()
2091 if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && in nvme_set_chunk_sectors()
2092 is_power_of_2(ctrl->max_hw_sectors)) in nvme_set_chunk_sectors()
2093 iob = ctrl->max_hw_sectors; in nvme_set_chunk_sectors()
2095 iob = nvme_lba_to_sect(ns->head, le16_to_cpu(id->noiob)); in nvme_set_chunk_sectors()
2101 if (nvme_first_scan(ns->disk)) in nvme_set_chunk_sectors()
2103 ns->disk->disk_name, iob); in nvme_set_chunk_sectors()
2107 if (blk_queue_is_zoned(ns->disk->queue)) { in nvme_set_chunk_sectors()
2108 if (nvme_first_scan(ns->disk)) in nvme_set_chunk_sectors()
2110 ns->disk->disk_name); in nvme_set_chunk_sectors()
2114 lim->chunk_sectors = iob; in nvme_set_chunk_sectors()
2117 static int nvme_update_ns_info_generic(struct nvme_ns *ns, in nvme_update_ns_info_generic() argument
2123 blk_mq_freeze_queue(ns->disk->queue); in nvme_update_ns_info_generic()
2124 lim = queue_limits_start_update(ns->disk->queue); in nvme_update_ns_info_generic()
2125 nvme_set_ctrl_limits(ns->ctrl, &lim); in nvme_update_ns_info_generic()
2126 ret = queue_limits_commit_update(ns->disk->queue, &lim); in nvme_update_ns_info_generic()
2127 set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info)); in nvme_update_ns_info_generic()
2128 blk_mq_unfreeze_queue(ns->disk->queue); in nvme_update_ns_info_generic()
2130 /* Hide the block-interface for these devices */ in nvme_update_ns_info_generic()
2132 ret = -ENODEV; in nvme_update_ns_info_generic()
2136 static int nvme_update_ns_info_block(struct nvme_ns *ns, in nvme_update_ns_info_block() argument
2147 ret = nvme_identify_ns(ns->ctrl, info->nsid, &id); in nvme_update_ns_info_block()
2151 if (id->ncap == 0) { in nvme_update_ns_info_block()
2153 info->is_removed = true; in nvme_update_ns_info_block()
2154 ret = -ENXIO; in nvme_update_ns_info_block()
2157 lbaf = nvme_lbaf_index(id->flbas); in nvme_update_ns_info_block()
2159 if (ns->ctrl->ctratt & NVME_CTRL_ATTR_ELBAS) { in nvme_update_ns_info_block()
2160 ret = nvme_identify_ns_nvm(ns->ctrl, info->nsid, &nvm); in nvme_update_ns_info_block()
2166 ns->head->ids.csi == NVME_CSI_ZNS) { in nvme_update_ns_info_block()
2167 ret = nvme_query_zone_info(ns, lbaf, &zi); in nvme_update_ns_info_block()
2172 blk_mq_freeze_queue(ns->disk->queue); in nvme_update_ns_info_block()
2173 ns->head->lba_shift = id->lbaf[lbaf].ds; in nvme_update_ns_info_block()
2174 ns->head->nuse = le64_to_cpu(id->nuse); in nvme_update_ns_info_block()
2175 capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze)); in nvme_update_ns_info_block()
2177 lim = queue_limits_start_update(ns->disk->queue); in nvme_update_ns_info_block()
2178 nvme_set_ctrl_limits(ns->ctrl, &lim); in nvme_update_ns_info_block()
2179 nvme_configure_metadata(ns->ctrl, ns->head, id, nvm, info); in nvme_update_ns_info_block()
2180 nvme_set_chunk_sectors(ns, id, &lim); in nvme_update_ns_info_block()
2181 if (!nvme_update_disk_info(ns, id, &lim)) in nvme_update_ns_info_block()
2183 nvme_config_discard(ns, &lim); in nvme_update_ns_info_block()
2185 ns->head->ids.csi == NVME_CSI_ZNS) in nvme_update_ns_info_block()
2186 nvme_update_zone_info(ns, &lim, &zi); in nvme_update_ns_info_block()
2188 if (ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT) in nvme_update_ns_info_block()
2194 * Register a metadata profile for PI, or the plain non-integrity NVMe in nvme_update_ns_info_block()
2199 if (!nvme_init_integrity(ns->head, &lim, info)) in nvme_update_ns_info_block()
2202 ret = queue_limits_commit_update(ns->disk->queue, &lim); in nvme_update_ns_info_block()
2204 blk_mq_unfreeze_queue(ns->disk->queue); in nvme_update_ns_info_block()
2208 set_capacity_and_notify(ns->disk, capacity); in nvme_update_ns_info_block()
2213 * require that, it must be a no-op if reads from deallocated data in nvme_update_ns_info_block()
2216 if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3))) in nvme_update_ns_info_block()
2217 ns->head->features |= NVME_NS_DEAC; in nvme_update_ns_info_block()
2218 set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info)); in nvme_update_ns_info_block()
2219 set_bit(NVME_NS_READY, &ns->flags); in nvme_update_ns_info_block()
2220 blk_mq_unfreeze_queue(ns->disk->queue); in nvme_update_ns_info_block()
2222 if (blk_queue_is_zoned(ns->queue)) { in nvme_update_ns_info_block()
2223 ret = blk_revalidate_disk_zones(ns->disk); in nvme_update_ns_info_block()
2224 if (ret && !nvme_first_scan(ns->disk)) in nvme_update_ns_info_block()
2235 static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info) in nvme_update_ns_info() argument
2240 switch (info->ids.csi) { in nvme_update_ns_info()
2243 dev_info(ns->ctrl->device, in nvme_update_ns_info()
2245 info->nsid); in nvme_update_ns_info()
2246 ret = nvme_update_ns_info_generic(ns, info); in nvme_update_ns_info()
2249 ret = nvme_update_ns_info_block(ns, info); in nvme_update_ns_info()
2252 ret = nvme_update_ns_info_block(ns, info); in nvme_update_ns_info()
2255 dev_info(ns->ctrl->device, in nvme_update_ns_info()
2257 info->nsid, info->ids.csi); in nvme_update_ns_info()
2258 ret = nvme_update_ns_info_generic(ns, info); in nvme_update_ns_info()
2266 if (ret == -ENODEV) { in nvme_update_ns_info()
2267 ns->disk->flags |= GENHD_FL_HIDDEN; in nvme_update_ns_info()
2268 set_bit(NVME_NS_READY, &ns->flags); in nvme_update_ns_info()
2273 if (!ret && nvme_ns_head_multipath(ns->head)) { in nvme_update_ns_info()
2274 struct queue_limits *ns_lim = &ns->disk->queue->limits; in nvme_update_ns_info()
2277 blk_mq_freeze_queue(ns->head->disk->queue); in nvme_update_ns_info()
2293 lim = queue_limits_start_update(ns->head->disk->queue); in nvme_update_ns_info()
2294 lim.logical_block_size = ns_lim->logical_block_size; in nvme_update_ns_info()
2295 lim.physical_block_size = ns_lim->physical_block_size; in nvme_update_ns_info()
2296 lim.io_min = ns_lim->io_min; in nvme_update_ns_info()
2297 lim.io_opt = ns_lim->io_opt; in nvme_update_ns_info()
2298 queue_limits_stack_bdev(&lim, ns->disk->part0, 0, in nvme_update_ns_info()
2299 ns->head->disk->disk_name); in nvme_update_ns_info()
2301 ns->head->disk->flags |= GENHD_FL_HIDDEN; in nvme_update_ns_info()
2303 nvme_init_integrity(ns->head, &lim, info); in nvme_update_ns_info()
2304 ret = queue_limits_commit_update(ns->head->disk->queue, &lim); in nvme_update_ns_info()
2306 set_capacity_and_notify(ns->head->disk, get_capacity(ns->disk)); in nvme_update_ns_info()
2307 set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info)); in nvme_update_ns_info()
2308 nvme_mpath_revalidate_paths(ns); in nvme_update_ns_info()
2310 blk_mq_unfreeze_queue(ns->head->disk->queue); in nvme_update_ns_info()
2316 int nvme_ns_get_unique_id(struct nvme_ns *ns, u8 id[16], in nvme_ns_get_unique_id() argument
2319 struct nvme_ns_ids *ids = &ns->head->ids; in nvme_ns_get_unique_id()
2322 return -EINVAL; in nvme_ns_get_unique_id()
2324 if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) { in nvme_ns_get_unique_id()
2325 memcpy(id, &ids->nguid, sizeof(ids->nguid)); in nvme_ns_get_unique_id()
2326 return sizeof(ids->nguid); in nvme_ns_get_unique_id()
2328 if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) { in nvme_ns_get_unique_id()
2329 memcpy(id, &ids->eui64, sizeof(ids->eui64)); in nvme_ns_get_unique_id()
2330 return sizeof(ids->eui64); in nvme_ns_get_unique_id()
2333 return -EINVAL; in nvme_ns_get_unique_id()
2339 return nvme_ns_get_unique_id(disk->private_data, id, type); in nvme_get_unique_id()
2357 return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, in nvme_sec_submit()
2363 if (ctrl->oacs & NVME_CTRL_OACS_SEC_SUPP) { in nvme_configure_opal()
2364 if (!ctrl->opal_dev) in nvme_configure_opal()
2365 ctrl->opal_dev = init_opal_dev(ctrl, &nvme_sec_submit); in nvme_configure_opal()
2367 opal_unlock_from_suspend(ctrl->opal_dev); in nvme_configure_opal()
2369 free_opal_dev(ctrl->opal_dev); in nvme_configure_opal()
2370 ctrl->opal_dev = NULL; in nvme_configure_opal()
2383 return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb, in nvme_report_zones()
2409 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { in nvme_wait_ready()
2411 return -ENODEV; in nvme_wait_ready()
2417 return -EINTR; in nvme_wait_ready()
2419 dev_err(ctrl->device, in nvme_wait_ready()
2422 return -ENODEV; in nvme_wait_ready()
2433 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; in nvme_disable_ctrl()
2435 ctrl->ctrl_config |= NVME_CC_SHN_NORMAL; in nvme_disable_ctrl()
2437 ctrl->ctrl_config &= ~NVME_CC_ENABLE; in nvme_disable_ctrl()
2439 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); in nvme_disable_ctrl()
2446 ctrl->shutdown_timeout, "shutdown"); in nvme_disable_ctrl()
2448 if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) in nvme_disable_ctrl()
2451 (NVME_CAP_TIMEOUT(ctrl->cap) + 1) / 2, "reset"); in nvme_disable_ctrl()
2461 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap); in nvme_enable_ctrl()
2463 dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret); in nvme_enable_ctrl()
2466 dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12; in nvme_enable_ctrl()
2469 dev_err(ctrl->device, in nvme_enable_ctrl()
2472 return -ENODEV; in nvme_enable_ctrl()
2475 if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI) in nvme_enable_ctrl()
2476 ctrl->ctrl_config = NVME_CC_CSS_CSI; in nvme_enable_ctrl()
2478 ctrl->ctrl_config = NVME_CC_CSS_NVM; in nvme_enable_ctrl()
2486 ctrl->ctrl_config &= ~NVME_CC_CRIME; in nvme_enable_ctrl()
2488 ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT; in nvme_enable_ctrl()
2489 ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE; in nvme_enable_ctrl()
2490 ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES; in nvme_enable_ctrl()
2491 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); in nvme_enable_ctrl()
2496 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap); in nvme_enable_ctrl()
2500 timeout = NVME_CAP_TIMEOUT(ctrl->cap); in nvme_enable_ctrl()
2501 if (ctrl->cap & NVME_CAP_CRMS_CRWMS) { in nvme_enable_ctrl()
2504 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto); in nvme_enable_ctrl()
2506 dev_err(ctrl->device, "Reading CRTO failed (%d)\n", in nvme_enable_ctrl()
2519 dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n", in nvme_enable_ctrl()
2520 crto, ctrl->cap); in nvme_enable_ctrl()
2525 ctrl->ctrl_config |= NVME_CC_ENABLE; in nvme_enable_ctrl()
2526 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); in nvme_enable_ctrl()
2539 if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP)) in nvme_configure_timestamp()
2546 dev_warn_once(ctrl->device, in nvme_configure_timestamp()
2558 if (ctrl->crdt[0]) in nvme_configure_host_options()
2560 if (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS) in nvme_configure_host_options()
2570 host->acre = acre; in nvme_configure_host_options()
2571 host->lbafee = lbafee; in nvme_configure_host_options()
2613 * - If the parameters provide explicit timeouts and tolerances, they will be
2614 * used to build a table with up to 2 non-operational states to transition to.
2620 * - If not, we'll configure the table with a simple heuristic: we are willing
2623 * lower-power non-operational state after waiting 50 * (enlat + exlat)
2627 * We will not autonomously enter any non-operational state for which the total
2638 int max_ps = -1; in nvme_configure_apst()
2647 if (!ctrl->apsta) in nvme_configure_apst()
2650 if (ctrl->npss > 31) { in nvme_configure_apst()
2651 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n"); in nvme_configure_apst()
2659 if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) { in nvme_configure_apst()
2661 dev_dbg(ctrl->device, "APST disabled\n"); in nvme_configure_apst()
2666 * Walk through all states from lowest- to highest-power. in nvme_configure_apst()
2667 * According to the spec, lower-numbered states use more power. NPSS, in nvme_configure_apst()
2668 * despite the name, is the index of the lowest-power state, not the in nvme_configure_apst()
2671 for (state = (int)ctrl->npss; state >= 0; state--) { in nvme_configure_apst()
2675 table->entries[state] = target; in nvme_configure_apst()
2681 if (state == ctrl->npss && in nvme_configure_apst()
2682 (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) in nvme_configure_apst()
2686 * Is this state a useful non-operational state for higher-power in nvme_configure_apst()
2689 if (!(ctrl->psd[state].flags & NVME_PS_FLAGS_NON_OP_STATE)) in nvme_configure_apst()
2692 exit_latency_us = (u64)le32_to_cpu(ctrl->psd[state].exit_lat); in nvme_configure_apst()
2693 if (exit_latency_us > ctrl->ps_max_latency_us) in nvme_configure_apst()
2697 le32_to_cpu(ctrl->psd[state].entry_lat); in nvme_configure_apst()
2710 if (transition_ms > (1 << 24) - 1) in nvme_configure_apst()
2711 transition_ms = (1 << 24) - 1; in nvme_configure_apst()
2715 if (max_ps == -1) in nvme_configure_apst()
2721 if (max_ps == -1) in nvme_configure_apst()
2722 dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n"); in nvme_configure_apst()
2724 …dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n… in nvme_configure_apst()
2732 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret); in nvme_configure_apst()
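A standalone sketch (not kernel code) of the heuristic described in the comment above: a non-operational state is only used if its exit latency fits the PM QoS latency budget, and the device must idle for 50x that state's total entry plus exit latency before transitioning. All numbers below are hypothetical:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t entry_lat_us = 10000, exit_lat_us = 10000;	/* from the power state descriptor */
		uint64_t ps_max_latency_us = 100000;			/* latency tolerance from PM QoS */

		if (exit_lat_us > ps_max_latency_us) {
			printf("state too slow to wake up, skipped\n");
			return 0;
		}

		uint64_t total_latency_us = entry_lat_us + exit_lat_us;
		uint64_t transition_ms = (total_latency_us + 19) / 20;	/* 50 * total_us / 1000, rounded up */

		printf("idle for %llu ms before entering the state\n",
		       (unsigned long long)transition_ms);		/* 1000 ms */
		return 0;
	}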
2752 if (ctrl->ps_max_latency_us != latency) { in nvme_set_latency_tolerance()
2753 ctrl->ps_max_latency_us = latency; in nvme_set_latency_tolerance()
2761 * NVMe model and firmware strings are padded with spaces. For
2783 * This LiteON CL1-3D*-Q11 firmware version has a race
2785 * LiteON has resolved the problem in future firmware
2793 * This Kioxia CD6-V Series / HPE PE8030 device times out and
2798 * to use "nvme set-feature" to disable APST, but booting with
2821 /* match is null-terminated but idstr is space-padded. */
2845 return q->vid == le16_to_cpu(id->vid) && in quirk_matches()
2846 string_matches(id->mn, q->mn, sizeof(id->mn)) && in quirk_matches()
2847 string_matches(id->fr, q->fr, sizeof(id->fr)); in quirk_matches()
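As the comments above note, Identify strings are fixed-width and space-padded while quirk-table strings are NUL-terminated. A hedged, standalone sketch (not necessarily the driver's exact helper) of such a comparison: match the prefix, then require the rest of the field to be spaces:

	#include <stdio.h>
	#include <string.h>
	#include <stdbool.h>

	static bool padded_string_matches(const char *idstr, const char *match, size_t len)
	{
		size_t matchlen = strlen(match);

		if (matchlen > len || memcmp(idstr, match, matchlen))
			return false;
		for (; matchlen < len; matchlen++)
			if (idstr[matchlen] != ' ')
				return false;
		return true;
	}

	int main(void)
	{
		char mn[40];

		memset(mn, ' ', sizeof(mn));
		memcpy(mn, "EXAMPLE MODEL 123", 17);	/* hypothetical model number field */

		printf("%d\n", padded_string_matches(mn, "EXAMPLE MODEL 123", sizeof(mn)));	/* 1 */
		printf("%d\n", padded_string_matches(mn, "EXAMPLE MODEL", sizeof(mn)));	/* 0 */
		return 0;
	}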
2856 if (!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) { in nvme_init_subnqn()
2857 nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE); in nvme_init_subnqn()
2859 strscpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE); in nvme_init_subnqn()
2863 if (ctrl->vs >= NVME_VS(1, 2, 1)) in nvme_init_subnqn()
2864 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n"); in nvme_init_subnqn()
2872 off = snprintf(subsys->subnqn, NVMF_NQN_SIZE, in nvme_init_subnqn()
2874 le16_to_cpu(id->vid), le16_to_cpu(id->ssvid)); in nvme_init_subnqn()
2875 memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn)); in nvme_init_subnqn()
2876 off += sizeof(id->sn); in nvme_init_subnqn()
2877 memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn)); in nvme_init_subnqn()
2878 off += sizeof(id->mn); in nvme_init_subnqn()
2879 memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off); in nvme_init_subnqn()
2887 if (subsys->instance >= 0) in nvme_release_subsystem()
2888 ida_free(&nvme_instance_ida, subsys->instance); in nvme_release_subsystem()
2898 list_del(&subsys->entry); in nvme_destroy_subsystem()
2901 ida_destroy(&subsys->ns_ida); in nvme_destroy_subsystem()
2902 device_del(&subsys->dev); in nvme_destroy_subsystem()
2903 put_device(&subsys->dev); in nvme_destroy_subsystem()
2908 kref_put(&subsys->ref, nvme_destroy_subsystem); in nvme_put_subsystem()
2929 if (strcmp(subsys->subnqn, subsysnqn)) in __nvme_find_get_subsystem()
2931 if (!kref_get_unless_zero(&subsys->ref)) in __nvme_find_get_subsystem()
2941 return ctrl->opts && ctrl->opts->discovery_nqn; in nvme_discovery_ctrl()
2951 list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) { in nvme_validate_cntlid()
2955 if (tmp->cntlid == ctrl->cntlid) { in nvme_validate_cntlid()
2956 dev_err(ctrl->device, in nvme_validate_cntlid()
2958 ctrl->cntlid, dev_name(tmp->device), in nvme_validate_cntlid()
2959 subsys->subnqn); in nvme_validate_cntlid()
2963 if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || in nvme_validate_cntlid()
2967 dev_err(ctrl->device, in nvme_validate_cntlid()
2982 return -ENOMEM; in nvme_init_subsystem()
2984 subsys->instance = -1; in nvme_init_subsystem()
2985 mutex_init(&subsys->lock); in nvme_init_subsystem()
2986 kref_init(&subsys->ref); in nvme_init_subsystem()
2987 INIT_LIST_HEAD(&subsys->ctrls); in nvme_init_subsystem()
2988 INIT_LIST_HEAD(&subsys->nsheads); in nvme_init_subsystem()
2990 memcpy(subsys->serial, id->sn, sizeof(subsys->serial)); in nvme_init_subsystem()
2991 memcpy(subsys->model, id->mn, sizeof(subsys->model)); in nvme_init_subsystem()
2992 subsys->vendor_id = le16_to_cpu(id->vid); in nvme_init_subsystem()
2993 subsys->cmic = id->cmic; in nvme_init_subsystem()
2996 if (id->cntrltype == NVME_CTRL_DISC || in nvme_init_subsystem()
2997 !strcmp(subsys->subnqn, NVME_DISC_SUBSYS_NAME)) in nvme_init_subsystem()
2998 subsys->subtype = NVME_NQN_DISC; in nvme_init_subsystem()
3000 subsys->subtype = NVME_NQN_NVME; in nvme_init_subsystem()
3002 if (nvme_discovery_ctrl(ctrl) && subsys->subtype != NVME_NQN_DISC) { in nvme_init_subsystem()
3003 dev_err(ctrl->device, in nvme_init_subsystem()
3005 subsys->subnqn); in nvme_init_subsystem()
3007 return -EINVAL; in nvme_init_subsystem()
3009 subsys->awupf = le16_to_cpu(id->awupf); in nvme_init_subsystem()
3012 subsys->dev.class = &nvme_subsys_class; in nvme_init_subsystem()
3013 subsys->dev.release = nvme_release_subsystem; in nvme_init_subsystem()
3014 subsys->dev.groups = nvme_subsys_attrs_groups; in nvme_init_subsystem()
3015 dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance); in nvme_init_subsystem()
3016 device_initialize(&subsys->dev); in nvme_init_subsystem()
3019 found = __nvme_find_get_subsystem(subsys->subnqn); in nvme_init_subsystem()
3021 put_device(&subsys->dev); in nvme_init_subsystem()
3025 ret = -EINVAL; in nvme_init_subsystem()
3029 ret = device_add(&subsys->dev); in nvme_init_subsystem()
3031 dev_err(ctrl->device, in nvme_init_subsystem()
3033 put_device(&subsys->dev); in nvme_init_subsystem()
3036 ida_init(&subsys->ns_ida); in nvme_init_subsystem()
3037 list_add_tail(&subsys->entry, &nvme_subsystems); in nvme_init_subsystem()
3040 ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj, in nvme_init_subsystem()
3041 dev_name(ctrl->device)); in nvme_init_subsystem()
3043 dev_err(ctrl->device, in nvme_init_subsystem()
3049 subsys->instance = ctrl->instance; in nvme_init_subsystem()
3050 ctrl->subsys = subsys; in nvme_init_subsystem()
3051 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); in nvme_init_subsystem()
3072 c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1)); in nvme_get_log()
3078 return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size); in nvme_get_log()
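The Get Log Page transfer length is a zero-based dword count split across NUMDL (low 16 bits) and NUMDU (high 16 bits, an assumption in this sketch since only NUMDL appears above). A standalone sketch (not kernel code) of the conversion from a byte length:

	#include <stdio.h>

	int main(void)
	{
		size_t size = 4096;				/* log buffer size in bytes */
		unsigned int numd = (unsigned int)(size >> 2) - 1;	/* 0-based dword count */
		unsigned int numdl = numd & 0xffff;
		unsigned int numdu = numd >> 16;

		printf("size=%zu -> numd=0x%x (numdl=0x%04x, numdu=0x%04x)\n",
		       size, numd, numdl, numdu);		/* 0x3ff, 0x03ff, 0x0000 */
		return 0;
	}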
3084 struct nvme_effects_log *cel = xa_load(&ctrl->cels, csi); in nvme_get_effects_log()
3092 return -ENOMEM; in nvme_get_effects_log()
3101 xa_store(&ctrl->cels, csi, cel, GFP_KERNEL); in nvme_get_effects_log()
3109 u32 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12, val; in nvme_mps_to_sectors()
3111 if (check_shl_overflow(1U, units + page_shift - 9, &val)) in nvme_mps_to_sectors()
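Fields such as MDTS and WZSL are powers of two in units of the controller's minimum memory page size (2^(12 + CAP.MPSMIN) bytes), and the block layer wants 512-byte sectors, hence the "+ 12 ... - 9" shift above. A standalone worked example (not kernel code):

	#include <stdio.h>

	int main(void)
	{
		unsigned int mpsmin = 0;			/* 4 KiB minimum page size */
		unsigned int page_shift = mpsmin + 12;
		unsigned int mdts = 5;				/* hypothetical: 2^5 = 32 pages */
		unsigned int max_sectors = 1U << (mdts + page_shift - 9);

		printf("MDTS=%u -> %u sectors (%u KiB) per transfer\n",
		       mdts, max_sectors, max_sectors / 2);	/* 256 sectors, 128 KiB */
		return 0;
	}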
3124 * to the write-zeroes, we are cautious and limit the size to the in nvme_init_non_mdts_limits()
3128 if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) && in nvme_init_non_mdts_limits()
3129 !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES)) in nvme_init_non_mdts_limits()
3130 ctrl->max_zeroes_sectors = ctrl->max_hw_sectors; in nvme_init_non_mdts_limits()
3132 ctrl->max_zeroes_sectors = 0; in nvme_init_non_mdts_limits()
3134 if (ctrl->subsys->subtype != NVME_NQN_NVME || in nvme_init_non_mdts_limits()
3136 test_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags)) in nvme_init_non_mdts_limits()
3141 return -ENOMEM; in nvme_init_non_mdts_limits()
3147 ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id)); in nvme_init_non_mdts_limits()
3151 ctrl->dmrl = id->dmrl; in nvme_init_non_mdts_limits()
3152 ctrl->dmrsl = le32_to_cpu(id->dmrsl); in nvme_init_non_mdts_limits()
3153 if (id->wzsl) in nvme_init_non_mdts_limits()
3154 ctrl->max_zeroes_sectors = nvme_mps_to_sectors(ctrl, id->wzsl); in nvme_init_non_mdts_limits()
3158 set_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags); in nvme_init_non_mdts_limits()
3165 struct nvme_effects_log *log = ctrl->effects; in nvme_init_known_nvm_effects()
3167 log->acs[nvme_admin_format_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC | in nvme_init_known_nvm_effects()
3170 log->acs[nvme_admin_sanitize_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC | in nvme_init_known_nvm_effects()
3188 log->acs[nvme_admin_security_recv] &= cpu_to_le32(~NVME_CMD_EFFECTS_CSE_MASK); in nvme_init_known_nvm_effects()
3190 log->iocs[nvme_cmd_write] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC); in nvme_init_known_nvm_effects()
3191 log->iocs[nvme_cmd_write_zeroes] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC); in nvme_init_known_nvm_effects()
3192 log->iocs[nvme_cmd_write_uncor] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC); in nvme_init_known_nvm_effects()
3199 if (ctrl->effects) in nvme_init_effects()
3202 if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) { in nvme_init_effects()
3203 ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects); in nvme_init_effects()
3208 if (!ctrl->effects) { in nvme_init_effects()
3209 ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL); in nvme_init_effects()
3210 if (!ctrl->effects) in nvme_init_effects()
3211 return -ENOMEM; in nvme_init_effects()
3212 xa_store(&ctrl->cels, NVME_CSI_NVM, ctrl->effects, GFP_KERNEL); in nvme_init_effects()
3225 if (ctrl->cntlid != le16_to_cpu(id->cntlid)) { in nvme_check_ctrl_fabric_info()
3226 dev_err(ctrl->device, in nvme_check_ctrl_fabric_info()
3228 ctrl->cntlid, le16_to_cpu(id->cntlid)); in nvme_check_ctrl_fabric_info()
3229 return -EINVAL; in nvme_check_ctrl_fabric_info()
3232 if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) { in nvme_check_ctrl_fabric_info()
3233 dev_err(ctrl->device, in nvme_check_ctrl_fabric_info()
3234 "keep-alive support is mandatory for fabrics\n"); in nvme_check_ctrl_fabric_info()
3235 return -EINVAL; in nvme_check_ctrl_fabric_info()
3238 if (!nvme_discovery_ctrl(ctrl) && ctrl->ioccsz < 4) { in nvme_check_ctrl_fabric_info()
3239 dev_err(ctrl->device, in nvme_check_ctrl_fabric_info()
3241 ctrl->ioccsz); in nvme_check_ctrl_fabric_info()
3242 return -EINVAL; in nvme_check_ctrl_fabric_info()
3245 if (!nvme_discovery_ctrl(ctrl) && ctrl->iorcsz < 1) { in nvme_check_ctrl_fabric_info()
3246 dev_err(ctrl->device, in nvme_check_ctrl_fabric_info()
3248 ctrl->iorcsz); in nvme_check_ctrl_fabric_info()
3249 return -EINVAL; in nvme_check_ctrl_fabric_info()
3252 if (!ctrl->maxcmd) { in nvme_check_ctrl_fabric_info()
3253 dev_err(ctrl->device, "Maximum outstanding commands is 0\n"); in nvme_check_ctrl_fabric_info()
3254 return -EINVAL; in nvme_check_ctrl_fabric_info()
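/*
 * The checks above enforce NVMe over Fabrics invariants: the controller ID
 * must match the one established at connect time, Keep Alive must be
 * supported, and the capsule sizes must be sane.  IOCCSZ and IORCSZ are
 * counted in 16-byte units, so the minimum IOCCSZ of 4 is exactly one
 * 64-byte submission queue entry; anything beyond that is room for
 * in-capsule data.  Illustrative helper (not part of the driver):
 */
static unsigned int ioccsz_to_incapsule_bytes(unsigned int ioccsz)
{
	return ioccsz < 4 ? 0 : (ioccsz - 4) * 16;
}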
3270 dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret); in nvme_init_identify()
3271 return -EIO; in nvme_init_identify()
3274 if (!(ctrl->ops->flags & NVME_F_FABRICS)) in nvme_init_identify()
3275 ctrl->cntlid = le16_to_cpu(id->cntlid); in nvme_init_identify()
3277 if (!ctrl->identified) { in nvme_init_identify()
3281 * Check for quirks. Quirk can depend on firmware version, in nvme_init_identify()
3284 * could re-scan for quirks every time we reinitialize in nvme_init_identify()
3290 ctrl->quirks |= core_quirks[i].quirks; in nvme_init_identify()
3301 memcpy(ctrl->subsys->firmware_rev, id->fr, in nvme_init_identify()
3302 sizeof(ctrl->subsys->firmware_rev)); in nvme_init_identify()
3304 if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) { in nvme_init_identify()
3305 …dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at y… in nvme_init_identify()
3306 ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS; in nvme_init_identify()
3309 ctrl->crdt[0] = le16_to_cpu(id->crdt1); in nvme_init_identify()
3310 ctrl->crdt[1] = le16_to_cpu(id->crdt2); in nvme_init_identify()
3311 ctrl->crdt[2] = le16_to_cpu(id->crdt3); in nvme_init_identify()
3313 ctrl->oacs = le16_to_cpu(id->oacs); in nvme_init_identify()
3314 ctrl->oncs = le16_to_cpu(id->oncs); in nvme_init_identify()
3315 ctrl->mtfa = le16_to_cpu(id->mtfa); in nvme_init_identify()
3316 ctrl->oaes = le32_to_cpu(id->oaes); in nvme_init_identify()
3317 ctrl->wctemp = le16_to_cpu(id->wctemp); in nvme_init_identify()
3318 ctrl->cctemp = le16_to_cpu(id->cctemp); in nvme_init_identify()
3320 atomic_set(&ctrl->abort_limit, id->acl + 1); in nvme_init_identify()
3321 ctrl->vwc = id->vwc; in nvme_init_identify()
3322 if (id->mdts) in nvme_init_identify()
3323 max_hw_sectors = nvme_mps_to_sectors(ctrl, id->mdts); in nvme_init_identify()
3326 ctrl->max_hw_sectors = in nvme_init_identify()
3327 min_not_zero(ctrl->max_hw_sectors, max_hw_sectors); in nvme_init_identify()
3329 lim = queue_limits_start_update(ctrl->admin_q); in nvme_init_identify()
3331 ret = queue_limits_commit_update(ctrl->admin_q, &lim); in nvme_init_identify()
3335 ctrl->sgls = le32_to_cpu(id->sgls); in nvme_init_identify()
3336 ctrl->kas = le16_to_cpu(id->kas); in nvme_init_identify()
3337 ctrl->max_namespaces = le32_to_cpu(id->mnan); in nvme_init_identify()
3338 ctrl->ctratt = le32_to_cpu(id->ctratt); in nvme_init_identify()
3340 ctrl->cntrltype = id->cntrltype; in nvme_init_identify()
3341 ctrl->dctype = id->dctype; in nvme_init_identify()
3343 if (id->rtd3e) { in nvme_init_identify()
3344 /* us -> s */ in nvme_init_identify()
3345 u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC; in nvme_init_identify()
3347 ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time, in nvme_init_identify()
3350 if (ctrl->shutdown_timeout != shutdown_timeout) in nvme_init_identify()
3351 dev_info(ctrl->device, in nvme_init_identify()
3353 ctrl->shutdown_timeout); in nvme_init_identify()
3355 ctrl->shutdown_timeout = shutdown_timeout; in nvme_init_identify()
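/*
 * RTD3E is the controller's worst-case D3 entry latency in microseconds;
 * the code above converts it to seconds and clamps it between the
 * shutdown_timeout module parameter (default 5 seconds) and 60 seconds.
 * Worked examples, assuming the default module parameter:
 *   RTD3E =   8,000,000 us ->   8 s -> clamp(8, 5, 60)   =  8 s
 *   RTD3E = 120,000,000 us -> 120 s -> clamp(120, 5, 60) = 60 s
 * Illustrative userspace sketch of the same calculation:
 */
#include <stdint.h>

static unsigned int rtd3e_to_shutdown_secs(uint32_t rtd3e_us, unsigned int min_s)
{
	unsigned int secs = rtd3e_us / 1000000u;

	if (secs < min_s)
		secs = min_s;
	return secs > 60 ? 60 : secs;
}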
3357 ctrl->npss = id->npss; in nvme_init_identify()
3358 ctrl->apsta = id->apsta; in nvme_init_identify()
3359 prev_apst_enabled = ctrl->apst_enabled; in nvme_init_identify()
3360 if (ctrl->quirks & NVME_QUIRK_NO_APST) { in nvme_init_identify()
3361 if (force_apst && id->apsta) { in nvme_init_identify()
3362 …dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk… in nvme_init_identify()
3363 ctrl->apst_enabled = true; in nvme_init_identify()
3365 ctrl->apst_enabled = false; in nvme_init_identify()
3368 ctrl->apst_enabled = id->apsta; in nvme_init_identify()
3370 memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd)); in nvme_init_identify()
3372 if (ctrl->ops->flags & NVME_F_FABRICS) { in nvme_init_identify()
3373 ctrl->icdoff = le16_to_cpu(id->icdoff); in nvme_init_identify()
3374 ctrl->ioccsz = le32_to_cpu(id->ioccsz); in nvme_init_identify()
3375 ctrl->iorcsz = le32_to_cpu(id->iorcsz); in nvme_init_identify()
3376 ctrl->maxcmd = le16_to_cpu(id->maxcmd); in nvme_init_identify()
3382 ctrl->hmpre = le32_to_cpu(id->hmpre); in nvme_init_identify()
3383 ctrl->hmmin = le32_to_cpu(id->hmmin); in nvme_init_identify()
3384 ctrl->hmminds = le32_to_cpu(id->hmminds); in nvme_init_identify()
3385 ctrl->hmmaxd = le16_to_cpu(id->hmmaxd); in nvme_init_identify()
3392 if (ctrl->apst_enabled && !prev_apst_enabled) in nvme_init_identify()
3393 dev_pm_qos_expose_latency_tolerance(ctrl->device); in nvme_init_identify()
3394 else if (!ctrl->apst_enabled && prev_apst_enabled) in nvme_init_identify()
3395 dev_pm_qos_hide_latency_tolerance(ctrl->device); in nvme_init_identify()
3411 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs); in nvme_init_ctrl_finish()
3413 dev_err(ctrl->device, "Reading VS failed (%d)\n", ret); in nvme_init_ctrl_finish()
3417 ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize); in nvme_init_ctrl_finish()
3419 if (ctrl->vs >= NVME_VS(1, 1, 0)) in nvme_init_ctrl_finish()
3420 ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap); in nvme_init_ctrl_finish()
3440 if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) { in nvme_init_ctrl_finish()
3446 if (ret == -EINTR) in nvme_init_ctrl_finish()
3450 clear_bit(NVME_CTRL_DIRTY_CAPABILITY, &ctrl->flags); in nvme_init_ctrl_finish()
3451 ctrl->identified = true; in nvme_init_ctrl_finish()
3462 container_of(inode->i_cdev, struct nvme_ctrl, cdev); in nvme_dev_open()
3468 return -EWOULDBLOCK; in nvme_dev_open()
3472 if (!try_module_get(ctrl->ops->module)) { in nvme_dev_open()
3474 return -EINVAL; in nvme_dev_open()
3477 file->private_data = ctrl; in nvme_dev_open()
3484 container_of(inode->i_cdev, struct nvme_ctrl, cdev); in nvme_dev_release()
3486 module_put(ctrl->ops->module); in nvme_dev_release()
3505 lockdep_assert_held(&ctrl->subsys->lock); in nvme_find_ns_head()
3507 list_for_each_entry(h, &ctrl->subsys->nsheads, entry) { in nvme_find_ns_head()
3513 if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h)) in nvme_find_ns_head()
3515 if (!list_empty(&h->list) && nvme_tryget_ns_head(h)) in nvme_find_ns_head()
3525 bool has_uuid = !uuid_is_null(&ids->uuid); in nvme_subsys_check_duplicate_ids()
3526 bool has_nguid = memchr_inv(ids->nguid, 0, sizeof(ids->nguid)); in nvme_subsys_check_duplicate_ids()
3527 bool has_eui64 = memchr_inv(ids->eui64, 0, sizeof(ids->eui64)); in nvme_subsys_check_duplicate_ids()
3530 lockdep_assert_held(&subsys->lock); in nvme_subsys_check_duplicate_ids()
3532 list_for_each_entry(h, &subsys->nsheads, entry) { in nvme_subsys_check_duplicate_ids()
3533 if (has_uuid && uuid_equal(&ids->uuid, &h->ids.uuid)) in nvme_subsys_check_duplicate_ids()
3534 return -EINVAL; in nvme_subsys_check_duplicate_ids()
3536 memcmp(&ids->nguid, &h->ids.nguid, sizeof(ids->nguid)) == 0) in nvme_subsys_check_duplicate_ids()
3537 return -EINVAL; in nvme_subsys_check_duplicate_ids()
3539 memcmp(&ids->eui64, &h->ids.eui64, sizeof(ids->eui64)) == 0) in nvme_subsys_check_duplicate_ids()
3540 return -EINVAL; in nvme_subsys_check_duplicate_ids()
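/*
 * A namespace may expose any subset of UUID, NGUID and EUI-64; an all-zero
 * NGUID or EUI-64 means "not reported", which is what the
 * memchr_inv(..., 0, ...) tests above detect before comparing identifiers.
 * Userspace stand-in for that test, illustrative only:
 */
#include <stdbool.h>
#include <stddef.h>

static bool id_is_present(const unsigned char *id, size_t len)
{
	for (size_t i = 0; i < len; i++)
		if (id[i])
			return true;
	return false;
}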
3548 ida_free(&nvme_ns_chr_minor_ida, MINOR(dev->devt)); in nvme_cdev_rel()
3565 cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor); in nvme_cdev_add()
3566 cdev_device->class = &nvme_ns_chr_class; in nvme_cdev_add()
3567 cdev_device->release = nvme_cdev_rel; in nvme_cdev_add()
3570 cdev->owner = owner; in nvme_cdev_add()
3580 return nvme_ns_open(container_of(inode->i_cdev, struct nvme_ns, cdev)); in nvme_ns_chr_open()
3585 nvme_ns_release(container_of(inode->i_cdev, struct nvme_ns, cdev)); in nvme_ns_chr_release()
3599 static int nvme_add_ns_cdev(struct nvme_ns *ns) in nvme_add_ns_cdev() argument
3603 ns->cdev_device.parent = ns->ctrl->device; in nvme_add_ns_cdev()
3604 ret = dev_set_name(&ns->cdev_device, "ng%dn%d", in nvme_add_ns_cdev()
3605 ns->ctrl->instance, ns->head->instance); in nvme_add_ns_cdev()
3609 return nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops, in nvme_add_ns_cdev()
3610 ns->ctrl->ops->module); in nvme_add_ns_cdev()
3618 int ret = -ENOMEM; in nvme_alloc_ns_head()
3627 ret = ida_alloc_min(&ctrl->subsys->ns_ida, 1, GFP_KERNEL); in nvme_alloc_ns_head()
3630 head->instance = ret; in nvme_alloc_ns_head()
3631 INIT_LIST_HEAD(&head->list); in nvme_alloc_ns_head()
3632 ret = init_srcu_struct(&head->srcu); in nvme_alloc_ns_head()
3635 head->subsys = ctrl->subsys; in nvme_alloc_ns_head()
3636 head->ns_id = info->nsid; in nvme_alloc_ns_head()
3637 head->ids = info->ids; in nvme_alloc_ns_head()
3638 head->shared = info->is_shared; in nvme_alloc_ns_head()
3639 ratelimit_state_init(&head->rs_nuse, 5 * HZ, 1); in nvme_alloc_ns_head()
3640 ratelimit_set_flags(&head->rs_nuse, RATELIMIT_MSG_ON_RELEASE); in nvme_alloc_ns_head()
3641 kref_init(&head->ref); in nvme_alloc_ns_head()
3643 if (head->ids.csi) { in nvme_alloc_ns_head()
3644 ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects); in nvme_alloc_ns_head()
3648 head->effects = ctrl->effects; in nvme_alloc_ns_head()
3654 list_add_tail(&head->entry, &ctrl->subsys->nsheads); in nvme_alloc_ns_head()
3656 kref_get(&ctrl->subsys->ref); in nvme_alloc_ns_head()
3660 cleanup_srcu_struct(&head->srcu); in nvme_alloc_ns_head()
3662 ida_free(&ctrl->subsys->ns_ida, head->instance); in nvme_alloc_ns_head()
3686 mutex_lock(&s->lock); in nvme_global_check_duplicate_ids()
3688 mutex_unlock(&s->lock); in nvme_global_check_duplicate_ids()
3697 static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info) in nvme_init_ns_head() argument
3699 struct nvme_ctrl *ctrl = ns->ctrl; in nvme_init_ns_head()
3703 ret = nvme_global_check_duplicate_ids(ctrl->subsys, &info->ids); in nvme_init_ns_head()
3710 * and in user space the /dev/disk/by-id/ links rely on it. in nvme_init_ns_head()
3712 * If the device also claims to be multi-path capable back off in nvme_init_ns_head()
3722 if ((ns->ctrl->ops->flags & NVME_F_FABRICS) || /* !PCIe */ in nvme_init_ns_head()
3723 ((ns->ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) && in nvme_init_ns_head()
3724 info->is_shared)) { in nvme_init_ns_head()
3725 dev_err(ctrl->device, in nvme_init_ns_head()
3727 info->nsid); in nvme_init_ns_head()
3731 dev_err(ctrl->device, in nvme_init_ns_head()
3732 "clearing duplicate IDs for nsid %d\n", info->nsid); in nvme_init_ns_head()
3733 dev_err(ctrl->device, in nvme_init_ns_head()
3734 "use of /dev/disk/by-id/ may cause data corruption\n"); in nvme_init_ns_head()
3735 memset(&info->ids.nguid, 0, sizeof(info->ids.nguid)); in nvme_init_ns_head()
3736 memset(&info->ids.uuid, 0, sizeof(info->ids.uuid)); in nvme_init_ns_head()
3737 memset(&info->ids.eui64, 0, sizeof(info->ids.eui64)); in nvme_init_ns_head()
3738 ctrl->quirks |= NVME_QUIRK_BOGUS_NID; in nvme_init_ns_head()
3741 mutex_lock(&ctrl->subsys->lock); in nvme_init_ns_head()
3742 head = nvme_find_ns_head(ctrl, info->nsid); in nvme_init_ns_head()
3744 ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &info->ids); in nvme_init_ns_head()
3746 dev_err(ctrl->device, in nvme_init_ns_head()
3748 info->nsid); in nvme_init_ns_head()
3757 ret = -EINVAL; in nvme_init_ns_head()
3758 if (!info->is_shared || !head->shared) { in nvme_init_ns_head()
3759 dev_err(ctrl->device, in nvme_init_ns_head()
3761 info->nsid); in nvme_init_ns_head()
3764 if (!nvme_ns_ids_equal(&head->ids, &info->ids)) { in nvme_init_ns_head()
3765 dev_err(ctrl->device, in nvme_init_ns_head()
3767 info->nsid); in nvme_init_ns_head()
3772 dev_warn(ctrl->device, in nvme_init_ns_head()
3774 info->nsid); in nvme_init_ns_head()
3775 dev_warn_once(ctrl->device, in nvme_init_ns_head()
3780 list_add_tail_rcu(&ns->siblings, &head->list); in nvme_init_ns_head()
3781 ns->head = head; in nvme_init_ns_head()
3782 mutex_unlock(&ctrl->subsys->lock); in nvme_init_ns_head()
3788 mutex_unlock(&ctrl->subsys->lock); in nvme_init_ns_head()
3794 struct nvme_ns *ns, *ret = NULL; in nvme_find_get_ns() local
3797 srcu_idx = srcu_read_lock(&ctrl->srcu); in nvme_find_get_ns()
3798 list_for_each_entry_srcu(ns, &ctrl->namespaces, list, in nvme_find_get_ns()
3799 srcu_read_lock_held(&ctrl->srcu)) { in nvme_find_get_ns()
3800 if (ns->head->ns_id == nsid) { in nvme_find_get_ns()
3801 if (!nvme_get_ns(ns)) in nvme_find_get_ns()
3803 ret = ns; in nvme_find_get_ns()
3806 if (ns->head->ns_id > nsid) in nvme_find_get_ns()
3809 srcu_read_unlock(&ctrl->srcu, srcu_idx); in nvme_find_get_ns()
3817 static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns) in nvme_ns_add_to_ctrl_list() argument
3821 list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) { in nvme_ns_add_to_ctrl_list()
3822 if (tmp->head->ns_id < ns->head->ns_id) { in nvme_ns_add_to_ctrl_list()
3823 list_add_rcu(&ns->list, &tmp->list); in nvme_ns_add_to_ctrl_list()
3827 list_add(&ns->list, &ns->ctrl->namespaces); in nvme_ns_add_to_ctrl_list()
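/*
 * ctrl->namespaces is kept sorted by NSID: the helper above walks the list
 * from the tail because a newly scanned namespace usually has the largest
 * NSID, making the common case O(1), and nvme_find_get_ns() relies on the
 * ordering to stop early once it sees a larger NSID.  Generic sorted-insert
 * sketch in userspace (front-to-back, singly linked; illustrative only):
 */
struct nsid_node {
	unsigned int nsid;
	struct nsid_node *next;
};

static void nsid_insert_sorted(struct nsid_node **head, struct nsid_node *n)
{
	struct nsid_node **pp = head;

	while (*pp && (*pp)->nsid < n->nsid)
		pp = &(*pp)->next;
	n->next = *pp;
	*pp = n;
}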
3833 struct nvme_ns *ns; in nvme_alloc_ns() local
3835 int node = ctrl->numa_node; in nvme_alloc_ns()
3837 ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); in nvme_alloc_ns()
3838 if (!ns) in nvme_alloc_ns()
3841 if (ctrl->opts && ctrl->opts->data_digest) in nvme_alloc_ns()
3843 if (ctrl->ops->supports_pci_p2pdma && in nvme_alloc_ns()
3844 ctrl->ops->supports_pci_p2pdma(ctrl)) in nvme_alloc_ns()
3847 disk = blk_mq_alloc_disk(ctrl->tagset, &lim, ns); in nvme_alloc_ns()
3850 disk->fops = &nvme_bdev_ops; in nvme_alloc_ns()
3851 disk->private_data = ns; in nvme_alloc_ns()
3853 ns->disk = disk; in nvme_alloc_ns()
3854 ns->queue = disk->queue; in nvme_alloc_ns()
3855 ns->ctrl = ctrl; in nvme_alloc_ns()
3856 kref_init(&ns->kref); in nvme_alloc_ns()
3858 if (nvme_init_ns_head(ns, info)) in nvme_alloc_ns()
3872 if (nvme_ns_head_multipath(ns->head)) { in nvme_alloc_ns()
3873 sprintf(disk->disk_name, "nvme%dc%dn%d", ctrl->subsys->instance, in nvme_alloc_ns()
3874 ctrl->instance, ns->head->instance); in nvme_alloc_ns()
3875 disk->flags |= GENHD_FL_HIDDEN; in nvme_alloc_ns()
3877 sprintf(disk->disk_name, "nvme%dn%d", ctrl->subsys->instance, in nvme_alloc_ns()
3878 ns->head->instance); in nvme_alloc_ns()
3880 sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, in nvme_alloc_ns()
3881 ns->head->instance); in nvme_alloc_ns()
3884 if (nvme_update_ns_info(ns, info)) in nvme_alloc_ns()
3887 mutex_lock(&ctrl->namespaces_lock); in nvme_alloc_ns()
3892 if (test_bit(NVME_CTRL_FROZEN, &ctrl->flags)) { in nvme_alloc_ns()
3893 mutex_unlock(&ctrl->namespaces_lock); in nvme_alloc_ns()
3896 nvme_ns_add_to_ctrl_list(ns); in nvme_alloc_ns()
3897 mutex_unlock(&ctrl->namespaces_lock); in nvme_alloc_ns()
3898 synchronize_srcu(&ctrl->srcu); in nvme_alloc_ns()
3901 if (device_add_disk(ctrl->device, ns->disk, nvme_ns_attr_groups)) in nvme_alloc_ns()
3904 if (!nvme_ns_head_multipath(ns->head)) in nvme_alloc_ns()
3905 nvme_add_ns_cdev(ns); in nvme_alloc_ns()
3907 nvme_mpath_add_disk(ns, info->anagrpid); in nvme_alloc_ns()
3908 nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name); in nvme_alloc_ns()
3911 * Set ns->disk->device->driver_data to ns so we can access in nvme_alloc_ns()
3912 * ns->head->passthru_err_log_enabled in in nvme_alloc_ns()
3915 dev_set_drvdata(disk_to_dev(ns->disk), ns); in nvme_alloc_ns()
3921 mutex_lock(&ctrl->namespaces_lock); in nvme_alloc_ns()
3922 list_del_rcu(&ns->list); in nvme_alloc_ns()
3923 mutex_unlock(&ctrl->namespaces_lock); in nvme_alloc_ns()
3924 synchronize_srcu(&ctrl->srcu); in nvme_alloc_ns()
3926 mutex_lock(&ctrl->subsys->lock); in nvme_alloc_ns()
3927 list_del_rcu(&ns->siblings); in nvme_alloc_ns()
3928 if (list_empty(&ns->head->list)) in nvme_alloc_ns()
3929 list_del_init(&ns->head->entry); in nvme_alloc_ns()
3930 mutex_unlock(&ctrl->subsys->lock); in nvme_alloc_ns()
3931 nvme_put_ns_head(ns->head); in nvme_alloc_ns()
3935 kfree(ns); in nvme_alloc_ns()
3938 static void nvme_ns_remove(struct nvme_ns *ns) in nvme_ns_remove() argument
3942 if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags)) in nvme_ns_remove()
3945 clear_bit(NVME_NS_READY, &ns->flags); in nvme_ns_remove()
3946 set_capacity(ns->disk, 0); in nvme_ns_remove()
3947 nvme_fault_inject_fini(&ns->fault_inject); in nvme_ns_remove()
3951 * this ns going back into current_path. in nvme_ns_remove()
3953 synchronize_srcu(&ns->head->srcu); in nvme_ns_remove()
3956 if (nvme_mpath_clear_current_path(ns)) in nvme_ns_remove()
3957 synchronize_srcu(&ns->head->srcu); in nvme_ns_remove()
3959 mutex_lock(&ns->ctrl->subsys->lock); in nvme_ns_remove()
3960 list_del_rcu(&ns->siblings); in nvme_ns_remove()
3961 if (list_empty(&ns->head->list)) { in nvme_ns_remove()
3962 list_del_init(&ns->head->entry); in nvme_ns_remove()
3965 mutex_unlock(&ns->ctrl->subsys->lock); in nvme_ns_remove()
3967 /* guarantee not available in head->list */ in nvme_ns_remove()
3968 synchronize_srcu(&ns->head->srcu); in nvme_ns_remove()
3970 if (!nvme_ns_head_multipath(ns->head)) in nvme_ns_remove()
3971 nvme_cdev_del(&ns->cdev, &ns->cdev_device); in nvme_ns_remove()
3972 del_gendisk(ns->disk); in nvme_ns_remove()
3974 mutex_lock(&ns->ctrl->namespaces_lock); in nvme_ns_remove()
3975 list_del_rcu(&ns->list); in nvme_ns_remove()
3976 mutex_unlock(&ns->ctrl->namespaces_lock); in nvme_ns_remove()
3977 synchronize_srcu(&ns->ctrl->srcu); in nvme_ns_remove()
3980 nvme_mpath_shutdown_disk(ns->head); in nvme_ns_remove()
3981 nvme_put_ns(ns); in nvme_ns_remove()
3986 struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid); in nvme_ns_remove_by_nsid() local
3988 if (ns) { in nvme_ns_remove_by_nsid()
3989 nvme_ns_remove(ns); in nvme_ns_remove_by_nsid()
3990 nvme_put_ns(ns); in nvme_ns_remove_by_nsid()
3994 static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info) in nvme_validate_ns() argument
3998 if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) { in nvme_validate_ns()
3999 dev_err(ns->ctrl->device, in nvme_validate_ns()
4000 "identifiers changed for nsid %d\n", ns->head->ns_id); in nvme_validate_ns()
4004 ret = nvme_update_ns_info(ns, info); in nvme_validate_ns()
4013 nvme_ns_remove(ns); in nvme_validate_ns()
4019 struct nvme_ns *ns; in nvme_scan_ns() local
4026 dev_warn(ctrl->device, in nvme_scan_ns()
4036 if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) || in nvme_scan_ns()
4052 ns = nvme_find_get_ns(ctrl, nsid); in nvme_scan_ns()
4053 if (ns) { in nvme_scan_ns()
4054 nvme_validate_ns(ns, &info); in nvme_scan_ns()
4055 nvme_put_ns(ns); in nvme_scan_ns()
4062 * struct async_scan_info - keeps track of controller & NSIDs to scan
4084 idx = (u32)atomic_fetch_inc(&scan_info->next_nsid); in nvme_scan_ns_async()
4085 nsid = le32_to_cpu(scan_info->ns_list[idx]); in nvme_scan_ns_async()
4087 nvme_scan_ns(scan_info->ctrl, nsid); in nvme_scan_ns_async()
4093 struct nvme_ns *ns, *next; in nvme_remove_invalid_namespaces() local
4096 mutex_lock(&ctrl->namespaces_lock); in nvme_remove_invalid_namespaces()
4097 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) { in nvme_remove_invalid_namespaces()
4098 if (ns->head->ns_id > nsid) { in nvme_remove_invalid_namespaces()
4099 list_del_rcu(&ns->list); in nvme_remove_invalid_namespaces()
4100 synchronize_srcu(&ctrl->srcu); in nvme_remove_invalid_namespaces()
4101 list_add_tail_rcu(&ns->list, &rm_list); in nvme_remove_invalid_namespaces()
4104 mutex_unlock(&ctrl->namespaces_lock); in nvme_remove_invalid_namespaces()
4106 list_for_each_entry_safe(ns, next, &rm_list, list) in nvme_remove_invalid_namespaces()
4107 nvme_ns_remove(ns); in nvme_remove_invalid_namespaces()
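/*
 * nvme_remove_invalid_namespaces() unlinks stale namespaces onto a private
 * list while holding namespaces_lock and only calls nvme_ns_remove(), which
 * can sleep, after dropping it.  Generic userspace sketch of that
 * "detach under the lock, tear down outside it" pattern; illustrative only:
 */
#include <pthread.h>
#include <stddef.h>

struct victim {
	struct victim *next;
};

static struct victim *detach_all(pthread_mutex_t *lock, struct victim **list)
{
	struct victim *victims;

	pthread_mutex_lock(lock);
	victims = *list;
	*list = NULL;
	pthread_mutex_unlock(lock);
	return victims;	/* caller destroys these without the lock held */
}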
4121 return -ENOMEM; in nvme_scan_ns_list()
4132 ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list, in nvme_scan_ns_list()
4135 dev_warn(ctrl->device, in nvme_scan_ns_list()
4136 "Identify NS List failed (status=0x%x)\n", ret); in nvme_scan_ns_list()
4168 nn = le32_to_cpu(id->nn); in nvme_scan_ns_sequential()
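/*
 * Namespace scanning has two strategies: Identify Active Namespace ID List
 * (CNS 0x02) pages of up to 1024 NSIDs, used when the controller supports
 * it, and a sequential probe of NSIDs 1..NN as a fallback.  A list page
 * holding fewer than 1024 entries is zero-terminated.  Sketch of walking
 * one page (userspace types, NSIDs already converted to host byte order
 * as le32_to_cpu() does in the kernel; illustrative only):
 */
#include <stdint.h>

static void walk_ns_list_page(const uint32_t *ns_list,
			      void (*scan_one)(uint32_t nsid))
{
	for (int i = 0; i < 1024; i++) {
		uint32_t nsid = ns_list[i];

		if (!nsid)	/* zero terminates a short page */
			break;
		scan_one(nsid);
	}
}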
4196 dev_warn(ctrl->device, in nvme_clear_changed_ns_log()
4197 "reading changed ns log failed: %d\n", error); in nvme_clear_changed_ns_log()
4209 if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE || !ctrl->tagset) in nvme_scan_work()
4214 * new firmware download, even though it is not common we cannot ignore in nvme_scan_work()
4215 * such scenario. Controller's non-mdts limits are reported in the unit in nvme_scan_work()
4217 * namespace. Hence re-read the limits at the time of ns allocation. in nvme_scan_work()
4221 dev_warn(ctrl->device, in nvme_scan_work()
4222 "reading non-mdts-limits failed: %d\n", ret); in nvme_scan_work()
4226 if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) { in nvme_scan_work()
4227 dev_info(ctrl->device, "rescanning namespaces.\n"); in nvme_scan_work()
4231 mutex_lock(&ctrl->scan_lock); in nvme_scan_work()
4237 * devices which should support Identify NS List (as per the VS in nvme_scan_work()
4244 mutex_unlock(&ctrl->scan_lock); in nvme_scan_work()
4254 struct nvme_ns *ns, *next; in nvme_remove_namespaces() local
4270 /* prevent racing with ns scanning */ in nvme_remove_namespaces()
4271 flush_work(&ctrl->scan_work); in nvme_remove_namespaces()
4282 /* this is a no-op when called from the controller reset handler */ in nvme_remove_namespaces()
4285 mutex_lock(&ctrl->namespaces_lock); in nvme_remove_namespaces()
4286 list_splice_init_rcu(&ctrl->namespaces, &ns_list, synchronize_rcu); in nvme_remove_namespaces()
4287 mutex_unlock(&ctrl->namespaces_lock); in nvme_remove_namespaces()
4288 synchronize_srcu(&ctrl->srcu); in nvme_remove_namespaces()
4290 list_for_each_entry_safe(ns, next, &ns_list, list) in nvme_remove_namespaces()
4291 nvme_ns_remove(ns); in nvme_remove_namespaces()
4299 struct nvmf_ctrl_options *opts = ctrl->opts; in nvme_class_uevent()
4302 ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name); in nvme_class_uevent()
4307 ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr); in nvme_class_uevent()
4312 opts->trsvcid ?: "none"); in nvme_class_uevent()
4317 opts->host_traddr ?: "none"); in nvme_class_uevent()
4322 opts->host_iface ?: "none"); in nvme_class_uevent()
4331 kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp); in nvme_change_uevent()
4337 u32 aen_result = ctrl->aen_result; in nvme_aen_uevent()
4339 ctrl->aen_result = 0; in nvme_aen_uevent()
4346 kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp); in nvme_aen_uevent()
4363 ctrl->ops->submit_async_event(ctrl); in nvme_async_event_work()
4371 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) in nvme_ctrl_pp_status()
4377 return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP)); in nvme_ctrl_pp_status()
4391 dev_warn(ctrl->device, "Get FW SLOT INFO log error\n"); in nvme_get_fw_slot_info()
4395 cur_fw_slot = log->afi & 0x7; in nvme_get_fw_slot_info()
4396 next_fw_slot = (log->afi & 0x70) >> 4; in nvme_get_fw_slot_info()
4398 dev_info(ctrl->device, in nvme_get_fw_slot_info()
4399 "Firmware is activated after next Controller Level Reset\n"); in nvme_get_fw_slot_info()
4403 memcpy(ctrl->subsys->firmware_rev, &log->frs[cur_fw_slot - 1], in nvme_get_fw_slot_info()
4404 sizeof(ctrl->subsys->firmware_rev)); in nvme_get_fw_slot_info()
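/*
 * Byte 0 of the Firmware Slot Information log page (AFI) encodes which slot
 * is running and which one takes effect after the next reset, which is what
 * the masks above extract.  Decode sketch, illustrative only:
 *   bits 2:0  currently active firmware slot
 *   bits 6:4  slot activated at the next controller reset (0 = none)
 */
#include <stdint.h>

static void decode_afi(uint8_t afi, uint8_t *active, uint8_t *next_reset)
{
	*active = afi & 0x7;
	*next_reset = (afi >> 4) & 0x7;
}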
4418 if (ctrl->mtfa) in nvme_fw_act_work()
4420 msecs_to_jiffies(ctrl->mtfa * 100); in nvme_fw_act_work()
4428 dev_warn(ctrl->device, in nvme_fw_act_work()
4443 queue_work(nvme_wq, &ctrl->async_event_work); in nvme_fw_act_work()
4463 set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events); in nvme_handle_aen_notice()
4470 * firmware activation. in nvme_handle_aen_notice()
4474 queue_work(nvme_wq, &ctrl->fw_act_work); in nvme_handle_aen_notice()
4479 if (!ctrl->ana_log_buf) in nvme_handle_aen_notice()
4481 queue_work(nvme_wq, &ctrl->ana_work); in nvme_handle_aen_notice()
4485 ctrl->aen_result = result; in nvme_handle_aen_notice()
4488 dev_warn(ctrl->device, "async event result %08x\n", result); in nvme_handle_aen_notice()
4495 dev_warn(ctrl->device, in nvme_handle_aer_persistent_error()
4503 u32 result = le32_to_cpu(res->u32); in nvme_complete_async_event()
4529 ctrl->aen_result = result; in nvme_complete_async_event()
4536 queue_work(nvme_wq, &ctrl->async_event_work); in nvme_complete_async_event()
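/*
 * The Asynchronous Event Request completion packs everything into dword 0,
 * which is what "result" holds above.  Layout per the NVMe base spec:
 *   bits 2:0    event type (error, SMART/health, notice, ...)
 *   bits 15:8   event information (e.g. namespace attribute changed)
 *   bits 23:16  log page identifier to read for details
 * Decode sketch, illustrative only:
 */
#include <stdint.h>

struct aen_fields {
	uint8_t type;
	uint8_t info;
	uint8_t log_page;
};

static struct aen_fields decode_aen(uint32_t result)
{
	return (struct aen_fields){
		.type = result & 0x7,
		.info = (result >> 8) & 0xff,
		.log_page = (result >> 16) & 0xff,
	};
}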
4547 set->ops = ops; in nvme_alloc_admin_tag_set()
4548 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; in nvme_alloc_admin_tag_set()
4549 if (ctrl->ops->flags & NVME_F_FABRICS) in nvme_alloc_admin_tag_set()
4551 set->reserved_tags = 2; in nvme_alloc_admin_tag_set()
4552 set->numa_node = ctrl->numa_node; in nvme_alloc_admin_tag_set()
4553 set->flags = BLK_MQ_F_NO_SCHED; in nvme_alloc_admin_tag_set()
4554 if (ctrl->ops->flags & NVME_F_BLOCKING) in nvme_alloc_admin_tag_set()
4555 set->flags |= BLK_MQ_F_BLOCKING; in nvme_alloc_admin_tag_set()
4556 set->cmd_size = cmd_size; in nvme_alloc_admin_tag_set()
4557 set->driver_data = ctrl; in nvme_alloc_admin_tag_set()
4558 set->nr_hw_queues = 1; in nvme_alloc_admin_tag_set()
4559 set->timeout = NVME_ADMIN_TIMEOUT; in nvme_alloc_admin_tag_set()
4564 ctrl->admin_q = blk_mq_alloc_queue(set, &lim, NULL); in nvme_alloc_admin_tag_set()
4565 if (IS_ERR(ctrl->admin_q)) { in nvme_alloc_admin_tag_set()
4566 ret = PTR_ERR(ctrl->admin_q); in nvme_alloc_admin_tag_set()
4570 if (ctrl->ops->flags & NVME_F_FABRICS) { in nvme_alloc_admin_tag_set()
4571 ctrl->fabrics_q = blk_mq_alloc_queue(set, NULL, NULL); in nvme_alloc_admin_tag_set()
4572 if (IS_ERR(ctrl->fabrics_q)) { in nvme_alloc_admin_tag_set()
4573 ret = PTR_ERR(ctrl->fabrics_q); in nvme_alloc_admin_tag_set()
4578 ctrl->admin_tagset = set; in nvme_alloc_admin_tag_set()
4582 blk_mq_destroy_queue(ctrl->admin_q); in nvme_alloc_admin_tag_set()
4583 blk_put_queue(ctrl->admin_q); in nvme_alloc_admin_tag_set()
4586 ctrl->admin_q = NULL; in nvme_alloc_admin_tag_set()
4587 ctrl->fabrics_q = NULL; in nvme_alloc_admin_tag_set()
4594 blk_mq_destroy_queue(ctrl->admin_q); in nvme_remove_admin_tag_set()
4595 blk_put_queue(ctrl->admin_q); in nvme_remove_admin_tag_set()
4596 if (ctrl->ops->flags & NVME_F_FABRICS) { in nvme_remove_admin_tag_set()
4597 blk_mq_destroy_queue(ctrl->fabrics_q); in nvme_remove_admin_tag_set()
4598 blk_put_queue(ctrl->fabrics_q); in nvme_remove_admin_tag_set()
4600 blk_mq_free_tag_set(ctrl->admin_tagset); in nvme_remove_admin_tag_set()
4611 set->ops = ops; in nvme_alloc_io_tag_set()
4612 set->queue_depth = min_t(unsigned, ctrl->sqsize, BLK_MQ_MAX_DEPTH - 1); in nvme_alloc_io_tag_set()
4617 if (ctrl->quirks & NVME_QUIRK_SHARED_TAGS) in nvme_alloc_io_tag_set()
4618 set->reserved_tags = NVME_AQ_DEPTH; in nvme_alloc_io_tag_set()
4619 else if (ctrl->ops->flags & NVME_F_FABRICS) in nvme_alloc_io_tag_set()
4621 set->reserved_tags = 1; in nvme_alloc_io_tag_set()
4622 set->numa_node = ctrl->numa_node; in nvme_alloc_io_tag_set()
4623 set->flags = BLK_MQ_F_SHOULD_MERGE; in nvme_alloc_io_tag_set()
4624 if (ctrl->ops->flags & NVME_F_BLOCKING) in nvme_alloc_io_tag_set()
4625 set->flags |= BLK_MQ_F_BLOCKING; in nvme_alloc_io_tag_set()
4626 set->cmd_size = cmd_size; in nvme_alloc_io_tag_set()
4627 set->driver_data = ctrl; in nvme_alloc_io_tag_set()
4628 set->nr_hw_queues = ctrl->queue_count - 1; in nvme_alloc_io_tag_set()
4629 set->timeout = NVME_IO_TIMEOUT; in nvme_alloc_io_tag_set()
4630 set->nr_maps = nr_maps; in nvme_alloc_io_tag_set()
4635 if (ctrl->ops->flags & NVME_F_FABRICS) { in nvme_alloc_io_tag_set()
4640 ctrl->connect_q = blk_mq_alloc_queue(set, &lim, NULL); in nvme_alloc_io_tag_set()
4641 if (IS_ERR(ctrl->connect_q)) { in nvme_alloc_io_tag_set()
4642 ret = PTR_ERR(ctrl->connect_q); in nvme_alloc_io_tag_set()
4647 ctrl->tagset = set; in nvme_alloc_io_tag_set()
4652 ctrl->connect_q = NULL; in nvme_alloc_io_tag_set()
4659 if (ctrl->ops->flags & NVME_F_FABRICS) { in nvme_remove_io_tag_set()
4660 blk_mq_destroy_queue(ctrl->connect_q); in nvme_remove_io_tag_set()
4661 blk_put_queue(ctrl->connect_q); in nvme_remove_io_tag_set()
4663 blk_mq_free_tag_set(ctrl->tagset); in nvme_remove_io_tag_set()
4672 flush_work(&ctrl->async_event_work); in nvme_stop_ctrl()
4673 cancel_work_sync(&ctrl->fw_act_work); in nvme_stop_ctrl()
4674 if (ctrl->ops->stop_ctrl) in nvme_stop_ctrl()
4675 ctrl->ops->stop_ctrl(ctrl); in nvme_stop_ctrl()
4685 * to re-read the discovery log page to learn about possible changes in nvme_start_ctrl()
4689 if (test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) && in nvme_start_ctrl()
4693 if (ctrl->queue_count > 1) { in nvme_start_ctrl()
4700 set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags); in nvme_start_ctrl()
4708 nvme_fault_inject_fini(&ctrl->fault_inject); in nvme_uninit_ctrl()
4709 dev_pm_qos_hide_latency_tolerance(ctrl->device); in nvme_uninit_ctrl()
4710 cdev_device_del(&ctrl->cdev, ctrl->device); in nvme_uninit_ctrl()
4720 xa_for_each(&ctrl->cels, i, cel) { in nvme_free_cels()
4721 xa_erase(&ctrl->cels, i); in nvme_free_cels()
4725 xa_destroy(&ctrl->cels); in nvme_free_cels()
4732 struct nvme_subsystem *subsys = ctrl->subsys; in nvme_free_ctrl()
4734 if (!subsys || ctrl->instance != subsys->instance) in nvme_free_ctrl()
4735 ida_free(&nvme_instance_ida, ctrl->instance); in nvme_free_ctrl()
4738 cleanup_srcu_struct(&ctrl->srcu); in nvme_free_ctrl()
4741 __free_page(ctrl->discard_page); in nvme_free_ctrl()
4742 free_opal_dev(ctrl->opal_dev); in nvme_free_ctrl()
4746 list_del(&ctrl->subsys_entry); in nvme_free_ctrl()
4747 sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device)); in nvme_free_ctrl()
4751 ctrl->ops->free_ctrl(ctrl); in nvme_free_ctrl()
4763 * needed, which also invokes the ops->free_ctrl() callback.
4770 WRITE_ONCE(ctrl->state, NVME_CTRL_NEW); in nvme_init_ctrl()
4771 ctrl->passthru_err_log_enabled = false; in nvme_init_ctrl()
4772 clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags); in nvme_init_ctrl()
4773 spin_lock_init(&ctrl->lock); in nvme_init_ctrl()
4774 mutex_init(&ctrl->namespaces_lock); in nvme_init_ctrl()
4776 ret = init_srcu_struct(&ctrl->srcu); in nvme_init_ctrl()
4780 mutex_init(&ctrl->scan_lock); in nvme_init_ctrl()
4781 INIT_LIST_HEAD(&ctrl->namespaces); in nvme_init_ctrl()
4782 xa_init(&ctrl->cels); in nvme_init_ctrl()
4783 ctrl->dev = dev; in nvme_init_ctrl()
4784 ctrl->ops = ops; in nvme_init_ctrl()
4785 ctrl->quirks = quirks; in nvme_init_ctrl()
4786 ctrl->numa_node = NUMA_NO_NODE; in nvme_init_ctrl()
4787 INIT_WORK(&ctrl->scan_work, nvme_scan_work); in nvme_init_ctrl()
4788 INIT_WORK(&ctrl->async_event_work, nvme_async_event_work); in nvme_init_ctrl()
4789 INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work); in nvme_init_ctrl()
4790 INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work); in nvme_init_ctrl()
4791 init_waitqueue_head(&ctrl->state_wq); in nvme_init_ctrl()
4793 INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work); in nvme_init_ctrl()
4794 INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work); in nvme_init_ctrl()
4795 memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd)); in nvme_init_ctrl()
4796 ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive; in nvme_init_ctrl()
4797 ctrl->ka_last_check_time = jiffies; in nvme_init_ctrl()
4801 ctrl->discard_page = alloc_page(GFP_KERNEL); in nvme_init_ctrl()
4802 if (!ctrl->discard_page) { in nvme_init_ctrl()
4803 ret = -ENOMEM; in nvme_init_ctrl()
4810 ctrl->instance = ret; in nvme_init_ctrl()
4818 device_initialize(&ctrl->ctrl_device); in nvme_init_ctrl()
4819 ctrl->device = &ctrl->ctrl_device; in nvme_init_ctrl()
4820 ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt), in nvme_init_ctrl()
4821 ctrl->instance); in nvme_init_ctrl()
4822 ctrl->device->class = &nvme_class; in nvme_init_ctrl()
4823 ctrl->device->parent = ctrl->dev; in nvme_init_ctrl()
4824 if (ops->dev_attr_groups) in nvme_init_ctrl()
4825 ctrl->device->groups = ops->dev_attr_groups; in nvme_init_ctrl()
4827 ctrl->device->groups = nvme_dev_attr_groups; in nvme_init_ctrl()
4828 ctrl->device->release = nvme_free_ctrl; in nvme_init_ctrl()
4829 dev_set_drvdata(ctrl->device, ctrl); in nvme_init_ctrl()
4834 ida_free(&nvme_instance_ida, ctrl->instance); in nvme_init_ctrl()
4836 if (ctrl->discard_page) in nvme_init_ctrl()
4837 __free_page(ctrl->discard_page); in nvme_init_ctrl()
4838 cleanup_srcu_struct(&ctrl->srcu); in nvme_init_ctrl()
4851 ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance); in nvme_add_ctrl()
4855 cdev_init(&ctrl->cdev, &nvme_dev_fops); in nvme_add_ctrl()
4856 ctrl->cdev.owner = ctrl->ops->module; in nvme_add_ctrl()
4857 ret = cdev_device_add(&ctrl->cdev, ctrl->device); in nvme_add_ctrl()
4865 ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance; in nvme_add_ctrl()
4866 dev_pm_qos_update_user_latency_tolerance(ctrl->device, in nvme_add_ctrl()
4869 nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device)); in nvme_add_ctrl()
4879 struct nvme_ns *ns; in nvme_mark_namespaces_dead() local
4882 srcu_idx = srcu_read_lock(&ctrl->srcu); in nvme_mark_namespaces_dead()
4883 list_for_each_entry_srcu(ns, &ctrl->namespaces, list, in nvme_mark_namespaces_dead()
4884 srcu_read_lock_held(&ctrl->srcu)) in nvme_mark_namespaces_dead()
4885 blk_mark_disk_dead(ns->disk); in nvme_mark_namespaces_dead()
4886 srcu_read_unlock(&ctrl->srcu, srcu_idx); in nvme_mark_namespaces_dead()
4892 struct nvme_ns *ns; in nvme_unfreeze() local
4895 srcu_idx = srcu_read_lock(&ctrl->srcu); in nvme_unfreeze()
4896 list_for_each_entry_srcu(ns, &ctrl->namespaces, list, in nvme_unfreeze()
4897 srcu_read_lock_held(&ctrl->srcu)) in nvme_unfreeze()
4898 blk_mq_unfreeze_queue(ns->queue); in nvme_unfreeze()
4899 srcu_read_unlock(&ctrl->srcu, srcu_idx); in nvme_unfreeze()
4900 clear_bit(NVME_CTRL_FROZEN, &ctrl->flags); in nvme_unfreeze()
4906 struct nvme_ns *ns; in nvme_wait_freeze_timeout() local
4909 srcu_idx = srcu_read_lock(&ctrl->srcu); in nvme_wait_freeze_timeout()
4910 list_for_each_entry_srcu(ns, &ctrl->namespaces, list, in nvme_wait_freeze_timeout()
4911 srcu_read_lock_held(&ctrl->srcu)) { in nvme_wait_freeze_timeout()
4912 timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout); in nvme_wait_freeze_timeout()
4916 srcu_read_unlock(&ctrl->srcu, srcu_idx); in nvme_wait_freeze_timeout()
4923 struct nvme_ns *ns; in nvme_wait_freeze() local
4926 srcu_idx = srcu_read_lock(&ctrl->srcu); in nvme_wait_freeze()
4927 list_for_each_entry_srcu(ns, &ctrl->namespaces, list, in nvme_wait_freeze()
4928 srcu_read_lock_held(&ctrl->srcu)) in nvme_wait_freeze()
4929 blk_mq_freeze_queue_wait(ns->queue); in nvme_wait_freeze()
4930 srcu_read_unlock(&ctrl->srcu, srcu_idx); in nvme_wait_freeze()
4936 struct nvme_ns *ns; in nvme_start_freeze() local
4939 set_bit(NVME_CTRL_FROZEN, &ctrl->flags); in nvme_start_freeze()
4940 srcu_idx = srcu_read_lock(&ctrl->srcu); in nvme_start_freeze()
4941 list_for_each_entry_srcu(ns, &ctrl->namespaces, list, in nvme_start_freeze()
4942 srcu_read_lock_held(&ctrl->srcu)) in nvme_start_freeze()
4943 blk_freeze_queue_start(ns->queue); in nvme_start_freeze()
4944 srcu_read_unlock(&ctrl->srcu, srcu_idx); in nvme_start_freeze()
4950 if (!ctrl->tagset) in nvme_quiesce_io_queues()
4952 if (!test_and_set_bit(NVME_CTRL_STOPPED, &ctrl->flags)) in nvme_quiesce_io_queues()
4953 blk_mq_quiesce_tagset(ctrl->tagset); in nvme_quiesce_io_queues()
4955 blk_mq_wait_quiesce_done(ctrl->tagset); in nvme_quiesce_io_queues()
4961 if (!ctrl->tagset) in nvme_unquiesce_io_queues()
4963 if (test_and_clear_bit(NVME_CTRL_STOPPED, &ctrl->flags)) in nvme_unquiesce_io_queues()
4964 blk_mq_unquiesce_tagset(ctrl->tagset); in nvme_unquiesce_io_queues()
4970 if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags)) in nvme_quiesce_admin_queue()
4971 blk_mq_quiesce_queue(ctrl->admin_q); in nvme_quiesce_admin_queue()
4973 blk_mq_wait_quiesce_done(ctrl->admin_q->tag_set); in nvme_quiesce_admin_queue()
4979 if (test_and_clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags)) in nvme_unquiesce_admin_queue()
4980 blk_mq_unquiesce_queue(ctrl->admin_q); in nvme_unquiesce_admin_queue()
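/*
 * Hedged sketch of how a transport driver typically orders the freeze and
 * quiesce helpers around a controller reset (illustrative call sequence,
 * not copied verbatim from any one driver):
 *
 *	nvme_start_freeze(ctrl);		 // new I/O waits at the block layer
 *	nvme_quiesce_io_queues(ctrl);		 // stop dispatching to hardware
 *	...tear down and re-establish the queues...
 *	nvme_unquiesce_io_queues(ctrl);
 *	nvme_wait_freeze_timeout(ctrl, timeout); // bounded wait for the freeze
 *	nvme_unfreeze(ctrl);			 // let frozen request queues run
 */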
4986 struct nvme_ns *ns; in nvme_sync_io_queues() local
4989 srcu_idx = srcu_read_lock(&ctrl->srcu); in nvme_sync_io_queues()
4990 list_for_each_entry_srcu(ns, &ctrl->namespaces, list, in nvme_sync_io_queues()
4991 srcu_read_lock_held(&ctrl->srcu)) in nvme_sync_io_queues()
4992 blk_sync_queue(ns->queue); in nvme_sync_io_queues()
4993 srcu_read_unlock(&ctrl->srcu, srcu_idx); in nvme_sync_io_queues()
5000 if (ctrl->admin_q) in nvme_sync_queues()
5001 blk_sync_queue(ctrl->admin_q); in nvme_sync_queues()
5007 if (file->f_op != &nvme_dev_fops) in nvme_ctrl_from_file()
5009 return file->private_data; in nvme_ctrl_from_file()
5047 int result = -ENOMEM; in nvme_core_init()
5051 nvme_wq = alloc_workqueue("nvme-wq", in nvme_core_init()
5056 nvme_reset_wq = alloc_workqueue("nvme-reset-wq", in nvme_core_init()
5061 nvme_delete_wq = alloc_workqueue("nvme-delete-wq", in nvme_core_init()
5080 "nvme-generic"); in nvme_core_init()