Lines matching full:subsys (NVMe target core, nvmet)
124 static u32 nvmet_max_nsid(struct nvmet_subsys *subsys) in nvmet_max_nsid() argument
130 xa_for_each(&subsys->namespaces, idx, cur) in nvmet_max_nsid()
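
The hits above come from a helper that scans the namespace xarray for the highest in-use NSID. A minimal sketch of that idiom, using invented demo_* types rather than the real nvmet structures: xa_for_each() visits entries in ascending index order, so the last index seen is the maximum.

#include <linux/mutex.h>
#include <linux/xarray.h>

struct demo_map {
	struct mutex lock;		/* serializes configuration changes */
	struct xarray entries;		/* index == object ID, value == object */
	unsigned long max_id;
	unsigned int nr_entries;
};

static unsigned long demo_max_id(struct demo_map *map)
{
	unsigned long idx, max = 0;
	void *entry;

	/* iteration is ordered by index, so remembering the last one suffices */
	xa_for_each(&map->entries, idx, entry)
		max = idx;
	return max;
}
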
242 void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid) in nvmet_ns_changed() argument
246 lockdep_assert_held(&subsys->lock); in nvmet_ns_changed()
248 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { in nvmet_ns_changed()
258 void nvmet_send_ana_event(struct nvmet_subsys *subsys, in nvmet_send_ana_event() argument
263 mutex_lock(&subsys->lock); in nvmet_send_ana_event()
264 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { in nvmet_send_ana_event()
272 mutex_unlock(&subsys->lock); in nvmet_send_ana_event()
281 nvmet_send_ana_event(p->subsys, port); in nvmet_port_send_ana_event()
308 void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys) in nvmet_port_del_ctrls() argument
312 mutex_lock(&subsys->lock); in nvmet_port_del_ctrls()
313 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { in nvmet_port_del_ctrls()
317 mutex_unlock(&subsys->lock); in nvmet_port_del_ctrls()
441 struct nvmet_subsys *subsys = nvmet_req_subsys(req); in nvmet_req_find_ns() local
443 req->ns = xa_load(&subsys->namespaces, nsid); in nvmet_req_find_ns()
446 if (nvmet_subsys_nsid_exists(subsys, nsid)) in nvmet_req_find_ns()
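
The request-side lookup above resolves an NSID straight from the xarray. Continuing the hypothetical demo_map sketch, the same pattern looks like this; xa_load() returns NULL when nothing is stored at that index.

static void *demo_find(struct demo_map *map, unsigned long id)
{
	/* xa_load() can be called without the caller's locks; NULL == not in use */
	return xa_load(&map->entries, id);
}
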
518 * Note: ctrl->subsys->lock should be held when calling this function
570 struct nvmet_subsys *subsys = ns->subsys; in nvmet_ns_enable() local
574 mutex_lock(&subsys->lock); in nvmet_ns_enable()
577 if (nvmet_is_passthru_subsys(subsys)) { in nvmet_ns_enable()
586 if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES) in nvmet_ns_enable()
599 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) in nvmet_ns_enable()
607 if (ns->nsid > subsys->max_nsid) in nvmet_ns_enable()
608 subsys->max_nsid = ns->nsid; in nvmet_ns_enable()
610 ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL); in nvmet_ns_enable()
614 subsys->nr_namespaces++; in nvmet_ns_enable()
616 nvmet_ns_changed(subsys, ns->nsid); in nvmet_ns_enable()
620 mutex_unlock(&subsys->lock); in nvmet_ns_enable()
624 subsys->max_nsid = nvmet_max_nsid(subsys); in nvmet_ns_enable()
627 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) in nvmet_ns_enable()
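
The nvmet_ns_enable() hits show the usual insert-under-lock sequence: take the subsystem mutex, publish the namespace in the xarray at its NSID, then update bookkeeping and notify. A condensed sketch of that shape, again with the hypothetical demo_* types (struct demo_obj is invented for illustration):

struct demo_obj {
	unsigned long id;
};

static int demo_enable(struct demo_map *map, struct demo_obj *obj)
{
	int ret;

	mutex_lock(&map->lock);
	/* xa_insert() fails with -EBUSY if the ID is already occupied */
	ret = xa_insert(&map->entries, obj->id, obj, GFP_KERNEL);
	if (ret)
		goto out_unlock;

	map->nr_entries++;
	if (obj->id > map->max_id)
		map->max_id = obj->id;
	/* the real code would also notify attached controllers at this point */
out_unlock:
	mutex_unlock(&map->lock);
	return ret;
}
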
636 struct nvmet_subsys *subsys = ns->subsys; in nvmet_ns_disable() local
639 mutex_lock(&subsys->lock); in nvmet_ns_disable()
644 xa_erase(&ns->subsys->namespaces, ns->nsid); in nvmet_ns_disable()
645 if (ns->nsid == subsys->max_nsid) in nvmet_ns_disable()
646 subsys->max_nsid = nvmet_max_nsid(subsys); in nvmet_ns_disable()
648 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) in nvmet_ns_disable()
651 mutex_unlock(&subsys->lock); in nvmet_ns_disable()
666 mutex_lock(&subsys->lock); in nvmet_ns_disable()
668 subsys->nr_namespaces--; in nvmet_ns_disable()
669 nvmet_ns_changed(subsys, ns->nsid); in nvmet_ns_disable()
672 mutex_unlock(&subsys->lock); in nvmet_ns_disable()
687 struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid) in nvmet_ns_alloc() argument
698 ns->subsys = subsys; in nvmet_ns_alloc()
1186 if (!nvmet_is_disc_subsys(ctrl->subsys) && in nvmet_start_ctrl()
1257 if (nvmet_is_passthru_subsys(ctrl->subsys)) in nvmet_init_cap()
1266 struct nvmet_subsys *subsys; in nvmet_ctrl_find_get() local
1268 subsys = nvmet_find_get_subsys(req->port, subsysnqn); in nvmet_ctrl_find_get()
1269 if (!subsys) { in nvmet_ctrl_find_get()
1276 mutex_lock(&subsys->lock); in nvmet_ctrl_find_get()
1277 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { in nvmet_ctrl_find_get()
1292 pr_warn("could not find controller %d for subsys %s / host %s\n", in nvmet_ctrl_find_get()
1297 mutex_unlock(&subsys->lock); in nvmet_ctrl_find_get()
1298 nvmet_subsys_put(subsys); in nvmet_ctrl_find_get()
1324 bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn) in nvmet_host_allowed() argument
1330 if (subsys->allow_any_host) in nvmet_host_allowed()
1333 if (nvmet_is_disc_subsys(subsys)) /* allow all access to disc subsys */ in nvmet_host_allowed()
1336 list_for_each_entry(p, &subsys->hosts, entry) { in nvmet_host_allowed()
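
nvmet_host_allowed() is a plain allow-list walk: wide-open subsystems (and the discovery subsystem) accept any host, otherwise the connecting host NQN must appear on the subsystem's hosts list. A sketch with an invented demo_host type:

#include <linux/list.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_host {
	struct list_head entry;
	char nqn[256];		/* large enough for an NVMe NQN string */
};

static bool demo_host_allowed(struct list_head *hosts, bool allow_any,
			      const char *hostnqn)
{
	struct demo_host *h;

	if (allow_any)
		return true;
	list_for_each_entry(h, hosts, entry) {
		if (!strcmp(h->nqn, hostnqn))
			return true;
	}
	return false;
}
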
1345 * Note: ctrl->subsys->lock should be held when calling this function
1358 xa_for_each(&ctrl->subsys->namespaces, idx, ns) in nvmet_setup_p2p_ns_map()
1363 * Note: ctrl->subsys->lock should be held when calling this function
1388 struct nvmet_subsys *subsys; in nvmet_alloc_ctrl() local
1394 subsys = nvmet_find_get_subsys(req->port, subsysnqn); in nvmet_alloc_ctrl()
1395 if (!subsys) { in nvmet_alloc_ctrl()
1404 if (!nvmet_host_allowed(subsys, hostnqn)) { in nvmet_alloc_ctrl()
1427 subsys->clear_ids = 1; in nvmet_alloc_ctrl()
1440 ctrl->subsys = subsys; in nvmet_alloc_ctrl()
1441 ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support; in nvmet_alloc_ctrl()
1450 ctrl->sqs = kcalloc(subsys->max_qid + 1, in nvmet_alloc_ctrl()
1457 subsys->cntlid_min, subsys->cntlid_max, in nvmet_alloc_ctrl()
1469 if (nvmet_is_disc_subsys(ctrl->subsys) && !kato) in nvmet_alloc_ctrl()
1480 mutex_lock(&subsys->lock); in nvmet_alloc_ctrl()
1481 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); in nvmet_alloc_ctrl()
1484 mutex_unlock(&subsys->lock); in nvmet_alloc_ctrl()
1496 nvmet_subsys_put(subsys); in nvmet_alloc_ctrl()
1504 struct nvmet_subsys *subsys = ctrl->subsys; in nvmet_ctrl_free() local
1506 mutex_lock(&subsys->lock); in nvmet_ctrl_free()
1509 mutex_unlock(&subsys->lock); in nvmet_ctrl_free()
1527 nvmet_subsys_put(subsys); in nvmet_ctrl_free()
1577 if (!strncmp(p->subsys->subsysnqn, subsysnqn, in nvmet_find_get_subsys()
1579 if (!kref_get_unless_zero(&p->subsys->ref)) in nvmet_find_get_subsys()
1582 return p->subsys; in nvmet_find_get_subsys()
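
nvmet_find_get_subsys() pairs the lookup with kref_get_unless_zero(), so a subsystem whose last reference has already been dropped is never handed back to the caller. The same lookup-and-get idiom in miniature (demo_subsys is a hypothetical stand-in):

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/string.h>

struct demo_subsys {
	struct kref ref;
	struct list_head entry;
	char *nqn;
};

static struct demo_subsys *demo_find_get(struct list_head *subsystems,
					 const char *nqn)
{
	struct demo_subsys *s;

	list_for_each_entry(s, subsystems, entry) {
		if (strcmp(s->nqn, nqn))
			continue;
		/* refuse to hand out an object that is already being freed */
		if (!kref_get_unless_zero(&s->ref))
			break;
		return s;
	}
	return NULL;
}
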
1592 struct nvmet_subsys *subsys; in nvmet_subsys_alloc() local
1596 subsys = kzalloc(sizeof(*subsys), GFP_KERNEL); in nvmet_subsys_alloc()
1597 if (!subsys) in nvmet_subsys_alloc()
1600 subsys->ver = NVMET_DEFAULT_VS; in nvmet_subsys_alloc()
1603 bin2hex(subsys->serial, &serial, sizeof(serial)); in nvmet_subsys_alloc()
1605 subsys->model_number = kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL); in nvmet_subsys_alloc()
1606 if (!subsys->model_number) { in nvmet_subsys_alloc()
1611 subsys->ieee_oui = 0; in nvmet_subsys_alloc()
1613 subsys->firmware_rev = kstrndup(UTS_RELEASE, NVMET_FR_MAX_SIZE, GFP_KERNEL); in nvmet_subsys_alloc()
1614 if (!subsys->firmware_rev) { in nvmet_subsys_alloc()
1621 subsys->max_qid = NVMET_NR_QUEUES; in nvmet_subsys_alloc()
1625 subsys->max_qid = 0; in nvmet_subsys_alloc()
1632 subsys->type = type; in nvmet_subsys_alloc()
1633 subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE, in nvmet_subsys_alloc()
1635 if (!subsys->subsysnqn) { in nvmet_subsys_alloc()
1639 subsys->cntlid_min = NVME_CNTLID_MIN; in nvmet_subsys_alloc()
1640 subsys->cntlid_max = NVME_CNTLID_MAX; in nvmet_subsys_alloc()
1641 kref_init(&subsys->ref); in nvmet_subsys_alloc()
1643 mutex_init(&subsys->lock); in nvmet_subsys_alloc()
1644 xa_init(&subsys->namespaces); in nvmet_subsys_alloc()
1645 INIT_LIST_HEAD(&subsys->ctrls); in nvmet_subsys_alloc()
1646 INIT_LIST_HEAD(&subsys->hosts); in nvmet_subsys_alloc()
1648 ret = nvmet_debugfs_subsys_setup(subsys); in nvmet_subsys_alloc()
1652 return subsys; in nvmet_subsys_alloc()
1655 kfree(subsys->subsysnqn); in nvmet_subsys_alloc()
1657 kfree(subsys->firmware_rev); in nvmet_subsys_alloc()
1659 kfree(subsys->model_number); in nvmet_subsys_alloc()
1661 kfree(subsys); in nvmet_subsys_alloc()
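
nvmet_subsys_alloc() follows the standard kzalloc-plus-goto-unwind shape: every allocation that can fail has a matching error label, and failures release what was acquired so far in reverse order. A compressed sketch continuing the demo_subsys type above (demo_subsys_alloc() is invented, not the real constructor):

#include <linux/slab.h>
#include <linux/err.h>

static struct demo_subsys *demo_subsys_alloc(const char *nqn)
{
	struct demo_subsys *s;

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return ERR_PTR(-ENOMEM);

	s->nqn = kstrdup(nqn, GFP_KERNEL);
	if (!s->nqn)
		goto free_subsys;

	kref_init(&s->ref);
	INIT_LIST_HEAD(&s->entry);
	return s;

free_subsys:
	/* unwind in reverse order of acquisition */
	kfree(s);
	return ERR_PTR(-ENOMEM);
}
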
1667 struct nvmet_subsys *subsys = in nvmet_subsys_free() local
1670 WARN_ON_ONCE(!xa_empty(&subsys->namespaces)); in nvmet_subsys_free()
1672 nvmet_debugfs_subsys_free(subsys); in nvmet_subsys_free()
1674 xa_destroy(&subsys->namespaces); in nvmet_subsys_free()
1675 nvmet_passthru_subsys_free(subsys); in nvmet_subsys_free()
1677 kfree(subsys->subsysnqn); in nvmet_subsys_free()
1678 kfree(subsys->model_number); in nvmet_subsys_free()
1679 kfree(subsys->firmware_rev); in nvmet_subsys_free()
1680 kfree(subsys); in nvmet_subsys_free()
1683 void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys) in nvmet_subsys_del_ctrls() argument
1687 mutex_lock(&subsys->lock); in nvmet_subsys_del_ctrls()
1688 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) in nvmet_subsys_del_ctrls()
1690 mutex_unlock(&subsys->lock); in nvmet_subsys_del_ctrls()
1693 void nvmet_subsys_put(struct nvmet_subsys *subsys) in nvmet_subsys_put() argument
1695 kref_put(&subsys->ref, nvmet_subsys_free); in nvmet_subsys_put()
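
nvmet_subsys_put() closes the lifecycle: kref_put() invokes the release callback only when the final reference drops, and that callback is where the owned strings and the structure itself are freed, as the nvmet_subsys_free() hits above show. Sketched with the same hypothetical type:

static void demo_subsys_free(struct kref *ref)
{
	struct demo_subsys *s = container_of(ref, struct demo_subsys, ref);

	kfree(s->nqn);
	kfree(s);
}

static void demo_subsys_put(struct demo_subsys *s)
{
	/* the release callback runs only when the last reference is dropped */
	kref_put(&s->ref, demo_subsys_free);
}
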