Lines matching +full:host +full:- +full:id — drivers/nvme/target/passthru.c

// SPDX-License-Identifier: GPL-2.0
 * Copyright (c) 2017-2018 Western Digital Corporation or its affiliates.
 * Copyright (c) 2019-2020, Eideticom Inc.
#include "../host/nvme.h"
/* in nvmet_passthrough_override_cap() */
        if (!nvme_multi_css(ctrl->subsys->passthru_ctrl))
                ctrl->cap &= ~(1ULL << 43);
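        /*
         * Bit 43 of CAP sits in the CSS field (bits 44:37): it is the CSS
         * bit advertising support for one or more I/O command sets, so it
         * is masked off when the underlying passthru controller is not
         * multi-command-set capable.
         */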
/* in nvmet_passthru_override_id_descs() */
        struct nvmet_ctrl *ctrl = req->sq->ctrl;

        if (!ctrl->subsys->clear_ids)

                if (cur->nidl == 0)
                if (cur->nidt == NVME_NIDT_CSI) {
                len = sizeof(struct nvme_ns_id_desc) + cur->nidl;

        cur->nidt = NVME_NIDT_CSI;
        cur->nidl = NVME_NIDT_CSI_LEN;
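        /*
         * With clear_ids set, the descriptor list returned by the device is
         * scanned for a CSI descriptor, then the buffer is zeroed and
         * rebuilt to carry only that descriptor; the unique identifiers
         * (NGUID/EUI64) from the underlying controller are suppressed,
         * matching the clearing in nvmet_passthru_override_id_ns() below.
         */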
/* in nvmet_passthru_override_id_ctrl() */
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvme_ctrl *pctrl = ctrl->subsys->passthru_ctrl;

        struct nvme_id_ctrl *id;

        id = kzalloc(sizeof(*id), GFP_KERNEL);
        if (!id)

        status = nvmet_copy_from_sgl(req, 0, id, sizeof(*id));

        id->cntlid = cpu_to_le16(ctrl->cntlid);
        id->ver = cpu_to_le32(ctrl->subsys->ver);

        /*
         * The passthru NVMe driver may have a limit on the number of segments
         * which depends on the host's memory fragmentation. To solve this,
         * ensure mdts is limited to the pages equal to the number of segments.
         */
        max_hw_sectors = min_not_zero(pctrl->max_segments << PAGE_SECTORS_SHIFT,
                                      pctrl->max_hw_sectors);

        page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;

        id->mdts = ilog2(max_hw_sectors) + 9 - page_shift;
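        /*
         * MDTS is a power of two in units of the minimum memory page size
         * (CAP.MPSMIN). Worked example: max_hw_sectors = 1024 (512 KiB)
         * with 4 KiB pages gives mdts = ilog2(1024) + 9 - 12 = 7, i.e.
         * 2^7 * 4 KiB = 512 KiB per transfer.
         */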
        id->acl = 3;
        /*
         * We export aerl limit for the fabrics controller, update this when
         * passthru based aerl support is added.
         */
        id->aerl = NVMET_ASYNC_EVENTS - 1;

        /* emulate kas as most of the PCIe ctrl don't have a support for kas */
        id->kas = cpu_to_le16(NVMET_KAS);

        /* don't support host memory buffer */
        id->hmpre = 0;
        id->hmmin = 0;

        id->sqes = min_t(__u8, ((0x6 << 4) | 0x6), id->sqes);
        id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes);
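        /*
         * The SQES/CQES nibbles encode the required (low) and maximum
         * (high) queue entry sizes as powers of two: 0x66 pins both to the
         * standard 64-byte SQE, 0x44 to the 16-byte CQE, and min_t() keeps
         * the passthru device from advertising anything larger.
         */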
        id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl));

        /* don't support fuse commands */
        id->fuses = 0;

        id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
        if (ctrl->ops->flags & NVMF_KEYED_SGLS)
                id->sgls |= cpu_to_le32(1 << 2);
        if (req->port->inline_data_size)
                id->sgls |= cpu_to_le32(1 << 20);
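        /*
         * SGLS bits, as I read the spec: bit 0 advertises byte-aligned SGL
         * support, bit 2 keyed SGL data block descriptors (used by RDMA
         * transports), and bit 20 offset-based SGL data block addressing,
         * which NVMe-oF uses for in-capsule (inline) data.
         */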
        /*
         * When the passthru controller is set up using the nvme-loop
         * transport it will export the passthru ctrl subsysnqn (PCIe NVMe
         * ctrl) and will fail in nvme/host/core.c in the
         * nvme_init_subsystem()->nvme_active_ctrl() code path with a
         * duplicate ctrl subsysnqn. In order to prevent that we mask the
         * passthru-ctrl subsysnqn with the target ctrl subsysnqn.
         */
        memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn));

        /* use fabric id-ctrl values */
        id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
                                req->port->inline_data_size) / 16);
        id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);
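        /*
         * IOCCSZ/IORCSZ are in 16-byte units. E.g. with a hypothetical
         * 8 KiB inline data size: ioccsz = (64 + 8192) / 16 = 516, and
         * iorcsz = 16 / 16 = 1 for the bare 16-byte completion.
         */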
        id->msdbd = ctrl->ops->msdbd;

        /* Support multipath connections with fabrics */
        id->cmic |= 1 << 1;

        /* Disable reservations, see nvmet_parse_passthru_io_cmd() */
        id->oncs &= cpu_to_le16(~NVME_CTRL_ONCS_RESERVATIONS);

        status = nvmet_copy_to_sgl(req, 0, id, sizeof(struct nvme_id_ctrl));

        kfree(id);
/* in nvmet_passthru_override_id_ns() */
        struct nvme_id_ns *id;

        id = kzalloc(sizeof(*id), GFP_KERNEL);
        if (!id)

        status = nvmet_copy_from_sgl(req, 0, id, sizeof(struct nvme_id_ns));

        for (i = 0; i < (id->nlbaf + 1); i++)
                if (id->lbaf[i].ms)
                        memset(&id->lbaf[i], 0, sizeof(id->lbaf[i]));

        id->flbas = id->flbas & ~(1 << 4);

        id->mc = 0;
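        /*
         * The target code does not support sending metadata, so every LBA
         * format with a metadata size is cleared above, the extended-LBA
         * flag (FLBAS bit 4) is stripped, and the metadata capabilities
         * field is zeroed.
         */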
        if (req->sq->ctrl->subsys->clear_ids) {
                memset(id->nguid, 0, NVME_NIDT_NGUID_LEN);
                memset(id->eui64, 0, NVME_NIDT_EUI64_LEN);
        }

        status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

        kfree(id);
/* in nvmet_passthru_execute_cmd_work() */
        struct request *rq = req->p.rq;
        struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
        struct nvme_ns *ns = rq->q->queuedata;

        effects = nvme_passthru_start(ctrl, ns, req->cmd->common.opcode);

        if (status == NVME_SC_SUCCESS &&
            req->cmd->common.opcode == nvme_admin_identify) {
                switch (req->cmd->identify.cns) {

        req->cqe->result = nvme_req(rq)->result;

        nvme_passthru_end(ctrl, ns, effects, req->cmd, status);
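        /*
         * The elided switch cases dispatch to the override helpers above:
         * identify results for the controller, namespace and namespace
         * descriptor list CNS values are rewritten here, after execution
         * and before the data reaches the host.
         */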
/* in nvmet_passthru_req_done() */
        struct nvmet_req *req = rq->end_io_data;

        req->cqe->result = nvme_req(rq)->result;
        nvmet_req_complete(req, nvme_req(rq)->status);
/* in nvmet_passthru_map_sg() */
        if (req->sg_cnt > BIO_MAX_VECS)
                return -EINVAL;

                bio = &req->p.inline_bio;
                bio_init(bio, NULL, req->inline_bvec,
                         ARRAY_SIZE(req->inline_bvec), req_op(rq));

                bio = bio_alloc(NULL, bio_max_segs(req->sg_cnt), req_op(rq),
                                GFP_KERNEL);
                bio->bi_end_io = bio_put;

        for_each_sg(req->sg, sg, req->sg_cnt, i) {
                if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
                                    sg->offset) < sg->length) {
                        return -EINVAL;

        blk_rq_bio_prep(rq, bio, req->sg_cnt);
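        /*
         * The whole transfer is mapped into a single bio (the inline bvec
         * for small requests, one bio_alloc() otherwise), which is why
         * sg_cnt is capped at BIO_MAX_VECS here and why the advertised
         * mdts is clamped in nvmet_passthru_override_id_ctrl().
         */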
/* in nvmet_passthru_execute_cmd() */
        struct nvme_ctrl *ctrl = nvmet_req_subsys(req)->passthru_ctrl;
        struct request_queue *q = ctrl->admin_q;

        if (likely(req->sq->qid != 0)) {
                u32 nsid = le32_to_cpu(req->cmd->common.nsid);

                q = ns->queue;
                timeout = nvmet_req_subsys(req)->io_timeout;
        } else {
                timeout = nvmet_req_subsys(req)->admin_timeout;
        }

        rq = blk_mq_alloc_request(q, nvme_req_op(req->cmd), 0);

        nvme_init_request(rq, req->cmd);

        rq->timeout = timeout;

        if (req->sg_cnt) {

        /*
         * If a command needs post-execution fixups, or there are any
         * non-trivial effects, make sure to execute the command synchronously
         * in a workqueue so that nvme_passthru_end gets called.
         */
        effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode);
        if (req->p.use_workqueue ||
                INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
                req->p.rq = rq;
                queue_work(nvmet_wq, &req->p.work);
        } else {
                rq->end_io = nvmet_passthru_req_done;
                rq->end_io_data = req;
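        /*
         * Two completion paths: commands needing result fixups or having
         * non-trivial effects run synchronously from the nvmet_wq work
         * item above; everything else is issued asynchronously with
         * nvmet_passthru_req_done() as the end_io callback.
         */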
/*
 * We need to emulate the set host behaviour feature to ensure that any
 * behaviour requested by the target's host matches the behaviour already
 * requested by the device's host, and fail otherwise.
 */
/* in nvmet_passthru_set_host_behaviour() */
        struct nvme_ctrl *ctrl = nvmet_req_subsys(req)->passthru_ctrl;
        struct nvme_feat_host_behavior *host;

        host = kzalloc(sizeof(*host) * 2, GFP_KERNEL);
        if (!host)

        ret = nvme_get_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
                                host, sizeof(*host), NULL);

        status = nvmet_copy_from_sgl(req, 0, &host[1], sizeof(*host));

        if (memcmp(&host[0], &host[1], sizeof(host[0]))) {
                pr_warn("target host has requested different behaviour from the local host\n");

        kfree(host);
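        /*
         * The double-size allocation holds two copies: host[0] receives
         * the device's current host behavior value via Get Features,
         * host[1] the value the fabrics host is trying to set. A mismatch
         * is rejected rather than forwarded to the device.
         */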
/* in nvmet_setup_passthru_command() */
        req->p.use_workqueue = false;
        req->execute = nvmet_passthru_execute_cmd;
/* in nvmet_parse_passthru_io_cmd() */
        /* Reject any commands with non-sgl flags set (i.e. fused commands) */
        if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)

        switch (req->cmd->common.opcode) {
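        /*
         * The elided cases reject the reservation opcodes with Invalid
         * Opcode: the underlying device cannot tell fabrics hosts apart,
         * so reservations are not passed through (matching the ONCS
         * masking in nvmet_passthru_override_id_ctrl()). All other I/O
         * opcodes are forwarded.
         */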
/* in nvmet_passthru_get_set_features() */
        switch (le32_to_cpu(req->cmd->features.fid)) {

                /*
                 * The Pre-Boot Software Load Count doesn't make much
                 * sense for a target to export
                 */
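        /*
         * The Pre-Boot Software Load Count is one of several host-local
         * features (e.g. async event configuration, IRQ coalescing and
         * config, the host memory buffer, the reservation masks) rejected
         * here with Invalid Field; device-scoped features fall through to
         * nvmet_setup_passthru_command() and are forwarded unchanged.
         */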
/* in nvmet_parse_passthru_admin_cmd() */
        /* Reject any commands with non-sgl flags set (i.e. fused commands) */
        if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)

        if (req->cmd->common.opcode >= nvme_admin_vendor_start)

        switch (req->cmd->common.opcode) {
        case nvme_admin_async_event:
                req->execute = nvmet_execute_async_event;

        case nvme_admin_keep_alive:
                /*
                 * Most PCIe ctrls don't support the keep alive cmd, we route
                 * keep alive to the non-passthru mode. In the future please
                 * change this code when PCIe ctrls with keep alive support
                 * become available.
                 */
                req->execute = nvmet_execute_keep_alive;

        case nvme_admin_set_features:
                switch (le32_to_cpu(req->cmd->features.fid)) {

                        req->execute = nvmet_execute_set_features;

                        req->execute = nvmet_passthru_set_host_behaviour;

        case nvme_admin_get_features:
                switch (le32_to_cpu(req->cmd->features.fid)) {

                        req->execute = nvmet_execute_get_features;

        case nvme_admin_identify:
                switch (req->cmd->identify.cns) {
                        switch (req->cmd->identify.csi) {
                                req->execute = nvmet_passthru_execute_cmd;
                                req->p.use_workqueue = true;

                        req->execute = nvmet_passthru_execute_cmd;
                        req->p.use_workqueue = true;

                        switch (req->cmd->identify.csi) {
                                req->execute = nvmet_passthru_execute_cmd;
                                req->p.use_workqueue = true;
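        /*
         * Identify is flagged with use_workqueue so the command runs
         * through nvmet_passthru_execute_cmd_work(), where the returned
         * data is rewritten by the override helpers before it reaches
         * the host.
         */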
/* in nvmet_passthru_ctrl_enable() */
        int ret = -EINVAL;

        mutex_lock(&subsys->lock);
        if (!subsys->passthru_ctrl_path)
        if (subsys->passthru_ctrl)

        if (subsys->nr_namespaces) {

        file = filp_open(subsys->passthru_ctrl_path, O_RDWR, 0);

                pr_err("failed to open nvme controller %s\n",
                       subsys->passthru_ctrl_path);

        old = xa_cmpxchg(&passthru_subsystems, ctrl->instance, NULL,
                         subsys, GFP_KERNEL);

        subsys->passthru_ctrl = ctrl;
        subsys->ver = ctrl->vs;

        if (subsys->ver < NVME_VS(1, 2, 1)) {
                pr_warn("nvme controller version is too old: %llu.%llu.%llu, advertising 1.2.1 instead\n",
                        NVME_MAJOR(subsys->ver), NVME_MINOR(subsys->ver),
                        NVME_TERTIARY(subsys->ver));
                subsys->ver = NVME_VS(1, 2, 1);
        }

        __module_get(subsys->passthru_ctrl->ops->module);

        mutex_unlock(&subsys->lock);
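        /*
         * For reference, the path consumed above is set through configfs
         * before enabling; a minimal sketch, assuming a subsystem named
         * "testnqn" (an arbitrary example) already exists:
         *
         *   echo -n /dev/nvme1 > \
         *       /sys/kernel/config/nvmet/subsystems/testnqn/passthru/device_path
         *   echo 1 > /sys/kernel/config/nvmet/subsystems/testnqn/passthru/enable
         */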
/* in __nvmet_passthru_ctrl_disable() */
        if (subsys->passthru_ctrl) {
                xa_erase(&passthru_subsystems, subsys->passthru_ctrl->instance);
                module_put(subsys->passthru_ctrl->ops->module);
                nvme_put_ctrl(subsys->passthru_ctrl);
        }

        subsys->passthru_ctrl = NULL;
        subsys->ver = NVMET_DEFAULT_VS;
/* in nvmet_passthru_ctrl_disable() */
        mutex_lock(&subsys->lock);
        __nvmet_passthru_ctrl_disable(subsys);
        mutex_unlock(&subsys->lock);
/* in nvmet_passthru_subsys_free() */
        mutex_lock(&subsys->lock);
        __nvmet_passthru_ctrl_disable(subsys);
        mutex_unlock(&subsys->lock);
        kfree(subsys->passthru_ctrl_path);