Lines Matching +full:ctrl +full:- +full:ids
// SPDX-License-Identifier: GPL-2.0
static int nvme_set_max_append(struct nvme_ctrl *ctrl)
{
        /* ... allocate the ZNS Identify Controller buffer ... */
        if (!id)
                return -ENOMEM;
        /* ... */
        status = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
        /* ... */
        if (id->zasl)
                ctrl->max_zone_append = 1 << (id->zasl + 3);
        else
                ctrl->max_zone_append = ctrl->max_hw_sectors;
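These fragments appear to come from the Linux NVMe ZNS driver (drivers/nvme/host/zns.c). The ZASL field of the ZNS Identify Controller data reports the maximum Zone Append payload as a power of two of the controller's minimum memory page size; the 1 << (zasl + 3) above converts that to 512-byte sectors on the assumption of a 4 KiB minimum page. A minimal userspace sketch of that conversion, with hypothetical values:

/* Sketch only, not a kernel helper: convert a ZASL value to 512-byte
 * sectors, assuming MPSMIN = 0 (4 KiB pages), which is what the "+ 3"
 * shift encodes (4096 / 512 = 8 = 1 << 3). A ZASL of 0 means the limit
 * follows MDTS, which the driver covers via max_hw_sectors. */
#include <stdio.h>

static unsigned int zasl_to_sectors(unsigned int zasl)
{
        return 1u << (zasl + 3);        /* 2^zasl pages * 8 sectors per page */
}

int main(void)
{
        for (unsigned int zasl = 1; zasl <= 5; zasl++)  /* hypothetical values */
                printf("zasl=%u -> %u sectors (%u KiB)\n",
                       zasl, zasl_to_sectors(zasl), zasl_to_sectors(zasl) / 2);
        return 0;
}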
/* in nvme_query_zone_info(): */
        struct nvme_effects_log *log = ns->head->effects;
        /* ... */

        /* the driver requires Zone Append support for writable zoned namespaces */
        if ((le32_to_cpu(log->iocs[nvme_cmd_zone_append]) &
                        NVME_CMD_EFFECTS_CSUPP)) {
                if (test_and_clear_bit(NVME_NS_FORCE_RO, &ns->flags))
                        dev_warn(ns->ctrl->device,
                                 "Zone Append supported for zoned namespace:%d. Remove read-only mode\n",
                                 ns->head->ns_id);
        } else {
                set_bit(NVME_NS_FORCE_RO, &ns->flags);
                dev_warn(ns->ctrl->device,
                         "Zone Append not supported for zoned namespace:%d. Forcing to read-only mode\n",
                         ns->head->ns_id);
        }
        /* query the controller-wide Zone Append limit once, on first use */
        if (!ns->ctrl->max_zone_append) {
                status = nvme_set_max_append(ns->ctrl);
                /* ... */
        }
        /* ... allocate the ZNS Identify Namespace buffer ... */
        if (!id)
                return -ENOMEM;
        c.identify.nsid = cpu_to_le32(ns->head->ns_id);
        /* ... */
        status = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, id, sizeof(*id));
        /* ... */

        /* zone operation characteristics the driver does not handle */
        if (id->zoc) {
                dev_warn(ns->ctrl->device, /* ... */
                         le16_to_cpu(id->zoc), ns->head->ns_id);
                status = -ENODEV;
                /* ... */
        }

        zi->zone_size = le64_to_cpu(id->lbafe[lbaf].zsze);
        if (!is_power_of_2(zi->zone_size)) {
                dev_warn(ns->ctrl->device, /* ... */
                         zi->zone_size, ns->head->ns_id);
                status = -ENODEV;
                /* ... */
        }
        zi->max_open_zones = le32_to_cpu(id->mor) + 1;
        zi->max_active_zones = le32_to_cpu(id->mar) + 1;
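For reference, the fields filled above follow the ZNS Identify Namespace data: MOR and MAR are 0's based, hence the +1, and this driver requires a power-of-two zone size. The structure being populated lives in the driver's private nvme.h; the sketch below is an assumed shape derived only from the usage above, not the actual definition.

/* Assumed shape of the zone info filled by nvme_query_zone_info(); the
 * real definition is in the driver's private header and may differ.
 * u64 comes from <linux/types.h>. */
struct nvme_zone_info {
        u64             zone_size;              /* zone size in logical blocks (zsze) */
        unsigned int    max_open_zones;         /* MOR + 1 (MOR is 0's based) */
        unsigned int    max_active_zones;       /* MAR + 1 (MAR is 0's based) */
};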
/* in nvme_update_zone_info(): */
        lim->features |= BLK_FEAT_ZONED;
        lim->max_open_zones = zi->max_open_zones;
        lim->max_active_zones = zi->max_active_zones;
        lim->max_zone_append_sectors = ns->ctrl->max_zone_append;
        lim->chunk_sectors = ns->head->zsze =
                nvme_lba_to_sect(ns->head, zi->zone_size);
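chunk_sectors and zsze are kept in 512-byte sectors, so the zone size in logical blocks is converted through nvme_lba_to_sect(). A standalone sketch of that conversion, assuming lba_shift is log2 of the logical block size and at least 9:

/* Sketch of the LBA-to-sector conversion assumed above; hypothetical
 * standalone version, not the kernel helper. */
#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT 9

static uint64_t lba_to_sect(uint64_t lba, unsigned int lba_shift)
{
        return lba << (lba_shift - SECTOR_SHIFT);
}

int main(void)
{
        /* hypothetical namespace: 4 KiB LBAs (shift 12), 65536-LBA zones */
        uint64_t chunk_sectors = lba_to_sect(65536, 12);

        printf("zone size: %llu sectors = %llu MiB\n",
               (unsigned long long)chunk_sectors,
               (unsigned long long)(chunk_sectors >> 11));
        return 0;
}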
/* in nvme_zns_alloc_report_buffer(): */
        struct request_queue *q = ns->disk->queue;
        nr_zones = min_t(unsigned int, nr_zones,
                         get_capacity(ns->disk) >> ilog2(ns->head->zsze));
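The report buffer holds one struct nvme_zone_report header plus one zone descriptor per zone, clamped to what a single command can transfer; nr_zones is first capped above at the number of zones remaining on the disk. Under memory pressure an allocator of this kind typically retries with progressively smaller buffers, as in the sketch below (a generic pattern in the spirit of this function, not a verbatim copy; alloc_report_buf is a hypothetical name).

/* Sketch: shrink-until-it-fits allocation for a zone report buffer.
 * Kernel context assumed; needs <linux/vmalloc.h>. */
static void *alloc_report_buf(size_t bufsize, size_t min_bufsize,
                              size_t *buflen)
{
        void *buf;

        while (bufsize >= min_bufsize) {
                buf = __vmalloc(bufsize, GFP_KERNEL | __GFP_NORETRY);
                if (buf) {
                        *buflen = bufsize;
                        return buf;
                }
                bufsize >>= 1;  /* halve and retry on allocation failure */
        }
        return NULL;
}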
static int nvme_zone_parse_entry(struct nvme_ctrl *ctrl,
                                 /* ... remaining parameters ... */)
{
        /* ... */
        if ((entry->zt & 0xf) != NVME_ZONE_TYPE_SEQWRITE_REQ) {
                dev_err(ctrl->device, "invalid zone type %#x\n",
                        entry->zt);
                return -EINVAL;
        }
        /* ... */
        zone.cond = entry->zs >> 4;
        zone.len = head->zsze;
        zone.capacity = nvme_lba_to_sect(head, le64_to_cpu(entry->zcap));
        zone.start = nvme_lba_to_sect(head, le64_to_cpu(entry->zslba));
        /* ... */
        zone.wp = nvme_lba_to_sect(head, le64_to_cpu(entry->wp));
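A ZNS zone descriptor packs the zone type into the low nibble of zt and the zone state into the high nibble of zs, which is what the masking and shifting above rely on; the elided lines before zone.wp presumably special-case zones whose write pointer is not meaningful (for example full zones). A tiny standalone illustration of the nibble decoding, with hypothetical values:

/* Sketch of the zt/zs nibble layout assumed by the parsing above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint8_t zt = 0x02;      /* low nibble: zone type (2 = seq. write required) */
        uint8_t zs = 0xe0;      /* high nibble: zone state (0xe = full, hypothetical) */

        printf("type=%#x state=%#x\n",
               (unsigned int)(zt & 0xf), (unsigned int)(zs >> 4));
        return 0;
}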
/* in nvme_ns_report_zones(): */
        if (ns->head->ids.csi != NVME_CSI_ZNS)
                return -EINVAL;
        /* ... allocate the report buffer, or return -ENOMEM ... */
        c.zmr.nsid = cpu_to_le32(ns->head->ns_id);
        /* ... finish setting up the Zone Management Receive command ... */
        sector &= ~(ns->head->zsze - 1);
        while (zone_idx < nr_zones && sector < get_capacity(ns->disk)) {
                c.zmr.slba = cpu_to_le64(nvme_sect_to_lba(ns->head, sector));
                ret = nvme_submit_sync_cmd(ns->queue, &c, report, buflen);
                if (ret) {
                        if (ret > 0)
                                ret = -EIO;
                        /* ... */
                }
                nz = min((unsigned int)le64_to_cpu(report->nr_zones), nr_zones);
                /* ... for each returned descriptor ... */
                ret = nvme_zone_parse_entry(ns->ctrl, ns->head,
                                            &report->entries[i],
                                            /* ... */);
                /* ... */
                sector += ns->head->zsze * nz;
        }
        /* ... no zones reported at all ... */
        ret = -EINVAL;
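Each descriptor parsed by nvme_zone_parse_entry() is handed to the report_zones_cb callback that the block layer passed in (this path is reached through the gendisk's report_zones method, for example via blkdev_report_zones()). A minimal callback of that shape, purely for illustration (dump_zone_cb is a hypothetical name):

/* Hypothetical report_zones_cb: called once per zone with the translated
 * blk_zone. Returning non-zero stops the report. Kernel context assumed. */
#include <linux/blkdev.h>

static int dump_zone_cb(struct blk_zone *zone, unsigned int idx, void *data)
{
        pr_info("zone %u: start %llu len %llu wp %llu cond %u\n",
                idx,
                (unsigned long long)zone->start,
                (unsigned long long)zone->len,
                (unsigned long long)zone->wp,
                (unsigned int)zone->cond);
        return 0;
}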
/* in nvme_setup_zone_mgmt_send(): */
        c->zms.opcode = nvme_cmd_zone_mgmt_send;
        c->zms.nsid = cpu_to_le32(ns->head->ns_id);
        c->zms.slba = cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req)));
        c->zms.zsa = action;
        /* ... */
        if (req_op(req) == REQ_OP_ZONE_RESET_ALL)
                c->zms.select_all = 1;
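This helper only fills in the Zone Management Send command; the action itself is chosen by the caller in the command setup path. The sketch below outlines how the block-layer zone operations map onto ZNS send actions in that caller; treat the exact shape as an assumption rather than a verbatim excerpt.

/* Sketch: block-layer zone ops dispatched to nvme_setup_zone_mgmt_send()
 * with the matching ZNS zone send action (fragment of a larger switch in
 * the command setup path; not a verbatim copy). */
switch (req_op(req)) {
case REQ_OP_ZONE_RESET_ALL:     /* select_all is then set by the helper above */
case REQ_OP_ZONE_RESET:
        ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET);
        break;
case REQ_OP_ZONE_OPEN:
        ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN);
        break;
case REQ_OP_ZONE_CLOSE:
        ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE);
        break;
case REQ_OP_ZONE_FINISH:
        ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH);
        break;
}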