/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */

#ifndef _NVMET_H
#define _NVMET_H

#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/percpu-refcount.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/uuid.h>
#include <linux/nvme.h>
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/t10-pi.h>

#define NVMET_DEFAULT_VS		NVME_VS(1, 3, 0)

#define NVMET_ASYNC_EVENTS		4
#define NVMET_ERROR_LOG_SLOTS		128
#define NVMET_NO_ERROR_LOC		((u16)-1)
#define NVMET_DEFAULT_CTRL_MODEL	"Linux"
#define NVMET_MN_MAX_SIZE		40
#define NVMET_SN_MAX_SIZE		20
#define NVMET_FR_MAX_SIZE		8

/*
 * Supported optional AENs:
 */
#define NVMET_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE)
#define NVMET_DISC_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_DISC_CHANGE)

/*
 * Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
 */
#define NVMET_AEN_CFG_ALL \
	(NVME_SMART_CRIT_SPARE | NVME_SMART_CRIT_TEMPERATURE | \
	 NVME_SMART_CRIT_RELIABILITY | NVME_SMART_CRIT_MEDIA | \
	 NVME_SMART_CRIT_VOLATILE_MEMORY | NVMET_AEN_CFG_OPTIONAL)

/* Helper macros used when the NVMe error is NVME_SC_CONNECT_INVALID_PARAM.
 * The 16-bit shift sets the IATTR bit to 1, which means the offending
 * offset starts in the data portion of the connect command.
 */
#define IPO_IATTR_CONNECT_DATA(x)	\
	(cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
#define IPO_IATTR_CONNECT_SQE(x)	\
	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
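
/*
 * Usage sketch: when a connect command carries an invalid parameter, the
 * handler can point the host at the offending field, e.g.:
 *
 *	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 *
 * With bit 16 (IATTR) set the offset refers to the connect data;
 * IPO_IATTR_CONNECT_SQE() leaves it clear for the SQE itself.
 */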

struct nvmet_ns {
	struct percpu_ref	ref;
	struct file		*bdev_file;
	struct block_device	*bdev;
	struct file		*file;
	bool			readonly;
	u32			nsid;
	u32			blksize_shift;
	loff_t			size;
	u8			nguid[16];
	uuid_t			uuid;
	u32			anagrpid;

	bool			buffered_io;
	bool			enabled;
	struct nvmet_subsys	*subsys;
	const char		*device_path;

	struct config_group	device_group;
	struct config_group	group;

	struct completion	disable_done;
	mempool_t		*bvec_pool;

	struct pci_dev		*p2p_dev;
	int			use_p2pmem;
	int			pi_type;
	int			metadata_size;
	u8			csi;
};

static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ns, group);
}

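/*
 * Returns the struct device of a block-backed namespace, or NULL when
 * the namespace is file-backed.
 */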
static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns)
{
	return ns->bdev ? disk_to_dev(ns->bdev->bd_disk) : NULL;
}

struct nvmet_cq {
	u16			qid;
	u16			size;
};

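/*
 * Per-queue submission queue state.  @ref is a percpu reference held
 * for each request in flight; the teardown path uses @confirm_done and
 * @free_done to wait for in-flight requests to drain before the queue
 * goes away.
 */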
struct nvmet_sq {
	struct nvmet_ctrl	*ctrl;
	struct percpu_ref	ref;
	u16			qid;
	u16			size;
	u32			sqhd;
	bool			sqhd_disabled;
#ifdef CONFIG_NVME_TARGET_AUTH
	bool			authenticated;
	struct delayed_work	auth_expired_work;
	u16			dhchap_tid;
	u8			dhchap_status;
	u8			dhchap_step;
	u8			*dhchap_c1;
	u8			*dhchap_c2;
	u32			dhchap_s1;
	u32			dhchap_s2;
	u8			*dhchap_skey;
	int			dhchap_skey_len;
#endif
	struct completion	free_done;
	struct completion	confirm_done;
};

struct nvmet_ana_group {
	struct config_group	group;
	struct nvmet_port	*port;
	u32			grpid;
};

static inline struct nvmet_ana_group *to_ana_group(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ana_group,
			group);
}

/**
 * struct nvmet_port -	Common structure to keep port
 *				information for the target.
 * @entry:		Entry into referrals or transport list.
 * @disc_addr:		Address information is stored in a format defined
 *				for a discovery log page entry.
 * @group:		ConfigFS group for this element's folder.
 * @priv:		Private data for the transport.
 */
struct nvmet_port {
	struct list_head		entry;
	struct nvmf_disc_rsp_page_entry	disc_addr;
	struct config_group		group;
	struct config_group		subsys_group;
	struct list_head		subsystems;
	struct config_group		referrals_group;
	struct list_head		referrals;
	struct list_head		global_entry;
	struct config_group		ana_groups_group;
	struct nvmet_ana_group		ana_default_group;
	enum nvme_ana_state		*ana_state;
	struct key			*keyring;
	void				*priv;
	bool				enabled;
	int				inline_data_size;
	int				max_queue_size;
	const struct nvmet_fabrics_ops	*tr_ops;
	bool				pi_enable;
};

static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			group);
}

static inline struct nvmet_port *ana_groups_to_port(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			ana_groups_group);
}

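/*
 * TREQ in the discovery log entry advertises the port's transport
 * security requirements; these helpers extract the secure-channel
 * bits and test whether a secure channel (e.g. TLS) is mandatory.
 */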
static inline u8 nvmet_port_disc_addr_treq_secure_channel(struct nvmet_port *port)
{
	return (port->disc_addr.treq & NVME_TREQ_SECURE_CHANNEL_MASK);
}

static inline bool nvmet_port_secure_channel_required(struct nvmet_port *port)
{
	return nvmet_port_disc_addr_treq_secure_channel(port) == NVMF_TREQ_REQUIRED;
}

struct nvmet_ctrl {
	struct nvmet_subsys	*subsys;
	struct nvmet_sq		**sqs;

	bool			reset_tbkas;

	struct mutex		lock;
	u64			cap;
	u32			cc;
	u32			csts;

	uuid_t			hostid;
	u16			cntlid;
	u32			kato;

	struct nvmet_port	*port;

	u32			aen_enabled;
	unsigned long		aen_masked;
	struct nvmet_req	*async_event_cmds[NVMET_ASYNC_EVENTS];
	unsigned int		nr_async_event_cmds;
	struct list_head	async_events;
	struct work_struct	async_event_work;

	struct list_head	subsys_entry;
	struct kref		ref;
	struct delayed_work	ka_work;
	struct work_struct	fatal_err_work;

	const struct nvmet_fabrics_ops *ops;

	__le32			*changed_ns_list;
	u32			nr_changed_ns;

	char			subsysnqn[NVMF_NQN_FIELD_LEN];
	char			hostnqn[NVMF_NQN_FIELD_LEN];

	struct device		*p2p_client;
	struct radix_tree_root	p2p_ns_map;
#ifdef CONFIG_NVME_TARGET_DEBUGFS
	struct dentry		*debugfs_dir;
#endif
	spinlock_t		error_lock;
	u64			err_counter;
	struct nvme_error_slot	slots[NVMET_ERROR_LOG_SLOTS];
	bool			pi_support;
#ifdef CONFIG_NVME_TARGET_AUTH
	struct nvme_dhchap_key	*host_key;
	struct nvme_dhchap_key	*ctrl_key;
	u8			shash_id;
	struct crypto_kpp	*dh_tfm;
	u8			dh_gid;
	u8			*dh_key;
	size_t			dh_keysize;
#endif
};

struct nvmet_subsys {
	enum nvme_subsys_type	type;

	struct mutex		lock;
	struct kref		ref;

	struct xarray		namespaces;
	unsigned int		nr_namespaces;
	u32			max_nsid;
	u16			cntlid_min;
	u16			cntlid_max;

	struct list_head	ctrls;

	struct list_head	hosts;
	bool			allow_any_host;
#ifdef CONFIG_NVME_TARGET_DEBUGFS
	struct dentry		*debugfs_dir;
#endif
	u16			max_qid;

	u64			ver;
	char			serial[NVMET_SN_MAX_SIZE];
	bool			subsys_discovered;
	char			*subsysnqn;
	bool			pi_support;

	struct config_group	group;

	struct config_group	namespaces_group;
	struct config_group	allowed_hosts_group;

	char			*model_number;
	u32			ieee_oui;
	char			*firmware_rev;

#ifdef CONFIG_NVME_TARGET_PASSTHRU
	struct nvme_ctrl	*passthru_ctrl;
	char			*passthru_ctrl_path;
	struct config_group	passthru_group;
	unsigned int		admin_timeout;
	unsigned int		io_timeout;
	unsigned int		clear_ids;
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

#ifdef CONFIG_BLK_DEV_ZONED
	u8			zasl;
#endif /* CONFIG_BLK_DEV_ZONED */
};

static inline struct nvmet_subsys *to_subsys(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys, group);
}

static inline struct nvmet_subsys *namespaces_to_subsys(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys,
			namespaces_group);
}

struct nvmet_host {
	struct config_group	group;
	u8			*dhchap_secret;
	u8			*dhchap_ctrl_secret;
	u8			dhchap_key_hash;
	u8			dhchap_ctrl_key_hash;
	u8			dhchap_hash_id;
	u8			dhchap_dhgroup_id;
};

static inline struct nvmet_host *to_host(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_host, group);
}

static inline char *nvmet_host_name(struct nvmet_host *host)
{
	return config_item_name(&host->group.cg_item);
}

struct nvmet_host_link {
	struct list_head	entry;
	struct nvmet_host	*host;
};

struct nvmet_subsys_link {
	struct list_head	entry;
	struct nvmet_subsys	*subsys;
};

struct nvmet_req;
struct nvmet_fabrics_ops {
	struct module *owner;
	unsigned int type;
	unsigned int msdbd;
	unsigned int flags;
#define NVMF_KEYED_SGLS			(1 << 0)
#define NVMF_METADATA_SUPPORTED		(1 << 1)
	void (*queue_response)(struct nvmet_req *req);
	int (*add_port)(struct nvmet_port *port);
	void (*remove_port)(struct nvmet_port *port);
	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
	void (*disc_traddr)(struct nvmet_req *req,
			struct nvmet_port *port, char *traddr);
	ssize_t (*host_traddr)(struct nvmet_ctrl *ctrl,
			char *traddr, size_t traddr_len);
	u16 (*install_queue)(struct nvmet_sq *nvme_sq);
	void (*discovery_chg)(struct nvmet_port *port);
	u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
	u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl);
};

#define NVMET_MAX_INLINE_BIOVEC	8
#define NVMET_MAX_INLINE_DATA_LEN	(NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE)

struct nvmet_req {
	struct nvme_command	*cmd;
	struct nvme_completion	*cqe;
	struct nvmet_sq		*sq;
	struct nvmet_cq		*cq;
	struct nvmet_ns		*ns;
	struct scatterlist	*sg;
	struct scatterlist	*metadata_sg;
	struct bio_vec		inline_bvec[NVMET_MAX_INLINE_BIOVEC];
	union {
		struct {
			struct bio	inline_bio;
		} b;
		struct {
			bool			mpool_alloc;
			struct kiocb		iocb;
			struct bio_vec		*bvec;
			struct work_struct	work;
		} f;
		struct {
			struct bio		inline_bio;
			struct request		*rq;
			struct work_struct	work;
			bool			use_workqueue;
		} p;
#ifdef CONFIG_BLK_DEV_ZONED
		struct {
			struct bio		inline_bio;
			struct work_struct	zmgmt_work;
		} z;
#endif /* CONFIG_BLK_DEV_ZONED */
	};
	int			sg_cnt;
	int			metadata_sg_cnt;
	/* data length as parsed from the SGL descriptor: */
	size_t			transfer_len;
	size_t			metadata_len;

	struct nvmet_port	*port;

	void (*execute)(struct nvmet_req *req);
	const struct nvmet_fabrics_ops *ops;

	struct pci_dev		*p2p_dev;
	struct device		*p2p_client;
	u16			error_loc;
	u64			error_slba;
};

#define NVMET_MAX_MPOOL_BVEC		16
extern struct kmem_cache *nvmet_bvec_cache;
extern struct workqueue_struct *buffered_io_wq;
extern struct workqueue_struct *zbd_wq;
extern struct workqueue_struct *nvmet_wq;

static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
	req->cqe->result.u32 = cpu_to_le32(result);
}

/*
 * NVMe command writes actually are DMA reads for us on the target side.
 */
static inline enum dma_data_direction
nvmet_data_dir(struct nvmet_req *req)
{
	return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}

struct nvmet_async_event {
	struct list_head	entry;
	u8			event_type;
	u8			event_info;
	u8			log_page;
};

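/*
 * Get Log Page carries a Retain Asynchronous Event (RAE) flag in
 * CDW10 bit 15; when the host leaves it clear, reading the log page
 * re-arms the corresponding AEN bit.
 */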
static inline void nvmet_clear_aen_bit(struct nvmet_req *req, u32 bn)
{
	int rae = le32_to_cpu(req->cmd->common.cdw10) & (1 << 15);

	if (!rae)
		clear_bit(bn, &req->sq->ctrl->aen_masked);
}

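/*
 * Returns true if no AEN should be sent for bit @bn: either the host
 * has not enabled it, or an event is already outstanding and the bit
 * stays masked until the host consumes it.
 */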
static inline bool nvmet_aen_bit_disabled(struct nvmet_ctrl *ctrl, u32 bn)
{
	if (!(READ_ONCE(ctrl->aen_enabled) & (1 << bn)))
		return true;
	return test_and_set_bit(bn, &ctrl->aen_masked);
}

void nvmet_get_feat_kato(struct nvmet_req *req);
void nvmet_get_feat_async_event(struct nvmet_req *req);
u16 nvmet_set_feat_kato(struct nvmet_req *req);
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
void nvmet_execute_async_event(struct nvmet_req *req);
void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl);
void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);

u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req);

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len);
bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
int nvmet_req_alloc_sgls(struct nvmet_req *req);
void nvmet_req_free_sgls(struct nvmet_req *req);

void nvmet_execute_set_features(struct nvmet_req *req);
void nvmet_execute_get_features(struct nvmet_req *req);
void nvmet_execute_keep_alive(struct nvmet_req *req);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
		u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq);

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
				       const char *hostnqn, u16 cntlid,
				       struct nvmet_req *req);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
u16 nvmet_check_ctrl_status(struct nvmet_req *req);
ssize_t nvmet_ctrl_host_traddr(struct nvmet_ctrl *ctrl,
		char *traddr, size_t traddr_len);

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);

u16 nvmet_req_find_ns(struct nvmet_req *req);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_ns_free(struct nvmet_ns *ns);

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port);
void nvmet_port_send_ana_event(struct nvmet_port *port);

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);

void nvmet_port_del_ctrls(struct nvmet_port *port,
			  struct nvmet_subsys *subsys);

int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len);
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
		size_t len);
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);

u32 nvmet_get_log_page_len(struct nvme_command *cmd);
u64 nvmet_get_log_page_offset(struct nvme_command *cmd);

extern struct list_head *nvmet_ports;
void nvmet_port_disc_changed(struct nvmet_port *port,
		struct nvmet_subsys *subsys);
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
		struct nvmet_host *host);
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page);
bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid);

#define NVMET_MIN_QUEUE_SIZE	16
#define NVMET_MAX_QUEUE_SIZE	1024
#define NVMET_NR_QUEUES		128
#define NVMET_MAX_CMD(ctrl)	(NVME_CAP_MQES((ctrl)->cap) + 1)

/*
 * Nice round number that makes a list of nsids fit into a page.
 * Should become tunable at some point in the future.
 */
#define NVMET_MAX_NAMESPACES	1024

/*
 * 0 is not a valid ANA group ID, so we start numbering at 1.
 *
 * ANA Group 1 exists without manual intervention, has namespaces assigned to it
 * by default, and is available in an optimized state through all ports.
 */
#define NVMET_MAX_ANAGRPS	128
#define NVMET_DEFAULT_ANA_GRPID	1

#define NVMET_KAS		10
#define NVMET_DISC_KATO_MS	120000

int __init nvmet_init_configfs(void);
void __exit nvmet_exit_configfs(void);

int __init nvmet_init_discovery(void);
void nvmet_exit_discovery(void);

extern struct nvmet_subsys *nvmet_disc_subsys;
extern struct rw_semaphore nvmet_config_sem;

extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
extern u64 nvmet_ana_chgcnt;
extern struct rw_semaphore nvmet_ana_sem;

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn);

int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
int nvmet_file_ns_enable(struct nvmet_ns *ns);
void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
void nvmet_file_ns_disable(struct nvmet_ns *ns);
u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
void nvmet_file_ns_revalidate(struct nvmet_ns *ns);
bool nvmet_ns_revalidate(struct nvmet_ns *ns);
u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);

bool nvmet_bdev_zns_enable(struct nvmet_ns *ns);
void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req);
void nvmet_execute_identify_ns_zns(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req);
void nvmet_bdev_execute_zone_append(struct nvmet_req *req);

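/*
 * NLB in the rw command is 0's based, so e.g. length == 7 on a
 * namespace with a 4KiB block size (blksize_shift == 12) means
 * (7 + 1) << 12 == 32KiB of data.
 */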
static inline u32 nvmet_rw_data_len(struct nvmet_req *req)
{
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}

static inline u32 nvmet_rw_metadata_len(struct nvmet_req *req)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return 0;
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) *
			req->ns->metadata_size;
}

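/* dsm.nr is 0's based as well, hence the + 1. */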
static inline u32 nvmet_dsm_len(struct nvmet_req *req)
{
	return (le32_to_cpu(req->cmd->dsm.nr) + 1) *
		sizeof(struct nvme_dsm_range);
}

static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req)
{
	return req->sq->ctrl->subsys;
}

static inline bool nvmet_is_disc_subsys(struct nvmet_subsys *subsys)
{
	return subsys->type != NVME_NQN_NVME;
}

#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys);
u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req);
static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return subsys->passthru_ctrl;
}
#else /* CONFIG_NVME_TARGET_PASSTHRU */
static inline void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
}
static inline void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
}
static inline u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return false;
}
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

static inline bool nvmet_is_passthru_req(struct nvmet_req *req)
{
	return nvmet_is_passthru_subsys(nvmet_req_subsys(req));
}

void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl);

u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
u16 nvmet_report_invalid_opcode(struct nvmet_req *req);

/*
 * Convert a 32-bit number to a 16-bit 0's based number, clamping the
 * result to at most 0xffff (e.g. 1 -> 0, 0x10000 -> 0xffff; 0 is also
 * mapped to 0 rather than wrapping).
 */
static inline __le16 to0based(u32 a)
{
	return cpu_to_le16(max(1U, min(1U << 16, a)) - 1);
}

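/*
 * PI handling only kicks in when the metadata region is exactly one
 * 8-byte T10 PI tuple; other metadata sizes are not treated as PI.
 */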
static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return false;
	return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple);
}

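/*
 * Convert between 512-byte kernel sectors and namespace LBAs.  This
 * assumes the namespace block size is at least SECTOR_SIZE, i.e.
 * blksize_shift >= SECTOR_SHIFT.
 */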
static inline __le64 nvmet_sect_to_lba(struct nvmet_ns *ns, sector_t sect)
{
	return cpu_to_le64(sect >> (ns->blksize_shift - SECTOR_SHIFT));
}

static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
{
	return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
}

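/*
 * Requests small enough to fit the preallocated inline bio_vec array
 * can skip a separate scatterlist allocation in the fast path.
 */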
static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
{
	return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
	       req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
}

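/*
 * Only put bios that were dynamically allocated; the inline bio is
 * embedded in the request and must not be freed.
 */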
static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
{
	if (bio != &req->b.inline_bio)
		bio_put(bio);
}

#ifdef CONFIG_NVME_TARGET_AUTH
void nvmet_execute_auth_send(struct nvmet_req *req);
void nvmet_execute_auth_receive(struct nvmet_req *req);
int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
		       bool set_ctrl);
int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_init(struct nvmet_sq *sq);
void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_free(struct nvmet_sq *sq);
int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id);
bool nvmet_check_auth_status(struct nvmet_req *req);
int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
			 unsigned int hash_len);
int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
			 unsigned int hash_len);
static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
{
	return ctrl->host_key != NULL;
}
int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
				u8 *buf, int buf_size);
int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
			    u8 *buf, int buf_size);
#else
static inline u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl)
{
	return 0;
}
static inline void nvmet_auth_sq_init(struct nvmet_sq *sq)
{
}
static inline void nvmet_destroy_auth(struct nvmet_ctrl *ctrl) {}
static inline void nvmet_auth_sq_free(struct nvmet_sq *sq) {}
static inline bool nvmet_check_auth_status(struct nvmet_req *req)
{
	return true;
}
static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
{
	return false;
}
static inline const char *nvmet_dhchap_dhgroup_name(u8 dhgid) { return NULL; }
#endif

#endif /* _NVMET_H */