/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * libnvdimm - Non-volatile-memory Devices Subsystem
 *
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#ifndef __LIBNVDIMM_H__
#define __LIBNVDIMM_H__
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/types.h>
#include <linux/uuid.h>
#include <linux/spinlock.h>
#include <linux/bio.h>

struct badrange_entry {
	u64 start;
	u64 length;
	struct list_head list;
};

struct badrange {
	struct list_head list;
	spinlock_t lock;
};

enum {
	/* unarmed memory devices may not persist writes */
	NDD_UNARMED = 1,
	/* locked memory devices should not be accessed */
	NDD_LOCKED = 2,
	/* memory under security wipes should not be accessed */
	NDD_SECURITY_OVERWRITE = 3,
	/* tracking whether or not there is a pending device reference */
	NDD_WORK_PENDING = 4,
	/* dimm supports namespace labels */
	NDD_LABELING = 6,
	/*
	 * dimm contents have changed requiring invalidation of CPU caches prior
	 * to activation of a region that includes this device
	 */
	NDD_INCOHERENT = 7,

	/* dimm provider wants synchronous registration by __nvdimm_create() */
	NDD_REGISTER_SYNC = 8,

	/* need to set a limit somewhere, but yes, this is likely overkill */
	ND_IOCTL_MAX_BUFLEN = SZ_4M,
	ND_CMD_MAX_ELEM = 5,
	ND_CMD_MAX_ENVELOPE = 256,
	ND_MAX_MAPPINGS = 32,

	/* region flag indicating to direct-map persistent memory by default */
	ND_REGION_PAGEMAP = 0,
	/*
	 * Platform ensures entire CPU store data path is flushed to pmem on
	 * system power loss.
	 */
	ND_REGION_PERSIST_CACHE = 1,
	/*
	 * Platform provides mechanisms to automatically flush outstanding
	 * write data from memory controller to pmem on system power loss.
	 * (ADR)
	 */
	ND_REGION_PERSIST_MEMCTRL = 2,

	/* Platform provides asynchronous flush mechanism */
	ND_REGION_ASYNC = 3,

	/* Region was created by CXL subsystem */
	ND_REGION_CXL = 4,

	/* mark newly adjusted resources as requiring a label update */
	DPA_RESOURCE_ADJUSTED = 1 << 0,
};
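
/*
 * Example (illustrative, not part of the upstream header): the NDD_*
 * values above are bit numbers, so a DIMM provider typically builds
 * the 'flags' argument for nvdimm_create() below with set_bit(). The
 * predicates here are hypothetical.
 *
 *	unsigned long flags = 0;
 *
 *	if (dimm_supports_labels)
 *		set_bit(NDD_LABELING, &flags);
 *	if (dimm_lost_persistence)
 *		set_bit(NDD_UNARMED, &flags);
 */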

struct nvdimm;
struct nvdimm_bus_descriptor;
typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len, int *cmd_rc);

struct device_node;
struct nvdimm_bus_descriptor {
	const struct attribute_group **attr_groups;
	unsigned long cmd_mask;
	unsigned long dimm_family_mask;
	unsigned long bus_family_mask;
	struct module *module;
	char *provider_name;
	struct device_node *of_node;
	ndctl_fn ndctl;
	int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc);
	int (*clear_to_send)(struct nvdimm_bus_descriptor *nd_desc,
			struct nvdimm *nvdimm, unsigned int cmd, void *data);
	const struct nvdimm_bus_fw_ops *fw_ops;
};
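
/*
 * Example (illustrative sketch): a hypothetical provider fills out a
 * descriptor and registers a bus with nvdimm_bus_register() declared
 * below. 'my_ndctl' and "my_provider" are assumptions, not kernel
 * symbols.
 *
 *	static struct nvdimm_bus_descriptor my_desc = {
 *		.provider_name = "my_provider",
 *		.module = THIS_MODULE,
 *		.ndctl = my_ndctl,
 *	};
 *
 *	nvdimm_bus = nvdimm_bus_register(&pdev->dev, &my_desc);
 *	if (!nvdimm_bus)
 *		return -ENOMEM;
 */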

struct nd_cmd_desc {
	int in_num;
	int out_num;
	u32 in_sizes[ND_CMD_MAX_ELEM];
	int out_sizes[ND_CMD_MAX_ELEM];
};

struct nd_interleave_set {
	/* v1.1 definition of the interleave-set-cookie algorithm */
	u64 cookie1;
	/* v1.2 definition of the interleave-set-cookie algorithm */
	u64 cookie2;
	/* compatibility with initial buggy Linux implementation */
	u64 altcookie;

	guid_t type_guid;
};

struct nd_mapping_desc {
	struct nvdimm *nvdimm;
	u64 start;
	u64 size;
	int position;
};

struct nd_region;
struct nd_region_desc {
	struct resource *res;
	struct nd_mapping_desc *mapping;
	u16 num_mappings;
	const struct attribute_group **attr_groups;
	struct nd_interleave_set *nd_set;
	void *provider_data;
	int num_lanes;
	int numa_node;
	int target_node;
	unsigned long flags;
	int memregion;
	struct device_node *of_node;
	int (*flush)(struct nd_region *nd_region, struct bio *bio);
};
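
/*
 * Example (illustrative sketch): a minimal single-mapping pmem region
 * description handed to nvdimm_pmem_region_create() declared below;
 * 'res' and 'nvdimm' are assumed to already describe the backing
 * address range and DIMM.
 *
 *	struct nd_mapping_desc mapping = {
 *		.nvdimm = nvdimm,
 *		.start = 0,
 *		.size = resource_size(&res),
 *		.position = 0,
 *	};
 *	struct nd_region_desc ndr_desc = {
 *		.res = &res,
 *		.mapping = &mapping,
 *		.num_mappings = 1,
 *		.numa_node = NUMA_NO_NODE,
 *	};
 *
 *	region = nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc);
 */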

struct device;
void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags);
static inline void __iomem *devm_nvdimm_ioremap(struct device *dev,
		resource_size_t offset, size_t size)
{
	return (void __iomem *) devm_nvdimm_memremap(dev, offset, size, 0);
}
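
/*
 * Example (illustrative): mapping a device range with devres-managed
 * teardown; 'offset' is a hypothetical control-region offset.
 *
 *	void __iomem *base = devm_nvdimm_ioremap(dev, offset, SZ_4K);
 *
 *	if (!base)
 *		return -ENOMEM;
 */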

struct nvdimm_bus;

/*
 * Note that separate bits for locked + unlocked are defined so that
 * 'flags == 0' corresponds to an error / not-supported state.
 */
enum nvdimm_security_bits {
	NVDIMM_SECURITY_DISABLED,
	NVDIMM_SECURITY_UNLOCKED,
	NVDIMM_SECURITY_LOCKED,
	NVDIMM_SECURITY_FROZEN,
	NVDIMM_SECURITY_OVERWRITE,
};

#define NVDIMM_PASSPHRASE_LEN		32
#define NVDIMM_KEY_DESC_LEN		22

struct nvdimm_key_data {
	u8 data[NVDIMM_PASSPHRASE_LEN];
};

enum nvdimm_passphrase_type {
	NVDIMM_USER,
	NVDIMM_MASTER,
};

struct nvdimm_security_ops {
	unsigned long (*get_flags)(struct nvdimm *nvdimm,
			enum nvdimm_passphrase_type pass_type);
	int (*freeze)(struct nvdimm *nvdimm);
	int (*change_key)(struct nvdimm *nvdimm,
			const struct nvdimm_key_data *old_data,
			const struct nvdimm_key_data *new_data,
			enum nvdimm_passphrase_type pass_type);
	int (*unlock)(struct nvdimm *nvdimm,
			const struct nvdimm_key_data *key_data);
	int (*disable)(struct nvdimm *nvdimm,
			const struct nvdimm_key_data *key_data);
	int (*erase)(struct nvdimm *nvdimm,
			const struct nvdimm_key_data *key_data,
			enum nvdimm_passphrase_type pass_type);
	int (*overwrite)(struct nvdimm *nvdimm,
			const struct nvdimm_key_data *key_data);
	int (*query_overwrite)(struct nvdimm *nvdimm);
	int (*disable_master)(struct nvdimm *nvdimm,
			      const struct nvdimm_key_data *key_data);
};
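
/*
 * Example (illustrative sketch of a get_flags() implementation): the
 * return value is a bitmap of nvdimm_security_bits, and returning 0
 * signals error / not-supported per the note above. 'my_hw_state' and
 * 'MY_HW_LOCKED' are hypothetical hardware accessors.
 *
 *	static unsigned long my_get_flags(struct nvdimm *nvdimm,
 *			enum nvdimm_passphrase_type pass_type)
 *	{
 *		if (my_hw_state(nvdimm) == MY_HW_LOCKED)
 *			return BIT(NVDIMM_SECURITY_LOCKED);
 *		return BIT(NVDIMM_SECURITY_UNLOCKED);
 *	}
 */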

enum nvdimm_fwa_state {
	NVDIMM_FWA_INVALID,
	NVDIMM_FWA_IDLE,
	NVDIMM_FWA_ARMED,
	NVDIMM_FWA_BUSY,
	NVDIMM_FWA_ARM_OVERFLOW,
};

enum nvdimm_fwa_trigger {
	NVDIMM_FWA_ARM,
	NVDIMM_FWA_DISARM,
};

enum nvdimm_fwa_capability {
	NVDIMM_FWA_CAP_INVALID,
	NVDIMM_FWA_CAP_NONE,
	NVDIMM_FWA_CAP_QUIESCE,
	NVDIMM_FWA_CAP_LIVE,
};

enum nvdimm_fwa_result {
	NVDIMM_FWA_RESULT_INVALID,
	NVDIMM_FWA_RESULT_NONE,
	NVDIMM_FWA_RESULT_SUCCESS,
	NVDIMM_FWA_RESULT_NOTSTAGED,
	NVDIMM_FWA_RESULT_NEEDRESET,
	NVDIMM_FWA_RESULT_FAIL,
};

struct nvdimm_bus_fw_ops {
	enum nvdimm_fwa_state (*activate_state)
		(struct nvdimm_bus_descriptor *nd_desc);
	enum nvdimm_fwa_capability (*capability)
		(struct nvdimm_bus_descriptor *nd_desc);
	int (*activate)(struct nvdimm_bus_descriptor *nd_desc);
};

struct nvdimm_fw_ops {
	enum nvdimm_fwa_state (*activate_state)(struct nvdimm *nvdimm);
	enum nvdimm_fwa_result (*activate_result)(struct nvdimm *nvdimm);
	int (*arm)(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arg);
};
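
/*
 * Example (illustrative sketch): a minimal nvdimm_fw_ops backing for
 * firmware activation, reducing state tracking to a hypothetical
 * per-DIMM 'armed' flag; my_activate_result() and my_arm() would be
 * analogous provider helpers.
 *
 *	static enum nvdimm_fwa_state my_activate_state(struct nvdimm *nvdimm)
 *	{
 *		struct my_dimm *d = nvdimm_provider_data(nvdimm);
 *
 *		return d->armed ? NVDIMM_FWA_ARMED : NVDIMM_FWA_IDLE;
 *	}
 *
 *	static const struct nvdimm_fw_ops my_fw_ops = {
 *		.activate_state = my_activate_state,
 *		.activate_result = my_activate_result,
 *		.arm = my_arm,
 *	};
 */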

void badrange_init(struct badrange *badrange);
int badrange_add(struct badrange *badrange, u64 addr, u64 length);
void badrange_forget(struct badrange *badrange, phys_addr_t start,
		unsigned int len);
int nvdimm_bus_add_badrange(struct nvdimm_bus *nvdimm_bus, u64 addr,
		u64 length);
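
/*
 * Example (illustrative): a provider embeds a struct badrange,
 * initializes it once, then records and later retires media errors as
 * they are discovered; the addresses are hypothetical.
 *
 *	badrange_init(&badrange);
 *	if (badrange_add(&badrange, 0x1000, 0x200))
 *		return -ENOMEM;
 *
 *	badrange_forget(&badrange, 0x1000, 0x200);
 */
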
struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
		struct nvdimm_bus_descriptor *nfit_desc);
void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus);
struct nvdimm_bus *to_nvdimm_bus(struct device *dev);
struct nvdimm_bus *nvdimm_to_bus(struct nvdimm *nvdimm);
struct nvdimm *to_nvdimm(struct device *dev);
struct nd_region *to_nd_region(struct device *dev);
struct device *nd_region_dev(struct nd_region *nd_region);
struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus);
struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus);
const char *nvdimm_name(struct nvdimm *nvdimm);
struct kobject *nvdimm_kobj(struct nvdimm *nvdimm);
unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm);
void *nvdimm_provider_data(struct nvdimm *nvdimm);
struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
		void *provider_data, const struct attribute_group **groups,
		unsigned long flags, unsigned long cmd_mask, int num_flush,
		struct resource *flush_wpq, const char *dimm_id,
		const struct nvdimm_security_ops *sec_ops,
		const struct nvdimm_fw_ops *fw_ops);
static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus,
		void *provider_data, const struct attribute_group **groups,
		unsigned long flags, unsigned long cmd_mask, int num_flush,
		struct resource *flush_wpq)
{
	return __nvdimm_create(nvdimm_bus, provider_data, groups, flags,
			cmd_mask, num_flush, flush_wpq, NULL, NULL, NULL);
}
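
/*
 * Example (illustrative): registering a DIMM that has no security or
 * firmware-activation support via the nvdimm_create() wrapper above;
 * 'my_dimm', 'flags', and 'cmd_mask' are hypothetical provider state.
 *
 *	nvdimm = nvdimm_create(nvdimm_bus, my_dimm, NULL, flags,
 *			cmd_mask, 0, NULL);
 *	if (!nvdimm)
 *		return -ENOMEM;
 */
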
void nvdimm_delete(struct nvdimm *nvdimm);
void nvdimm_region_delete(struct nd_region *nd_region);

const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd);
const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd);
u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
		const struct nd_cmd_desc *desc, int idx, void *buf);
u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
		const struct nd_cmd_desc *desc, int idx, const u32 *in_field,
		const u32 *out_field, unsigned long remainder);
int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count);
struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc);
struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc);
struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc);
void *nd_region_provider_data(struct nd_region *nd_region);
unsigned int nd_region_acquire_lane(struct nd_region *nd_region);
void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane);
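
/*
 * Example (illustrative): a lane obtained from nd_region_acquire_lane()
 * serializes I/O submission for callers sharing that lane and must be
 * released on every exit path; my_do_io() is a hypothetical helper.
 *
 *	unsigned int lane = nd_region_acquire_lane(nd_region);
 *	int rc = my_do_io(nd_region, lane);
 *
 *	nd_region_release_lane(nd_region, lane);
 *	return rc;
 */
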
u64 nd_fletcher64(void *addr, size_t len, bool le);
int nvdimm_flush(struct nd_region *nd_region, struct bio *bio);
int generic_nvdimm_flush(struct nd_region *nd_region);
int nvdimm_has_flush(struct nd_region *nd_region);
int nvdimm_has_cache(struct nd_region *nd_region);
int nvdimm_in_overwrite(struct nvdimm *nvdimm);
bool is_nvdimm_sync(struct nd_region *nd_region);

static inline int nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len, int *cmd_rc)
{
	struct nvdimm_bus *nvdimm_bus = nvdimm_to_bus(nvdimm);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);

	return nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, cmd_rc);
}
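
/*
 * Example (illustrative): issuing a passthrough command; the command
 * numbers and payload layouts come from uapi/linux/ndctl.h, and it is
 * assumed here that the bus implements ND_CMD_GET_CONFIG_SIZE.
 *
 *	struct nd_cmd_get_config_size cfg = { 0 };
 *	int cmd_rc, rc;
 *
 *	rc = nvdimm_ctl(nvdimm, ND_CMD_GET_CONFIG_SIZE, &cfg,
 *			sizeof(cfg), &cmd_rc);
 *	if (rc || cmd_rc)
 *		return rc ? rc : cmd_rc;
 */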

#ifdef CONFIG_ARCH_HAS_PMEM_API
#define ARCH_MEMREMAP_PMEM MEMREMAP_WB
void arch_wb_cache_pmem(void *addr, size_t size);
void arch_invalidate_pmem(void *addr, size_t size);
#else
#define ARCH_MEMREMAP_PMEM MEMREMAP_WT
static inline void arch_wb_cache_pmem(void *addr, size_t size)
{
}
static inline void arch_invalidate_pmem(void *addr, size_t size)
{
}
#endif
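
/*
 * Example (illustrative): after plain stores to a direct-mapped pmem
 * buffer, write back the CPU cache so the data reaches the persistence
 * domain before a subsequent region flush; 'pmem_addr', 'src', and
 * 'len' are hypothetical.
 *
 *	memcpy(pmem_addr, src, len);
 *	arch_wb_cache_pmem(pmem_addr, len);
 */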

#endif /* __LIBNVDIMM_H__ */