// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/memregion.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

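/*
 * nd_region_probe() - enable a region and bring up its child devices
 *
 * Activate the region, populate its badblocks list, register the
 * region's namespaces, and create the btt/pfn/dax seed devices.
 * Individual namespace registration failures are reported but do not
 * fail the probe.
 */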
static int nd_region_probe(struct device *dev)
{
	int err, rc;
	static unsigned long once;
	struct nd_region_data *ndrd;
	struct nd_region *nd_region = to_nd_region(dev);
	struct range range = {
		.start = nd_region->ndr_start,
		.end = nd_region->ndr_start + nd_region->ndr_size - 1,
	};

	if (nd_region->num_lanes > num_online_cpus()
			&& nd_region->num_lanes < num_possible_cpus()
			&& !test_and_set_bit(0, &once)) {
		dev_dbg(dev, "online cpus (%d) < concurrent i/o lanes (%d) < possible cpus (%d)\n",
				num_online_cpus(), nd_region->num_lanes,
				num_possible_cpus());
		dev_dbg(dev, "setting nr_cpus=%d may yield better libnvdimm device performance\n",
				nd_region->num_lanes);
	}

	rc = nd_region_activate(nd_region);
	if (rc)
		return rc;

	if (devm_init_badblocks(dev, &nd_region->bb))
		return -ENODEV;
	nd_region->bb_state =
		sysfs_get_dirent(nd_region->dev.kobj.sd, "badblocks");
	if (!nd_region->bb_state)
		dev_warn(dev, "'badblocks' notification disabled\n");
	nvdimm_badblocks_populate(nd_region, &nd_region->bb, &range);

	rc = nd_region_register_namespaces(nd_region, &err);
	if (rc < 0)
		return rc;

	ndrd = dev_get_drvdata(dev);
	ndrd->ns_active = rc;
	ndrd->ns_count = rc + err;

	if (rc && err && rc == err)
		return -ENODEV;

	nd_region->btt_seed = nd_btt_create(nd_region);
	nd_region->pfn_seed = nd_pfn_create(nd_region);
	nd_region->dax_seed = nd_dax_create(nd_region);
	if (err == 0)
		return 0;

	/*
	 * Given multiple namespaces per region, we do not want to
	 * disable all the successfully registered peer namespaces upon
	 * a single registration failure.  If userspace is missing a
	 * namespace that it expects it can disable/re-enable the region
	 * to retry discovery after correcting the failure.
	 * <regionX>/namespaces returns the current
	 * "<async-registered>/<total>" namespace count.
	 */
	dev_err(dev, "failed to register %d namespace%s, continuing...\n",
			err, err == 1 ? "" : "s");
	return 0;
}

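/* device_for_each_child() callback: synchronously unregister a child device */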
static int child_unregister(struct device *dev, void *data)
{
	nd_device_unregister(dev, ND_SYNC);
	return 0;
}

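/*
 * Tear down the region: unregister all child devices, clear the seed
 * pointers and driver data under the bus lock, drop the 'badblocks'
 * sysfs dirent, and flush CPU caches in case the region is secure
 * erased while disabled.
 */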
static void nd_region_remove(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	device_for_each_child(dev, NULL, child_unregister);

	/* flush attribute readers and disable */
	nvdimm_bus_lock(dev);
	nd_region->ns_seed = NULL;
	nd_region->btt_seed = NULL;
	nd_region->pfn_seed = NULL;
	nd_region->dax_seed = NULL;
	dev_set_drvdata(dev, NULL);
	nvdimm_bus_unlock(dev);

	/*
	 * Note, this assumes device_lock() context to not race
	 * nd_region_notify()
	 */
	sysfs_put(nd_region->bb_state);
	nd_region->bb_state = NULL;

	/*
	 * Try to flush caches here since a disabled region may be subject to
	 * secure erase while disabled, and previous dirty data should not be
	 * written back to a new instance of the region. This only matters on
	 * bare metal where security commands are available, so silent failure
	 * here is ok.
	 */
	if (cpu_cache_has_invalidate_memregion())
		cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
}

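/* device_for_each_child() callback: forward an nvdimm event to a child */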
static int child_notify(struct device *dev, void *data)
{
	nd_device_notify(dev, *(enum nvdimm_event *) data);
	return 0;
}

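/*
 * On NVDIMM_REVALIDATE_POISON, refresh the region's badblocks list and
 * notify any 'badblocks' sysfs pollers, then propagate the event to
 * all child devices.
 */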
static void nd_region_notify(struct device *dev, enum nvdimm_event event)
{
	if (event == NVDIMM_REVALIDATE_POISON) {
		struct nd_region *nd_region = to_nd_region(dev);

		if (is_memory(&nd_region->dev)) {
			struct range range = {
				.start = nd_region->ndr_start,
				.end = nd_region->ndr_start +
					nd_region->ndr_size - 1,
			};

			nvdimm_badblocks_populate(nd_region,
					&nd_region->bb, &range);
			if (nd_region->bb_state)
				sysfs_notify_dirent(nd_region->bb_state);
		}
	}
	device_for_each_child(dev, &event, child_notify);
}

static struct nd_device_driver nd_region_driver = {
	.probe = nd_region_probe,
	.remove = nd_region_remove,
	.notify = nd_region_notify,
	.drv = {
		.name = "nd_region",
	},
	.type = ND_DRIVER_REGION_BLK | ND_DRIVER_REGION_PMEM,
};

int __init nd_region_init(void)
{
	return nd_driver_register(&nd_region_driver);
}

void nd_region_exit(void)
{
	driver_unregister(&nd_region_driver.drv);
}

MODULE_ALIAS_ND_DEVICE(ND_DEVICE_REGION_PMEM);