// SPDX-License-Identifier: GPL-2.0
/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)
#define per_cpu_cacheinfo_idx(cpu, idx)		\
				(per_cpu_cacheinfo(cpu) + (idx))

/* Set if no cache information is found in DT/ACPI. */
static bool use_arch_info;

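/* Return the cpu_cacheinfo (levels, leaves and info_list) for @cpu. */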
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}

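/*
 * Two leaves are shared if both carry a valid cache ID and the IDs match,
 * or, failing that, if both reference the same DT/ACPI firmware node.
 */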
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non DT/ACPI systems, assume unique level 1 caches,
	 * system-wide shared caches for all other levels.
	 */
	if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI)) ||
	    use_arch_info)
		return (this_leaf->level != 1) && (sib_leaf->level != 1);

	if ((sib_leaf->attributes & CACHE_ID) &&
	    (this_leaf->attributes & CACHE_ID))
		return sib_leaf->id == this_leaf->id;

	return sib_leaf->fw_token == this_leaf->fw_token;
}

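/*
 * The last-level cache leaf is only usable if it carries something to
 * compare against: a cache ID or a firmware node pointer.
 */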
bool last_level_cache_is_valid(unsigned int cpu)
{
	struct cacheinfo *llc;

	if (!cache_leaves(cpu))
		return false;

	llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);

	return (llc->attributes & CACHE_ID) || !!llc->fw_token;
}

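/* True if @cpu_x and @cpu_y have valid LLC leaves that compare as shared. */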
bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y)
{
	struct cacheinfo *llc_x, *llc_y;

	if (!last_level_cache_is_valid(cpu_x) ||
	    !last_level_cache_is_valid(cpu_y))
		return false;

	llc_x = per_cpu_cacheinfo_idx(cpu_x, cache_leaves(cpu_x) - 1);
	llc_y = per_cpu_cacheinfo_idx(cpu_y, cache_leaves(cpu_y) - 1);

	return cache_leaves_are_shared(llc_x, llc_y);
}

#ifdef CONFIG_OF

static bool of_check_cache_nodes(struct device_node *np);

/* OF properties to query for a given cache type */
struct cache_type_info {
	const char *size_prop;
	const char *line_size_props[2];
	const char *nr_sets_prop;
};

static const struct cache_type_info cache_type_info[] = {
	{
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	}, {
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	}, {
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};

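/*
 * Map an enum cache_type onto its row in cache_type_info[]: unified caches
 * use the plain "cache-*" properties at index 0, while CACHE_TYPE_INST (1)
 * and CACHE_TYPE_DATA (2) index the "i-cache-*" and "d-cache-*" rows
 * directly.
 */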
static inline int get_cacheinfo_idx(enum cache_type type)
{
	if (type == CACHE_TYPE_UNIFIED)
		return 0;
	return type;
}

static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].size_prop;

	of_property_read_u32(np, propname, &this_leaf->size);
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf,
				struct device_node *np)
{
	int i, lim, ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

	for (i = 0; i < lim; i++) {
		int ret;
		u32 line_size;
		const char *propname;

		propname = cache_type_info[ct_idx].line_size_props[i];
		ret = of_property_read_u32(np, propname, &line_size);
		if (!ret) {
			this_leaf->coherency_line_size = line_size;
			break;
		}
	}
}

static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].nr_sets_prop;

	of_property_read_u32(np, propname, &this_leaf->number_of_sets);
}

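/*
 * Derive the number of ways as size / (nr_sets * line_size). A "cache-sets"
 * value of 1 denotes a fully associative cache, in which case ways is left
 * as 0.
 */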
static void cache_associativity(struct cacheinfo *this_leaf)
{
	unsigned int line_size = this_leaf->coherency_line_size;
	unsigned int nr_sets = this_leaf->number_of_sets;
	unsigned int size = this_leaf->size;

	/*
	 * If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (!(nr_sets == 1) && (nr_sets > 0 && size > 0 && line_size > 0))
		this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}

static bool cache_node_is_unified(struct cacheinfo *this_leaf,
				  struct device_node *np)
{
	return of_property_read_bool(np, "cache-unified");
}

static void cache_of_set_props(struct cacheinfo *this_leaf,
			       struct device_node *np)
{
	/*
	 * init_cache_level must setup the cache level correctly
	 * overriding the architecturally specified levels, so
	 * if type is NONE at this stage, it should be unified
	 */
	if (this_leaf->type == CACHE_TYPE_NOCACHE &&
	    cache_node_is_unified(this_leaf, np))
		this_leaf->type = CACHE_TYPE_UNIFIED;
	cache_size(this_leaf, np);
	cache_get_line_size(this_leaf, np);
	cache_nr_sets(this_leaf, np);
	cache_associativity(this_leaf);
}

static int cache_setup_of_node(unsigned int cpu)
{
	struct cacheinfo *this_leaf;
	unsigned int index = 0;

	struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	if (!of_check_cache_nodes(np))
		return -ENOENT;

	while (index < cache_leaves(cpu)) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
		if (this_leaf->level != 1) {
			struct device_node *prev __free(device_node) = np;
			np = of_find_next_cache_node(np);
			if (!np)
				break;
		}
		cache_of_set_props(this_leaf, np);
		this_leaf->fw_token = np;
		index++;
	}

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}

static bool of_check_cache_nodes(struct device_node *np)
{
	if (of_property_present(np, "cache-size")   ||
	    of_property_present(np, "i-cache-size") ||
	    of_property_present(np, "d-cache-size") ||
	    of_property_present(np, "cache-unified"))
		return true;

	struct device_node *next __free(device_node) = of_find_next_cache_node(np);
	if (next)
		return true;

	return false;
}

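/*
 * Count one leaf per cache-size property present on the node. If none is
 * present, fall back on "cache-unified": one leaf if unified, otherwise
 * assume a split instruction/data pair.
 */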
static int of_count_cache_leaves(struct device_node *np)
{
	unsigned int leaves = 0;

	if (of_property_read_bool(np, "cache-size"))
		++leaves;
	if (of_property_read_bool(np, "i-cache-size"))
		++leaves;
	if (of_property_read_bool(np, "d-cache-size"))
		++leaves;

	if (!leaves) {
		/* The '[i-|d-|]cache-size' property is required, but
		 * if absent, fallback on the 'cache-unified' property.
		 */
		if (of_property_read_bool(np, "cache-unified"))
			return 1;
		else
			return 2;
	}

	return leaves;
}

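/*
 * Walk the chain of DT cache nodes starting at the CPU node, accumulating
 * the leaf count and tracking the deepest "cache-level" value seen. Levels
 * must strictly increase along the chain.
 */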
int init_of_cache_level(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);
	unsigned int levels = 0, leaves, level;

	if (!of_check_cache_nodes(np))
		return -ENOENT;

	leaves = of_count_cache_leaves(np);
	if (leaves > 0)
		levels = 1;

	while (1) {
		struct device_node *prev __free(device_node) = np;
		np = of_find_next_cache_node(np);
		if (!np)
			break;

		if (!of_device_is_compatible(np, "cache"))
			return -EINVAL;
		if (of_property_read_u32(np, "cache-level", &level))
			return -EINVAL;
		if (level <= levels)
			return -EINVAL;

		leaves += of_count_cache_leaves(np);
		levels = level;
	}

	this_cpu_ci->num_levels = levels;
	this_cpu_ci->num_leaves = leaves;

	return 0;
}

#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
int init_of_cache_level(unsigned int cpu) { return 0; }
#endif

int __weak cache_setup_acpi(unsigned int cpu)
{
	return -ENOTSUPP;
}

unsigned int coherency_max_size;

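/*
 * Populate the cache properties from whichever firmware interface describes
 * this system: DT when a populated tree exists, ACPI otherwise.
 */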
static int cache_setup_properties(unsigned int cpu)
{
	int ret = 0;

	if (of_have_populated_dt())
		ret = cache_setup_of_node(cpu);
	else if (!acpi_disabled)
		ret = cache_setup_acpi(cpu);

	// Assume there is no cache information available in DT/ACPI from now on.
	if (ret && use_arch_cache_info())
		use_arch_info = true;

	return ret;
}

static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index, sib_index;
	int ret = 0;

	if (this_cpu_ci->cpu_map_populated)
		return 0;

	/*
	 * skip setting up cache properties if LLC is valid, just need
	 * to update the shared cpu_map if the cache attributes were
	 * populated early before all the cpus are brought online
	 */
	if (!last_level_cache_is_valid(cpu) && !use_arch_info) {
		ret = cache_setup_properties(cpu);
		if (ret)
			return ret;
	}

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = per_cpu_cacheinfo_idx(cpu, index);

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */
			for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) {
				sib_leaf = per_cpu_cacheinfo_idx(i, sib_index);

				/*
				 * Comparing cache IDs only makes sense if the leaves
				 * belong to the same cache level of same type. Skip
				 * the check if level and type do not match.
				 */
				if (sib_leaf->level != this_leaf->level ||
				    sib_leaf->type != this_leaf->type)
					continue;

				if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
					cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
					cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
					break;
				}
			}
		}
		/* record the maximum cache line size */
		if (this_leaf->coherency_line_size > coherency_max_size)
			coherency_max_size = this_leaf->coherency_line_size;
	}

	/* shared_cpu_map is now populated for the cpu */
	this_cpu_ci->cpu_map_populated = true;
	return 0;
}

static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index, sib_index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci =
						get_cpu_cacheinfo(sibling);

			if (sibling == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */

			for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) {
				sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index);

				/*
				 * Comparing cache IDs only makes sense if the leaves
				 * belong to the same cache level of same type. Skip
				 * the check if level and type do not match.
				 */
				if (sib_leaf->level != this_leaf->level ||
				    sib_leaf->type != this_leaf->type)
					continue;

				if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
					cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
					cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
					break;
				}
			}
		}
	}

	/* cpu is no longer populated in the shared map */
	this_cpu_ci->cpu_map_populated = false;
}

static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);
}

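/*
 * Weak hooks for the architecture to override: early_cache_level() provides
 * a provisional level/leaf count before the firmware tables are parsed,
 * while init_cache_level() and populate_cache_leaves() fill in the final
 * topology and per-leaf attributes.
 */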
int __weak early_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}

static inline
int allocate_cache_info(int cpu)
{
	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_ATOMIC);
	if (!per_cpu_cacheinfo(cpu)) {
		cache_leaves(cpu) = 0;
		return -ENOMEM;
	}

	return 0;
}

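/*
 * Discover the number of cache levels and leaves early, from the ACPI PPTT
 * or DT, and allocate the info_list; fall back on the architecture's
 * provisional early_cache_level() when firmware describes nothing.
 */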
int fetch_cache_info(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int levels = 0, split_levels = 0;
	int ret;

	if (acpi_disabled) {
		ret = init_of_cache_level(cpu);
	} else {
		ret = acpi_get_cache_info(cpu, &levels, &split_levels);
		if (!ret) {
			this_cpu_ci->num_levels = levels;
			/*
			 * This assumes that:
			 * - there cannot be any split caches (data/instruction)
			 *   above a unified cache
			 * - data/instruction caches come by pair
			 */
			this_cpu_ci->num_leaves = levels + split_levels;
		}
	}

	if (ret || !cache_leaves(cpu)) {
		ret = early_cache_level(cpu);
		if (ret)
			return ret;

		if (!cache_leaves(cpu))
			return -ENOENT;

		this_cpu_ci->early_ci_levels = true;
	}

	return allocate_cache_info(cpu);
}

static inline int init_level_allocate_ci(unsigned int cpu)
{
	unsigned int early_leaves = cache_leaves(cpu);

	/* Since early initialization/allocation of the cacheinfo is allowed
	 * via fetch_cache_info() and this also gets called as CPU hotplug
	 * callbacks via cacheinfo_cpu_online, the init/alloc can be skipped
	 * as it will happen only once (the cacheinfo memory is never freed).
	 * Just populate the cacheinfo. However, if the cacheinfo has been
	 * allocated early through the arch-specific early_cache_level() call,
	 * there is a chance the info is wrong (this can happen on arm64). In
	 * that case, call init_cache_level() anyway to give the arch-specific
	 * code a chance to make things right.
	 */
	if (per_cpu_cacheinfo(cpu) && !ci_cacheinfo(cpu)->early_ci_levels)
		return 0;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	/*
	 * Now that we have properly initialized the cache level info, make
	 * sure we don't try to do that again the next time we are called
	 * (e.g. as CPU hotplug callbacks).
	 */
	ci_cacheinfo(cpu)->early_ci_levels = false;

	if (cache_leaves(cpu) <= early_leaves)
		return 0;

	kfree(per_cpu_cacheinfo(cpu));
	return allocate_cache_info(cpu);
}

int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	ret = init_level_allocate_ci(cpu);
	if (ret)
		return ret;

	/*
	 * If LLC is valid the cache leaves were already populated so just go to
	 * update the cpu map.
	 */
	if (!last_level_cache_is_valid(cpu)) {
		/*
		 * populate_cache_leaves() may completely setup the cache leaves and
		 * shared_cpu_map or it may leave it partially setup.
		 */
		ret = populate_cache_leaves(cpu);
		if (ret)
			goto free_ci;
	}

	/*
	 * For systems using DT for cache hierarchy, fw_token
	 * and shared_cpu_map will be set up here only if they are
	 * not populated already
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
		goto free_ci;
	}

	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}

/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])

#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sysfs_emit(buf, "%u\n", this_leaf->object);	\
}

show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pb\n", nr_cpu_ids, mask);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pbl\n", nr_cpu_ids, mask);
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const char *output;

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		output = "Data";
		break;
	case CACHE_TYPE_INST:
		output = "Instruction";
		break;
	case CACHE_TYPE_UNIFIED:
		output = "Unified";
		break;
	default:
		return -EINVAL;
	}

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	const char *output;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		output = "ReadWriteAllocate";
	else if (ci_attr & CACHE_READ_ALLOCATE)
		output = "ReadAllocate";
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		output = "WriteAllocate";
	else
		return 0;

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sysfs_emit(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sysfs_emit(buf, "WriteBack\n");
	return n;
}

static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};

static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
		return mode;
	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}

static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* Place holder for private group */
	NULL,
};

const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = per_cpu_cacheinfo_idx(cpu, i);
		if (this_leaf->disable_sysfs)
			continue;
		if (this_leaf->type == CACHE_TYPE_NOCACHE)
			break;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}

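/*
 * Return the number of CPUs sharing @cpu's last-level cache, handing the
 * shared mask back through @map. On the offline path a sibling's mask is
 * used instead, since @cpu's own shared_cpu_map is about to be cleared.
 */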
static unsigned int cpu_map_shared_cache(bool online, unsigned int cpu,
					 cpumask_t **map)
{
	struct cacheinfo *llc, *sib_llc;
	unsigned int sibling;

	if (!last_level_cache_is_valid(cpu))
		return 0;

	llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);

	if (llc->type != CACHE_TYPE_DATA && llc->type != CACHE_TYPE_UNIFIED)
		return 0;

	if (online) {
		*map = &llc->shared_cpu_map;
		return cpumask_weight(*map);
	}

	/* shared_cpu_map of offlined CPU will be cleared, so use sibling map */
	for_each_cpu(sibling, &llc->shared_cpu_map) {
		if (sibling == cpu || !last_level_cache_is_valid(sibling))
			continue;
		sib_llc = per_cpu_cacheinfo_idx(sibling, cache_leaves(sibling) - 1);
		*map = &sib_llc->shared_cpu_map;
		return cpumask_weight(*map);
	}

	return 0;
}

/*
 * Calculate the size of the per-CPU data cache slice.  This can be
 * used to estimate the size of the data cache slice that can be used
 * by one CPU under ideal circumstances.  UNIFIED caches are counted
 * in addition to DATA caches.  So, please consider code cache usage
 * when using the result.
 *
 * Because the cache inclusive/non-inclusive information isn't
 * available, we just use the size of the per-CPU slice of LLC to make
 * the result more predictable across architectures.
 */
static void update_per_cpu_data_slice_size_cpu(unsigned int cpu)
{
	struct cpu_cacheinfo *ci;
	struct cacheinfo *llc;
	unsigned int nr_shared;

	if (!last_level_cache_is_valid(cpu))
		return;

	ci = ci_cacheinfo(cpu);
	llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);

	if (llc->type != CACHE_TYPE_DATA && llc->type != CACHE_TYPE_UNIFIED)
		return;

	nr_shared = cpumask_weight(&llc->shared_cpu_map);
	if (nr_shared)
		ci->per_cpu_data_slice_size = llc->size / nr_shared;
}

static void update_per_cpu_data_slice_size(bool cpu_online, unsigned int cpu,
					   cpumask_t *cpu_map)
{
	unsigned int icpu;

	for_each_cpu(icpu, cpu_map) {
		if (!cpu_online && icpu == cpu)
			continue;
		update_per_cpu_data_slice_size_cpu(icpu);
		setup_pcp_cacheinfo(icpu);
	}
}

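/*
 * CPU hotplug callbacks: on online, (re)build the cacheinfo and its sysfs
 * nodes and refresh the data slice size of all CPUs sharing the LLC; on
 * pre-down, tear both down again.
 */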
static int cacheinfo_cpu_online(unsigned int cpu)
{
	int rc = detect_cache_attributes(cpu);
	cpumask_t *cpu_map;

	if (rc)
		return rc;
	rc = cache_add_dev(cpu);
	if (rc)
		goto err;
	if (cpu_map_shared_cache(true, cpu, &cpu_map))
		update_per_cpu_data_slice_size(true, cpu, cpu_map);
	return 0;
err:
	free_cache_attributes(cpu);
	return rc;
}

static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
	cpumask_t *cpu_map;
	unsigned int nr_shared;

	nr_shared = cpu_map_shared_cache(false, cpu, &cpu_map);
	if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
		cpu_cache_sysfs_exit(cpu);

	free_cache_attributes(cpu);
	if (nr_shared > 1)
		update_per_cpu_data_slice_size(false, cpu, cpu_map);
	return 0;
}

static int __init cacheinfo_sysfs_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE,
				 "base/cacheinfo:online",
				 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);