// SPDX-License-Identifier: GPL-2.0-only
/*
 * uncore-frequency-tpmi: Uncore frequency scaling using TPMI
 *
 * Copyright (c) 2023, Intel Corporation.
 * All Rights Reserved.
 *
 * The hardware interface to read/write is basically a substitution for
 * MSRs 0x620 and 0x621: there are specific MMIO offsets and bits to
 * get/set the minimum and maximum uncore ratio, similar to the MSRs.
 * The uncore MSRs have package scope, but TPMI allows newer-generation
 * CPUs to expose multiple uncore controls at uncore-cluster level. Each
 * package can have multiple power domains, which in turn can have
 * multiple clusters.
 * Here, the number of power domains equals the number of resources in
 * this auxiliary device. There are offsets and bits to discover the
 * number of clusters and the offset of each cluster's controls.
 */
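
/*
 * Orientation note (behavior of the common uncore-frequency code, not
 * defined in this file): each uncore_freq_add_entry() call below is
 * expected to create one sysfs entry scoped by the package/domain/cluster
 * IDs filled in here; see the intel_uncore_frequency_scaling admin-guide
 * documentation for the exact directory and attribute names.
 */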

#include <linux/auxiliary_bus.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/intel_tpmi.h>

#include "uncore-frequency-common.h"

#define	UNCORE_MAJOR_VERSION		0
#define	UNCORE_MINOR_VERSION		2
#define UNCORE_ELC_SUPPORTED_VERSION	2
#define UNCORE_HEADER_INDEX		0
#define UNCORE_FABRIC_CLUSTER_OFFSET	8

/* status + control + adv_ctl1 + adv_ctl2 */
#define UNCORE_FABRIC_CLUSTER_SIZE	(4 * 8)

#define UNCORE_STATUS_INDEX		0
#define UNCORE_CONTROL_INDEX		8
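
/*
 * Summary of the MMIO layout implied by the offsets above and their use
 * in uncore_probe(): each TPMI resource (power domain) starts with a
 * header QWORD at offset 0 (version and cluster-ID mask), followed by a
 * QWORD at offset 8 holding per-cluster offsets. Each cluster's register
 * block is UNCORE_FABRIC_CLUSTER_SIZE bytes (4 QWORDs: status, control,
 * adv_ctl1, adv_ctl2), with the STATUS register at offset 0 and the
 * CONTROL register at offset 8 within the block.
 */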

#define UNCORE_FREQ_KHZ_MULTIPLIER	100000
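/*
 * Ratio values are assumed to be in units of 100 MHz (as with the legacy
 * uncore MSRs), so multiplying by this constant converts a ratio to the
 * kHz values used by the uncore-frequency sysfs interface and dividing
 * converts back.
 */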

struct tpmi_uncore_struct;

/* Information for each cluster */
struct tpmi_uncore_cluster_info {
	bool root_domain;
	bool elc_supported;
	u8 __iomem *cluster_base;
	struct uncore_data uncore_data;
	struct tpmi_uncore_struct *uncore_root;
};

/* Information for each power domain */
struct tpmi_uncore_power_domain_info {
	u8 __iomem *uncore_base;
	int ufs_header_ver;
	int cluster_count;
	struct tpmi_uncore_cluster_info *cluster_infos;
};

/* Information for all power domains in a package */
struct tpmi_uncore_struct {
	int power_domain_count;
	int max_ratio;
	int min_ratio;
	struct tpmi_uncore_power_domain_info *pd_info;
	struct tpmi_uncore_cluster_info root_cluster;
	bool write_blocked;
};

/* Bit definitions for STATUS register */
#define UNCORE_CURRENT_RATIO_MASK			GENMASK_ULL(6, 0)

/* Bit definitions for CONTROL register */
#define UNCORE_MAX_RATIO_MASK				GENMASK_ULL(14, 8)
#define UNCORE_MIN_RATIO_MASK				GENMASK_ULL(21, 15)
#define UNCORE_EFF_LAT_CTRL_RATIO_MASK			GENMASK_ULL(28, 22)
#define UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK		GENMASK_ULL(38, 32)
#define UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE	BIT(39)
#define UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK		GENMASK_ULL(46, 40)
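
/*
 * CONTROL register layout implied by the masks above (remaining bits are
 * not used by this driver):
 *
 *   bits  8..14  max ratio
 *   bits 15..21  min ratio
 *   bits 22..28  efficiency latency control (ELC) floor ratio
 *   bits 32..38  ELC low threshold (7-bit value, 0..127)
 *   bit     39   ELC high threshold enable
 *   bits 40..46  ELC high threshold (7-bit value, 0..127)
 */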

/* Helper function to read MMIO offset for max/min control frequency */
static void read_control_freq(struct tpmi_uncore_cluster_info *cluster_info,
			      unsigned int *value, enum uncore_index index)
{
	u64 control;

	control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX);
	if (index == UNCORE_INDEX_MAX_FREQ)
		*value = FIELD_GET(UNCORE_MAX_RATIO_MASK, control) * UNCORE_FREQ_KHZ_MULTIPLIER;
	else
		*value = FIELD_GET(UNCORE_MIN_RATIO_MASK, control) * UNCORE_FREQ_KHZ_MULTIPLIER;
}

/* Helper function to read efficiency latency control values over MMIO */
static int read_eff_lat_ctrl(struct uncore_data *data, unsigned int *val, enum uncore_index index)
{
	struct tpmi_uncore_cluster_info *cluster_info;
	u64 ctrl;

	cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);
	if (cluster_info->root_domain)
		return -ENODATA;

	if (!cluster_info->elc_supported)
		return -EOPNOTSUPP;

	ctrl = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX);

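	/*
	 * The low/high thresholds are stored in hardware as 7-bit values
	 * (0..FIELD_MAX, i.e. 0..127). Scale them to a 0..100 percentage for
	 * the caller, rounding up so that any non-zero hardware value reads
	 * back as at least 1%.
	 */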
	switch (index) {
	case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD:
		*val = FIELD_GET(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK, ctrl);
		*val *= 100;
		*val = DIV_ROUND_UP(*val, FIELD_MAX(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK));
		break;

	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD:
		*val = FIELD_GET(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK, ctrl);
		*val *= 100;
		*val = DIV_ROUND_UP(*val, FIELD_MAX(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK));
		break;

	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE:
		*val = FIELD_GET(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE, ctrl);
		break;

	case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
		*val = FIELD_GET(UNCORE_EFF_LAT_CTRL_RATIO_MASK, ctrl) * UNCORE_FREQ_KHZ_MULTIPLIER;
		break;

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

#define UNCORE_MAX_RATIO	FIELD_MAX(UNCORE_MAX_RATIO_MASK)
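/* The ratio fields are 7 bits wide, so the maximum programmable ratio is 127 */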

/* Helper for sysfs read for max/min frequencies. Called under mutex locks */
static int uncore_read_control_freq(struct uncore_data *data, unsigned int *value,
				    enum uncore_index index)
{
	struct tpmi_uncore_cluster_info *cluster_info;

	cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);

	if (cluster_info->root_domain) {
		struct tpmi_uncore_struct *uncore_root = cluster_info->uncore_root;
		unsigned int min, max, v;
		int i;

		min = UNCORE_MAX_RATIO * UNCORE_FREQ_KHZ_MULTIPLIER;
		max = 0;

		/*
		 * Get the max/min by looking at each cluster. Get the lowest
		 * min and highest max.
		 */
		for (i = 0; i < uncore_root->power_domain_count; ++i) {
			int j;

			for (j = 0; j < uncore_root->pd_info[i].cluster_count; ++j) {
				read_control_freq(&uncore_root->pd_info[i].cluster_infos[j],
						  &v, index);
				if (v < min)
					min = v;
				if (v > max)
					max = v;
			}
		}

		if (index == UNCORE_INDEX_MIN_FREQ)
			*value = min;
		else
			*value = max;

		return 0;
	}

	read_control_freq(cluster_info, value, index);

	return 0;
}

/* Helper function for writing efficiency latency control values over MMIO */
static int write_eff_lat_ctrl(struct uncore_data *data, unsigned int val, enum uncore_index index)
{
	struct tpmi_uncore_cluster_info *cluster_info;
	u64 control;

	cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);

	if (cluster_info->root_domain)
		return -ENODATA;

	if (!cluster_info->elc_supported)
		return -EOPNOTSUPP;

	switch (index) {
	case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD:
		if (val > 100)
			return -EINVAL;
		break;

	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD:
		if (val > 100)
			return -EINVAL;
		break;

	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE:
		if (val > 1)
			return -EINVAL;
		break;

	case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
		val /= UNCORE_FREQ_KHZ_MULTIPLIER;
		if (val > FIELD_MAX(UNCORE_EFF_LAT_CTRL_RATIO_MASK))
			return -EINVAL;
		break;

	default:
		return -EOPNOTSUPP;
	}

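	/*
	 * Input has been range-checked above; now read-modify-write the
	 * CONTROL register. The percentage thresholds are scaled back to the
	 * 7-bit hardware encoding (val * 127 / 100, truncated), the inverse
	 * of the rounding-up conversion done in read_eff_lat_ctrl().
	 */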
	control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX);

	switch (index) {
	case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD:
		val *= FIELD_MAX(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK);
		val /= 100;
		control &= ~UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK;
		control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK, val);
		break;

	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD:
		val *= FIELD_MAX(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK);
		val /= 100;
		control &= ~UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK;
		control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK, val);
		break;

	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE:
		control &= ~UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE;
		control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE, val);
		break;

	case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
		control &= ~UNCORE_EFF_LAT_CTRL_RATIO_MASK;
		control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_RATIO_MASK, val);
		break;

	default:
		break;
	}

	writeq(control, cluster_info->cluster_base + UNCORE_CONTROL_INDEX);

	return 0;
}
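
/*
 * Example of how the ELC controls are typically exercised from user space
 * (attribute names are assumed from the common uncore-frequency sysfs
 * interface documented in intel_uncore_frequency_scaling.rst, not defined
 * in this file), e.g. from a per-cluster uncore directory:
 *
 *   echo 20 > elc_low_threshold_percent
 *   echo 80 > elc_high_threshold_percent
 *   echo 1  > elc_high_threshold_enable
 */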

/* Helper function to write MMIO offset for max/min control frequency */
static void write_control_freq(struct tpmi_uncore_cluster_info *cluster_info, unsigned int input,
			       unsigned int index)
{
	u64 control;

	control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX);

	if (index == UNCORE_INDEX_MAX_FREQ) {
		control &= ~UNCORE_MAX_RATIO_MASK;
		control |= FIELD_PREP(UNCORE_MAX_RATIO_MASK, input);
	} else {
		control &= ~UNCORE_MIN_RATIO_MASK;
		control |= FIELD_PREP(UNCORE_MIN_RATIO_MASK, input);
	}

	writeq(control, (cluster_info->cluster_base + UNCORE_CONTROL_INDEX));
}

/* Helper for sysfs write for max/min frequencies. Called under mutex locks */
static int uncore_write_control_freq(struct uncore_data *data, unsigned int input,
				     enum uncore_index index)
{
	struct tpmi_uncore_cluster_info *cluster_info;
	struct tpmi_uncore_struct *uncore_root;

	input /= UNCORE_FREQ_KHZ_MULTIPLIER;
	if (!input || input > UNCORE_MAX_RATIO)
		return -EINVAL;

	cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);
	uncore_root = cluster_info->uncore_root;

	if (uncore_root->write_blocked)
		return -EPERM;

	/* Update each cluster in a package */
	if (cluster_info->root_domain) {
		int i;

		for (i = 0; i < uncore_root->power_domain_count; ++i) {
			int j;

			for (j = 0; j < uncore_root->pd_info[i].cluster_count; ++j)
				write_control_freq(&uncore_root->pd_info[i].cluster_infos[j],
						   input, index);
		}

		if (index == UNCORE_INDEX_MAX_FREQ)
			uncore_root->max_ratio = input;
		else
			uncore_root->min_ratio = input;

		return 0;
	}

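	/*
	 * For a per-cluster (non-root) request, reject values that conflict
	 * with a package-wide limit previously set through the root domain:
	 * a cluster max may not exceed the package max and a cluster min may
	 * not go below the package min. A ratio of 0 means no package-wide
	 * limit has been set yet.
	 */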
	if (index == UNCORE_INDEX_MAX_FREQ && uncore_root->max_ratio &&
	    uncore_root->max_ratio < input)
		return -EINVAL;

	if (index == UNCORE_INDEX_MIN_FREQ && uncore_root->min_ratio &&
	    uncore_root->min_ratio > input)
		return -EINVAL;

	write_control_freq(cluster_info, input, index);

	return 0;
}

/* Helper for sysfs read for the current uncore frequency. Called under mutex locks */
static int uncore_read_freq(struct uncore_data *data, unsigned int *freq)
{
	struct tpmi_uncore_cluster_info *cluster_info;
	u64 status;

	cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);
	if (cluster_info->root_domain)
		return -ENODATA;

	status = readq((u8 __iomem *)cluster_info->cluster_base + UNCORE_STATUS_INDEX);
	*freq = FIELD_GET(UNCORE_CURRENT_RATIO_MASK, status) * UNCORE_FREQ_KHZ_MULTIPLIER;

	return 0;
}

/* Callback for sysfs read for TPMI uncore values. Called under mutex locks. */
static int uncore_read(struct uncore_data *data, unsigned int *value, enum uncore_index index)
{
	switch (index) {
	case UNCORE_INDEX_MIN_FREQ:
	case UNCORE_INDEX_MAX_FREQ:
		return uncore_read_control_freq(data, value, index);

	case UNCORE_INDEX_CURRENT_FREQ:
		return uncore_read_freq(data, value);

	case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD:
	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD:
	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE:
	case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
		return read_eff_lat_ctrl(data, value, index);

	default:
		break;
	}

	return -EOPNOTSUPP;
}

/* Callback for sysfs write for TPMI uncore data. Called under mutex locks. */
static int uncore_write(struct uncore_data *data, unsigned int value, enum uncore_index index)
{
	switch (index) {
	case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD:
	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD:
	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE:
	case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
		return write_eff_lat_ctrl(data, value, index);

	case UNCORE_INDEX_MIN_FREQ:
	case UNCORE_INDEX_MAX_FREQ:
		return uncore_write_control_freq(data, value, index);

	default:
		break;
	}

	return -EOPNOTSUPP;
}

static void remove_cluster_entries(struct tpmi_uncore_struct *tpmi_uncore)
{
	int i;

	for (i = 0; i < tpmi_uncore->power_domain_count; ++i) {
		struct tpmi_uncore_power_domain_info *pd_info;
		int j;

		pd_info = &tpmi_uncore->pd_info[i];
		if (!pd_info->uncore_base)
			continue;

		for (j = 0; j < pd_info->cluster_count; ++j) {
			struct tpmi_uncore_cluster_info *cluster_info;

			cluster_info = &pd_info->cluster_infos[j];
			uncore_freq_remove_die_entry(&cluster_info->uncore_data);
		}
	}
}

#define UNCORE_VERSION_MASK			GENMASK_ULL(7, 0)
#define UNCORE_LOCAL_FABRIC_CLUSTER_ID_MASK	GENMASK_ULL(15, 8)
#define UNCORE_CLUSTER_OFF_MASK			GENMASK_ULL(7, 0)
#define UNCORE_MAX_CLUSTER_PER_DOMAIN		8

static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id)
{
	bool read_blocked = false, write_blocked = false;
	struct intel_tpmi_plat_info *plat_info;
	struct tpmi_uncore_struct *tpmi_uncore;
	bool uncore_sysfs_added = false;
	int ret, i, pkg = 0;
	int num_resources;

	ret = tpmi_get_feature_status(auxdev, TPMI_ID_UNCORE, &read_blocked, &write_blocked);
	if (ret)
		dev_info(&auxdev->dev, "Can't read feature status: ignoring blocked status\n");

	if (read_blocked) {
		dev_info(&auxdev->dev, "Firmware has blocked reads, exiting\n");
		return -ENODEV;
	}

	/* Get number of power domains, which is equal to number of resources */
	num_resources = tpmi_get_resource_count(auxdev);
	if (!num_resources)
		return -EINVAL;

	/* Register callbacks to uncore core */
	ret = uncore_freq_common_init(uncore_read, uncore_write);
	if (ret)
		return ret;

	/* Allocate uncore instance per package */
	tpmi_uncore = devm_kzalloc(&auxdev->dev, sizeof(*tpmi_uncore), GFP_KERNEL);
	if (!tpmi_uncore) {
		ret = -ENOMEM;
		goto err_rem_common;
	}

	/* Allocate memory for all power domains in a package */
	tpmi_uncore->pd_info = devm_kcalloc(&auxdev->dev, num_resources,
					    sizeof(*tpmi_uncore->pd_info),
					    GFP_KERNEL);
	if (!tpmi_uncore->pd_info) {
		ret = -ENOMEM;
		goto err_rem_common;
	}

	tpmi_uncore->power_domain_count = num_resources;
	tpmi_uncore->write_blocked = write_blocked;

	/* Get the package ID from the TPMI core */
	plat_info = tpmi_get_platform_data(auxdev);
	if (plat_info)
		pkg = plat_info->package_id;
	else
		dev_info(&auxdev->dev, "Platform information is NULL\n");

	for (i = 0; i < num_resources; ++i) {
		struct tpmi_uncore_power_domain_info *pd_info;
		struct resource *res;
		u64 cluster_offset;
		u8 cluster_mask;
		int mask, j;
		u64 header;

		res = tpmi_get_resource_at_index(auxdev, i);
		if (!res)
			continue;

		pd_info = &tpmi_uncore->pd_info[i];

		pd_info->uncore_base = devm_ioremap_resource(&auxdev->dev, res);
		if (IS_ERR(pd_info->uncore_base)) {
			ret = PTR_ERR(pd_info->uncore_base);
			/*
			 * Set to NULL so that cleanup via
			 * remove_cluster_entries() can still remove any
			 * entries that were already created.
			 */
			pd_info->uncore_base = NULL;
			goto remove_clusters;
		}

		/* Check for version and skip this resource if there is mismatch */
		header = readq(pd_info->uncore_base);
		pd_info->ufs_header_ver = header & UNCORE_VERSION_MASK;

		if (pd_info->ufs_header_ver == TPMI_VERSION_INVALID)
			continue;

		if (TPMI_MAJOR_VERSION(pd_info->ufs_header_ver) != UNCORE_MAJOR_VERSION) {
			dev_err(&auxdev->dev, "Uncore: Unsupported major version:%lx\n",
				TPMI_MAJOR_VERSION(pd_info->ufs_header_ver));
			ret = -ENODEV;
			goto remove_clusters;
		}

		if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) > UNCORE_MINOR_VERSION)
			dev_info(&auxdev->dev, "Uncore: Ignore: Unsupported minor version:%lx\n",
				 TPMI_MINOR_VERSION(pd_info->ufs_header_ver));

		/* Get Cluster ID Mask */
		cluster_mask = FIELD_GET(UNCORE_LOCAL_FABRIC_CLUSTER_ID_MASK, header);
		if (!cluster_mask) {
			dev_info(&auxdev->dev, "Uncore: Invalid cluster mask:%x\n", cluster_mask);
			continue;
		}

		/* Find out number of clusters in this resource */
		pd_info->cluster_count = hweight8(cluster_mask);

		pd_info->cluster_infos = devm_kcalloc(&auxdev->dev, pd_info->cluster_count,
						      sizeof(struct tpmi_uncore_cluster_info),
						      GFP_KERNEL);
		if (!pd_info->cluster_infos) {
			ret = -ENOMEM;
			goto remove_clusters;
		}
		/*
		 * Each byte in this register points to the status and control
		 * registers for one cluster (cluster IDs 0-7).
		 */
		cluster_offset = readq(pd_info->uncore_base +
					UNCORE_FABRIC_CLUSTER_OFFSET);

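		/*
		 * Worked example with a hypothetical value (not from real
		 * hardware): if cluster_offset reads 0x0403 and cluster_mask
		 * has two bits set, cluster 0's registers start at QWORD 3
		 * (byte offset 3 << 3 = 24) and cluster 1's at QWORD 4 (byte
		 * offset 32) from uncore_base; the register is shifted right
		 * by 8 bits after each cluster to reach the next byte.
		 */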
		for (j = 0; j < pd_info->cluster_count; ++j) {
			struct tpmi_uncore_cluster_info *cluster_info;

			/* Get the offset for this cluster */
			mask = (cluster_offset & UNCORE_CLUSTER_OFF_MASK);
			/* Offset in QWORD, so change to bytes */
			mask <<= 3;

			cluster_info = &pd_info->cluster_infos[j];

			cluster_info->cluster_base = pd_info->uncore_base + mask;

			cluster_info->uncore_data.package_id = pkg;
			/* Unlike Cascade Lake, there are no multiple dies per package */
			cluster_info->uncore_data.die_id = 0;
			cluster_info->uncore_data.domain_id = i;
			cluster_info->uncore_data.cluster_id = j;

			cluster_info->uncore_root = tpmi_uncore;

			if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) >= UNCORE_ELC_SUPPORTED_VERSION)
				cluster_info->elc_supported = true;

			ret = uncore_freq_add_entry(&cluster_info->uncore_data, 0);
			if (ret) {
				cluster_info->cluster_base = NULL;
				goto remove_clusters;
			}
			/* Point to next cluster offset */
			cluster_offset >>= UNCORE_MAX_CLUSTER_PER_DOMAIN;
			uncore_sysfs_added = true;
		}
	}

	if (!uncore_sysfs_added) {
		ret = -ENODEV;
		goto remove_clusters;
	}

	auxiliary_set_drvdata(auxdev, tpmi_uncore);

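	/*
	 * The package-scope root entry, which aggregates and fans out to all
	 * clusters, is only registered for single-die packages; on multi-die
	 * packages only the per-domain/per-cluster entries added above are
	 * exposed.
	 */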
	if (topology_max_dies_per_package() > 1)
		return 0;

	tpmi_uncore->root_cluster.root_domain = true;
	tpmi_uncore->root_cluster.uncore_root = tpmi_uncore;

	tpmi_uncore->root_cluster.uncore_data.package_id = pkg;
	tpmi_uncore->root_cluster.uncore_data.domain_id = UNCORE_DOMAIN_ID_INVALID;
	ret = uncore_freq_add_entry(&tpmi_uncore->root_cluster.uncore_data, 0);
	if (ret)
		goto remove_clusters;

	return 0;

remove_clusters:
	remove_cluster_entries(tpmi_uncore);
err_rem_common:
	uncore_freq_common_exit();

	return ret;
}

static void uncore_remove(struct auxiliary_device *auxdev)
{
	struct tpmi_uncore_struct *tpmi_uncore = auxiliary_get_drvdata(auxdev);

	if (tpmi_uncore->root_cluster.root_domain)
		uncore_freq_remove_die_entry(&tpmi_uncore->root_cluster.uncore_data);

	remove_cluster_entries(tpmi_uncore);

	uncore_freq_common_exit();
}

static const struct auxiliary_device_id intel_uncore_id_table[] = {
	{ .name = "intel_vsec.tpmi-uncore" },
	{}
};
MODULE_DEVICE_TABLE(auxiliary, intel_uncore_id_table);

static struct auxiliary_driver intel_uncore_aux_driver = {
	.id_table       = intel_uncore_id_table,
	.remove         = uncore_remove,
	.probe          = uncore_probe,
};

module_auxiliary_driver(intel_uncore_aux_driver);

MODULE_IMPORT_NS(INTEL_TPMI);
MODULE_IMPORT_NS(INTEL_UNCORE_FREQUENCY);
MODULE_DESCRIPTION("Intel TPMI UFS Driver");
MODULE_LICENSE("GPL");