xref: /linux/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c (revision 685f88c72a0c4d12d3bd2ff50286938f14486f85)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
 * uncore-frequency-tpmi: Uncore frequency scaling using TPMI
4  *
5  * Copyright (c) 2023, Intel Corporation.
6  * All Rights Reserved.
7  *
8  * The hardware interface to read/write is basically substitution of
9  * MSR 0x620 and 0x621.
10  * There are specific MMIO offset and bits to get/set minimum and
11  * maximum uncore ratio, similar to MSRs.
12  * The scope of the uncore MSRs was package scope. But TPMI allows
13  * new gen CPUs to have multiple uncore controls at uncore-cluster
14  * level. Each package can have multiple power domains which further
15  * can have multiple clusters.
16  * Here number of power domains = number of resources in this aux
17  * device. There are offsets and bits to discover number of clusters
18  * and offset for each cluster level controls.
19  *
20  */
21 
22 #include <linux/auxiliary_bus.h>
23 #include <linux/bitfield.h>
24 #include <linux/bits.h>
25 #include <linux/io.h>
26 #include <linux/module.h>
27 #include <linux/intel_tpmi.h>
28 
29 #include "../tpmi_power_domains.h"
30 #include "uncore-frequency-common.h"
31 
#define	UNCORE_MAJOR_VERSION		0
#define	UNCORE_MINOR_VERSION		2
/* Minimum TPMI UFS minor version implementing Efficiency Latency Control */
#define UNCORE_ELC_SUPPORTED_VERSION	2
#define UNCORE_HEADER_INDEX		0
#define UNCORE_FABRIC_CLUSTER_OFFSET	8

/* status + control + adv_ctl1 + adv_ctl2 */
#define UNCORE_FABRIC_CLUSTER_SIZE	(4 * 8)

/* Byte offsets of the per-cluster STATUS and CONTROL registers */
#define UNCORE_STATUS_INDEX		0
#define UNCORE_CONTROL_INDEX		8

/* Hardware ratio unit is 100 MHz; sysfs frequencies are in kHz */
#define UNCORE_FREQ_KHZ_MULTIPLIER	100000
45 
46 struct tpmi_uncore_struct;
47 
/**
 * struct tpmi_uncore_cluster_info - Information for each uncore cluster
 * @root_domain:	true only for the synthetic package-wide root entry
 * @elc_supported:	Efficiency Latency Control fields are present
 * @cluster_base:	MMIO base of this cluster's register block
 * @cdie_id:		compute die id used to resolve the Linux die id
 * @uncore_data:	embedded data used by the common uncore sysfs core
 * @uncore_root:	back pointer to the package-level structure
 */
struct tpmi_uncore_cluster_info {
	bool root_domain;
	bool elc_supported;
	u8 __iomem *cluster_base;
	u16 cdie_id;
	struct uncore_data uncore_data;
	struct tpmi_uncore_struct *uncore_root;
};
57 
/**
 * struct tpmi_uncore_power_domain_info - Information for each power domain
 * @uncore_base:	MMIO base of this power domain's register block
 * @ufs_header_ver:	TPMI UFS interface version read from the header
 * @cluster_count:	number of uncore clusters in this power domain
 * @cluster_infos:	array of @cluster_count per-cluster entries
 */
struct tpmi_uncore_power_domain_info {
	u8 __iomem *uncore_base;
	int ufs_header_ver;
	int cluster_count;
	struct tpmi_uncore_cluster_info *cluster_infos;
};
65 
/**
 * struct tpmi_uncore_struct - Information for all power domains in a package
 * @power_domain_count:	number of power domains (= TPMI resource count)
 * @max_ratio:	package-wide max limit set via the root entry (0 = not set)
 * @min_ratio:	package-wide min limit set via the root entry (0 = not set)
 * @pd_info:	array of @power_domain_count power domain entries
 * @root_cluster:	synthetic entry controlling all clusters in the package
 * @write_blocked:	firmware has blocked writes to uncore controls
 */
struct tpmi_uncore_struct {
	int power_domain_count;
	int max_ratio;
	int min_ratio;
	struct tpmi_uncore_power_domain_info *pd_info;
	struct tpmi_uncore_cluster_info root_cluster;
	bool write_blocked;
};
75 
/* Bit definitions for STATUS register */
#define UNCORE_CURRENT_RATIO_MASK			GENMASK_ULL(6, 0)

/* Bit definitions for CONTROL register */
#define UNCORE_MAX_RATIO_MASK				GENMASK_ULL(14, 8)
#define UNCORE_MIN_RATIO_MASK				GENMASK_ULL(21, 15)
/* Efficiency Latency Control fields, valid from UFS minor version 2 */
#define UNCORE_EFF_LAT_CTRL_RATIO_MASK			GENMASK_ULL(28, 22)
#define UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK		GENMASK_ULL(38, 32)
#define UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE	BIT(39)
#define UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK		GENMASK_ULL(46, 40)
86 
87 /* Helper function to read MMIO offset for max/min control frequency */
88 static void read_control_freq(struct tpmi_uncore_cluster_info *cluster_info,
89 			     unsigned int *value, enum uncore_index index)
90 {
91 	u64 control;
92 
93 	control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX);
94 	if (index == UNCORE_INDEX_MAX_FREQ)
95 		*value = FIELD_GET(UNCORE_MAX_RATIO_MASK, control) * UNCORE_FREQ_KHZ_MULTIPLIER;
96 	else
97 		*value = FIELD_GET(UNCORE_MIN_RATIO_MASK, control) * UNCORE_FREQ_KHZ_MULTIPLIER;
98 }
99 
/*
 * Helper for sysfs reads of Efficiency Latency Control (ELC) values over
 * MMIO. Called under mutex locks. Thresholds are converted from the raw
 * hardware fields to a 0-100 percentage (rounded up); the ELC floor
 * frequency is converted from a ratio to kHz.
 *
 * Returns -ENODATA for the package root entry (it has no register block),
 * -EOPNOTSUPP when ELC is unsupported or @index is not an ELC index.
 */
static int read_eff_lat_ctrl(struct uncore_data *data, unsigned int *val, enum uncore_index index)
{
	struct tpmi_uncore_cluster_info *cluster_info;
	u64 ctrl;

	cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);
	if (cluster_info->root_domain)
		return -ENODATA;

	if (!cluster_info->elc_supported)
		return -EOPNOTSUPP;

	ctrl = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX);

	switch (index) {
	case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD:
		/* Scale raw field value to percent, rounding up */
		*val = FIELD_GET(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK, ctrl);
		*val *= 100;
		*val = DIV_ROUND_UP(*val, FIELD_MAX(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK));
		break;

	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD:
		/* Scale raw field value to percent, rounding up */
		*val = FIELD_GET(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK, ctrl);
		*val *= 100;
		*val = DIV_ROUND_UP(*val, FIELD_MAX(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK));
		break;

	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE:
		*val = FIELD_GET(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE, ctrl);
		break;
	case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
		/* Ratio to kHz */
		*val = FIELD_GET(UNCORE_EFF_LAT_CTRL_RATIO_MASK, ctrl) * UNCORE_FREQ_KHZ_MULTIPLIER;
		break;

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
141 
/* Largest ratio the 7 bit hardware field can hold */
#define UNCORE_MAX_RATIO	FIELD_MAX(UNCORE_MAX_RATIO_MASK)

/* Helper for sysfs read for max/min frequencies. Called under mutex locks */
static int uncore_read_control_freq(struct uncore_data *data, unsigned int *value,
				    enum uncore_index index)
{
	struct tpmi_uncore_cluster_info *cluster_info;

	cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);

	/* The root entry aggregates over every cluster in the package */
	if (cluster_info->root_domain) {
		struct tpmi_uncore_struct *uncore_root = cluster_info->uncore_root;
		unsigned int min, max, v;
		int i;

		/* Start from the extremes so the first cluster always updates both */
		min = UNCORE_MAX_RATIO * UNCORE_FREQ_KHZ_MULTIPLIER;
		max = 0;

		/*
		 * Get the max/min by looking at each cluster. Get the lowest
		 * min and highest max.
		 */
		for (i = 0; i < uncore_root->power_domain_count; ++i) {
			int j;

			for (j = 0; j < uncore_root->pd_info[i].cluster_count; ++j) {
				read_control_freq(&uncore_root->pd_info[i].cluster_infos[j],
						  &v, index);
				if (v < min)
					min = v;
				if (v > max)
					max = v;
			}
		}

		if (index == UNCORE_INDEX_MIN_FREQ)
			*value = min;
		else
			*value = max;

		return 0;
	}

	read_control_freq(cluster_info, value, index);

	return 0;
}
189 
190 /* Helper function for writing efficiency latency control values over MMIO */
191 static int write_eff_lat_ctrl(struct uncore_data *data, unsigned int val, enum uncore_index index)
192 {
193 	struct tpmi_uncore_cluster_info *cluster_info;
194 	u64 control;
195 
196 	cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);
197 
198 	if (cluster_info->root_domain)
199 		return -ENODATA;
200 
201 	if (!cluster_info->elc_supported)
202 		return -EOPNOTSUPP;
203 
204 	switch (index) {
205 	case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD:
206 		if (val > 100)
207 			return -EINVAL;
208 		break;
209 
210 	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD:
211 		if (val > 100)
212 			return -EINVAL;
213 		break;
214 
215 	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE:
216 		if (val > 1)
217 			return -EINVAL;
218 		break;
219 
220 	case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
221 		val /= UNCORE_FREQ_KHZ_MULTIPLIER;
222 		if (val > FIELD_MAX(UNCORE_EFF_LAT_CTRL_RATIO_MASK))
223 			return -EINVAL;
224 		break;
225 
226 	default:
227 		return -EOPNOTSUPP;
228 	}
229 
230 	control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX);
231 
232 	switch (index) {
233 	case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD:
234 		val *= FIELD_MAX(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK);
235 		val /= 100;
236 		control &= ~UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK;
237 		control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_LOW_THRESHOLD_MASK, val);
238 		break;
239 
240 	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD:
241 		val *= FIELD_MAX(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK);
242 		val /= 100;
243 		control &= ~UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK;
244 		control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_MASK, val);
245 		break;
246 
247 	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE:
248 		control &= ~UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE;
249 		control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE, val);
250 		break;
251 
252 	case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
253 		control &= ~UNCORE_EFF_LAT_CTRL_RATIO_MASK;
254 		control |= FIELD_PREP(UNCORE_EFF_LAT_CTRL_RATIO_MASK, val);
255 		break;
256 
257 	default:
258 		break;
259 	}
260 
261 	writeq(control, cluster_info->cluster_base + UNCORE_CONTROL_INDEX);
262 
263 	return 0;
264 }
265 
266 /* Helper function to write MMIO offset for max/min control frequency */
267 static void write_control_freq(struct tpmi_uncore_cluster_info *cluster_info, unsigned int input,
268 			      unsigned int index)
269 {
270 	u64 control;
271 
272 	control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX);
273 
274 	if (index == UNCORE_INDEX_MAX_FREQ) {
275 		control &= ~UNCORE_MAX_RATIO_MASK;
276 		control |= FIELD_PREP(UNCORE_MAX_RATIO_MASK, input);
277 	} else {
278 		control &= ~UNCORE_MIN_RATIO_MASK;
279 		control |= FIELD_PREP(UNCORE_MIN_RATIO_MASK, input);
280 	}
281 
282 	writeq(control, (cluster_info->cluster_base + UNCORE_CONTROL_INDEX));
283 }
284 
285 /* Helper for sysfs write for max/min frequencies. Called under mutex locks */
286 static int uncore_write_control_freq(struct uncore_data *data, unsigned int input,
287 				     enum uncore_index index)
288 {
289 	struct tpmi_uncore_cluster_info *cluster_info;
290 	struct tpmi_uncore_struct *uncore_root;
291 
292 	input /= UNCORE_FREQ_KHZ_MULTIPLIER;
293 	if (!input || input > UNCORE_MAX_RATIO)
294 		return -EINVAL;
295 
296 	cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);
297 	uncore_root = cluster_info->uncore_root;
298 
299 	if (uncore_root->write_blocked)
300 		return -EPERM;
301 
302 	/* Update each cluster in a package */
303 	if (cluster_info->root_domain) {
304 		struct tpmi_uncore_struct *uncore_root = cluster_info->uncore_root;
305 		int i;
306 
307 		for (i = 0; i < uncore_root->power_domain_count; ++i) {
308 			int j;
309 
310 			for (j = 0; j < uncore_root->pd_info[i].cluster_count; ++j)
311 				write_control_freq(&uncore_root->pd_info[i].cluster_infos[j],
312 						  input, index);
313 		}
314 
315 		if (index == UNCORE_INDEX_MAX_FREQ)
316 			uncore_root->max_ratio = input;
317 		else
318 			uncore_root->min_ratio = input;
319 
320 		return 0;
321 	}
322 
323 	if (index == UNCORE_INDEX_MAX_FREQ && uncore_root->max_ratio &&
324 	    uncore_root->max_ratio < input)
325 		return -EINVAL;
326 
327 	if (index == UNCORE_INDEX_MIN_FREQ && uncore_root->min_ratio &&
328 	    uncore_root->min_ratio > input)
329 		return -EINVAL;
330 
331 	write_control_freq(cluster_info, input, index);
332 
333 	return 0;
334 }
335 
336 /* Helper for sysfs read for the current uncore frequency. Called under mutex locks */
337 static int uncore_read_freq(struct uncore_data *data, unsigned int *freq)
338 {
339 	struct tpmi_uncore_cluster_info *cluster_info;
340 	u64 status;
341 
342 	cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);
343 	if (cluster_info->root_domain)
344 		return -ENODATA;
345 
346 	status = readq((u8 __iomem *)cluster_info->cluster_base + UNCORE_STATUS_INDEX);
347 	*freq = FIELD_GET(UNCORE_CURRENT_RATIO_MASK, status) * UNCORE_FREQ_KHZ_MULTIPLIER;
348 
349 	return 0;
350 }
351 
352 /*
353  * Agent types as per the TPMI UFS Specification for UFS_STATUS
354  * Agent Type - Core	Bit: 23
355  * Agent Type - Cache	Bit: 24
356  * Agent Type - Memory	Bit: 25
357  * Agent Type - IO	Bit: 26
358  */
359 
360 #define UNCORE_AGENT_TYPES	GENMASK_ULL(26, 23)
361 
362 /* Helper function to read agent type over MMIO and set the agent type mask */
363 static void uncore_set_agent_type(struct tpmi_uncore_cluster_info *cluster_info)
364 {
365 	u64 status;
366 
367 	status = readq((u8 __iomem *)cluster_info->cluster_base + UNCORE_STATUS_INDEX);
368 	cluster_info->uncore_data.agent_type_mask = FIELD_GET(UNCORE_AGENT_TYPES, status);
369 }
370 
/*
 * Callback for sysfs read for TPMI uncore values. Called under mutex locks.
 * Dispatches @index to the frequency, current-frequency, ELC or die-id
 * helpers; unknown indexes return -EOPNOTSUPP.
 */
static int uncore_read(struct uncore_data *data, unsigned int *value, enum uncore_index index)
{
	struct tpmi_uncore_cluster_info *cluster_info;
	int ret;

	switch (index) {
	case UNCORE_INDEX_MIN_FREQ:
	case UNCORE_INDEX_MAX_FREQ:
		return uncore_read_control_freq(data, value, index);

	case UNCORE_INDEX_CURRENT_FREQ:
		return uncore_read_freq(data, value);

	case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD:
	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD:
	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE:
	case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
		return read_eff_lat_ctrl(data, value, index);

	case UNCORE_INDEX_DIE_ID:
		/* Translate the compute die id to the Linux die id */
		cluster_info = container_of(data, struct tpmi_uncore_cluster_info, uncore_data);
		ret = tpmi_get_linux_die_id(cluster_info->uncore_data.package_id,
					    cluster_info->cdie_id);
		if (ret < 0)
			return ret;

		*value = ret;
		return 0;

	default:
		break;
	}

	return -EOPNOTSUPP;
}
407 
408 /* Callback for sysfs write for TPMI uncore data. Called under mutex locks. */
409 static int uncore_write(struct uncore_data *data, unsigned int value, enum uncore_index index)
410 {
411 	switch (index) {
412 	case UNCORE_INDEX_EFF_LAT_CTRL_LOW_THRESHOLD:
413 	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD:
414 	case UNCORE_INDEX_EFF_LAT_CTRL_HIGH_THRESHOLD_ENABLE:
415 	case UNCORE_INDEX_EFF_LAT_CTRL_FREQ:
416 		return write_eff_lat_ctrl(data, value, index);
417 
418 	case UNCORE_INDEX_MIN_FREQ:
419 	case UNCORE_INDEX_MAX_FREQ:
420 		return uncore_write_control_freq(data, value, index);
421 
422 	default:
423 		break;
424 	}
425 
426 	return -EOPNOTSUPP;
427 }
428 
429 static void remove_cluster_entries(struct tpmi_uncore_struct *tpmi_uncore)
430 {
431 	int i;
432 
433 	for (i = 0; i < tpmi_uncore->power_domain_count; ++i) {
434 		struct tpmi_uncore_power_domain_info *pd_info;
435 		int j;
436 
437 		pd_info = &tpmi_uncore->pd_info[i];
438 		if (!pd_info->uncore_base)
439 			continue;
440 
441 		for (j = 0; j < pd_info->cluster_count; ++j) {
442 			struct tpmi_uncore_cluster_info *cluster_info;
443 
444 			cluster_info = &pd_info->cluster_infos[j];
445 			uncore_freq_remove_die_entry(&cluster_info->uncore_data);
446 		}
447 	}
448 }
449 
/*
 * Set the compute die id for a cluster. For compute (core-type) agents,
 * offset the domain id by the position of the first compute die in the
 * platform cdie mask; other agent types use the domain id directly.
 */
static void set_cdie_id(int domain_id, struct tpmi_uncore_cluster_info *cluster_info,
		       struct intel_tpmi_plat_info *plat_info)
{

	cluster_info->cdie_id = domain_id;

	if (plat_info->cdie_mask && cluster_info->uncore_data.agent_type_mask & AGENT_TYPE_CORE)
		cluster_info->cdie_id = domain_id + ffs(plat_info->cdie_mask) - 1;
}
459 
/* Layout of the UFS header register and the per-domain cluster-offset register */
#define UNCORE_VERSION_MASK			GENMASK_ULL(7, 0)
#define UNCORE_LOCAL_FABRIC_CLUSTER_ID_MASK	GENMASK_ULL(15, 8)
#define UNCORE_CLUSTER_OFF_MASK			GENMASK_ULL(7, 0)
#define UNCORE_MAX_CLUSTER_PER_DOMAIN		8

/*
 * Probe one uncore TPMI auxiliary device. There is one device per package;
 * each TPMI resource is one power domain, and each power domain can have
 * several uncore clusters. A sysfs entry is created per cluster, plus one
 * package-wide root entry on single-die packages.
 */
static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id)
{
	bool read_blocked = 0, write_blocked = 0;
	struct intel_tpmi_plat_info *plat_info;
	struct tpmi_uncore_struct *tpmi_uncore;
	bool uncore_sysfs_added = false;
	int ret, i, pkg = 0;
	int num_resources;

	/* Blocked status is advisory; if it can't be read, assume unblocked */
	ret = tpmi_get_feature_status(auxdev, TPMI_ID_UNCORE, &read_blocked, &write_blocked);
	if (ret)
		dev_info(&auxdev->dev, "Can't read feature status: ignoring blocked status\n");

	if (read_blocked) {
		dev_info(&auxdev->dev, "Firmware has blocked reads, exiting\n");
		return -ENODEV;
	}

	/* Get number of power domains, which is equal to number of resources */
	num_resources = tpmi_get_resource_count(auxdev);
	if (!num_resources)
		return -EINVAL;

	/* Register callbacks to uncore core */
	ret = uncore_freq_common_init(uncore_read, uncore_write);
	if (ret)
		return ret;

	/* Allocate uncore instance per package */
	tpmi_uncore = devm_kzalloc(&auxdev->dev, sizeof(*tpmi_uncore), GFP_KERNEL);
	if (!tpmi_uncore) {
		ret = -ENOMEM;
		goto err_rem_common;
	}

	/* Allocate memory for all power domains in a package */
	tpmi_uncore->pd_info = devm_kcalloc(&auxdev->dev, num_resources,
					    sizeof(*tpmi_uncore->pd_info),
					    GFP_KERNEL);
	if (!tpmi_uncore->pd_info) {
		ret = -ENOMEM;
		goto err_rem_common;
	}

	tpmi_uncore->power_domain_count = num_resources;
	tpmi_uncore->write_blocked = write_blocked;

	/* Get the package ID from the TPMI core */
	plat_info = tpmi_get_platform_data(auxdev);
	if (unlikely(!plat_info)) {
		dev_info(&auxdev->dev, "Platform information is NULL\n");
		ret = -ENODEV;
		goto err_rem_common;
	}

	pkg = plat_info->package_id;

	for (i = 0; i < num_resources; ++i) {
		struct tpmi_uncore_power_domain_info *pd_info;
		struct resource *res;
		u64 cluster_offset;
		u8 cluster_mask;
		int mask, j;
		u64 header;

		res = tpmi_get_resource_at_index(auxdev, i);
		if (!res)
			continue;

		pd_info = &tpmi_uncore->pd_info[i];

		pd_info->uncore_base = devm_ioremap_resource(&auxdev->dev, res);
		if (IS_ERR(pd_info->uncore_base)) {
			ret = PTR_ERR(pd_info->uncore_base);
			/*
			 * Set to NULL so that clean up can still remove other
			 * entries already created if any by
			 * remove_cluster_entries()
			 */
			pd_info->uncore_base = NULL;
			goto remove_clusters;
		}

		/* Check for version and skip this resource if there is mismatch */
		header = readq(pd_info->uncore_base);
		pd_info->ufs_header_ver = header & UNCORE_VERSION_MASK;

		if (pd_info->ufs_header_ver == TPMI_VERSION_INVALID)
			continue;

		if (TPMI_MAJOR_VERSION(pd_info->ufs_header_ver) != UNCORE_MAJOR_VERSION) {
			dev_err(&auxdev->dev, "Uncore: Unsupported major version:%lx\n",
				TPMI_MAJOR_VERSION(pd_info->ufs_header_ver));
			ret = -ENODEV;
			goto remove_clusters;
		}

		/* A newer minor version is usable; warn and continue */
		if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) > UNCORE_MINOR_VERSION)
			dev_info(&auxdev->dev, "Uncore: Ignore: Unsupported minor version:%lx\n",
				 TPMI_MINOR_VERSION(pd_info->ufs_header_ver));

		/* Get Cluster ID Mask */
		cluster_mask = FIELD_GET(UNCORE_LOCAL_FABRIC_CLUSTER_ID_MASK, header);
		if (!cluster_mask) {
			dev_info(&auxdev->dev, "Uncore: Invalid cluster mask:%x\n", cluster_mask);
			continue;
		}

		/* Find out number of clusters in this resource */
		pd_info->cluster_count = hweight8(cluster_mask);

		pd_info->cluster_infos = devm_kcalloc(&auxdev->dev, pd_info->cluster_count,
						      sizeof(struct tpmi_uncore_cluster_info),
						      GFP_KERNEL);
		if (!pd_info->cluster_infos) {
			ret = -ENOMEM;
			goto remove_clusters;
		}
		/*
		 * Each byte in the register points to the status and control
		 * registers belonging to cluster ids 0-7.
		 */
		cluster_offset = readq(pd_info->uncore_base +
					UNCORE_FABRIC_CLUSTER_OFFSET);

		for (j = 0; j < pd_info->cluster_count; ++j) {
			struct tpmi_uncore_cluster_info *cluster_info;

			/* Get the offset for this cluster */
			mask = (cluster_offset & UNCORE_CLUSTER_OFF_MASK);
			/* Offset in QWORD, so change to bytes */
			mask <<= 3;

			cluster_info = &pd_info->cluster_infos[j];

			cluster_info->cluster_base = pd_info->uncore_base + mask;

			uncore_set_agent_type(cluster_info);

			cluster_info->uncore_data.package_id = pkg;
			/* There are no dies like Cascade Lake */
			cluster_info->uncore_data.die_id = 0;
			cluster_info->uncore_data.domain_id = i;
			cluster_info->uncore_data.cluster_id = j;

			/* Needs agent_type_mask, so must follow uncore_set_agent_type() */
			set_cdie_id(i, cluster_info, plat_info);

			cluster_info->uncore_root = tpmi_uncore;

			if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) >= UNCORE_ELC_SUPPORTED_VERSION)
				cluster_info->elc_supported = true;

			ret = uncore_freq_add_entry(&cluster_info->uncore_data, 0);
			if (ret) {
				cluster_info->cluster_base = NULL;
				goto remove_clusters;
			}
			/* Point to next cluster offset */
			cluster_offset >>= UNCORE_MAX_CLUSTER_PER_DOMAIN;
			uncore_sysfs_added = true;
		}
	}

	/* No usable resource produced a sysfs entry: nothing to drive */
	if (!uncore_sysfs_added) {
		ret = -ENODEV;
		goto remove_clusters;
	}

	auxiliary_set_drvdata(auxdev, tpmi_uncore);

	/* Multi-die packages get no package-wide root control entry */
	if (topology_max_dies_per_package() > 1)
		return 0;

	tpmi_uncore->root_cluster.root_domain = true;
	tpmi_uncore->root_cluster.uncore_root = tpmi_uncore;

	tpmi_uncore->root_cluster.uncore_data.package_id = pkg;
	tpmi_uncore->root_cluster.uncore_data.domain_id = UNCORE_DOMAIN_ID_INVALID;
	ret = uncore_freq_add_entry(&tpmi_uncore->root_cluster.uncore_data, 0);
	if (ret)
		goto remove_clusters;

	return 0;

remove_clusters:
	remove_cluster_entries(tpmi_uncore);
err_rem_common:
	uncore_freq_common_exit();

	return ret;
}
656 
/* Remove all sysfs entries and unregister from the common uncore core */
static void uncore_remove(struct auxiliary_device *auxdev)
{
	struct tpmi_uncore_struct *tpmi_uncore = auxiliary_get_drvdata(auxdev);

	/* The root entry is only created for single-die packages */
	if (tpmi_uncore->root_cluster.root_domain)
		uncore_freq_remove_die_entry(&tpmi_uncore->root_cluster.uncore_data);

	remove_cluster_entries(tpmi_uncore);

	uncore_freq_common_exit();
}
668 
/* Match the "tpmi-uncore" auxiliary device created by the intel_vsec driver */
static const struct auxiliary_device_id intel_uncore_id_table[] = {
	{ .name = "intel_vsec.tpmi-uncore" },
	{}
};
MODULE_DEVICE_TABLE(auxiliary, intel_uncore_id_table);

static struct auxiliary_driver intel_uncore_aux_driver = {
	.id_table       = intel_uncore_id_table,
	.remove         = uncore_remove,
	.probe          = uncore_probe,
};

module_auxiliary_driver(intel_uncore_aux_driver);

MODULE_IMPORT_NS("INTEL_TPMI");
MODULE_IMPORT_NS("INTEL_UNCORE_FREQUENCY");
MODULE_IMPORT_NS("INTEL_TPMI_POWER_DOMAIN");
MODULE_DESCRIPTION("Intel TPMI UFS Driver");
MODULE_LICENSE("GPL");
688