// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <drm/drm_managed.h>

#include <regs/xe_gt_regs.h>
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_throttle_sysfs.h"
#include "xe_mmio.h"

/**
 * DOC: Xe GT Throttle
 *
 * Provides sysfs entries for frequency throttle reasons in the GT:
 *
 * device/gt#/freq0/throttle/status - Overall status
 * device/gt#/freq0/throttle/reason_pl1 - Frequency throttle due to PL1
 * device/gt#/freq0/throttle/reason_pl2 - Frequency throttle due to PL2
 * device/gt#/freq0/throttle/reason_pl4 - Frequency throttle due to PL4, Iccmax, etc.
 * device/gt#/freq0/throttle/reason_thermal - Frequency throttle due to thermal
 * device/gt#/freq0/throttle/reason_prochot - Frequency throttle due to prochot
 * device/gt#/freq0/throttle/reason_ratl - Frequency throttle due to RATL
 * device/gt#/freq0/throttle/reason_vr_thermalert - Frequency throttle due to VR THERMALERT
 * device/gt#/freq0/throttle/reason_vr_tdc - Frequency throttle due to VR TDC
 */
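
/*
 * A minimal userspace sketch (illustrative only, not part of the driver):
 * it reads the overall throttle status attribute documented above. The full
 * sysfs path below is an assumption; the card and gt indices vary per system.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned int throttled = 0;
 *		FILE *f = fopen("/sys/class/drm/card0/device/gt0/freq0/throttle/status", "r");
 *
 *		if (f) {
 *			if (fscanf(f, "%u", &throttled) == 1)
 *				printf("GT throttled: %u\n", throttled);
 *			fclose(f);
 *		}
 *		return 0;
 *	}
 */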

static struct xe_gt *
dev_to_gt(struct device *dev)
{
        return kobj_to_gt(dev->kobj.parent);
}

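/* The standalone media GT reports its limit reasons via a separate register */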
static u32 read_perf_limit_reasons(struct xe_gt *gt)
{
        u32 reg;

        if (xe_gt_is_media_type(gt))
                reg = xe_mmio_read32(gt, MTL_MEDIA_PERF_LIMIT_REASONS);
        else
                reg = xe_mmio_read32(gt, GT0_PERF_LIMIT_REASONS);

        return reg;
}

static u32 read_status(struct xe_gt *gt)
{
        u32 status = read_perf_limit_reasons(gt) & GT0_PERF_LIMIT_REASONS_MASK;

        return status;
}

static u32 read_reason_pl1(struct xe_gt *gt)
{
        u32 pl1 = read_perf_limit_reasons(gt) & POWER_LIMIT_1_MASK;

        return pl1;
}

static u32 read_reason_pl2(struct xe_gt *gt)
{
        u32 pl2 = read_perf_limit_reasons(gt) & POWER_LIMIT_2_MASK;

        return pl2;
}

static u32 read_reason_pl4(struct xe_gt *gt)
{
        u32 pl4 = read_perf_limit_reasons(gt) & POWER_LIMIT_4_MASK;

        return pl4;
}

static u32 read_reason_thermal(struct xe_gt *gt)
{
        u32 thermal = read_perf_limit_reasons(gt) & THERMAL_LIMIT_MASK;

        return thermal;
}

static u32 read_reason_prochot(struct xe_gt *gt)
{
        u32 prochot = read_perf_limit_reasons(gt) & PROCHOT_MASK;

        return prochot;
}

static u32 read_reason_ratl(struct xe_gt *gt)
{
        u32 ratl = read_perf_limit_reasons(gt) & RATL_MASK;

        return ratl;
}

static u32 read_reason_vr_thermalert(struct xe_gt *gt)
{
        u32 thermalert = read_perf_limit_reasons(gt) & VR_THERMALERT_MASK;

        return thermalert;
}

static u32 read_reason_vr_tdc(struct xe_gt *gt)
{
        u32 tdc = read_perf_limit_reasons(gt) & VR_TDC_MASK;

        return tdc;
}

static ssize_t status_show(struct device *dev,
                           struct device_attribute *attr,
                           char *buff)
{
        struct xe_gt *gt = dev_to_gt(dev);
        bool status = !!read_status(gt);

        return sysfs_emit(buff, "%u\n", status);
}
static DEVICE_ATTR_RO(status);

static ssize_t reason_pl1_show(struct device *dev,
                               struct device_attribute *attr,
                               char *buff)
{
        struct xe_gt *gt = dev_to_gt(dev);
        bool pl1 = !!read_reason_pl1(gt);

        return sysfs_emit(buff, "%u\n", pl1);
}
static DEVICE_ATTR_RO(reason_pl1);

static ssize_t reason_pl2_show(struct device *dev,
                               struct device_attribute *attr,
                               char *buff)
{
        struct xe_gt *gt = dev_to_gt(dev);
        bool pl2 = !!read_reason_pl2(gt);

        return sysfs_emit(buff, "%u\n", pl2);
}
static DEVICE_ATTR_RO(reason_pl2);

static ssize_t reason_pl4_show(struct device *dev,
                               struct device_attribute *attr,
                               char *buff)
{
        struct xe_gt *gt = dev_to_gt(dev);
        bool pl4 = !!read_reason_pl4(gt);

        return sysfs_emit(buff, "%u\n", pl4);
}
static DEVICE_ATTR_RO(reason_pl4);

static ssize_t reason_thermal_show(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buff)
{
        struct xe_gt *gt = dev_to_gt(dev);
        bool thermal = !!read_reason_thermal(gt);

        return sysfs_emit(buff, "%u\n", thermal);
}
static DEVICE_ATTR_RO(reason_thermal);

static ssize_t reason_prochot_show(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buff)
{
        struct xe_gt *gt = dev_to_gt(dev);
        bool prochot = !!read_reason_prochot(gt);

        return sysfs_emit(buff, "%u\n", prochot);
}
static DEVICE_ATTR_RO(reason_prochot);

static ssize_t reason_ratl_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buff)
{
        struct xe_gt *gt = dev_to_gt(dev);
        bool ratl = !!read_reason_ratl(gt);

        return sysfs_emit(buff, "%u\n", ratl);
}
static DEVICE_ATTR_RO(reason_ratl);

static ssize_t reason_vr_thermalert_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buff)
{
        struct xe_gt *gt = dev_to_gt(dev);
        bool thermalert = !!read_reason_vr_thermalert(gt);

        return sysfs_emit(buff, "%u\n", thermalert);
}
static DEVICE_ATTR_RO(reason_vr_thermalert);

static ssize_t reason_vr_tdc_show(struct device *dev,
                                  struct device_attribute *attr,
                                  char *buff)
{
        struct xe_gt *gt = dev_to_gt(dev);
        bool tdc = !!read_reason_vr_tdc(gt);

        return sysfs_emit(buff, "%u\n", tdc);
}
static DEVICE_ATTR_RO(reason_vr_tdc);

static struct attribute *throttle_attrs[] = {
        &dev_attr_status.attr,
        &dev_attr_reason_pl1.attr,
        &dev_attr_reason_pl2.attr,
        &dev_attr_reason_pl4.attr,
        &dev_attr_reason_thermal.attr,
        &dev_attr_reason_prochot.attr,
        &dev_attr_reason_ratl.attr,
        &dev_attr_reason_vr_thermalert.attr,
        &dev_attr_reason_vr_tdc.attr,
        NULL
};

static const struct attribute_group throttle_group_attrs = {
        .name = "throttle",
        .attrs = throttle_attrs,
};

static void gt_throttle_sysfs_fini(struct drm_device *drm, void *arg)
{
        struct xe_gt *gt = arg;

        sysfs_remove_group(gt->freq, &throttle_group_attrs);
}

void xe_gt_throttle_sysfs_init(struct xe_gt *gt)
{
        struct xe_device *xe = gt_to_xe(gt);
        int err;

        err = sysfs_create_group(gt->freq, &throttle_group_attrs);
        if (err) {
                drm_warn(&xe->drm, "failed to register throttle sysfs, err: %d\n", err);
                return;
        }

        err = drmm_add_action_or_reset(&xe->drm, gt_throttle_sysfs_fini, gt);
        if (err)
                drm_warn(&xe->drm, "%s: drmm_add_action_or_reset failed, err: %d\n",
                         __func__, err);
}