1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * AMD Platform Management Framework Driver
4 *
5 * Copyright (c) 2022, Advanced Micro Devices, Inc.
6 * All Rights Reserved.
7 *
8 * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
9 */
10
11 #include <linux/array_size.h>
12 #include <linux/cleanup.h>
13 #include <linux/debugfs.h>
14 #include <linux/iopoll.h>
15 #include <linux/module.h>
16 #include <linux/mutex.h>
17 #include <linux/pci.h>
18 #include <linux/platform_device.h>
19 #include <linux/power_supply.h>
20 #include <linux/string.h>
21 #include <asm/amd/node.h>
22 #include "pmf.h"
23
24 /* PMF-SMU communication registers */
25 #define AMD_PMF_REGISTER_MESSAGE 0xA18
26 #define AMD_PMF_REGISTER_RESPONSE 0xA78
27 #define AMD_PMF_REGISTER_ARGUMENT 0xA58
28
29 /* Base address of SMU for mapping physical address to virtual address */
30 #define AMD_PMF_MAPPING_SIZE 0x01000
31 #define AMD_PMF_BASE_ADDR_OFFSET 0x10000
32 #define AMD_PMF_BASE_ADDR_LO 0x13B102E8
33 #define AMD_PMF_BASE_ADDR_HI 0x13B102EC
34 #define AMD_PMF_BASE_ADDR_LO_MASK GENMASK(15, 0)
35 #define AMD_PMF_BASE_ADDR_HI_MASK GENMASK(31, 20)
36
37 /* SMU Response Codes */
38 #define AMD_PMF_RESULT_OK 0x01
39 #define AMD_PMF_RESULT_CMD_REJECT_BUSY 0xFC
40 #define AMD_PMF_RESULT_CMD_REJECT_PREREQ 0xFD
41 #define AMD_PMF_RESULT_CMD_UNKNOWN 0xFE
42 #define AMD_PMF_RESULT_FAILED 0xFF
43
44 #define PMF_MSG_DELAY_MIN_US 50
45 #define RESPONSE_REGISTER_LOOP_MAX 20000
46
47 #define DELAY_MIN_US 2000
48 #define DELAY_MAX_US 3000
49
/* Override the Metrics Table sampling period (in ms); writable at runtime (0644) */
static int metrics_table_loop_ms = 1000;
module_param(metrics_table_loop_ms, int, 0644);
MODULE_PARM_DESC(metrics_table_loop_ms, "Metrics Table sample size time (default = 1000ms)");

/* Force load on supported older platforms (read-only after module load) */
static bool force_load;
module_param(force_load, bool, 0444);
MODULE_PARM_DESC(force_load, "Force load this driver on supported older platforms (experimental)");

/* Opt out of the Smart PC solution by setting this to false at load time */
static bool smart_pc_support = true;
module_param(smart_pc_support, bool, 0444);
MODULE_PARM_DESC(smart_pc_support, "Smart PC Support (default = true)");

/* Device of the probed PMF instance; consumed by amd_pmf_get_npu_data() */
static struct device *pmf_device;
65
/*
 * Power-source change notifier: re-applies the power limits matching the new
 * AC/DC source, unless Auto Mode or CnQF is active on a balanced profile, in
 * which case those features manage the transition themselves.
 */
static int amd_pmf_pwr_src_notify_call(struct notifier_block *nb, unsigned long event, void *data)
{
	struct amd_pmf_dev *pmf = container_of(nb, struct amd_pmf_dev, pwr_src_notifier);

	/* Only power-supply property changes are of interest */
	if (event != PSY_EVENT_PROP_CHANGED)
		return NOTIFY_OK;

	if (is_apmf_func_supported(pmf, APMF_FUNC_AUTO_MODE) ||
	    is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_DC) ||
	    is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_AC)) {
		/* AMT/CnQF own the limits while the profile is balanced */
		if ((pmf->amt_enabled || pmf->cnqf_enabled) && is_pprof_balanced(pmf))
			return NOTIFY_DONE;
	}

	if (is_apmf_func_supported(pmf, APMF_FUNC_STATIC_SLIDER_GRANULAR))
		amd_pmf_set_sps_power_limits(pmf);

	if (is_apmf_func_supported(pmf, APMF_FUNC_OS_POWER_SLIDER_UPDATE))
		amd_pmf_power_slider_update_event(pmf);

	return NOTIFY_OK;
}
88
/*
 * debugfs show handler: dump the slider power limits (SPL/FPPT/SPPT/STT)
 * for the currently selected power source and platform-profile mode.
 */
static int current_power_limits_show(struct seq_file *seq, void *unused)
{
	struct amd_pmf_dev *dev = seq->private;
	struct amd_pmf_static_slider_granular table;
	int mode, src = 0;

	mode = amd_pmf_get_pprof_modes(dev);
	if (mode < 0)
		return mode;

	src = amd_pmf_get_power_source();
	/* SLIDER_OP_GET fills @table with the limits for (src, mode) */
	amd_pmf_update_slider(dev, SLIDER_OP_GET, mode, &table);
	seq_printf(seq, "spl:%u fppt:%u sppt:%u sppt_apu_only:%u stt_min:%u stt[APU]:%u stt[HS2]: %u\n",
		   table.prop[src][mode].spl,
		   table.prop[src][mode].fppt,
		   table.prop[src][mode].sppt,
		   table.prop[src][mode].sppt_apu_only,
		   table.prop[src][mode].stt_min,
		   table.prop[src][mode].stt_skin_temp[STT_TEMP_APU],
		   table.prop[src][mode].stt_skin_temp[STT_TEMP_HS2]);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(current_power_limits);
112
/* Tear down the driver's entire debugfs directory tree */
static void amd_pmf_dbgfs_unregister(struct amd_pmf_dev *dev)
{
	debugfs_remove_recursive(dev->dbgfs_dir);
}
117
amd_pmf_dbgfs_register(struct amd_pmf_dev * dev)118 static void amd_pmf_dbgfs_register(struct amd_pmf_dev *dev)
119 {
120 dev->dbgfs_dir = debugfs_create_dir("amd_pmf", NULL);
121 if (dev->pmf_if_version == PMF_IF_V1)
122 debugfs_create_file("current_power_limits", 0644, dev->dbgfs_dir, dev,
123 ¤t_power_limits_fops);
124 }
125
amd_pmf_get_power_source(void)126 int amd_pmf_get_power_source(void)
127 {
128 if (power_supply_is_system_supplied() > 0)
129 return POWER_SOURCE_AC;
130 else
131 return POWER_SOURCE_DC;
132 }
133
/*
 * Periodic worker: pull a fresh Metrics Table snapshot from PMFW, feed the
 * averaged SoC power into the Auto Mode / CnQF state machines, and re-arm
 * itself every metrics_table_loop_ms milliseconds.
 */
static void amd_pmf_get_metrics(struct work_struct *work)
{
	struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, work_buffer.work);
	ktime_t time_elapsed_ms;
	int socket_power;

	guard(mutex)(&dev->update_mutex);

	/* Transfer table contents */
	memset(dev->buf, 0, sizeof(dev->m_table));
	amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, SET_CMD, METRICS_TABLE_ID, NULL);
	memcpy(&dev->m_table, dev->buf, sizeof(dev->m_table));

	/* Time since the previous sample, used for transition bookkeeping */
	time_elapsed_ms = ktime_to_ms(ktime_get()) - dev->start_time;
	/* Calculate the avg SoC power consumption */
	socket_power = dev->m_table.apu_power + dev->m_table.dgpu_power;

	if (dev->amt_enabled) {
		/* Apply the Auto Mode transition */
		amd_pmf_trans_automode(dev, socket_power, time_elapsed_ms);
	}

	if (dev->cnqf_enabled) {
		/* Apply the CnQF transition */
		amd_pmf_trans_cnqf(dev, socket_power, time_elapsed_ms);
	}

	dev->start_time = ktime_to_ms(ktime_get());
	schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms));
}
164
/* MMIO read of a PMF-SMU communication register at @reg_offset */
static inline u32 amd_pmf_reg_read(struct amd_pmf_dev *dev, int reg_offset)
{
	return ioread32(dev->regbase + reg_offset);
}

/* MMIO write of @val to a PMF-SMU communication register at @reg_offset */
static inline void amd_pmf_reg_write(struct amd_pmf_dev *dev, int reg_offset, u32 val)
{
	iowrite32(val, dev->regbase + reg_offset);
}
174
amd_pmf_dump_registers(struct amd_pmf_dev * dev)175 static void __maybe_unused amd_pmf_dump_registers(struct amd_pmf_dev *dev)
176 {
177 u32 value;
178
179 value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_RESPONSE);
180 dev_dbg(dev->dev, "AMD_PMF_REGISTER_RESPONSE:%x\n", value);
181
182 value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_ARGUMENT);
183 dev_dbg(dev->dev, "AMD_PMF_REGISTER_ARGUMENT:%d\n", value);
184
185 value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_MESSAGE);
186 dev_dbg(dev->dev, "AMD_PMF_REGISTER_MESSAGE:%x\n", value);
187 }
188
/**
 * fixp_q88_fromint: Convert integer to Q8.8
 * @val: input value
 *
 * Converts an integer into binary fixed point format where 8 bits
 * are used for integer and 8 bits are used for the decimal.
 * Note: values above 0xFFFFFF lose their top 8 bits in the shift.
 *
 * Return: unsigned integer converted to Q8.8 format
 */
u32 fixp_q88_fromint(u32 val)
{
	return val << 8;
}
202
/*
 * amd_pmf_send_cmd - issue a mailbox command to the PMFW (SMU)
 * @dev:     PMF device instance
 * @message: command/message ID to send
 * @get:     true to read back a result from the argument register
 * @arg:     argument value written before the message
 * @data:    out pointer for the result when @get is true
 *
 * Serialized by dev->lock. Protocol order matters: wait for the response
 * register to be non-zero (prior transaction retired), clear it, write the
 * argument, then the message, then poll the response again and decode it.
 *
 * Return: 0 on success, -ETIMEDOUT/-EBUSY/-EINVAL/-EIO on failure.
 */
int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32 *data)
{
	int rc;
	u32 val;

	guard(mutex)(&dev->lock);

	/* Wait until we get a valid response */
	rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE,
				val, val != 0, PMF_MSG_DELAY_MIN_US,
				PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
	if (rc) {
		dev_err(dev->dev, "failed to talk to SMU\n");
		return rc;
	}

	/* Write zero to response register */
	amd_pmf_reg_write(dev, AMD_PMF_REGISTER_RESPONSE, 0);

	/* Write argument into argument register */
	amd_pmf_reg_write(dev, AMD_PMF_REGISTER_ARGUMENT, arg);

	/* Write message ID to message ID register */
	amd_pmf_reg_write(dev, AMD_PMF_REGISTER_MESSAGE, message);

	/* Wait until we get a valid response */
	rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE,
				val, val != 0, PMF_MSG_DELAY_MIN_US,
				PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
	if (rc) {
		dev_err(dev->dev, "SMU response timed out\n");
		return rc;
	}

	switch (val) {
	case AMD_PMF_RESULT_OK:
		if (get) {
			/* PMFW may take longer time to return back the data */
			usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US);
			*data = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_ARGUMENT);
		}
		break;
	case AMD_PMF_RESULT_CMD_REJECT_BUSY:
		dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
		rc = -EBUSY;
		break;
	case AMD_PMF_RESULT_CMD_UNKNOWN:
		dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
		rc = -EINVAL;
		break;
	case AMD_PMF_RESULT_CMD_REJECT_PREREQ:
	case AMD_PMF_RESULT_FAILED:
	default:
		dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
		rc = -EIO;
		break;
	}

	/* No-op unless dynamic debug enables amd_pmf_dump_registers() */
	amd_pmf_dump_registers(dev);
	return rc;
}
264
/* Root-complex PCI IDs of SoCs whose PMF mailbox this driver understands */
static const struct pci_device_id pmf_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RMB) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_ROOT) },
	{ }
};
272
/*
 * amd_pmf_set_dram_addr - program the metrics-table DRAM address into PMFW
 * @dev:          PMF device instance
 * @alloc_buffer: when true, size and allocate the table buffer first
 *
 * Return: 0 on success, -EINVAL for an unrecognized CPU id, -ENOMEM on
 * allocation failure.
 */
int amd_pmf_set_dram_addr(struct amd_pmf_dev *dev, bool alloc_buffer)
{
	u64 phys_addr;
	u32 hi, low;

	/* Get Metrics Table Address */
	if (alloc_buffer) {
		switch (dev->cpu_id) {
		case AMD_CPU_ID_PS:
		case AMD_CPU_ID_RMB:
			dev->mtable_size = sizeof(dev->m_table);
			break;
		case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT:
		case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT:
			dev->mtable_size = sizeof(dev->m_table_v2);
			break;
		default:
			dev_err(dev->dev, "Invalid CPU id: 0x%x", dev->cpu_id);
			/* Bail out: continuing would allocate a zero-size buffer */
			return -EINVAL;
		}

		dev->buf = devm_kzalloc(dev->dev, dev->mtable_size, GFP_KERNEL);
		if (!dev->buf)
			return -ENOMEM;
	}

	/* Hand the buffer's physical address to PMFW as two 32-bit halves */
	phys_addr = virt_to_phys(dev->buf);
	hi = phys_addr >> 32;
	low = phys_addr & GENMASK(31, 0);

	amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, SET_CMD, hi, NULL);
	amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, SET_CMD, low, NULL);

	return 0;
}
307
/*
 * Set up the Metrics Table machinery: allocate/program the shared DRAM
 * buffer and kick off the periodic sampling worker.
 */
int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev)
{
	int ret;

	INIT_DELAYED_WORK(&dev->work_buffer, amd_pmf_get_metrics);

	ret = amd_pmf_set_dram_addr(dev, true);
	if (ret)
		return ret;

	/*
	 * Start collecting the metrics data after a small delay
	 * or else, we might end up getting stale values from PMFW.
	 */
	schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms * 3));

	return 0;
}
326
is_npu_metrics_supported(struct amd_pmf_dev * pdev)327 static int is_npu_metrics_supported(struct amd_pmf_dev *pdev)
328 {
329 switch (pdev->cpu_id) {
330 case PCI_DEVICE_ID_AMD_1AH_M20H_ROOT:
331 case PCI_DEVICE_ID_AMD_1AH_M60H_ROOT:
332 return 0;
333 default:
334 return -EOPNOTSUPP;
335 }
336 }
337
/*
 * Query PMFW for a fresh v2 metrics table and copy the NPU-related fields
 * into @data. Serialized by dev->metrics_mutex.
 */
static int amd_pmf_get_smu_metrics(struct amd_pmf_dev *dev, struct amd_pmf_npu_metrics *data)
{
	int ret, i;

	guard(mutex)(&dev->metrics_mutex);

	ret = is_npu_metrics_supported(dev);
	if (ret)
		return ret;

	/*
	 * NOTE(review): alloc_buffer=true makes every call devm-allocate a
	 * fresh table buffer that is only reclaimed at device teardown —
	 * confirm whether reusing an already-allocated dev->buf is intended.
	 */
	ret = amd_pmf_set_dram_addr(dev, true);
	if (ret)
		return ret;

	memset(dev->buf, 0, dev->mtable_size);

	/* Send SMU command to get NPU metrics */
	ret = amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, SET_CMD, METRICS_TABLE_ID, NULL);
	if (ret) {
		dev_err(dev->dev, "SMU command failed to get NPU metrics: %d\n", ret);
		return ret;
	}

	memcpy(&dev->m_table_v2, dev->buf, dev->mtable_size);

	/* Copy only the NPU-relevant fields out of the full table */
	data->npuclk_freq = dev->m_table_v2.npuclk_freq;
	for (i = 0; i < ARRAY_SIZE(data->npu_busy); i++)
		data->npu_busy[i] = dev->m_table_v2.npu_busy[i];
	data->npu_power = dev->m_table_v2.npu_power;
	data->mpnpuclk_freq = dev->m_table_v2.mpnpuclk_freq;
	data->npu_reads = dev->m_table_v2.npu_reads;
	data->npu_writes = dev->m_table_v2.npu_writes;

	return 0;
}
373
amd_pmf_get_npu_data(struct amd_pmf_npu_metrics * info)374 int amd_pmf_get_npu_data(struct amd_pmf_npu_metrics *info)
375 {
376 struct amd_pmf_dev *pdev;
377
378 if (!info)
379 return -EINVAL;
380
381 if (!pmf_device)
382 return -ENODEV;
383
384 pdev = dev_get_drvdata(pmf_device);
385 if (!pdev)
386 return -ENODEV;
387
388 return amd_pmf_get_smu_metrics(pdev, info);
389 }
390 EXPORT_SYMBOL_NS_GPL(amd_pmf_get_npu_data, "AMD_PMF");
391
/*
 * Re-initialize the TEE Trusted Application after hibernation: try each
 * known TA UUID in turn and stop at the first one whose policy engine
 * starts successfully; otherwise tear the TEE context down and try the next.
 */
static int amd_pmf_reinit_ta(struct amd_pmf_dev *pdev)
{
	bool status;
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(amd_pmf_ta_uuid); i++) {
		ret = amd_pmf_tee_init(pdev, &amd_pmf_ta_uuid[i]);
		if (ret) {
			dev_err(pdev->dev, "TEE init failed for UUID[%d] ret: %d\n", i, ret);
			return ret;
		}

		ret = amd_pmf_start_policy_engine(pdev);
		dev_dbg(pdev->dev, "start policy engine ret: %d (UUID idx: %d)\n", ret, i);
		status = ret == TA_PMF_TYPE_SUCCESS;
		if (status)
			break;
		/* This UUID did not take; release the TEE context before retrying */
		amd_pmf_tee_deinit(pdev);
	}

	/*
	 * NOTE(review): 0 is returned even when every UUID failed to start
	 * the policy engine, so callers cannot detect a wholly failed re-init.
	 */
	return 0;
}
414
/*
 * PM restore (resume from hibernation): PMFW has lost the DRAM address and
 * the TEE session, so re-program the existing buffer address and re-init
 * the Trusted Application if Smart PC was enabled.
 */
static int amd_pmf_restore_handler(struct device *dev)
{
	struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
	int ret;

	if (pdev->buf) {
		/* Buffer already allocated; only re-send its address to PMFW */
		ret = amd_pmf_set_dram_addr(pdev, false);
		if (ret)
			return ret;
	}

	if (pdev->smart_pc_enabled)
		amd_pmf_reinit_ta(pdev);

	return 0;
}
431
/*
 * PM freeze (entering hibernation): stop the policy-binary worker and drop
 * all TEE resources; they are rebuilt by amd_pmf_restore_handler().
 */
static int amd_pmf_freeze_handler(struct device *dev)
{
	struct amd_pmf_dev *pdev = dev_get_drvdata(dev);

	if (!pdev->smart_pc_enabled)
		return 0;

	cancel_delayed_work_sync(&pdev->pb_work);
	/* Clear all TEE resources */
	amd_pmf_tee_deinit(pdev);
	/* Session is gone; forget its id so restore starts from scratch */
	pdev->session_id = 0;

	return 0;
}
446
/*
 * PM suspend: quiesce the policy-binary worker and tell the BIOS (via the
 * v2 heartbeat, when supported) that we are going to sleep.
 */
static int amd_pmf_suspend_handler(struct device *dev)
{
	struct amd_pmf_dev *pdev = dev_get_drvdata(dev);

	if (pdev->smart_pc_enabled)
		cancel_delayed_work_sync(&pdev->pb_work);

	if (is_apmf_func_supported(pdev, APMF_FUNC_SBIOS_HEARTBEAT_V2))
		amd_pmf_notify_sbios_heartbeat_event_v2(pdev, ON_SUSPEND);

	return 0;
}
459
/*
 * PM resume: re-program the metrics buffer address (PMFW forgets it across
 * suspend), notify the BIOS heartbeat, and restart the policy-binary worker
 * after a short grace period.
 */
static int amd_pmf_resume_handler(struct device *dev)
{
	struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
	int ret;

	if (pdev->buf) {
		/* Buffer survives suspend; only the address must be re-sent */
		ret = amd_pmf_set_dram_addr(pdev, false);
		if (ret)
			return ret;
	}

	if (is_apmf_func_supported(pdev, APMF_FUNC_SBIOS_HEARTBEAT_V2))
		amd_pmf_notify_sbios_heartbeat_event_v2(pdev, ON_RESUME);

	if (pdev->smart_pc_enabled)
		schedule_delayed_work(&pdev->pb_work, msecs_to_jiffies(2000));

	return 0;
}
479
/* Suspend/resume for sleep; freeze/restore for hibernation */
static const struct dev_pm_ops amd_pmf_pm = {
	.suspend = amd_pmf_suspend_handler,
	.resume = amd_pmf_resume_handler,
	.freeze = amd_pmf_freeze_handler,
	.restore = amd_pmf_restore_handler,
};
486
/*
 * Enable the PMF features advertised by the BIOS: static sliders first,
 * then Smart PC (which supersedes the rest when it comes up), otherwise
 * Auto Mode or CnQF as mutually exclusive fallbacks.
 */
static void amd_pmf_init_features(struct amd_pmf_dev *dev)
{
	int ret;

	/* Enable Static Slider */
	if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR) ||
	    is_apmf_func_supported(dev, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
		amd_pmf_init_sps(dev);
		dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call;
		power_supply_reg_notifier(&dev->pwr_src_notifier);
		dev_dbg(dev->dev, "SPS enabled and Platform Profiles registered\n");
	}

	if (smart_pc_support) {
		amd_pmf_init_smart_pc(dev);
		if (dev->smart_pc_enabled) {
			dev_dbg(dev->dev, "Smart PC Solution Enabled\n");
			/* If Smart PC is enabled, no need to check for other features */
			return;
		}
	} else {
		dev->smart_pc_enabled = false;
	}

	if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
		amd_pmf_init_auto_mode(dev);
		dev_dbg(dev->dev, "Auto Mode Init done\n");
	} else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
		   is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) {
		ret = amd_pmf_init_cnqf(dev);
		if (ret)
			dev_warn(dev->dev, "CnQF Init failed\n");
	}
}
521
amd_pmf_deinit_features(struct amd_pmf_dev * dev)522 static void amd_pmf_deinit_features(struct amd_pmf_dev *dev)
523 {
524 if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR) ||
525 is_apmf_func_supported(dev, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
526 power_supply_unreg_notifier(&dev->pwr_src_notifier);
527 }
528
529 if (dev->smart_pc_enabled) {
530 amd_pmf_deinit_smart_pc(dev);
531 } else if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
532 amd_pmf_deinit_auto_mode(dev);
533 } else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
534 is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) {
535 amd_pmf_deinit_cnqf(dev);
536 }
537 }
538
/* ACPI IDs of supported PMF devices; 0x100 marks the legacy force_load-gated platform */
static const struct acpi_device_id amd_pmf_acpi_ids[] = {
	{"AMDI0100", 0x100},
	{"AMDI0102", 0},
	{"AMDI0103", 0},
	{"AMDI0105", 0},
	{"AMDI0107", 0},
	{"AMDI0108", 0},
	{ }
};
MODULE_DEVICE_TABLE(acpi, amd_pmf_acpi_ids);
549
/*
 * Probe: match the ACPI/PCI platform, discover the SMU mailbox base address
 * via SMN, map the communication registers, then bring up the APMF interface
 * and all supported features.
 */
static int amd_pmf_probe(struct platform_device *pdev)
{
	const struct acpi_device_id *id;
	struct amd_pmf_dev *dev;
	struct pci_dev *rdev;
	u32 base_addr_lo;
	u32 base_addr_hi;
	u64 base_addr;
	u32 val;
	int err;

	id = acpi_match_device(amd_pmf_acpi_ids, &pdev->dev);
	if (!id)
		return -ENODEV;

	/* Legacy AMDI0100 platforms load only when explicitly forced */
	if (id->driver_data == 0x100 && !force_load)
		return -ENODEV;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->dev = &pdev->dev;

	/* The root complex at 00:00.0 identifies the SoC generation */
	rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
	if (!rdev || !pci_match_id(pmf_pci_ids, rdev)) {
		pci_dev_put(rdev);
		return -ENODEV;
	}

	dev->cpu_id = rdev->device;

	/* Read the SMU mailbox base address halves over SMN */
	err = amd_smn_read(0, AMD_PMF_BASE_ADDR_LO, &val);
	if (err) {
		pci_dev_put(rdev);
		return dev_err_probe(dev->dev, pcibios_err_to_errno(err),
				     "error in reading from 0x%x\n", AMD_PMF_BASE_ADDR_LO);
	}

	base_addr_lo = val & AMD_PMF_BASE_ADDR_HI_MASK;

	err = amd_smn_read(0, AMD_PMF_BASE_ADDR_HI, &val);
	if (err) {
		pci_dev_put(rdev);
		return dev_err_probe(dev->dev, pcibios_err_to_errno(err),
				     "error in reading from 0x%x\n", AMD_PMF_BASE_ADDR_HI);
	}

	base_addr_hi = val & AMD_PMF_BASE_ADDR_LO_MASK;
	pci_dev_put(rdev);
	base_addr = ((u64)base_addr_hi << 32 | base_addr_lo);

	/* Map only the mailbox window at the fixed offset from the base */
	dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMF_BASE_ADDR_OFFSET,
				    AMD_PMF_MAPPING_SIZE);
	if (!dev->regbase)
		return -ENOMEM;

	err = devm_mutex_init(dev->dev, &dev->lock);
	if (err)
		return err;

	err = devm_mutex_init(dev->dev, &dev->update_mutex);
	if (err)
		return err;

	err = devm_mutex_init(dev->dev, &dev->cb_mutex);
	if (err)
		return err;

	err = devm_mutex_init(dev->dev, &dev->cbi_mutex);
	if (err)
		return err;

	err = devm_mutex_init(dev->dev, &dev->metrics_mutex);
	if (err)
		return err;

	/* ACPI interface first, then features, then the APMF event handler */
	apmf_acpi_init(dev);
	platform_set_drvdata(pdev, dev);
	amd_pmf_dbgfs_register(dev);
	amd_pmf_init_features(dev);
	apmf_install_handler(dev);
	if (is_apmf_func_supported(dev, APMF_FUNC_SBIOS_HEARTBEAT_V2))
		amd_pmf_notify_sbios_heartbeat_event_v2(dev, ON_LOAD);

	/* Publish the device for amd_pmf_get_npu_data() consumers */
	pmf_device = dev->dev;

	dev_info(dev->dev, "registered PMF device successfully\n");

	return 0;
}
641
/* Remove: tear down features, notify BIOS of unload, then ACPI and debugfs */
static void amd_pmf_remove(struct platform_device *pdev)
{
	struct amd_pmf_dev *dev = platform_get_drvdata(pdev);

	amd_pmf_deinit_features(dev);
	if (is_apmf_func_supported(dev, APMF_FUNC_SBIOS_HEARTBEAT_V2))
		amd_pmf_notify_sbios_heartbeat_event_v2(dev, ON_UNLOAD);
	apmf_acpi_deinit(dev);
	amd_pmf_dbgfs_unregister(dev);
}
652
/* sysfs groups attached to every bound device (CnQF feature toggle) */
static const struct attribute_group *amd_pmf_driver_groups[] = {
	&cnqf_feature_attribute_group,
	NULL,
};

static struct platform_driver amd_pmf_driver = {
	.driver = {
		.name = "amd-pmf",
		.acpi_match_table = amd_pmf_acpi_ids,
		.dev_groups = amd_pmf_driver_groups,
		.pm = pm_sleep_ptr(&amd_pmf_pm),
	},
	.probe = amd_pmf_probe,
	.remove = amd_pmf_remove,
};
module_platform_driver(amd_pmf_driver);
669
670 MODULE_LICENSE("GPL");
671 MODULE_DESCRIPTION("AMD Platform Management Framework Driver");
672 MODULE_SOFTDEP("pre: amdtee");
673