Lines Matching full:pc

44 * DOC: GuC Power Conservation (PC)
46 * GuC Power Conservation (PC) supports multiple features for the most …
64 * Render-C states is also a GuC PC feature that is now enabled in Xe for …
70 pc_to_guc(struct xe_guc_pc *pc) in pc_to_guc() argument
72 return container_of(pc, struct xe_guc, pc); in pc_to_guc()
76 pc_to_xe(struct xe_guc_pc *pc) in pc_to_xe() argument
78 struct xe_guc *guc = pc_to_guc(pc); in pc_to_xe()
85 pc_to_gt(struct xe_guc_pc *pc) in pc_to_gt() argument
87 return container_of(pc, struct xe_gt, uc.guc.pc); in pc_to_gt()
91 pc_to_maps(struct xe_guc_pc *pc) in pc_to_maps() argument
93 return &pc->bo->vmap; in pc_to_maps()
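
These small converters all lean on the kernel's container_of() pattern: given a pointer to the embedded xe_guc_pc member, they walk back to the enclosing xe_guc, and from there to the GT and device. A minimal userspace sketch of that pattern follows, with stub structs standing in for the real Xe types (the names and layout here are illustrative only):

	#include <stddef.h>
	#include <stdio.h>

	/* Userspace stand-in for the kernel's container_of() macro. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* Stub types: the real xe_guc/xe_gt structs carry far more state. */
	struct guc_pc { int dummy; };
	struct guc   { int id; struct guc_pc pc; };

	static struct guc *pc_to_guc(struct guc_pc *pc)
	{
		/* Walk back from the embedded member to the enclosing struct. */
		return container_of(pc, struct guc, pc);
	}

	int main(void)
	{
		struct guc guc = { .id = 42 };
		struct guc_pc *pc = &guc.pc;

		/* Recovers the enclosing object we started from. */
		printf("guc id via pc: %d\n", pc_to_guc(pc)->id);
		return 0;
	}
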
108 static int wait_for_pc_state(struct xe_guc_pc *pc, in wait_for_pc_state() argument
114 xe_device_assert_mem_access(pc_to_xe(pc)); in wait_for_pc_state()
117 if (slpc_shared_data_read(pc, header.global_state) == state) in wait_for_pc_state()
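
wait_for_pc_state() simply polls the global state field in the SLPC shared-data header until the GuC reports the requested state or a timeout expires. A hedged userspace sketch of that bounded-polling shape; the state values, poll budget and read_global_state() helper are stand-ins, not the driver's definitions:

	#include <stdio.h>
	#include <unistd.h>

	enum pc_state { PC_NOT_RUNNING, PC_RUNNING };

	/* Stand-in for reading header.global_state out of the shared buffer. */
	static enum pc_state read_global_state(void)
	{
		static int calls;
		/* Pretend the firmware flips to RUNNING after a few polls. */
		return ++calls >= 3 ? PC_RUNNING : PC_NOT_RUNNING;
	}

	/* Poll until @state is reached or the budget runs out; 0 on success. */
	static int wait_for_pc_state(enum pc_state state)
	{
		for (int tries = 0; tries < 50; tries++) {
			if (read_global_state() == state)
				return 0;
			usleep(100);	/* the real code also sleeps between polls */
		}
		return -1;		/* timed out */
	}

	int main(void)
	{
		printf("wait: %s\n", wait_for_pc_state(PC_RUNNING) ? "timeout" : "running");
		return 0;
	}
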
130 static int pc_action_reset(struct xe_guc_pc *pc) in pc_action_reset() argument
132 struct xe_guc_ct *ct = &pc_to_guc(pc)->ct; in pc_action_reset()
137 xe_bo_ggtt_addr(pc->bo), in pc_action_reset()
143 drm_err(&pc_to_xe(pc)->drm, "GuC PC reset: %pe", ERR_PTR(ret)); in pc_action_reset()
148 static int pc_action_shutdown(struct xe_guc_pc *pc) in pc_action_shutdown() argument
150 struct xe_guc_ct *ct = &pc_to_guc(pc)->ct; in pc_action_shutdown()
155 xe_bo_ggtt_addr(pc->bo), in pc_action_shutdown()
161 drm_err(&pc_to_xe(pc)->drm, "GuC PC shutdown %pe", in pc_action_shutdown()
167 static int pc_action_query_task_state(struct xe_guc_pc *pc) in pc_action_query_task_state() argument
169 struct xe_guc_ct *ct = &pc_to_guc(pc)->ct; in pc_action_query_task_state()
174 xe_bo_ggtt_addr(pc->bo), in pc_action_query_task_state()
178 if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING)) in pc_action_query_task_state()
184 drm_err(&pc_to_xe(pc)->drm, in pc_action_query_task_state()
185 "GuC PC query task state failed: %pe", ERR_PTR(ret)); in pc_action_query_task_state()
190 static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value) in pc_action_set_param() argument
192 struct xe_guc_ct *ct = &pc_to_guc(pc)->ct; in pc_action_set_param()
201 if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING)) in pc_action_set_param()
206 drm_err(&pc_to_xe(pc)->drm, "GuC PC set param failed: %pe", in pc_action_set_param()
212 static int pc_action_setup_gucrc(struct xe_guc_pc *pc, u32 mode) in pc_action_setup_gucrc() argument
214 struct xe_guc_ct *ct = &pc_to_guc(pc)->ct; in pc_action_setup_gucrc()
223 drm_err(&pc_to_xe(pc)->drm, "GuC RC enable failed: %pe", in pc_action_setup_gucrc()
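
pc_action_set_param() and pc_action_setup_gucrc() follow the same H2G pattern, but the payload carries a parameter id/value pair or a Render-C control mode instead of the buffer address. A small sketch with placeholder ids and modes (the real values come from the GuC ABI headers):

	#include <stdio.h>
	#include <stdint.h>

	/* Placeholder parameter ids and RC modes, for illustration only. */
	enum { PARAM_MIN_FREQ = 1, PARAM_MAX_FREQ = 2 };
	enum { RC_HOST_CONTROL = 0, RC_FIRMWARE_CONTROL = 1 };

	static int send_h2g(const uint32_t *msg, int len)
	{
		printf("H2G:");
		for (int i = 0; i < len; i++)
			printf(" 0x%x", msg[i]);
		printf("\n");
		return 0;
	}

	/* Set one SLPC tunable: the payload is simply (action, id, value). */
	static int pc_action_set_param(uint8_t id, uint32_t value)
	{
		uint32_t msg[] = { /* SET_PARAM */ 0x10, id, value };
		return send_h2g(msg, 3);
	}

	/* Select who owns Render-C state control: host or GuC firmware. */
	static int pc_action_setup_gucrc(uint32_t mode)
	{
		uint32_t msg[] = { /* SETUP_GUCRC */ 0x11, mode };
		return send_h2g(msg, 2);
	}

	int main(void)
	{
		pc_action_set_param(PARAM_MAX_FREQ, 1300);
		pc_action_setup_gucrc(RC_FIRMWARE_CONTROL);
		return 0;
	}
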
240 static u32 pc_get_min_freq(struct xe_guc_pc *pc) in pc_get_min_freq() argument
245 slpc_shared_data_read(pc, task_state_data.freq)); in pc_get_min_freq()
250 static void pc_set_manual_rp_ctrl(struct xe_guc_pc *pc, bool enable) in pc_set_manual_rp_ctrl() argument
252 struct xe_gt *gt = pc_to_gt(pc); in pc_set_manual_rp_ctrl()
259 static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq) in pc_set_cur_freq() argument
261 struct xe_gt *gt = pc_to_gt(pc); in pc_set_cur_freq()
264 pc_set_manual_rp_ctrl(pc, true); in pc_set_cur_freq()
273 pc_set_manual_rp_ctrl(pc, false); in pc_set_cur_freq()
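
pc_set_cur_freq() brackets the frequency write between enabling and disabling manual RP control, so the hardware honours a direct host request rather than the autonomous governor. A sketch of that enable/write/disable bracket with stubbed register accessors (register names and encodings are placeholders):

	#include <stdio.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* Stubbed MMIO accessors standing in for the driver's register helpers. */
	static uint32_t regs[2];
	enum { REG_RP_CONTROL, REG_FREQ_REQUEST };

	static void mmio_write(int reg, uint32_t val)
	{
		regs[reg] = val;
		printf("write reg[%d] = 0x%x\n", reg, val);
	}

	static void set_manual_rp_ctrl(bool enable)
	{
		/* 1 = host picks the frequency, 0 = hand control back. */
		mmio_write(REG_RP_CONTROL, enable ? 1 : 0);
	}

	static void set_cur_freq(uint32_t freq_mhz)
	{
		set_manual_rp_ctrl(true);
		/* The real code encodes the frequency into a register field here. */
		mmio_write(REG_FREQ_REQUEST, freq_mhz);
		set_manual_rp_ctrl(false);
	}

	int main(void)
	{
		set_cur_freq(1300);
		return 0;
	}
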
276 static int pc_set_min_freq(struct xe_guc_pc *pc, u32 freq) in pc_set_min_freq() argument
282 if (freq < pc->rpn_freq || freq > pc->rp0_freq) in pc_set_min_freq()
289 pc_action_set_param(pc, SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY, in pc_set_min_freq()
290 freq < pc->rpe_freq); in pc_set_min_freq()
292 return pc_action_set_param(pc, in pc_set_min_freq()
297 static int pc_get_max_freq(struct xe_guc_pc *pc) in pc_get_max_freq() argument
302 slpc_shared_data_read(pc, task_state_data.freq)); in pc_get_max_freq()
307 static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq) in pc_set_max_freq() argument
314 if (freq < pc->rpn_freq || freq > pc->rp0_freq) in pc_set_max_freq()
317 return pc_action_set_param(pc, in pc_set_max_freq()
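
Both setters reject requests outside the fused range (below RPn or above RP0) before touching the firmware, and pc_set_min_freq() additionally asks SLPC to ignore the efficient frequency whenever the requested minimum falls below RPe. A sketch of that validation, with illustrative limits:

	#include <stdio.h>
	#include <stdbool.h>
	#include <errno.h>

	/* Illustrative fused limits in MHz: RPn (floor), RPe (efficient), RP0 (ceiling). */
	struct freq_limits { unsigned int rpn, rpe, rp0; };

	static int set_min_freq(const struct freq_limits *lim, unsigned int freq,
				bool *ignore_efficient)
	{
		if (freq < lim->rpn || freq > lim->rp0)
			return -EINVAL;	/* outside the hardware's fused range */

		/* A minimum below RPe only takes effect if RPe is ignored. */
		*ignore_efficient = freq < lim->rpe;
		return 0;
	}

	int main(void)
	{
		struct freq_limits lim = { .rpn = 300, .rpe = 1100, .rp0 = 1800 };
		bool ignore;

		if (!set_min_freq(&lim, 600, &ignore))
			printf("min=600 ok, ignore efficient freq: %s\n",
			       ignore ? "yes" : "no");
		printf("min=2000 -> %d (rejected)\n", set_min_freq(&lim, 2000, &ignore));
		return 0;
	}
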
322 static void mtl_update_rpe_value(struct xe_guc_pc *pc) in mtl_update_rpe_value() argument
324 struct xe_gt *gt = pc_to_gt(pc); in mtl_update_rpe_value()
332 pc->rpe_freq = decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg)); in mtl_update_rpe_value()
335 static void tgl_update_rpe_value(struct xe_guc_pc *pc) in tgl_update_rpe_value() argument
337 struct xe_gt *gt = pc_to_gt(pc); in tgl_update_rpe_value()
351 pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER; in tgl_update_rpe_value()
354 static void pc_update_rp_values(struct xe_guc_pc *pc) in pc_update_rp_values() argument
356 struct xe_gt *gt = pc_to_gt(pc); in pc_update_rp_values()
360 mtl_update_rpe_value(pc); in pc_update_rp_values()
362 tgl_update_rpe_value(pc); in pc_update_rp_values()
369 pc->rpn_freq = min(pc->rpn_freq, pc->rpe_freq); in pc_update_rp_values()
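
pc_update_rp_values() picks the platform-specific decode of the efficient frequency (the MTL and TGL register layouts differ) and then clamps RPn so it never ends up above RPe. A sketch of the field-decode-plus-multiplier step; the mask, shift and 50 MHz step size are examples, not the hardware's actual encoding:

	#include <stdio.h>
	#include <stdint.h>

	#define FREQ_MULTIPLIER	50U		/* example: field counts 50 MHz steps */
	#define RPE_MASK	0xffU
	#define RPE_SHIFT	8

	/* Decode an RPe-style field out of a raw capability register value. */
	static unsigned int decode_rpe(uint32_t reg)
	{
		return ((reg >> RPE_SHIFT) & RPE_MASK) * FREQ_MULTIPLIER;
	}

	int main(void)
	{
		uint32_t raw = 0x1600;		/* field value 0x16 = 22 -> 1100 MHz */
		unsigned int rpe = decode_rpe(raw);
		unsigned int rpn = 1200;	/* pretend the fused RPn came out higher */

		/* Mirrors "pc->rpn_freq = min(pc->rpn_freq, pc->rpe_freq)". */
		if (rpn > rpe)
			rpn = rpe;
		printf("rpe=%u MHz, clamped rpn=%u MHz\n", rpe, rpn);
		return 0;
	}
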
374 * @pc: The GuC PC
378 u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc) in xe_guc_pc_get_act_freq() argument
380 struct xe_gt *gt = pc_to_gt(pc); in xe_guc_pc_get_act_freq()
404 * @pc: The GuC PC
408 * -EAGAIN if GuC PC not ready (likely in middle of a reset).
410 int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq) in xe_guc_pc_get_cur_freq() argument
412 struct xe_gt *gt = pc_to_gt(pc); in xe_guc_pc_get_cur_freq()
437 * @pc: The GuC PC
441 u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc) in xe_guc_pc_get_rp0_freq() argument
443 return pc->rp0_freq; in xe_guc_pc_get_rp0_freq()
448 * @pc: The GuC PC
452 u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc) in xe_guc_pc_get_rpe_freq() argument
454 struct xe_gt *gt = pc_to_gt(pc); in xe_guc_pc_get_rpe_freq()
458 pc_update_rp_values(pc); in xe_guc_pc_get_rpe_freq()
461 return pc->rpe_freq; in xe_guc_pc_get_rpe_freq()
466 * @pc: The GuC PC
470 u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc) in xe_guc_pc_get_rpn_freq() argument
472 return pc->rpn_freq; in xe_guc_pc_get_rpn_freq()
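
The exported getters fall into two groups: limits cached at init time (RP0, RPn) are returned directly, while live values (actual frequency, requested frequency, RPe) need the GT awake, a register read and a field decode, and the current-frequency query can fail with -EAGAIN while the GuC is mid-reset. A sketch of a consumer handling both kinds, with hypothetical getter names:

	#include <stdio.h>
	#include <errno.h>

	/* Hypothetical getter: 0 on success, -EAGAIN while GuC PC is resetting. */
	static int get_cur_freq(unsigned int *freq_mhz)
	{
		*freq_mhz = 1150;
		return 0;
	}

	/* Cached limits need no firmware round-trip. */
	static unsigned int get_rp0_freq(void) { return 1800; }
	static unsigned int get_rpn_freq(void) { return 300; }

	int main(void)
	{
		unsigned int cur;
		int ret = get_cur_freq(&cur);

		if (ret == -EAGAIN) {
			printf("GuC PC busy, try again\n");
			return 0;
		}
		printf("cur=%u MHz (range %u-%u MHz)\n",
		       cur, get_rpn_freq(), get_rp0_freq());
		return 0;
	}
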
477 * @pc: The GuC PC
481 * -EAGAIN if GuC PC not ready (likely in middle of a reset).
483 int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq) in xe_guc_pc_get_min_freq() argument
485 struct xe_gt *gt = pc_to_gt(pc); in xe_guc_pc_get_min_freq()
488 xe_device_mem_access_get(pc_to_xe(pc)); in xe_guc_pc_get_min_freq()
489 mutex_lock(&pc->freq_lock); in xe_guc_pc_get_min_freq()
490 if (!pc->freq_ready) { in xe_guc_pc_get_min_freq()
504 ret = pc_action_query_task_state(pc); in xe_guc_pc_get_min_freq()
508 *freq = pc_get_min_freq(pc); in xe_guc_pc_get_min_freq()
513 mutex_unlock(&pc->freq_lock); in xe_guc_pc_get_min_freq()
514 xe_device_mem_access_put(pc_to_xe(pc)); in xe_guc_pc_get_min_freq()
520 * @pc: The GuC PC
524 * -EAGAIN if GuC PC not ready (likely in middle of a reset),
527 int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq) in xe_guc_pc_set_min_freq() argument
531 xe_device_mem_access_get(pc_to_xe(pc)); in xe_guc_pc_set_min_freq()
532 mutex_lock(&pc->freq_lock); in xe_guc_pc_set_min_freq()
533 if (!pc->freq_ready) { in xe_guc_pc_set_min_freq()
539 ret = pc_set_min_freq(pc, freq); in xe_guc_pc_set_min_freq()
543 pc->user_requested_min = freq; in xe_guc_pc_set_min_freq()
546 mutex_unlock(&pc->freq_lock); in xe_guc_pc_set_min_freq()
547 xe_device_mem_access_put(pc_to_xe(pc)); in xe_guc_pc_set_min_freq()
554 * @pc: The GuC PC
558 * -EAGAIN if GuC PC not ready (likely in middle of a reset).
560 int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq) in xe_guc_pc_get_max_freq() argument
564 xe_device_mem_access_get(pc_to_xe(pc)); in xe_guc_pc_get_max_freq()
565 mutex_lock(&pc->freq_lock); in xe_guc_pc_get_max_freq()
566 if (!pc->freq_ready) { in xe_guc_pc_get_max_freq()
572 ret = pc_action_query_task_state(pc); in xe_guc_pc_get_max_freq()
576 *freq = pc_get_max_freq(pc); in xe_guc_pc_get_max_freq()
579 mutex_unlock(&pc->freq_lock); in xe_guc_pc_get_max_freq()
580 xe_device_mem_access_put(pc_to_xe(pc)); in xe_guc_pc_get_max_freq()
586 * @pc: The GuC PC
590 * -EAGAIN if GuC PC not ready (likely in middle of a reset),
593 int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq) in xe_guc_pc_set_max_freq() argument
597 xe_device_mem_access_get(pc_to_xe(pc)); in xe_guc_pc_set_max_freq()
598 mutex_lock(&pc->freq_lock); in xe_guc_pc_set_max_freq()
599 if (!pc->freq_ready) { in xe_guc_pc_set_max_freq()
605 ret = pc_set_max_freq(pc, freq); in xe_guc_pc_set_max_freq()
609 pc->user_requested_max = freq; in xe_guc_pc_set_max_freq()
612 mutex_unlock(&pc->freq_lock); in xe_guc_pc_set_max_freq()
613 xe_device_mem_access_put(pc_to_xe(pc)); in xe_guc_pc_set_max_freq()
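
Every min/max getter and setter above wraps its work in the same guard: take a device memory-access (runtime PM) reference, grab freq_lock, return -EAGAIN if freq_ready is not yet set, do the firmware call, then unlock and drop the reference; the setters also record the user's request so it can be replayed after a GuC reset. A compact sketch of that pattern with stand-in lock and PM helpers:

	#include <stdio.h>
	#include <stdbool.h>
	#include <errno.h>
	#include <pthread.h>

	struct pc {
		pthread_mutex_t freq_lock;	/* stand-in for the kernel mutex */
		bool freq_ready;
		unsigned int user_requested_min;
	};

	static void mem_access_get(void) { /* runtime-PM reference, stubbed */ }
	static void mem_access_put(void) { }
	static int  push_min_to_firmware(unsigned int f) { (void)f; return 0; }

	static int pc_set_min(struct pc *pc, unsigned int freq)
	{
		int ret;

		mem_access_get();
		pthread_mutex_lock(&pc->freq_lock);
		if (!pc->freq_ready) {
			ret = -EAGAIN;		/* GuC PC not up yet / mid-reset */
			goto out;
		}
		ret = push_min_to_firmware(freq);
		if (!ret)
			pc->user_requested_min = freq;	/* replayed after resets */
	out:
		pthread_mutex_unlock(&pc->freq_lock);
		mem_access_put();
		return ret;
	}

	int main(void)
	{
		struct pc pc = { .freq_lock = PTHREAD_MUTEX_INITIALIZER,
				 .freq_ready = true };

		printf("set min: %d, recorded %u MHz\n",
		       pc_set_min(&pc, 600), pc.user_requested_min);
		return 0;
	}
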
619 * @pc: XE_GuC_PC instance
621 enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc) in xe_guc_pc_c_status() argument
623 struct xe_gt *gt = pc_to_gt(pc); in xe_guc_pc_c_status()
650 * @pc: Xe_GuC_PC instance
652 u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc) in xe_guc_pc_rc6_residency() argument
654 struct xe_gt *gt = pc_to_gt(pc); in xe_guc_pc_rc6_residency()
666 * @pc: Xe_GuC_PC instance
668 u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc) in xe_guc_pc_mc6_residency() argument
670 struct xe_gt *gt = pc_to_gt(pc); in xe_guc_pc_mc6_residency()
680 static void mtl_init_fused_rp_values(struct xe_guc_pc *pc) in mtl_init_fused_rp_values() argument
682 struct xe_gt *gt = pc_to_gt(pc); in mtl_init_fused_rp_values()
685 xe_device_assert_mem_access(pc_to_xe(pc)); in mtl_init_fused_rp_values()
692 pc->rp0_freq = decode_freq(REG_FIELD_GET(MTL_RP0_CAP_MASK, reg)); in mtl_init_fused_rp_values()
694 pc->rpn_freq = decode_freq(REG_FIELD_GET(MTL_RPN_CAP_MASK, reg)); in mtl_init_fused_rp_values()
697 static void tgl_init_fused_rp_values(struct xe_guc_pc *pc) in tgl_init_fused_rp_values() argument
699 struct xe_gt *gt = pc_to_gt(pc); in tgl_init_fused_rp_values()
703 xe_device_assert_mem_access(pc_to_xe(pc)); in tgl_init_fused_rp_values()
709 pc->rp0_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER; in tgl_init_fused_rp_values()
710 pc->rpn_freq = REG_FIELD_GET(RPN_MASK, reg) * GT_FREQUENCY_MULTIPLIER; in tgl_init_fused_rp_values()
713 static void pc_init_fused_rp_values(struct xe_guc_pc *pc) in pc_init_fused_rp_values() argument
715 struct xe_gt *gt = pc_to_gt(pc); in pc_init_fused_rp_values()
719 mtl_init_fused_rp_values(pc); in pc_init_fused_rp_values()
721 tgl_init_fused_rp_values(pc); in pc_init_fused_rp_values()
727 * @pc: Xe_GuC_PC instance
729 void xe_guc_pc_init_early(struct xe_guc_pc *pc) in xe_guc_pc_init_early() argument
731 struct xe_gt *gt = pc_to_gt(pc); in xe_guc_pc_init_early()
734 pc_init_fused_rp_values(pc); in xe_guc_pc_init_early()
735 pc_set_cur_freq(pc, pc->rp0_freq); in xe_guc_pc_init_early()
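
xe_guc_pc_init_early() runs before the GuC is loaded: it decodes the fused RP0/RPn capabilities with the platform-appropriate layout and then requests RP0 as the current frequency, so the GT runs fast until SLPC takes over. A sketch of that ordering; the fuse register layout below is a placeholder:

	#include <stdio.h>
	#include <stdint.h>

	struct pc { unsigned int rp0_freq, rpn_freq; };

	/* Placeholder decode of fused frequency caps from one raw register. */
	static void init_fused_rp_values(struct pc *pc, uint32_t fuse_reg)
	{
		pc->rp0_freq = ((fuse_reg >> 0) & 0xff) * 50;	/* max capability */
		pc->rpn_freq = ((fuse_reg >> 16) & 0xff) * 50;	/* min capability */
	}

	static void set_cur_freq(unsigned int freq)
	{
		printf("request %u MHz before SLPC is running\n", freq);
	}

	/* Mirrors the init_early ordering: read fuses, then jump to RP0. */
	static void pc_init_early(struct pc *pc, uint32_t fuse_reg)
	{
		init_fused_rp_values(pc, fuse_reg);
		set_cur_freq(pc->rp0_freq);
	}

	int main(void)
	{
		struct pc pc;

		pc_init_early(&pc, (6u << 16) | 36u);	/* RPn=300, RP0=1800 */
		printf("rp0=%u rpn=%u\n", pc.rp0_freq, pc.rpn_freq);
		return 0;
	}
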
738 static int pc_adjust_freq_bounds(struct xe_guc_pc *pc) in pc_adjust_freq_bounds() argument
742 lockdep_assert_held(&pc->freq_lock); in pc_adjust_freq_bounds()
744 ret = pc_action_query_task_state(pc); in pc_adjust_freq_bounds()
753 if (pc_get_max_freq(pc) > pc->rp0_freq) in pc_adjust_freq_bounds()
754 pc_set_max_freq(pc, pc->rp0_freq); in pc_adjust_freq_bounds()
760 if (pc_get_min_freq(pc) > pc->rp0_freq) in pc_adjust_freq_bounds()
761 pc_set_min_freq(pc, pc->rp0_freq); in pc_adjust_freq_bounds()
766 static int pc_adjust_requested_freq(struct xe_guc_pc *pc) in pc_adjust_requested_freq() argument
770 lockdep_assert_held(&pc->freq_lock); in pc_adjust_requested_freq()
772 if (pc->user_requested_min != 0) { in pc_adjust_requested_freq()
773 ret = pc_set_min_freq(pc, pc->user_requested_min); in pc_adjust_requested_freq()
778 if (pc->user_requested_max != 0) { in pc_adjust_requested_freq()
779 ret = pc_set_max_freq(pc, pc->user_requested_max); in pc_adjust_requested_freq()
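
After a GuC reset the firmware's notion of min/max can drift from the driver's: pc_adjust_freq_bounds() queries the task state and pulls anything above RP0 back down, and pc_adjust_requested_freq() replays whatever minimum and maximum the user had asked for (a stored value of 0 means nothing was requested). A sketch of the replay step:

	#include <stdio.h>

	struct pc { unsigned int user_requested_min, user_requested_max; };

	static int set_min(unsigned int f) { printf("restore min %u\n", f); return 0; }
	static int set_max(unsigned int f) { printf("restore max %u\n", f); return 0; }

	/* Reapply user choices after a GuC reset; 0 means "nothing requested". */
	static int adjust_requested_freq(struct pc *pc)
	{
		int ret = 0;

		if (pc->user_requested_min)
			ret = set_min(pc->user_requested_min);
		if (!ret && pc->user_requested_max)
			ret = set_max(pc->user_requested_max);
		return ret;
	}

	int main(void)
	{
		struct pc pc = { .user_requested_min = 600, .user_requested_max = 0 };

		return adjust_requested_freq(&pc);
	}
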
789 * @pc: Xe_GuC_PC instance
795 int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc) in xe_guc_pc_gucrc_disable() argument
797 struct xe_device *xe = pc_to_xe(pc); in xe_guc_pc_gucrc_disable()
798 struct xe_gt *gt = pc_to_gt(pc); in xe_guc_pc_gucrc_disable()
804 xe_device_mem_access_get(pc_to_xe(pc)); in xe_guc_pc_gucrc_disable()
806 ret = pc_action_setup_gucrc(pc, XE_GUCRC_HOST_CONTROL); in xe_guc_pc_gucrc_disable()
819 xe_device_mem_access_put(pc_to_xe(pc)); in xe_guc_pc_gucrc_disable()
823 static void pc_init_pcode_freq(struct xe_guc_pc *pc) in pc_init_pcode_freq() argument
825 u32 min = DIV_ROUND_CLOSEST(pc->rpn_freq, GT_FREQUENCY_MULTIPLIER); in pc_init_pcode_freq()
826 u32 max = DIV_ROUND_CLOSEST(pc->rp0_freq, GT_FREQUENCY_MULTIPLIER); in pc_init_pcode_freq()
828 XE_WARN_ON(xe_pcode_init_min_freq_table(pc_to_gt(pc), min, max)); in pc_init_pcode_freq()
831 static int pc_init_freqs(struct xe_guc_pc *pc) in pc_init_freqs() argument
835 mutex_lock(&pc->freq_lock); in pc_init_freqs()
837 ret = pc_adjust_freq_bounds(pc); in pc_init_freqs()
841 ret = pc_adjust_requested_freq(pc); in pc_init_freqs()
845 pc_update_rp_values(pc); in pc_init_freqs()
847 pc_init_pcode_freq(pc); in pc_init_freqs()
853 pc->freq_ready = true; in pc_init_freqs()
856 mutex_unlock(&pc->freq_lock); in pc_init_freqs()
862 * @pc: Xe_GuC_PC instance
864 int xe_guc_pc_start(struct xe_guc_pc *pc) in xe_guc_pc_start() argument
866 struct xe_device *xe = pc_to_xe(pc); in xe_guc_pc_start()
867 struct xe_gt *gt = pc_to_gt(pc); in xe_guc_pc_start()
873 xe_device_mem_access_get(pc_to_xe(pc)); in xe_guc_pc_start()
884 pc_set_cur_freq(pc, UINT_MAX); in xe_guc_pc_start()
890 memset(pc->bo->vmap.vaddr, 0, size); in xe_guc_pc_start()
891 slpc_shared_data_write(pc, header.size, size); in xe_guc_pc_start()
893 ret = pc_action_reset(pc); in xe_guc_pc_start()
897 if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING)) { in xe_guc_pc_start()
898 drm_err(&pc_to_xe(pc)->drm, "GuC PC Start failed\n"); in xe_guc_pc_start()
903 ret = pc_init_freqs(pc); in xe_guc_pc_start()
908 xe_guc_pc_gucrc_disable(pc); in xe_guc_pc_start()
913 ret = pc_action_setup_gucrc(pc, XE_GUCRC_FIRMWARE_CONTROL); in xe_guc_pc_start()
918 xe_device_mem_access_put(pc_to_xe(pc)); in xe_guc_pc_start()
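
xe_guc_pc_start() strings the pieces together: wake the device, push the frequency to maximum for the (re)load, zero the SLPC shared-data buffer and record its size in the header, send the reset action, wait for the RUNNING state, initialise the frequency bookkeeping, and finally hand Render-C control to the firmware. A condensed sketch of that sequence with every step stubbed out:

	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>
	#include <errno.h>

	#define SHARED_DATA_SIZE 4096	/* illustrative size of the SLPC buffer */

	static uint8_t shared_data[SHARED_DATA_SIZE];

	static int  action_reset(void)         { return 0; }
	static int  wait_running(void)         { return 0; }
	static int  init_freqs(void)           { return 0; }
	static int  setup_gucrc_firmware(void) { return 0; }
	static void set_cur_freq_max(void)     { }

	static int pc_start(void)
	{
		uint32_t size = SHARED_DATA_SIZE;
		int ret;

		set_cur_freq_max();		/* run fast while (re)loading */

		/* Fresh shared data: zero it, then record its size in the header. */
		memset(shared_data, 0, sizeof(shared_data));
		memcpy(shared_data, &size, sizeof(size));

		ret = action_reset();
		if (ret)
			return ret;
		if (wait_running()) {
			fprintf(stderr, "GuC PC start failed\n");
			return -EIO;
		}
		ret = init_freqs();
		if (ret)
			return ret;
		return setup_gucrc_firmware();	/* GuC owns Render-C states again */
	}

	int main(void)
	{
		printf("pc_start: %d\n", pc_start());
		return 0;
	}
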
924 * @pc: Xe_GuC_PC instance
926 int xe_guc_pc_stop(struct xe_guc_pc *pc) in xe_guc_pc_stop() argument
928 struct xe_device *xe = pc_to_xe(pc); in xe_guc_pc_stop()
931 xe_device_mem_access_get(pc_to_xe(pc)); in xe_guc_pc_stop()
934 xe_gt_idle_disable_c6(pc_to_gt(pc)); in xe_guc_pc_stop()
939 mutex_lock(&pc->freq_lock); in xe_guc_pc_stop()
940 pc->freq_ready = false; in xe_guc_pc_stop()
941 mutex_unlock(&pc->freq_lock); in xe_guc_pc_stop()
943 ret = pc_action_shutdown(pc); in xe_guc_pc_stop()
947 if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_NOT_RUNNING)) { in xe_guc_pc_stop()
948 drm_err(&pc_to_xe(pc)->drm, "GuC PC Shutdown failed\n"); in xe_guc_pc_stop()
953 xe_device_mem_access_put(pc_to_xe(pc)); in xe_guc_pc_stop()
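
xe_guc_pc_stop() is roughly the mirror image: take C6 control back from the firmware, mark the frequency interface not ready (so new requests see -EAGAIN), send the shutdown action and wait for the NOT_RUNNING state. A minimal sketch of that teardown ordering:

	#include <stdio.h>
	#include <stdbool.h>
	#include <errno.h>

	static bool freq_ready = true;

	static void disable_c6(void)       { printf("C6 back under host control\n"); }
	static int  action_shutdown(void)  { return 0; }
	static int  wait_not_running(void) { return 0; }

	static int pc_stop(void)
	{
		int ret;

		disable_c6();
		freq_ready = false;	/* further freq requests now see -EAGAIN */

		ret = action_shutdown();
		if (ret)
			return ret;
		if (wait_not_running()) {
			fprintf(stderr, "GuC PC shutdown failed\n");
			return -ETIMEDOUT;
		}
		return 0;
	}

	int main(void)
	{
		printf("pc_stop: %d (freq_ready=%d)\n", pc_stop(), freq_ready);
		return 0;
	}
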
959 * @pc: Xe_GuC_PC instance
961 void xe_guc_pc_fini(struct xe_guc_pc *pc) in xe_guc_pc_fini() argument
963 struct xe_device *xe = pc_to_xe(pc); in xe_guc_pc_fini()
967 xe_gt_idle_disable_c6(pc_to_gt(pc)); in xe_guc_pc_fini()
972 XE_WARN_ON(xe_guc_pc_gucrc_disable(pc)); in xe_guc_pc_fini()
973 XE_WARN_ON(xe_guc_pc_stop(pc)); in xe_guc_pc_fini()
974 mutex_destroy(&pc->freq_lock); in xe_guc_pc_fini()
979 * @pc: Xe_GuC_PC instance
981 int xe_guc_pc_init(struct xe_guc_pc *pc) in xe_guc_pc_init() argument
983 struct xe_gt *gt = pc_to_gt(pc); in xe_guc_pc_init()
992 mutex_init(&pc->freq_lock); in xe_guc_pc_init()
1000 pc->bo = bo; in xe_guc_pc_init()