Lines Matching defs:slpc (SLPC definitions in the i915 GuC driver, drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c)
46 static inline struct intel_guc *slpc_to_guc(struct intel_guc_slpc *slpc)
48 return container_of(slpc, struct intel_guc, slpc);
51 static inline struct intel_gt *slpc_to_gt(struct intel_guc_slpc *slpc)
53 return guc_to_gt(slpc_to_guc(slpc));
56 static inline struct drm_i915_private *slpc_to_i915(struct intel_guc_slpc *slpc)
58 return slpc_to_gt(slpc)->i915;
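These three helpers walk outward from the SLPC state to its containers: the slpc struct is embedded in struct intel_guc, so container_of() recovers the GuC, and the GT and device hang off that. A minimal, runnable userspace sketch of the container_of idiom they rely on (the struct names here are illustrative stand-ins, not the kernel's):

    #include <stddef.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the embedded-struct relationship. */
    struct slpc { int supported; };
    struct guc  { int id; struct slpc slpc; };

    /* container_of(): recover the enclosing struct from a member pointer. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct guc *slpc_to_guc(struct slpc *slpc)
    {
            return container_of(slpc, struct guc, slpc);
    }

    int main(void)
    {
            struct guc g = { .id = 7 };
            /* Given only &g.slpc, we can get back to g. */
            printf("guc id = %d\n", slpc_to_guc(&g.slpc)->id);
            return 0;
    }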
76 void intel_guc_slpc_init_early(struct intel_guc_slpc *slpc)
78 struct intel_guc *guc = slpc_to_guc(slpc);
80 slpc->supported = __detect_slpc_supported(guc);
81 slpc->selected = __guc_slpc_selected(guc);
118 static u32 slpc_get_state(struct intel_guc_slpc *slpc)
122 GEM_BUG_ON(!slpc->vma);
124 drm_clflush_virt_range(slpc->vaddr, sizeof(u32));
125 data = slpc->vaddr;
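slpc_get_state() reads a status word that the GuC firmware writes into the shared page, so the CPU cache line is flushed before the read. A plausible reconstruction of the full body, assuming the upstream slpc_shared_data layout for the returned field:

    static u32 slpc_get_state(struct intel_guc_slpc *slpc)
    {
            struct slpc_shared_data *data;

            GEM_BUG_ON(!slpc->vma);

            /* The GuC updates this word behind the CPU's back; drop the
             * stale cached copy before reading.
             */
            drm_clflush_virt_range(slpc->vaddr, sizeof(u32));
            data = slpc->vaddr;

            return data->header.global_state; /* assumed field layout */
    }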
145 static int slpc_set_param_nb(struct intel_guc_slpc *slpc, u8 id, u32 value)
147 struct intel_guc *guc = slpc_to_guc(slpc);
169 static bool slpc_is_running(struct intel_guc_slpc *slpc)
171 return slpc_get_state(slpc) == SLPC_GLOBAL_STATE_RUNNING;
189 static int slpc_query_task_state(struct intel_guc_slpc *slpc)
191 struct intel_guc *guc = slpc_to_guc(slpc);
192 u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
199 drm_clflush_virt_range(slpc->vaddr, SLPC_PAGE_SIZE_BYTES);
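slpc_query_task_state() asks the GuC to dump its task state into the shared page at the given GGTT offset, then flushes the whole page so subsequent CPU reads (the decode helpers below) see fresh data. A hedged sketch of the elided middle; guc_action_slpc_query() is the assumed H2G wrapper:

    static int slpc_query_task_state(struct intel_guc_slpc *slpc)
    {
            struct intel_guc *guc = slpc_to_guc(slpc);
            u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
            int ret;

            /* H2G request: GuC writes its task state into the shared page. */
            ret = guc_action_slpc_query(guc, offset);
            if (unlikely(ret))
                    guc_probe_error(guc, "Failed to query task state: %pe\n",
                                    ERR_PTR(ret));

            /* Invalidate the whole page before the CPU parses it. */
            drm_clflush_virt_range(slpc->vaddr, SLPC_PAGE_SIZE_BYTES);

            return ret;
    }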
204 static int slpc_set_param(struct intel_guc_slpc *slpc, u8 id, u32 value)
206 struct intel_guc *guc = slpc_to_guc(slpc);
219 static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
221 struct intel_guc *guc = slpc_to_guc(slpc);
222 struct drm_i915_private *i915 = slpc_to_i915(slpc);
226 lockdep_assert_held(&slpc->lock);
241 ret = slpc_set_param_nb(slpc,
254 struct intel_guc_slpc *slpc = container_of(work, typeof(*slpc), boost_work);
263 mutex_lock(&slpc->lock);
264 if (atomic_read(&slpc->num_waiters)) {
265 err = slpc_force_min_freq(slpc, slpc->boost_freq);
267 slpc->num_boosts++;
269 mutex_unlock(&slpc->lock);
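The boost worker re-checks num_waiters under slpc->lock so that a racing intel_guc_slpc_dec_waiters() cannot leave a boost applied after the last waiter is gone. A plausible reconstruction of the body, filling in the error check elided between the matched lines:

    static void slpc_boost_work(struct work_struct *work)
    {
            struct intel_guc_slpc *slpc = container_of(work, typeof(*slpc),
                                                       boost_work);
            int err;

            /* Raising min to boost_freq may exceed the current max; the
             * firmware clamps to RP0, and a set_param failure here is
             * not fatal.
             */
            mutex_lock(&slpc->lock);
            if (atomic_read(&slpc->num_waiters)) {
                    err = slpc_force_min_freq(slpc, slpc->boost_freq);
                    if (!err)  /* assumed: count only successful boosts */
                            slpc->num_boosts++;
            }
            mutex_unlock(&slpc->lock);
    }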
272 int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
274 struct intel_guc *guc = slpc_to_guc(slpc);
278 GEM_BUG_ON(slpc->vma);
280 err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr);
286 slpc->max_freq_softlimit = 0;
287 slpc->min_freq_softlimit = 0;
288 slpc->ignore_eff_freq = false;
289 slpc->min_is_rpmax = false;
291 slpc->boost_freq = 0;
292 atomic_set(&slpc->num_waiters, 0);
293 slpc->num_boosts = 0;
294 slpc->media_ratio_mode = SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL;
296 slpc->power_profile = SLPC_POWER_PROFILES_BASE;
298 mutex_init(&slpc->lock);
299 INIT_WORK(&slpc->boost_work, slpc_boost_work);
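Init allocates one shared page in the GGTT with a CPU-side mapping and seeds the cached defaults; note that boost_freq is left at 0 here and only resolved to RP0 later, in slpc_get_rp_values(). A hedged sketch of the allocation error path (the message text is illustrative):

    err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma,
                                         (void **)&slpc->vaddr);
    if (unlikely(err)) {
            guc_probe_error(guc, "Failed to allocate SLPC struct: %pe\n",
                            ERR_PTR(err));
            return err;
    }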
324 static const char *slpc_get_state_string(struct intel_guc_slpc *slpc)
326 return slpc_global_state_to_string(slpc_get_state(slpc));
344 static int slpc_reset(struct intel_guc_slpc *slpc)
346 struct intel_guc *guc = slpc_to_guc(slpc);
347 u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
358 if (wait_for(slpc_is_running(slpc), SLPC_RESET_TIMEOUT_MS)) {
360 slpc_get_state_string(slpc));
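slpc_reset() sends the reset event and then polls the shared state word until the firmware reports RUNNING, failing with a readable state string on timeout. A hedged sketch of that flow; guc_action_slpc_reset() is the assumed H2G wrapper:

    ret = guc_action_slpc_reset(guc, offset);
    if (unlikely(ret < 0)) {
            guc_probe_error(guc, "SLPC reset action failed: %pe\n",
                            ERR_PTR(ret));
            return ret;
    }

    /* Poll the GuC-written state word until SLPC comes up. */
    if (wait_for(slpc_is_running(slpc), SLPC_RESET_TIMEOUT_MS)) {
            guc_probe_error(guc, "SLPC not enabled! State = %s\n",
                            slpc_get_state_string(slpc));
            return -EIO;
    }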
368 static u32 slpc_decode_min_freq(struct intel_guc_slpc *slpc)
370 struct slpc_shared_data *data = slpc->vaddr;
372 GEM_BUG_ON(!slpc->vma);
379 static u32 slpc_decode_max_freq(struct intel_guc_slpc *slpc)
381 struct slpc_shared_data *data = slpc->vaddr;
383 GEM_BUG_ON(!slpc->vma);
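The decode helpers pull the min/max frequency fields out of the task-state word the GuC wrote (hence the page flush in slpc_query_task_state() above) and convert them to MHz. A hedged sketch of the min decode, assuming the upstream mask and scaling macros:

    static u32 slpc_decode_min_freq(struct intel_guc_slpc *slpc)
    {
            struct slpc_shared_data *data = slpc->vaddr;

            GEM_BUG_ON(!slpc->vma);

            /* Extract the field, then scale firmware units to MHz. */
            return DIV_ROUND_CLOSEST(REG_FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
                                                   data->task_state_data.freq) *
                                     GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);
    }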
390 static void slpc_shared_data_reset(struct intel_guc_slpc *slpc)
392 struct drm_i915_private *i915 = slpc_to_i915(slpc);
393 struct slpc_shared_data *data = slpc->vaddr;
417 * @slpc: pointer to intel_guc_slpc.
425 int intel_guc_slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 val)
427 struct drm_i915_private *i915 = slpc_to_i915(slpc);
431 if (val < slpc->min_freq ||
432 val > slpc->rp0_freq ||
433 val < slpc->min_freq_softlimit)
437 ret = slpc_set_param(slpc,
447 slpc->max_freq_softlimit = val;
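intel_guc_slpc_set_max_freq() validates the request against the fused range and the min softlimit, then issues the set_param under a runtime-PM wakeref so the GuC is awake for the H2G. A hedged sketch of the wakeref pattern around the elided call, assuming the upstream parameter id:

    with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
            ret = slpc_set_param(slpc,
                                 SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
                                 val);
            /* Cache the softlimit only if the firmware accepted it. */
            if (!ret)
                    slpc->max_freq_softlimit = val;
    }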
454 * @slpc: pointer to intel_guc_slpc.
462 int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val)
464 struct drm_i915_private *i915 = slpc_to_i915(slpc);
470 ret = slpc_query_task_state(slpc);
473 *val = slpc_decode_max_freq(slpc);
479 int intel_guc_slpc_set_ignore_eff_freq(struct intel_guc_slpc *slpc, bool val)
481 struct drm_i915_private *i915 = slpc_to_i915(slpc);
485 mutex_lock(&slpc->lock);
488 ret = slpc_set_param(slpc,
492 guc_probe_error(slpc_to_guc(slpc), "Failed to set efficient freq(%d): %pe\n",
495 slpc->ignore_eff_freq = val;
499 ret = slpc_set_param(slpc,
501 slpc->min_freq);
505 mutex_unlock(&slpc->lock);
511 * @slpc: pointer to intel_guc_slpc.
519 int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
521 struct drm_i915_private *i915 = slpc_to_i915(slpc);
525 if (val < slpc->min_freq ||
526 val > slpc->rp0_freq ||
527 val > slpc->max_freq_softlimit)
531 mutex_lock(&slpc->lock);
534 ret = slpc_set_param(slpc,
539 slpc->min_freq_softlimit = val;
542 mutex_unlock(&slpc->lock);
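The min path additionally takes slpc->lock, because waitboost (slpc_force_min_freq()) writes the same firmware parameter concurrently. A hedged sketch of the locked section, assuming the upstream parameter id:

    /* Waitboost also writes the min param; serialize with it. */
    mutex_lock(&slpc->lock);
    wakeref = intel_runtime_pm_get(&i915->runtime_pm);

    ret = slpc_set_param(slpc,
                         SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
                         val);
    if (!ret)
            slpc->min_freq_softlimit = val;

    intel_runtime_pm_put(&i915->runtime_pm, wakeref);
    mutex_unlock(&slpc->lock);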
553 * @slpc: pointer to intel_guc_slpc.
561 int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val)
563 struct drm_i915_private *i915 = slpc_to_i915(slpc);
569 ret = slpc_query_task_state(slpc);
572 *val = slpc_decode_min_freq(slpc);
578 int intel_guc_slpc_set_strategy(struct intel_guc_slpc *slpc, u32 val)
580 struct drm_i915_private *i915 = slpc_to_i915(slpc);
585 ret = slpc_set_param(slpc,
592 int intel_guc_slpc_set_media_ratio_mode(struct intel_guc_slpc *slpc, u32 val)
594 struct drm_i915_private *i915 = slpc_to_i915(slpc);
602 ret = slpc_set_param(slpc,
608 int intel_guc_slpc_set_power_profile(struct intel_guc_slpc *slpc, u32 val)
610 struct drm_i915_private *i915 = slpc_to_i915(slpc);
617 mutex_lock(&slpc->lock);
620 ret = slpc_set_param(slpc,
624 guc_err(slpc_to_guc(slpc),
628 slpc->power_profile = val;
631 mutex_unlock(&slpc->lock);
651 static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
660 if (!slpc->max_freq_softlimit) {
661 slpc->max_freq_softlimit = slpc->rp0_freq;
662 slpc_to_gt(slpc)->defaults.max_freq = slpc->max_freq_softlimit;
663 } else if (slpc->max_freq_softlimit != slpc->rp0_freq) {
664 ret = intel_guc_slpc_set_max_freq(slpc,
665 slpc->max_freq_softlimit);
671 if (!slpc->min_freq_softlimit) {
673 slpc->min_freq_softlimit = slpc->min_freq;
674 slpc_to_gt(slpc)->defaults.min_freq = slpc->min_freq_softlimit;
676 return intel_guc_slpc_set_min_freq(slpc,
677 slpc->min_freq_softlimit);
683 static bool is_slpc_min_freq_rpmax(struct intel_guc_slpc *slpc)
688 ret = intel_guc_slpc_get_min_freq(slpc, &slpc_min_freq);
690 guc_err(slpc_to_guc(slpc), "Failed to get min freq: %pe\n", ERR_PTR(ret));
700 static void update_server_min_softlimit(struct intel_guc_slpc *slpc)
705 if (!slpc->min_freq_softlimit &&
706 is_slpc_min_freq_rpmax(slpc)) {
707 slpc->min_is_rpmax = true;
708 slpc->min_freq_softlimit = slpc->rp0_freq;
709 (slpc_to_gt(slpc))->defaults.min_freq = slpc->min_freq_softlimit;
713 static int slpc_use_fused_rp0(struct intel_guc_slpc *slpc)
716 return slpc_set_param(slpc,
718 slpc->rp0_freq);
721 static void slpc_get_rp_values(struct intel_guc_slpc *slpc)
723 struct intel_rps *rps = &slpc_to_gt(slpc)->rps;
727 slpc->rp0_freq = intel_gpu_freq(rps, caps.rp0_freq);
728 slpc->rp1_freq = intel_gpu_freq(rps, caps.rp1_freq);
729 slpc->min_freq = intel_gpu_freq(rps, caps.min_freq);
731 if (!slpc->boost_freq)
732 slpc->boost_freq = slpc->rp0_freq;
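slpc_get_rp_values() seeds the cached RP0/RP1/min frequencies from the hardware fuse caps and resolves the boost_freq default that init deferred. A hedged reconstruction; gen6_rps_get_freq_caps() is the assumed caps reader:

    static void slpc_get_rp_values(struct intel_guc_slpc *slpc)
    {
            struct intel_rps *rps = &slpc_to_gt(slpc)->rps;
            struct intel_rps_freq_caps caps;

            gen6_rps_get_freq_caps(rps, &caps);
            slpc->rp0_freq = intel_gpu_freq(rps, caps.rp0_freq);
            slpc->rp1_freq = intel_gpu_freq(rps, caps.rp1_freq);
            slpc->min_freq = intel_gpu_freq(rps, caps.min_freq);

            /* boost_freq was left 0 at init; default it to RP0 now. */
            if (!slpc->boost_freq)
                    slpc->boost_freq = slpc->rp0_freq;
    }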
737 * @slpc: pointer to intel_guc_slpc.
748 int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
750 struct intel_guc *guc = slpc_to_guc(slpc);
753 GEM_BUG_ON(!slpc->vma);
755 slpc_shared_data_reset(slpc);
757 ret = slpc_reset(slpc);
763 ret = slpc_query_task_state(slpc);
767 intel_guc_pm_intrmsk_enable(slpc_to_gt(slpc));
769 slpc_get_rp_values(slpc);
772 update_server_min_softlimit(slpc);
775 ret = slpc_use_fused_rp0(slpc);
782 intel_guc_slpc_set_ignore_eff_freq(slpc, slpc->ignore_eff_freq);
785 ret = slpc_set_softlimits(slpc);
792 intel_guc_slpc_set_media_ratio_mode(slpc, slpc->media_ratio_mode);
795 intel_guc_slpc_set_strategy(slpc, SLPC_OPTIMIZED_STRATEGY_COMPUTE);
798 ret = intel_guc_slpc_set_power_profile(slpc, slpc->power_profile);
807 int intel_guc_slpc_set_boost_freq(struct intel_guc_slpc *slpc, u32 val)
811 if (val < slpc->min_freq || val > slpc->rp0_freq)
814 mutex_lock(&slpc->lock);
816 if (slpc->boost_freq != val) {
818 if (atomic_read(&slpc->num_waiters)) {
819 ret = slpc_force_min_freq(slpc, val);
826 slpc->boost_freq = val;
830 mutex_unlock(&slpc->lock);
834 void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc)
842 mutex_lock(&slpc->lock);
843 if (atomic_dec_and_test(&slpc->num_waiters))
844 slpc_force_min_freq(slpc, slpc->min_freq_softlimit);
845 mutex_unlock(&slpc->lock);
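intel_guc_slpc_dec_waiters() is the consumer side of the waitboost counter: it drops the boost back to the min softlimit only when the last waiter leaves, holding slpc->lock to avoid racing the boost worker. The producer side lives in the RPS code; a hedged sketch of it, with the workqueue choice illustrative:

    /* Caller-side sketch: only the 0 -> 1 transition queues the worker,
     * so concurrent waiters don't spam the GuC with set_param calls.
     */
    if (!atomic_fetch_inc(&slpc->num_waiters))
            queue_work(system_unbound_wq, &slpc->boost_work);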
848 int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p)
850 struct drm_i915_private *i915 = slpc_to_i915(slpc);
851 struct slpc_shared_data *data = slpc->vaddr;
856 GEM_BUG_ON(!slpc->vma);
859 ret = slpc_query_task_state(slpc);
864 drm_printf(p, "\tSLPC state: %s\n", slpc_get_state_string(slpc));
885 slpc_decode_max_freq(slpc));
887 slpc_decode_min_freq(slpc));
889 slpc->num_boosts);
891 atomic_read(&slpc->num_waiters));
898 void intel_guc_slpc_fini(struct intel_guc_slpc *slpc)
900 if (!slpc->vma)
903 i915_vma_unpin_and_release(&slpc->vma, I915_VMA_RELEASE_MAP);