Lines Matching +full:max +full:- +full:freq

Matched lines from the i915 RPS (Render P-state) selftests, shown with their original file line numbers and the enclosing function for each hit. The matches span live_rps_clock_interval(), live_rps_control(), live_rps_frequency_cs()/_srm(), live_rps_interrupt(), live_rps_power() and live_rps_dynamic(), plus the measurement helpers they share.

1 // SPDX-License-Identifier: MIT
26 #define CPU_LATENCY 0 /* -1 to disable pm_qos, 0 to disable cstates */
37 return -1; in cmp_u64()
49 return -1; in cmp_u32()
68 #define CS_GPR(x) GEN8_RING_CS_GPR(engine->mmio_base, x) in create_spin_counter()
76 obj = i915_gem_object_create_internal(vm->i915, 64 << 10); in create_spin_counter()
80 end = obj->base.size / sizeof(u32) - 1; in create_spin_counter()
113 loop = cs - base; in create_spin_counter()
134 GEM_BUG_ON(cs - base > end); in create_spin_counter()
151 static u8 wait_for_freq(struct intel_rps *rps, u8 freq, int timeout_ms) in wait_for_freq() argument
158 memset(history, freq, sizeof(history)); in wait_for_freq()
171 if (act == freq) in wait_for_freq()
188 static u8 rps_set_check(struct intel_rps *rps, u8 freq) in rps_set_check() argument
190 mutex_lock(&rps->lock); in rps_set_check()
192 if (wait_for(!intel_rps_set(rps, freq), 50)) { in rps_set_check()
193 mutex_unlock(&rps->lock); in rps_set_check()
196 GEM_BUG_ON(rps->last_freq != freq); in rps_set_check()
197 mutex_unlock(&rps->lock); in rps_set_check()
199 return wait_for_freq(rps, freq, 50); in rps_set_check()
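A userspace-style sketch of the polling pattern behind wait_for_freq()/rps_set_check(): request a frequency, then sample the observed frequency until it matches the request, the readings settle elsewhere, or a timeout expires. read_actual_freq(), now_ms() and sleep_ms() are placeholders, not driver API; this is an illustration of the shape, not the driver code.

#include <stdint.h>
#include <string.h>
#include <stdbool.h>

extern uint8_t read_actual_freq(void);   /* stand-in for reading the achieved frequency (CAGF) */
extern uint64_t now_ms(void);
extern void sleep_ms(unsigned int ms);

/*
 * Poll until the observed frequency reaches @freq. The small history
 * buffer lets the loop bail out early when the hardware has clearly
 * settled at some other value, mirroring the selftest's approach.
 */
static uint8_t wait_for_freq_sketch(uint8_t freq, int timeout_ms)
{
	uint8_t history[64];
	unsigned int i = 0, j;
	uint64_t end = now_ms() + timeout_ms;
	uint8_t act;

	memset(history, freq, sizeof(history));

	do {
		bool settled = true;

		act = read_actual_freq();
		if (act == freq)
			return act;

		history[i] = act;
		i = (i + 1) % sizeof(history);

		for (j = 0; j < sizeof(history); j++) {
			if (history[j] != act) {
				settled = false;
				break;
			}
		}
		if (settled)
			return act;   /* stuck at a different frequency */

		sleep_ms(1);
	} while (now_ms() < end);

	return act;
}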
222 struct intel_rps *rps = &gt->rps; in live_rps_clock_interval()
230 if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6) in live_rps_clock_interval()
234 return -ENOMEM; in live_rps_clock_interval()
237 saved_work = rps->work.func; in live_rps_clock_interval()
238 rps->work.func = dummy_rps_work; in live_rps_clock_interval()
241 intel_rps_disable(&gt->rps); in live_rps_clock_interval()
256 engine->kernel_context, in live_rps_clock_interval()
268 engine->name); in live_rps_clock_interval()
271 intel_gt_set_wedged(engine->gt); in live_rps_clock_interval()
272 err = -EIO; in live_rps_clock_interval()
276 intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); in live_rps_clock_interval()
278 intel_uncore_write_fw(gt->uncore, GEN6_RP_CUR_UP_EI, 0); in live_rps_clock_interval()
281 intel_uncore_write_fw(gt->uncore, in live_rps_clock_interval()
283 intel_uncore_write_fw(gt->uncore, in live_rps_clock_interval()
286 intel_uncore_write_fw(gt->uncore, GEN6_RP_CONTROL, in live_rps_clock_interval()
289 if (wait_for(intel_uncore_read_fw(gt->uncore, in live_rps_clock_interval()
294 engine->name); in live_rps_clock_interval()
295 err = -ENODEV; in live_rps_clock_interval()
304 cycles_[i] = -intel_uncore_read_fw(gt->uncore, GEN6_RP_CUR_UP_EI); in live_rps_clock_interval()
309 cycles_[i] += intel_uncore_read_fw(gt->uncore, GEN6_RP_CUR_UP_EI); in live_rps_clock_interval()
322 intel_uncore_write_fw(gt->uncore, GEN6_RP_CONTROL, 0); in live_rps_clock_interval()
323 intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); in live_rps_clock_interval()
334 engine->name, cycles, time, dt, expected, in live_rps_clock_interval()
335 gt->clock_frequency / 1000); in live_rps_clock_interval()
340 engine->name); in live_rps_clock_interval()
341 err = -EINVAL; in live_rps_clock_interval()
347 engine->name); in live_rps_clock_interval()
348 err = -EINVAL; in live_rps_clock_interval()
352 if (igt_flush_test(gt->i915)) in live_rps_clock_interval()
353 err = -EIO; in live_rps_clock_interval()
358 intel_rps_enable(&gt->rps); in live_rps_clock_interval()
364 rps->work.func = saved_work; in live_rps_clock_interval()
366 if (err == -ENODEV) /* skipped, don't report a fail */ in live_rps_clock_interval()
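The arithmetic behind the pass/fail decision in live_rps_clock_interval(), as a standalone sketch: the delta of the GEN6_RP_CUR_UP_EI cycle counter over a measured wall-clock interval should agree with dt * gt->clock_frequency. The 10% tolerance below is an assumption for illustration; the matched lines do not show the exact margin the selftest uses.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/*
 * cycles: delta of the RP_CUR_UP_EI counter across the interval
 * dt_ns:  elapsed wall-clock time in nanoseconds
 * clock_frequency_hz: the GT timestamp/clock frequency
 */
static bool clock_interval_ok(uint64_t cycles, uint64_t dt_ns,
			      uint32_t clock_frequency_hz)
{
	uint64_t expected = dt_ns * clock_frequency_hz / 1000000000ull;
	uint64_t lo = expected - expected / 10;   /* assumed +/-10% bound */
	uint64_t hi = expected + expected / 10;

	printf("cycles:%llu over %lluns, expected:%llu @ %ukHz\n",
	       (unsigned long long)cycles, (unsigned long long)dt_ns,
	       (unsigned long long)expected, clock_frequency_hz / 1000);

	return cycles >= lo && cycles <= hi;
}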
375 struct intel_rps *rps = &gt->rps; in live_rps_control()
393 if (IS_CHERRYVIEW(gt->i915)) /* XXX fragile PCU */ in live_rps_control()
397 return -ENOMEM; in live_rps_control()
400 saved_work = rps->work.func; in live_rps_control()
401 rps->work.func = dummy_rps_work; in live_rps_control()
408 int min, max; in live_rps_control() local
416 engine->kernel_context, in live_rps_control()
427 engine->name); in live_rps_control()
430 intel_gt_set_wedged(engine->gt); in live_rps_control()
431 err = -EIO; in live_rps_control()
435 if (rps_set_check(rps, rps->min_freq) != rps->min_freq) { in live_rps_control()
437 engine->name, rps->min_freq, read_cagf(rps)); in live_rps_control()
441 err = -EINVAL; in live_rps_control()
445 for (f = rps->min_freq + 1; f < rps->max_freq; f++) { in live_rps_control()
452 if (rps_set_check(rps, rps->min_freq) != rps->min_freq) { in live_rps_control()
454 engine->name, rps->min_freq, read_cagf(rps)); in live_rps_control()
458 err = -EINVAL; in live_rps_control()
463 max = rps_set_check(rps, limit); in live_rps_control()
467 min = rps_set_check(rps, rps->min_freq); in live_rps_control()
474 engine->name, in live_rps_control()
475 rps->min_freq, intel_gpu_freq(rps, rps->min_freq), in live_rps_control()
476 rps->max_freq, intel_gpu_freq(rps, rps->max_freq), in live_rps_control()
478 min, max, ktime_to_ns(min_dt), ktime_to_ns(max_dt)); in live_rps_control()
480 if (limit != rps->max_freq) { in live_rps_control()
481 u32 throttle = intel_uncore_read(gt->uncore, in live_rps_control()
485 engine->name, throttle & GT0_PERF_LIMIT_REASONS_MASK); in live_rps_control()
489 if (igt_flush_test(gt->i915)) { in live_rps_control()
490 err = -EIO; in live_rps_control()
499 rps->work.func = saved_work; in live_rps_control()
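Overall shape of live_rps_control(), sketched with a placeholder set_check() in place of rps_set_check(): sweep the levels upward until a request no longer sticks, then time a full swing to that limit and back to min. The per-step checks the real test performs inside the sweep, and the PERF_LIMIT_REASONS read when the limit falls short of max, are omitted here.

#include <stdint.h>

extern uint8_t set_check(uint8_t freq);   /* request freq, return what was actually reached */
extern uint64_t now_ns(void);

static int sweep_rps_levels(uint8_t min_freq, uint8_t max_freq)
{
	uint64_t t0, max_dt, min_dt;
	uint8_t f, limit = min_freq;

	/* walk every level; stop at the first one that is not granted */
	for (f = min_freq + 1; f < max_freq; f++) {
		if (set_check(f) < f)
			break;
		limit = f;
	}

	t0 = now_ns();
	set_check(limit);                 /* swing up to the highest granted level */
	max_dt = now_ns() - t0;

	t0 = now_ns();
	set_check(min_freq);              /* and back down to min */
	min_dt = now_ns() - t0;

	(void)max_dt;
	(void)min_dt;                     /* the selftest reports both swing times */
	return limit;
}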
514 min_gpu_freq = rps->min_freq; in show_pcu_config()
515 max_gpu_freq = rps->max_freq; in show_pcu_config()
522 wakeref = intel_runtime_pm_get(rps_to_uncore(rps)->rpm); in show_pcu_config()
528 snb_pcode_read(rps_to_gt(rps)->uncore, GEN6_PCODE_READ_MIN_FREQ_TABLE, in show_pcu_config()
537 intel_runtime_pm_put(rps_to_uncore(rps)->rpm, wakeref); in show_pcu_config()
547 dc = READ_ONCE(*cntr) - dc; in __measure_frequency()
548 dt = ktime_get() - dt; in __measure_frequency()
553 static u64 measure_frequency_at(struct intel_rps *rps, u32 *cntr, int *freq) in measure_frequency_at() argument
558 *freq = rps_set_check(rps, *freq); in measure_frequency_at()
561 *freq = (*freq + read_cagf(rps)) / 2; in measure_frequency_at()
573 dc = intel_uncore_read_fw(engine->uncore, CS_GPR(0)); in __measure_cs_frequency()
576 dc = intel_uncore_read_fw(engine->uncore, CS_GPR(0)) - dc; in __measure_cs_frequency()
577 dt = ktime_get() - dt; in __measure_cs_frequency()
584 int *freq) in measure_cs_frequency_at() argument
589 *freq = rps_set_check(rps, *freq); in measure_cs_frequency_at()
592 *freq = (*freq + read_cagf(rps)) / 2; in measure_cs_frequency_at()
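A standalone sketch of how __measure_frequency()/__measure_cs_frequency() turn a counter delta into a rate: read the counter, dwell, read again, and divide by the elapsed time; counts per microsecond come out in kHz. read_counter() stands in for the MMIO read of CS_GPR(0) or the SRM-updated memory word, and the dwell length is illustrative.

#include <stdint.h>
#include <time.h>

extern uint64_t read_counter(void);   /* stand-in for the GPR/SRM counter read */

static uint64_t measure_rate_khz(unsigned int dwell_ms)
{
	struct timespec t0, t1, req;
	uint64_t dc, dt_ns;

	clock_gettime(CLOCK_MONOTONIC, &t0);
	dc = read_counter();

	req.tv_sec = dwell_ms / 1000;
	req.tv_nsec = (long)(dwell_ms % 1000) * 1000000L;
	nanosleep(&req, NULL);

	dc = read_counter() - dc;
	clock_gettime(CLOCK_MONOTONIC, &t1);
	dt_ns = (uint64_t)(t1.tv_sec - t0.tv_sec) * 1000000000ull +
		(t1.tv_nsec - t0.tv_nsec);

	/* counts per microsecond == kHz */
	return dc * 1000000ull / dt_ns;
}

measure_frequency_at() and measure_cs_frequency_at() then fold the achieved frequency back into the reported value, *freq = (*freq + read_cagf(rps)) / 2, averaging the requested level with the CAGF reading observed during the dwell.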
608 struct intel_rps *rps = &gt->rps; in live_rps_frequency_cs()
623 if (GRAPHICS_VER(gt->i915) < 8) /* for CS simplicity */ in live_rps_frequency_cs()
630 saved_work = rps->work.func; in live_rps_frequency_cs()
631 rps->work.func = dummy_rps_work; in live_rps_frequency_cs()
639 int freq; in live_rps_frequency_cs() member
640 } min, max; in live_rps_frequency_cs() local
645 engine->kernel_context->vm, false, in live_rps_frequency_cs()
661 err = rq->engine->emit_bb_start(rq, in live_rps_frequency_cs()
668 if (wait_for(intel_uncore_read(engine->uncore, CS_GPR(0)), in live_rps_frequency_cs()
671 engine->name); in live_rps_frequency_cs()
675 min.freq = rps->min_freq; in live_rps_frequency_cs()
676 min.count = measure_cs_frequency_at(rps, engine, &min.freq); in live_rps_frequency_cs()
678 max.freq = rps->max_freq; in live_rps_frequency_cs()
679 max.count = measure_cs_frequency_at(rps, engine, &max.freq); in live_rps_frequency_cs()
681 pr_info("%s: min:%lluKHz @ %uMHz, max:%lluKHz @ %uMHz [%d%%]\n", in live_rps_frequency_cs()
682 engine->name, in live_rps_frequency_cs()
683 min.count, intel_gpu_freq(rps, min.freq), in live_rps_frequency_cs()
684 max.count, intel_gpu_freq(rps, max.freq), in live_rps_frequency_cs()
685 (int)DIV64_U64_ROUND_CLOSEST(100 * min.freq * max.count, in live_rps_frequency_cs()
686 max.freq * min.count)); in live_rps_frequency_cs()
688 if (!scaled_within(max.freq * min.count, in live_rps_frequency_cs()
689 min.freq * max.count, in live_rps_frequency_cs()
693 pr_err("%s: CS did not scale with frequency! scaled min:%llu, max:%llu\n", in live_rps_frequency_cs()
694 engine->name, in live_rps_frequency_cs()
695 max.freq * min.count, in live_rps_frequency_cs()
696 min.freq * max.count); in live_rps_frequency_cs()
699 for (f = min.freq + 1; f <= rps->max_freq; f++) { in live_rps_frequency_cs()
708 engine->name, in live_rps_frequency_cs()
710 (int)DIV64_U64_ROUND_CLOSEST(100 * min.freq * count, in live_rps_frequency_cs()
716 err = -EINTR; /* ignore error, continue on with test */ in live_rps_frequency_cs()
721 i915_gem_object_flush_map(vma->obj); in live_rps_frequency_cs()
722 i915_gem_object_unpin_map(vma->obj); in live_rps_frequency_cs()
728 if (igt_flush_test(gt->i915)) in live_rps_frequency_cs()
729 err = -EIO; in live_rps_frequency_cs()
735 rps->work.func = saved_work; in live_rps_frequency_cs()
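The proportionality check at the heart of live_rps_frequency_cs() (and the SRM variant below): if the command streamer ticks linearly with GT frequency, then min.freq * max.count and max.freq * min.count should agree. Comparing cross products avoids dividing small counts. The tolerance pair in the usage note is an assumption; the matched lines only show the comparison and the percentage print, not the exact margin.

#include <stdint.h>
#include <stdbool.h>

/* Accept y if it lies within +/- tol_num/tol_den of x. */
static bool scaled_within_sketch(uint64_t x, uint64_t y,
				 unsigned int tol_num, unsigned int tol_den)
{
	uint64_t lo = x * (tol_den - tol_num) / tol_den;
	uint64_t hi = x * (tol_den + tol_num) / tol_den;

	return y >= lo && y <= hi;
}

/*
 * Usage, mirroring the selftest's comparison:
 *   scaled_within_sketch(max_freq * min_count, min_freq * max_count, 1, 10)
 * accepts up to 10% deviation from perfect frequency scaling.
 */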
747 struct intel_rps *rps = &gt->rps; in live_rps_frequency_srm()
762 if (GRAPHICS_VER(gt->i915) < 8) /* for CS simplicity */ in live_rps_frequency_srm()
769 saved_work = rps->work.func; in live_rps_frequency_srm()
770 rps->work.func = dummy_rps_work; in live_rps_frequency_srm()
778 int freq; in live_rps_frequency_srm() member
779 } min, max; in live_rps_frequency_srm() local
784 engine->kernel_context->vm, true, in live_rps_frequency_srm()
800 err = rq->engine->emit_bb_start(rq, in live_rps_frequency_srm()
809 engine->name); in live_rps_frequency_srm()
813 min.freq = rps->min_freq; in live_rps_frequency_srm()
814 min.count = measure_frequency_at(rps, cntr, &min.freq); in live_rps_frequency_srm()
816 max.freq = rps->max_freq; in live_rps_frequency_srm()
817 max.count = measure_frequency_at(rps, cntr, &max.freq); in live_rps_frequency_srm()
819 pr_info("%s: min:%lluKHz @ %uMHz, max:%lluKHz @ %uMHz [%d%%]\n", in live_rps_frequency_srm()
820 engine->name, in live_rps_frequency_srm()
821 min.count, intel_gpu_freq(rps, min.freq), in live_rps_frequency_srm()
822 max.count, intel_gpu_freq(rps, max.freq), in live_rps_frequency_srm()
823 (int)DIV64_U64_ROUND_CLOSEST(100 * min.freq * max.count, in live_rps_frequency_srm()
824 max.freq * min.count)); in live_rps_frequency_srm()
826 if (!scaled_within(max.freq * min.count, in live_rps_frequency_srm()
827 min.freq * max.count, in live_rps_frequency_srm()
831 pr_err("%s: CS did not scale with frequency! scaled min:%llu, max:%llu\n", in live_rps_frequency_srm()
832 engine->name, in live_rps_frequency_srm()
833 max.freq * min.count, in live_rps_frequency_srm()
834 min.freq * max.count); in live_rps_frequency_srm()
837 for (f = min.freq + 1; f <= rps->max_freq; f++) { in live_rps_frequency_srm()
846 engine->name, in live_rps_frequency_srm()
848 (int)DIV64_U64_ROUND_CLOSEST(100 * min.freq * count, in live_rps_frequency_srm()
854 err = -EINTR; /* ignore error, continue on with test */ in live_rps_frequency_srm()
859 i915_gem_object_flush_map(vma->obj); in live_rps_frequency_srm()
860 i915_gem_object_unpin_map(vma->obj); in live_rps_frequency_srm()
866 if (igt_flush_test(gt->i915)) in live_rps_frequency_srm()
867 err = -EIO; in live_rps_frequency_srm()
873 rps->work.func = saved_work; in live_rps_frequency_srm()
888 GEM_BUG_ON(rps->pm_iir); in sleep_for_ei()
899 struct intel_uncore *uncore = engine->uncore; in __rps_up_interrupt()
906 rps_set_check(rps, rps->min_freq); in __rps_up_interrupt()
908 rq = igt_spinner_create_request(spin, engine->kernel_context, MI_NOOP); in __rps_up_interrupt()
917 engine->name); in __rps_up_interrupt()
919 intel_gt_set_wedged(engine->gt); in __rps_up_interrupt()
920 return -EIO; in __rps_up_interrupt()
925 engine->name); in __rps_up_interrupt()
928 return -EINVAL; in __rps_up_interrupt()
931 if (!(rps->pm_events & GEN6_PM_RP_UP_THRESHOLD)) { in __rps_up_interrupt()
933 engine->name); in __rps_up_interrupt()
935 return -EINVAL; in __rps_up_interrupt()
938 if (rps->last_freq != rps->min_freq) { in __rps_up_interrupt()
940 engine->name); in __rps_up_interrupt()
942 return -EINVAL; in __rps_up_interrupt()
946 timeout = intel_gt_pm_interval_to_ns(engine->gt, timeout); in __rps_up_interrupt()
955 if (rps->cur_freq != rps->min_freq) { in __rps_up_interrupt()
957 engine->name, intel_rps_read_actual_frequency(rps)); in __rps_up_interrupt()
958 return -EINVAL; in __rps_up_interrupt()
961 if (!(rps->pm_iir & GEN6_PM_RP_UP_THRESHOLD)) { in __rps_up_interrupt()
963 engine->name, rps->pm_iir, in __rps_up_interrupt()
967 return -EINVAL; in __rps_up_interrupt()
976 struct intel_uncore *uncore = engine->uncore; in __rps_down_interrupt()
979 rps_set_check(rps, rps->max_freq); in __rps_down_interrupt()
981 if (!(rps->pm_events & GEN6_PM_RP_DOWN_THRESHOLD)) { in __rps_down_interrupt()
983 engine->name); in __rps_down_interrupt()
984 return -EINVAL; in __rps_down_interrupt()
987 if (rps->last_freq != rps->max_freq) { in __rps_down_interrupt()
988 pr_err("%s: RPS did not program max frequency\n", in __rps_down_interrupt()
989 engine->name); in __rps_down_interrupt()
990 return -EINVAL; in __rps_down_interrupt()
994 timeout = intel_gt_pm_interval_to_ns(engine->gt, timeout); in __rps_down_interrupt()
999 if (rps->cur_freq != rps->max_freq) { in __rps_down_interrupt()
1001 engine->name, in __rps_down_interrupt()
1003 return -EINVAL; in __rps_down_interrupt()
1006 if (!(rps->pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT))) { in __rps_down_interrupt()
1008 engine->name, rps->pm_iir, in __rps_down_interrupt()
1015 return -EINVAL; in __rps_down_interrupt()
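What the two interrupt helpers check, reduced to predicates: run busy at the minimum frequency for one evaluation interval and expect the UP-threshold event to be latched in pm_iir; sit idle at the maximum frequency and expect either DOWN event. The bit values below are stand-ins, not copied from i915_reg.h.

#include <stdint.h>
#include <stdbool.h>

/* Stand-ins for GEN6_PM_RP_UP_THRESHOLD / _DOWN_THRESHOLD / _DOWN_TIMEOUT. */
#define RP_UP_THRESHOLD_STUB   (1u << 0)
#define RP_DOWN_THRESHOLD_STUB (1u << 1)
#define RP_DOWN_TIMEOUT_STUB   (1u << 2)

/* Busy at min freq for one EI: the UP threshold event must have fired. */
static bool up_event_fired(uint32_t pm_iir)
{
	return pm_iir & RP_UP_THRESHOLD_STUB;
}

/* Idle at max freq for one EI: either DOWN event counts as a reaction. */
static bool down_event_fired(uint32_t pm_iir)
{
	return pm_iir & (RP_DOWN_THRESHOLD_STUB | RP_DOWN_TIMEOUT_STUB);
}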
1024 struct intel_rps *rps = &gt->rps; in live_rps_interrupt()
1037 if (!intel_rps_has_interrupts(rps) || GRAPHICS_VER(gt->i915) < 6) in live_rps_interrupt()
1042 pm_events = rps->pm_events; in live_rps_interrupt()
1045 return -ENODEV; in live_rps_interrupt()
1049 return -ENOMEM; in live_rps_interrupt()
1052 saved_work = rps->work.func; in live_rps_interrupt()
1053 rps->work.func = dummy_rps_work; in live_rps_interrupt()
1058 intel_gt_pm_wait_for_idle(engine->gt); in live_rps_interrupt()
1069 intel_gt_pm_wait_for_idle(engine->gt); in live_rps_interrupt()
1075 intel_rc6_disable(&gt->rc6); in live_rps_interrupt()
1079 intel_rc6_enable(&gt->rc6); in live_rps_interrupt()
1087 if (igt_flush_test(gt->i915)) in live_rps_interrupt()
1088 err = -EIO; in live_rps_interrupt()
1093 rps->work.func = saved_work; in live_rps_interrupt()
1105 dE = librapl_energy_uJ() - dE; in __measure_power()
1106 dt = ktime_get() - dt; in __measure_power()
1111 static u64 measure_power(struct intel_rps *rps, int *freq) in measure_power() argument
1119 *freq = (*freq + read_cagf(rps)) / 2; in measure_power()
1126 static u64 measure_power_at(struct intel_rps *rps, int *freq) in measure_power_at() argument
1128 *freq = rps_set_check(rps, *freq); in measure_power_at()
1130 return measure_power(rps, freq); in measure_power_at()
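The power measurement in __measure_power()/measure_power_at(), sketched for userspace: sample the RAPL energy counter (microjoules) before and after a dwell and divide by the elapsed time. librapl_energy_uJ() is the helper named in the selftest; the dwell and the milliwatt conversion are illustrative.

#include <stdint.h>

extern uint64_t librapl_energy_uJ(void);   /* RAPL package energy, in uJ */
extern uint64_t now_ns(void);
extern void sleep_ms(unsigned int ms);

static uint64_t measure_power_mW(unsigned int dwell_ms)
{
	uint64_t dE = librapl_energy_uJ();
	uint64_t dt = now_ns();

	sleep_ms(dwell_ms);

	dE = librapl_energy_uJ() - dE;   /* microjoules consumed */
	dt = now_ns() - dt;              /* nanoseconds elapsed */

	/* uJ over ns scaled to milliwatts: (dE * 1e6) / dt */
	return dE * 1000000ull / dt;
}

As with the frequency measurements, measure_power() folds the achieved frequency back into the report via *freq = (*freq + read_cagf(rps)) / 2.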
1136 struct intel_rps *rps = &gt->rps; in live_rps_power()
1149 if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6) in live_rps_power()
1152 if (!librapl_supported(gt->i915)) in live_rps_power()
1156 return -ENOMEM; in live_rps_power()
1159 saved_work = rps->work.func; in live_rps_power()
1160 rps->work.func = dummy_rps_work; in live_rps_power()
1166 int freq; in live_rps_power() member
1167 } min, max; in live_rps_power() local
1175 engine->kernel_context, in live_rps_power()
1187 engine->name); in live_rps_power()
1190 intel_gt_set_wedged(engine->gt); in live_rps_power()
1191 err = -EIO; in live_rps_power()
1195 max.freq = rps->max_freq; in live_rps_power()
1196 max.power = measure_power_at(rps, &max.freq); in live_rps_power()
1198 min.freq = rps->min_freq; in live_rps_power()
1199 min.power = measure_power_at(rps, &min.freq); in live_rps_power()
1204 pr_info("%s: min:%llumW @ %uMHz, max:%llumW @ %uMHz\n", in live_rps_power()
1205 engine->name, in live_rps_power()
1206 min.power, intel_gpu_freq(rps, min.freq), in live_rps_power()
1207 max.power, intel_gpu_freq(rps, max.freq)); in live_rps_power()
1209 if (10 * min.freq >= 9 * max.freq) { in live_rps_power()
1211 min.freq, intel_gpu_freq(rps, min.freq), in live_rps_power()
1212 max.freq, intel_gpu_freq(rps, max.freq)); in live_rps_power()
1216 if (11 * min.power > 10 * max.power) { in live_rps_power()
1218 engine->name); in live_rps_power()
1219 err = -EINVAL; in live_rps_power()
1223 if (igt_flush_test(gt->i915)) { in live_rps_power()
1224 err = -EIO; in live_rps_power()
1232 rps->work.func = saved_work; in live_rps_power()
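The two integer-ratio guards visible in live_rps_power(), written out as predicates: the test is skipped when min and max are within about 10% of each other (10 * min.freq >= 9 * max.freq), and it fails when the power at max does not exceed the power at min by more than about 10% (11 * min.power > 10 * max.power).

#include <stdint.h>
#include <stdbool.h>

/* min is already >= 90% of max: not enough frequency range to test. */
static bool freq_range_too_small(uint32_t min_freq, uint32_t max_freq)
{
	return 10 * min_freq >= 9 * max_freq;
}

/* max power is not even ~10% above min power: frequency scaling had no measurable effect. */
static bool power_did_not_scale(uint64_t min_power, uint64_t max_power)
{
	return 11 * min_power > 10 * max_power;
}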
1240 struct intel_rps *rps = &gt->rps; in live_rps_dynamic()
1253 if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6) in live_rps_dynamic()
1257 return -ENOMEM; in live_rps_dynamic()
1268 u8 freq; in live_rps_dynamic() member
1269 } min, max; in live_rps_dynamic() local
1276 rps->cur_freq = rps->min_freq; in live_rps_dynamic()
1279 intel_rc6_disable(&gt->rc6); in live_rps_dynamic()
1280 GEM_BUG_ON(rps->last_freq != rps->min_freq); in live_rps_dynamic()
1283 engine->kernel_context, in live_rps_dynamic()
1292 max.dt = ktime_get(); in live_rps_dynamic()
1293 max.freq = wait_for_freq(rps, rps->max_freq, 500); in live_rps_dynamic()
1294 max.dt = ktime_sub(ktime_get(), max.dt); in live_rps_dynamic()
1299 min.freq = wait_for_freq(rps, rps->min_freq, 2000); in live_rps_dynamic()
1303 engine->name, in live_rps_dynamic()
1304 max.freq, intel_gpu_freq(rps, max.freq), in live_rps_dynamic()
1305 ktime_to_ns(max.dt), in live_rps_dynamic()
1306 min.freq, intel_gpu_freq(rps, min.freq), in live_rps_dynamic()
1308 if (min.freq >= max.freq) { in live_rps_dynamic()
1310 engine->name); in live_rps_dynamic()
1311 err = -EINVAL; in live_rps_dynamic()
1315 intel_rc6_enable(&gt->rc6); in live_rps_dynamic()
1318 if (igt_flush_test(gt->i915)) in live_rps_dynamic()
1319 err = -EIO; in live_rps_dynamic()
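live_rps_dynamic() closes the set by exercising the autonomous governor rather than explicit requests: with RC6 disabled and the current frequency parked at min, a spinner should pull the clock up toward max within 500 ms, and going idle should let it fall back toward min within 2000 ms, with both transition times recorded. The final check is simply that the busy frequency exceeded the idle one; a standalone restatement:

#include <stdint.h>
#include <stdbool.h>

/*
 * After the busy/idle cycle, the frequency reached while busy must be
 * strictly above the frequency settled at while idle; min.freq >= max.freq
 * means the dynamic governor never reacted.
 */
static bool governor_reacted(uint8_t idle_freq, uint8_t busy_freq)
{
	return busy_freq > idle_freq;
}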