Lines Matching full:m2
35 } dot, vco, n, m, m1, m2, p, p1; member
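The member list above closes an anonymous { min, max } range structure declared once for every divider and derived value, which is exactly what the .m2 = { .min, .max } initializers below fill in. A minimal sketch of that layout, with the struct name intel_limit assumed and the p2 sub-structure omitted since neither appears in this listing:

struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;	/* legal range for each value */
};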
48 .m2 = { .min = 6, .max = 16 },
61 .m2 = { .min = 6, .max = 16 },
74 .m2 = { .min = 6, .max = 16 },
87 .m2 = { .min = 3, .max = 7 },
100 .m2 = { .min = 3, .max = 7 },
114 .m2 = { .min = 5, .max = 11 },
129 .m2 = { .min = 5, .max = 11 },
142 .m2 = { .min = 5, .max = 11 },
156 .m2 = { .min = 5, .max = 11 },
170 /* Pineview only has one combined m divider, which we treat as m2. */
172 .m2 = { .min = 0, .max = 254 },
185 .m2 = { .min = 0, .max = 254 },
194 * We calculate clock using (register_value + 2) for N/M1/M2, so here
203 .m2 = { .min = 5, .max = 9 },
216 .m2 = { .min = 5, .max = 9 },
229 .m2 = { .min = 5, .max = 9 },
243 .m2 = { .min = 5, .max = 9 },
256 .m2 = { .min = 5, .max = 9 },
274 .m2 = { .min = 11, .max = 156 },
290 .m2 = { .min = 24 << 22, .max = 175 << 22 },
300 /* FIXME: find real m2 limits */
301 .m2 = { .min = 2 << 22, .max = 255 << 22 },
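Two things stand out in the limit tables above. Pineview has a single combined M divider (per the comment at line 170), so its m2 range of 0..254 describes the whole divider rather than a second stage. The last two tables express m2 in a fixed-point format with 22 fractional bits, which the chv_crtc_clock_get and chv_prepare_pll lines further down confirm: a bound written as 24 << 22 is just the integer 24 sitting above the fraction (24 << 22 = 0x06000000 = 100663296), so the usable integer range is 24..175, and 2..255 for the table still marked FIXME.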
317 clock->m = clock->m2 + 2; in pnv_calc_dpll_params()
330 return 5 * (dpll->m1 + 2) + (dpll->m2 + 2); in i9xx_dpll_compute_m()
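Together with the truncated comment above ("We calculate clock using (register_value + 2) for N/M1/M2 ..."), these two lines show how register-style divider values become an effective M: Pineview's single divider is just m2 + 2, while the i9xx-style cascade weights m1 by 5. A small sketch restating only the arithmetic visible here; the function names and standalone framing are mine, not the driver's:

/* Pineview: one combined M divider, stored as a register value in m2. */
int pnv_effective_m(int m2)
{
	return m2 + 2;
}

/* i9xx-style: two cascaded dividers, both stored as register values. */
int i9xx_effective_m(int m1, int m2)
{
	return 5 * (m1 + 2) + (m2 + 2);
}

/* Example: register m1 = 10, m2 = 7  ->  5 * 12 + 9 = 69. */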
348 clock->m = clock->m1 * clock->m2; in vlv_calc_dpll_params()
361 clock->m = clock->m1 * clock->m2; in chv_calc_dpll_params()
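On VLV/CHV the two feedback dividers multiply directly, with no +2 offsets. The rest of each helper is not part of this listing, so the relation below is the standard PLL arithmetic these lines appear to feed and should be read as an assumption rather than quoted code:

/*
 * m   = m1 * m2            (CHV: m2 carries 22 fractional bits)
 * vco = refclk * m / n     (CHV: shifted right by 22 to drop the fraction)
 * dot = vco / p
 */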
408 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; in i9xx_crtc_clock_get()
411 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; in i9xx_crtc_clock_get()
500 clock.m2 = mdiv & DPIO_M2DIV_MASK; in vlv_crtc_clock_get()
532 clock.m2 = (pll_dw0 & 0xff) << 22; in chv_crtc_clock_get()
534 clock.m2 |= pll_dw2 & 0x3fffff; in chv_crtc_clock_get()
554 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) in intel_pll_is_valid()
560 if (clock->m1 <= clock->m2) in intel_pll_is_valid()
632 for (clock.m2 = limit->m2.min; in i9xx_find_best_dpll()
633 clock.m2 <= limit->m2.max; clock.m2++) { in i9xx_find_best_dpll()
634 if (clock.m2 >= clock.m1) in i9xx_find_best_dpll()
690 for (clock.m2 = limit->m2.min; in pnv_find_best_dpll()
691 clock.m2 <= limit->m2.max; clock.m2++) { in pnv_find_best_dpll()
750 /* based on hardware requirement, prefer larger m1,m2 */ in g4x_find_best_dpll()
753 for (clock.m2 = limit->m2.max; in g4x_find_best_dpll()
754 clock.m2 >= limit->m2.min; clock.m2--) { in g4x_find_best_dpll()
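The i9xx, pnv and g4x search loops above differ mainly in iteration order: i9xx walks m2 upward from the minimum and appears to stop once m2 reaches m1 (consistent with the m1 <= m2 rejection implied at line 560), while g4x walks downward from the maximum because, per its comment, larger m1/m2 values are preferred. A self-contained sketch of that exhaustive-search pattern follows; every type and helper in it is a stand-in for illustration, not the driver's actual code:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the driver's limit/dpll types. */
struct range { int min, max; };
struct limits { struct range m1, m2; };
struct divs { int m1, m2; };

/* Stand-in for the real dot-clock computation (which also involves the
 * reference clock, n, p1 and p2); enough to make the sketch runnable. */
static int calc_dot_khz(const struct divs *d)
{
	return 1000 * (5 * (d->m1 + 2) + (d->m2 + 2));
}

/* Pick the m1/m2 pair whose resulting clock lands closest to the target.
 * Walking m2 up or down only changes which of several equally good
 * candidates wins first. */
static struct divs search_m_dividers(const struct limits *limit, int target_khz)
{
	struct divs best = { 0, 0 }, cur;
	int best_err = INT_MAX;

	for (cur.m1 = limit->m1.min; cur.m1 <= limit->m1.max; cur.m1++) {
		for (cur.m2 = limit->m2.min;
		     cur.m2 <= limit->m2.max && cur.m2 < cur.m1; cur.m2++) {
			int err = abs(calc_dot_khz(&cur) - target_khz);

			if (err < best_err) {
				best_err = err;
				best = cur;
			}
		}
	}
	return best;
}

int main(void)
{
	/* Illustrative ranges only; m2 5..9 mirrors the i9xx tables above. */
	const struct limits lim = { .m1 = { 10, 22 }, .m2 = { 5, 9 } };
	struct divs best = search_m_dividers(&lim, 66000);

	printf("closest: m1=%d m2=%d\n", best.m1, best.m2);
	return 0;
}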
846 /* based on hardware requirement, prefer bigger m1,m2 values */ in vlv_find_best_dpll()
850 clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n, in vlv_find_best_dpll()
892 u64 m2; in chv_find_best_dpll() local
914 m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22, in chv_find_best_dpll()
917 if (m2 > INT_MAX/clock.m1) in chv_find_best_dpll()
920 clock.m2 = m2; in chv_find_best_dpll()
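The chv variant does not iterate m2 at all: for each candidate n, p and m1 it solves for m2 directly, and guards against values whose product with m1 would overflow a 32-bit int (the INT_MAX / m1 check). The visible numerator target * (p * n) << 22 matches what you get by inverting the relation sketched earlier; the denominator sits on the following source line and is not shown here, so refclk * m1 is an inference:

/*
 * target = refclk * m1 * m2 / (n * p * 2^22)
 *   =>  m2 = (target * p * n << 22) / (refclk * m1)    (.22 fixed point)
 */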
953 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2; in i9xx_dpll_compute_fp()
958 return (1 << dpll->n) << 16 | dpll->m2; in pnv_dpll_compute_fp()
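These two one-liners give the FP register layout directly from the bit positions in the expressions: on i9xx, N occupies bits 23:16, M1 bits 15:8 and M2 bits 7:0, so illustrative values n = 2, m1 = 12, m2 = 7 pack as 0x020C07. Pineview instead shifts (1 << n) into the upper field and has no M1 byte, consistent with its single combined M divider; for example n = 2, m2 = 150 packs as (1 << 2) << 16 | 150 = 0x040096.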
1856 bestm2 = crtc_state->dpll.m2; in vlv_prepare_pll()
1985 bestm2_frac = crtc_state->dpll.m2 & 0x3fffff; in chv_prepare_pll()
1986 bestm2 = crtc_state->dpll.m2 >> 22; in chv_prepare_pll()
2002 /* Feedback post-divider - m2 */ in chv_prepare_pll()
2010 /* M2 fraction division */ in chv_prepare_pll()
2013 /* M2 fraction division enable */ in chv_prepare_pll()
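The CHV programming and readout paths are mirror images: chv_prepare_pll splits the .22 fixed-point m2 into an integer part (>> 22) and a 22-bit fraction (& 0x3fffff) for two PLL dwords, and chv_crtc_clock_get at lines 532/534 reassembles them. A minimal round-trip sketch of just that split and merge, leaving out register names and the fraction-enable handling:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t m2 = (81u << 22) | 0x155555;	/* 81 + 0x155555/2^22, illustrative */

	/* chv_prepare_pll-style split */
	uint32_t m2_int  = m2 >> 22;		/* integer part */
	uint32_t m2_frac = m2 & 0x3fffff;	/* 22-bit fraction */

	/* chv_crtc_clock_get-style merge */
	uint32_t m2_back = (m2_int << 22) | m2_frac;

	assert(m2_back == m2);
	return 0;
}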