xref: /linux/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c (revision 6dfafbd0299a60bfb5d5e277fdf100037c7ded07)
1 /*
2  * Copyright 2021 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 
27 #include "dcn30/dcn30_hubbub.h"
28 #include "dcn32_hubbub.h"
29 #include "dm_services.h"
30 #include "reg_helper.h"
31 #include "dal_asic_id.h"
32 
33 
34 #define CTX \
35 	hubbub2->base.ctx
36 #define DC_LOGGER \
37 	hubbub2->base.ctx->logger
38 #define REG(reg)\
39 	hubbub2->regs->reg
40 
41 #undef FN
42 #define FN(reg_name, field_name) \
43 	hubbub2->shifts->field_name, hubbub2->masks->field_name
44 
45 /**
46  * DCN32_CRB_SEGMENT_SIZE_KB: Maximum Configurable Return Buffer size for
47  *                            DCN32
48  */
49 #define DCN32_CRB_SEGMENT_SIZE_KB 64
50 
/*
 * Initialize the software view of the Configurable Return Buffer (CRB):
 * read back the current DET and compressed-buffer allocations (in CRB
 * segments) into the dcn20_hubbub state, then program the compbuf
 * reserved space and DET depth.
 */
static void dcn32_init_crb(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	/* Cache the current per-pipe DET sizes so later reprogramming can
	 * validate the total against the CRB segment budget.
	 */
	REG_GET(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT,
		&hubbub2->det0_size);

	REG_GET(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT,
		&hubbub2->det1_size);

	REG_GET(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT,
		&hubbub2->det2_size);

	REG_GET(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT,
		&hubbub2->det3_size);

	REG_GET(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE_CURRENT,
		&hubbub2->compbuf_size_segments);

	/* Reserved space is derived from the pixel chunk size: expressed in
	 * 64-byte units (/32) and ZS units (/128) respectively.
	 */
	REG_SET_2(COMPBUF_RESERVED_SPACE, 0,
			COMPBUF_RESERVED_SPACE_64B, hubbub2->pixel_chunk_size / 32,
			COMPBUF_RESERVED_SPACE_ZS, hubbub2->pixel_chunk_size / 128);
	/* NOTE(review): 0x47F is a hardware-recommended DET depth value —
	 * confirm against the DCN32 register spec.
	 */
	REG_UPDATE(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, 0x47F);
}
75 
/*
 * Program the SDPIF port control bit.
 * @dc_control: value written to SDPIF_PORT_CONTROL — presumably true hands
 *              control of the SDP interface to DC; confirm against the
 *              DCHUBBUB_SDPIF_CFG0 register spec.
 */
static void hubbub32_set_sdp_control(struct hubbub *hubbub, bool dc_control)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	REG_UPDATE(DCHUBBUB_SDPIF_CFG0,
			SDPIF_PORT_CONTROL, dc_control);
}
83 
/*
 * Program the SDPIF request rate limit to 3/4 of the total outstanding
 * word capacity (memory_channel_count * words_per_channel).
 *
 * The value is clamped to the 12-bit register field (mask 0xFFF) and a
 * zero limit is never programmed (zero would presumably disable request
 * flow entirely — confirm against the register spec).
 */
void hubbub32_set_request_limit(struct hubbub *hubbub, int memory_channel_count, int words_per_channel)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	uint32_t request_limit = 3 * memory_channel_count * words_per_channel / 4;

	ASSERT((request_limit & (~0xFFF)) == 0); //field is only 12 bits wide (0xFFF mask)
	ASSERT(request_limit > 0); //a zero limit is unexpected and is not programmed

	if (request_limit > 0xFFF)
		request_limit = 0xFFF;

	if (request_limit > 0)
		REG_UPDATE(SDPIF_REQUEST_RATE_LIMIT, SDPIF_REQUEST_RATE_LIMIT, request_limit);
}
99 
100 
/*
 * Program the DET (Detail Enhancement Tile / display buffer) size for one
 * HUBP instance.
 * @hubp_inst: pipe index 0-3; other values are ignored.
 * @det_buffer_size_in_kbyte: requested size in KB, rounded up to whole
 *                            64KB CRB segments.
 *
 * Updates the cached per-pipe size and warns (without failing) when the
 * combined DET + compbuf allocation transiently exceeds the CRB budget,
 * which can legitimately happen during ODM reconfiguration.
 */
void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	/* Round up to whole CRB segments */
	unsigned int det_size_segments = (det_buffer_size_in_kbyte + DCN32_CRB_SEGMENT_SIZE_KB - 1) / DCN32_CRB_SEGMENT_SIZE_KB;

	switch (hubp_inst) {
	case 0:
		REG_UPDATE(DCHUBBUB_DET0_CTRL,
					DET0_SIZE, det_size_segments);
		hubbub2->det0_size = det_size_segments;
		break;
	case 1:
		REG_UPDATE(DCHUBBUB_DET1_CTRL,
					DET1_SIZE, det_size_segments);
		hubbub2->det1_size = det_size_segments;
		break;
	case 2:
		REG_UPDATE(DCHUBBUB_DET2_CTRL,
					DET2_SIZE, det_size_segments);
		hubbub2->det2_size = det_size_segments;
		break;
	case 3:
		REG_UPDATE(DCHUBBUB_DET3_CTRL,
					DET3_SIZE, det_size_segments);
		hubbub2->det3_size = det_size_segments;
		break;
	default:
		break;
	}
	/* Budget check uses cached sizes, not hardware readback */
	if (hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
			+ hubbub2->det3_size + hubbub2->compbuf_size_segments > hubbub2->crb_size_segs) {
		/* This may happen during seamless transition from ODM 2:1 to ODM4:1 */
		DC_LOG_WARNING("CRB Config Warning: DET size (%d,%d,%d,%d) + Compbuf size (%d) >  CRB segments (%d)\n",
						hubbub2->det0_size, hubbub2->det1_size, hubbub2->det2_size, hubbub2->det3_size,
						hubbub2->compbuf_size_segments, hubbub2->crb_size_segs);
	}
}
139 
/*
 * Program the compressed buffer (compbuf) size, in 64KB CRB segments.
 * @compbuf_size_kb: requested size in KB, rounded up to whole segments.
 * @safe_to_increase: when false, only a decrease (or equal size) is
 *                    applied; growing is deferred to a safe point.
 *
 * When growing the compbuf, first wait for all four DET allocations to
 * reach their programmed sizes, so the segments being claimed have
 * actually been released by the DETs.
 */
void dcn32_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	/* Round up to whole CRB segments */
	unsigned int compbuf_size_segments = (compbuf_size_kb + DCN32_CRB_SEGMENT_SIZE_KB - 1) / DCN32_CRB_SEGMENT_SIZE_KB;

	if (safe_to_increase || compbuf_size_segments <= hubbub2->compbuf_size_segments) {
		if (compbuf_size_segments > hubbub2->compbuf_size_segments) {
			/* Growing: wait (up to 100 polls, 1us apart) for each DET
			 * to settle at its cached target size first.
			 */
			REG_WAIT(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, hubbub2->det0_size, 1, 100);
			REG_WAIT(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, hubbub2->det1_size, 1, 100);
			REG_WAIT(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, hubbub2->det2_size, 1, 100);
			REG_WAIT(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, hubbub2->det3_size, 1, 100);
		}
		/* Should never be hit, if it is we have an erroneous hw config*/
		ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
				+ hubbub2->det3_size + compbuf_size_segments <= hubbub2->crb_size_segs);
		REG_UPDATE(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE, compbuf_size_segments);
		hubbub2->compbuf_size_segments = compbuf_size_segments;
		/* Reuse compbuf_size_segments as scratch for the CONFIG_ERROR
		 * readback: expect a successful read and the error bit clear.
		 */
		ASSERT(REG_GET(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, &compbuf_size_segments) && !compbuf_size_segments);
	}
}
160 
/*
 * Convert a watermark expressed in nanoseconds into reference-clock
 * cycles (wm_ns * refclk_mhz / 1000) and clamp the result so it fits in
 * the destination register field.
 */
static uint32_t convert_and_clamp(
	uint32_t wm_ns,
	uint32_t refclk_mhz,
	uint32_t clamp_value)
{
	uint32_t refclk_cycles = (wm_ns * refclk_mhz) / 1000;

	return (refclk_cycles > clamp_value) ? clamp_value : refclk_cycles;
}
176 
/**
 * hubbub32_program_urgent_watermarks - program urgent watermarks for
 * clock states A-D
 * @hubbub: hubbub to program
 * @watermarks: requested watermark set; time values in nanoseconds
 * @refclk_mhz: reference clock used to convert ns values to refclk cycles
 * @safe_to_lower: when false, a watermark is only written if it raises the
 *                 cached value; a requested lowering is deferred instead
 *
 * For each field: if the write is applied, the cached copy in
 * hubbub2->watermarks is updated; if a lowering was requested but
 * deferred, wm_pending is set. The frac_urg_bw_* values are written
 * as-is (no ns-to-cycles conversion).
 *
 * Return: true if any requested lowering is still pending, false otherwise.
 */
bool hubbub32_program_urgent_watermarks(
		struct hubbub *hubbub,
		union dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;
	bool wm_pending = false;

	/* Repeat for water mark set A, B, C and D. */
	/* clock state A */
	if (safe_to_lower || watermarks->a.urgent_ns > hubbub2->watermarks.a.urgent_ns) {
		hubbub2->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.urgent_ns, prog_wm_value);
	} else if (watermarks->a.urgent_ns < hubbub2->watermarks.a.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->a.frac_urg_bw_flip
			> hubbub2->watermarks.a.frac_urg_bw_flip) {
		hubbub2->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip);
	} else if (watermarks->a.frac_urg_bw_flip
			< hubbub2->watermarks.a.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.frac_urg_bw_nom
			> hubbub2->watermarks.a.frac_urg_bw_nom) {
		hubbub2->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom);
	} else if (watermarks->a.frac_urg_bw_nom
			< hubbub2->watermarks.a.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub2->watermarks.a.urgent_latency_ns) {
		hubbub2->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
	} else if (watermarks->a.urgent_latency_ns < hubbub2->watermarks.a.urgent_latency_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.urgent_ns > hubbub2->watermarks.b.urgent_ns) {
		hubbub2->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.urgent_ns, prog_wm_value);
	} else if (watermarks->b.urgent_ns < hubbub2->watermarks.b.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->b.frac_urg_bw_flip
			> hubbub2->watermarks.b.frac_urg_bw_flip) {
		hubbub2->watermarks.b.frac_urg_bw_flip = watermarks->b.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->b.frac_urg_bw_flip);
	} else if (watermarks->b.frac_urg_bw_flip
			< hubbub2->watermarks.b.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.frac_urg_bw_nom
			> hubbub2->watermarks.b.frac_urg_bw_nom) {
		hubbub2->watermarks.b.frac_urg_bw_nom = watermarks->b.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->b.frac_urg_bw_nom);
	} else if (watermarks->b.frac_urg_bw_nom
			< hubbub2->watermarks.b.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub2->watermarks.b.urgent_latency_ns) {
		hubbub2->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
	} else if (watermarks->b.urgent_latency_ns < hubbub2->watermarks.b.urgent_latency_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.urgent_ns > hubbub2->watermarks.c.urgent_ns) {
		hubbub2->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.urgent_ns, prog_wm_value);
	} else if (watermarks->c.urgent_ns < hubbub2->watermarks.c.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->c.frac_urg_bw_flip
			> hubbub2->watermarks.c.frac_urg_bw_flip) {
		hubbub2->watermarks.c.frac_urg_bw_flip = watermarks->c.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->c.frac_urg_bw_flip);
	} else if (watermarks->c.frac_urg_bw_flip
			< hubbub2->watermarks.c.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.frac_urg_bw_nom
			> hubbub2->watermarks.c.frac_urg_bw_nom) {
		hubbub2->watermarks.c.frac_urg_bw_nom = watermarks->c.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->c.frac_urg_bw_nom);
	} else if (watermarks->c.frac_urg_bw_nom
			< hubbub2->watermarks.c.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub2->watermarks.c.urgent_latency_ns) {
		hubbub2->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
	} else if (watermarks->c.urgent_latency_ns < hubbub2->watermarks.c.urgent_latency_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.urgent_ns > hubbub2->watermarks.d.urgent_ns) {
		hubbub2->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
		prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);

		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.urgent_ns, prog_wm_value);
	} else if (watermarks->d.urgent_ns < hubbub2->watermarks.d.urgent_ns)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->d.frac_urg_bw_flip
			> hubbub2->watermarks.d.frac_urg_bw_flip) {
		hubbub2->watermarks.d.frac_urg_bw_flip = watermarks->d.frac_urg_bw_flip;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->d.frac_urg_bw_flip);
	} else if (watermarks->d.frac_urg_bw_flip
			< hubbub2->watermarks.d.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.frac_urg_bw_nom
			> hubbub2->watermarks.d.frac_urg_bw_nom) {
		hubbub2->watermarks.d.frac_urg_bw_nom = watermarks->d.frac_urg_bw_nom;

		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->d.frac_urg_bw_nom);
	} else if (watermarks->d.frac_urg_bw_nom
			< hubbub2->watermarks.d.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub2->watermarks.d.urgent_latency_ns) {
		hubbub2->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
		prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
	} else if (watermarks->d.urgent_latency_ns < hubbub2->watermarks.d.urgent_latency_ns)
		wm_pending = true;

	return wm_pending;
}
366 
/**
 * hubbub32_program_stutter_watermarks - program self-refresh (stutter)
 * enter/exit watermarks for clock states A-D
 * @hubbub: hubbub to program
 * @watermarks: requested watermark set; values in nanoseconds
 * @refclk_mhz: reference clock used to convert ns values to refclk cycles
 * @safe_to_lower: when false, only raise watermarks; a requested lowering
 *                 is deferred instead
 *
 * Values are converted with convert_and_clamp() into a 16-bit field
 * (clamp 0xffff). Applied values update the cached copy in
 * hubbub2->watermarks; deferred lowerings set wm_pending.
 *
 * Return: true if any requested lowering is still pending, false otherwise.
 */
bool hubbub32_program_stutter_watermarks(
		struct hubbub *hubbub,
		union dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;
	bool wm_pending = false;

	/* clock state A */
	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
			> hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns) {
		hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns =
				watermarks->a.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.cstate_exit_ns
			< hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
			> hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns) {
		hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns =
				watermarks->b.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.cstate_exit_ns
			< hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
			> hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns) {
		hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns =
				watermarks->c.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.cstate_exit_ns
			< hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
			> hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
		hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
			< hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
		wm_pending = true;

	if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
			> hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns) {
		hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns =
				watermarks->d.cstate_pstate.cstate_exit_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.cstate_exit_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.cstate_exit_ns
			< hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns)
		wm_pending = true;

	return wm_pending;
}
511 
512 
/**
 * hubbub32_program_pstate_watermarks - program UCLK and FCLK p-state
 * change watermarks for clock states A-D
 * @hubbub: hubbub to program
 * @watermarks: requested watermark set; values in nanoseconds
 * @refclk_mhz: reference clock used to convert ns values to refclk cycles
 * @safe_to_lower: when false, only raise watermarks; a requested lowering
 *                 is deferred instead
 *
 * Values are converted with convert_and_clamp() into a 16-bit field
 * (clamp 0xffff). Applied values update the cached copy in
 * hubbub2->watermarks; deferred lowerings set wm_pending.
 *
 * Return: true if any requested lowering is still pending, false otherwise.
 */
bool hubbub32_program_pstate_watermarks(
		struct hubbub *hubbub,
		union dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;

	bool wm_pending = false;

	/* Section for UCLK_PSTATE_CHANGE_WATERMARKS */
	/* clock state A */
	if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
			> hubbub2->watermarks.a.cstate_pstate.pstate_change_ns) {
		hubbub2->watermarks.a.cstate_pstate.pstate_change_ns =
				watermarks->a.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, 0,
				DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.pstate_change_ns
			< hubbub2->watermarks.a.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
			> hubbub2->watermarks.b.cstate_pstate.pstate_change_ns) {
		hubbub2->watermarks.b.cstate_pstate.pstate_change_ns =
				watermarks->b.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, 0,
				DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.pstate_change_ns
			< hubbub2->watermarks.b.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
			> hubbub2->watermarks.c.cstate_pstate.pstate_change_ns) {
		hubbub2->watermarks.c.cstate_pstate.pstate_change_ns =
				watermarks->c.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, 0,
				DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.pstate_change_ns
			< hubbub2->watermarks.c.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
			> hubbub2->watermarks.d.cstate_pstate.pstate_change_ns) {
		hubbub2->watermarks.d.cstate_pstate.pstate_change_ns =
				watermarks->d.cstate_pstate.pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, 0,
				DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.pstate_change_ns
			< hubbub2->watermarks.d.cstate_pstate.pstate_change_ns)
		wm_pending = true;

	/* Section for FCLK_PSTATE_CHANGE_WATERMARKS */
	/* clock state A */
	if (safe_to_lower || watermarks->a.cstate_pstate.fclk_pstate_change_ns
			> hubbub2->watermarks.a.cstate_pstate.fclk_pstate_change_ns) {
		hubbub2->watermarks.a.cstate_pstate.fclk_pstate_change_ns =
				watermarks->a.cstate_pstate.fclk_pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.cstate_pstate.fclk_pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, 0,
				DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->a.cstate_pstate.fclk_pstate_change_ns, prog_wm_value);
	} else if (watermarks->a.cstate_pstate.fclk_pstate_change_ns
			< hubbub2->watermarks.a.cstate_pstate.fclk_pstate_change_ns)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.cstate_pstate.fclk_pstate_change_ns
			> hubbub2->watermarks.b.cstate_pstate.fclk_pstate_change_ns) {
		hubbub2->watermarks.b.cstate_pstate.fclk_pstate_change_ns =
				watermarks->b.cstate_pstate.fclk_pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.cstate_pstate.fclk_pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, 0,
				DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->b.cstate_pstate.fclk_pstate_change_ns, prog_wm_value);
	} else if (watermarks->b.cstate_pstate.fclk_pstate_change_ns
			< hubbub2->watermarks.b.cstate_pstate.fclk_pstate_change_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.cstate_pstate.fclk_pstate_change_ns
			> hubbub2->watermarks.c.cstate_pstate.fclk_pstate_change_ns) {
		hubbub2->watermarks.c.cstate_pstate.fclk_pstate_change_ns =
				watermarks->c.cstate_pstate.fclk_pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.cstate_pstate.fclk_pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, 0,
				DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->c.cstate_pstate.fclk_pstate_change_ns, prog_wm_value);
	} else if (watermarks->c.cstate_pstate.fclk_pstate_change_ns
			< hubbub2->watermarks.c.cstate_pstate.fclk_pstate_change_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.cstate_pstate.fclk_pstate_change_ns
			> hubbub2->watermarks.d.cstate_pstate.fclk_pstate_change_ns) {
		hubbub2->watermarks.d.cstate_pstate.fclk_pstate_change_ns =
				watermarks->d.cstate_pstate.fclk_pstate_change_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.cstate_pstate.fclk_pstate_change_ns,
				refclk_mhz, 0xffff);
		REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, 0,
				DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->d.cstate_pstate.fclk_pstate_change_ns, prog_wm_value);
	} else if (watermarks->d.cstate_pstate.fclk_pstate_change_ns
			< hubbub2->watermarks.d.cstate_pstate.fclk_pstate_change_ns)
		wm_pending = true;

	return wm_pending;
}
664 
665 
/*
 * hubbub32_program_usr_watermarks - program USR retraining watermarks for
 * clock states A-D.
 *
 * Each watermark is converted from nanoseconds to refclk cycles via
 * convert_and_clamp() (clamped to 0x3fff) and written to the corresponding
 * DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_* register.  A watermark is only
 * written when it is being raised, or when @safe_to_lower is true;
 * otherwise the lowering is deferred and reported via the return value.
 *
 * Return: true if at least one watermark still needs to be lowered
 * (caller should call again with safe_to_lower = true later), false if
 * everything was programmed.
 */
bool hubbub32_program_usr_watermarks(
		struct hubbub *hubbub,
		union dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t prog_wm_value;

	bool wm_pending = false;

	/* clock state A */
	if (safe_to_lower || watermarks->a.usr_retraining_ns
			> hubbub2->watermarks.a.usr_retraining_ns) {
		/* raising (or allowed to lower): cache the new value and program HW */
		hubbub2->watermarks.a.usr_retraining_ns = watermarks->a.usr_retraining_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->a.usr_retraining_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, 0,
				DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->a.usr_retraining_ns, prog_wm_value);
	} else if (watermarks->a.usr_retraining_ns
			< hubbub2->watermarks.a.usr_retraining_ns)
		/* lowering requested but not safe yet: defer and report pending */
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->b.usr_retraining_ns
			> hubbub2->watermarks.b.usr_retraining_ns) {
		hubbub2->watermarks.b.usr_retraining_ns = watermarks->b.usr_retraining_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->b.usr_retraining_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, 0,
				DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->b.usr_retraining_ns, prog_wm_value);
	} else if (watermarks->b.usr_retraining_ns
			< hubbub2->watermarks.b.usr_retraining_ns)
		wm_pending = true;

	/* clock state C */
	if (safe_to_lower || watermarks->c.usr_retraining_ns
			> hubbub2->watermarks.c.usr_retraining_ns) {
		hubbub2->watermarks.c.usr_retraining_ns =
				watermarks->c.usr_retraining_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->c.usr_retraining_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, 0,
				DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_C calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->c.usr_retraining_ns, prog_wm_value);
	} else if (watermarks->c.usr_retraining_ns
			< hubbub2->watermarks.c.usr_retraining_ns)
		wm_pending = true;

	/* clock state D */
	if (safe_to_lower || watermarks->d.usr_retraining_ns
			> hubbub2->watermarks.d.usr_retraining_ns) {
		hubbub2->watermarks.d.usr_retraining_ns =
				watermarks->d.usr_retraining_ns;
		prog_wm_value = convert_and_clamp(
				watermarks->d.usr_retraining_ns,
				refclk_mhz, 0x3fff);
		REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, 0,
				DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, prog_wm_value);
		DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_D calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->d.usr_retraining_ns, prog_wm_value);
	} else if (watermarks->d.usr_retraining_ns
			< hubbub2->watermarks.d.usr_retraining_ns)
		wm_pending = true;

	return wm_pending;
}
745 
/*
 * hubbub32_force_usr_retraining_allow - force the "allow USR retraining"
 * signal.
 *
 * @allow == true:  force is enabled and the forced value is 1
 *                  (retraining allowed).
 * @allow == false: force is disabled (value field is also cleared), so
 *                  the signal returns to normal hardware control.
 */
void hubbub32_force_usr_retraining_allow(struct hubbub *hubbub, bool allow)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	/*
	 * DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_ENABLE = 1 means enabling forcing value
	 * DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_VALUE = 1 or 0,  means value to be forced when force enable
	 */

	REG_UPDATE_2(DCHUBBUB_ARB_USR_RETRAINING_CNTL,
			DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_VALUE, allow,
			DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_ENABLE, allow);
}
759 
/*
 * hubbub32_program_watermarks - program every DCN3.2 watermark group.
 *
 * Programs the urgent, stutter, pstate and USR watermark groups in turn.
 * On GC 11.0.0 / 11.0.3 parts with the disable_stutter_for_wm_program
 * debug option set, stutter and SDP control are temporarily taken away
 * before raising watermarks and restored once it is safe to lower them.
 *
 * Return: true if any group deferred a lowering (wm_pending); the caller
 * must invoke this again with @safe_to_lower = true to finish.
 */
static bool hubbub32_program_watermarks(
		struct hubbub *hubbub,
		union dcn_watermark_set *watermarks,
		unsigned int refclk_mhz,
		bool safe_to_lower)
{
	struct dc *dc = hubbub->ctx->dc;
	bool wm_pending = false;

	if (!safe_to_lower && dc->debug.disable_stutter_for_wm_program &&
			(ASICREV_IS_GC_11_0_0(dc->ctx->asic_id.hw_internal_rev) ||
			ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev))) {
		/* before raising watermarks, SDP control give to DF, stutter must be disabled */
		wm_pending = true;
		hubbub32_set_sdp_control(hubbub, false);
		hubbub1_allow_self_refresh_control(hubbub, false);
	}

	/* any group reporting a deferred lowering makes the whole call pending */
	if (hubbub32_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	if (hubbub32_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	if (hubbub32_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	if (hubbub32_program_usr_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
		wm_pending = true;

	/*
	 * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric.
	 * If the memory controller is fully utilized and the DCHub requestors are
	 * well ahead of their amortized schedule, then it is safe to prevent the next winner
	 * from being committed and sent to the fabric.
	 * The utilization of the memory controller is approximated by ensuring that
	 * the number of outstanding requests is greater than a threshold specified
	 * by the ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well ahead of the amortized schedule,
	 * the slack of the next winner is compared with the ARB_SAT_LEVEL in DLG RefClk cycles.
	 *
	 * TODO: Revisit request limit after figure out right number. request limit for RM isn't decided yet, set maximum value (0x1FF)
	 * to turn off it for now.
	 */
	/*REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0,
			DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
	REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/

	if (safe_to_lower) {
		/* after lowering watermarks, stutter setting is restored, SDP control given to DC */
		hubbub1_allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);

		if (dc->debug.disable_stutter_for_wm_program &&
				(ASICREV_IS_GC_11_0_0(dc->ctx->asic_id.hw_internal_rev) ||
				ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev))) {
			hubbub32_set_sdp_control(hubbub, true);
		}
	} else if (dc->debug.disable_stutter) {
		hubbub1_allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
	}

	hubbub32_force_usr_retraining_allow(hubbub, dc->debug.force_usr_allow);

	return wm_pending;
}
825 
/*
 * hubbub32_init_watermarks - copy values from WM set A to all other sets.
 *
 * For each watermark register group, reads the set-A value and writes it
 * to the B, C and D registers so that all four sets start out identical.
 */
static void hubbub32_init_watermarks(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t reg;

	/* urgency */
	reg = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, reg);

	/* fractional urgent bandwidth (flip and nominal) */
	reg = REG_READ(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, reg);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, reg);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, reg);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, reg);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A);
	REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, reg);
	REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, reg);
	REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, reg);

	/* self-refresh enter/exit */
	reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, reg);

	/* USR retraining */
	reg = REG_READ(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, reg);

	/* UCLK / FCLK p-state change */
	reg = REG_READ(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, reg);

	reg = REG_READ(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, reg);
	REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, reg);
}
877 
/*
 * hubbub32_wm_read_state - read back the programmed watermark registers.
 *
 * Fills @wm (zeroed first) with the current HW values of the urgent,
 * SR enter/exit, UCLK p-state, USR retraining and FCLK p-state watermark
 * registers for all four watermark sets (A-D).
 */
static void hubbub32_wm_read_state(struct hubbub *hubbub,
		struct dcn_hubbub_wm *wm)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	struct dcn_hubbub_wm_set *s;

	memset(wm, 0, sizeof(struct dcn_hubbub_wm));

	/* WM set A */
	s = &wm->sets[0];
	s->wm_set = 0;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A,
			 DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, &s->dram_clk_change);

	REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A,
			 DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, &s->usr_retrain);

	REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A,
			 DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, &s->fclk_pstate_change);

	/* WM set B */
	s = &wm->sets[1];
	s->wm_set = 1;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B,
			DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, &s->dram_clk_change);

	REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B,
			 DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, &s->usr_retrain);

	REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B,
			DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, &s->fclk_pstate_change);

	/* WM set C */
	s = &wm->sets[2];
	s->wm_set = 2;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C,
			DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, &s->dram_clk_change);

	REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C,
			 DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, &s->usr_retrain);

	REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C,
			DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, &s->fclk_pstate_change);

	/* WM set D */
	s = &wm->sets[3];
	s->wm_set = 3;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D,
			DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, &s->dram_clk_change);

	REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D,
			 DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, &s->usr_retrain);

	REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D,
			DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, &s->fclk_pstate_change);
}
966 
/*
 * hubbub32_force_wm_propagate_to_pipes - re-write the set-A urgent watermark.
 *
 * Re-programs DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A with the value already
 * cached in hubbub2->watermarks.a.urgent_ns (converted to refclk cycles,
 * clamped to 0x3fff), so the register value itself does not change.
 * NOTE(review): per the function name, the act of writing the register is
 * presumably what triggers watermark propagation to the pipes -- confirm
 * against HW documentation.
 */
void hubbub32_force_wm_propagate_to_pipes(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t refclk_mhz = hubbub->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
	uint32_t prog_wm_value = convert_and_clamp(hubbub2->watermarks.a.urgent_ns,
			refclk_mhz, 0x3fff);

	REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
}
977 
hubbub32_get_mall_en(struct hubbub * hubbub,unsigned int * mall_in_use)978 void hubbub32_get_mall_en(struct hubbub *hubbub, unsigned int *mall_in_use)
979 {
980 	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
981 	uint32_t prefetch_complete, mall_en;
982 
983 	REG_GET_2(DCHUBBUB_ARB_MALL_CNTL, MALL_IN_USE, &mall_en,
984 			  MALL_PREFETCH_COMPLETE, &prefetch_complete);
985 
986 	*mall_in_use = prefetch_complete && mall_en;
987 }
988 
/*
 * hubbub32_init - one-time DCHUBBUB initialization for DCN3.2.
 *
 * Applies the clock-gating debug override, hands SDP port control to DCN,
 * and raises the SDP / arbiter outstanding-request limits to 512.
 */
void hubbub32_init(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	/* Debug override: keep DCHUBBUB clocks ungated when gating is disabled */
	if (hubbub->ctx->dc->debug.disable_clock_gate) {
		/*done in hwseq*/
		/*REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);*/

		REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL,
			DISPCLK_R_DCHUBBUB_GATE_DIS, 1,
			DCFCLK_R_DCHUBBUB_GATE_DIS, 1);
	}
	/*
	ignore the "df_pre_cstate_req" from the SDP port control.
	only the DCN will determine when to connect the SDP port
	*/
	hubbub32_set_sdp_control(hubbub, true);
	/*Set SDP's max outstanding request to 512
	must set the register back to 0 (max outstanding = 256) in zero frame buffer mode*/
	REG_UPDATE(DCHUBBUB_SDPIF_CFG1,
			SDPIF_MAX_NUM_OUTSTANDING, 1);
	/*must set the registers back to 256 in zero frame buffer mode*/
	REG_UPDATE_2(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
			DCHUBBUB_ARB_MAX_REQ_OUTSTAND, 512,
			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 512);
}
1016 
/*
 * DCN3.2 HUBBUB function table: DCN3.2-specific hooks plus helpers
 * reused from earlier DCN generations (hubbub1/2/3 prefixes).
 */
static const struct hubbub_funcs hubbub32_funcs = {
	.update_dchub = hubbub2_update_dchub,
	.init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx,
	.init_vm_ctx = hubbub2_init_vm_ctx,
	.dcc_support_swizzle = hubbub3_dcc_support_swizzle,
	.dcc_support_pixel_format = hubbub2_dcc_support_pixel_format,
	.get_dcc_compression_cap = hubbub3_get_dcc_compression_cap,
	.wm_read_state = hubbub32_wm_read_state,
	.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
	.program_watermarks = hubbub32_program_watermarks,
	.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
	.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
	.verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high,
	.force_wm_propagate_to_pipes = hubbub32_force_wm_propagate_to_pipes,
	.force_pstate_change_control = hubbub3_force_pstate_change_control,
	.init_watermarks = hubbub32_init_watermarks,
	.program_det_size = dcn32_program_det_size,
	.program_compbuf_size = dcn32_program_compbuf_size,
	.init_crb = dcn32_init_crb,
	.hubbub_read_state = hubbub2_read_state,
	.force_usr_retraining_allow = hubbub32_force_usr_retraining_allow,
	.set_request_limit = hubbub32_set_request_limit,
	.get_mall_en = hubbub32_get_mall_en,
	.hubbub_read_reg_state = hubbub3_read_reg_state
};
1042 
hubbub32_construct(struct dcn20_hubbub * hubbub2,struct dc_context * ctx,const struct dcn_hubbub_registers * hubbub_regs,const struct dcn_hubbub_shift * hubbub_shift,const struct dcn_hubbub_mask * hubbub_mask,int det_size_kb,int pixel_chunk_size_kb,int config_return_buffer_size_kb)1043 void hubbub32_construct(struct dcn20_hubbub *hubbub2,
1044 	struct dc_context *ctx,
1045 	const struct dcn_hubbub_registers *hubbub_regs,
1046 	const struct dcn_hubbub_shift *hubbub_shift,
1047 	const struct dcn_hubbub_mask *hubbub_mask,
1048 	int det_size_kb,
1049 	int pixel_chunk_size_kb,
1050 	int config_return_buffer_size_kb)
1051 {
1052 	hubbub2->base.ctx = ctx;
1053 	hubbub2->base.funcs = &hubbub32_funcs;
1054 	hubbub2->regs = hubbub_regs;
1055 	hubbub2->shifts = hubbub_shift;
1056 	hubbub2->masks = hubbub_mask;
1057 
1058 	hubbub2->debug_test_index_pstate = 0xB;
1059 	hubbub2->detile_buf_size = det_size_kb * 1024;
1060 	hubbub2->pixel_chunk_size = pixel_chunk_size_kb * 1024;
1061 	hubbub2->crb_size_segs = config_return_buffer_size_kb / DCN32_CRB_SEGMENT_SIZE_KB;
1062 }
1063