xref: /linux/drivers/gpu/drm/vc4/vc4_kms.c (revision 260f6f4fda93c8485c8037865c941b42b9cba5d2)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2015 Broadcom
4  */
5 
6 /**
7  * DOC: VC4 KMS
8  *
9  * This is the general code for implementing KMS mode setting that
10  * doesn't clearly associate with any of the other objects (plane,
11  * crtc, HDMI encoder).
12  */
13 
14 #include <linux/clk.h>
15 #include <linux/sort.h>
16 
17 #include <drm/drm_atomic.h>
18 #include <drm/drm_atomic_helper.h>
19 #include <drm/drm_crtc.h>
20 #include <drm/drm_fourcc.h>
21 #include <drm/drm_gem_framebuffer_helper.h>
22 #include <drm/drm_probe_helper.h>
23 #include <drm/drm_vblank.h>
24 
25 #include "vc4_drv.h"
26 #include "vc4_regs.h"
27 
28 struct vc4_ctm_state {
29 	struct drm_private_state base;
30 	struct drm_color_ctm *ctm;
31 	int fifo;
32 };
33 
34 #define to_vc4_ctm_state(_state)				\
35 	container_of_const(_state, struct vc4_ctm_state, base)
36 
37 struct vc4_load_tracker_state {
38 	struct drm_private_state base;
39 	u64 hvs_load;
40 	u64 membus_load;
41 };
42 
43 #define to_vc4_load_tracker_state(_state)				\
44 	container_of_const(_state, struct vc4_load_tracker_state, base)
45 
46 static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
47 					       struct drm_private_obj *manager)
48 {
49 	struct drm_device *dev = state->dev;
50 	struct vc4_dev *vc4 = to_vc4_dev(dev);
51 	struct drm_private_state *priv_state;
52 	int ret;
53 
54 	ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
55 	if (ret)
56 		return ERR_PTR(ret);
57 
58 	priv_state = drm_atomic_get_private_obj_state(state, manager);
59 	if (IS_ERR(priv_state))
60 		return ERR_CAST(priv_state);
61 
62 	return to_vc4_ctm_state(priv_state);
63 }
64 
65 static struct drm_private_state *
66 vc4_ctm_duplicate_state(struct drm_private_obj *obj)
67 {
68 	struct vc4_ctm_state *state;
69 
70 	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
71 	if (!state)
72 		return NULL;
73 
74 	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
75 
76 	return &state->base;
77 }
78 
79 static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
80 				  struct drm_private_state *state)
81 {
82 	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);
83 
84 	kfree(ctm_state);
85 }
86 
87 static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
88 	.atomic_duplicate_state = vc4_ctm_duplicate_state,
89 	.atomic_destroy_state = vc4_ctm_destroy_state,
90 };
91 
92 static void vc4_ctm_obj_fini(struct drm_device *dev, void *unused)
93 {
94 	struct vc4_dev *vc4 = to_vc4_dev(dev);
95 
96 	drm_atomic_private_obj_fini(&vc4->ctm_manager);
97 }
98 
99 static int vc4_ctm_obj_init(struct vc4_dev *vc4)
100 {
101 	struct vc4_ctm_state *ctm_state;
102 
103 	drm_modeset_lock_init(&vc4->ctm_state_lock);
104 
105 	ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
106 	if (!ctm_state)
107 		return -ENOMEM;
108 
109 	drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, &ctm_state->base,
110 				    &vc4_ctm_state_funcs);
111 
112 	return drmm_add_action_or_reset(&vc4->base, vc4_ctm_obj_fini, NULL);
113 }
114 
115 /* Converts a DRM S31.32 value to the HW S0.9 format. */
116 static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
117 {
118 	u16 r;
119 
120 	/* Sign bit. */
121 	r = in & BIT_ULL(63) ? BIT(9) : 0;
122 
123 	if ((in & GENMASK_ULL(62, 32)) > 0) {
124 		/* We have zero integer bits so we can only saturate here. */
125 		r |= GENMASK(8, 0);
126 	} else {
127 		/* Otherwise take the 9 most important fractional bits. */
128 		r |= (in >> 23) & GENMASK(8, 0);
129 	}
130 
131 	return r;
132 }
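
/*
 * For illustration (assuming the sign-magnitude S31.32 layout used by DRM
 * CTM entries): 0.5 is encoded as 0x0000000080000000. No integer bits
 * (62:32) are set, so the conversion keeps the nine most significant
 * fraction bits, (in >> 23) & GENMASK(8, 0) == 0x100, i.e. 256/512 = 0.5 in
 * S0.9. Any magnitude of 1.0 or larger sets an integer bit and saturates to
 * GENMASK(8, 0), while bit 63 only sets the S0.9 sign bit, BIT(9).
 */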
133 
134 static void
135 vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
136 {
137 	struct vc4_hvs *hvs = vc4->hvs;
138 	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
139 	struct drm_color_ctm *ctm = ctm_state->ctm;
140 
141 	WARN_ON_ONCE(vc4->gen > VC4_GEN_5);
142 
143 	if (ctm_state->fifo) {
144 		HVS_WRITE(SCALER_OLEDCOEF2,
145 			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
146 					SCALER_OLEDCOEF2_R_TO_R) |
147 			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
148 					SCALER_OLEDCOEF2_R_TO_G) |
149 			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
150 					SCALER_OLEDCOEF2_R_TO_B));
151 		HVS_WRITE(SCALER_OLEDCOEF1,
152 			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
153 					SCALER_OLEDCOEF1_G_TO_R) |
154 			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
155 					SCALER_OLEDCOEF1_G_TO_G) |
156 			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
157 					SCALER_OLEDCOEF1_G_TO_B));
158 		HVS_WRITE(SCALER_OLEDCOEF0,
159 			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
160 					SCALER_OLEDCOEF0_B_TO_R) |
161 			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
162 					SCALER_OLEDCOEF0_B_TO_G) |
163 			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
164 					SCALER_OLEDCOEF0_B_TO_B));
165 	}
166 
167 	HVS_WRITE(SCALER_OLEDOFFS,
168 		  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
169 }
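
/*
 * Note that ctm_state->fifo is written to SCALER_OLEDOFFS unconditionally
 * above: the FIFO numbering is 1-based (see vc4_ctm_atomic_check() below),
 * so a value of 0 both skips the coefficient writes and, per that 1-based
 * numbering, leaves the CTM disabled.
 */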
170 
171 struct vc4_hvs_state *
172 vc4_hvs_get_new_global_state(const struct drm_atomic_state *state)
173 {
174 	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
175 	struct drm_private_state *priv_state;
176 
177 	priv_state = drm_atomic_get_new_private_obj_state(state, &vc4->hvs_channels);
178 	if (!priv_state)
179 		return ERR_PTR(-EINVAL);
180 
181 	return to_vc4_hvs_state(priv_state);
182 }
183 
184 struct vc4_hvs_state *
185 vc4_hvs_get_old_global_state(const struct drm_atomic_state *state)
186 {
187 	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
188 	struct drm_private_state *priv_state;
189 
190 	priv_state = drm_atomic_get_old_private_obj_state(state, &vc4->hvs_channels);
191 	if (!priv_state)
192 		return ERR_PTR(-EINVAL);
193 
194 	return to_vc4_hvs_state(priv_state);
195 }
196 
197 struct vc4_hvs_state *
198 vc4_hvs_get_global_state(struct drm_atomic_state *state)
199 {
200 	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
201 	struct drm_private_state *priv_state;
202 
203 	priv_state = drm_atomic_get_private_obj_state(state, &vc4->hvs_channels);
204 	if (IS_ERR(priv_state))
205 		return ERR_CAST(priv_state);
206 
207 	return to_vc4_hvs_state(priv_state);
208 }
209 
210 static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
211 				     struct drm_atomic_state *state)
212 {
213 	struct vc4_hvs *hvs = vc4->hvs;
214 	struct drm_crtc_state *crtc_state;
215 	struct drm_crtc *crtc;
216 	unsigned int i;
217 
218 	WARN_ON_ONCE(vc4->gen != VC4_GEN_4);
219 
220 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
221 		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
222 		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
223 		u32 dispctrl;
224 		u32 dsp3_mux;
225 
226 		if (!crtc_state->active)
227 			continue;
228 
229 		if (vc4_state->assigned_channel != 2)
230 			continue;
231 
232 		/*
233 		 * SCALER_DISPCTRL_DSP3 = X, where X < 2 means 'connect DSP3 to
234 		 * FIFO X'.
235 		 * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'.
236 		 *
237 		 * DSP3 is connected to FIFO2 unless the transposer is
238 		 * enabled. In this case, FIFO 2 is directly accessed by the
239 		 * TXP IP, and we need to disable the FIFO2 -> pixelvalve1
240 		 * route.
241 		 */
242 		if (vc4_crtc->feeds_txp)
243 			dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX);
244 		else
245 			dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);
246 
247 		dispctrl = HVS_READ(SCALER_DISPCTRL) &
248 			   ~SCALER_DISPCTRL_DSP3_MUX_MASK;
249 		HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux);
250 	}
251 }
252 
253 static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
254 				     struct drm_atomic_state *state)
255 {
256 	struct vc4_hvs *hvs = vc4->hvs;
257 	struct drm_crtc_state *crtc_state;
258 	struct drm_crtc *crtc;
259 	unsigned char mux;
260 	unsigned int i;
261 	u32 reg;
262 
263 	WARN_ON_ONCE(vc4->gen != VC4_GEN_5);
264 
265 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
266 		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
267 		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
268 		unsigned int channel = vc4_state->assigned_channel;
269 
270 		if (!vc4_state->update_muxing)
271 			continue;
272 
273 		switch (vc4_crtc->data->hvs_output) {
274 		case 2:
275 			drm_WARN_ON(&vc4->base,
276 				    VC4_GET_FIELD(HVS_READ(SCALER_DISPCTRL),
277 						  SCALER_DISPCTRL_DSP3_MUX) == channel);
278 
279 			mux = (channel == 2) ? 0 : 1;
280 			reg = HVS_READ(SCALER_DISPECTRL);
281 			HVS_WRITE(SCALER_DISPECTRL,
282 				  (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
283 				  VC4_SET_FIELD(mux, SCALER_DISPECTRL_DSP2_MUX));
284 			break;
285 
286 		case 3:
287 			if (channel == VC4_HVS_CHANNEL_DISABLED)
288 				mux = 3;
289 			else
290 				mux = channel;
291 
292 			reg = HVS_READ(SCALER_DISPCTRL);
293 			HVS_WRITE(SCALER_DISPCTRL,
294 				  (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
295 				  VC4_SET_FIELD(mux, SCALER_DISPCTRL_DSP3_MUX));
296 			break;
297 
298 		case 4:
299 			if (channel == VC4_HVS_CHANNEL_DISABLED)
300 				mux = 3;
301 			else
302 				mux = channel;
303 
304 			reg = HVS_READ(SCALER_DISPEOLN);
305 			HVS_WRITE(SCALER_DISPEOLN,
306 				  (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
307 				  VC4_SET_FIELD(mux, SCALER_DISPEOLN_DSP4_MUX));
308 
309 			break;
310 
311 		case 5:
312 			if (channel == VC4_HVS_CHANNEL_DISABLED)
313 				mux = 3;
314 			else
315 				mux = channel;
316 
317 			reg = HVS_READ(SCALER_DISPDITHER);
318 			HVS_WRITE(SCALER_DISPDITHER,
319 				  (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
320 				  VC4_SET_FIELD(mux, SCALER_DISPDITHER_DSP5_MUX));
321 			break;
322 
323 		default:
324 			break;
325 		}
326 	}
327 }
328 
329 static void vc6_hvs_pv_muxing_commit(struct vc4_dev *vc4,
330 				     struct drm_atomic_state *state)
331 {
332 	struct vc4_hvs *hvs = vc4->hvs;
333 	struct drm_crtc_state *crtc_state;
334 	struct drm_crtc *crtc;
335 	unsigned int i;
336 
337 	WARN_ON_ONCE(vc4->gen != VC4_GEN_6_C && vc4->gen != VC4_GEN_6_D);
338 
339 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
340 		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
341 		struct vc4_encoder *vc4_encoder;
342 		struct drm_encoder *encoder;
343 		unsigned char mux;
344 		u32 reg;
345 
346 		if (!vc4_state->update_muxing)
347 			continue;
348 
349 		if (vc4_state->assigned_channel != 1)
350 			continue;
351 
352 		encoder = vc4_get_crtc_encoder(crtc, crtc_state);
353 		vc4_encoder = to_vc4_encoder(encoder);
354 		switch (vc4_encoder->type) {
355 		case VC4_ENCODER_TYPE_HDMI1:
356 			mux = 0;
357 			break;
358 
359 		case VC4_ENCODER_TYPE_TXP1:
360 			mux = 2;
361 			break;
362 
363 		default:
364 			drm_err(&vc4->base, "Unhandled encoder type for PV muxing %d",
365 				vc4_encoder->type);
366 			mux = 0;
367 			break;
368 		}
369 
370 		reg = HVS_READ(SCALER6_CONTROL);
371 		HVS_WRITE(SCALER6_CONTROL,
372 			  (reg & ~SCALER6_CONTROL_DSP1_TARGET_MASK) |
373 			  VC4_SET_FIELD(mux, SCALER6_CONTROL_DSP1_TARGET));
374 	}
375 }
376 
377 static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
378 {
379 	struct drm_device *dev = state->dev;
380 	struct vc4_dev *vc4 = to_vc4_dev(dev);
381 	struct vc4_hvs *hvs = vc4->hvs;
382 	struct vc4_hvs_state *new_hvs_state;
383 	struct vc4_hvs_state *old_hvs_state;
384 	unsigned int channel;
385 
386 	old_hvs_state = vc4_hvs_get_old_global_state(state);
387 	if (WARN_ON(IS_ERR(old_hvs_state)))
388 		return;
389 
390 	new_hvs_state = vc4_hvs_get_new_global_state(state);
391 	if (WARN_ON(IS_ERR(new_hvs_state)))
392 		return;
393 
394 	if (vc4->gen < VC4_GEN_6_C) {
395 		struct drm_crtc_state *new_crtc_state;
396 		struct drm_crtc *crtc;
397 		int i;
398 
399 		for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
400 			struct vc4_crtc_state *vc4_crtc_state;
401 
402 			if (!new_crtc_state->commit)
403 				continue;
404 
405 			vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);
406 			vc4_hvs_mask_underrun(hvs, vc4_crtc_state->assigned_channel);
407 		}
408 	}
409 
410 	for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) {
411 		struct drm_crtc_commit *commit;
412 		int ret;
413 
414 		if (!old_hvs_state->fifo_state[channel].in_use)
415 			continue;
416 
417 		commit = old_hvs_state->fifo_state[channel].pending_commit;
418 		if (!commit)
419 			continue;
420 
421 		ret = drm_crtc_commit_wait(commit);
422 		if (ret)
423 			drm_err(dev, "Timed out waiting for commit\n");
424 
425 		drm_crtc_commit_put(commit);
426 		old_hvs_state->fifo_state[channel].pending_commit = NULL;
427 	}
428 
429 	if (vc4->gen == VC4_GEN_5) {
430 		unsigned long state_rate = max(old_hvs_state->core_clock_rate,
431 					       new_hvs_state->core_clock_rate);
432 		unsigned long core_rate = clamp_t(unsigned long, state_rate,
433 						  500000000, hvs->max_core_rate);
434 
435 		drm_dbg(dev, "Raising the core clock at %lu Hz\n", core_rate);
436 
437 		/*
438 		 * Do a temporary request on the core clock during the
439 		 * modeset.
440 		 */
441 		WARN_ON(clk_set_min_rate(hvs->core_clk, core_rate));
442 		WARN_ON(clk_set_min_rate(hvs->disp_clk, core_rate));
443 	}
444 
445 	drm_atomic_helper_commit_modeset_disables(dev, state);
446 
447 	if (vc4->gen <= VC4_GEN_5)
448 		vc4_ctm_commit(vc4, state);
449 
450 	switch (vc4->gen) {
451 	case VC4_GEN_4:
452 		vc4_hvs_pv_muxing_commit(vc4, state);
453 		break;
454 
455 	case VC4_GEN_5:
456 		vc5_hvs_pv_muxing_commit(vc4, state);
457 		break;
458 
459 	case VC4_GEN_6_C:
460 	case VC4_GEN_6_D:
461 		vc6_hvs_pv_muxing_commit(vc4, state);
462 		break;
463 
464 	default:
465 		drm_err(dev, "Unknown VC4 generation: %d", vc4->gen);
466 		break;
467 	}
468 
469 	drm_atomic_helper_commit_planes(dev, state,
470 					DRM_PLANE_COMMIT_ACTIVE_ONLY);
471 
472 	drm_atomic_helper_commit_modeset_enables(dev, state);
473 
474 	drm_atomic_helper_fake_vblank(state);
475 
476 	drm_atomic_helper_commit_hw_done(state);
477 
478 	drm_atomic_helper_wait_for_flip_done(dev, state);
479 
480 	drm_atomic_helper_cleanup_planes(dev, state);
481 
482 	if (vc4->gen == VC4_GEN_5) {
483 		unsigned long core_rate = min_t(unsigned long,
484 						hvs->max_core_rate,
485 						new_hvs_state->core_clock_rate);
486 
487 		drm_dbg(dev, "Running the core clock at %lu Hz\n", core_rate);
488 
489 		/*
490 		 * Request a clock rate based on the current HVS
491 		 * requirements.
492 		 */
493 		WARN_ON(clk_set_min_rate(hvs->core_clk, core_rate));
494 		WARN_ON(clk_set_min_rate(hvs->disp_clk, core_rate));
495 
496 		drm_dbg(dev, "Core clock actual rate: %lu Hz\n",
497 			clk_get_rate(hvs->core_clk));
498 	}
499 }
500 
501 static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
502 {
503 	struct drm_crtc_state *crtc_state;
504 	struct vc4_hvs_state *hvs_state;
505 	struct drm_crtc *crtc;
506 	unsigned int i;
507 
508 	hvs_state = vc4_hvs_get_new_global_state(state);
509 	if (WARN_ON(IS_ERR(hvs_state)))
510 		return PTR_ERR(hvs_state);
511 
512 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
513 		struct vc4_crtc_state *vc4_crtc_state =
514 			to_vc4_crtc_state(crtc_state);
515 		unsigned int channel =
516 			vc4_crtc_state->assigned_channel;
517 
518 		if (channel == VC4_HVS_CHANNEL_DISABLED)
519 			continue;
520 
521 		if (!hvs_state->fifo_state[channel].in_use)
522 			continue;
523 
524 		hvs_state->fifo_state[channel].pending_commit =
525 			drm_crtc_commit_get(crtc_state->commit);
526 	}
527 
528 	return 0;
529 }
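
/*
 * The commit references taken in vc4_atomic_commit_setup() are consumed in
 * vc4_atomic_commit_tail(): the next commit touching the same FIFO finds the
 * stored commit in the old HVS state, waits for it with
 * drm_crtc_commit_wait() before reprogramming the muxing, then drops the
 * reference. Any reference still held when the state is freed is dropped in
 * vc4_hvs_channels_destroy_state().
 */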
530 
531 static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
532 					     struct drm_file *file_priv,
533 					     const struct drm_format_info *info,
534 					     const struct drm_mode_fb_cmd2 *mode_cmd)
535 {
536 	struct vc4_dev *vc4 = to_vc4_dev(dev);
537 	struct drm_mode_fb_cmd2 mode_cmd_local;
538 
539 	if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
540 		return ERR_PTR(-ENODEV);
541 
542 	/* If the user didn't specify a modifier, use the
543 	 * vc4_set_tiling_ioctl() state for the BO.
544 	 */
545 	if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
546 		struct drm_gem_object *gem_obj;
547 		struct vc4_bo *bo;
548 
549 		gem_obj = drm_gem_object_lookup(file_priv,
550 						mode_cmd->handles[0]);
551 		if (!gem_obj) {
552 			DRM_DEBUG("Failed to look up GEM BO %d\n",
553 				  mode_cmd->handles[0]);
554 			return ERR_PTR(-ENOENT);
555 		}
556 		bo = to_vc4_bo(gem_obj);
557 
558 		mode_cmd_local = *mode_cmd;
559 
560 		if (bo->t_format) {
561 			mode_cmd_local.modifier[0] =
562 				DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
563 		} else {
564 			mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
565 		}
566 
567 		drm_gem_object_put(gem_obj);
568 
569 		mode_cmd = &mode_cmd_local;
570 	}
571 
572 	return drm_gem_fb_create(dev, file_priv, info, mode_cmd);
573 }
574 
575 /* Our CTM has some peculiar limitations: we can only enable it for one CRTC
576  * at a time and the HW only supports S0.9 scalars. To account for the latter,
577  * we don't allow userland to set a CTM that we have no hope of approximating.
578  */
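
/*
 * As a rough example of the scalar limit: a coefficient of 1.5 is
 * 0x0000000180000000 in S31.32; with the sign bit masked off its magnitude
 * exceeds BIT_ULL(32), so the check in vc4_ctm_atomic_check() below rejects
 * it with -EINVAL, while exactly 1.0 (0x0000000100000000) is still accepted
 * and simply saturates in vc4_ctm_s31_32_to_s0_9().
 */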
579 static int
580 vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
581 {
582 	struct vc4_dev *vc4 = to_vc4_dev(dev);
583 	struct vc4_ctm_state *ctm_state = NULL;
584 	struct drm_crtc *crtc;
585 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
586 	struct drm_color_ctm *ctm;
587 	int i;
588 
589 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
590 		/* CTM is being disabled. */
591 		if (!new_crtc_state->ctm && old_crtc_state->ctm) {
592 			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
593 			if (IS_ERR(ctm_state))
594 				return PTR_ERR(ctm_state);
595 			ctm_state->fifo = 0;
596 		}
597 	}
598 
599 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
600 		if (new_crtc_state->ctm == old_crtc_state->ctm)
601 			continue;
602 
603 		if (!ctm_state) {
604 			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
605 			if (IS_ERR(ctm_state))
606 				return PTR_ERR(ctm_state);
607 		}
608 
609 		/* CTM is being enabled or the matrix changed. */
610 		if (new_crtc_state->ctm) {
611 			struct vc4_crtc_state *vc4_crtc_state =
612 				to_vc4_crtc_state(new_crtc_state);
613 
614 			/* fifo is 1-based since 0 disables CTM. */
615 			int fifo = vc4_crtc_state->assigned_channel + 1;
616 
617 			/* Check userland isn't trying to turn on CTM for more
618 			 * than one CRTC at a time.
619 			 */
620 			if (ctm_state->fifo && ctm_state->fifo != fifo) {
621 				DRM_DEBUG_DRIVER("Too many CTM configured\n");
622 				return -EINVAL;
623 			}
624 
625 			/* Check we can approximate the specified CTM.
626 			 * We disallow scalars |c| > 1.0 since the HW has
627 			 * no integer bits.
628 			 */
629 			ctm = new_crtc_state->ctm->data;
630 			for (i = 0; i < ARRAY_SIZE(ctm->matrix); i++) {
631 				u64 val = ctm->matrix[i];
632 
633 				val &= ~BIT_ULL(63);
634 				if (val > BIT_ULL(32))
635 					return -EINVAL;
636 			}
637 
638 			ctm_state->fifo = fifo;
639 			ctm_state->ctm = ctm;
640 		}
641 	}
642 
643 	return 0;
644 }
645 
646 static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
647 {
648 	struct drm_plane_state *old_plane_state, *new_plane_state;
649 	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
650 	struct vc4_load_tracker_state *load_state;
651 	struct drm_private_state *priv_state;
652 	struct drm_plane *plane;
653 	int i;
654 
655 	priv_state = drm_atomic_get_private_obj_state(state,
656 						      &vc4->load_tracker);
657 	if (IS_ERR(priv_state))
658 		return PTR_ERR(priv_state);
659 
660 	load_state = to_vc4_load_tracker_state(priv_state);
661 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
662 				       new_plane_state, i) {
663 		struct vc4_plane_state *vc4_plane_state;
664 
665 		if (old_plane_state->fb && old_plane_state->crtc) {
666 			vc4_plane_state = to_vc4_plane_state(old_plane_state);
667 			load_state->membus_load -= vc4_plane_state->membus_load;
668 			load_state->hvs_load -= vc4_plane_state->hvs_load;
669 		}
670 
671 		if (new_plane_state->fb && new_plane_state->crtc) {
672 			vc4_plane_state = to_vc4_plane_state(new_plane_state);
673 			load_state->membus_load += vc4_plane_state->membus_load;
674 			load_state->hvs_load += vc4_plane_state->hvs_load;
675 		}
676 	}
677 
678 	/* Don't check the load when the tracker is disabled. */
679 	if (!vc4->load_tracker_enabled)
680 		return 0;
681 
682 	/* The absolute limit is 2Gbyte/sec, but let's take a margin to let
683 	 * the system work when other blocks are accessing the memory.
684 	 */
685 	if (load_state->membus_load > SZ_1G + SZ_512M)
686 		return -ENOSPC;
687 
688 	/* HVS clock is supposed to run @ 250Mhz, let's take a margin and
689 	 * consider the maximum number of cycles is 240M.
690 	 */
691 	if (load_state->hvs_load > 240000000ULL)
692 		return -ENOSPC;
693 
694 	return 0;
695 }
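
/*
 * Rough orders of magnitude for the 1.5 GB/s budget above (the per-plane
 * membus_load values summed here are filled in by the plane atomic checks):
 * a single 1920x1080 ARGB8888 plane refreshed at 60 Hz reads roughly
 * 1920 * 1080 * 4 * 60 bytes/s, i.e. about 500 MB/s, so a couple of
 * full-screen planes fit comfortably while a stack of large overlapping
 * planes can push membus_load past the limit and fail with -ENOSPC.
 */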
696 
697 static struct drm_private_state *
698 vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
699 {
700 	struct vc4_load_tracker_state *state;
701 
702 	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
703 	if (!state)
704 		return NULL;
705 
706 	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
707 
708 	return &state->base;
709 }
710 
711 static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
712 					   struct drm_private_state *state)
713 {
714 	struct vc4_load_tracker_state *load_state;
715 
716 	load_state = to_vc4_load_tracker_state(state);
717 	kfree(load_state);
718 }
719 
720 static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
721 	.atomic_duplicate_state = vc4_load_tracker_duplicate_state,
722 	.atomic_destroy_state = vc4_load_tracker_destroy_state,
723 };
724 
725 static void vc4_load_tracker_obj_fini(struct drm_device *dev, void *unused)
726 {
727 	struct vc4_dev *vc4 = to_vc4_dev(dev);
728 
729 	drm_atomic_private_obj_fini(&vc4->load_tracker);
730 }
731 
732 static int vc4_load_tracker_obj_init(struct vc4_dev *vc4)
733 {
734 	struct vc4_load_tracker_state *load_state;
735 
736 	load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
737 	if (!load_state)
738 		return -ENOMEM;
739 
740 	drm_atomic_private_obj_init(&vc4->base, &vc4->load_tracker,
741 				    &load_state->base,
742 				    &vc4_load_tracker_state_funcs);
743 
744 	return drmm_add_action_or_reset(&vc4->base, vc4_load_tracker_obj_fini, NULL);
745 }
746 
747 static struct drm_private_state *
748 vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
749 {
750 	struct vc4_hvs_state *old_state = to_vc4_hvs_state(obj->state);
751 	struct vc4_hvs_state *state;
752 	unsigned int i;
753 
754 	state = kzalloc(sizeof(*state), GFP_KERNEL);
755 	if (!state)
756 		return NULL;
757 
758 	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
759 
760 	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
761 		state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
762 		state->fifo_state[i].fifo_load = old_state->fifo_state[i].fifo_load;
763 	}
764 
765 	state->core_clock_rate = old_state->core_clock_rate;
766 
767 	return &state->base;
768 }
769 
770 static void vc4_hvs_channels_destroy_state(struct drm_private_obj *obj,
771 					   struct drm_private_state *state)
772 {
773 	struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
774 	unsigned int i;
775 
776 	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
777 		if (!hvs_state->fifo_state[i].pending_commit)
778 			continue;
779 
780 		drm_crtc_commit_put(hvs_state->fifo_state[i].pending_commit);
781 	}
782 
783 	kfree(hvs_state);
784 }
785 
786 static void vc4_hvs_channels_print_state(struct drm_printer *p,
787 					 const struct drm_private_state *state)
788 {
789 	const struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
790 	unsigned int i;
791 
792 	drm_printf(p, "HVS State\n");
793 	drm_printf(p, "\tCore Clock Rate: %lu\n", hvs_state->core_clock_rate);
794 
795 	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
796 		drm_printf(p, "\tChannel %d\n", i);
797 		drm_printf(p, "\t\tin use=%d\n", hvs_state->fifo_state[i].in_use);
798 		drm_printf(p, "\t\tload=%lu\n", hvs_state->fifo_state[i].fifo_load);
799 	}
800 }
801 
802 static const struct drm_private_state_funcs vc4_hvs_state_funcs = {
803 	.atomic_duplicate_state = vc4_hvs_channels_duplicate_state,
804 	.atomic_destroy_state = vc4_hvs_channels_destroy_state,
805 	.atomic_print_state = vc4_hvs_channels_print_state,
806 };
807 
808 static void vc4_hvs_channels_obj_fini(struct drm_device *dev, void *unused)
809 {
810 	struct vc4_dev *vc4 = to_vc4_dev(dev);
811 
812 	drm_atomic_private_obj_fini(&vc4->hvs_channels);
813 }
814 
815 static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4)
816 {
817 	struct vc4_hvs_state *state;
818 
819 	state = kzalloc(sizeof(*state), GFP_KERNEL);
820 	if (!state)
821 		return -ENOMEM;
822 
823 	drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels,
824 				    &state->base,
825 				    &vc4_hvs_state_funcs);
826 
827 	return drmm_add_action_or_reset(&vc4->base, vc4_hvs_channels_obj_fini, NULL);
828 }
829 
830 static int cmp_vc4_crtc_hvs_output(const void *a, const void *b)
831 {
832 	const struct vc4_crtc *crtc_a =
833 		to_vc4_crtc(*(const struct drm_crtc **)a);
834 	const struct vc4_crtc_data *data_a =
835 		vc4_crtc_to_vc4_crtc_data(crtc_a);
836 	const struct vc4_crtc *crtc_b =
837 		to_vc4_crtc(*(const struct drm_crtc **)b);
838 	const struct vc4_crtc_data *data_b =
839 		vc4_crtc_to_vc4_crtc_data(crtc_b);
840 
841 	return data_a->hvs_output - data_b->hvs_output;
842 }
843 
844 /*
845  * The BCM2711 HVS has up to 7 outputs connected to the pixelvalves and
846  * the TXP (and therefore all the CRTCs found on that platform).
847  *
848  * The naive (and our initial) implementation would just iterate over
849  * all the active CRTCs, try to find a suitable FIFO, and then remove it
850  * from the pool of available FIFOs. However, there are a few corner
851  * cases that need to be considered:
852  *
853  * - When running in a dual-display setup (so with two CRTCs involved),
854  *   we can update the state of a single CRTC (for example by changing
855  *   its mode using xrandr under X11) without affecting the other. In
856  *   this case, the other CRTC wouldn't be in the state at all, so we
857  *   need to consider all the running CRTCs in the DRM device to assign
858  *   a FIFO, not just the one in the state.
859  *
860  * - To fix the above, we can't use drm_atomic_get_crtc_state on all
861  *   enabled CRTCs to pull their CRTC state into the global state, since
862  *   a page flip would start considering their vblank to complete. Since
863  *   we don't have a guarantee that they are actually active, that
864  *   vblank might never happen, and shouldn't even be considered if we
865  *   want to do a page flip on a single CRTC. That can be tested by
866  *   doing a modetest -v first on HDMI1 and then on HDMI0.
867  *
868  * - Since we need the pixelvalve to be disabled and enabled back when
869  *   the FIFO is changed, we should keep the FIFO assigned for as long
870  *   as the CRTC is enabled, only considering it free again once that
871  *   CRTC has been disabled. This can be tested by booting X11 on a
872  *   single display, and changing the resolution down and then back up.
873  */
874 static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
875 				      struct drm_atomic_state *state)
876 {
877 	struct vc4_hvs_state *hvs_new_state;
878 	struct drm_crtc **sorted_crtcs;
879 	struct drm_crtc *crtc;
880 	unsigned int unassigned_channels = 0;
881 	unsigned int i;
882 	int ret;
883 
884 	hvs_new_state = vc4_hvs_get_global_state(state);
885 	if (IS_ERR(hvs_new_state))
886 		return PTR_ERR(hvs_new_state);
887 
888 	for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++)
889 		if (!hvs_new_state->fifo_state[i].in_use)
890 			unassigned_channels |= BIT(i);
891 
892 	/*
893 	 * The problem we have to solve here is that we have up to 7
894 	 * encoders, connected to up to 6 CRTCs.
895 	 *
896 	 * Those CRTCs, depending on the instance, can be routed to 1, 2
897 	 * or 3 HVS FIFOs, and we need to set the muxing between FIFOs and
898 	 * outputs in the HVS accordingly.
899 	 *
900 	 * It would be pretty hard to come up with an algorithm that
901 	 * would generically solve this. However, the current routing
902 	 * trees we support allow us to simplify a bit the problem.
903 	 *
904 	 * Indeed, with the current supported layouts, if we try to
905 	 * assign in the ascending crtc index order the FIFOs, we can't
906 	 * fall into the situation where an earlier CRTC that had
907 	 * multiple routes is assigned one that was the only option for
908 	 * a later CRTC.
909 	 *
910 	 * If the layout changes and doesn't give us that in the future,
911 	 * we will need to have something smarter, but it works so far.
912 	 */
913 	sorted_crtcs = kmalloc_array(dev->num_crtcs, sizeof(*sorted_crtcs), GFP_KERNEL);
914 	if (!sorted_crtcs)
915 		return -ENOMEM;
916 
917 	i = 0;
918 	drm_for_each_crtc(crtc, dev)
919 		sorted_crtcs[i++] = crtc;
920 
921 	sort(sorted_crtcs, i, sizeof(*sorted_crtcs), cmp_vc4_crtc_hvs_output, NULL);
922 
923 	for (i = 0; i < dev->num_crtcs; i++) {
924 		struct vc4_crtc_state *old_vc4_crtc_state, *new_vc4_crtc_state;
925 		struct drm_crtc_state *old_crtc_state, *new_crtc_state;
926 		struct vc4_crtc *vc4_crtc;
927 		unsigned int matching_channels;
928 		unsigned int channel;
929 
930 		crtc = sorted_crtcs[i];
931 		if (!crtc)
932 			continue;
933 		vc4_crtc = to_vc4_crtc(crtc);
934 
935 		old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
936 		if (!old_crtc_state)
937 			continue;
938 		old_vc4_crtc_state = to_vc4_crtc_state(old_crtc_state);
939 
940 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
941 		if (!new_crtc_state)
942 			continue;
943 		new_vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);
944 
945 		drm_dbg(dev, "%s: Trying to find a channel.\n", crtc->name);
946 
947 		/* Nothing to do here, let's skip it */
948 		if (old_crtc_state->enable == new_crtc_state->enable) {
949 			if (new_crtc_state->enable)
950 				drm_dbg(dev, "%s: Already enabled, reusing channel %d.\n",
951 					crtc->name, new_vc4_crtc_state->assigned_channel);
952 			else
953 				drm_dbg(dev, "%s: Disabled, ignoring.\n", crtc->name);
954 
955 			continue;
956 		}
957 
958 		/* Muxing will need to be modified, mark it as such */
959 		new_vc4_crtc_state->update_muxing = true;
960 
961 		/* If we're disabling our CRTC, we put back our channel */
962 		if (!new_crtc_state->enable) {
963 			channel = old_vc4_crtc_state->assigned_channel;
964 
965 			drm_dbg(dev, "%s: Disabling, Freeing channel %d\n",
966 				crtc->name, channel);
967 
968 			hvs_new_state->fifo_state[channel].in_use = false;
969 			new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
970 			continue;
971 		}
972 
973 		matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels;
974 		if (!matching_channels) {
975 			ret = -EINVAL;
976 			goto err_free_crtc_array;
977 		}
978 
979 		channel = ffs(matching_channels) - 1;
980 
981 		drm_dbg(dev, "Assigned HVS channel %d to CRTC %s\n", channel, crtc->name);
982 		new_vc4_crtc_state->assigned_channel = channel;
983 		unassigned_channels &= ~BIT(channel);
984 		hvs_new_state->fifo_state[channel].in_use = true;
985 	}
986 
987 	kfree(sorted_crtcs);
988 	return 0;
989 
990 err_free_crtc_array:
991 	kfree(sorted_crtcs);
992 	return ret;
993 }
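
/*
 * A hypothetical walk through the assignment loop above: assume channel 0 is
 * already in use by a running CRTC and a newly enabled CRTC advertises
 * hvs_available_channels == BIT(0) | BIT(2). matching_channels is then
 * BIT(2), ffs(matching_channels) - 1 selects channel 2, the channel is
 * cleared from unassigned_channels and marked in_use, and assigned_channel
 * in the CRTC state is what the muxing commit helpers later program.
 */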
994 
995 static int
996 vc4_core_clock_atomic_check(struct drm_atomic_state *state)
997 {
998 	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
999 	struct drm_private_state *priv_state;
1000 	struct vc4_hvs_state *hvs_new_state;
1001 	struct vc4_load_tracker_state *load_state;
1002 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1003 	struct drm_crtc *crtc;
1004 	unsigned int num_outputs;
1005 	unsigned long pixel_rate;
1006 	unsigned long cob_rate;
1007 	unsigned int i;
1008 
1009 	priv_state = drm_atomic_get_private_obj_state(state,
1010 						      &vc4->load_tracker);
1011 	if (IS_ERR(priv_state))
1012 		return PTR_ERR(priv_state);
1013 
1014 	load_state = to_vc4_load_tracker_state(priv_state);
1015 
1016 	hvs_new_state = vc4_hvs_get_global_state(state);
1017 	if (IS_ERR(hvs_new_state))
1018 		return PTR_ERR(hvs_new_state);
1019 
1020 	for_each_oldnew_crtc_in_state(state, crtc,
1021 				      old_crtc_state,
1022 				      new_crtc_state,
1023 				      i) {
1024 		if (old_crtc_state->active) {
1025 			struct vc4_crtc_state *old_vc4_state =
1026 				to_vc4_crtc_state(old_crtc_state);
1027 			unsigned int channel = old_vc4_state->assigned_channel;
1028 
1029 			hvs_new_state->fifo_state[channel].fifo_load = 0;
1030 		}
1031 
1032 		if (new_crtc_state->active) {
1033 			struct vc4_crtc_state *new_vc4_state =
1034 				to_vc4_crtc_state(new_crtc_state);
1035 			unsigned int channel = new_vc4_state->assigned_channel;
1036 
1037 			hvs_new_state->fifo_state[channel].fifo_load =
1038 				new_vc4_state->hvs_load;
1039 		}
1040 	}
1041 
1042 	cob_rate = 0;
1043 	num_outputs = 0;
1044 	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
1045 		if (!hvs_new_state->fifo_state[i].in_use)
1046 			continue;
1047 
1048 		num_outputs++;
1049 		cob_rate = max_t(unsigned long,
1050 				 hvs_new_state->fifo_state[i].fifo_load,
1051 				 cob_rate);
1052 	}
1053 
1054 	pixel_rate = load_state->hvs_load;
1055 	if (num_outputs > 1) {
1056 		pixel_rate = (pixel_rate * 40) / 100;
1057 	} else {
1058 		pixel_rate = (pixel_rate * 60) / 100;
1059 	}
1060 
1061 	hvs_new_state->core_clock_rate = max(cob_rate, pixel_rate);
1062 
1063 	return 0;
1064 }
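
/*
 * For illustration, with made-up numbers: two FIFOs in use with fifo_load
 * values of 150 MHz and 600 MHz give a cob_rate of 600 MHz; with more than
 * one active output the accumulated hvs_load is scaled by 40%, so an
 * hvs_load of 1 GHz yields a pixel_rate of 400 MHz, and core_clock_rate
 * becomes max(600 MHz, 400 MHz) = 600 MHz. On VC4_GEN_5 the commit tail then
 * clamps its temporary clock request between 500 MHz and hvs->max_core_rate.
 */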
1065 
1066 
1067 static int
1068 vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
1069 {
1070 	int ret;
1071 
1072 	ret = vc4_pv_muxing_atomic_check(dev, state);
1073 	if (ret)
1074 		return ret;
1075 
1076 	ret = vc4_ctm_atomic_check(dev, state);
1077 	if (ret < 0)
1078 		return ret;
1079 
1080 	ret = drm_atomic_helper_check(dev, state);
1081 	if (ret)
1082 		return ret;
1083 
1084 	ret = vc4_load_tracker_atomic_check(state);
1085 	if (ret)
1086 		return ret;
1087 
1088 	return vc4_core_clock_atomic_check(state);
1089 }
1090 
1091 static struct drm_mode_config_helper_funcs vc4_mode_config_helpers = {
1092 	.atomic_commit_setup	= vc4_atomic_commit_setup,
1093 	.atomic_commit_tail	= vc4_atomic_commit_tail,
1094 };
1095 
1096 static const struct drm_mode_config_funcs vc4_mode_funcs = {
1097 	.atomic_check = vc4_atomic_check,
1098 	.atomic_commit = drm_atomic_helper_commit,
1099 	.fb_create = vc4_fb_create,
1100 };
1101 
1102 static const struct drm_mode_config_funcs vc5_mode_funcs = {
1103 	.atomic_check = vc4_atomic_check,
1104 	.atomic_commit = drm_atomic_helper_commit,
1105 	.fb_create = drm_gem_fb_create,
1106 };
1107 
1108 int vc4_kms_load(struct drm_device *dev)
1109 {
1110 	struct vc4_dev *vc4 = to_vc4_dev(dev);
1111 	int ret;
1112 
1113 	/*
1114 	 * The limits enforced by the load tracker aren't relevant for
1115 	 * the BCM2711, but the load tracker computations are used for
1116 	 * the core clock rate calculation.
1117 	 */
1118 	if (vc4->gen == VC4_GEN_4) {
1119 		/* Start with the load tracker enabled. Can be
1120 		 * disabled through the debugfs load_tracker file.
1121 		 */
1122 		vc4->load_tracker_enabled = true;
1123 	}
1124 
1125 	/* Set support for vblank irq fast disable, before drm_vblank_init() */
1126 	dev->vblank_disable_immediate = true;
1127 
1128 	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
1129 	if (ret < 0) {
1130 		dev_err(dev->dev, "failed to initialize vblank\n");
1131 		return ret;
1132 	}
1133 
1134 	if (vc4->gen >= VC4_GEN_6_C) {
1135 		dev->mode_config.max_width = 8192;
1136 		dev->mode_config.max_height = 8192;
1137 	} else if (vc4->gen >= VC4_GEN_5) {
1138 		dev->mode_config.max_width = 7680;
1139 		dev->mode_config.max_height = 7680;
1140 	} else {
1141 		dev->mode_config.max_width = 2048;
1142 		dev->mode_config.max_height = 2048;
1143 	}
1144 
1145 	dev->mode_config.funcs = (vc4->gen > VC4_GEN_4) ? &vc5_mode_funcs : &vc4_mode_funcs;
1146 	dev->mode_config.helper_private = &vc4_mode_config_helpers;
1147 	dev->mode_config.preferred_depth = 24;
1148 	dev->mode_config.async_page_flip = true;
1149 	dev->mode_config.normalize_zpos = true;
1150 
1151 	ret = vc4_ctm_obj_init(vc4);
1152 	if (ret)
1153 		return ret;
1154 
1155 	ret = vc4_load_tracker_obj_init(vc4);
1156 	if (ret)
1157 		return ret;
1158 
1159 	ret = vc4_hvs_channels_obj_init(vc4);
1160 	if (ret)
1161 		return ret;
1162 
1163 	drm_mode_config_reset(dev);
1164 
1165 	drm_kms_helper_poll_init(dev);
1166 
1167 	return 0;
1168 }
1169