// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#define pr_fmt(fmt) "[drm:%s] " fmt, __func__
#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_cdm.h"
#include "dpu_hw_cwb.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_sspp.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_wb.h"
#include "dpu_hw_dspp.h"
#include "dpu_hw_merge3d.h"
#include "dpu_hw_dsc.h"
#include "dpu_encoder.h"
#include "dpu_trace.h"

static inline bool reserved_by_other(uint32_t *res_map, int idx,
				     uint32_t crtc_id)
{
	return res_map[idx] && res_map[idx] != crtc_id;
}
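
/*
 * Illustrative example (not in the original source): with
 * res_map = { 0, 5, 7 } and crtc_id = 5,
 *   reserved_by_other(res_map, 0, 5) -> false (slot is free)
 *   reserved_by_other(res_map, 1, 5) -> false (reserved by us)
 *   reserved_by_other(res_map, 2, 5) -> true  (reserved by crtc 7)
 */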

/**
 * dpu_rm_init - Read hardware catalog and create reservation tracking objects
 *	for all HW blocks.
 * @dev: Corresponding device for devres management
 * @rm: DPU Resource Manager handle
 * @cat: Pointer to hardware catalog
 * @mdss_data: Pointer to MDSS / UBWC configuration
 * @mmio: mapped register io address of MDP
 * @return: 0 on Success otherwise -ERROR
 */
int dpu_rm_init(struct drm_device *dev,
		struct dpu_rm *rm,
		const struct dpu_mdss_cfg *cat,
		const struct msm_mdss_data *mdss_data,
		void __iomem *mmio)
{
	int rc, i;

	if (!rm || !cat || !mmio) {
		DPU_ERROR("invalid input params\n");
		return -EINVAL;
	}

	/* Clear, setup lists */
	memset(rm, 0, sizeof(*rm));

	/* Interrogate HW catalog and create tracking items for hw blocks */
	for (i = 0; i < cat->mixer_count; i++) {
		struct dpu_hw_mixer *hw;
		const struct dpu_lm_cfg *lm = &cat->mixer[i];

		hw = dpu_hw_lm_init(dev, lm, mmio);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed lm object creation: err %d\n", rc);
			goto fail;
		}
		rm->mixer_blks[lm->id - LM_0] = &hw->base;
	}

	for (i = 0; i < cat->merge_3d_count; i++) {
		struct dpu_hw_merge_3d *hw;
		const struct dpu_merge_3d_cfg *merge_3d = &cat->merge_3d[i];

		hw = dpu_hw_merge_3d_init(dev, merge_3d, mmio);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed merge_3d object creation: err %d\n",
				rc);
			goto fail;
		}
		rm->merge_3d_blks[merge_3d->id - MERGE_3D_0] = &hw->base;
	}

	for (i = 0; i < cat->pingpong_count; i++) {
		struct dpu_hw_pingpong *hw;
		const struct dpu_pingpong_cfg *pp = &cat->pingpong[i];

		hw = dpu_hw_pingpong_init(dev, pp, mmio, cat->mdss_ver);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed pingpong object creation: err %d\n",
				rc);
			goto fail;
		}
		if (pp->merge_3d && pp->merge_3d < MERGE_3D_MAX)
			hw->merge_3d = to_dpu_hw_merge_3d(rm->merge_3d_blks[pp->merge_3d - MERGE_3D_0]);
		rm->pingpong_blks[pp->id - PINGPONG_0] = &hw->base;
	}

	for (i = 0; i < cat->intf_count; i++) {
		struct dpu_hw_intf *hw;
		const struct dpu_intf_cfg *intf = &cat->intf[i];

		hw = dpu_hw_intf_init(dev, intf, mmio, cat->mdss_ver);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed intf object creation: err %d\n", rc);
			goto fail;
		}
		rm->hw_intf[intf->id - INTF_0] = hw;
	}

	for (i = 0; i < cat->wb_count; i++) {
		struct dpu_hw_wb *hw;
		const struct dpu_wb_cfg *wb = &cat->wb[i];

		hw = dpu_hw_wb_init(dev, wb, mmio, cat->mdss_ver);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed wb object creation: err %d\n", rc);
			goto fail;
		}
		rm->hw_wb[wb->id - WB_0] = hw;
	}

	for (i = 0; i < cat->cwb_count; i++) {
		struct dpu_hw_cwb *hw;
		const struct dpu_cwb_cfg *cwb = &cat->cwb[i];

		hw = dpu_hw_cwb_init(dev, cwb, mmio);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed cwb object creation: err %d\n", rc);
			goto fail;
		}
		rm->cwb_blks[cwb->id - CWB_0] = &hw->base;
	}

	for (i = 0; i < cat->ctl_count; i++) {
		struct dpu_hw_ctl *hw;
		const struct dpu_ctl_cfg *ctl = &cat->ctl[i];

		hw = dpu_hw_ctl_init(dev, ctl, mmio, cat->mixer_count, cat->mixer);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed ctl object creation: err %d\n", rc);
			goto fail;
		}
		rm->ctl_blks[ctl->id - CTL_0] = &hw->base;
	}

	for (i = 0; i < cat->dspp_count; i++) {
		struct dpu_hw_dspp *hw;
		const struct dpu_dspp_cfg *dspp = &cat->dspp[i];

		hw = dpu_hw_dspp_init(dev, dspp, mmio);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed dspp object creation: err %d\n", rc);
			goto fail;
		}
		rm->dspp_blks[dspp->id - DSPP_0] = &hw->base;
	}

	for (i = 0; i < cat->dsc_count; i++) {
		struct dpu_hw_dsc *hw;
		const struct dpu_dsc_cfg *dsc = &cat->dsc[i];

		if (test_bit(DPU_DSC_HW_REV_1_2, &dsc->features))
			hw = dpu_hw_dsc_init_1_2(dev, dsc, mmio);
		else
			hw = dpu_hw_dsc_init(dev, dsc, mmio);

		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed dsc object creation: err %d\n", rc);
			goto fail;
		}
		rm->dsc_blks[dsc->id - DSC_0] = &hw->base;
	}

	for (i = 0; i < cat->sspp_count; i++) {
		struct dpu_hw_sspp *hw;
		const struct dpu_sspp_cfg *sspp = &cat->sspp[i];

		hw = dpu_hw_sspp_init(dev, sspp, mmio, mdss_data, cat->mdss_ver);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed sspp object creation: err %d\n", rc);
			goto fail;
		}
		rm->hw_sspp[sspp->id - SSPP_NONE] = hw;
	}

	if (cat->cdm) {
		struct dpu_hw_cdm *hw;

		hw = dpu_hw_cdm_init(dev, cat->cdm, mmio, cat->mdss_ver);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed cdm object creation: err %d\n", rc);
			goto fail;
		}
		rm->cdm_blk = &hw->base;
	}

	return 0;

fail:
	return rc ? rc : -EFAULT;
}
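
/*
 * Note: every HW block created above is devres-managed against @dev
 * (see the init helpers' dev argument), which is presumably why the
 * fail: path only returns the error code and the RM needs no matching
 * teardown function.
 */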

static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
{
	return top->num_intf > 1;
}

/**
 * _dpu_rm_get_lm_peer - get the id of a mixer which is a peer of the primary
 * @rm: dpu resource manager handle
 * @primary_idx: index of primary mixer in rm->mixer_blks[]
 *
 * Returns: lm peer mixer id on success or %-EINVAL on error
 */
static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx)
{
	const struct dpu_lm_cfg *prim_lm_cfg;

	prim_lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[primary_idx])->cap;

	if (prim_lm_cfg->lm_pair >= LM_0 && prim_lm_cfg->lm_pair < LM_MAX)
		return prim_lm_cfg->lm_pair - LM_0;
	return -EINVAL;
}
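
/*
 * Illustrative example: if the catalog pairs LM_0 with LM_1 through
 * lm_pair, _dpu_rm_get_lm_peer(rm, 0) returns 1; a mixer without a
 * valid lm_pair entry yields -EINVAL.
 */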

static int _dpu_rm_reserve_cwb_mux_and_pingpongs(struct dpu_rm *rm,
						 struct dpu_global_state *global_state,
						 uint32_t crtc_id,
						 struct msm_display_topology *topology)
{
	int num_cwb_mux = topology->num_lm, cwb_mux_count = 0;
	int cwb_pp_start_idx = PINGPONG_CWB_0 - PINGPONG_0;
	int cwb_pp_idx[MAX_BLOCKS];
	int cwb_mux_idx[MAX_BLOCKS];

	/*
	 * Reserve additional dedicated CWB PINGPONG blocks and muxes for each
	 * mixer
	 *
	 * TODO: add support for reserving resources on platforms with no
	 * PINGPONG_CWB
	 */
	for (int i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
	     cwb_mux_count < num_cwb_mux; i++) {
		for (int j = 0; j < ARRAY_SIZE(rm->cwb_blks); j++) {
			/*
			 * Odd LMs must be assigned to odd CWB muxes and even
			 * LMs to even CWB muxes.
			 *
			 * Since the RM HW block array index is based on the HW
			 * block ids, we can also use the array index to enforce
			 * the odd/even rule. See dpu_rm_init() for more
			 * information
			 */
			if (reserved_by_other(global_state->cwb_to_crtc_id, j, crtc_id) ||
			    i % 2 != j % 2)
				continue;

			cwb_mux_idx[cwb_mux_count] = j;
			cwb_pp_idx[cwb_mux_count] = j + cwb_pp_start_idx;
			cwb_mux_count++;
			break;
		}
	}

	if (cwb_mux_count != num_cwb_mux) {
		DPU_ERROR("Unable to reserve all CWB PINGPONGs\n");
		return -ENAVAIL;
	}

	for (int i = 0; i < cwb_mux_count; i++) {
		global_state->pingpong_to_crtc_id[cwb_pp_idx[i]] = crtc_id;
		global_state->cwb_to_crtc_id[cwb_mux_idx[i]] = crtc_id;
	}

	return 0;
}
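
/*
 * Illustrative example: a two-mixer CWB topology on an otherwise idle
 * system claims CWB muxes 0 and 1 for the even and odd LM indices,
 * which in turn reserve PINGPONG_CWB_0 and PINGPONG_CWB_1 via
 * cwb_pp_start_idx.
 */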

/**
 * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
 *	proposed use case requirements, incl. hardwired dependent blocks like
 *	pingpong
 * @rm: dpu resource manager handle
 * @global_state: resources shared across multiple kms objects
 * @crtc_id: crtc id requesting for allocation
 * @lm_idx: index of proposed layer mixer in rm->mixer_blks[], function checks
 *	if lm, and all other hardwired blocks connected to the lm (pp) are
 *	available and appropriate
 * @pp_idx: output parameter, index of pingpong block attached to the layer
 *	mixer in rm->pingpong_blks[].
 * @dspp_idx: output parameter, index of dspp block attached to the layer
 *	mixer in rm->dspp_blks[].
 * @topology: selected topology for the display
 * Return: true if lm matches all requirements, false otherwise
 */
static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t crtc_id, int lm_idx, int *pp_idx, int *dspp_idx,
		struct msm_display_topology *topology)
{
	const struct dpu_lm_cfg *lm_cfg;
	int idx;

	/* Already reserved? */
	if (reserved_by_other(global_state->mixer_to_crtc_id, lm_idx, crtc_id)) {
		DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0);
		return false;
	}

	lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[lm_idx])->cap;
	idx = lm_cfg->pingpong - PINGPONG_0;
	if (idx < 0 || idx >= ARRAY_SIZE(rm->pingpong_blks)) {
		DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
		return false;
	}

	if (reserved_by_other(global_state->pingpong_to_crtc_id, idx, crtc_id)) {
		DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id,
			  lm_cfg->pingpong);
		return false;
	}
	*pp_idx = idx;

	if (!topology->num_dspp)
		return true;

	idx = lm_cfg->dspp - DSPP_0;
	if (idx < 0 || idx >= ARRAY_SIZE(rm->dspp_blks)) {
		DPU_ERROR("failed to get dspp on lm %d\n", lm_cfg->dspp);
		return false;
	}

	if (reserved_by_other(global_state->dspp_to_crtc_id, idx, crtc_id)) {
		DPU_DEBUG("lm %d dspp %d already reserved\n", lm_cfg->id,
			  lm_cfg->dspp);
		return false;
	}
	*dspp_idx = idx;

	return true;
}

static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
			       struct dpu_global_state *global_state,
			       uint32_t crtc_id,
			       struct msm_display_topology *topology)
{
	int lm_idx[MAX_BLOCKS];
	int pp_idx[MAX_BLOCKS];
	int dspp_idx[MAX_BLOCKS] = {0};
	int i, lm_count = 0;

	if (!topology->num_lm) {
		DPU_ERROR("invalid number of lm: %d\n", topology->num_lm);
		return -EINVAL;
	}

	/* Find a primary mixer */
	for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
	     lm_count < topology->num_lm; i++) {
		if (!rm->mixer_blks[i])
			continue;

		lm_count = 0;
		lm_idx[lm_count] = i;

		if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
				crtc_id, i, &pp_idx[lm_count],
				&dspp_idx[lm_count], topology)) {
			continue;
		}

		++lm_count;

		/* Valid primary mixer found, find matching peers */
		if (lm_count < topology->num_lm) {
			int j = _dpu_rm_get_lm_peer(rm, i);

			/* ignore the peer if there is an error or if the peer was already processed */
			if (j < 0 || j < i)
				continue;

			if (!rm->mixer_blks[j])
				continue;

			if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
					global_state, crtc_id, j,
					&pp_idx[lm_count], &dspp_idx[lm_count],
					topology)) {
				continue;
			}

			lm_idx[lm_count] = j;
			++lm_count;
		}
	}

	if (lm_count != topology->num_lm) {
		DPU_DEBUG("unable to find appropriate mixers\n");
		return -ENAVAIL;
	}

	for (i = 0; i < lm_count; i++) {
		global_state->mixer_to_crtc_id[lm_idx[i]] = crtc_id;
		global_state->pingpong_to_crtc_id[pp_idx[i]] = crtc_id;
		global_state->dspp_to_crtc_id[dspp_idx[i]] =
			topology->num_dspp ? crtc_id : 0;

		trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, crtc_id,
					 pp_idx[i] + PINGPONG_0);
	}

	return 0;
}

static int _dpu_rm_reserve_ctls(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t crtc_id,
		const struct msm_display_topology *top)
{
	int ctl_idx[MAX_BLOCKS];
	int i = 0, j, num_ctls;
	bool needs_split_display;

	/*
	 * For non-CWB mode, each hw_intf needs its own hw_ctl to program its
	 * control path.
	 *
	 * Hardcode num_ctls to 1 if CWB is enabled because in CWB, both the
	 * writeback and real-time encoders must be driven by the same control
	 * path
	 */
	if (top->cwb_enabled)
		num_ctls = 1;
	else
		num_ctls = top->num_intf;

	needs_split_display = _dpu_rm_needs_split_display(top);

	for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) {
		const struct dpu_hw_ctl *ctl;
		unsigned long features;
		bool has_split_display;

		if (!rm->ctl_blks[j])
			continue;
		if (reserved_by_other(global_state->ctl_to_crtc_id, j, crtc_id))
			continue;

		ctl = to_dpu_hw_ctl(rm->ctl_blks[j]);
		features = ctl->caps->features;
		has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;

		DPU_DEBUG("ctl %d caps 0x%lX\n", j + CTL_0, features);

		if (needs_split_display != has_split_display)
			continue;

		ctl_idx[i] = j;
		DPU_DEBUG("ctl %d match\n", j + CTL_0);

		if (++i == num_ctls)
			break;
	}

	if (i != num_ctls)
		return -ENAVAIL;

	for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) {
		global_state->ctl_to_crtc_id[ctl_idx[i]] = crtc_id;
		trace_dpu_rm_reserve_ctls(ctl_idx[i] + CTL_0, crtc_id);
	}

	return 0;
}
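
/*
 * Illustrative example: a dual-INTF display without CWB needs two CTLs
 * that both advertise DPU_CTL_SPLIT_DISPLAY, while a CWB use case is
 * served by a single CTL driving both the real-time and writeback
 * encoders.
 */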

static int _dpu_rm_pingpong_next_index(struct dpu_global_state *global_state,
				       int start,
				       uint32_t crtc_id)
{
	int i;

	for (i = start; i < (PINGPONG_MAX - PINGPONG_0); i++) {
		if (global_state->pingpong_to_crtc_id[i] == crtc_id)
			return i;
	}

	return -ENAVAIL;
}

static int _dpu_rm_pingpong_dsc_check(int dsc_idx, int pp_idx)
{
	/*
	 * DSC with even index must be used with the PINGPONG with even index
	 * DSC with odd index must be used with the PINGPONG with odd index
	 */
	if ((dsc_idx & 0x01) != (pp_idx & 0x01))
		return -ENAVAIL;

	return 0;
}
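
/*
 * Illustrative example: dsc_idx 2 with pp_idx 3 fails the parity check
 * (even DSC, odd PINGPONG), while dsc_idx 3 with pp_idx 3 passes.
 */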

static int _dpu_rm_dsc_alloc(struct dpu_rm *rm,
			     struct dpu_global_state *global_state,
			     uint32_t crtc_id,
			     const struct msm_display_topology *top)
{
	int num_dsc = 0;
	int pp_idx = 0;
	int dsc_idx;
	int ret;

	for (dsc_idx = 0; dsc_idx < ARRAY_SIZE(rm->dsc_blks) &&
	     num_dsc < top->num_dsc; dsc_idx++) {
		if (!rm->dsc_blks[dsc_idx])
			continue;

		if (reserved_by_other(global_state->dsc_to_crtc_id, dsc_idx, crtc_id))
			continue;

		pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, crtc_id);
		if (pp_idx < 0)
			return -ENAVAIL;

		ret = _dpu_rm_pingpong_dsc_check(dsc_idx, pp_idx);
		if (ret)
			return -ENAVAIL;

		global_state->dsc_to_crtc_id[dsc_idx] = crtc_id;
		num_dsc++;
		pp_idx++;
	}

	if (num_dsc < top->num_dsc) {
		DPU_ERROR("DSC allocation failed num_dsc=%d required=%d\n",
			  num_dsc, top->num_dsc);
		return -ENAVAIL;
	}

	return 0;
}

static int _dpu_rm_dsc_alloc_pair(struct dpu_rm *rm,
				  struct dpu_global_state *global_state,
				  uint32_t crtc_id,
				  const struct msm_display_topology *top)
{
	int num_dsc = 0;
	int dsc_idx, pp_idx = 0;
	int ret;

	/* only start from an even dsc index */
	for (dsc_idx = 0; dsc_idx < ARRAY_SIZE(rm->dsc_blks) &&
	     num_dsc < top->num_dsc; dsc_idx += 2) {
		if (!rm->dsc_blks[dsc_idx] ||
		    !rm->dsc_blks[dsc_idx + 1])
			continue;

		/* consecutive dsc indices are paired */
		if (reserved_by_other(global_state->dsc_to_crtc_id, dsc_idx, crtc_id) ||
		    reserved_by_other(global_state->dsc_to_crtc_id, dsc_idx + 1, crtc_id))
			continue;

		pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx, crtc_id);
		if (pp_idx < 0)
			return -ENAVAIL;

		ret = _dpu_rm_pingpong_dsc_check(dsc_idx, pp_idx);
		if (ret) {
			pp_idx = 0;
			continue;
		}

		pp_idx = _dpu_rm_pingpong_next_index(global_state, pp_idx + 1, crtc_id);
		if (pp_idx < 0)
			return -ENAVAIL;

		ret = _dpu_rm_pingpong_dsc_check(dsc_idx + 1, pp_idx);
		if (ret) {
			pp_idx = 0;
			continue;
		}

		global_state->dsc_to_crtc_id[dsc_idx] = crtc_id;
		global_state->dsc_to_crtc_id[dsc_idx + 1] = crtc_id;
		num_dsc += 2;
		pp_idx++;	/* start for next pair */
	}

	if (num_dsc < top->num_dsc) {
		DPU_ERROR("DSC allocation failed num_dsc=%d required=%d\n",
			  num_dsc, top->num_dsc);
		return -ENAVAIL;
	}

	return 0;
}
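
/*
 * Illustrative example: for num_dsc = 2 with PINGPONG indices 0 and 1
 * already held by this crtc, the pair DSC_0/DSC_1 is claimed in one
 * pass; when a parity check fails, pp_idx rewinds to 0 and the search
 * resumes at the next even DSC index.
 */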

static int _dpu_rm_reserve_dsc(struct dpu_rm *rm,
			       struct dpu_global_state *global_state,
			       uint32_t crtc_id,
			       const struct msm_display_topology *top)
{
	if (!top->num_dsc || !top->num_intf)
		return 0;

	/*
	 * Facts:
	 * 1) no pingpong split (two layer mixers share one pingpong)
	 * 2) DSC pair starts from even index, such as index(0,1), (2,3), etc
	 * 3) even PINGPONG connects to even DSC
	 * 4) odd PINGPONG connects to odd DSC
	 * 5) pair: encoder +--> pp_idx_0 --> dsc_idx_0
	 *                  +--> pp_idx_1 --> dsc_idx_1
	 */

	/* num_dsc should be either 1, 2 or 4 */
	if (top->num_dsc > top->num_intf) /* merge mode */
		return _dpu_rm_dsc_alloc_pair(rm, global_state, crtc_id, top);
	else
		return _dpu_rm_dsc_alloc(rm, global_state, crtc_id, top);
}
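
/*
 * Illustrative example: num_dsc = 4 over num_intf = 2 is merge mode
 * and goes through _dpu_rm_dsc_alloc_pair(), while num_dsc = 2 over
 * num_intf = 2 maps one DSC per interface via _dpu_rm_dsc_alloc().
 */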

static int _dpu_rm_reserve_cdm(struct dpu_rm *rm,
			       struct dpu_global_state *global_state,
			       uint32_t crtc_id,
			       int num_cdm)
{
	/* try allocating only one CDM block */
	if (!rm->cdm_blk) {
		DPU_ERROR("CDM block does not exist\n");
		return -EIO;
	}

	if (num_cdm > 1) {
		DPU_ERROR("More than 1 INTF requesting CDM\n");
		return -EINVAL;
	}

	if (global_state->cdm_to_crtc_id) {
		DPU_ERROR("CDM_0 is already allocated\n");
		return -EIO;
	}

	global_state->cdm_to_crtc_id = crtc_id;

	return 0;
}

static int _dpu_rm_make_reservation(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t crtc_id,
		struct msm_display_topology *topology)
{
	int ret;

	ret = _dpu_rm_reserve_lms(rm, global_state, crtc_id, topology);
	if (ret) {
		DPU_ERROR("unable to find appropriate mixers\n");
		return ret;
	}

	if (topology->cwb_enabled) {
		ret = _dpu_rm_reserve_cwb_mux_and_pingpongs(rm, global_state,
							    crtc_id, topology);
		if (ret)
			return ret;
	}

	ret = _dpu_rm_reserve_ctls(rm, global_state, crtc_id, topology);
	if (ret) {
		DPU_ERROR("unable to find appropriate CTL\n");
		return ret;
	}

	ret = _dpu_rm_reserve_dsc(rm, global_state, crtc_id, topology);
	if (ret)
		return ret;

	if (topology->num_cdm > 0) {
		ret = _dpu_rm_reserve_cdm(rm, global_state, crtc_id, topology->num_cdm);
		if (ret) {
			DPU_ERROR("unable to find CDM blk\n");
			return ret;
		}
	}

	return ret;
}

static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
				  uint32_t crtc_id)
{
	int i;

	for (i = 0; i < cnt; i++) {
		if (res_mapping[i] == crtc_id)
			res_mapping[i] = 0;
	}
}

/**
 * dpu_rm_release - Given the CRTC of the display chain, release any
 *	HW blocks previously reserved for that use case.
 * @global_state: resources shared across multiple kms objects
 * @crtc: DRM CRTC handle
 */
void dpu_rm_release(struct dpu_global_state *global_state,
		    struct drm_crtc *crtc)
{
	uint32_t crtc_id = crtc->base.id;

	_dpu_rm_clear_mapping(global_state->pingpong_to_crtc_id,
			      ARRAY_SIZE(global_state->pingpong_to_crtc_id), crtc_id);
	_dpu_rm_clear_mapping(global_state->mixer_to_crtc_id,
			      ARRAY_SIZE(global_state->mixer_to_crtc_id), crtc_id);
	_dpu_rm_clear_mapping(global_state->ctl_to_crtc_id,
			      ARRAY_SIZE(global_state->ctl_to_crtc_id), crtc_id);
	_dpu_rm_clear_mapping(global_state->dsc_to_crtc_id,
			      ARRAY_SIZE(global_state->dsc_to_crtc_id), crtc_id);
	_dpu_rm_clear_mapping(global_state->dspp_to_crtc_id,
			      ARRAY_SIZE(global_state->dspp_to_crtc_id), crtc_id);
	_dpu_rm_clear_mapping(&global_state->cdm_to_crtc_id, 1, crtc_id);
	_dpu_rm_clear_mapping(global_state->cwb_to_crtc_id,
			      ARRAY_SIZE(global_state->cwb_to_crtc_id), crtc_id);
}

/**
 * dpu_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze
 *	the connections and user requirements, specified through related
 *	topology control properties, and reserve hardware blocks for that
 *	display chain.
 *	HW blocks can then be accessed through dpu_rm_get_* functions.
 *	HW Reservations should be released via dpu_rm_release().
 * @rm: DPU Resource Manager handle
 * @global_state: resources shared across multiple kms objects
 * @crtc: DRM CRTC handle
 * @topology: Pointer to topology info for the display
 * @return: 0 on Success otherwise -ERROR
 */
int dpu_rm_reserve(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		struct drm_crtc *crtc,
		struct msm_display_topology *topology)
{
	int ret;

	if (IS_ERR(global_state)) {
		DPU_ERROR("failed to get global state\n");
		return PTR_ERR(global_state);
	}

	DRM_DEBUG_KMS("reserving hw for crtc %d\n", crtc->base.id);

	DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d\n",
		      topology->num_lm, topology->num_dsc,
		      topology->num_intf);

	ret = _dpu_rm_make_reservation(rm, global_state, crtc->base.id, topology);
	if (ret)
		DPU_ERROR("failed to reserve hw resources: %d\n", ret);

	return ret;
}
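
/*
 * Sketch of a typical caller (illustrative, not part of this file):
 *
 *	global_state = dpu_kms_get_global_state(crtc_state->state);
 *	ret = dpu_rm_reserve(&dpu_kms->rm, global_state, crtc, &topology);
 *
 * dpu_kms_get_global_state() can return an ERR_PTR, which is why
 * dpu_rm_reserve() opens with an IS_ERR() check.
 */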

static struct dpu_hw_sspp *dpu_rm_try_sspp(struct dpu_rm *rm,
					   struct dpu_global_state *global_state,
					   struct drm_crtc *crtc,
					   struct dpu_rm_sspp_requirements *reqs,
					   unsigned int type)
{
	uint32_t crtc_id = crtc->base.id;
	struct dpu_hw_sspp *hw_sspp;
	int i;

	for (i = 0; i < ARRAY_SIZE(rm->hw_sspp); i++) {
		if (!rm->hw_sspp[i])
			continue;

		if (global_state->sspp_to_crtc_id[i])
			continue;

		hw_sspp = rm->hw_sspp[i];

		if (hw_sspp->cap->type != type)
			continue;

		if (reqs->scale && !hw_sspp->cap->sblk->scaler_blk.len)
			continue;

		/* TODO: QSEED2 and RGB scalers are not yet supported */
		if (reqs->scale && !hw_sspp->ops.setup_scaler)
			continue;

		if (reqs->yuv && !hw_sspp->cap->sblk->csc_blk.len)
			continue;

		if (reqs->rot90 && !(hw_sspp->cap->features & DPU_SSPP_INLINE_ROTATION))
			continue;

		global_state->sspp_to_crtc_id[i] = crtc_id;

		return rm->hw_sspp[i];
	}

	return NULL;
}

/**
 * dpu_rm_reserve_sspp - Reserve the required SSPP for the provided CRTC
 * @rm: DPU Resource Manager handle
 * @global_state: private global state
 * @crtc: DRM CRTC handle
 * @reqs: SSPP required features
 *
 * Return: the reserved SSPP on success, or NULL if none is available
 */
struct dpu_hw_sspp *dpu_rm_reserve_sspp(struct dpu_rm *rm,
					struct dpu_global_state *global_state,
					struct drm_crtc *crtc,
					struct dpu_rm_sspp_requirements *reqs)
{
	struct dpu_hw_sspp *hw_sspp = NULL;

	if (!reqs->scale && !reqs->yuv)
		hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_DMA);
	if (!hw_sspp && reqs->scale)
		hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_RGB);
	if (!hw_sspp)
		hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_VIG);

	return hw_sspp;
}
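
/*
 * Illustrative use (hypothetical plane code): a plain layer with no
 * scaling is steered to a DMA pipe first, a scaled layer tries the RGB
 * pipes, and the more capable VIG pipes serve as the common fallback:
 *
 *	struct dpu_rm_sspp_requirements reqs = { .yuv = true };
 *	pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state,
 *					 crtc, &reqs);
 */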

/**
 * dpu_rm_release_all_sspp - Given the CRTC, release all SSPP
 *	blocks previously reserved for that use case.
 * @global_state: resources shared across multiple kms objects
 * @crtc: DRM CRTC handle
 */
void dpu_rm_release_all_sspp(struct dpu_global_state *global_state,
			     struct drm_crtc *crtc)
{
	uint32_t crtc_id = crtc->base.id;

	_dpu_rm_clear_mapping(global_state->sspp_to_crtc_id,
			      ARRAY_SIZE(global_state->sspp_to_crtc_id), crtc_id);
}

/**
 * dpu_rm_get_assigned_resources - Get hw resources of the given type that are
 *	assigned to this crtc
 * @rm: DPU Resource Manager handle
 * @global_state: resources shared across multiple kms objects
 * @crtc: DRM CRTC handle
 * @type: resource type to return data for
 * @blks: pointer to the array to be filled by HW resources
 * @blks_size: size of the @blks array
 *
 * Return: the number of @blks entries filled in
 */
int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
		struct dpu_global_state *global_state, struct drm_crtc *crtc,
		enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size)
{
	uint32_t crtc_id = crtc->base.id;
	struct dpu_hw_blk **hw_blks;
	uint32_t *hw_to_crtc_id;
	int i, num_blks, max_blks;

	switch (type) {
	case DPU_HW_BLK_PINGPONG:
	case DPU_HW_BLK_DCWB_PINGPONG:
		hw_blks = rm->pingpong_blks;
		hw_to_crtc_id = global_state->pingpong_to_crtc_id;
		max_blks = ARRAY_SIZE(rm->pingpong_blks);
		break;
	case DPU_HW_BLK_LM:
		hw_blks = rm->mixer_blks;
		hw_to_crtc_id = global_state->mixer_to_crtc_id;
		max_blks = ARRAY_SIZE(rm->mixer_blks);
		break;
	case DPU_HW_BLK_CTL:
		hw_blks = rm->ctl_blks;
		hw_to_crtc_id = global_state->ctl_to_crtc_id;
		max_blks = ARRAY_SIZE(rm->ctl_blks);
		break;
	case DPU_HW_BLK_DSPP:
		hw_blks = rm->dspp_blks;
		hw_to_crtc_id = global_state->dspp_to_crtc_id;
		max_blks = ARRAY_SIZE(rm->dspp_blks);
		break;
	case DPU_HW_BLK_DSC:
		hw_blks = rm->dsc_blks;
		hw_to_crtc_id = global_state->dsc_to_crtc_id;
		max_blks = ARRAY_SIZE(rm->dsc_blks);
		break;
	case DPU_HW_BLK_CDM:
		hw_blks = &rm->cdm_blk;
		hw_to_crtc_id = &global_state->cdm_to_crtc_id;
		max_blks = 1;
		break;
	case DPU_HW_BLK_CWB:
		hw_blks = rm->cwb_blks;
		hw_to_crtc_id = global_state->cwb_to_crtc_id;
		max_blks = ARRAY_SIZE(rm->cwb_blks);
		break;
	default:
		DPU_ERROR("blk type %d not managed by rm\n", type);
		return 0;
	}

	num_blks = 0;
	for (i = 0; i < max_blks; i++) {
		if (hw_to_crtc_id[i] != crtc_id)
			continue;

		if (type == DPU_HW_BLK_PINGPONG) {
			struct dpu_hw_pingpong *pp = to_dpu_hw_pingpong(hw_blks[i]);

			if (pp->idx >= PINGPONG_CWB_0)
				continue;
		}

		if (type == DPU_HW_BLK_DCWB_PINGPONG) {
			struct dpu_hw_pingpong *pp = to_dpu_hw_pingpong(hw_blks[i]);

			if (pp->idx < PINGPONG_CWB_0)
				continue;
		}

		if (num_blks == blks_size) {
			DPU_ERROR("More than %d resources assigned to crtc %d\n",
				  blks_size, crtc_id);
			break;
		}
		if (!hw_blks[i]) {
			DPU_ERROR("Allocated resource %d unavailable to assign to crtc %d\n",
				  type, crtc_id);
			break;
		}
		blks[num_blks++] = hw_blks[i];
	}

	return num_blks;
}
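
/*
 * Illustrative use (hypothetical encoder code):
 *
 *	struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
 *	int num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm,
 *			global_state, crtc, DPU_HW_BLK_PINGPONG,
 *			hw_pp, ARRAY_SIZE(hw_pp));
 *
 * The return value never exceeds @blks_size, and only the first
 * num_pp entries of hw_pp are valid afterwards.
 */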

static void dpu_rm_print_state_helper(struct drm_printer *p,
				      struct dpu_hw_blk *blk,
				      uint32_t mapping)
{
	if (!blk)
		drm_puts(p, "- ");
	else if (!mapping)
		drm_puts(p, "# ");
	else
		drm_printf(p, "%d ", mapping);
}
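
/*
 * Legend for the dump produced below: "-" means the block does not
 * exist on this hardware, "#" means it exists but is unassigned, and
 * a number is the id of the crtc owning the block.
 */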

/**
 * dpu_rm_print_state - output the RM private state
 * @p: DRM printer
 * @global_state: global state
 */
void dpu_rm_print_state(struct drm_printer *p,
			const struct dpu_global_state *global_state)
{
	const struct dpu_rm *rm = global_state->rm;
	int i;

	drm_puts(p, "resource mapping:\n");
	drm_puts(p, "\tpingpong=");
	for (i = 0; i < ARRAY_SIZE(global_state->pingpong_to_crtc_id); i++)
		dpu_rm_print_state_helper(p, rm->pingpong_blks[i],
					  global_state->pingpong_to_crtc_id[i]);
	drm_puts(p, "\n");

	drm_puts(p, "\tmixer=");
	for (i = 0; i < ARRAY_SIZE(global_state->mixer_to_crtc_id); i++)
		dpu_rm_print_state_helper(p, rm->mixer_blks[i],
					  global_state->mixer_to_crtc_id[i]);
	drm_puts(p, "\n");

	drm_puts(p, "\tctl=");
	for (i = 0; i < ARRAY_SIZE(global_state->ctl_to_crtc_id); i++)
		dpu_rm_print_state_helper(p, rm->ctl_blks[i],
					  global_state->ctl_to_crtc_id[i]);
	drm_puts(p, "\n");

	drm_puts(p, "\tdspp=");
	for (i = 0; i < ARRAY_SIZE(global_state->dspp_to_crtc_id); i++)
		dpu_rm_print_state_helper(p, rm->dspp_blks[i],
					  global_state->dspp_to_crtc_id[i]);
	drm_puts(p, "\n");

	drm_puts(p, "\tdsc=");
	for (i = 0; i < ARRAY_SIZE(global_state->dsc_to_crtc_id); i++)
		dpu_rm_print_state_helper(p, rm->dsc_blks[i],
					  global_state->dsc_to_crtc_id[i]);
	drm_puts(p, "\n");

	drm_puts(p, "\tcdm=");
	dpu_rm_print_state_helper(p, rm->cdm_blk,
				  global_state->cdm_to_crtc_id);
	drm_puts(p, "\n");

	drm_puts(p, "\tsspp=");
	/* skip SSPP_NONE and start from the next index */
	for (i = SSPP_NONE + 1; i < ARRAY_SIZE(global_state->sspp_to_crtc_id); i++)
		dpu_rm_print_state_helper(p, rm->hw_sspp[i] ? &rm->hw_sspp[i]->base : NULL,
					  global_state->sspp_to_crtc_id[i]);
	drm_puts(p, "\n");

	drm_puts(p, "\tcwb=");
	for (i = 0; i < ARRAY_SIZE(global_state->cwb_to_crtc_id); i++)
		dpu_rm_print_state_helper(p, rm->cwb_blks[i],
					  global_state->cwb_to_crtc_id[i]);
	drm_puts(p, "\n");
}