1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) STMicroelectronics SA 2014
4 * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
5 */
6
7 #include <linux/component.h>
8 #include <linux/delay.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/firmware.h>
11 #include <linux/io.h>
12 #include <linux/module.h>
13 #include <linux/of.h>
14 #include <linux/reset.h>
15 #include <linux/seq_file.h>
16
17 #include <drm/drm_atomic.h>
18 #include <drm/drm_device.h>
19 #include <drm/drm_fb_dma_helper.h>
20 #include <drm/drm_fourcc.h>
21 #include <drm/drm_framebuffer.h>
22 #include <drm/drm_gem_dma_helper.h>
23 #include <drm/drm_print.h>
24
25 #include "sti_compositor.h"
26 #include "sti_drv.h"
27 #include "sti_hqvdp_lut.h"
28 #include "sti_plane.h"
29 #include "sti_vtg.h"
30
31 /* Firmware name */
32 #define HQVDP_FMW_NAME "hqvdp-stih407.bin"
33
34 /* Regs address */
35 #define HQVDP_DMEM 0x00000000 /* 0x00000000 */
36 #define HQVDP_PMEM 0x00040000 /* 0x00040000 */
37 #define HQVDP_RD_PLUG 0x000E0000 /* 0x000E0000 */
38 #define HQVDP_RD_PLUG_CONTROL (HQVDP_RD_PLUG + 0x1000) /* 0x000E1000 */
39 #define HQVDP_RD_PLUG_PAGE_SIZE (HQVDP_RD_PLUG + 0x1004) /* 0x000E1004 */
40 #define HQVDP_RD_PLUG_MIN_OPC (HQVDP_RD_PLUG + 0x1008) /* 0x000E1008 */
41 #define HQVDP_RD_PLUG_MAX_OPC (HQVDP_RD_PLUG + 0x100C) /* 0x000E100C */
42 #define HQVDP_RD_PLUG_MAX_CHK (HQVDP_RD_PLUG + 0x1010) /* 0x000E1010 */
43 #define HQVDP_RD_PLUG_MAX_MSG (HQVDP_RD_PLUG + 0x1014) /* 0x000E1014 */
44 #define HQVDP_RD_PLUG_MIN_SPACE (HQVDP_RD_PLUG + 0x1018) /* 0x000E1018 */
45 #define HQVDP_WR_PLUG 0x000E2000 /* 0x000E2000 */
46 #define HQVDP_WR_PLUG_CONTROL (HQVDP_WR_PLUG + 0x1000) /* 0x000E3000 */
47 #define HQVDP_WR_PLUG_PAGE_SIZE (HQVDP_WR_PLUG + 0x1004) /* 0x000E3004 */
48 #define HQVDP_WR_PLUG_MIN_OPC (HQVDP_WR_PLUG + 0x1008) /* 0x000E3008 */
49 #define HQVDP_WR_PLUG_MAX_OPC (HQVDP_WR_PLUG + 0x100C) /* 0x000E300C */
50 #define HQVDP_WR_PLUG_MAX_CHK (HQVDP_WR_PLUG + 0x1010) /* 0x000E3010 */
51 #define HQVDP_WR_PLUG_MAX_MSG (HQVDP_WR_PLUG + 0x1014) /* 0x000E3014 */
52 #define HQVDP_WR_PLUG_MIN_SPACE (HQVDP_WR_PLUG + 0x1018) /* 0x000E3018 */
53 #define HQVDP_MBX 0x000E4000 /* 0x000E4000 */
54 #define HQVDP_MBX_IRQ_TO_XP70 (HQVDP_MBX + 0x0000) /* 0x000E4000 */
55 #define HQVDP_MBX_INFO_HOST (HQVDP_MBX + 0x0004) /* 0x000E4004 */
56 #define HQVDP_MBX_IRQ_TO_HOST (HQVDP_MBX + 0x0008) /* 0x000E4008 */
57 #define HQVDP_MBX_INFO_XP70 (HQVDP_MBX + 0x000C) /* 0x000E400C */
58 #define HQVDP_MBX_SW_RESET_CTRL (HQVDP_MBX + 0x0010) /* 0x000E4010 */
59 #define HQVDP_MBX_STARTUP_CTRL1 (HQVDP_MBX + 0x0014) /* 0x000E4014 */
60 #define HQVDP_MBX_STARTUP_CTRL2 (HQVDP_MBX + 0x0018) /* 0x000E4018 */
61 #define HQVDP_MBX_GP_STATUS (HQVDP_MBX + 0x001C) /* 0x000E401C */
62 #define HQVDP_MBX_NEXT_CMD (HQVDP_MBX + 0x0020) /* 0x000E4020 */
63 #define HQVDP_MBX_CURRENT_CMD (HQVDP_MBX + 0x0024) /* 0x000E4024 */
64 #define HQVDP_MBX_SOFT_VSYNC (HQVDP_MBX + 0x0028) /* 0x000E4028 */
65
66 /* Plugs config */
67 #define PLUG_CONTROL_ENABLE 0x00000001
68 #define PLUG_PAGE_SIZE_256 0x00000002
69 #define PLUG_MIN_OPC_8 0x00000003
70 #define PLUG_MAX_OPC_64 0x00000006
71 #define PLUG_MAX_CHK_2X 0x00000001
72 #define PLUG_MAX_MSG_1X 0x00000000
73 #define PLUG_MIN_SPACE_1 0x00000000
74
75 /* SW reset CTRL */
76 #define SW_RESET_CTRL_FULL BIT(0)
77 #define SW_RESET_CTRL_CORE BIT(1)
78
79 /* Startup ctrl 1 */
80 #define STARTUP_CTRL1_RST_DONE BIT(0)
81 #define STARTUP_CTRL1_AUTH_IDLE BIT(2)
82
83 /* Startup ctrl 2 */
84 #define STARTUP_CTRL2_FETCH_EN BIT(1)
85
86 /* Info xP70 */
87 #define INFO_XP70_FW_READY BIT(15)
88 #define INFO_XP70_FW_PROCESSING BIT(14)
89 #define INFO_XP70_FW_INITQUEUES BIT(13)
90
91 /* SOFT_VSYNC */
92 #define SOFT_VSYNC_HW 0x00000000
93 #define SOFT_VSYNC_SW_CMD 0x00000001
94 #define SOFT_VSYNC_SW_CTRL_IRQ 0x00000003
95
96 /* Reset & boot poll config */
97 #define POLL_MAX_ATTEMPT 50
98 #define POLL_DELAY_MS 20
99
100 #define SCALE_FACTOR 8192
101 #define SCALE_MAX_FOR_LEG_LUT_F 4096
102 #define SCALE_MAX_FOR_LEG_LUT_E 4915
103 #define SCALE_MAX_FOR_LEG_LUT_D 6654
104 #define SCALE_MAX_FOR_LEG_LUT_C 8192
105
/* Scaler direction selector used by sti_hqvdp_update_hvsrc() */
enum sti_hvsrc_orient {
	HVSRC_HORI,
	HVSRC_VERT
};

/* Command structures */

/*
 * TOP part of a firmware command: buffer addresses, pitches and viewport
 * geometry. Commands are fetched by the xP70 by physical address (posted
 * through HQVDP_MBX_NEXT_CMD), so the field layout is part of the firmware
 * ABI - do not reorder.
 */
struct sti_hqvdp_top {
	u32 config;
	u32 mem_format;
	u32 current_luma;
	u32 current_enh_luma;
	u32 current_right_luma;
	u32 current_enh_right_luma;
	u32 current_chroma;
	u32 current_enh_chroma;
	u32 current_right_chroma;
	u32 current_enh_right_chroma;
	u32 output_luma;
	u32 output_chroma;
	u32 luma_src_pitch;
	u32 luma_enh_src_pitch;
	u32 luma_right_src_pitch;
	u32 luma_enh_right_src_pitch;
	u32 chroma_src_pitch;
	u32 chroma_enh_src_pitch;
	u32 chroma_right_src_pitch;
	u32 chroma_enh_right_src_pitch;
	u32 luma_processed_pitch;
	u32 chroma_processed_pitch;
	u32 input_frame_size;
	u32 input_viewport_ori;
	u32 input_viewport_ori_right;
	u32 input_viewport_size;
	u32 left_view_border_width;
	u32 right_view_border_width;
	u32 left_view_3d_offset_width;
	u32 right_view_3d_offset_width;
	u32 side_stripe_color;
	u32 crc_reset_ctrl;
};

/* Configs for interlaced : no IT, no pass thru, 3 fields */
#define TOP_CONFIG_INTER_BTM 0x00000000
#define TOP_CONFIG_INTER_TOP 0x00000002

/* Config for progressive : no IT, no pass thru, 3 fields */
#define TOP_CONFIG_PROGRESSIVE 0x00000001

/* Default MemFormat: in=420_raster_dual out=444_raster;opaque Mem2Tv mode */
#define TOP_MEM_FORMAT_DFLT 0x00018060

/* Min/Max size accepted by the atomic check (pixels) */
#define MAX_WIDTH 0x1FFF
#define MAX_HEIGHT 0x0FFF
#define MIN_WIDTH 0x0030
#define MIN_HEIGHT 0x0010
162
/*
 * VC1RE control words (presumably VC1 range mapping; field names suggest
 * they feed the CSDI and FMD stages - confirm against firmware spec).
 */
struct sti_hqvdp_vc1re {
	u32 ctrl_prv_csdi;
	u32 ctrl_cur_csdi;
	u32 ctrl_nxt_csdi;
	u32 ctrl_cur_fmd;
	u32 ctrl_nxt_fmd;
};

/* Film Mode Detection stage: look-ahead luma buffers and thresholds */
struct sti_hqvdp_fmd {
	u32 config;
	u32 viewport_ori;
	u32 viewport_size;
	u32 next_next_luma;
	u32 next_next_right_luma;
	u32 next_next_next_luma;
	u32 next_next_next_right_luma;
	u32 threshold_scd;
	u32 threshold_rfd;
	u32 threshold_move;
	u32 threshold_cfd;
};

/* CSDI (deinterlacer) stage: previous/next field buffers and motion data */
struct sti_hqvdp_csdi {
	u32 config;
	u32 config2;
	u32 dcdi_config;
	u32 prev_luma;
	u32 prev_enh_luma;
	u32 prev_right_luma;
	u32 prev_enh_right_luma;
	u32 next_luma;
	u32 next_enh_luma;
	u32 next_right_luma;
	u32 next_enh_right_luma;
	u32 prev_chroma;
	u32 prev_enh_chroma;
	u32 prev_right_chroma;
	u32 prev_enh_right_chroma;
	u32 next_chroma;
	u32 next_enh_chroma;
	u32 next_right_chroma;
	u32 next_enh_right_chroma;
	u32 prev_motion;
	u32 prev_right_motion;
	u32 cur_motion;
	u32 cur_right_motion;
	u32 next_motion;
	u32 next_right_motion;
};

/* Config for progressive: by pass */
#define CSDI_CONFIG_PROG 0x00000000
/* Config for directional deinterlacing without motion */
#define CSDI_CONFIG_INTER_DIR 0x00000016
/* Additional configs for fader, blender, motion,... deinterlace algorithms */
#define CSDI_CONFIG2_DFLT 0x000001B3
#define CSDI_DCDI_CONFIG_DFLT 0x00203803

/* HVSRC (scaler) stage: output size and the four filter coefficient LUTs */
struct sti_hqvdp_hvsrc {
	u32 hor_panoramic_ctrl;
	u32 output_picture_size;
	u32 init_horizontal;
	u32 init_vertical;
	u32 param_ctrl;
	u32 yh_coef[NB_COEF];
	u32 ch_coef[NB_COEF];
	u32 yv_coef[NB_COEF];
	u32 cv_coef[NB_COEF];
	u32 hori_shift;
	u32 vert_shift;
};

/* Default ParamCtrl: all controls enabled */
#define HVSRC_PARAM_CTRL_DFLT 0xFFFFFFFF

/* IQI (image quality improvement) stage configuration */
struct sti_hqvdp_iqi {
	u32 config;
	u32 demo_wind_size;
	u32 pk_config;
	u32 coeff0_coeff1;
	u32 coeff2_coeff3;
	u32 coeff4;
	u32 pk_lut;
	u32 pk_gain;
	u32 pk_coring_level;
	u32 cti_config;
	u32 le_config;
	u32 le_lut[64];
	u32 con_bri;
	u32 sat_gain;
	u32 pxf_conf;
	u32 default_color;
};

/* Default Config : IQI bypassed */
#define IQI_CONFIG_DFLT 0x00000001
/* Default Contrast & Brightness gain = 256 */
#define IQI_CON_BRI_DFLT 0x00000100
/* Default Saturation gain = 256 */
#define IQI_SAT_GAIN_DFLT 0x00000100
/* Default PxfConf : P2I bypassed */
#define IQI_PXF_CONF_DFLT 0x00000001
265
/* Per-stage status words (CRCs, timings), presumably written back by the FW */
struct sti_hqvdp_top_status {
	u32 processing_time;
	u32 input_y_crc;
	u32 input_uv_crc;
};

struct sti_hqvdp_fmd_status {
	u32 fmd_repeat_move_status;
	u32 fmd_scene_count_status;
	u32 cfd_sum;
	u32 field_sum;
	u32 next_y_fmd_crc;
	u32 next_next_y_fmd_crc;
	u32 next_next_next_y_fmd_crc;
};

struct sti_hqvdp_csdi_status {
	u32 prev_y_csdi_crc;
	u32 cur_y_csdi_crc;
	u32 next_y_csdi_crc;
	u32 prev_uv_csdi_crc;
	u32 cur_uv_csdi_crc;
	u32 next_uv_csdi_crc;
	u32 y_csdi_crc;
	u32 uv_csdi_crc;
	u32 uv_cup_crc;
	u32 mot_csdi_crc;
	u32 mot_cur_csdi_crc;
	u32 mot_prev_csdi_crc;
};

struct sti_hqvdp_hvsrc_status {
	u32 y_hvsrc_crc;
	u32 u_hvsrc_crc;
	u32 v_hvsrc_crc;
};

struct sti_hqvdp_iqi_status {
	u32 pxf_it_status;
	u32 y_iqi_crc;
	u32 u_iqi_crc;
	u32 v_iqi_crc;
};

/*
 * Main commands. We use 2 commands: one being processed by the firmware,
 * one ready to be fetched upon next Vsync.
 */
#define NB_VDP_CMD 2

/*
 * A complete command exchanged with the xP70: the driver fills the stage
 * configuration blocks, the *_status blocks carry results. The order of
 * the sub-structures is part of the command layout - do not reorder.
 */
struct sti_hqvdp_cmd {
	struct sti_hqvdp_top top;
	struct sti_hqvdp_vc1re vc1re;
	struct sti_hqvdp_fmd fmd;
	struct sti_hqvdp_csdi csdi;
	struct sti_hqvdp_hvsrc hvsrc;
	struct sti_hqvdp_iqi iqi;
	struct sti_hqvdp_top_status top_status;
	struct sti_hqvdp_fmd_status fmd_status;
	struct sti_hqvdp_csdi_status csdi_status;
	struct sti_hqvdp_hvsrc_status hvsrc_status;
	struct sti_hqvdp_iqi_status iqi_status;
};
327
/**
 * struct sti_hqvdp - STI HQVDP plane context
 *
 * @dev: driver device
 * @drm_dev: the drm device
 * @regs: registers
 * @plane: plane structure for hqvdp it self
 * @clk: IP clock
 * @clk_pix_main: pix main clock
 * @reset: reset control
 * @vtg_nb: notifier to handle VTG Vsync
 * @btm_field_pending: is there any bottom field (interlaced frame) to display
 * @hqvdp_cmd: buffer of commands
 * @hqvdp_cmd_paddr: physical address of hqvdp_cmd
 * @vtg: vtg for main data path
 * @xp70_initialized: true if xp70 is already initialized
 * @vtg_registered: true if registered to VTG
 */
struct sti_hqvdp {
	struct device *dev;
	struct drm_device *drm_dev;
	void __iomem *regs;
	struct sti_plane plane;
	struct clk *clk;
	struct clk *clk_pix_main;
	struct reset_control *reset;
	struct notifier_block vtg_nb;
	bool btm_field_pending;
	void *hqvdp_cmd;
	u32 hqvdp_cmd_paddr;
	struct sti_vtg *vtg;
	bool xp70_initialized;
	bool vtg_registered;
};

#define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, plane)

/* Pixel formats handled by this plane (semi-planar YUV 4:2:0 only) */
static const uint32_t hqvdp_supported_formats[] = {
	DRM_FORMAT_NV12,
};
368
369 /**
370 * sti_hqvdp_get_free_cmd
371 * @hqvdp: hqvdp structure
372 *
373 * Look for a hqvdp_cmd that is not being used (or about to be used) by the FW.
374 *
375 * RETURNS:
376 * the offset of the command to be used.
377 * -1 in error cases
378 */
sti_hqvdp_get_free_cmd(struct sti_hqvdp * hqvdp)379 static int sti_hqvdp_get_free_cmd(struct sti_hqvdp *hqvdp)
380 {
381 u32 curr_cmd, next_cmd;
382 u32 cmd = hqvdp->hqvdp_cmd_paddr;
383 int i;
384
385 curr_cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
386 next_cmd = readl(hqvdp->regs + HQVDP_MBX_NEXT_CMD);
387
388 for (i = 0; i < NB_VDP_CMD; i++) {
389 if ((cmd != curr_cmd) && (cmd != next_cmd))
390 return i * sizeof(struct sti_hqvdp_cmd);
391 cmd += sizeof(struct sti_hqvdp_cmd);
392 }
393
394 return -1;
395 }
396
397 /**
398 * sti_hqvdp_get_curr_cmd
399 * @hqvdp: hqvdp structure
400 *
401 * Look for the hqvdp_cmd that is being used by the FW.
402 *
403 * RETURNS:
404 * the offset of the command to be used.
405 * -1 in error cases
406 */
sti_hqvdp_get_curr_cmd(struct sti_hqvdp * hqvdp)407 static int sti_hqvdp_get_curr_cmd(struct sti_hqvdp *hqvdp)
408 {
409 u32 curr_cmd;
410 u32 cmd = hqvdp->hqvdp_cmd_paddr;
411 unsigned int i;
412
413 curr_cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
414
415 for (i = 0; i < NB_VDP_CMD; i++) {
416 if (cmd == curr_cmd)
417 return i * sizeof(struct sti_hqvdp_cmd);
418
419 cmd += sizeof(struct sti_hqvdp_cmd);
420 }
421
422 return -1;
423 }
424
425 /**
426 * sti_hqvdp_get_next_cmd
427 * @hqvdp: hqvdp structure
428 *
429 * Look for the next hqvdp_cmd that will be used by the FW.
430 *
431 * RETURNS:
432 * the offset of the next command that will be used.
433 * -1 in error cases
434 */
sti_hqvdp_get_next_cmd(struct sti_hqvdp * hqvdp)435 static int sti_hqvdp_get_next_cmd(struct sti_hqvdp *hqvdp)
436 {
437 int next_cmd;
438 dma_addr_t cmd = hqvdp->hqvdp_cmd_paddr;
439 unsigned int i;
440
441 next_cmd = readl(hqvdp->regs + HQVDP_MBX_NEXT_CMD);
442
443 for (i = 0; i < NB_VDP_CMD; i++) {
444 if (cmd == next_cmd)
445 return i * sizeof(struct sti_hqvdp_cmd);
446
447 cmd += sizeof(struct sti_hqvdp_cmd);
448 }
449
450 return -1;
451 }
452
453 #define DBGFS_DUMP(reg) seq_printf(s, "\n %-25s 0x%08X", #reg, \
454 readl(hqvdp->regs + reg))
455
/*
 * hqvdp_dbg_get_lut - identify which known filter LUT @coef matches
 *
 * Only the first 16 bytes are compared, enough to discriminate between
 * the known LUT tables. Returns a printable name, or "<UNKNOWN>".
 */
static const char *hqvdp_dbg_get_lut(u32 *coef)
{
	static const struct {
		const void *table;
		const char *name;
	} known_luts[] = {
		{ coef_lut_a_legacy, "LUT A" },
		{ coef_lut_b, "LUT B" },
		{ coef_lut_c_y_legacy, "LUT C Y" },
		{ coef_lut_c_c_legacy, "LUT C C" },
		{ coef_lut_d_y_legacy, "LUT D Y" },
		{ coef_lut_d_c_legacy, "LUT D C" },
		{ coef_lut_e_y_legacy, "LUT E Y" },
		{ coef_lut_e_c_legacy, "LUT E C" },
		{ coef_lut_f_y_legacy, "LUT F Y" },
		{ coef_lut_f_c_legacy, "LUT F C" },
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(known_luts); i++)
		if (!memcmp(coef, known_luts[i].table, 16))
			return known_luts[i].name;

	return "<UNKNOWN>";
}
480
hqvdp_dbg_dump_cmd(struct seq_file * s,struct sti_hqvdp_cmd * c)481 static void hqvdp_dbg_dump_cmd(struct seq_file *s, struct sti_hqvdp_cmd *c)
482 {
483 int src_w, src_h, dst_w, dst_h;
484
485 seq_puts(s, "\n\tTOP:");
486 seq_printf(s, "\n\t %-20s 0x%08X", "Config", c->top.config);
487 switch (c->top.config) {
488 case TOP_CONFIG_PROGRESSIVE:
489 seq_puts(s, "\tProgressive");
490 break;
491 case TOP_CONFIG_INTER_TOP:
492 seq_puts(s, "\tInterlaced, top field");
493 break;
494 case TOP_CONFIG_INTER_BTM:
495 seq_puts(s, "\tInterlaced, bottom field");
496 break;
497 default:
498 seq_puts(s, "\t<UNKNOWN>");
499 break;
500 }
501
502 seq_printf(s, "\n\t %-20s 0x%08X", "MemFormat", c->top.mem_format);
503 seq_printf(s, "\n\t %-20s 0x%08X", "CurrentY", c->top.current_luma);
504 seq_printf(s, "\n\t %-20s 0x%08X", "CurrentC", c->top.current_chroma);
505 seq_printf(s, "\n\t %-20s 0x%08X", "YSrcPitch", c->top.luma_src_pitch);
506 seq_printf(s, "\n\t %-20s 0x%08X", "CSrcPitch",
507 c->top.chroma_src_pitch);
508 seq_printf(s, "\n\t %-20s 0x%08X", "InputFrameSize",
509 c->top.input_frame_size);
510 seq_printf(s, "\t%dx%d",
511 c->top.input_frame_size & 0x0000FFFF,
512 c->top.input_frame_size >> 16);
513 seq_printf(s, "\n\t %-20s 0x%08X", "InputViewportSize",
514 c->top.input_viewport_size);
515 src_w = c->top.input_viewport_size & 0x0000FFFF;
516 src_h = c->top.input_viewport_size >> 16;
517 seq_printf(s, "\t%dx%d", src_w, src_h);
518
519 seq_puts(s, "\n\tHVSRC:");
520 seq_printf(s, "\n\t %-20s 0x%08X", "OutputPictureSize",
521 c->hvsrc.output_picture_size);
522 dst_w = c->hvsrc.output_picture_size & 0x0000FFFF;
523 dst_h = c->hvsrc.output_picture_size >> 16;
524 seq_printf(s, "\t%dx%d", dst_w, dst_h);
525 seq_printf(s, "\n\t %-20s 0x%08X", "ParamCtrl", c->hvsrc.param_ctrl);
526
527 seq_printf(s, "\n\t %-20s %s", "yh_coef",
528 hqvdp_dbg_get_lut(c->hvsrc.yh_coef));
529 seq_printf(s, "\n\t %-20s %s", "ch_coef",
530 hqvdp_dbg_get_lut(c->hvsrc.ch_coef));
531 seq_printf(s, "\n\t %-20s %s", "yv_coef",
532 hqvdp_dbg_get_lut(c->hvsrc.yv_coef));
533 seq_printf(s, "\n\t %-20s %s", "cv_coef",
534 hqvdp_dbg_get_lut(c->hvsrc.cv_coef));
535
536 seq_printf(s, "\n\t %-20s", "ScaleH");
537 if (dst_w > src_w)
538 seq_printf(s, " %d/1", dst_w / src_w);
539 else
540 seq_printf(s, " 1/%d", src_w / dst_w);
541
542 seq_printf(s, "\n\t %-20s", "tScaleV");
543 if (dst_h > src_h)
544 seq_printf(s, " %d/1", dst_h / src_h);
545 else
546 seq_printf(s, " 1/%d", src_h / dst_h);
547
548 seq_puts(s, "\n\tCSDI:");
549 seq_printf(s, "\n\t %-20s 0x%08X\t", "Config", c->csdi.config);
550 switch (c->csdi.config) {
551 case CSDI_CONFIG_PROG:
552 seq_puts(s, "Bypass");
553 break;
554 case CSDI_CONFIG_INTER_DIR:
555 seq_puts(s, "Deinterlace, directional");
556 break;
557 default:
558 seq_puts(s, "<UNKNOWN>");
559 break;
560 }
561
562 seq_printf(s, "\n\t %-20s 0x%08X", "Config2", c->csdi.config2);
563 seq_printf(s, "\n\t %-20s 0x%08X", "DcdiConfig", c->csdi.dcdi_config);
564 }
565
/*
 * hqvdp_dbg_show - debugfs status dump for the HQVDP plane
 * @s: seq_file to print to
 * @data: unused (context comes from the drm_info_node)
 *
 * Dumps the mailbox registers, decodes the firmware state bits and, when
 * the current/next command addresses can be matched against the driver's
 * command buffer, dumps the corresponding command contents.
 */
static int hqvdp_dbg_show(struct seq_file *s, void *data)
{
	struct drm_info_node *node = s->private;
	struct sti_hqvdp *hqvdp = (struct sti_hqvdp *)node->info_ent->data;
	int cmd, cmd_offset, infoxp70;
	void *virt;

	seq_printf(s, "%s: (vaddr = 0x%p)",
		   sti_plane_to_str(&hqvdp->plane), hqvdp->regs);

	DBGFS_DUMP(HQVDP_MBX_IRQ_TO_XP70);
	DBGFS_DUMP(HQVDP_MBX_INFO_HOST);
	DBGFS_DUMP(HQVDP_MBX_IRQ_TO_HOST);
	DBGFS_DUMP(HQVDP_MBX_INFO_XP70);
	/* Decode the coprocessor state bits for readability */
	infoxp70 = readl(hqvdp->regs + HQVDP_MBX_INFO_XP70);
	seq_puts(s, "\tFirmware state: ");
	if (infoxp70 & INFO_XP70_FW_READY)
		seq_puts(s, "idle and ready");
	else if (infoxp70 & INFO_XP70_FW_PROCESSING)
		seq_puts(s, "processing a picture");
	else if (infoxp70 & INFO_XP70_FW_INITQUEUES)
		seq_puts(s, "programming queues");
	else
		seq_puts(s, "NOT READY");

	DBGFS_DUMP(HQVDP_MBX_SW_RESET_CTRL);
	DBGFS_DUMP(HQVDP_MBX_STARTUP_CTRL1);
	if (readl(hqvdp->regs + HQVDP_MBX_STARTUP_CTRL1)
					& STARTUP_CTRL1_RST_DONE)
		seq_puts(s, "\tReset is done");
	else
		seq_puts(s, "\tReset is NOT done");
	DBGFS_DUMP(HQVDP_MBX_STARTUP_CTRL2);
	if (readl(hqvdp->regs + HQVDP_MBX_STARTUP_CTRL2)
					& STARTUP_CTRL2_FETCH_EN)
		seq_puts(s, "\tFetch is enabled");
	else
		seq_puts(s, "\tFetch is NOT enabled");
	DBGFS_DUMP(HQVDP_MBX_GP_STATUS);
	DBGFS_DUMP(HQVDP_MBX_NEXT_CMD);
	DBGFS_DUMP(HQVDP_MBX_CURRENT_CMD);
	DBGFS_DUMP(HQVDP_MBX_SOFT_VSYNC);
	/* bits 0-1 select SW vsync modes; 0 means HW vsync (SOFT_VSYNC_HW) */
	if (!(readl(hqvdp->regs + HQVDP_MBX_SOFT_VSYNC) & 3))
		seq_puts(s, "\tHW Vsync");
	else
		seq_puts(s, "\tSW Vsync ?!?!");

	/* Last command */
	cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
	cmd_offset = sti_hqvdp_get_curr_cmd(hqvdp);
	if (cmd_offset == -1) {
		seq_puts(s, "\n\n Last command: unknown");
	} else {
		virt = hqvdp->hqvdp_cmd + cmd_offset;
		seq_printf(s, "\n\n Last command: address @ 0x%x (0x%p)",
			   cmd, virt);
		hqvdp_dbg_dump_cmd(s, (struct sti_hqvdp_cmd *)virt);
	}

	/* Next command */
	cmd = readl(hqvdp->regs + HQVDP_MBX_NEXT_CMD);
	cmd_offset = sti_hqvdp_get_next_cmd(hqvdp);
	if (cmd_offset == -1) {
		seq_puts(s, "\n\n Next command: unknown");
	} else {
		virt = hqvdp->hqvdp_cmd + cmd_offset;
		seq_printf(s, "\n\n Next command address: @ 0x%x (0x%p)",
			   cmd, virt);
		hqvdp_dbg_dump_cmd(s, (struct sti_hqvdp_cmd *)virt);
	}

	seq_putc(s, '\n');
	return 0;
}
640
/* debugfs entries for this plane; .data is filled in at registration time */
static struct drm_info_list hqvdp_debugfs_files[] = {
	{ "hqvdp", hqvdp_dbg_show, 0, NULL },
};
644
hqvdp_debugfs_init(struct sti_hqvdp * hqvdp,struct drm_minor * minor)645 static void hqvdp_debugfs_init(struct sti_hqvdp *hqvdp, struct drm_minor *minor)
646 {
647 unsigned int i;
648
649 for (i = 0; i < ARRAY_SIZE(hqvdp_debugfs_files); i++)
650 hqvdp_debugfs_files[i].data = hqvdp;
651
652 drm_debugfs_create_files(hqvdp_debugfs_files,
653 ARRAY_SIZE(hqvdp_debugfs_files),
654 minor->debugfs_root, minor);
655 }
656
657 /**
658 * sti_hqvdp_update_hvsrc
659 * @orient: horizontal or vertical
660 * @scale: scaling/zoom factor
661 * @hvsrc: the structure containing the LUT coef
662 *
663 * Update the Y and C Lut coef, as well as the shift param
664 *
665 * RETURNS:
666 * None.
667 */
sti_hqvdp_update_hvsrc(enum sti_hvsrc_orient orient,int scale,struct sti_hqvdp_hvsrc * hvsrc)668 static void sti_hqvdp_update_hvsrc(enum sti_hvsrc_orient orient, int scale,
669 struct sti_hqvdp_hvsrc *hvsrc)
670 {
671 const int *coef_c, *coef_y;
672 int shift_c, shift_y;
673
674 /* Get the appropriate coef tables */
675 if (scale < SCALE_MAX_FOR_LEG_LUT_F) {
676 coef_y = coef_lut_f_y_legacy;
677 coef_c = coef_lut_f_c_legacy;
678 shift_y = SHIFT_LUT_F_Y_LEGACY;
679 shift_c = SHIFT_LUT_F_C_LEGACY;
680 } else if (scale < SCALE_MAX_FOR_LEG_LUT_E) {
681 coef_y = coef_lut_e_y_legacy;
682 coef_c = coef_lut_e_c_legacy;
683 shift_y = SHIFT_LUT_E_Y_LEGACY;
684 shift_c = SHIFT_LUT_E_C_LEGACY;
685 } else if (scale < SCALE_MAX_FOR_LEG_LUT_D) {
686 coef_y = coef_lut_d_y_legacy;
687 coef_c = coef_lut_d_c_legacy;
688 shift_y = SHIFT_LUT_D_Y_LEGACY;
689 shift_c = SHIFT_LUT_D_C_LEGACY;
690 } else if (scale < SCALE_MAX_FOR_LEG_LUT_C) {
691 coef_y = coef_lut_c_y_legacy;
692 coef_c = coef_lut_c_c_legacy;
693 shift_y = SHIFT_LUT_C_Y_LEGACY;
694 shift_c = SHIFT_LUT_C_C_LEGACY;
695 } else if (scale == SCALE_MAX_FOR_LEG_LUT_C) {
696 coef_y = coef_c = coef_lut_b;
697 shift_y = shift_c = SHIFT_LUT_B;
698 } else {
699 coef_y = coef_c = coef_lut_a_legacy;
700 shift_y = shift_c = SHIFT_LUT_A_LEGACY;
701 }
702
703 if (orient == HVSRC_HORI) {
704 hvsrc->hori_shift = (shift_c << 16) | shift_y;
705 memcpy(hvsrc->yh_coef, coef_y, sizeof(hvsrc->yh_coef));
706 memcpy(hvsrc->ch_coef, coef_c, sizeof(hvsrc->ch_coef));
707 } else {
708 hvsrc->vert_shift = (shift_c << 16) | shift_y;
709 memcpy(hvsrc->yv_coef, coef_y, sizeof(hvsrc->yv_coef));
710 memcpy(hvsrc->cv_coef, coef_c, sizeof(hvsrc->cv_coef));
711 }
712 }
713
714 /**
715 * sti_hqvdp_check_hw_scaling
716 * @hqvdp: hqvdp pointer
717 * @mode: display mode with timing constraints
718 * @src_w: source width
719 * @src_h: source height
720 * @dst_w: destination width
721 * @dst_h: destination height
722 *
723 * Check if the HW is able to perform the scaling request
724 * The firmware scaling limitation is "CEIL(1/Zy) <= FLOOR(LFW)" where:
725 * Zy = OutputHeight / InputHeight
726 * LFW = (Tx * IPClock) / (MaxNbCycles * Cp)
727 * Tx : Total video mode horizontal resolution
728 * IPClock : HQVDP IP clock (Mhz)
729 * MaxNbCycles: max(InputWidth, OutputWidth)
730 * Cp: Video mode pixel clock (Mhz)
731 *
732 * RETURNS:
733 * True if the HW can scale.
734 */
sti_hqvdp_check_hw_scaling(struct sti_hqvdp * hqvdp,struct drm_display_mode * mode,int src_w,int src_h,int dst_w,int dst_h)735 static bool sti_hqvdp_check_hw_scaling(struct sti_hqvdp *hqvdp,
736 struct drm_display_mode *mode,
737 int src_w, int src_h,
738 int dst_w, int dst_h)
739 {
740 unsigned long lfw;
741 unsigned int inv_zy;
742
743 lfw = mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000);
744 lfw /= max(src_w, dst_w) * mode->clock / 1000;
745
746 inv_zy = DIV_ROUND_UP(src_h, dst_h);
747
748 return inv_zy <= lfw;
749 }
750
751 /**
752 * sti_hqvdp_disable
753 * @hqvdp: hqvdp pointer
754 *
755 * Disables the HQVDP plane
756 */
sti_hqvdp_disable(struct sti_hqvdp * hqvdp)757 static void sti_hqvdp_disable(struct sti_hqvdp *hqvdp)
758 {
759 int i;
760
761 DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&hqvdp->plane));
762
763 /* Unregister VTG Vsync callback */
764 if (sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb))
765 DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
766
767 /* Set next cmd to NULL */
768 writel(0, hqvdp->regs + HQVDP_MBX_NEXT_CMD);
769
770 for (i = 0; i < POLL_MAX_ATTEMPT; i++) {
771 if (readl(hqvdp->regs + HQVDP_MBX_INFO_XP70)
772 & INFO_XP70_FW_READY)
773 break;
774 msleep(POLL_DELAY_MS);
775 }
776
777 /* VTG can stop now */
778 clk_disable_unprepare(hqvdp->clk_pix_main);
779
780 if (i == POLL_MAX_ATTEMPT)
781 DRM_ERROR("XP70 could not revert to idle\n");
782
783 hqvdp->plane.status = STI_PLANE_DISABLED;
784 hqvdp->vtg_registered = false;
785 }
786
/**
 * sti_hqvdp_vtg_cb
 * @nb: notifier block
 * @evt: event message
 * @data: private data
 *
 * Handle VTG Vsync event: perform a deferred plane disable when flushing,
 * and post the bottom-field command (cloned from the current top-field
 * command) when an interlaced frame is pending.
 *
 * RETURNS:
 * 0 on success, -EBUSY when no command slot is free for the bottom field.
 */
static int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
{
	struct sti_hqvdp *hqvdp = container_of(nb, struct sti_hqvdp, vtg_nb);
	int btm_cmd_offset, top_cmd_offest;
	struct sti_hqvdp_cmd *btm_cmd, *top_cmd;

	/* Only field (vsync) events are of interest here */
	if ((evt != VTG_TOP_FIELD_EVENT) && (evt != VTG_BOTTOM_FIELD_EVENT)) {
		DRM_DEBUG_DRIVER("Unknown event\n");
		return 0;
	}

	if (hqvdp->plane.status == STI_PLANE_FLUSHING) {
		/* disable need to be synchronize on vsync event */
		DRM_DEBUG_DRIVER("Vsync event received => disable %s\n",
				 sti_plane_to_str(&hqvdp->plane));

		sti_hqvdp_disable(hqvdp);
	}

	if (hqvdp->btm_field_pending) {
		/* Create the btm field command from the current one */
		btm_cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
		top_cmd_offest = sti_hqvdp_get_curr_cmd(hqvdp);
		if ((btm_cmd_offset == -1) || (top_cmd_offest == -1)) {
			DRM_DEBUG_DRIVER("Warning: no cmd, will skip field\n");
			return -EBUSY;
		}

		btm_cmd = hqvdp->hqvdp_cmd + btm_cmd_offset;
		top_cmd = hqvdp->hqvdp_cmd + top_cmd_offest;

		/* Clone the top-field command, then patch it for bottom field */
		memcpy(btm_cmd, top_cmd, sizeof(*btm_cmd));

		btm_cmd->top.config = TOP_CONFIG_INTER_BTM;
		/*
		 * Advance the source addresses by half a pitch to point at
		 * the bottom-field data.
		 * NOTE(review): assumes the pitch fields hold double the
		 * per-field line stride for interlaced content - confirm
		 * against the firmware spec.
		 */
		btm_cmd->top.current_luma +=
			btm_cmd->top.luma_src_pitch / 2;
		btm_cmd->top.current_chroma +=
			btm_cmd->top.chroma_src_pitch / 2;

		/* Post the command to mailbox */
		writel(hqvdp->hqvdp_cmd_paddr + btm_cmd_offset,
		       hqvdp->regs + HQVDP_MBX_NEXT_CMD);

		hqvdp->btm_field_pending = false;

		dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
			__func__, hqvdp->hqvdp_cmd_paddr);

		sti_plane_update_fps(&hqvdp->plane, false, true);
	}

	return 0;
}
851
sti_hqvdp_init(struct sti_hqvdp * hqvdp)852 static void sti_hqvdp_init(struct sti_hqvdp *hqvdp)
853 {
854 int size;
855 dma_addr_t dma_addr;
856
857 hqvdp->vtg_nb.notifier_call = sti_hqvdp_vtg_cb;
858
859 /* Allocate memory for the VDP commands */
860 size = NB_VDP_CMD * sizeof(struct sti_hqvdp_cmd);
861 hqvdp->hqvdp_cmd = dma_alloc_wc(hqvdp->dev, size,
862 &dma_addr,
863 GFP_KERNEL | GFP_DMA);
864 if (!hqvdp->hqvdp_cmd) {
865 DRM_ERROR("Failed to allocate memory for VDP cmd\n");
866 return;
867 }
868
869 hqvdp->hqvdp_cmd_paddr = (u32)dma_addr;
870 memset(hqvdp->hqvdp_cmd, 0, size);
871 }
872
sti_hqvdp_init_plugs(struct sti_hqvdp * hqvdp)873 static void sti_hqvdp_init_plugs(struct sti_hqvdp *hqvdp)
874 {
875 /* Configure Plugs (same for RD & WR) */
876 writel(PLUG_PAGE_SIZE_256, hqvdp->regs + HQVDP_RD_PLUG_PAGE_SIZE);
877 writel(PLUG_MIN_OPC_8, hqvdp->regs + HQVDP_RD_PLUG_MIN_OPC);
878 writel(PLUG_MAX_OPC_64, hqvdp->regs + HQVDP_RD_PLUG_MAX_OPC);
879 writel(PLUG_MAX_CHK_2X, hqvdp->regs + HQVDP_RD_PLUG_MAX_CHK);
880 writel(PLUG_MAX_MSG_1X, hqvdp->regs + HQVDP_RD_PLUG_MAX_MSG);
881 writel(PLUG_MIN_SPACE_1, hqvdp->regs + HQVDP_RD_PLUG_MIN_SPACE);
882 writel(PLUG_CONTROL_ENABLE, hqvdp->regs + HQVDP_RD_PLUG_CONTROL);
883
884 writel(PLUG_PAGE_SIZE_256, hqvdp->regs + HQVDP_WR_PLUG_PAGE_SIZE);
885 writel(PLUG_MIN_OPC_8, hqvdp->regs + HQVDP_WR_PLUG_MIN_OPC);
886 writel(PLUG_MAX_OPC_64, hqvdp->regs + HQVDP_WR_PLUG_MAX_OPC);
887 writel(PLUG_MAX_CHK_2X, hqvdp->regs + HQVDP_WR_PLUG_MAX_CHK);
888 writel(PLUG_MAX_MSG_1X, hqvdp->regs + HQVDP_WR_PLUG_MAX_MSG);
889 writel(PLUG_MIN_SPACE_1, hqvdp->regs + HQVDP_WR_PLUG_MIN_SPACE);
890 writel(PLUG_CONTROL_ENABLE, hqvdp->regs + HQVDP_WR_PLUG_CONTROL);
891 }
892
893 /**
894 * sti_hqvdp_start_xp70
895 * @hqvdp: hqvdp pointer
896 *
897 * Run the xP70 initialization sequence
898 */
sti_hqvdp_start_xp70(struct sti_hqvdp * hqvdp)899 static void sti_hqvdp_start_xp70(struct sti_hqvdp *hqvdp)
900 {
901 const struct firmware *firmware;
902 u32 *fw_rd_plug, *fw_wr_plug, *fw_pmem, *fw_dmem;
903 u8 *data;
904 int i;
905 struct fw_header {
906 int rd_size;
907 int wr_size;
908 int pmem_size;
909 int dmem_size;
910 } *header;
911
912 DRM_DEBUG_DRIVER("\n");
913
914 if (hqvdp->xp70_initialized) {
915 DRM_DEBUG_DRIVER("HQVDP XP70 already initialized\n");
916 return;
917 }
918
919 /* Request firmware */
920 if (request_firmware(&firmware, HQVDP_FMW_NAME, hqvdp->dev)) {
921 DRM_ERROR("Can't get HQVDP firmware\n");
922 return;
923 }
924
925 /* Check firmware parts */
926 if (!firmware) {
927 DRM_ERROR("Firmware not available\n");
928 return;
929 }
930
931 header = (struct fw_header *)firmware->data;
932 if (firmware->size < sizeof(*header)) {
933 DRM_ERROR("Invalid firmware size (%zu)\n", firmware->size);
934 goto out;
935 }
936 if ((sizeof(*header) + header->rd_size + header->wr_size +
937 header->pmem_size + header->dmem_size) != firmware->size) {
938 DRM_ERROR("Invalid fmw structure (%zu+%d+%d+%d+%d != %zu)\n",
939 sizeof(*header), header->rd_size, header->wr_size,
940 header->pmem_size, header->dmem_size,
941 firmware->size);
942 goto out;
943 }
944
945 data = (u8 *)firmware->data;
946 data += sizeof(*header);
947 fw_rd_plug = (void *)data;
948 data += header->rd_size;
949 fw_wr_plug = (void *)data;
950 data += header->wr_size;
951 fw_pmem = (void *)data;
952 data += header->pmem_size;
953 fw_dmem = (void *)data;
954
955 /* Enable clock */
956 if (clk_prepare_enable(hqvdp->clk))
957 DRM_ERROR("Failed to prepare/enable HQVDP clk\n");
958
959 /* Reset */
960 writel(SW_RESET_CTRL_FULL, hqvdp->regs + HQVDP_MBX_SW_RESET_CTRL);
961
962 for (i = 0; i < POLL_MAX_ATTEMPT; i++) {
963 if (readl(hqvdp->regs + HQVDP_MBX_STARTUP_CTRL1)
964 & STARTUP_CTRL1_RST_DONE)
965 break;
966 msleep(POLL_DELAY_MS);
967 }
968 if (i == POLL_MAX_ATTEMPT) {
969 DRM_ERROR("Could not reset\n");
970 clk_disable_unprepare(hqvdp->clk);
971 goto out;
972 }
973
974 /* Init Read & Write plugs */
975 for (i = 0; i < header->rd_size / 4; i++)
976 writel(fw_rd_plug[i], hqvdp->regs + HQVDP_RD_PLUG + i * 4);
977 for (i = 0; i < header->wr_size / 4; i++)
978 writel(fw_wr_plug[i], hqvdp->regs + HQVDP_WR_PLUG + i * 4);
979
980 sti_hqvdp_init_plugs(hqvdp);
981
982 /* Authorize Idle Mode */
983 writel(STARTUP_CTRL1_AUTH_IDLE, hqvdp->regs + HQVDP_MBX_STARTUP_CTRL1);
984
985 /* Prevent VTG interruption during the boot */
986 writel(SOFT_VSYNC_SW_CTRL_IRQ, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC);
987 writel(0, hqvdp->regs + HQVDP_MBX_NEXT_CMD);
988
989 /* Download PMEM & DMEM */
990 for (i = 0; i < header->pmem_size / 4; i++)
991 writel(fw_pmem[i], hqvdp->regs + HQVDP_PMEM + i * 4);
992 for (i = 0; i < header->dmem_size / 4; i++)
993 writel(fw_dmem[i], hqvdp->regs + HQVDP_DMEM + i * 4);
994
995 /* Enable fetch */
996 writel(STARTUP_CTRL2_FETCH_EN, hqvdp->regs + HQVDP_MBX_STARTUP_CTRL2);
997
998 /* Wait end of boot */
999 for (i = 0; i < POLL_MAX_ATTEMPT; i++) {
1000 if (readl(hqvdp->regs + HQVDP_MBX_INFO_XP70)
1001 & INFO_XP70_FW_READY)
1002 break;
1003 msleep(POLL_DELAY_MS);
1004 }
1005 if (i == POLL_MAX_ATTEMPT) {
1006 DRM_ERROR("Could not boot\n");
1007 clk_disable_unprepare(hqvdp->clk);
1008 goto out;
1009 }
1010
1011 /* Launch Vsync */
1012 writel(SOFT_VSYNC_HW, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC);
1013
1014 DRM_INFO("HQVDP XP70 initialized\n");
1015
1016 hqvdp->xp70_initialized = true;
1017
1018 out:
1019 release_firmware(firmware);
1020 }
1021
/*
 * sti_hqvdp_atomic_check - validate a requested HQVDP plane state
 * @drm_plane: drm plane being checked
 * @state:     global atomic state containing the new plane state
 *
 * Checks that the new plane geometry fits the CRTC mode, that the
 * requested scaling is within hardware capability, that the framebuffer
 * has a DMA GEM backing, and that input/output sizes are within the
 * MIN/MAX limits. Also performs lazy one-time setup: boots the XP70
 * coprocessor and registers the VTG vsync notifier on first use.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane,
				  struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 drm_plane);
	struct sti_plane *plane = to_sti_plane(drm_plane);
	struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
	struct drm_crtc *crtc = new_plane_state->crtc;
	struct drm_framebuffer *fb = new_plane_state->fb;
	struct drm_crtc_state *crtc_state;
	struct drm_display_mode *mode;
	int dst_x, dst_y, dst_w, dst_h;
	int src_x, src_y, src_w, src_h;

	/* no need for further checks if the plane is being disabled */
	if (!crtc || !fb)
		return 0;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	/* Clamp the destination rectangle to the active display area */
	mode = &crtc_state->mode;
	dst_x = new_plane_state->crtc_x;
	dst_y = new_plane_state->crtc_y;
	dst_w = clamp_val(new_plane_state->crtc_w, 0, mode->hdisplay - dst_x);
	dst_h = clamp_val(new_plane_state->crtc_h, 0, mode->vdisplay - dst_y);
	/* src_x are in 16.16 format */
	src_x = new_plane_state->src_x >> 16;
	src_y = new_plane_state->src_y >> 16;
	src_w = new_plane_state->src_w >> 16;
	src_h = new_plane_state->src_h >> 16;

	/* Reject scaling ratios the HQVDP hardware cannot achieve */
	if (mode->clock && !sti_hqvdp_check_hw_scaling(hqvdp, mode,
						       src_w, src_h,
						       dst_w, dst_h)) {
		DRM_ERROR("Scaling beyond HW capabilities\n");
		return -EINVAL;
	}

	if (!drm_fb_dma_get_gem_obj(fb, 0)) {
		DRM_ERROR("Can't get DMA GEM object for fb\n");
		return -EINVAL;
	}

	/*
	 * Input / output size
	 * Align to upper even value
	 */
	dst_w = ALIGN(dst_w, 2);
	dst_h = ALIGN(dst_h, 2);

	if ((src_w > MAX_WIDTH) || (src_w < MIN_WIDTH) ||
	    (src_h > MAX_HEIGHT) || (src_h < MIN_HEIGHT) ||
	    (dst_w > MAX_WIDTH) || (dst_w < MIN_WIDTH) ||
	    (dst_h > MAX_HEIGHT) || (dst_h < MIN_HEIGHT)) {
		DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
			  src_w, src_h,
			  dst_w, dst_h);
		return -EINVAL;
	}

	/*
	 * NOTE(review): hardware is touched from atomic_check below (XP70
	 * boot, clock enable, VTG registration), so this also runs for
	 * TEST_ONLY commits — confirm this is intended.
	 */
	if (!hqvdp->xp70_initialized)
		/* Start HQVDP XP70 coprocessor */
		sti_hqvdp_start_xp70(hqvdp);

	if (!hqvdp->vtg_registered) {
		/* Prevent VTG shutdown */
		if (clk_prepare_enable(hqvdp->clk_pix_main)) {
			DRM_ERROR("Failed to prepare/enable pix main clk\n");
			return -EINVAL;
		}

		/* Register VTG Vsync callback to handle bottom fields */
		if (sti_vtg_register_client(hqvdp->vtg,
					    &hqvdp->vtg_nb,
					    crtc)) {
			DRM_ERROR("Cannot register VTG notifier\n");
			/* Roll back the clock taken just above */
			clk_disable_unprepare(hqvdp->clk_pix_main);
			return -EINVAL;
		}
		hqvdp->vtg_registered = true;
	}

	DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
		      crtc->base.id, sti_mixer_to_str(to_sti_mixer(crtc)),
		      drm_plane->base.id, sti_plane_to_str(plane));
	DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
		      sti_plane_to_str(plane),
		      dst_w, dst_h, dst_x, dst_y,
		      src_w, src_h, src_x, src_y);

	return 0;
}
1116
/*
 * sti_hqvdp_atomic_update - build and post an HQVDP command for the new state
 * @drm_plane: drm plane being updated
 * @state:     global atomic state containing old and new plane states
 *
 * Fills a free command slot in the shared command area with the geometry,
 * buffer addresses, pitches and scaler coefficients derived from the new
 * plane state, then hands the command's physical address to the XP70
 * firmware via the NEXT_CMD mailbox register. If nothing changed since
 * the previous commit, no command is posted.
 */
static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
				    struct drm_atomic_state *state)
{
	struct drm_plane_state *oldstate = drm_atomic_get_old_plane_state(state,
									  drm_plane);
	struct drm_plane_state *newstate = drm_atomic_get_new_plane_state(state,
									  drm_plane);
	struct sti_plane *plane = to_sti_plane(drm_plane);
	struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
	struct drm_crtc *crtc = newstate->crtc;
	struct drm_framebuffer *fb = newstate->fb;
	struct drm_display_mode *mode;
	int dst_x, dst_y, dst_w, dst_h;
	int src_x, src_y, src_w, src_h;
	struct drm_gem_dma_object *dma_obj;
	struct sti_hqvdp_cmd *cmd;
	int scale_h, scale_v;
	int cmd_offset;

	if (!crtc || !fb)
		return;

	/* Compare old and new geometry/fb to skip redundant commands */
	if ((oldstate->fb == newstate->fb) &&
	    (oldstate->crtc_x == newstate->crtc_x) &&
	    (oldstate->crtc_y == newstate->crtc_y) &&
	    (oldstate->crtc_w == newstate->crtc_w) &&
	    (oldstate->crtc_h == newstate->crtc_h) &&
	    (oldstate->src_x == newstate->src_x) &&
	    (oldstate->src_y == newstate->src_y) &&
	    (oldstate->src_w == newstate->src_w) &&
	    (oldstate->src_h == newstate->src_h)) {
		/* No change since last update, do not post cmd */
		DRM_DEBUG_DRIVER("No change, not posting cmd\n");
		plane->status = STI_PLANE_UPDATED;
		return;
	}

	/* Clamp destination rectangle to the active display area */
	mode = &crtc->mode;
	dst_x = newstate->crtc_x;
	dst_y = newstate->crtc_y;
	dst_w = clamp_val(newstate->crtc_w, 0, mode->hdisplay - dst_x);
	dst_h = clamp_val(newstate->crtc_h, 0, mode->vdisplay - dst_y);
	/* src_x are in 16.16 format */
	src_x = newstate->src_x >> 16;
	src_y = newstate->src_y >> 16;
	src_w = newstate->src_w >> 16;
	src_h = newstate->src_h >> 16;

	/* Grab a free slot in the shared command area; skip frame if none */
	cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
	if (cmd_offset == -1) {
		DRM_DEBUG_DRIVER("Warning: no cmd, will skip frame\n");
		return;
	}
	cmd = hqvdp->hqvdp_cmd + cmd_offset;

	/* Static parameters, defaulting to progressive mode */
	cmd->top.config = TOP_CONFIG_PROGRESSIVE;
	cmd->top.mem_format = TOP_MEM_FORMAT_DFLT;
	cmd->hvsrc.param_ctrl = HVSRC_PARAM_CTRL_DFLT;
	cmd->csdi.config = CSDI_CONFIG_PROG;

	/* VC1RE, FMD bypassed : keep everything set to 0
	 * IQI/P2I bypassed */
	cmd->iqi.config = IQI_CONFIG_DFLT;
	cmd->iqi.con_bri = IQI_CON_BRI_DFLT;
	cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT;
	cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;

	dma_obj = drm_fb_dma_get_gem_obj(fb, 0);

	DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
			 (char *)&fb->format->format,
			 (unsigned long) dma_obj->dma_addr);

	/* Buffer planes address */
	cmd->top.current_luma = (u32) dma_obj->dma_addr + fb->offsets[0];
	cmd->top.current_chroma = (u32) dma_obj->dma_addr + fb->offsets[1];

	/* Pitches */
	cmd->top.luma_processed_pitch = fb->pitches[0];
	cmd->top.luma_src_pitch = fb->pitches[0];
	cmd->top.chroma_processed_pitch = fb->pitches[1];
	cmd->top.chroma_src_pitch = fb->pitches[1];

	/* Input / output size
	 * Align to upper even value */
	dst_w = ALIGN(dst_w, 2);
	dst_h = ALIGN(dst_h, 2);

	/* Sizes/origins packed as (height << 16 | width), (y << 16 | x) */
	cmd->top.input_viewport_size = src_h << 16 | src_w;
	cmd->top.input_frame_size = src_h << 16 | src_w;
	cmd->hvsrc.output_picture_size = dst_h << 16 | dst_w;
	cmd->top.input_viewport_ori = src_y << 16 | src_x;

	/* Handle interlaced */
	if (fb->flags & DRM_MODE_FB_INTERLACED) {
		/* Top field to display */
		cmd->top.config = TOP_CONFIG_INTER_TOP;

		/* Update pitches and vert size */
		cmd->top.input_frame_size = (src_h / 2) << 16 | src_w;
		cmd->top.luma_processed_pitch *= 2;
		cmd->top.luma_src_pitch *= 2;
		cmd->top.chroma_processed_pitch *= 2;
		cmd->top.chroma_src_pitch *= 2;

		/* Enable directional deinterlacing processing */
		cmd->csdi.config = CSDI_CONFIG_INTER_DIR;
		cmd->csdi.config2 = CSDI_CONFIG2_DFLT;
		cmd->csdi.dcdi_config = CSDI_DCDI_CONFIG_DFLT;
	}

	/*
	 * Update hvsrc lut coef
	 * (src_w/src_h were range-checked in atomic_check, so presumably
	 * non-zero here — no division by zero; verify MIN_WIDTH/MIN_HEIGHT > 0)
	 */
	scale_h = SCALE_FACTOR * dst_w / src_w;
	sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc);

	scale_v = SCALE_FACTOR * dst_h / src_h;
	sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc);

	/* Post the command: firmware picks it up at next vsync */
	writel(hqvdp->hqvdp_cmd_paddr + cmd_offset,
	       hqvdp->regs + HQVDP_MBX_NEXT_CMD);

	/* Interlaced : get ready to display the bottom field at next Vsync */
	if (fb->flags & DRM_MODE_FB_INTERLACED)
		hqvdp->btm_field_pending = true;

	dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
		__func__, hqvdp->hqvdp_cmd_paddr + cmd_offset);

	sti_plane_update_fps(plane, true, true);

	plane->status = STI_PLANE_UPDATED;
}
1250
sti_hqvdp_atomic_disable(struct drm_plane * drm_plane,struct drm_atomic_state * state)1251 static void sti_hqvdp_atomic_disable(struct drm_plane *drm_plane,
1252 struct drm_atomic_state *state)
1253 {
1254 struct drm_plane_state *oldstate = drm_atomic_get_old_plane_state(state,
1255 drm_plane);
1256 struct sti_plane *plane = to_sti_plane(drm_plane);
1257
1258 if (!oldstate->crtc) {
1259 DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
1260 drm_plane->base.id);
1261 return;
1262 }
1263
1264 DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
1265 oldstate->crtc->base.id,
1266 sti_mixer_to_str(to_sti_mixer(oldstate->crtc)),
1267 drm_plane->base.id, sti_plane_to_str(plane));
1268
1269 plane->status = STI_PLANE_DISABLING;
1270 }
1271
/* Atomic helper callbacks for the HQVDP plane */
static const struct drm_plane_helper_funcs sti_hqvdp_helpers_funcs = {
	.atomic_check = sti_hqvdp_atomic_check,
	.atomic_update = sti_hqvdp_atomic_update,
	.atomic_disable = sti_hqvdp_atomic_disable,
};
1277
sti_hqvdp_late_register(struct drm_plane * drm_plane)1278 static int sti_hqvdp_late_register(struct drm_plane *drm_plane)
1279 {
1280 struct sti_plane *plane = to_sti_plane(drm_plane);
1281 struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
1282
1283 hqvdp_debugfs_init(hqvdp, drm_plane->dev->primary);
1284
1285 return 0;
1286 }
1287
/* Core plane operations; update/disable are handled by the atomic helpers */
static const struct drm_plane_funcs sti_hqvdp_plane_helpers_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_plane_cleanup,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
	.late_register = sti_hqvdp_late_register,
};
1297
sti_hqvdp_create(struct drm_device * drm_dev,struct device * dev,int desc)1298 static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
1299 struct device *dev, int desc)
1300 {
1301 struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
1302 int res;
1303
1304 hqvdp->plane.desc = desc;
1305 hqvdp->plane.status = STI_PLANE_DISABLED;
1306
1307 sti_hqvdp_init(hqvdp);
1308
1309 res = drm_universal_plane_init(drm_dev, &hqvdp->plane.drm_plane, 1,
1310 &sti_hqvdp_plane_helpers_funcs,
1311 hqvdp_supported_formats,
1312 ARRAY_SIZE(hqvdp_supported_formats),
1313 NULL, DRM_PLANE_TYPE_OVERLAY, NULL);
1314 if (res) {
1315 DRM_ERROR("Failed to initialize universal plane\n");
1316 return NULL;
1317 }
1318
1319 drm_plane_helper_add(&hqvdp->plane.drm_plane, &sti_hqvdp_helpers_funcs);
1320
1321 sti_plane_init_property(&hqvdp->plane, DRM_PLANE_TYPE_OVERLAY);
1322
1323 return &hqvdp->plane.drm_plane;
1324 }
1325
sti_hqvdp_bind(struct device * dev,struct device * master,void * data)1326 static int sti_hqvdp_bind(struct device *dev, struct device *master, void *data)
1327 {
1328 struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
1329 struct drm_device *drm_dev = data;
1330 struct drm_plane *plane;
1331
1332 DRM_DEBUG_DRIVER("\n");
1333
1334 hqvdp->drm_dev = drm_dev;
1335
1336 /* Create HQVDP plane once xp70 is initialized */
1337 plane = sti_hqvdp_create(drm_dev, hqvdp->dev, STI_HQVDP_0);
1338 if (!plane)
1339 DRM_ERROR("Can't create HQVDP plane\n");
1340
1341 return 0;
1342 }
1343
/*
 * sti_hqvdp_unbind - component unbind callback
 *
 * Nothing to undo here: the plane is cleaned up through the drm_plane_funcs
 * .destroy hook and device resources are devm-managed.
 */
static void sti_hqvdp_unbind(struct device *dev,
			     struct device *master, void *data)
{
	/* do nothing */
}
1349
/* Component operations hooking HQVDP into the STi DRM aggregate device */
static const struct component_ops sti_hqvdp_ops = {
	.bind = sti_hqvdp_bind,
	.unbind = sti_hqvdp_unbind,
};
1354
sti_hqvdp_probe(struct platform_device * pdev)1355 static int sti_hqvdp_probe(struct platform_device *pdev)
1356 {
1357 struct device *dev = &pdev->dev;
1358 struct device_node *vtg_np;
1359 struct sti_hqvdp *hqvdp;
1360
1361 DRM_DEBUG_DRIVER("\n");
1362
1363 hqvdp = devm_kzalloc(dev, sizeof(*hqvdp), GFP_KERNEL);
1364 if (!hqvdp) {
1365 DRM_ERROR("Failed to allocate HQVDP context\n");
1366 return -ENOMEM;
1367 }
1368
1369 hqvdp->dev = dev;
1370 hqvdp->regs = devm_platform_ioremap_resource(pdev, 0);
1371 if (IS_ERR(hqvdp->regs)) {
1372 DRM_ERROR("Register mapping failed\n");
1373 return PTR_ERR(hqvdp->regs);
1374 }
1375
1376 /* Get clock resources */
1377 hqvdp->clk = devm_clk_get(dev, "hqvdp");
1378 hqvdp->clk_pix_main = devm_clk_get(dev, "pix_main");
1379 if (IS_ERR(hqvdp->clk) || IS_ERR(hqvdp->clk_pix_main)) {
1380 DRM_ERROR("Cannot get clocks\n");
1381 return -ENXIO;
1382 }
1383
1384 /* Get reset resources */
1385 hqvdp->reset = devm_reset_control_get(dev, "hqvdp");
1386 if (!IS_ERR(hqvdp->reset))
1387 reset_control_deassert(hqvdp->reset);
1388
1389 vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0);
1390 if (vtg_np)
1391 hqvdp->vtg = of_vtg_find(vtg_np);
1392 of_node_put(vtg_np);
1393
1394 platform_set_drvdata(pdev, hqvdp);
1395
1396 return component_add(&pdev->dev, &sti_hqvdp_ops);
1397 }
1398
/* Platform remove: detach from the aggregate device (resources are devm) */
static void sti_hqvdp_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &sti_hqvdp_ops);
}
1403
/* Device tree match: HQVDP block found on STiH407 SoCs */
static const struct of_device_id hqvdp_of_match[] = {
	{ .compatible = "st,stih407-hqvdp", },
	{ /* end node */ }
};
MODULE_DEVICE_TABLE(of, hqvdp_of_match);
1409
/* Platform driver, registered by the STi DRM core (sti_drv) */
struct platform_driver sti_hqvdp_driver = {
	.driver = {
		.name = "sti-hqvdp",
		.of_match_table = hqvdp_of_match,
	},
	.probe = sti_hqvdp_probe,
	.remove = sti_hqvdp_remove,
};
1418
1419 MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
1420 MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
1421 MODULE_LICENSE("GPL");
1422