1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2015 MediaTek Inc.
4 */
5
6 #include <drm/drm_blend.h>
7 #include <drm/drm_fourcc.h>
8 #include <drm/drm_framebuffer.h>
9
10 #include <linux/clk.h>
11 #include <linux/component.h>
12 #include <linux/module.h>
13 #include <linux/of.h>
14 #include <linux/platform_device.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/soc/mediatek/mtk-cmdq.h>
17
18 #include "mtk_crtc.h"
19 #include "mtk_ddp_comp.h"
20 #include "mtk_disp_drv.h"
21 #include "mtk_drm_drv.h"
22
/* OVL register offsets and bit definitions */
#define DISP_REG_OVL_INTEN			0x0004
#define OVL_FME_CPL_INT				BIT(1)
#define DISP_REG_OVL_INTSTA			0x0008
#define DISP_REG_OVL_EN				0x000c
#define DISP_REG_OVL_RST			0x0014
#define DISP_REG_OVL_ROI_SIZE			0x0020
#define DISP_REG_OVL_DATAPATH_CON		0x0024
#define OVL_LAYER_SMI_ID_EN			BIT(0)
#define OVL_BGCLR_SEL_IN			BIT(2)
/* Parenthesize the argument so expressions (e.g. "a ? b : c") expand safely */
#define OVL_LAYER_AFBC_EN(n)			BIT(4 + (n))
#define DISP_REG_OVL_ROI_BGCLR			0x0028
#define DISP_REG_OVL_SRC_CON			0x002c
/* Per-layer registers: each layer occupies a 0x20-byte register bank */
#define DISP_REG_OVL_CON(n)			(0x0030 + 0x20 * (n))
#define DISP_REG_OVL_SRC_SIZE(n)		(0x0038 + 0x20 * (n))
#define DISP_REG_OVL_OFFSET(n)			(0x003c + 0x20 * (n))
#define DISP_REG_OVL_PITCH_MSB(n)		(0x0040 + 0x20 * (n))
#define OVL_PITCH_MSB_2ND_SUBBUF		BIT(16)
#define DISP_REG_OVL_PITCH(n)			(0x0044 + 0x20 * (n))
#define OVL_CONST_BLEND				BIT(28)
#define DISP_REG_OVL_RDMA_CTRL(n)		(0x00c0 + 0x20 * (n))
#define DISP_REG_OVL_RDMA_GMC(n)		(0x00c8 + 0x20 * (n))
#define DISP_REG_OVL_ADDR_MT2701		0x0040
#define DISP_REG_OVL_CLRFMT_EXT			0x02d0
#define OVL_CON_CLRFMT_BIT_DEPTH_MASK(n)	(GENMASK(1, 0) << (4 * (n)))
#define OVL_CON_CLRFMT_BIT_DEPTH(depth, n)	((depth) << (4 * (n)))
#define OVL_CON_CLRFMT_8_BIT			(0)
#define OVL_CON_CLRFMT_10_BIT			(1)
#define DISP_REG_OVL_ADDR_MT8173		0x0f40
/* Layer address bank location differs per SoC; base comes from driver data */
#define DISP_REG_OVL_ADDR(ovl, n)		((ovl)->data->addr + 0x20 * (n))
#define DISP_REG_OVL_HDR_ADDR(ovl, n)		((ovl)->data->addr + 0x20 * (n) + 0x04)
#define DISP_REG_OVL_HDR_PITCH(ovl, n)		((ovl)->data->addr + 0x20 * (n) + 0x08)

#define GMC_THRESHOLD_BITS	16
#define GMC_THRESHOLD_HIGH	((1 << GMC_THRESHOLD_BITS) / 4)
#define GMC_THRESHOLD_LOW	((1 << GMC_THRESHOLD_BITS) / 8)

#define OVL_CON_CLRFMT_MAN	BIT(23)
#define OVL_CON_BYTE_SWAP	BIT(24)

/* OVL_CON_RGB_SWAP works only if OVL_CON_CLRFMT_MAN is enabled */
#define OVL_CON_RGB_SWAP	BIT(25)

#define OVL_CON_CLRFMT_RGB	(1 << 12)
#define OVL_CON_CLRFMT_ARGB8888	(2 << 12)
#define OVL_CON_CLRFMT_RGBA8888	(3 << 12)
#define OVL_CON_CLRFMT_ABGR8888	(OVL_CON_CLRFMT_ARGB8888 | OVL_CON_BYTE_SWAP)
#define OVL_CON_CLRFMT_BGRA8888	(OVL_CON_CLRFMT_RGBA8888 | OVL_CON_BYTE_SWAP)
#define OVL_CON_CLRFMT_UYVY	(4 << 12)
#define OVL_CON_CLRFMT_YUYV	(5 << 12)
#define OVL_CON_MTX_YUV_TO_RGB	(6 << 16)
/* Premultiplied-alpha variants; require OVL_CON_CLRFMT_MAN */
#define OVL_CON_CLRFMT_PARGB8888	((3 << 12) | OVL_CON_CLRFMT_MAN)
#define OVL_CON_CLRFMT_PABGR8888	(OVL_CON_CLRFMT_PARGB8888 | OVL_CON_RGB_SWAP)
#define OVL_CON_CLRFMT_PBGRA8888	(OVL_CON_CLRFMT_PARGB8888 | OVL_CON_BYTE_SWAP)
#define OVL_CON_CLRFMT_PRGBA8888	(OVL_CON_CLRFMT_PABGR8888 | OVL_CON_BYTE_SWAP)
/* RGB565 and RGB888 encodings are swapped on some SoC generations */
#define OVL_CON_CLRFMT_RGB565(ovl)	((ovl)->data->fmt_rgb565_is_0 ? \
					0 : OVL_CON_CLRFMT_RGB)
#define OVL_CON_CLRFMT_RGB888(ovl)	((ovl)->data->fmt_rgb565_is_0 ? \
					OVL_CON_CLRFMT_RGB : 0)
#define OVL_CON_AEN		BIT(8)
#define OVL_CON_ALPHA		0xff
#define OVL_CON_VIRT_FLIP	BIT(9)
#define OVL_CON_HORZ_FLIP	BIT(10)

#define OVL_COLOR_ALPHA		GENMASK(31, 24)
87
is_10bit_rgb(u32 fmt)88 static inline bool is_10bit_rgb(u32 fmt)
89 {
90 switch (fmt) {
91 case DRM_FORMAT_XRGB2101010:
92 case DRM_FORMAT_ARGB2101010:
93 case DRM_FORMAT_RGBX1010102:
94 case DRM_FORMAT_RGBA1010102:
95 case DRM_FORMAT_XBGR2101010:
96 case DRM_FORMAT_ABGR2101010:
97 case DRM_FORMAT_BGRX1010102:
98 case DRM_FORMAT_BGRA1010102:
99 return true;
100 }
101 return false;
102 }
103
/* Pixel formats supported on MT8173-class OVL: 8-bit RGB[AX] plus YUV422 */
static const u32 mt8173_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_BGRX8888,
	DRM_FORMAT_BGRA8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_BGR888,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_YUYV,
};
117
/* MT8195 OVL format table: adds the 10-bit (2101010/1010102) RGB variants */
static const u32 mt8195_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_BGRX8888,
	DRM_FORMAT_BGRA8888,
	DRM_FORMAT_BGRX1010102,
	DRM_FORMAT_BGRA1010102,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_RGBX8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_RGBX1010102,
	DRM_FORMAT_RGBA1010102,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_BGR888,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_YUYV,
};
141
/**
 * struct mtk_disp_ovl_data - SoC-specific OVL configuration
 * @addr: offset of the first per-layer address register bank
 * @gmc_bits: width of the RDMA GMC threshold fields (8 or 10)
 * @layer_nr: number of overlay layers provided by this OVL instance
 * @fmt_rgb565_is_0: true when RGB565 is encoded as 0 (and RGB888 as
 *                   OVL_CON_CLRFMT_RGB) in the CON register
 * @smi_id_en: true when OVL_LAYER_SMI_ID_EN must be set while running
 * @supports_afbc: true when the hardware can decode AFBC buffers
 * @blend_modes: bitmask of supported DRM_MODE_BLEND_* modes
 * @formats: table of supported DRM fourcc formats
 * @num_formats: number of entries in @formats
 * @supports_clrfmt_ext: true when DISP_REG_OVL_CLRFMT_EXT is present
 */
struct mtk_disp_ovl_data {
	unsigned int addr;
	unsigned int gmc_bits;
	unsigned int layer_nr;
	bool fmt_rgb565_is_0;
	bool smi_id_en;
	bool supports_afbc;
	const u32 blend_modes;
	const u32 *formats;
	size_t num_formats;
	bool supports_clrfmt_ext;
};
154
/**
 * struct mtk_disp_ovl - DISP_OVL driver structure
 * @crtc: associated crtc to report vblank events to
 * @clk: clock gating the OVL hardware
 * @regs: MMIO base of the OVL register block
 * @cmdq_reg: CMDQ (GCE) client register handle for this block
 * @data: platform data
 * @vblank_cb: callback invoked from the frame-completion interrupt
 * @vblank_cb_data: opaque argument passed to @vblank_cb
 */
struct mtk_disp_ovl {
	struct drm_crtc *crtc;
	struct clk *clk;
	void __iomem *regs;
	struct cmdq_client_reg cmdq_reg;
	const struct mtk_disp_ovl_data *data;
	void (*vblank_cb)(void *data);
	void *vblank_cb_data;
};
169
mtk_disp_ovl_irq_handler(int irq,void * dev_id)170 static irqreturn_t mtk_disp_ovl_irq_handler(int irq, void *dev_id)
171 {
172 struct mtk_disp_ovl *priv = dev_id;
173
174 /* Clear frame completion interrupt */
175 writel(0x0, priv->regs + DISP_REG_OVL_INTSTA);
176
177 if (!priv->vblank_cb)
178 return IRQ_NONE;
179
180 priv->vblank_cb(priv->vblank_cb_data);
181
182 return IRQ_HANDLED;
183 }
184
mtk_ovl_register_vblank_cb(struct device * dev,void (* vblank_cb)(void *),void * vblank_cb_data)185 void mtk_ovl_register_vblank_cb(struct device *dev,
186 void (*vblank_cb)(void *),
187 void *vblank_cb_data)
188 {
189 struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
190
191 ovl->vblank_cb = vblank_cb;
192 ovl->vblank_cb_data = vblank_cb_data;
193 }
194
mtk_ovl_unregister_vblank_cb(struct device * dev)195 void mtk_ovl_unregister_vblank_cb(struct device *dev)
196 {
197 struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
198
199 ovl->vblank_cb = NULL;
200 ovl->vblank_cb_data = NULL;
201 }
202
mtk_ovl_enable_vblank(struct device * dev)203 void mtk_ovl_enable_vblank(struct device *dev)
204 {
205 struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
206
207 writel(0x0, ovl->regs + DISP_REG_OVL_INTSTA);
208 writel_relaxed(OVL_FME_CPL_INT, ovl->regs + DISP_REG_OVL_INTEN);
209 }
210
mtk_ovl_disable_vblank(struct device * dev)211 void mtk_ovl_disable_vblank(struct device *dev)
212 {
213 struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
214
215 writel_relaxed(0x0, ovl->regs + DISP_REG_OVL_INTEN);
216 }
217
mtk_ovl_get_blend_modes(struct device * dev)218 u32 mtk_ovl_get_blend_modes(struct device *dev)
219 {
220 struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
221
222 return ovl->data->blend_modes;
223 }
224
mtk_ovl_get_formats(struct device * dev)225 const u32 *mtk_ovl_get_formats(struct device *dev)
226 {
227 struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
228
229 return ovl->data->formats;
230 }
231
mtk_ovl_get_num_formats(struct device * dev)232 size_t mtk_ovl_get_num_formats(struct device *dev)
233 {
234 struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
235
236 return ovl->data->num_formats;
237 }
238
mtk_ovl_is_afbc_supported(struct device * dev)239 bool mtk_ovl_is_afbc_supported(struct device *dev)
240 {
241 struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
242
243 return ovl->data->supports_afbc;
244 }
245
mtk_ovl_clk_enable(struct device * dev)246 int mtk_ovl_clk_enable(struct device *dev)
247 {
248 struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
249
250 return clk_prepare_enable(ovl->clk);
251 }
252
mtk_ovl_clk_disable(struct device * dev)253 void mtk_ovl_clk_disable(struct device *dev)
254 {
255 struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
256
257 clk_disable_unprepare(ovl->clk);
258 }
259
mtk_ovl_start(struct device * dev)260 void mtk_ovl_start(struct device *dev)
261 {
262 struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
263
264 if (ovl->data->smi_id_en) {
265 unsigned int reg;
266
267 reg = readl(ovl->regs + DISP_REG_OVL_DATAPATH_CON);
268 reg = reg | OVL_LAYER_SMI_ID_EN;
269 writel_relaxed(reg, ovl->regs + DISP_REG_OVL_DATAPATH_CON);
270 }
271 writel_relaxed(0x1, ovl->regs + DISP_REG_OVL_EN);
272 }
273
mtk_ovl_stop(struct device * dev)274 void mtk_ovl_stop(struct device *dev)
275 {
276 struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
277
278 writel_relaxed(0x0, ovl->regs + DISP_REG_OVL_EN);
279 if (ovl->data->smi_id_en) {
280 unsigned int reg;
281
282 reg = readl(ovl->regs + DISP_REG_OVL_DATAPATH_CON);
283 reg = reg & ~OVL_LAYER_SMI_ID_EN;
284 writel_relaxed(reg, ovl->regs + DISP_REG_OVL_DATAPATH_CON);
285 }
286 }
287
/*
 * mtk_ovl_set_afbc - toggle AFBC decoding for layer @idx
 *
 * Sets or clears the per-layer AFBC enable bit in DATAPATH_CON via a
 * masked write (optionally batched into @cmdq_pkt).
 */
static void mtk_ovl_set_afbc(struct mtk_disp_ovl *ovl, struct cmdq_pkt *cmdq_pkt,
			     int idx, bool enabled)
{
	mtk_ddp_write_mask(cmdq_pkt, enabled ? OVL_LAYER_AFBC_EN(idx) : 0,
			   &ovl->cmdq_reg, ovl->regs,
			   DISP_REG_OVL_DATAPATH_CON, OVL_LAYER_AFBC_EN(idx));
}
295
/*
 * mtk_ovl_set_bit_depth - select 8- or 10-bit color depth for layer @idx
 *
 * Writes the per-layer depth field in DISP_REG_OVL_CLRFMT_EXT; a no-op on
 * hardware without that register.
 */
static void mtk_ovl_set_bit_depth(struct device *dev, int idx, u32 format,
				  struct cmdq_pkt *cmdq_pkt)
{
	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
	unsigned int bit_depth;

	if (!ovl->data->supports_clrfmt_ext)
		return;

	bit_depth = is_10bit_rgb(format) ? OVL_CON_CLRFMT_10_BIT :
					   OVL_CON_CLRFMT_8_BIT;

	mtk_ddp_write_mask(cmdq_pkt, OVL_CON_CLRFMT_BIT_DEPTH(bit_depth, idx),
			   &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_CLRFMT_EXT,
			   OVL_CON_CLRFMT_BIT_DEPTH_MASK(idx));
}
312
/*
 * mtk_ovl_config - program global OVL state for a new display mode
 * @dev: OVL device
 * @w: ROI width in pixels (ROI_SIZE is skipped when @w or @h is 0)
 * @h: ROI height in lines
 * @vrefresh: unused by this block
 * @bpc: unused by this block
 * @cmdq_pkt: optional CMDQ packet to batch the register writes into
 */
void mtk_ovl_config(struct device *dev, unsigned int w,
		    unsigned int h, unsigned int vrefresh,
		    unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);

	if (w != 0 && h != 0)
		mtk_ddp_write_relaxed(cmdq_pkt, h << 16 | w, &ovl->cmdq_reg, ovl->regs,
				      DISP_REG_OVL_ROI_SIZE);

	/*
	 * The background color must be opaque black (ARGB),
	 * otherwise the alpha blending will have no effect
	 */
	mtk_ddp_write_relaxed(cmdq_pkt, OVL_COLOR_ALPHA, &ovl->cmdq_reg,
			      ovl->regs, DISP_REG_OVL_ROI_BGCLR);

	/* Pulse the software reset to latch the new configuration */
	mtk_ddp_write(cmdq_pkt, 0x1, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_RST);
	mtk_ddp_write(cmdq_pkt, 0x0, &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_RST);
}
333
mtk_ovl_layer_nr(struct device * dev)334 unsigned int mtk_ovl_layer_nr(struct device *dev)
335 {
336 struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
337
338 return ovl->data->layer_nr;
339 }
340
mtk_ovl_supported_rotations(struct device * dev)341 unsigned int mtk_ovl_supported_rotations(struct device *dev)
342 {
343 return DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
344 DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
345 }
346
mtk_ovl_layer_check(struct device * dev,unsigned int idx,struct mtk_plane_state * mtk_state)347 int mtk_ovl_layer_check(struct device *dev, unsigned int idx,
348 struct mtk_plane_state *mtk_state)
349 {
350 struct drm_plane_state *state = &mtk_state->base;
351
352 /* check if any unsupported rotation is set */
353 if (state->rotation & ~mtk_ovl_supported_rotations(dev))
354 return -EINVAL;
355
356 /*
357 * TODO: Rotating/reflecting YUV buffers is not supported at this time.
358 * Only RGB[AX] variants are supported.
359 * Since DRM_MODE_ROTATE_0 means "no rotation", we should not
360 * reject layers with this property.
361 */
362 if (state->fb->format->is_yuv && (state->rotation & ~DRM_MODE_ROTATE_0))
363 return -EINVAL;
364
365 return 0;
366 }
367
mtk_ovl_layer_on(struct device * dev,unsigned int idx,struct cmdq_pkt * cmdq_pkt)368 void mtk_ovl_layer_on(struct device *dev, unsigned int idx,
369 struct cmdq_pkt *cmdq_pkt)
370 {
371 unsigned int gmc_thrshd_l;
372 unsigned int gmc_thrshd_h;
373 unsigned int gmc_value;
374 struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
375
376 mtk_ddp_write(cmdq_pkt, 0x1, &ovl->cmdq_reg, ovl->regs,
377 DISP_REG_OVL_RDMA_CTRL(idx));
378 gmc_thrshd_l = GMC_THRESHOLD_LOW >>
379 (GMC_THRESHOLD_BITS - ovl->data->gmc_bits);
380 gmc_thrshd_h = GMC_THRESHOLD_HIGH >>
381 (GMC_THRESHOLD_BITS - ovl->data->gmc_bits);
382 if (ovl->data->gmc_bits == 10)
383 gmc_value = gmc_thrshd_h | gmc_thrshd_h << 16;
384 else
385 gmc_value = gmc_thrshd_l | gmc_thrshd_l << 8 |
386 gmc_thrshd_h << 16 | gmc_thrshd_h << 24;
387 mtk_ddp_write(cmdq_pkt, gmc_value,
388 &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_RDMA_GMC(idx));
389 mtk_ddp_write_mask(cmdq_pkt, BIT(idx), &ovl->cmdq_reg, ovl->regs,
390 DISP_REG_OVL_SRC_CON, BIT(idx));
391 }
392
/*
 * mtk_ovl_layer_off - disable overlay layer @idx
 *
 * Clears the layer enable bit in SRC_CON, then stops its RDMA engine.
 */
void mtk_ovl_layer_off(struct device *dev, unsigned int idx,
		       struct cmdq_pkt *cmdq_pkt)
{
	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);

	mtk_ddp_write_mask(cmdq_pkt, 0, &ovl->cmdq_reg, ovl->regs,
			   DISP_REG_OVL_SRC_CON, BIT(idx));
	mtk_ddp_write(cmdq_pkt, 0, &ovl->cmdq_reg, ovl->regs,
		      DISP_REG_OVL_RDMA_CTRL(idx));
}
403
/*
 * mtk_ovl_fmt_convert - translate a DRM fourcc into OVL_CON format bits
 * @ovl: OVL instance; provides the SoC-specific RGB565/RGB888 encoding and
 *       the supported blend-mode mask
 * @state: plane state carrying the pending format and blend mode
 *
 * Returns the color-format field for DISP_REG_OVL_CON(n), including byte/RGB
 * swap bits, the premultiplied-alpha variants and the YUV-to-RGB matrix
 * enable. 10-bit formats share the 8-bit encodings here; the extra depth is
 * selected separately via mtk_ovl_set_bit_depth().
 */
static unsigned int mtk_ovl_fmt_convert(struct mtk_disp_ovl *ovl,
					struct mtk_plane_state *state)
{
	unsigned int fmt = state->pending.format;
	unsigned int blend_mode = DRM_MODE_BLEND_COVERAGE;

	/*
	 * For the platforms where OVL_CON_CLRFMT_MAN is defined in the hardware data sheet
	 * and supports premultiplied color formats, such as OVL_CON_CLRFMT_PARGB8888.
	 *
	 * Check blend_modes in the driver data to see if premultiplied mode is supported.
	 * If not, use coverage mode instead to set it to the supported color formats.
	 *
	 * Current DRM assumption is that alpha is default premultiplied, so the bitmask of
	 * blend_modes must include BIT(DRM_MODE_BLEND_PREMULTI). Otherwise, mtk_plane_init()
	 * will get an error return from drm_plane_create_blend_mode_property() and
	 * state->base.pixel_blend_mode should not be used.
	 */
	if (ovl->data->blend_modes & BIT(DRM_MODE_BLEND_PREMULTI))
		blend_mode = state->base.pixel_blend_mode;

	switch (fmt) {
	default:
	case DRM_FORMAT_RGB565:
		return OVL_CON_CLRFMT_RGB565(ovl);
	case DRM_FORMAT_BGR565:
		return OVL_CON_CLRFMT_RGB565(ovl) | OVL_CON_BYTE_SWAP;
	case DRM_FORMAT_RGB888:
		return OVL_CON_CLRFMT_RGB888(ovl);
	case DRM_FORMAT_BGR888:
		return OVL_CON_CLRFMT_RGB888(ovl) | OVL_CON_BYTE_SWAP;
	case DRM_FORMAT_RGBX8888:
	case DRM_FORMAT_RGBA8888:
	case DRM_FORMAT_RGBX1010102:
	case DRM_FORMAT_RGBA1010102:
		return blend_mode == DRM_MODE_BLEND_COVERAGE ?
		       OVL_CON_CLRFMT_RGBA8888 :
		       OVL_CON_CLRFMT_PRGBA8888;
	case DRM_FORMAT_BGRX8888:
	case DRM_FORMAT_BGRA8888:
	case DRM_FORMAT_BGRX1010102:
	case DRM_FORMAT_BGRA1010102:
		return blend_mode == DRM_MODE_BLEND_COVERAGE ?
		       OVL_CON_CLRFMT_BGRA8888 :
		       OVL_CON_CLRFMT_PBGRA8888;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		return blend_mode == DRM_MODE_BLEND_COVERAGE ?
		       OVL_CON_CLRFMT_ARGB8888 :
		       OVL_CON_CLRFMT_PARGB8888;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		return blend_mode == DRM_MODE_BLEND_COVERAGE ?
		       OVL_CON_CLRFMT_ABGR8888 :
		       OVL_CON_CLRFMT_PABGR8888;
	case DRM_FORMAT_UYVY:
		return OVL_CON_CLRFMT_UYVY | OVL_CON_MTX_YUV_TO_RGB;
	case DRM_FORMAT_YUYV:
		return OVL_CON_CLRFMT_YUYV | OVL_CON_MTX_YUV_TO_RGB;
	}
}
469
/*
 * mtk_ovl_afbc_layer_config - program the AFBC-specific registers of a layer
 *
 * For compressed (non-linear modifier) buffers this writes the AFBC header
 * address and pitch and selects the second sub-buffer in PITCH_MSB; for
 * linear buffers only the pitch MSB bits are written.
 */
static void mtk_ovl_afbc_layer_config(struct mtk_disp_ovl *ovl,
				      unsigned int idx,
				      struct mtk_plane_pending_state *pending,
				      struct cmdq_pkt *cmdq_pkt)
{
	unsigned int pitch_msb = pending->pitch >> 16;
	unsigned int hdr_pitch = pending->hdr_pitch;
	unsigned int hdr_addr = pending->hdr_addr;

	if (pending->modifier != DRM_FORMAT_MOD_LINEAR) {
		mtk_ddp_write_relaxed(cmdq_pkt, hdr_addr, &ovl->cmdq_reg, ovl->regs,
				      DISP_REG_OVL_HDR_ADDR(ovl, idx));
		mtk_ddp_write_relaxed(cmdq_pkt,
				      OVL_PITCH_MSB_2ND_SUBBUF | pitch_msb,
				      &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx));
		mtk_ddp_write_relaxed(cmdq_pkt, hdr_pitch, &ovl->cmdq_reg, ovl->regs,
				      DISP_REG_OVL_HDR_PITCH(ovl, idx));
	} else {
		mtk_ddp_write_relaxed(cmdq_pkt, pitch_msb,
				      &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH_MSB(idx));
	}
}
492
/*
 * mtk_ovl_layer_config - program one overlay layer from its pending state
 * @dev: OVL device
 * @idx: layer index
 * @state: plane state with the pending buffer/geometry/format
 * @cmdq_pkt: optional CMDQ packet to batch the register writes into
 *
 * Disables the layer when pending->enable is false; otherwise converts the
 * format, applies alpha/blend and rotation settings, writes the per-layer
 * registers and finally enables the layer.
 */
void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
			  struct mtk_plane_state *state,
			  struct cmdq_pkt *cmdq_pkt)
{
	struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
	struct mtk_plane_pending_state *pending = &state->pending;
	unsigned int addr = pending->addr;
	unsigned int pitch_lsb = pending->pitch & GENMASK(15, 0);
	unsigned int fmt = pending->format;
	unsigned int rotation = pending->rotation;
	unsigned int offset = (pending->y << 16) | pending->x;
	unsigned int src_size = (pending->height << 16) | pending->width;
	unsigned int blend_mode = state->base.pixel_blend_mode;
	unsigned int ignore_pixel_alpha = 0;
	unsigned int con;

	if (!pending->enable) {
		mtk_ovl_layer_off(dev, idx, cmdq_pkt);
		return;
	}

	con = mtk_ovl_fmt_convert(ovl, state);
	if (state->base.fb) {
		/* Plane-wide alpha value lives in the low byte of CON */
		con |= state->base.alpha & OVL_CON_ALPHA;

		/*
		 * For blend_modes supported SoCs, always enable alpha blending.
		 * For blend_modes unsupported SoCs, enable alpha blending when has_alpha is set.
		 */
		if (blend_mode || state->base.fb->format->has_alpha)
			con |= OVL_CON_AEN;

		/*
		 * Although the alpha channel can be ignored, CONST_BLD must be enabled
		 * for XRGB format, otherwise OVL will still read the value from memory.
		 * For RGB888 related formats, whether CONST_BLD is enabled or not won't
		 * affect the result. Therefore we use !has_alpha as the condition.
		 */
		if (blend_mode == DRM_MODE_BLEND_PIXEL_NONE || !state->base.fb->format->has_alpha)
			ignore_pixel_alpha = OVL_CONST_BLEND;
	}

	/*
	 * Treat rotate 180 as flip x + flip y, and XOR the original rotation value
	 * to flip x + flip y to support both in the same time.
	 */
	if (rotation & DRM_MODE_ROTATE_180)
		rotation ^= DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;

	if (rotation & DRM_MODE_REFLECT_Y) {
		con |= OVL_CON_VIRT_FLIP;
		/* Scan upward: start from the last line of the buffer */
		addr += (pending->height - 1) * pending->pitch;
	}

	if (rotation & DRM_MODE_REFLECT_X) {
		con |= OVL_CON_HORZ_FLIP;
		/* Scan leftward: start from the end of the first line */
		addr += pending->pitch - 1;
	}

	if (ovl->data->supports_afbc)
		mtk_ovl_set_afbc(ovl, cmdq_pkt, idx,
				 pending->modifier != DRM_FORMAT_MOD_LINEAR);

	mtk_ddp_write_relaxed(cmdq_pkt, con, &ovl->cmdq_reg, ovl->regs,
			      DISP_REG_OVL_CON(idx));
	mtk_ddp_write_relaxed(cmdq_pkt, pitch_lsb | ignore_pixel_alpha,
			      &ovl->cmdq_reg, ovl->regs, DISP_REG_OVL_PITCH(idx));
	mtk_ddp_write_relaxed(cmdq_pkt, src_size, &ovl->cmdq_reg, ovl->regs,
			      DISP_REG_OVL_SRC_SIZE(idx));
	mtk_ddp_write_relaxed(cmdq_pkt, offset, &ovl->cmdq_reg, ovl->regs,
			      DISP_REG_OVL_OFFSET(idx));
	mtk_ddp_write_relaxed(cmdq_pkt, addr, &ovl->cmdq_reg, ovl->regs,
			      DISP_REG_OVL_ADDR(ovl, idx));

	if (ovl->data->supports_afbc)
		mtk_ovl_afbc_layer_config(ovl, idx, pending, cmdq_pkt);

	mtk_ovl_set_bit_depth(dev, idx, fmt, cmdq_pkt);
	mtk_ovl_layer_on(dev, idx, cmdq_pkt);
}
573
mtk_ovl_bgclr_in_on(struct device * dev)574 void mtk_ovl_bgclr_in_on(struct device *dev)
575 {
576 struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
577 unsigned int reg;
578
579 reg = readl(ovl->regs + DISP_REG_OVL_DATAPATH_CON);
580 reg = reg | OVL_BGCLR_SEL_IN;
581 writel(reg, ovl->regs + DISP_REG_OVL_DATAPATH_CON);
582 }
583
mtk_ovl_bgclr_in_off(struct device * dev)584 void mtk_ovl_bgclr_in_off(struct device *dev)
585 {
586 struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
587 unsigned int reg;
588
589 reg = readl(ovl->regs + DISP_REG_OVL_DATAPATH_CON);
590 reg = reg & ~OVL_BGCLR_SEL_IN;
591 writel(reg, ovl->regs + DISP_REG_OVL_DATAPATH_CON);
592 }
593
/* Nothing to wire up at component-bind time; setup happens in probe */
static int mtk_disp_ovl_bind(struct device *dev, struct device *master,
			     void *data)
{
	return 0;
}
599
/* Counterpart of mtk_disp_ovl_bind(); intentionally empty */
static void mtk_disp_ovl_unbind(struct device *dev, struct device *master,
				void *data)
{
}
604
/* Component framework callbacks registered by probe() */
static const struct component_ops mtk_disp_ovl_component_ops = {
	.bind	= mtk_disp_ovl_bind,
	.unbind = mtk_disp_ovl_unbind,
};
609
/*
 * mtk_disp_ovl_probe - acquire resources and register the OVL component
 *
 * Grabs IRQ, clock, MMIO region and (optionally) the CMDQ client register,
 * installs the interrupt handler, enables runtime PM and adds the device to
 * the component framework. All resources are devm-managed; only runtime PM
 * needs explicit rollback on the component_add() failure path.
 */
static int mtk_disp_ovl_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mtk_disp_ovl *priv;
	int irq;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk))
		return dev_err_probe(dev, PTR_ERR(priv->clk),
				     "failed to get ovl clk\n");

	priv->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->regs))
		return dev_err_probe(dev, PTR_ERR(priv->regs),
				     "failed to ioremap ovl\n");
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	/* CMDQ is optional: fall back to CPU register writes when absent */
	ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
	if (ret)
		dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
#endif

	priv->data = of_device_get_match_data(dev);
	platform_set_drvdata(pdev, priv);

	ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
			       IRQF_TRIGGER_NONE, dev_name(dev), priv);
	if (ret < 0)
		return dev_err_probe(dev, ret, "Failed to request irq %d\n", irq);

	pm_runtime_enable(dev);

	ret = component_add(dev, &mtk_disp_ovl_component_ops);
	if (ret) {
		pm_runtime_disable(dev);
		return dev_err_probe(dev, ret, "Failed to add component\n");
	}

	return 0;
}
658
mtk_disp_ovl_remove(struct platform_device * pdev)659 static void mtk_disp_ovl_remove(struct platform_device *pdev)
660 {
661 component_del(&pdev->dev, &mtk_disp_ovl_component_ops);
662 pm_runtime_disable(&pdev->dev);
663 }
664
/* MT2701: 4 layers, 8-bit GMC, layer address bank at 0x0040 */
static const struct mtk_disp_ovl_data mt2701_ovl_driver_data = {
	.addr = DISP_REG_OVL_ADDR_MT2701,
	.gmc_bits = 8,
	.layer_nr = 4,
	.fmt_rgb565_is_0 = false,
	.formats = mt8173_formats,
	.num_formats = ARRAY_SIZE(mt8173_formats),
};

/* MT8173: 4 layers, 8-bit GMC, layer address bank at 0x0f40 */
static const struct mtk_disp_ovl_data mt8173_ovl_driver_data = {
	.addr = DISP_REG_OVL_ADDR_MT8173,
	.gmc_bits = 8,
	.layer_nr = 4,
	.fmt_rgb565_is_0 = true,
	.formats = mt8173_formats,
	.num_formats = ARRAY_SIZE(mt8173_formats),
};

/* MT8183 main OVL: 4 layers, 10-bit GMC */
static const struct mtk_disp_ovl_data mt8183_ovl_driver_data = {
	.addr = DISP_REG_OVL_ADDR_MT8173,
	.gmc_bits = 10,
	.layer_nr = 4,
	.fmt_rgb565_is_0 = true,
	.formats = mt8173_formats,
	.num_formats = ARRAY_SIZE(mt8173_formats),
};

/* MT8183 2-layer OVL variant */
static const struct mtk_disp_ovl_data mt8183_ovl_2l_driver_data = {
	.addr = DISP_REG_OVL_ADDR_MT8173,
	.gmc_bits = 10,
	.layer_nr = 2,
	.fmt_rgb565_is_0 = true,
	.formats = mt8173_formats,
	.num_formats = ARRAY_SIZE(mt8173_formats),
};

/* MT8192 main OVL: needs SMI ID enable, supports the full blend-mode set */
static const struct mtk_disp_ovl_data mt8192_ovl_driver_data = {
	.addr = DISP_REG_OVL_ADDR_MT8173,
	.gmc_bits = 10,
	.layer_nr = 4,
	.fmt_rgb565_is_0 = true,
	.smi_id_en = true,
	.blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) |
		       BIT(DRM_MODE_BLEND_COVERAGE) |
		       BIT(DRM_MODE_BLEND_PIXEL_NONE),
	.formats = mt8173_formats,
	.num_formats = ARRAY_SIZE(mt8173_formats),
};

/* MT8192 2-layer OVL variant */
static const struct mtk_disp_ovl_data mt8192_ovl_2l_driver_data = {
	.addr = DISP_REG_OVL_ADDR_MT8173,
	.gmc_bits = 10,
	.layer_nr = 2,
	.fmt_rgb565_is_0 = true,
	.smi_id_en = true,
	.blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) |
		       BIT(DRM_MODE_BLEND_COVERAGE) |
		       BIT(DRM_MODE_BLEND_PIXEL_NONE),
	.formats = mt8173_formats,
	.num_formats = ARRAY_SIZE(mt8173_formats),
};

/* MT8195: adds AFBC, 10-bit formats and the CLRFMT_EXT depth register */
static const struct mtk_disp_ovl_data mt8195_ovl_driver_data = {
	.addr = DISP_REG_OVL_ADDR_MT8173,
	.gmc_bits = 10,
	.layer_nr = 4,
	.fmt_rgb565_is_0 = true,
	.smi_id_en = true,
	.supports_afbc = true,
	.blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) |
		       BIT(DRM_MODE_BLEND_COVERAGE) |
		       BIT(DRM_MODE_BLEND_PIXEL_NONE),
	.formats = mt8195_formats,
	.num_formats = ARRAY_SIZE(mt8195_formats),
	.supports_clrfmt_ext = true,
};
741
/* Devicetree compatible strings mapped to their SoC configuration */
static const struct of_device_id mtk_disp_ovl_driver_dt_match[] = {
	{ .compatible = "mediatek,mt2701-disp-ovl",
	  .data = &mt2701_ovl_driver_data},
	{ .compatible = "mediatek,mt8173-disp-ovl",
	  .data = &mt8173_ovl_driver_data},
	{ .compatible = "mediatek,mt8183-disp-ovl",
	  .data = &mt8183_ovl_driver_data},
	{ .compatible = "mediatek,mt8183-disp-ovl-2l",
	  .data = &mt8183_ovl_2l_driver_data},
	{ .compatible = "mediatek,mt8192-disp-ovl",
	  .data = &mt8192_ovl_driver_data},
	{ .compatible = "mediatek,mt8192-disp-ovl-2l",
	  .data = &mt8192_ovl_2l_driver_data},
	{ .compatible = "mediatek,mt8195-disp-ovl",
	  .data = &mt8195_ovl_driver_data},
	{},
};
MODULE_DEVICE_TABLE(of, mtk_disp_ovl_driver_dt_match);
760
/* Platform driver; registered by the mtk_drm_drv module (non-static) */
struct platform_driver mtk_disp_ovl_driver = {
	.probe = mtk_disp_ovl_probe,
	.remove = mtk_disp_ovl_remove,
	.driver = {
		.name = "mediatek-disp-ovl",
		.of_match_table = mtk_disp_ovl_driver_dt_match,
	},
};
769