xref: /linux/drivers/staging/media/sunxi/cedrus/cedrus_hw.c (revision cb9f145f638d7afa633632a9290d6ad06caeb8ee)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Cedrus VPU driver
4  *
5  * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
6  * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
7  * Copyright (C) 2018 Bootlin
8  *
9  * Based on the vim2m driver, that is:
10  *
11  * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
12  * Pawel Osciak, <pawel@osciak.com>
13  * Marek Szyprowski, <m.szyprowski@samsung.com>
14  */
15 
16 #include <linux/platform_device.h>
17 #include <linux/of.h>
18 #include <linux/of_reserved_mem.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/interrupt.h>
21 #include <linux/clk.h>
22 #include <linux/pm_runtime.h>
23 #include <linux/regmap.h>
24 #include <linux/reset.h>
25 #include <linux/soc/sunxi/sunxi_sram.h>
26 
27 #include <media/videobuf2-core.h>
28 #include <media/v4l2-mem2mem.h>
29 
30 #include "cedrus.h"
31 #include "cedrus_hw.h"
32 #include "cedrus_regs.h"
33 
cedrus_engine_enable(struct cedrus_ctx * ctx)34 int cedrus_engine_enable(struct cedrus_ctx *ctx)
35 {
36 	u32 reg = 0;
37 
38 	/*
39 	 * FIXME: This is only valid on 32-bits DDR's, we should test
40 	 * it on the A13/A33.
41 	 */
42 	reg |= VE_MODE_REC_WR_MODE_2MB;
43 	reg |= VE_MODE_DDR_MODE_BW_128;
44 
45 	switch (ctx->src_fmt.pixelformat) {
46 	case V4L2_PIX_FMT_MPEG2_SLICE:
47 		reg |= VE_MODE_DEC_MPEG;
48 		break;
49 
50 	/* H.264 and VP8 both use the same decoding mode bit. */
51 	case V4L2_PIX_FMT_H264_SLICE:
52 	case V4L2_PIX_FMT_VP8_FRAME:
53 		reg |= VE_MODE_DEC_H264;
54 		break;
55 
56 	case V4L2_PIX_FMT_HEVC_SLICE:
57 		reg |= VE_MODE_DEC_H265;
58 		break;
59 
60 	default:
61 		return -EINVAL;
62 	}
63 
64 	if (ctx->src_fmt.width == 4096)
65 		reg |= VE_MODE_PIC_WIDTH_IS_4096;
66 	if (ctx->src_fmt.width > 2048)
67 		reg |= VE_MODE_PIC_WIDTH_MORE_2048;
68 
69 	cedrus_write(ctx->dev, VE_MODE, reg);
70 
71 	return 0;
72 }
73 
/* Power the decoding engine back down by writing the disabled VE_MODE value. */
void cedrus_engine_disable(struct cedrus_dev *dev)
{
	cedrus_write(dev, VE_MODE, VE_MODE_DISABLED);
}
78 
cedrus_dst_format_set(struct cedrus_dev * dev,struct v4l2_pix_format * fmt)79 void cedrus_dst_format_set(struct cedrus_dev *dev,
80 			   struct v4l2_pix_format *fmt)
81 {
82 	unsigned int width = fmt->width;
83 	unsigned int height = fmt->height;
84 	u32 chroma_size;
85 	u32 reg;
86 
87 	switch (fmt->pixelformat) {
88 	case V4L2_PIX_FMT_NV12:
89 	case V4L2_PIX_FMT_NV21:
90 	case V4L2_PIX_FMT_YUV420:
91 	case V4L2_PIX_FMT_YVU420:
92 		chroma_size = ALIGN(width, 16) * ALIGN(height, 16) / 2;
93 
94 		switch (fmt->pixelformat) {
95 		case V4L2_PIX_FMT_NV12:
96 			reg = VE_PRIMARY_OUT_FMT_NV12;
97 			break;
98 		case V4L2_PIX_FMT_NV21:
99 			reg = VE_PRIMARY_OUT_FMT_NV21;
100 			break;
101 		case V4L2_PIX_FMT_YUV420:
102 			reg = VE_PRIMARY_OUT_FMT_YU12;
103 			break;
104 		case V4L2_PIX_FMT_YVU420:
105 		default:
106 			reg = VE_PRIMARY_OUT_FMT_YV12;
107 			break;
108 		}
109 		cedrus_write(dev, VE_PRIMARY_OUT_FMT, reg);
110 
111 		reg = chroma_size / 2;
112 		cedrus_write(dev, VE_PRIMARY_CHROMA_BUF_LEN, reg);
113 
114 		reg = VE_PRIMARY_FB_LINE_STRIDE_LUMA(ALIGN(width, 16)) |
115 		      VE_PRIMARY_FB_LINE_STRIDE_CHROMA(ALIGN(width, 16) / 2);
116 		cedrus_write(dev, VE_PRIMARY_FB_LINE_STRIDE, reg);
117 
118 		break;
119 	case V4L2_PIX_FMT_NV12_32L32:
120 	default:
121 		reg = VE_PRIMARY_OUT_FMT_TILED_32_NV12;
122 		cedrus_write(dev, VE_PRIMARY_OUT_FMT, reg);
123 
124 		reg = VE_SECONDARY_OUT_FMT_TILED_32_NV12;
125 		cedrus_write(dev, VE_CHROMA_BUF_LEN, reg);
126 
127 		break;
128 	}
129 }
130 
/*
 * Decode-completion interrupt handler: query the active codec backend for
 * the hardware status and finish the current m2m job accordingly.
 */
static irqreturn_t cedrus_irq(int irq, void *data)
{
	struct cedrus_dev *dev = data;
	struct cedrus_ctx *ctx;
	enum vb2_buffer_state state;
	enum cedrus_irq_status status;

	/*
	 * If cancel_delayed_work returns false it means watchdog already
	 * executed and finished the job.
	 */
	if (!cancel_delayed_work(&dev->watchdog_work))
		return IRQ_HANDLED;

	ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
	if (!ctx) {
		v4l2_err(&dev->v4l2_dev,
			 "Instance released before the end of transaction\n");
		return IRQ_NONE;
	}

	/* Let the codec backend decide whether this interrupt is ours. */
	status = ctx->current_codec->irq_status(ctx);
	if (status == CEDRUS_IRQ_NONE)
		return IRQ_NONE;

	/* Quiesce and acknowledge the engine before completing the job. */
	ctx->current_codec->irq_disable(ctx);
	ctx->current_codec->irq_clear(ctx);

	if (status == CEDRUS_IRQ_ERROR)
		state = VB2_BUF_STATE_ERROR;
	else
		state = VB2_BUF_STATE_DONE;

	v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
					 state);

	return IRQ_HANDLED;
}
169 
cedrus_watchdog(struct work_struct * work)170 void cedrus_watchdog(struct work_struct *work)
171 {
172 	struct cedrus_dev *dev;
173 	struct cedrus_ctx *ctx;
174 
175 	dev = container_of(to_delayed_work(work),
176 			   struct cedrus_dev, watchdog_work);
177 
178 	ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
179 	if (!ctx)
180 		return;
181 
182 	v4l2_err(&dev->v4l2_dev, "frame processing timed out!\n");
183 	reset_control_reset(dev->rstc);
184 	v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
185 					 VB2_BUF_STATE_ERROR);
186 }
187 
cedrus_hw_suspend(struct device * device)188 int cedrus_hw_suspend(struct device *device)
189 {
190 	struct cedrus_dev *dev = dev_get_drvdata(device);
191 
192 	clk_disable_unprepare(dev->ram_clk);
193 	clk_disable_unprepare(dev->mod_clk);
194 	clk_disable_unprepare(dev->ahb_clk);
195 
196 	reset_control_assert(dev->rstc);
197 
198 	return 0;
199 }
200 
cedrus_hw_resume(struct device * device)201 int cedrus_hw_resume(struct device *device)
202 {
203 	struct cedrus_dev *dev = dev_get_drvdata(device);
204 	int ret;
205 
206 	ret = reset_control_reset(dev->rstc);
207 	if (ret) {
208 		dev_err(dev->dev, "Failed to apply reset\n");
209 
210 		return ret;
211 	}
212 
213 	ret = clk_prepare_enable(dev->ahb_clk);
214 	if (ret) {
215 		dev_err(dev->dev, "Failed to enable AHB clock\n");
216 
217 		goto err_rst;
218 	}
219 
220 	ret = clk_prepare_enable(dev->mod_clk);
221 	if (ret) {
222 		dev_err(dev->dev, "Failed to enable MOD clock\n");
223 
224 		goto err_ahb_clk;
225 	}
226 
227 	ret = clk_prepare_enable(dev->ram_clk);
228 	if (ret) {
229 		dev_err(dev->dev, "Failed to enable RAM clock\n");
230 
231 		goto err_mod_clk;
232 	}
233 
234 	return 0;
235 
236 err_mod_clk:
237 	clk_disable_unprepare(dev->mod_clk);
238 err_ahb_clk:
239 	clk_disable_unprepare(dev->ahb_clk);
240 err_rst:
241 	reset_control_assert(dev->rstc);
242 
243 	return ret;
244 }
245 
cedrus_hw_probe(struct cedrus_dev * dev)246 int cedrus_hw_probe(struct cedrus_dev *dev)
247 {
248 	const struct cedrus_variant *variant;
249 	int irq_dec;
250 	int ret;
251 
252 	variant = of_device_get_match_data(dev->dev);
253 	if (!variant)
254 		return -EINVAL;
255 
256 	dev->capabilities = variant->capabilities;
257 
258 	irq_dec = platform_get_irq(dev->pdev, 0);
259 	if (irq_dec <= 0)
260 		return irq_dec;
261 	ret = devm_request_irq(dev->dev, irq_dec, cedrus_irq,
262 			       0, dev_name(dev->dev), dev);
263 	if (ret) {
264 		dev_err(dev->dev, "Failed to request IRQ\n");
265 
266 		return ret;
267 	}
268 
269 	ret = of_reserved_mem_device_init(dev->dev);
270 	if (ret && ret != -ENODEV) {
271 		dev_err(dev->dev, "Failed to reserve memory\n");
272 
273 		return ret;
274 	}
275 
276 	ret = sunxi_sram_claim(dev->dev);
277 	if (ret) {
278 		dev_err(dev->dev, "Failed to claim SRAM\n");
279 
280 		goto err_mem;
281 	}
282 
283 	dev->ahb_clk = devm_clk_get(dev->dev, "ahb");
284 	if (IS_ERR(dev->ahb_clk)) {
285 		dev_err(dev->dev, "Failed to get AHB clock\n");
286 
287 		ret = PTR_ERR(dev->ahb_clk);
288 		goto err_sram;
289 	}
290 
291 	dev->mod_clk = devm_clk_get(dev->dev, "mod");
292 	if (IS_ERR(dev->mod_clk)) {
293 		dev_err(dev->dev, "Failed to get MOD clock\n");
294 
295 		ret = PTR_ERR(dev->mod_clk);
296 		goto err_sram;
297 	}
298 
299 	dev->ram_clk = devm_clk_get(dev->dev, "ram");
300 	if (IS_ERR(dev->ram_clk)) {
301 		dev_err(dev->dev, "Failed to get RAM clock\n");
302 
303 		ret = PTR_ERR(dev->ram_clk);
304 		goto err_sram;
305 	}
306 
307 	dev->rstc = devm_reset_control_get(dev->dev, NULL);
308 	if (IS_ERR(dev->rstc)) {
309 		dev_err(dev->dev, "Failed to get reset control\n");
310 
311 		ret = PTR_ERR(dev->rstc);
312 		goto err_sram;
313 	}
314 
315 	dev->base = devm_platform_ioremap_resource(dev->pdev, 0);
316 	if (IS_ERR(dev->base)) {
317 		dev_err(dev->dev, "Failed to map registers\n");
318 
319 		ret = PTR_ERR(dev->base);
320 		goto err_sram;
321 	}
322 
323 	ret = clk_set_rate(dev->mod_clk, variant->mod_rate);
324 	if (ret) {
325 		dev_err(dev->dev, "Failed to set clock rate\n");
326 
327 		goto err_sram;
328 	}
329 
330 	pm_runtime_enable(dev->dev);
331 	if (!pm_runtime_enabled(dev->dev)) {
332 		ret = cedrus_hw_resume(dev->dev);
333 		if (ret)
334 			goto err_pm;
335 	}
336 
337 	return 0;
338 
339 err_pm:
340 	pm_runtime_disable(dev->dev);
341 err_sram:
342 	sunxi_sram_release(dev->dev);
343 err_mem:
344 	of_reserved_mem_device_release(dev->dev);
345 
346 	return ret;
347 }
348 
/* Teardown at driver removal: undo what cedrus_hw_probe() set up. */
void cedrus_hw_remove(struct cedrus_dev *dev)
{
	pm_runtime_disable(dev->dev);
	/* If the hardware is still powered, suspend it by hand. */
	if (!pm_runtime_status_suspended(dev->dev))
		cedrus_hw_suspend(dev->dev);

	sunxi_sram_release(dev->dev);

	of_reserved_mem_device_release(dev->dev);
}
359