/*
 * Samsung TV Mixer driver
 *
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *
 * Tomasz Stanislawski, <t.stanislaws@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 2 of the License,
 * or (at your option) any later version.
 */

#include "mixer.h"

#include <media/v4l2-ioctl.h>
#include <linux/videodev2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/timer.h>
#include <media/videobuf2-dma-contig.h>

static int find_reg_callback(struct device *dev, void *p)
{
	struct v4l2_subdev **sd = p;

	*sd = dev_get_drvdata(dev);
	/* non-zero value stops iteration */
	return 1;
}

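/*
 * Find the platform driver registered under 'module_name', fetch the
 * v4l2_subdev that one of its bound devices stored in drvdata and register
 * it with the mixer's v4l2_device.  Returns the subdev or NULL on failure.
 */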
static struct v4l2_subdev *find_and_register_subdev(
	struct mxr_device *mdev, char *module_name)
{
	struct device_driver *drv;
	struct v4l2_subdev *sd = NULL;
	int ret;

	/* TODO: add waiting until probe is finished */
	drv = driver_find(module_name, &platform_bus_type);
	if (!drv) {
		mxr_warn(mdev, "module %s is missing\n", module_name);
		return NULL;
	}
	/* driver refcnt is increased, it is safe to iterate over devices */
	ret = driver_for_each_device(drv, NULL, &sd, find_reg_callback);
	/* ret == 0 means that find_reg_callback was never executed */
	if (sd == NULL) {
		mxr_warn(mdev, "module %s provides no subdev!\n", module_name);
		goto done;
	}
	/* v4l2_device_register_subdev detects if sd is NULL */
	ret = v4l2_device_register_subdev(&mdev->v4l2_dev, sd);
	if (ret) {
		mxr_warn(mdev, "failed to register subdev %s\n", sd->name);
		sd = NULL;
	}

done:
	put_driver(drv);
	return sd;
}

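/*
 * Initialize the V4L2 part of the mixer: register the v4l2_device, create
 * the vb2 DMA-contig allocator context and register every output subdev
 * described in 'output_conf'.  Fails with -ENODEV if no output is found.
 */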
int __devinit mxr_acquire_video(struct mxr_device *mdev,
	struct mxr_output_conf *output_conf, int output_count)
{
	struct device *dev = mdev->dev;
	struct v4l2_device *v4l2_dev = &mdev->v4l2_dev;
	int i;
	int ret = 0;
	struct v4l2_subdev *sd;

	strlcpy(v4l2_dev->name, dev_name(mdev->dev), sizeof(v4l2_dev->name));
	/* prepare context for V4L2 device */
	ret = v4l2_device_register(dev, v4l2_dev);
	if (ret) {
		mxr_err(mdev, "could not register v4l2 device.\n");
		goto fail;
	}

	mdev->alloc_ctx = vb2_dma_contig_init_ctx(mdev->dev);
	if (IS_ERR_OR_NULL(mdev->alloc_ctx)) {
		mxr_err(mdev, "could not acquire vb2 allocator\n");
		goto fail_v4l2_dev;
	}

	/* registering outputs */
	mdev->output_cnt = 0;
	for (i = 0; i < output_count; ++i) {
		struct mxr_output_conf *conf = &output_conf[i];
		struct mxr_output *out;

		sd = find_and_register_subdev(mdev, conf->module_name);
		/* trying to register next output */
		if (sd == NULL)
			continue;
		out = kzalloc(sizeof *out, GFP_KERNEL);
		if (out == NULL) {
			mxr_err(mdev, "no memory for '%s'\n",
				conf->output_name);
			ret = -ENOMEM;
			/* registered subdevs are removed in fail_v4l2_dev */
			goto fail_output;
		}
		strlcpy(out->name, conf->output_name, sizeof(out->name));
		out->sd = sd;
		out->cookie = conf->cookie;
		mdev->output[mdev->output_cnt++] = out;
		mxr_info(mdev, "added output '%s' from module '%s'\n",
			conf->output_name, conf->module_name);
		/* checking if maximal number of outputs is reached */
		if (mdev->output_cnt >= MXR_MAX_OUTPUTS)
			break;
	}

	if (mdev->output_cnt == 0) {
		mxr_err(mdev, "failed to register any output\n");
		ret = -ENODEV;
		/* skipping fail_output because there is nothing to free */
		goto fail_vb2_allocator;
	}

	return 0;

fail_output:
	/* kfree is NULL-safe */
	for (i = 0; i < mdev->output_cnt; ++i)
		kfree(mdev->output[i]);
	memset(mdev->output, 0, sizeof mdev->output);

fail_vb2_allocator:
	/* freeing allocator context */
	vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);

fail_v4l2_dev:
	/* NOTE: automatically unregister all subdevs */
	v4l2_device_unregister(v4l2_dev);

fail:
	return ret;
}

void __devexit mxr_release_video(struct mxr_device *mdev)
{
	int i;

	/* kfree is NULL-safe */
	for (i = 0; i < mdev->output_cnt; ++i)
		kfree(mdev->output[i]);

	vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);
	v4l2_device_unregister(&mdev->v4l2_dev);
}

static int mxr_querycap(struct file *file, void *priv,
	struct v4l2_capability *cap)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	strlcpy(cap->driver, MXR_DRIVER_NAME, sizeof cap->driver);
	strlcpy(cap->card, layer->vfd.name, sizeof cap->card);
	sprintf(cap->bus_info, "%d", layer->idx);
	cap->version = KERNEL_VERSION(0, 1, 0);
	cap->capabilities = V4L2_CAP_STREAMING |
		V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_OUTPUT_MPLANE;

	return 0;
}

static void mxr_geometry_dump(struct mxr_device *mdev, struct mxr_geometry *geo)
{
	mxr_dbg(mdev, "src.full_size = (%u, %u)\n",
		geo->src.full_width, geo->src.full_height);
	mxr_dbg(mdev, "src.size = (%u, %u)\n",
		geo->src.width, geo->src.height);
	mxr_dbg(mdev, "src.offset = (%u, %u)\n",
		geo->src.x_offset, geo->src.y_offset);
	mxr_dbg(mdev, "dst.full_size = (%u, %u)\n",
		geo->dst.full_width, geo->dst.full_height);
	mxr_dbg(mdev, "dst.size = (%u, %u)\n",
		geo->dst.width, geo->dst.height);
	mxr_dbg(mdev, "dst.offset = (%u, %u)\n",
		geo->dst.x_offset, geo->dst.y_offset);
	mxr_dbg(mdev, "ratio = (%u, %u)\n",
		geo->x_ratio, geo->y_ratio);
}

static void mxr_layer_default_geo(struct mxr_layer *layer)
{
	struct mxr_device *mdev = layer->mdev;
	struct v4l2_mbus_framefmt mbus_fmt;

	memset(&layer->geo, 0, sizeof layer->geo);

	mxr_get_mbus_fmt(mdev, &mbus_fmt);

	layer->geo.dst.full_width = mbus_fmt.width;
	layer->geo.dst.full_height = mbus_fmt.height;
	layer->geo.dst.width = layer->geo.dst.full_width;
	layer->geo.dst.height = layer->geo.dst.full_height;
	layer->geo.dst.field = mbus_fmt.field;

	layer->geo.src.full_width = mbus_fmt.width;
	layer->geo.src.full_height = mbus_fmt.height;
	layer->geo.src.width = layer->geo.src.full_width;
	layer->geo.src.height = layer->geo.src.full_height;

	mxr_geometry_dump(mdev, &layer->geo);
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
	mxr_geometry_dump(mdev, &layer->geo);
}

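/* Refresh the layer's sink geometry after the output bus format has changed. */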
static void mxr_layer_update_output(struct mxr_layer *layer)
{
	struct mxr_device *mdev = layer->mdev;
	struct v4l2_mbus_framefmt mbus_fmt;

	mxr_get_mbus_fmt(mdev, &mbus_fmt);
	/* checking if update is needed */
	if (layer->geo.dst.full_width == mbus_fmt.width &&
		layer->geo.dst.full_height == mbus_fmt.height)
		return;

	layer->geo.dst.full_width = mbus_fmt.width;
	layer->geo.dst.full_height = mbus_fmt.height;
	layer->geo.dst.field = mbus_fmt.field;
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);

	mxr_geometry_dump(mdev, &layer->geo);
}

static const struct mxr_format *find_format_by_fourcc(
	struct mxr_layer *layer, unsigned long fourcc);
static const struct mxr_format *find_format_by_index(
	struct mxr_layer *layer, unsigned long index);

static int mxr_enum_fmt(struct file *file, void *priv,
	struct v4l2_fmtdesc *f)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	const struct mxr_format *fmt;

	mxr_dbg(mdev, "%s\n", __func__);
	fmt = find_format_by_index(layer, f->index);
	if (fmt == NULL)
		return -EINVAL;

	strlcpy(f->description, fmt->name, sizeof(f->description));
	f->pixelformat = fmt->fourcc;

	return 0;
}

static unsigned int divup(unsigned int dividend, unsigned int divisor)
{
	return (dividend + divisor - 1) / divisor;
}

unsigned long mxr_get_plane_size(const struct mxr_block *blk,
	unsigned int width, unsigned int height)
{
	unsigned int bl_width = divup(width, blk->width);
	unsigned int bl_height = divup(height, blk->height);

	return bl_width * bl_height * blk->size;
}

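/*
 * Fill sizeimage and bytesperline for every subframe of 'fmt' at the given
 * resolution; planes that share a subframe have their sizes accumulated.
 */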
static void mxr_mplane_fill(struct v4l2_plane_pix_format *planes,
	const struct mxr_format *fmt, u32 width, u32 height)
{
	int i;

	/* checking if nothing to fill */
	if (!planes)
		return;

	memset(planes, 0, sizeof(*planes) * fmt->num_subframes);
	for (i = 0; i < fmt->num_planes; ++i) {
		struct v4l2_plane_pix_format *plane = planes
			+ fmt->plane2subframe[i];
		const struct mxr_block *blk = &fmt->plane[i];
		u32 bl_width = divup(width, blk->width);
		u32 bl_height = divup(height, blk->height);
		u32 sizeimage = bl_width * bl_height * blk->size;
		u16 bytesperline = bl_width * blk->size / blk->height;

		plane->sizeimage += sizeimage;
		plane->bytesperline = max(plane->bytesperline, bytesperline);
	}
}

static int mxr_g_fmt(struct file *file, void *priv,
			     struct v4l2_format *f)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	pix->width = layer->geo.src.full_width;
	pix->height = layer->geo.src.full_height;
	pix->field = V4L2_FIELD_NONE;
	pix->pixelformat = layer->fmt->fourcc;
	pix->colorspace = layer->fmt->colorspace;
	mxr_mplane_fill(pix->plane_fmt, layer->fmt, pix->width, pix->height);

	return 0;
}

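/*
 * Set a new pixel format and run the geometry through the SOURCE and CROP
 * fixup stages so the source, crop and full-size rectangles stay consistent.
 */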
static int mxr_s_fmt(struct file *file, void *priv,
	struct v4l2_format *f)
{
	struct mxr_layer *layer = video_drvdata(file);
	const struct mxr_format *fmt;
	struct v4l2_pix_format_mplane *pix;
	struct mxr_device *mdev = layer->mdev;
	struct mxr_geometry *geo = &layer->geo;

	mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);

	pix = &f->fmt.pix_mp;
	fmt = find_format_by_fourcc(layer, pix->pixelformat);
	if (fmt == NULL) {
		mxr_warn(mdev, "unrecognized fourcc: %08x\n",
			pix->pixelformat);
		return -EINVAL;
	}
	layer->fmt = fmt;
	/* set source size to highest accepted value */
	geo->src.full_width = max(geo->dst.full_width, pix->width);
	geo->src.full_height = max(geo->dst.full_height, pix->height);
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
	mxr_geometry_dump(mdev, &layer->geo);
	/* set cropping to total visible screen */
	geo->src.width = pix->width;
	geo->src.height = pix->height;
	geo->src.x_offset = 0;
	geo->src.y_offset = 0;
	/* assure consistency of geometry */
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
	mxr_geometry_dump(mdev, &layer->geo);
	/* set full size to lowest possible value */
	geo->src.full_width = 0;
	geo->src.full_height = 0;
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
	mxr_geometry_dump(mdev, &layer->geo);

	/* returning results */
	mxr_g_fmt(file, priv, f);

	return 0;
}

static int mxr_g_selection(struct file *file, void *fh,
	struct v4l2_selection *s)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_geometry *geo = &layer->geo;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
		s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
		return -EINVAL;

	switch (s->target) {
	case V4L2_SEL_TGT_CROP_ACTIVE:
		s->r.left = geo->src.x_offset;
		s->r.top = geo->src.y_offset;
		s->r.width = geo->src.width;
		s->r.height = geo->src.height;
		break;
	case V4L2_SEL_TGT_CROP_DEFAULT:
	case V4L2_SEL_TGT_CROP_BOUNDS:
		s->r.left = 0;
		s->r.top = 0;
		s->r.width = geo->src.full_width;
		s->r.height = geo->src.full_height;
		break;
	case V4L2_SEL_TGT_COMPOSE_ACTIVE:
	case V4L2_SEL_TGT_COMPOSE_PADDED:
		s->r.left = geo->dst.x_offset;
		s->r.top = geo->dst.y_offset;
		s->r.width = geo->dst.width;
		s->r.height = geo->dst.height;
		break;
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
		s->r.left = 0;
		s->r.top = 0;
		s->r.width = geo->dst.full_width;
		s->r.height = geo->dst.full_height;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* returns 1 if rectangle 'a' is inside 'b' */
static int mxr_is_rect_inside(struct v4l2_rect *a, struct v4l2_rect *b)
{
	if (a->left < b->left)
		return 0;
	if (a->top < b->top)
		return 0;
	if (a->left + a->width > b->left + b->width)
		return 0;
	if (a->top + a->height > b->top + b->height)
		return 0;
	return 1;
}

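/*
 * Set a crop or compose rectangle.  Read-only targets only report current
 * bounds; writable targets go through fix_geometry() and are rolled back
 * with -ERANGE if the result violates the LE/GE selection flags.
 */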
static int mxr_s_selection(struct file *file, void *fh,
	struct v4l2_selection *s)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_geometry *geo = &layer->geo;
	struct mxr_crop *target = NULL;
	enum mxr_geometry_stage stage;
	struct mxr_geometry tmp;
	struct v4l2_rect res;

	memset(&res, 0, sizeof res);

	mxr_dbg(layer->mdev, "%s: rect: %dx%d@%d,%d\n", __func__,
		s->r.width, s->r.height, s->r.left, s->r.top);

	if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
		s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
		return -EINVAL;

	switch (s->target) {
	/* ignore read-only targets */
	case V4L2_SEL_TGT_CROP_DEFAULT:
	case V4L2_SEL_TGT_CROP_BOUNDS:
		res.width = geo->src.full_width;
		res.height = geo->src.full_height;
		break;

	/* ignore read-only targets */
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
		res.width = geo->dst.full_width;
		res.height = geo->dst.full_height;
		break;

	case V4L2_SEL_TGT_CROP_ACTIVE:
		target = &geo->src;
		stage = MXR_GEOMETRY_CROP;
		break;
	case V4L2_SEL_TGT_COMPOSE_ACTIVE:
	case V4L2_SEL_TGT_COMPOSE_PADDED:
		target = &geo->dst;
		stage = MXR_GEOMETRY_COMPOSE;
		break;
	default:
		return -EINVAL;
	}
	/* apply change and update geometry if needed */
	if (target) {
		/* back up current geometry in case the setup fails */
		memcpy(&tmp, geo, sizeof tmp);

		/* apply requested selection */
		target->x_offset = s->r.left;
		target->y_offset = s->r.top;
		target->width = s->r.width;
		target->height = s->r.height;

		layer->ops.fix_geometry(layer, stage, s->flags);

		/* retrieve the updated selection rectangle */
		res.left = target->x_offset;
		res.top = target->y_offset;
		res.width = target->width;
		res.height = target->height;

		mxr_geometry_dump(layer->mdev, &layer->geo);
	}

	/* checking if the rectangle satisfies constraints */
	if ((s->flags & V4L2_SEL_FLAG_LE) && !mxr_is_rect_inside(&res, &s->r))
		goto fail;
	if ((s->flags & V4L2_SEL_FLAG_GE) && !mxr_is_rect_inside(&s->r, &res))
		goto fail;

	/* return result rectangle */
	s->r = res;

	return 0;
fail:
	/* restore old geometry, which is not touched if target is NULL */
	if (target)
		memcpy(geo, &tmp, sizeof tmp);
	return -ERANGE;
}

static int mxr_enum_dv_presets(struct file *file, void *fh,
	struct v4l2_dv_enum_preset *preset)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);
	ret = v4l2_subdev_call(to_outsd(mdev), video, enum_dv_presets, preset);
	mutex_unlock(&mdev->mutex);

	return ret ? -EINVAL : 0;
}

static int mxr_s_dv_preset(struct file *file, void *fh,
	struct v4l2_dv_preset *preset)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);

	/* preset change cannot be done while there is an entity
	 * dependent on output configuration
	 */
	if (mdev->n_output > 0) {
		mutex_unlock(&mdev->mutex);
		return -EBUSY;
	}

	ret = v4l2_subdev_call(to_outsd(mdev), video, s_dv_preset, preset);

	mutex_unlock(&mdev->mutex);

	mxr_layer_update_output(layer);

	/* any failure should return EINVAL according to V4L2 doc */
	return ret ? -EINVAL : 0;
}

static int mxr_g_dv_preset(struct file *file, void *fh,
	struct v4l2_dv_preset *preset)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);
	ret = v4l2_subdev_call(to_outsd(mdev), video, g_dv_preset, preset);
	mutex_unlock(&mdev->mutex);

	return ret ? -EINVAL : 0;
}

static int mxr_s_std(struct file *file, void *fh, v4l2_std_id *norm)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);

	/* standard change cannot be done while there is an entity
	 * dependent on output configuration
	 */
	if (mdev->n_output > 0) {
		mutex_unlock(&mdev->mutex);
		return -EBUSY;
	}

	ret = v4l2_subdev_call(to_outsd(mdev), video, s_std_output, *norm);

	mutex_unlock(&mdev->mutex);

	mxr_layer_update_output(layer);

	return ret ? -EINVAL : 0;
}

static int mxr_g_std(struct file *file, void *fh, v4l2_std_id *norm)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);
	ret = v4l2_subdev_call(to_outsd(mdev), video, g_std_output, norm);
	mutex_unlock(&mdev->mutex);

	return ret ? -EINVAL : 0;
}

static int mxr_enum_output(struct file *file, void *fh, struct v4l2_output *a)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	struct mxr_output *out;
	struct v4l2_subdev *sd;

	if (a->index >= mdev->output_cnt)
		return -EINVAL;
	out = mdev->output[a->index];
	BUG_ON(out == NULL);
	sd = out->sd;
	strlcpy(a->name, out->name, sizeof(a->name));

	/* try to obtain supported tv norms */
	v4l2_subdev_call(sd, video, g_tvnorms_output, &a->std);
	a->capabilities = 0;
	if (sd->ops->video && sd->ops->video->s_dv_preset)
		a->capabilities |= V4L2_OUT_CAP_PRESETS;
	if (sd->ops->video && sd->ops->video->s_std_output)
		a->capabilities |= V4L2_OUT_CAP_STD;
	a->type = V4L2_OUTPUT_TYPE_ANALOG;

	return 0;
}

static int mxr_s_output(struct file *file, void *fh, unsigned int i)
{
	struct video_device *vfd = video_devdata(file);
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;

	if (i >= mdev->output_cnt || mdev->output[i] == NULL)
		return -EINVAL;

	mutex_lock(&mdev->mutex);
	if (mdev->n_output > 0) {
		mutex_unlock(&mdev->mutex);
		return -EBUSY;
	}
	mdev->current_output = i;
	vfd->tvnorms = 0;
	v4l2_subdev_call(to_outsd(mdev), video, g_tvnorms_output,
		&vfd->tvnorms);
	mutex_unlock(&mdev->mutex);

	/* update the layer's geometry */
	mxr_layer_update_output(layer);

	mxr_dbg(mdev, "tvnorms = %08llx\n", vfd->tvnorms);

	return 0;
}

static int mxr_g_output(struct file *file, void *fh, unsigned int *p)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;

	mutex_lock(&mdev->mutex);
	*p = mdev->current_output;
	mutex_unlock(&mdev->mutex);

	return 0;
}

static int mxr_reqbufs(struct file *file, void *priv,
			  struct v4l2_requestbuffers *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_reqbufs(&layer->vb_queue, p);
}

static int mxr_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_querybuf(&layer->vb_queue, p);
}

static int mxr_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d(%d)\n", __func__, __LINE__, p->index);
	return vb2_qbuf(&layer->vb_queue, p);
}

static int mxr_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_dqbuf(&layer->vb_queue, p, file->f_flags & O_NONBLOCK);
}

static int mxr_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_streamon(&layer->vb_queue, i);
}

static int mxr_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_streamoff(&layer->vb_queue, i);
}

static const struct v4l2_ioctl_ops mxr_ioctl_ops = {
	.vidioc_querycap = mxr_querycap,
	/* format handling */
	.vidioc_enum_fmt_vid_out = mxr_enum_fmt,
	.vidioc_s_fmt_vid_out_mplane = mxr_s_fmt,
	.vidioc_g_fmt_vid_out_mplane = mxr_g_fmt,
	/* buffer control */
	.vidioc_reqbufs = mxr_reqbufs,
	.vidioc_querybuf = mxr_querybuf,
	.vidioc_qbuf = mxr_qbuf,
	.vidioc_dqbuf = mxr_dqbuf,
	/* Streaming control */
	.vidioc_streamon = mxr_streamon,
	.vidioc_streamoff = mxr_streamoff,
	/* Preset functions */
	.vidioc_enum_dv_presets = mxr_enum_dv_presets,
	.vidioc_s_dv_preset = mxr_s_dv_preset,
	.vidioc_g_dv_preset = mxr_g_dv_preset,
	/* analog TV standard functions */
	.vidioc_s_std = mxr_s_std,
	.vidioc_g_std = mxr_g_std,
	/* Output handling */
	.vidioc_enum_output = mxr_enum_output,
	.vidioc_s_output = mxr_s_output,
	.vidioc_g_output = mxr_g_output,
	/* selection ioctls */
	.vidioc_g_selection = mxr_g_selection,
	.vidioc_s_selection = mxr_s_selection,
};

static int mxr_video_open(struct file *file)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret = 0;

	mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);
	/* assure device probe is finished */
	wait_for_device_probe();
	/* creating context for file descriptor */
	ret = v4l2_fh_open(file);
	if (ret) {
		mxr_err(mdev, "v4l2_fh_open failed\n");
		return ret;
	}

	/* leaving if layer is already initialized */
	if (!v4l2_fh_is_singular_file(file))
		return 0;

	/* FIXME: should power be enabled on open? */
	ret = mxr_power_get(mdev);
	if (ret) {
		mxr_err(mdev, "power on failed\n");
		goto fail_fh_open;
	}

	ret = vb2_queue_init(&layer->vb_queue);
	if (ret != 0) {
		mxr_err(mdev, "failed to initialize vb2 queue\n");
		goto fail_power;
	}
	/* set default format, first on the list */
	layer->fmt = layer->fmt_array[0];
	/* setup default geometry */
	mxr_layer_default_geo(layer);

	return 0;

fail_power:
	mxr_power_put(mdev);

fail_fh_open:
	v4l2_fh_release(file);

	return ret;
}

static unsigned int
mxr_video_poll(struct file *file, struct poll_table_struct *wait)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	return vb2_poll(&layer->vb_queue, file, wait);
}

static int mxr_video_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	return vb2_mmap(&layer->vb_queue, vma);
}

static int mxr_video_release(struct file *file)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	if (v4l2_fh_is_singular_file(file)) {
		vb2_queue_release(&layer->vb_queue);
		mxr_power_put(layer->mdev);
	}
	v4l2_fh_release(file);
	return 0;
}

static const struct v4l2_file_operations mxr_fops = {
	.owner = THIS_MODULE,
	.open = mxr_video_open,
	.poll = mxr_video_poll,
	.mmap = mxr_video_mmap,
	.release = mxr_video_release,
	.unlocked_ioctl = video_ioctl2,
};

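/*
 * vb2 callback: report the number of planes and the size of each plane for
 * the currently selected format.
 */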
static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
	unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[],
	void *alloc_ctxs[])
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	const struct mxr_format *fmt = layer->fmt;
	int i;
	struct mxr_device *mdev = layer->mdev;
	struct v4l2_plane_pix_format planes[3];

	mxr_dbg(mdev, "%s\n", __func__);
	/* checking if format was configured */
	if (fmt == NULL)
		return -EINVAL;
	mxr_dbg(mdev, "fmt = %s\n", fmt->name);
	mxr_mplane_fill(planes, fmt, layer->geo.src.full_width,
		layer->geo.src.full_height);

	*nplanes = fmt->num_subframes;
	for (i = 0; i < fmt->num_subframes; ++i) {
		alloc_ctxs[i] = layer->mdev->alloc_ctx;
		sizes[i] = PAGE_ALIGN(planes[i].sizeimage);
		mxr_dbg(mdev, "size[%d] = %08lx\n", i, sizes[i]);
	}

	if (*nbuffers == 0)
		*nbuffers = 1;

	return 0;
}

static void buf_queue(struct vb2_buffer *vb)
{
	struct mxr_buffer *buffer = container_of(vb, struct mxr_buffer, vb);
	struct mxr_layer *layer = vb2_get_drv_priv(vb->vb2_queue);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	spin_lock_irqsave(&layer->enq_slock, flags);
	list_add_tail(&buffer->list, &layer->enq_list);
	spin_unlock_irqrestore(&layer->enq_slock, flags);

	mxr_dbg(mdev, "queuing buffer\n");
}

static void wait_lock(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);

	mxr_dbg(layer->mdev, "%s\n", __func__);
	mutex_lock(&layer->mutex);
}

static void wait_unlock(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);

	mxr_dbg(layer->mdev, "%s\n", __func__);
	mutex_unlock(&layer->mutex);
}

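/*
 * vb2 callback: take a reference on the output configuration, program the
 * layer format and enable the layer in hardware.
 */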
static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	mxr_dbg(mdev, "%s\n", __func__);

	if (count == 0) {
		mxr_dbg(mdev, "no output buffers queued\n");
		return -EINVAL;
	}

	/* block any changes in output configuration */
	mxr_output_get(mdev);

	mxr_layer_update_output(layer);
	layer->ops.format_set(layer);
	/* enabling layer in hardware */
	spin_lock_irqsave(&layer->enq_slock, flags);
	layer->state = MXR_LAYER_STREAMING;
	spin_unlock_irqrestore(&layer->enq_slock, flags);

	layer->ops.stream_set(layer, MXR_ENABLE);
	mxr_streamer_get(mdev);

	return 0;
}

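/*
 * Watchdog timer handler: if streamoff does not finish in time, return the
 * buffers still owned by hardware with an error status.
 */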
static void mxr_watchdog(unsigned long arg)
{
	struct mxr_layer *layer = (struct mxr_layer *) arg;
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	mxr_err(mdev, "watchdog fired for layer %s\n", layer->vfd.name);

	spin_lock_irqsave(&layer->enq_slock, flags);

	if (layer->update_buf == layer->shadow_buf)
		layer->update_buf = NULL;
	if (layer->update_buf) {
		vb2_buffer_done(&layer->update_buf->vb, VB2_BUF_STATE_ERROR);
		layer->update_buf = NULL;
	}
	if (layer->shadow_buf) {
		vb2_buffer_done(&layer->shadow_buf->vb, VB2_BUF_STATE_ERROR);
		layer->shadow_buf = NULL;
	}
	spin_unlock_irqrestore(&layer->enq_slock, flags);
}

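/*
 * vb2 callback: return all queued buffers, wait (guarded by a 1 second
 * watchdog) for buffers still in hardware, then disable the layer and drop
 * the streamer and output references.
 */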
static int stop_streaming(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;
	struct timer_list watchdog;
	struct mxr_buffer *buf, *buf_tmp;

	mxr_dbg(mdev, "%s\n", __func__);

	spin_lock_irqsave(&layer->enq_slock, flags);

	/* reset list */
	layer->state = MXR_LAYER_STREAMING_FINISH;

	/* mark all queued buffers as done */
	list_for_each_entry_safe(buf, buf_tmp, &layer->enq_list, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
	}

	spin_unlock_irqrestore(&layer->enq_slock, flags);

	/* give 1 second to complete the last buffers */
	setup_timer_on_stack(&watchdog, mxr_watchdog,
		(unsigned long)layer);
	mod_timer(&watchdog, jiffies + msecs_to_jiffies(1000));

	/* wait until all buffers reach the done state */
	vb2_wait_for_all_buffers(vq);

	/* stop timer if all synchronization is done */
	del_timer_sync(&watchdog);
	destroy_timer_on_stack(&watchdog);

	/* stopping hardware */
	spin_lock_irqsave(&layer->enq_slock, flags);
	layer->state = MXR_LAYER_IDLE;
	spin_unlock_irqrestore(&layer->enq_slock, flags);

	/* disabling layer in hardware */
	layer->ops.stream_set(layer, MXR_DISABLE);
	/* remove one streamer */
	mxr_streamer_put(mdev);
	/* allow changes in output configuration */
	mxr_output_put(mdev);
	return 0;
}

static struct vb2_ops mxr_video_qops = {
	.queue_setup = queue_setup,
	.buf_queue = buf_queue,
	.wait_prepare = wait_unlock,
	.wait_finish = wait_lock,
	.start_streaming = start_streaming,
	.stop_streaming = stop_streaming,
};

/* FIXME: try to move this function into mxr_base_layer_create */
int mxr_base_layer_register(struct mxr_layer *layer)
{
	struct mxr_device *mdev = layer->mdev;
	int ret;

	ret = video_register_device(&layer->vfd, VFL_TYPE_GRABBER, -1);
	if (ret)
		mxr_err(mdev, "failed to register video device\n");
	else
		mxr_info(mdev, "registered layer %s as /dev/video%d\n",
			layer->vfd.name, layer->vfd.num);
	return ret;
}

void mxr_base_layer_unregister(struct mxr_layer *layer)
{
	video_unregister_device(&layer->vfd);
}

void mxr_layer_release(struct mxr_layer *layer)
{
	if (layer->ops.release)
		layer->ops.release(layer);
}

void mxr_base_layer_release(struct mxr_layer *layer)
{
	kfree(layer);
}

static void mxr_vfd_release(struct video_device *vdev)
{
	printk(KERN_INFO "video device release\n");
}

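/*
 * Allocate and initialize a generic layer: video_device, vb2 queue, locking
 * and the layer-specific ops supplied by the caller.
 */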
struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
	int idx, char *name, struct mxr_layer_ops *ops)
{
	struct mxr_layer *layer;

	layer = kzalloc(sizeof *layer, GFP_KERNEL);
	if (layer == NULL) {
		mxr_err(mdev, "not enough memory for layer.\n");
		goto fail;
	}

	layer->mdev = mdev;
	layer->idx = idx;
	layer->ops = *ops;

	spin_lock_init(&layer->enq_slock);
	INIT_LIST_HEAD(&layer->enq_list);
	mutex_init(&layer->mutex);

	layer->vfd = (struct video_device) {
		.minor = -1,
		.release = mxr_vfd_release,
		.fops = &mxr_fops,
		.ioctl_ops = &mxr_ioctl_ops,
	};
	strlcpy(layer->vfd.name, name, sizeof(layer->vfd.name));
	/* let framework control PRIORITY */
	set_bit(V4L2_FL_USE_FH_PRIO, &layer->vfd.flags);

	video_set_drvdata(&layer->vfd, layer);
	layer->vfd.lock = &layer->mutex;
	layer->vfd.v4l2_dev = &mdev->v4l2_dev;

	layer->vb_queue = (struct vb2_queue) {
		.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
		.io_modes = VB2_MMAP | VB2_USERPTR,
		.drv_priv = layer,
		.buf_struct_size = sizeof(struct mxr_buffer),
		.ops = &mxr_video_qops,
		.mem_ops = &vb2_dma_contig_memops,
	};

	return layer;

fail:
	return NULL;
}

static const struct mxr_format *find_format_by_fourcc(
	struct mxr_layer *layer, unsigned long fourcc)
{
	int i;

	for (i = 0; i < layer->fmt_array_size; ++i)
		if (layer->fmt_array[i]->fourcc == fourcc)
			return layer->fmt_array[i];
	return NULL;
}

static const struct mxr_format *find_format_by_index(
	struct mxr_layer *layer, unsigned long index)
{
	if (index >= layer->fmt_array_size)
		return NULL;
	return layer->fmt_array[index];
}