// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 - 2018 Intel Corporation
 * Copyright 2017 Google LLC
 *
 * Based on Intel IPU4 driver.
 *
 */

#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>

#include "ipu3.h"
#include "ipu3-css-fw.h"
#include "ipu3-dmamap.h"
#include "ipu3-mmu.h"

#define IMGU_PCI_ID			0x1919
#define IMGU_PCI_BAR			0
#define IMGU_DMA_MASK			DMA_BIT_MASK(39)
#define IMGU_MAX_QUEUE_DEPTH		(2 + 2)

/*
 * Pre-allocated buffer sizes for the IMGU dummy buffers. These values
 * should be tuned large enough to avoid buffer re-allocation while
 * streaming, in order to keep streaming latency low.
 */
#define CSS_QUEUE_IN_BUF_SIZE		0
#define CSS_QUEUE_PARAMS_BUF_SIZE	0
#define CSS_QUEUE_OUT_BUF_SIZE		(4160 * 3120 * 12 / 8)
#define CSS_QUEUE_VF_BUF_SIZE		(1920 * 1080 * 12 / 8)
#define CSS_QUEUE_STAT_3A_BUF_SIZE	sizeof(struct ipu3_uapi_stats_3a)

static const size_t css_queue_buf_size_map[IPU3_CSS_QUEUES] = {
	[IPU3_CSS_QUEUE_IN] = CSS_QUEUE_IN_BUF_SIZE,
	[IPU3_CSS_QUEUE_PARAMS] = CSS_QUEUE_PARAMS_BUF_SIZE,
	[IPU3_CSS_QUEUE_OUT] = CSS_QUEUE_OUT_BUF_SIZE,
	[IPU3_CSS_QUEUE_VF] = CSS_QUEUE_VF_BUF_SIZE,
	[IPU3_CSS_QUEUE_STAT_3A] = CSS_QUEUE_STAT_3A_BUF_SIZE,
};

static const struct imgu_node_mapping imgu_node_map[IMGU_NODE_NUM] = {
	[IMGU_NODE_IN] = {IPU3_CSS_QUEUE_IN, "input"},
	[IMGU_NODE_PARAMS] = {IPU3_CSS_QUEUE_PARAMS, "parameters"},
	[IMGU_NODE_OUT] = {IPU3_CSS_QUEUE_OUT, "output"},
	[IMGU_NODE_VF] = {IPU3_CSS_QUEUE_VF, "viewfinder"},
	[IMGU_NODE_STAT_3A] = {IPU3_CSS_QUEUE_STAT_3A, "3a stat"},
};

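/* Map a video node index to the CSS queue it is wired to */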
unsigned int imgu_node_to_queue(unsigned int node)
{
	return imgu_node_map[node].css_queue;
}

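/*
 * Map a CSS queue back to the video node that uses it. Returns
 * IMGU_NODE_NUM if no node matches the given queue.
 */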
unsigned int imgu_map_node(struct imgu_device *imgu, unsigned int css_queue)
{
	unsigned int i;

	for (i = 0; i < IMGU_NODE_NUM; i++)
		if (imgu_node_map[i].css_queue == css_queue)
			break;

	return i;
}

/**************** Dummy buffers ****************/

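/* Free the dummy buffers allocated for every CSS queue of a pipe */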
static void imgu_dummybufs_cleanup(struct imgu_device *imgu, unsigned int pipe)
{
	unsigned int i;
	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

	for (i = 0; i < IPU3_CSS_QUEUES; i++)
		imgu_dmamap_free(imgu,
				 &imgu_pipe->queues[i].dmap);
}

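/*
 * Pre-allocate the dummy buffers at their maximum sizes so that
 * streaming does not have to wait for an allocation later; cleans up
 * and returns -ENOMEM if any allocation fails.
 */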
static int imgu_dummybufs_preallocate(struct imgu_device *imgu,
				      unsigned int pipe)
{
	unsigned int i;
	size_t size;
	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

	for (i = 0; i < IPU3_CSS_QUEUES; i++) {
		size = css_queue_buf_size_map[i];
		/*
		 * Do not enable dummy buffers for the master queue;
		 * always require that real buffers from the user are
		 * available there.
		 */
		if (i == IMGU_QUEUE_MASTER || size == 0)
			continue;

		if (!imgu_dmamap_alloc(imgu,
				       &imgu_pipe->queues[i].dmap, size)) {
			imgu_dummybufs_cleanup(imgu, pipe);
			return -ENOMEM;
		}
	}

	return 0;
}

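/*
 * Resize the pre-allocated dummy buffers to match the formats currently
 * set on the enabled nodes and initialize the per-queue dummy buffer
 * descriptors.
 */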
static int imgu_dummybufs_init(struct imgu_device *imgu, unsigned int pipe)
{
	const struct v4l2_pix_format_mplane *mpix;
	const struct v4l2_meta_format *meta;
	unsigned int i, k, node;
	size_t size;
	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

	/* Allocate a dummy buffer for each queue where buffers are optional */
	for (i = 0; i < IPU3_CSS_QUEUES; i++) {
		node = imgu_map_node(imgu, i);
		if (!imgu_pipe->queue_enabled[node] || i == IMGU_QUEUE_MASTER)
			continue;

		if (!imgu_pipe->nodes[IMGU_NODE_VF].enabled &&
		    i == IPU3_CSS_QUEUE_VF)
			/*
			 * Do not enable dummy buffers for VF if it is not
			 * requested by the user.
			 */
			continue;

		meta = &imgu_pipe->nodes[node].vdev_fmt.fmt.meta;
		mpix = &imgu_pipe->nodes[node].vdev_fmt.fmt.pix_mp;

		if (node == IMGU_NODE_STAT_3A || node == IMGU_NODE_PARAMS)
			size = meta->buffersize;
		else
			size = mpix->plane_fmt[0].sizeimage;

		if (imgu_css_dma_buffer_resize(imgu,
					       &imgu_pipe->queues[i].dmap,
					       size)) {
			imgu_dummybufs_cleanup(imgu, pipe);
			return -ENOMEM;
		}

		for (k = 0; k < IMGU_MAX_QUEUE_DEPTH; k++)
			imgu_css_buf_init(&imgu_pipe->queues[i].dummybufs[k], i,
					  imgu_pipe->queues[i].dmap.daddr);
	}

	return 0;
}

/* May be called from atomic context */
static struct imgu_css_buffer *imgu_dummybufs_get(struct imgu_device *imgu,
						  int queue, unsigned int pipe)
{
	unsigned int i;
	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

	/* Dummy buffers are not allocated for the master queue */
	if (queue == IPU3_CSS_QUEUE_IN)
		return NULL;

	if (WARN_ON(!imgu_pipe->queues[queue].dmap.vaddr))
		/* The buffer should already exist; it cannot be allocated here */
		return NULL;

	for (i = 0; i < IMGU_MAX_QUEUE_DEPTH; i++)
		if (imgu_css_buf_state(&imgu_pipe->queues[queue].dummybufs[i]) !=
		    IPU3_CSS_BUFFER_QUEUED)
			break;

	if (i == IMGU_MAX_QUEUE_DEPTH)
		return NULL;

	imgu_css_buf_init(&imgu_pipe->queues[queue].dummybufs[i], queue,
			  imgu_pipe->queues[queue].dmap.daddr);

	return &imgu_pipe->queues[queue].dummybufs[i];
}

/* Check whether the given buffer is a dummy buffer */
static bool imgu_dummybufs_check(struct imgu_device *imgu,
				 struct imgu_css_buffer *buf,
				 unsigned int pipe)
{
	unsigned int i;
	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

	for (i = 0; i < IMGU_MAX_QUEUE_DEPTH; i++)
		if (buf == &imgu_pipe->queues[buf->queue].dummybufs[i])
			break;

	return i < IMGU_MAX_QUEUE_DEPTH;
}

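/* Complete a vb2 buffer while holding the device lock */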
static void imgu_buffer_done(struct imgu_device *imgu, struct vb2_buffer *vb,
			     enum vb2_buffer_state state)
{
	mutex_lock(&imgu->lock);
	imgu_v4l2_buffer_done(vb, state);
	mutex_unlock(&imgu->lock);
}

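/*
 * Pick the next buffer to queue for a node: the first free user buffer
 * if one is available, otherwise a dummy buffer (NULL if neither exists).
 */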
static struct imgu_css_buffer *imgu_queue_getbuf(struct imgu_device *imgu,
						 unsigned int node,
						 unsigned int pipe)
{
	struct imgu_buffer *buf;
	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

	if (WARN_ON(node >= IMGU_NODE_NUM))
		return NULL;

	/* Find first free buffer from the node */
	list_for_each_entry(buf, &imgu_pipe->nodes[node].buffers, vid_buf.list) {
		if (imgu_css_buf_state(&buf->css_buf) == IPU3_CSS_BUFFER_NEW)
			return &buf->css_buf;
	}

	/* There were no free buffers, try to return a dummy buffer */
	return imgu_dummybufs_get(imgu, imgu_node_map[node].css_queue, pipe);
}

/*
 * Queue as many buffers to CSS as possible. Buffers that do not fit
 * into the CSS buffer queues remain unqueued and will be queued later.
 */
int imgu_queue_buffers(struct imgu_device *imgu, bool initial, unsigned int pipe)
{
	unsigned int node;
	int r = 0;
	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

	if (!imgu_css_is_streaming(&imgu->css))
		return 0;

	dev_dbg(&imgu->pci_dev->dev, "Queue buffers to pipe %d", pipe);
	mutex_lock(&imgu->lock);

	if (!imgu_css_pipe_queue_empty(&imgu->css, pipe)) {
		mutex_unlock(&imgu->lock);
		return 0;
	}

	/* A buffer set is queued to the FW only when the input buffer is ready */
	for (node = IMGU_NODE_NUM - 1;
	     imgu_queue_getbuf(imgu, IMGU_NODE_IN, pipe);
	     node = node ? node - 1 : IMGU_NODE_NUM - 1) {
		if (node == IMGU_NODE_VF &&
		    !imgu_pipe->nodes[IMGU_NODE_VF].enabled) {
			dev_warn(&imgu->pci_dev->dev,
				 "Vf not enabled, ignore queue");
			continue;
		} else if (node == IMGU_NODE_PARAMS &&
			   imgu_pipe->nodes[node].enabled) {
			struct vb2_buffer *vb;
			struct imgu_vb2_buffer *ivb;

			/* No parameters for this frame */
			if (list_empty(&imgu_pipe->nodes[node].buffers))
				continue;

			ivb = list_first_entry(&imgu_pipe->nodes[node].buffers,
					       struct imgu_vb2_buffer, list);
			list_del(&ivb->list);
			vb = &ivb->vbb.vb2_buf;
			r = imgu_css_set_parameters(&imgu->css, pipe,
						    vb2_plane_vaddr(vb, 0));
			if (r) {
				vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
				dev_warn(&imgu->pci_dev->dev,
					 "set parameters failed.");
				continue;
			}

			vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
			dev_dbg(&imgu->pci_dev->dev,
				"queue user parameters %d to css.", vb->index);
		} else if (imgu_pipe->queue_enabled[node]) {
			struct imgu_css_buffer *buf =
				imgu_queue_getbuf(imgu, node, pipe);
			struct imgu_buffer *ibuf = NULL;
			bool dummy;

			if (!buf)
				break;

			r = imgu_css_buf_queue(&imgu->css, pipe, buf);
			if (r)
				break;
			dummy = imgu_dummybufs_check(imgu, buf, pipe);
			if (!dummy)
				ibuf = container_of(buf, struct imgu_buffer,
						    css_buf);
			dev_dbg(&imgu->pci_dev->dev,
				"queue %s %s buffer %u to css da: 0x%08x\n",
				dummy ? "dummy" : "user",
				imgu_node_map[node].name,
				dummy ? 0 : ibuf->vid_buf.vbb.vb2_buf.index,
				(u32)buf->daddr);
		}
	}
	mutex_unlock(&imgu->lock);

	if (r && r != -EBUSY)
		goto failed;

	return 0;

failed:
	/*
	 * On error, mark all buffers that are not yet queued to CSS
	 * as failed.
	 */
	dev_err(&imgu->pci_dev->dev,
		"failed to queue buffer to CSS on queue %i (%d)\n",
		node, r);

	if (initial)
		/* If we were called from streamon(), no need to finish bufs */
		return r;

	for (node = 0; node < IMGU_NODE_NUM; node++) {
		struct imgu_buffer *buf, *buf0;

		if (!imgu_pipe->queue_enabled[node])
			continue;	/* Skip disabled queues */

		mutex_lock(&imgu->lock);
		list_for_each_entry_safe(buf, buf0,
					 &imgu_pipe->nodes[node].buffers,
					 vid_buf.list) {
			if (imgu_css_buf_state(&buf->css_buf) ==
			    IPU3_CSS_BUFFER_QUEUED)
				continue;	/* Was already queued, skip */

			imgu_v4l2_buffer_done(&buf->vid_buf.vbb.vb2_buf,
					      VB2_BUF_STATE_ERROR);
		}
		mutex_unlock(&imgu->lock);
	}

	return r;
}

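/*
 * Power up the ImgU and resume its MMU. A higher frequency (450) is
 * requested when any enabled pipe has an input of 2048x1152 or larger,
 * otherwise the default (200) is used.
 */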
static int imgu_powerup(struct imgu_device *imgu)
{
	int r;
	unsigned int pipe;
	unsigned int freq = 200;
	struct v4l2_mbus_framefmt *fmt;

	/* For inputs of 2048x1152 or larger, ask imgu to work at high freq */
	for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
		fmt = &imgu->imgu_pipe[pipe].nodes[IMGU_NODE_IN].pad_fmt;
		dev_dbg(&imgu->pci_dev->dev, "pipe %u input format = %ux%u",
			pipe, fmt->width, fmt->height);
		if ((fmt->width * fmt->height) >= (2048 * 1152))
			freq = 450;
	}

	r = imgu_css_set_powerup(&imgu->pci_dev->dev, imgu->base, freq);
	if (r)
		return r;

	imgu_mmu_resume(imgu->mmu);
	return 0;
}

static void imgu_powerdown(struct imgu_device *imgu)
{
	imgu_mmu_suspend(imgu->mmu);
	imgu_css_set_powerdown(&imgu->pci_dev->dev, imgu->base);
}

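/*
 * Start or stop CSS streaming. Starting powers up the device,
 * initializes the dummy buffers and queues the initial buffers for each
 * enabled pipe; stopping drains in-flight work and powers down.
 */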
int imgu_s_stream(struct imgu_device *imgu, int enable)
{
	struct device *dev = &imgu->pci_dev->dev;
	int r, pipe;

	if (!enable) {
		/* Stop streaming */
		dev_dbg(dev, "stream off\n");
		/* Block new buffers from being queued to CSS. */
		atomic_set(&imgu->qbuf_barrier, 1);
		imgu_css_stop_streaming(&imgu->css);
		synchronize_irq(imgu->pci_dev->irq);
		atomic_set(&imgu->qbuf_barrier, 0);
		imgu_powerdown(imgu);
		pm_runtime_put(&imgu->pci_dev->dev);

		return 0;
	}

	/* Set Power */
	r = pm_runtime_resume_and_get(dev);
	if (r < 0) {
		dev_err(dev, "failed to set imgu power\n");
		return r;
	}

	r = imgu_powerup(imgu);
	if (r) {
		dev_err(dev, "failed to power up imgu\n");
		pm_runtime_put(dev);
		return r;
	}

	/* Start CSS streaming */
	r = imgu_css_start_streaming(&imgu->css);
	if (r) {
		dev_err(dev, "failed to start css streaming (%d)", r);
		goto fail_start_streaming;
	}

	for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
		/* Initialize dummy buffers */
		r = imgu_dummybufs_init(imgu, pipe);
		if (r) {
			dev_err(dev, "failed to initialize dummy buffers (%d)", r);
			goto fail_dummybufs;
		}

		/* Queue as many buffers from the queue as possible */
		r = imgu_queue_buffers(imgu, true, pipe);
		if (r) {
			dev_err(dev, "failed to queue initial buffers (%d)", r);
			goto fail_queueing;
		}
	}

	return 0;
fail_queueing:
	for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM)
		imgu_dummybufs_cleanup(imgu, pipe);
fail_dummybufs:
	imgu_css_stop_streaming(&imgu->css);
fail_start_streaming:
	pm_runtime_put(dev);

	return r;
}

static void imgu_video_nodes_exit(struct imgu_device *imgu)
{
	int i;

	for (i = 0; i < IMGU_MAX_PIPE_NUM; i++)
		imgu_dummybufs_cleanup(imgu, i);

	imgu_v4l2_unregister(imgu);
}

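/*
 * Initialize per-pipe node state, register the V4L2 devices, set the
 * initial formats and pre-allocate the dummy buffers.
 */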
static int imgu_video_nodes_init(struct imgu_device *imgu)
{
	struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES] = { NULL };
	struct v4l2_rect *rects[IPU3_CSS_RECTS] = { NULL };
	struct imgu_media_pipe *imgu_pipe;
	unsigned int i, j;
	int r;

	imgu->buf_struct_size = sizeof(struct imgu_buffer);

	for (j = 0; j < IMGU_MAX_PIPE_NUM; j++) {
		imgu_pipe = &imgu->imgu_pipe[j];

		for (i = 0; i < IMGU_NODE_NUM; i++) {
			imgu_pipe->nodes[i].name = imgu_node_map[i].name;
			imgu_pipe->nodes[i].output = i < IMGU_QUEUE_FIRST_INPUT;
			imgu_pipe->nodes[i].enabled = false;

			if (i != IMGU_NODE_PARAMS && i != IMGU_NODE_STAT_3A)
				fmts[imgu_node_map[i].css_queue] =
					&imgu_pipe->nodes[i].vdev_fmt.fmt.pix_mp;
			atomic_set(&imgu_pipe->nodes[i].sequence, 0);
		}
	}

	r = imgu_v4l2_register(imgu);
	if (r)
		return r;

	/* Set initial formats and initialize formats of video nodes */
	for (j = 0; j < IMGU_MAX_PIPE_NUM; j++) {
		imgu_pipe = &imgu->imgu_pipe[j];

		rects[IPU3_CSS_RECT_EFFECTIVE] = &imgu_pipe->imgu_sd.rect.eff;
		rects[IPU3_CSS_RECT_BDS] = &imgu_pipe->imgu_sd.rect.bds;
		imgu_css_fmt_set(&imgu->css, fmts, rects, j);

		/* Pre-allocate dummy buffers */
		r = imgu_dummybufs_preallocate(imgu, j);
		if (r) {
			dev_err(&imgu->pci_dev->dev,
				"failed to pre-allocate dummy buffers (%d)", r);
			goto out_cleanup;
		}
	}

	return 0;

out_cleanup:
	imgu_video_nodes_exit(imgu);

	return r;
}

/**************** PCI interface ****************/

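/*
 * Threaded half of the ImgU interrupt: dequeue completed buffers from
 * CSS, return user buffers to vb2, then queue more buffers to CSS
 * unless qbuf_barrier blocks that.
 */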
static irqreturn_t imgu_isr_threaded(int irq, void *imgu_ptr)
{
	struct imgu_device *imgu = imgu_ptr;
	struct imgu_media_pipe *imgu_pipe;
	int p;

	/* Dequeue / queue buffers */
	do {
		u64 ns = ktime_get_ns();
		struct imgu_css_buffer *b;
		struct imgu_buffer *buf = NULL;
		unsigned int node, pipe;
		bool dummy;

		do {
			mutex_lock(&imgu->lock);
			b = imgu_css_buf_dequeue(&imgu->css);
			mutex_unlock(&imgu->lock);
		} while (PTR_ERR(b) == -EAGAIN);

		if (IS_ERR(b)) {
			if (PTR_ERR(b) != -EBUSY)	/* All done */
				dev_err(&imgu->pci_dev->dev,
					"failed to dequeue buffers (%pe)\n", b);
			break;
		}

		node = imgu_map_node(imgu, b->queue);
		pipe = b->pipe;
		dummy = imgu_dummybufs_check(imgu, b, pipe);
		if (!dummy)
			buf = container_of(b, struct imgu_buffer, css_buf);
		dev_dbg(&imgu->pci_dev->dev,
			"dequeue %s %s buffer %d daddr 0x%x from css\n",
			dummy ? "dummy" : "user",
			imgu_node_map[node].name,
			dummy ? 0 : buf->vid_buf.vbb.vb2_buf.index,
			(u32)b->daddr);

		if (dummy)
			/* It was a dummy buffer, skip it */
			continue;

		/* Fill vb2 buffer entries and tell it's ready */
		imgu_pipe = &imgu->imgu_pipe[pipe];
		if (!imgu_pipe->nodes[node].output) {
			buf->vid_buf.vbb.vb2_buf.timestamp = ns;
			buf->vid_buf.vbb.field = V4L2_FIELD_NONE;
			buf->vid_buf.vbb.sequence =
				atomic_inc_return(
				&imgu_pipe->nodes[node].sequence);
			dev_dbg(&imgu->pci_dev->dev, "vb2 buffer sequence %d",
				buf->vid_buf.vbb.sequence);
		}
		imgu_buffer_done(imgu, &buf->vid_buf.vbb.vb2_buf,
				 imgu_css_buf_state(&buf->css_buf) ==
				 IPU3_CSS_BUFFER_DONE ?
				 VB2_BUF_STATE_DONE :
				 VB2_BUF_STATE_ERROR);
		mutex_lock(&imgu->lock);
		if (imgu_css_queue_empty(&imgu->css))
			wake_up_all(&imgu->buf_drain_wq);
		mutex_unlock(&imgu->lock);
	} while (1);

	/*
	 * Try to queue more buffers to CSS. qbuf_barrier is used to
	 * prevent new buffers from being queued to CSS.
	 */
	if (!atomic_read(&imgu->qbuf_barrier))
		for_each_set_bit(p, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM)
			imgu_queue_buffers(imgu, false, p);

	return IRQ_HANDLED;
}

static irqreturn_t imgu_isr(int irq, void *imgu_ptr)
{
	struct imgu_device *imgu = imgu_ptr;

	/* Acknowledge the interrupt */
	if (imgu_css_irq_ack(&imgu->css) < 0)
		return IRQ_NONE;

	return IRQ_WAKE_THREAD;
}

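/* Enable MSI and bus mastering, and disable legacy INTx interrupts */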
static int imgu_pci_config_setup(struct pci_dev *dev)
{
	u16 pci_command;
	int r = pci_enable_msi(dev);

	if (r) {
		dev_err(&dev->dev, "failed to enable MSI (%d)\n", r);
		return r;
	}

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
		       PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(dev, PCI_COMMAND, pci_command);

	return 0;
}

static int imgu_pci_probe(struct pci_dev *pci_dev,
			  const struct pci_device_id *id)
{
	struct imgu_device *imgu;
	phys_addr_t phys;
	unsigned long phys_len;
	void __iomem *const *iomap;
	int r;

	imgu = devm_kzalloc(&pci_dev->dev, sizeof(*imgu), GFP_KERNEL);
	if (!imgu)
		return -ENOMEM;

	imgu->pci_dev = pci_dev;

	r = pcim_enable_device(pci_dev);
	if (r) {
		dev_err(&pci_dev->dev, "failed to enable device (%d)\n", r);
		return r;
	}

	dev_info(&pci_dev->dev, "device 0x%x (rev: 0x%x)\n",
		 pci_dev->device, pci_dev->revision);

	phys = pci_resource_start(pci_dev, IMGU_PCI_BAR);
	phys_len = pci_resource_len(pci_dev, IMGU_PCI_BAR);

	r = pcim_iomap_regions(pci_dev, 1 << IMGU_PCI_BAR, pci_name(pci_dev));
	if (r) {
		dev_err(&pci_dev->dev, "failed to remap I/O memory (%d)\n", r);
		return r;
	}
	dev_info(&pci_dev->dev, "physical base address %pap, %lu bytes\n",
		 &phys, phys_len);

	iomap = pcim_iomap_table(pci_dev);
	if (!iomap) {
		dev_err(&pci_dev->dev, "failed to iomap table\n");
		return -ENODEV;
	}

	imgu->base = iomap[IMGU_PCI_BAR];

	pci_set_drvdata(pci_dev, imgu);

	pci_set_master(pci_dev);

	r = dma_coerce_mask_and_coherent(&pci_dev->dev, IMGU_DMA_MASK);
	if (r) {
		dev_err(&pci_dev->dev, "failed to set DMA mask (%d)\n", r);
		return -ENODEV;
	}

668
669 r = imgu_pci_config_setup(pci_dev);
670 if (r)
671 return r;
672
673 mutex_init(&imgu->lock);
674 mutex_init(&imgu->streaming_lock);
675 atomic_set(&imgu->qbuf_barrier, 0);
676 init_waitqueue_head(&imgu->buf_drain_wq);
677
678 r = imgu_css_set_powerup(&pci_dev->dev, imgu->base, 200);
679 if (r) {
680 dev_err(&pci_dev->dev,
681 "failed to power up CSS (%d)\n", r);
682 goto out_mutex_destroy;
683 }
684
685 imgu->mmu = imgu_mmu_init(&pci_dev->dev, imgu->base);
686 if (IS_ERR(imgu->mmu)) {
687 r = PTR_ERR(imgu->mmu);
688 dev_err(&pci_dev->dev, "failed to initialize MMU (%d)\n", r);
689 goto out_css_powerdown;
690 }
691
692 r = imgu_dmamap_init(imgu);
693 if (r) {
694 dev_err(&pci_dev->dev,
695 "failed to initialize DMA mapping (%d)\n", r);
696 goto out_mmu_exit;
697 }
698
699 /* ISP programming */
700 r = imgu_css_init(&pci_dev->dev, &imgu->css, imgu->base, phys_len);
701 if (r) {
702 dev_err(&pci_dev->dev, "failed to initialize CSS (%d)\n", r);
703 goto out_dmamap_exit;
704 }
705
706 /* v4l2 sub-device registration */
707 r = imgu_video_nodes_init(imgu);
708 if (r) {
709 dev_err(&pci_dev->dev, "failed to create V4L2 devices (%d)\n",
710 r);
711 goto out_css_cleanup;
712 }
713
714 r = devm_request_threaded_irq(&pci_dev->dev, pci_dev->irq,
715 imgu_isr, imgu_isr_threaded,
716 IRQF_SHARED, IMGU_NAME, imgu);
717 if (r) {
718 dev_err(&pci_dev->dev, "failed to request IRQ (%d)\n", r);
719 goto out_video_exit;
720 }
721
722 pm_runtime_put_noidle(&pci_dev->dev);
723 pm_runtime_allow(&pci_dev->dev);
724
725 return 0;
726
out_video_exit:
	imgu_video_nodes_exit(imgu);
out_css_cleanup:
	imgu_css_cleanup(&imgu->css);
out_dmamap_exit:
	imgu_dmamap_exit(imgu);
out_mmu_exit:
	imgu_mmu_exit(imgu->mmu);
out_css_powerdown:
	imgu_css_set_powerdown(&pci_dev->dev, imgu->base);
out_mutex_destroy:
	mutex_destroy(&imgu->streaming_lock);
	mutex_destroy(&imgu->lock);

	return r;
}

static void imgu_pci_remove(struct pci_dev *pci_dev)
{
	struct imgu_device *imgu = pci_get_drvdata(pci_dev);

	pm_runtime_forbid(&pci_dev->dev);
	pm_runtime_get_noresume(&pci_dev->dev);

	imgu_video_nodes_exit(imgu);
	imgu_css_cleanup(&imgu->css);
	imgu_css_set_powerdown(&pci_dev->dev, imgu->base);
	imgu_dmamap_exit(imgu);
	imgu_mmu_exit(imgu->mmu);
	mutex_destroy(&imgu->streaming_lock);
	mutex_destroy(&imgu->lock);
}

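/*
 * System suspend: if streaming, block new buffers, wait for CSS to
 * drain, then stop streaming and power down. Streaming is restarted
 * from imgu_resume().
 */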
static int __maybe_unused imgu_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct imgu_device *imgu = pci_get_drvdata(pci_dev);

	imgu->suspend_in_stream = imgu_css_is_streaming(&imgu->css);
	if (!imgu->suspend_in_stream)
		goto out;
	/* Block new buffers from being queued to CSS. */
	atomic_set(&imgu->qbuf_barrier, 1);
	/*
	 * Wait for the currently running irq handler to be done so that
	 * no new buffers will be queued to fw later.
	 */
	synchronize_irq(pci_dev->irq);
	/* Wait until all buffers in CSS are done. */
	if (!wait_event_timeout(imgu->buf_drain_wq,
	    imgu_css_queue_empty(&imgu->css), msecs_to_jiffies(1000)))
		dev_err(dev, "wait buffer drain timeout.\n");

	imgu_css_stop_streaming(&imgu->css);
	atomic_set(&imgu->qbuf_barrier, 0);
	imgu_powerdown(imgu);
	pm_runtime_force_suspend(dev);
out:
	return 0;
}

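/* System resume: power up again and restart CSS streaming if it was active */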
static int __maybe_unused imgu_resume(struct device *dev)
{
	struct imgu_device *imgu = dev_get_drvdata(dev);
	int r = 0;
	unsigned int pipe;

	if (!imgu->suspend_in_stream)
		goto out;

	pm_runtime_force_resume(dev);

	r = imgu_powerup(imgu);
	if (r) {
		dev_err(dev, "failed to power up imgu\n");
		goto out;
	}

	/* Start CSS streaming */
	r = imgu_css_start_streaming(&imgu->css);
	if (r) {
		dev_err(dev, "failed to resume css streaming (%d)", r);
		goto out;
	}

	for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
		r = imgu_queue_buffers(imgu, true, pipe);
		if (r)
			dev_err(dev, "failed to queue buffers to pipe %d (%d)",
				pipe, r);
	}

out:
	return r;
}

/*
 * The PCI runtime PM framework checks for the existence of driver
 * runtime PM callbacks. Provide a dummy callback here to keep runtime
 * PM from going into an error state.
 */
static __maybe_unused int imgu_rpm_dummy_cb(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops imgu_pm_ops = {
	SET_RUNTIME_PM_OPS(&imgu_rpm_dummy_cb, &imgu_rpm_dummy_cb, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(&imgu_suspend, &imgu_resume)
};

static const struct pci_device_id imgu_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, IMGU_PCI_ID) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, imgu_pci_tbl);

static struct pci_driver imgu_pci_driver = {
	.name = IMGU_NAME,
	.id_table = imgu_pci_tbl,
	.probe = imgu_pci_probe,
	.remove = imgu_pci_remove,
	.driver = {
		.pm = &imgu_pm_ops,
	},
};

module_pci_driver(imgu_pci_driver);

MODULE_AUTHOR("Tuukka Toivonen");
MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
MODULE_AUTHOR("Jian Xu Zheng");
MODULE_AUTHOR("Yuning Pu");
MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel ipu3_imgu PCI driver");
MODULE_FIRMWARE(IMGU_FW_NAME);
MODULE_FIRMWARE(IMGU_FW_NAME_20161208);
MODULE_FIRMWARE(IMGU_FW_NAME_IPU_20161208);