1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3  * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
4  * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5  */
6 
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/sched/signal.h>
10 #include <linux/types.h>
11 #include <linux/errno.h>
12 #include <linux/cdev.h>
13 #include <linux/fs.h>
14 #include <linux/device.h>
15 #include <linux/device/bus.h>
16 #include <linux/mm.h>
17 #include <linux/highmem.h>
18 #include <linux/pagemap.h>
19 #include <linux/bug.h>
20 #include <linux/completion.h>
21 #include <linux/list.h>
22 #include <linux/of.h>
23 #include <linux/platform_device.h>
24 #include <linux/compat.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/rcupdate.h>
27 #include <linux/delay.h>
28 #include <linux/slab.h>
29 #include <linux/interrupt.h>
30 #include <linux/io.h>
31 #include <linux/uaccess.h>
32 #include <soc/bcm2835/raspberrypi-firmware.h>
33 
34 #include "vchiq_core.h"
35 #include "vchiq_ioctl.h"
36 #include "vchiq_arm.h"
37 #include "vchiq_bus.h"
38 #include "vchiq_debugfs.h"
39 #include "vchiq_connected.h"
40 #include "vchiq_pagelist.h"
41 
42 #define DEVICE_NAME "vchiq"
43 
44 #define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
45 
46 #define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
47 
48 #define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
49 #define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX  1
50 
51 #define BELL0	0x00
52 #define BELL2	0x08
53 
54 #define ARM_DS_ACTIVE	BIT(2)
55 
56 /* Override the default prefix, which would be vchiq_arm (from the filename) */
57 #undef MODULE_PARAM_PREFIX
58 #define MODULE_PARAM_PREFIX DEVICE_NAME "."
59 
60 #define KEEPALIVE_VER 1
61 #define KEEPALIVE_VER_MIN KEEPALIVE_VER
62 
/* NOTE(review): taken by code outside this chunk as well — confirm the
 * exact data it guards (message queues) against the rest of the driver. */
DEFINE_SPINLOCK(msg_queue_spinlock);
/* The single global VCHIQ state instance for this platform */
struct vchiq_state g_state;
65 
66 /*
67  * The devices implemented in the VCHIQ firmware are not discoverable,
68  * so we need to maintain a list of them in order to register them with
69  * the interface.
70  */
71 static struct vchiq_device *bcm2835_audio;
72 static struct vchiq_device *bcm2835_camera;
73 
/* Per-SoC configuration selected via the OF match data */
struct vchiq_drvdata {
	/* L2 cache line size the VPU firmware assumes (32 or 64 bytes) */
	const unsigned int cache_line_size;
	/* Firmware handle used for the VCHIQ_INIT mailbox property call */
	struct rpi_firmware *fw;
};
78 
/* BCM2835: the VPU firmware assumes 32-byte L2 cache lines */
static struct vchiq_drvdata bcm2835_drvdata = {
	.cache_line_size = 32,
};

/* BCM2836: the VPU firmware assumes 64-byte L2 cache lines */
static struct vchiq_drvdata bcm2836_drvdata = {
	.cache_line_size = 64,
};
86 
/* ARM-side per-state bookkeeping (keepalive and suspend/resume counts) */
struct vchiq_arm_state {
	/* Keepalive-related data */
	struct task_struct *ka_thread;
	struct completion ka_evt;
	atomic_t ka_use_count;
	atomic_t ka_use_ack_count;
	atomic_t ka_release_count;

	/* NOTE(review): presumably guards the use counts below; its users
	 * are outside this chunk — confirm before relying on the scope. */
	rwlock_t susp_res_lock;

	/* Back-pointer to the owning vchiq_state */
	struct vchiq_state *state;

	/*
	 * Global use count for videocore.
	 * This is equal to the sum of the use counts for all services.  When
	 * this hits zero the videocore suspend procedure will be initiated.
	 */
	int videocore_use_count;

	/*
	 * Use count to track requests from videocore peer.
	 * This use count is not associated with a service, so needs to be
	 * tracked separately with the state.
	 */
	int peer_use_count;

	/*
	 * Flag to indicate that the first vchiq connect has made it through.
	 * This means that both sides should be fully ready, and we should
	 * be able to suspend after this point.
	 */
	int first_connect;
};
120 
/* Platform-private state hung off vchiq_state->platform_state */
struct vchiq_2835_state {
	int inited;	/* set once by vchiq_platform_init_state() */
	struct vchiq_arm_state arm_state;
};
125 
/*
 * Book-keeping for one bulk-transfer pagelist.  This structure is carved
 * out of the tail of the same coherent allocation that holds the pagelist
 * itself (see create_pagelist()) and is torn down by cleanup_pagelistinfo().
 */
struct vchiq_pagelist_info {
	struct pagelist *pagelist;		/* descriptor shared with the VPU */
	size_t pagelist_buffer_size;		/* size of the whole coherent allocation */
	dma_addr_t dma_addr;			/* bus address of the allocation */
	enum dma_data_direction dma_dir;	/* DMA_TO_DEVICE for writes, else DMA_FROM_DEVICE */
	unsigned int num_pages;
	unsigned int pages_need_release;	/* set when pages were pinned via pin_user_pages */
	struct page **pages;
	struct scatterlist *scatterlist;
	unsigned int scatterlist_mapped;	/* set while the scatterlist is DMA-mapped */
};
137 
/* Doorbell register block mapped from the platform resource */
static void __iomem *g_regs;
/* This value is the size of the L2 cache lines as understood by the
 * VPU firmware, which determines the required alignment of the
 * offsets/sizes in pagelists.
 *
 * Modern VPU firmware looks for a DT "cache-line-size" property in
 * the VCHIQ node and will overwrite it with the actual L2 cache size,
 * which the kernel must then respect.  That property was rejected
 * upstream, so we have to use the VPU firmware's compatibility value
 * of 32.
 */
static unsigned int g_cache_line_size = 32;
static unsigned int g_fragments_size;		/* 2 * g_cache_line_size */
static char *g_fragments_base;			/* base of the fragment pool */
static char *g_free_fragments;			/* head of the fragment free list */
static struct semaphore g_free_fragments_sema;	/* counts free fragments */

/* Serialises manipulation of the g_free_fragments singly-linked list */
static DEFINE_SEMAPHORE(g_free_fragments_mutex, 1);
156 
157 static int
158 vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *data,
159 			     unsigned int size, enum vchiq_bulk_dir dir);
160 
161 static irqreturn_t
vchiq_doorbell_irq(int irq,void * dev_id)162 vchiq_doorbell_irq(int irq, void *dev_id)
163 {
164 	struct vchiq_state *state = dev_id;
165 	irqreturn_t ret = IRQ_NONE;
166 	unsigned int status;
167 
168 	/* Read (and clear) the doorbell */
169 	status = readl(g_regs + BELL0);
170 
171 	if (status & ARM_DS_ACTIVE) {  /* Was the doorbell rung? */
172 		remote_event_pollall(state);
173 		ret = IRQ_HANDLED;
174 	}
175 
176 	return ret;
177 }
178 
179 static void
cleanup_pagelistinfo(struct vchiq_instance * instance,struct vchiq_pagelist_info * pagelistinfo)180 cleanup_pagelistinfo(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagelistinfo)
181 {
182 	if (pagelistinfo->scatterlist_mapped) {
183 		dma_unmap_sg(instance->state->dev, pagelistinfo->scatterlist,
184 			     pagelistinfo->num_pages, pagelistinfo->dma_dir);
185 	}
186 
187 	if (pagelistinfo->pages_need_release)
188 		unpin_user_pages(pagelistinfo->pages, pagelistinfo->num_pages);
189 
190 	dma_free_coherent(instance->state->dev, pagelistinfo->pagelist_buffer_size,
191 			  pagelistinfo->pagelist, pagelistinfo->dma_addr);
192 }
193 
194 static inline bool
is_adjacent_block(u32 * addrs,u32 addr,unsigned int k)195 is_adjacent_block(u32 *addrs, u32 addr, unsigned int k)
196 {
197 	u32 tmp;
198 
199 	if (!k)
200 		return false;
201 
202 	tmp = (addrs[k - 1] & PAGE_MASK) +
203 	      (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT);
204 
205 	return tmp == (addr & PAGE_MASK);
206 }
207 
208 /* There is a potential problem with partial cache lines (pages?)
209  * at the ends of the block when reading. If the CPU accessed anything in
210  * the same line (page?) then it may have pulled old data into the cache,
211  * obscuring the new data underneath. We can solve this by transferring the
212  * partial cache lines separately, and allowing the ARM to copy into the
213  * cached area.
214  */
215 
/*
 * Build a pagelist describing a bulk-transfer buffer for the VPU.
 *
 * Exactly one of @buf (vmalloc'd kernel memory) or @ubuf (userspace
 * memory) is non-NULL.  The pagelist, page-pointer array, scatterlist
 * and the returned vchiq_pagelist_info are all carved out of a single
 * dma_alloc_coherent() allocation, released by cleanup_pagelistinfo().
 *
 * Returns NULL on any failure.
 */
static struct vchiq_pagelist_info *
create_pagelist(struct vchiq_instance *instance, char *buf, char __user *ubuf,
		size_t count, unsigned short type)
{
	struct pagelist *pagelist;
	struct vchiq_pagelist_info *pagelistinfo;
	struct page **pages;
	u32 *addrs;
	unsigned int num_pages, offset, i, k;
	int actual_pages;
	size_t pagelist_size;
	struct scatterlist *scatterlist, *sg;
	int dma_buffers;
	dma_addr_t dma_addr;

	if (count >= INT_MAX - PAGE_SIZE)
		return NULL;

	/* Offset of the data within its first page */
	if (buf)
		offset = (uintptr_t)buf & (PAGE_SIZE - 1);
	else
		offset = (uintptr_t)ubuf & (PAGE_SIZE - 1);
	num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);

	/* Reject page counts whose combined allocation size would overflow */
	if ((size_t)num_pages > (SIZE_MAX - sizeof(struct pagelist) -
			 sizeof(struct vchiq_pagelist_info)) /
			(sizeof(u32) + sizeof(pages[0]) +
			 sizeof(struct scatterlist)))
		return NULL;

	pagelist_size = sizeof(struct pagelist) +
			(num_pages * sizeof(u32)) +
			(num_pages * sizeof(pages[0]) +
			(num_pages * sizeof(struct scatterlist))) +
			sizeof(struct vchiq_pagelist_info);

	/* Allocate enough storage to hold the page pointers and the page
	 * list
	 */
	pagelist = dma_alloc_coherent(instance->state->dev, pagelist_size, &dma_addr,
				      GFP_KERNEL);

	dev_dbg(instance->state->dev, "arm: %pK\n", pagelist);

	if (!pagelist)
		return NULL;

	/* Carve the single allocation into its four consecutive regions */
	addrs		= pagelist->addrs;
	pages		= (struct page **)(addrs + num_pages);
	scatterlist	= (struct scatterlist *)(pages + num_pages);
	pagelistinfo	= (struct vchiq_pagelist_info *)
			  (scatterlist + num_pages);

	pagelist->length = count;
	pagelist->type = type;
	pagelist->offset = offset;

	/* Populate the fields of the pagelistinfo structure */
	pagelistinfo->pagelist = pagelist;
	pagelistinfo->pagelist_buffer_size = pagelist_size;
	pagelistinfo->dma_addr = dma_addr;
	pagelistinfo->dma_dir =  (type == PAGELIST_WRITE) ?
				  DMA_TO_DEVICE : DMA_FROM_DEVICE;
	pagelistinfo->num_pages = num_pages;
	pagelistinfo->pages_need_release = 0;
	pagelistinfo->pages = pages;
	pagelistinfo->scatterlist = scatterlist;
	pagelistinfo->scatterlist_mapped = 0;

	if (buf) {
		/* Kernel buffer: look up each vmalloc page; nothing to pin */
		unsigned long length = count;
		unsigned int off = offset;

		for (actual_pages = 0; actual_pages < num_pages;
		     actual_pages++) {
			struct page *pg =
				vmalloc_to_page((buf +
						 (actual_pages * PAGE_SIZE)));
			size_t bytes = PAGE_SIZE - off;

			if (!pg) {
				cleanup_pagelistinfo(instance, pagelistinfo);
				return NULL;
			}

			if (bytes > length)
				bytes = length;
			pages[actual_pages] = pg;
			length -= bytes;
			off = 0;
		}
		/* do not try and release vmalloc pages */
	} else {
		/* User buffer: pin the pages for the duration of the DMA */
		actual_pages = pin_user_pages_fast((unsigned long)ubuf & PAGE_MASK, num_pages,
						   type == PAGELIST_READ, pages);

		if (actual_pages != num_pages) {
			dev_dbg(instance->state->dev, "arm: Only %d/%d pages locked\n",
				actual_pages, num_pages);

			/* This is probably due to the process being killed */
			if (actual_pages > 0)
				unpin_user_pages(pages, actual_pages);
			cleanup_pagelistinfo(instance, pagelistinfo);
			return NULL;
		}
		 /* release user pages */
		pagelistinfo->pages_need_release = 1;
	}

	/*
	 * Initialize the scatterlist so that the magic cookie
	 *  is filled if debugging is enabled
	 */
	sg_init_table(scatterlist, num_pages);
	/* Now set the pages for each scatterlist */
	for (i = 0; i < num_pages; i++)	{
		unsigned int len = PAGE_SIZE - offset;

		if (len > count)
			len = count;
		sg_set_page(scatterlist + i, pages[i], len, offset);
		offset = 0;
		count -= len;
	}

	dma_buffers = dma_map_sg(instance->state->dev,
				 scatterlist,
				 num_pages,
				 pagelistinfo->dma_dir);

	if (dma_buffers == 0) {
		cleanup_pagelistinfo(instance, pagelistinfo);
		return NULL;
	}

	pagelistinfo->scatterlist_mapped = 1;

	/* Combine adjacent blocks for performance */
	k = 0;
	for_each_sg(scatterlist, sg, dma_buffers, i) {
		u32 len = sg_dma_len(sg);
		u32 addr = sg_dma_address(sg);

		/* Note: addrs is the address + page_count - 1
		 * The firmware expects blocks after the first to be page-
		 * aligned and a multiple of the page size
		 */
		WARN_ON(len == 0);
		WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK));
		WARN_ON(i && (addr & ~PAGE_MASK));
		if (is_adjacent_block(addrs, addr, k))
			addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT);
		else
			addrs[k++] = (addr & PAGE_MASK) |
				(((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1);
	}

	/* Partial cache lines (fragments) require special measures */
	if ((type == PAGELIST_READ) &&
	    ((pagelist->offset & (g_cache_line_size - 1)) ||
	    ((pagelist->offset + pagelist->length) &
	    (g_cache_line_size - 1)))) {
		char *fragments;

		/* May sleep until a fragment pair becomes available */
		if (down_interruptible(&g_free_fragments_sema)) {
			cleanup_pagelistinfo(instance, pagelistinfo);
			return NULL;
		}

		WARN_ON(!g_free_fragments);

		down(&g_free_fragments_mutex);
		fragments = g_free_fragments;
		WARN_ON(!fragments);
		g_free_fragments = *(char **)g_free_fragments;
		up(&g_free_fragments_mutex);
		/* Encode the fragment index into the pagelist type so
		 * free_pagelist() can recover it. */
		pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
			(fragments - g_fragments_base) / g_fragments_size;
	}

	return pagelistinfo;
}
399 
/*
 * Tear down a pagelist once a bulk transfer has completed.
 *
 * @actual is the number of bytes actually transferred (negative on
 * failure).  For fragment-assisted reads the head/tail partial cache
 * lines are copied from the fragment buffer back into the user pages
 * and the fragment pair is returned to the free list.
 */
static void
free_pagelist(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagelistinfo,
	      int actual)
{
	struct pagelist *pagelist = pagelistinfo->pagelist;
	struct page **pages = pagelistinfo->pages;
	unsigned int num_pages = pagelistinfo->num_pages;

	dev_dbg(instance->state->dev, "arm: %pK, %d\n", pagelistinfo->pagelist, actual);

	/*
	 * NOTE: dma_unmap_sg must be called before the
	 * cpu can touch any of the data/pages.
	 */
	dma_unmap_sg(instance->state->dev, pagelistinfo->scatterlist,
		     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	pagelistinfo->scatterlist_mapped = 0;

	/* Deal with any partial cache lines (fragments) */
	if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS && g_fragments_base) {
		/* Recover the fragment pair create_pagelist() encoded in type */
		char *fragments = g_fragments_base +
			(pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
			g_fragments_size;
		int head_bytes, tail_bytes;

		head_bytes = (g_cache_line_size - pagelist->offset) &
			(g_cache_line_size - 1);
		tail_bytes = (pagelist->offset + actual) &
			(g_cache_line_size - 1);

		/* Copy the leading partial cache line into the first page */
		if ((actual >= 0) && (head_bytes != 0)) {
			if (head_bytes > actual)
				head_bytes = actual;

			memcpy_to_page(pages[0],
				pagelist->offset,
				fragments,
				head_bytes);
		}
		/* Copy the trailing partial cache line into the last page */
		if ((actual >= 0) && (head_bytes < actual) &&
		    (tail_bytes != 0))
			memcpy_to_page(pages[num_pages - 1],
				(pagelist->offset + actual) &
				(PAGE_SIZE - 1) & ~(g_cache_line_size - 1),
				fragments + g_cache_line_size,
				tail_bytes);

		/* Return the fragment pair to the free list */
		down(&g_free_fragments_mutex);
		*(char **)fragments = g_free_fragments;
		g_free_fragments = fragments;
		up(&g_free_fragments_mutex);
		up(&g_free_fragments_sema);
	}

	/* Need to mark all the pages dirty. */
	if (pagelist->type != PAGELIST_WRITE &&
	    pagelistinfo->pages_need_release) {
		unsigned int i;

		for (i = 0; i < num_pages; i++)
			set_page_dirty(pages[i]);
	}

	cleanup_pagelistinfo(instance, pagelistinfo);
}
465 
vchiq_platform_init(struct platform_device * pdev,struct vchiq_state * state)466 static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
467 {
468 	struct device *dev = &pdev->dev;
469 	struct vchiq_drvdata *drvdata = platform_get_drvdata(pdev);
470 	struct rpi_firmware *fw = drvdata->fw;
471 	struct vchiq_slot_zero *vchiq_slot_zero;
472 	void *slot_mem;
473 	dma_addr_t slot_phys;
474 	u32 channelbase;
475 	int slot_mem_size, frag_mem_size;
476 	int err, irq, i;
477 
478 	/*
479 	 * VCHI messages between the CPU and firmware use
480 	 * 32-bit bus addresses.
481 	 */
482 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
483 
484 	if (err < 0)
485 		return err;
486 
487 	g_cache_line_size = drvdata->cache_line_size;
488 	g_fragments_size = 2 * g_cache_line_size;
489 
490 	/* Allocate space for the channels in coherent memory */
491 	slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
492 	frag_mem_size = PAGE_ALIGN(g_fragments_size * MAX_FRAGMENTS);
493 
494 	slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
495 				       &slot_phys, GFP_KERNEL);
496 	if (!slot_mem) {
497 		dev_err(dev, "could not allocate DMA memory\n");
498 		return -ENOMEM;
499 	}
500 
501 	WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);
502 
503 	vchiq_slot_zero = vchiq_init_slots(dev, slot_mem, slot_mem_size);
504 	if (!vchiq_slot_zero)
505 		return -ENOMEM;
506 
507 	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
508 		(int)slot_phys + slot_mem_size;
509 	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
510 		MAX_FRAGMENTS;
511 
512 	g_fragments_base = (char *)slot_mem + slot_mem_size;
513 
514 	g_free_fragments = g_fragments_base;
515 	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
516 		*(char **)&g_fragments_base[i * g_fragments_size] =
517 			&g_fragments_base[(i + 1) * g_fragments_size];
518 	}
519 	*(char **)&g_fragments_base[i * g_fragments_size] = NULL;
520 	sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);
521 
522 	err = vchiq_init_state(state, vchiq_slot_zero, dev);
523 	if (err)
524 		return err;
525 
526 	g_regs = devm_platform_ioremap_resource(pdev, 0);
527 	if (IS_ERR(g_regs))
528 		return PTR_ERR(g_regs);
529 
530 	irq = platform_get_irq(pdev, 0);
531 	if (irq <= 0)
532 		return irq;
533 
534 	err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
535 			       "VCHIQ doorbell", state);
536 	if (err) {
537 		dev_err(dev, "failed to register irq=%d\n", irq);
538 		return err;
539 	}
540 
541 	/* Send the base address of the slots to VideoCore */
542 	channelbase = slot_phys;
543 	err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
544 				    &channelbase, sizeof(channelbase));
545 	if (err) {
546 		dev_err(dev, "failed to send firmware property: %d\n", err);
547 		return err;
548 	}
549 
550 	if (channelbase) {
551 		dev_err(dev, "failed to set channelbase (response: %x)\n",
552 			channelbase);
553 		return -ENXIO;
554 	}
555 
556 	dev_dbg(&pdev->dev, "arm: vchiq_init - done (slots %pK, phys %pad)\n",
557 		vchiq_slot_zero, &slot_phys);
558 
559 	vchiq_call_connected_callbacks();
560 
561 	return 0;
562 }
563 
564 static void
vchiq_arm_init_state(struct vchiq_state * state,struct vchiq_arm_state * arm_state)565 vchiq_arm_init_state(struct vchiq_state *state,
566 		     struct vchiq_arm_state *arm_state)
567 {
568 	if (arm_state) {
569 		rwlock_init(&arm_state->susp_res_lock);
570 
571 		init_completion(&arm_state->ka_evt);
572 		atomic_set(&arm_state->ka_use_count, 0);
573 		atomic_set(&arm_state->ka_use_ack_count, 0);
574 		atomic_set(&arm_state->ka_release_count, 0);
575 
576 		arm_state->state = state;
577 		arm_state->first_connect = 0;
578 	}
579 }
580 
/*
 * Allocate and initialise the per-state platform private data.
 * Returns 0 on success or -ENOMEM.
 */
int
vchiq_platform_init_state(struct vchiq_state *state)
{
	struct vchiq_2835_state *platform_state;

	state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
	if (!state->platform_state)
		return -ENOMEM;

	platform_state = (struct vchiq_2835_state *)state->platform_state;

	/* Mark as ready so vchiq_platform_get_arm_state() can sanity-check */
	platform_state->inited = 1;
	vchiq_arm_init_state(state, &platform_state->arm_state);

	return 0;
}
597 
vchiq_platform_get_arm_state(struct vchiq_state * state)598 static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *state)
599 {
600 	struct vchiq_2835_state *platform_state;
601 
602 	platform_state   = (struct vchiq_2835_state *)state->platform_state;
603 
604 	WARN_ON_ONCE(!platform_state->inited);
605 
606 	return &platform_state->arm_state;
607 }
608 
/*
 * Signal a remote event to the VideoCore, ringing the doorbell (BELL2)
 * if the peer has armed the event.  Barrier ordering here is critical:
 * shared-memory writes must be visible before 'fired', and 'fired' must
 * be visible before the doorbell write.
 */
void
remote_event_signal(struct remote_event *event)
{
	/*
	 * Ensure that all writes to shared data structures have completed
	 * before signalling the peer.
	 */
	wmb();

	event->fired = 1;

	dsb(sy);         /* data barrier operation */

	if (event->armed)
		writel(0, g_regs + BELL2); /* trigger vc interrupt */
}
625 
626 int
vchiq_prepare_bulk_data(struct vchiq_instance * instance,struct vchiq_bulk * bulk,void * offset,void __user * uoffset,int size,int dir)627 vchiq_prepare_bulk_data(struct vchiq_instance *instance, struct vchiq_bulk *bulk, void *offset,
628 			void __user *uoffset, int size, int dir)
629 {
630 	struct vchiq_pagelist_info *pagelistinfo;
631 
632 	pagelistinfo = create_pagelist(instance, offset, uoffset, size,
633 				       (dir == VCHIQ_BULK_RECEIVE)
634 				       ? PAGELIST_READ
635 				       : PAGELIST_WRITE);
636 
637 	if (!pagelistinfo)
638 		return -ENOMEM;
639 
640 	bulk->data = pagelistinfo->dma_addr;
641 
642 	/*
643 	 * Store the pagelistinfo address in remote_data,
644 	 * which isn't used by the slave.
645 	 */
646 	bulk->remote_data = pagelistinfo;
647 
648 	return 0;
649 }
650 
651 void
vchiq_complete_bulk(struct vchiq_instance * instance,struct vchiq_bulk * bulk)652 vchiq_complete_bulk(struct vchiq_instance *instance, struct vchiq_bulk *bulk)
653 {
654 	if (bulk && bulk->remote_data && bulk->actual)
655 		free_pagelist(instance, (struct vchiq_pagelist_info *)bulk->remote_data,
656 			      bulk->actual);
657 }
658 
vchiq_dump_platform_state(struct seq_file * f)659 void vchiq_dump_platform_state(struct seq_file *f)
660 {
661 	seq_puts(f, "  Platform: 2835 (VC master)\n");
662 }
663 
664 #define VCHIQ_INIT_RETRIES 10
vchiq_initialise(struct vchiq_instance ** instance_out)665 int vchiq_initialise(struct vchiq_instance **instance_out)
666 {
667 	struct vchiq_state *state;
668 	struct vchiq_instance *instance = NULL;
669 	int i, ret;
670 
671 	/*
672 	 * VideoCore may not be ready due to boot up timing.
673 	 * It may never be ready if kernel and firmware are mismatched,so don't
674 	 * block forever.
675 	 */
676 	for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
677 		state = vchiq_get_state();
678 		if (state)
679 			break;
680 		usleep_range(500, 600);
681 	}
682 	if (i == VCHIQ_INIT_RETRIES) {
683 		dev_err(state->dev, "core: %s: Videocore not initialized\n", __func__);
684 		ret = -ENOTCONN;
685 		goto failed;
686 	} else if (i > 0) {
687 		dev_warn(state->dev, "core: %s: videocore initialized after %d retries\n",
688 			 __func__, i);
689 	}
690 
691 	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
692 	if (!instance) {
693 		dev_err(state->dev, "core: %s: Cannot allocate vchiq instance\n", __func__);
694 		ret = -ENOMEM;
695 		goto failed;
696 	}
697 
698 	instance->connected = 0;
699 	instance->state = state;
700 	mutex_init(&instance->bulk_waiter_list_mutex);
701 	INIT_LIST_HEAD(&instance->bulk_waiter_list);
702 
703 	*instance_out = instance;
704 
705 	ret = 0;
706 
707 failed:
708 	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);
709 
710 	return ret;
711 }
712 EXPORT_SYMBOL(vchiq_initialise);
713 
free_bulk_waiter(struct vchiq_instance * instance)714 void free_bulk_waiter(struct vchiq_instance *instance)
715 {
716 	struct bulk_waiter_node *waiter, *next;
717 
718 	list_for_each_entry_safe(waiter, next,
719 				 &instance->bulk_waiter_list, list) {
720 		list_del(&waiter->list);
721 		dev_dbg(instance->state->dev,
722 			"arm: bulk_waiter - cleaned up %pK for pid %d\n",
723 			waiter, waiter->pid);
724 		kfree(waiter);
725 	}
726 }
727 
vchiq_shutdown(struct vchiq_instance * instance)728 int vchiq_shutdown(struct vchiq_instance *instance)
729 {
730 	int status = 0;
731 	struct vchiq_state *state = instance->state;
732 
733 	if (mutex_lock_killable(&state->mutex))
734 		return -EAGAIN;
735 
736 	/* Remove all services */
737 	vchiq_shutdown_internal(state, instance);
738 
739 	mutex_unlock(&state->mutex);
740 
741 	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, status);
742 
743 	free_bulk_waiter(instance);
744 	kfree(instance);
745 
746 	return status;
747 }
748 EXPORT_SYMBOL(vchiq_shutdown);
749 
vchiq_is_connected(struct vchiq_instance * instance)750 static int vchiq_is_connected(struct vchiq_instance *instance)
751 {
752 	return instance->connected;
753 }
754 
vchiq_connect(struct vchiq_instance * instance)755 int vchiq_connect(struct vchiq_instance *instance)
756 {
757 	int status;
758 	struct vchiq_state *state = instance->state;
759 
760 	if (mutex_lock_killable(&state->mutex)) {
761 		dev_dbg(state->dev,
762 			"core: call to mutex_lock failed\n");
763 		status = -EAGAIN;
764 		goto failed;
765 	}
766 	status = vchiq_connect_internal(state, instance);
767 
768 	if (!status)
769 		instance->connected = 1;
770 
771 	mutex_unlock(&state->mutex);
772 
773 failed:
774 	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, status);
775 
776 	return status;
777 }
778 EXPORT_SYMBOL(vchiq_connect);
779 
780 static int
vchiq_add_service(struct vchiq_instance * instance,const struct vchiq_service_params_kernel * params,unsigned int * phandle)781 vchiq_add_service(struct vchiq_instance *instance,
782 		  const struct vchiq_service_params_kernel *params,
783 		  unsigned int *phandle)
784 {
785 	int status;
786 	struct vchiq_state *state = instance->state;
787 	struct vchiq_service *service = NULL;
788 	int srvstate;
789 
790 	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
791 
792 	srvstate = vchiq_is_connected(instance)
793 		? VCHIQ_SRVSTATE_LISTENING
794 		: VCHIQ_SRVSTATE_HIDDEN;
795 
796 	service = vchiq_add_service_internal(state, params, srvstate, instance, NULL);
797 
798 	if (service) {
799 		*phandle = service->handle;
800 		status = 0;
801 	} else {
802 		status = -EINVAL;
803 	}
804 
805 	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, status);
806 
807 	return status;
808 }
809 
810 int
vchiq_open_service(struct vchiq_instance * instance,const struct vchiq_service_params_kernel * params,unsigned int * phandle)811 vchiq_open_service(struct vchiq_instance *instance,
812 		   const struct vchiq_service_params_kernel *params,
813 		   unsigned int *phandle)
814 {
815 	int status = -EINVAL;
816 	struct vchiq_state   *state = instance->state;
817 	struct vchiq_service *service = NULL;
818 
819 	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
820 
821 	if (!vchiq_is_connected(instance))
822 		goto failed;
823 
824 	service = vchiq_add_service_internal(state, params, VCHIQ_SRVSTATE_OPENING, instance, NULL);
825 
826 	if (service) {
827 		*phandle = service->handle;
828 		status = vchiq_open_service_internal(service, current->pid);
829 		if (status) {
830 			vchiq_remove_service(instance, service->handle);
831 			*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
832 		}
833 	}
834 
835 failed:
836 	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, status);
837 
838 	return status;
839 }
840 EXPORT_SYMBOL(vchiq_open_service);
841 
842 int
vchiq_bulk_transmit(struct vchiq_instance * instance,unsigned int handle,const void * data,unsigned int size,void * userdata,enum vchiq_bulk_mode mode)843 vchiq_bulk_transmit(struct vchiq_instance *instance, unsigned int handle, const void *data,
844 		    unsigned int size, void *userdata, enum vchiq_bulk_mode mode)
845 {
846 	int status;
847 
848 	while (1) {
849 		switch (mode) {
850 		case VCHIQ_BULK_MODE_NOCALLBACK:
851 		case VCHIQ_BULK_MODE_CALLBACK:
852 			status = vchiq_bulk_transfer(instance, handle,
853 						     (void *)data, NULL,
854 						     size, userdata, mode,
855 						     VCHIQ_BULK_TRANSMIT);
856 			break;
857 		case VCHIQ_BULK_MODE_BLOCKING:
858 			status = vchiq_blocking_bulk_transfer(instance, handle, (void *)data, size,
859 							      VCHIQ_BULK_TRANSMIT);
860 			break;
861 		default:
862 			return -EINVAL;
863 		}
864 
865 		/*
866 		 * vchiq_*_bulk_transfer() may return -EAGAIN, so we need
867 		 * to implement a retry mechanism since this function is
868 		 * supposed to block until queued
869 		 */
870 		if (status != -EAGAIN)
871 			break;
872 
873 		msleep(1);
874 	}
875 
876 	return status;
877 }
878 EXPORT_SYMBOL(vchiq_bulk_transmit);
879 
vchiq_bulk_receive(struct vchiq_instance * instance,unsigned int handle,void * data,unsigned int size,void * userdata,enum vchiq_bulk_mode mode)880 int vchiq_bulk_receive(struct vchiq_instance *instance, unsigned int handle,
881 		       void *data, unsigned int size, void *userdata,
882 		       enum vchiq_bulk_mode mode)
883 {
884 	int status;
885 
886 	while (1) {
887 		switch (mode) {
888 		case VCHIQ_BULK_MODE_NOCALLBACK:
889 		case VCHIQ_BULK_MODE_CALLBACK:
890 			status = vchiq_bulk_transfer(instance, handle, data, NULL,
891 						     size, userdata,
892 						     mode, VCHIQ_BULK_RECEIVE);
893 			break;
894 		case VCHIQ_BULK_MODE_BLOCKING:
895 			status = vchiq_blocking_bulk_transfer(instance, handle, (void *)data, size,
896 							      VCHIQ_BULK_RECEIVE);
897 			break;
898 		default:
899 			return -EINVAL;
900 		}
901 
902 		/*
903 		 * vchiq_*_bulk_transfer() may return -EAGAIN, so we need
904 		 * to implement a retry mechanism since this function is
905 		 * supposed to block until queued
906 		 */
907 		if (status != -EAGAIN)
908 			break;
909 
910 		msleep(1);
911 	}
912 
913 	return status;
914 }
915 EXPORT_SYMBOL(vchiq_bulk_receive);
916 
/*
 * Perform a blocking bulk transfer, reusing (per pid) any waiter node
 * left behind by a previous -EAGAIN so that a retried transfer resumes
 * the same outstanding bulk instead of queueing a duplicate.
 */
static int
vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *data,
			     unsigned int size, enum vchiq_bulk_dir dir)
{
	struct vchiq_service *service;
	int status;
	struct bulk_waiter_node *waiter = NULL, *iter;

	service = find_service_by_handle(instance, handle);
	if (!service)
		return -EINVAL;

	/* NOTE(review): the reference is dropped here, yet service is still
	 * dereferenced in the OOM path below (dev_err) — confirm lifetime. */
	vchiq_service_put(service);

	/* Look for a waiter left by a previous attempt from this thread */
	mutex_lock(&instance->bulk_waiter_list_mutex);
	list_for_each_entry(iter, &instance->bulk_waiter_list, list) {
		if (iter->pid == current->pid) {
			list_del(&iter->list);
			waiter = iter;
			break;
		}
	}
	mutex_unlock(&instance->bulk_waiter_list_mutex);

	if (waiter) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* This thread has an outstanding bulk transfer. */
			/* FIXME: why compare a dma address to a pointer? */
			if ((bulk->data != (dma_addr_t)(uintptr_t)data) || (bulk->size != size)) {
				/*
				 * This is not a retry of the previous one.
				 * Cancel the signal when the transfer completes.
				 */
				spin_lock(&bulk_waiter_spinlock);
				bulk->userdata = NULL;
				spin_unlock(&bulk_waiter_spinlock);
			}
		}
	} else {
		waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
		if (!waiter) {
			dev_err(service->state->dev, "core: %s: - Out of memory\n", __func__);
			return -ENOMEM;
		}
	}

	status = vchiq_bulk_transfer(instance, handle, data, NULL, size,
				     &waiter->bulk_waiter,
				     VCHIQ_BULK_MODE_BLOCKING, dir);
	/* Done (success, hard failure, or killed): free the waiter.
	 * Only on -EAGAIN with a live bulk do we park it for a retry. */
	if ((status != -EAGAIN) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* Cancel the signal when the transfer completes. */
			spin_lock(&bulk_waiter_spinlock);
			bulk->userdata = NULL;
			spin_unlock(&bulk_waiter_spinlock);
		}
		kfree(waiter);
	} else {
		waiter->pid = current->pid;
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_add(&waiter->list, &instance->bulk_waiter_list);
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		dev_dbg(instance->state->dev, "arm: saved bulk_waiter %pK for pid %d\n",
			waiter, current->pid);
	}

	return status;
}
989 
/*
 * Append a completion record to the instance's ring buffer for delivery
 * to userspace, blocking (interruptibly) while the ring is full.
 * Returns 0 on success (or if the instance is closing), -EAGAIN if
 * interrupted while waiting for space.
 */
static int
add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
	       struct vchiq_header *header, struct user_service *user_service,
	       void *bulk_userdata)
{
	struct vchiq_completion_data_kernel *completion;
	int insert;

	DEBUG_INITIALISE(g_state.local);

	insert = instance->completion_insert;
	while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
		/* Out of space - wait for the client */
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		dev_dbg(instance->state->dev, "core: completion queue full\n");
		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
		if (wait_for_completion_interruptible(&instance->remove_event)) {
			dev_dbg(instance->state->dev, "arm: service_callback interrupted\n");
			return -EAGAIN;
		} else if (instance->closing) {
			dev_dbg(instance->state->dev, "arm: service_callback closing\n");
			return 0;
		}
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	}

	/* MAX_COMPLETIONS is a power of two, so masking wraps the index */
	completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];

	completion->header = header;
	completion->reason = reason;
	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
	completion->service_userdata = user_service->service;
	completion->bulk_userdata = bulk_userdata;

	if (reason == VCHIQ_SERVICE_CLOSED) {
		/*
		 * Take an extra reference, to be held until
		 * this CLOSED notification is delivered.
		 */
		vchiq_service_get(user_service->service);
		if (instance->use_close_delivered)
			user_service->close_pending = 1;
	}

	/*
	 * A write barrier is needed here to ensure that the entire completion
	 * record is written out before the insert point.
	 */
	wmb();

	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		user_service->message_available_pos = insert;

	insert++;
	instance->completion_insert = insert;

	complete(&instance->insert_event);

	return 0;
}
1050 
1051 int
service_callback(struct vchiq_instance * instance,enum vchiq_reason reason,struct vchiq_header * header,unsigned int handle,void * bulk_userdata)1052 service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
1053 		 struct vchiq_header *header, unsigned int handle, void *bulk_userdata)
1054 {
1055 	/*
1056 	 * How do we ensure the callback goes to the right client?
1057 	 * The service_user data points to a user_service record
1058 	 * containing the original callback and the user state structure, which
1059 	 * contains a circular buffer for completion records.
1060 	 */
1061 	struct user_service *user_service;
1062 	struct vchiq_service *service;
1063 	bool skip_completion = false;
1064 
1065 	DEBUG_INITIALISE(g_state.local);
1066 
1067 	DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1068 
1069 	rcu_read_lock();
1070 	service = handle_to_service(instance, handle);
1071 	if (WARN_ON(!service)) {
1072 		rcu_read_unlock();
1073 		return 0;
1074 	}
1075 
1076 	user_service = (struct user_service *)service->base.userdata;
1077 
1078 	if (!instance || instance->closing) {
1079 		rcu_read_unlock();
1080 		return 0;
1081 	}
1082 
1083 	/*
1084 	 * As hopping around different synchronization mechanism,
1085 	 * taking an extra reference results in simpler implementation.
1086 	 */
1087 	vchiq_service_get(service);
1088 	rcu_read_unlock();
1089 
1090 	dev_dbg(service->state->dev,
1091 		"arm: service %p(%d,%p), reason %d, header %p, instance %p, bulk_userdata %p\n",
1092 		user_service, service->localport, user_service->userdata,
1093 		reason, header, instance, bulk_userdata);
1094 
1095 	if (header && user_service->is_vchi) {
1096 		spin_lock(&msg_queue_spinlock);
1097 		while (user_service->msg_insert ==
1098 			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
1099 			spin_unlock(&msg_queue_spinlock);
1100 			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1101 			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
1102 			dev_dbg(service->state->dev, "arm: msg queue full\n");
1103 			/*
1104 			 * If there is no MESSAGE_AVAILABLE in the completion
1105 			 * queue, add one
1106 			 */
1107 			if ((user_service->message_available_pos -
1108 				instance->completion_remove) < 0) {
1109 				int status;
1110 
1111 				dev_dbg(instance->state->dev,
1112 					"arm: Inserting extra MESSAGE_AVAILABLE\n");
1113 				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1114 				status = add_completion(instance, reason, NULL, user_service,
1115 							bulk_userdata);
1116 				if (status) {
1117 					DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1118 					vchiq_service_put(service);
1119 					return status;
1120 				}
1121 			}
1122 
1123 			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1124 			if (wait_for_completion_interruptible(&user_service->remove_event)) {
1125 				dev_dbg(instance->state->dev, "arm: interrupted\n");
1126 				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1127 				vchiq_service_put(service);
1128 				return -EAGAIN;
1129 			} else if (instance->closing) {
1130 				dev_dbg(instance->state->dev, "arm: closing\n");
1131 				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1132 				vchiq_service_put(service);
1133 				return -EINVAL;
1134 			}
1135 			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1136 			spin_lock(&msg_queue_spinlock);
1137 		}
1138 
1139 		user_service->msg_queue[user_service->msg_insert &
1140 			(MSG_QUEUE_SIZE - 1)] = header;
1141 		user_service->msg_insert++;
1142 
1143 		/*
1144 		 * If there is a thread waiting in DEQUEUE_MESSAGE, or if
1145 		 * there is a MESSAGE_AVAILABLE in the completion queue then
1146 		 * bypass the completion queue.
1147 		 */
1148 		if (((user_service->message_available_pos -
1149 			instance->completion_remove) >= 0) ||
1150 			user_service->dequeue_pending) {
1151 			user_service->dequeue_pending = 0;
1152 			skip_completion = true;
1153 		}
1154 
1155 		spin_unlock(&msg_queue_spinlock);
1156 		complete(&user_service->insert_event);
1157 
1158 		header = NULL;
1159 	}
1160 	DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1161 	vchiq_service_put(service);
1162 
1163 	if (skip_completion)
1164 		return 0;
1165 
1166 	return add_completion(instance, reason, header, user_service,
1167 		bulk_userdata);
1168 }
1169 
void vchiq_dump_platform_instances(struct seq_file *f)
{
	struct vchiq_state *state = vchiq_get_state();
	int i;

	if (!state)
		return;

	/*
	 * There is no list of instances, so instead scan all services,
	 * marking those that have been dumped.
	 */

	/* First pass: clear the 'mark' flag on every reachable instance. */
	rcu_read_lock();
	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;
		struct vchiq_instance *instance;

		service = rcu_dereference(state->services[i]);
		/* Only userspace services (using service_callback) have one. */
		if (!service || service->base.callback != service_callback)
			continue;

		instance = service->instance;
		if (instance)
			instance->mark = 0;
	}
	rcu_read_unlock();

	/* Second pass: dump each not-yet-marked instance exactly once. */
	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;
		struct vchiq_instance *instance;

		rcu_read_lock();
		service = rcu_dereference(state->services[i]);
		if (!service || service->base.callback != service_callback) {
			rcu_read_unlock();
			continue;
		}

		instance = service->instance;
		if (!instance || instance->mark) {
			rcu_read_unlock();
			continue;
		}
		rcu_read_unlock();

		/*
		 * NOTE(review): 'instance' is dereferenced after
		 * rcu_read_unlock(); this appears to rely on the instance
		 * outliving its services — confirm the lifetime guarantee.
		 */
		seq_printf(f, "Instance %pK: pid %d,%s completions %d/%d\n",
			   instance, instance->pid,
			   instance->connected ? " connected, " :
			   "",
			   instance->completion_insert -
			   instance->completion_remove,
			   MAX_COMPLETIONS);
		instance->mark = 1;
	}
}
1226 
vchiq_dump_platform_service_state(struct seq_file * f,struct vchiq_service * service)1227 void vchiq_dump_platform_service_state(struct seq_file *f,
1228 				       struct vchiq_service *service)
1229 {
1230 	struct user_service *user_service =
1231 			(struct user_service *)service->base.userdata;
1232 
1233 	seq_printf(f, "  instance %pK", service->instance);
1234 
1235 	if ((service->base.callback == service_callback) && user_service->is_vchi) {
1236 		seq_printf(f, ", %d/%d messages",
1237 			   user_service->msg_insert - user_service->msg_remove,
1238 			   MSG_QUEUE_SIZE);
1239 
1240 		if (user_service->dequeue_pending)
1241 			seq_puts(f, " (dequeue pending)");
1242 	}
1243 
1244 	seq_puts(f, "\n");
1245 }
1246 
1247 struct vchiq_state *
vchiq_get_state(void)1248 vchiq_get_state(void)
1249 {
1250 	if (!g_state.remote) {
1251 		pr_err("%s: g_state.remote == NULL\n", __func__);
1252 		return NULL;
1253 	}
1254 
1255 	if (g_state.remote->initialised != 1) {
1256 		pr_notice("%s: g_state.remote->initialised != 1 (%d)\n",
1257 			  __func__, g_state.remote->initialised);
1258 		return NULL;
1259 	}
1260 
1261 	return &g_state;
1262 }
1263 
1264 /*
1265  * Autosuspend related functionality
1266  */
1267 
1268 static int
vchiq_keepalive_vchiq_callback(struct vchiq_instance * instance,enum vchiq_reason reason,struct vchiq_header * header,unsigned int service_user,void * bulk_user)1269 vchiq_keepalive_vchiq_callback(struct vchiq_instance *instance,
1270 			       enum vchiq_reason reason,
1271 			       struct vchiq_header *header,
1272 			       unsigned int service_user, void *bulk_user)
1273 {
1274 	dev_err(instance->state->dev, "suspend: %s: callback reason %d\n",
1275 		__func__, reason);
1276 	return 0;
1277 }
1278 
static int
vchiq_keepalive_thread_func(void *v)
{
	struct vchiq_state *state = (struct vchiq_state *)v;
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	int status;
	struct vchiq_instance *instance;
	unsigned int ka_handle;
	int ret;

	/* Parameters for the "KEEP" (keep-alive) service. */
	struct vchiq_service_params_kernel params = {
		.fourcc      = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
		.callback    = vchiq_keepalive_vchiq_callback,
		.version     = KEEPALIVE_VER,
		.version_min = KEEPALIVE_VER_MIN
	};

	ret = vchiq_initialise(&instance);
	if (ret) {
		dev_err(state->dev, "suspend: %s: vchiq_initialise failed %d\n", __func__, ret);
		goto exit;
	}

	status = vchiq_connect(instance);
	if (status) {
		dev_err(state->dev, "suspend: %s: vchiq_connect failed %d\n", __func__, status);
		goto shutdown;
	}

	status = vchiq_add_service(instance, &params, &ka_handle);
	if (status) {
		dev_err(state->dev, "suspend: %s: vchiq_open_service failed %d\n",
			__func__, status);
		goto shutdown;
	}

	/*
	 * Main loop: wait for ka_evt (signalled by vchiq_on_remote_use() /
	 * vchiq_on_remote_release()), then translate the accumulated counts
	 * into vchiq_use_service()/vchiq_release_service() calls.  The loop
	 * never exits normally; only the setup-failure gotos above leave
	 * this function.
	 */
	while (1) {
		long rc = 0, uc = 0;

		if (wait_for_completion_interruptible(&arm_state->ka_evt)) {
			dev_err(state->dev, "suspend: %s: interrupted\n", __func__);
			flush_signals(current);
			continue;
		}

		/*
		 * read and clear counters.  Do release_count then use_count to
		 * prevent getting more releases than uses
		 */
		rc = atomic_xchg(&arm_state->ka_release_count, 0);
		uc = atomic_xchg(&arm_state->ka_use_count, 0);

		/*
		 * Call use/release service the requisite number of times.
		 * Process use before release so use counts don't go negative
		 */
		while (uc--) {
			/* ack count is consumed later by vchiq_use_internal() */
			atomic_inc(&arm_state->ka_use_ack_count);
			status = vchiq_use_service(instance, ka_handle);
			if (status) {
				dev_err(state->dev, "suspend: %s: vchiq_use_service error %d\n",
					__func__, status);
			}
		}
		while (rc--) {
			status = vchiq_release_service(instance, ka_handle);
			if (status) {
				dev_err(state->dev, "suspend: %s: vchiq_release_service error %d\n",
					__func__, status);
			}
		}
	}

shutdown:
	vchiq_shutdown(instance);
exit:
	return 0;
}
1358 
1359 int
vchiq_use_internal(struct vchiq_state * state,struct vchiq_service * service,enum USE_TYPE_E use_type)1360 vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
1361 		   enum USE_TYPE_E use_type)
1362 {
1363 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1364 	int ret = 0;
1365 	char entity[64];
1366 	int *entity_uc;
1367 	int local_uc;
1368 
1369 	if (!arm_state) {
1370 		ret = -EINVAL;
1371 		goto out;
1372 	}
1373 
1374 	if (use_type == USE_TYPE_VCHIQ) {
1375 		snprintf(entity, sizeof(entity), "VCHIQ:   ");
1376 		entity_uc = &arm_state->peer_use_count;
1377 	} else if (service) {
1378 		snprintf(entity, sizeof(entity), "%p4cc:%03d",
1379 			 &service->base.fourcc,
1380 			 service->client_id);
1381 		entity_uc = &service->service_use_count;
1382 	} else {
1383 		dev_err(state->dev, "suspend: %s: null service ptr\n", __func__);
1384 		ret = -EINVAL;
1385 		goto out;
1386 	}
1387 
1388 	write_lock_bh(&arm_state->susp_res_lock);
1389 	local_uc = ++arm_state->videocore_use_count;
1390 	++(*entity_uc);
1391 
1392 	dev_dbg(state->dev, "suspend: %s count %d, state count %d\n",
1393 		entity, *entity_uc, local_uc);
1394 
1395 	write_unlock_bh(&arm_state->susp_res_lock);
1396 
1397 	if (!ret) {
1398 		int status = 0;
1399 		long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
1400 
1401 		while (ack_cnt && !status) {
1402 			/* Send the use notify to videocore */
1403 			status = vchiq_send_remote_use_active(state);
1404 			if (!status)
1405 				ack_cnt--;
1406 			else
1407 				atomic_add(ack_cnt, &arm_state->ka_use_ack_count);
1408 		}
1409 	}
1410 
1411 out:
1412 	dev_dbg(state->dev, "suspend: exit %d\n", ret);
1413 	return ret;
1414 }
1415 
/*
 * Decrement the VideoCore use count and the matching per-entity count
 * (service count if 'service' is given, otherwise the peer count).
 * Returns 0 on success, -EINVAL if counts would go negative or arm_state
 * is missing.
 */
int
vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	int ret = 0;
	char entity[64];
	int *entity_uc;

	if (!arm_state) {
		ret = -EINVAL;
		goto out;
	}

	/* Pick the entity-specific counter and a label for diagnostics. */
	if (service) {
		snprintf(entity, sizeof(entity), "%p4cc:%03d",
			 &service->base.fourcc,
			 service->client_id);
		entity_uc = &service->service_use_count;
	} else {
		snprintf(entity, sizeof(entity), "PEER:   ");
		entity_uc = &arm_state->peer_use_count;
	}

	write_lock_bh(&arm_state->susp_res_lock);
	if (!arm_state->videocore_use_count || !(*entity_uc)) {
		/* Don't use BUG_ON - don't allow user thread to crash kernel */
		WARN_ON(!arm_state->videocore_use_count);
		WARN_ON(!(*entity_uc));
		ret = -EINVAL;
		goto unlock;
	}
	--arm_state->videocore_use_count;
	--(*entity_uc);

	dev_dbg(state->dev, "suspend: %s count %d, state count %d\n",
		entity, *entity_uc, arm_state->videocore_use_count);

unlock:
	write_unlock_bh(&arm_state->susp_res_lock);

out:
	dev_dbg(state->dev, "suspend: exit %d\n", ret);
	return ret;
}
1460 
1461 void
vchiq_on_remote_use(struct vchiq_state * state)1462 vchiq_on_remote_use(struct vchiq_state *state)
1463 {
1464 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1465 
1466 	atomic_inc(&arm_state->ka_use_count);
1467 	complete(&arm_state->ka_evt);
1468 }
1469 
1470 void
vchiq_on_remote_release(struct vchiq_state * state)1471 vchiq_on_remote_release(struct vchiq_state *state)
1472 {
1473 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1474 
1475 	atomic_inc(&arm_state->ka_release_count);
1476 	complete(&arm_state->ka_evt);
1477 }
1478 
1479 int
vchiq_use_service_internal(struct vchiq_service * service)1480 vchiq_use_service_internal(struct vchiq_service *service)
1481 {
1482 	return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
1483 }
1484 
1485 int
vchiq_release_service_internal(struct vchiq_service * service)1486 vchiq_release_service_internal(struct vchiq_service *service)
1487 {
1488 	return vchiq_release_internal(service->state, service);
1489 }
1490 
1491 struct vchiq_debugfs_node *
vchiq_instance_get_debugfs_node(struct vchiq_instance * instance)1492 vchiq_instance_get_debugfs_node(struct vchiq_instance *instance)
1493 {
1494 	return &instance->debugfs_node;
1495 }
1496 
1497 int
vchiq_instance_get_use_count(struct vchiq_instance * instance)1498 vchiq_instance_get_use_count(struct vchiq_instance *instance)
1499 {
1500 	struct vchiq_service *service;
1501 	int use_count = 0, i;
1502 
1503 	i = 0;
1504 	rcu_read_lock();
1505 	while ((service = __next_service_by_instance(instance->state,
1506 						     instance, &i)))
1507 		use_count += service->service_use_count;
1508 	rcu_read_unlock();
1509 	return use_count;
1510 }
1511 
1512 int
vchiq_instance_get_pid(struct vchiq_instance * instance)1513 vchiq_instance_get_pid(struct vchiq_instance *instance)
1514 {
1515 	return instance->pid;
1516 }
1517 
1518 int
vchiq_instance_get_trace(struct vchiq_instance * instance)1519 vchiq_instance_get_trace(struct vchiq_instance *instance)
1520 {
1521 	return instance->trace;
1522 }
1523 
1524 void
vchiq_instance_set_trace(struct vchiq_instance * instance,int trace)1525 vchiq_instance_set_trace(struct vchiq_instance *instance, int trace)
1526 {
1527 	struct vchiq_service *service;
1528 	int i;
1529 
1530 	i = 0;
1531 	rcu_read_lock();
1532 	while ((service = __next_service_by_instance(instance->state,
1533 						     instance, &i)))
1534 		service->trace = trace;
1535 	rcu_read_unlock();
1536 	instance->trace = (trace != 0);
1537 }
1538 
1539 int
vchiq_use_service(struct vchiq_instance * instance,unsigned int handle)1540 vchiq_use_service(struct vchiq_instance *instance, unsigned int handle)
1541 {
1542 	int ret = -EINVAL;
1543 	struct vchiq_service *service = find_service_by_handle(instance, handle);
1544 
1545 	if (service) {
1546 		ret = vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
1547 		vchiq_service_put(service);
1548 	}
1549 	return ret;
1550 }
1551 EXPORT_SYMBOL(vchiq_use_service);
1552 
1553 int
vchiq_release_service(struct vchiq_instance * instance,unsigned int handle)1554 vchiq_release_service(struct vchiq_instance *instance, unsigned int handle)
1555 {
1556 	int ret = -EINVAL;
1557 	struct vchiq_service *service = find_service_by_handle(instance, handle);
1558 
1559 	if (service) {
1560 		ret = vchiq_release_internal(service->state, service);
1561 		vchiq_service_put(service);
1562 	}
1563 	return ret;
1564 }
1565 EXPORT_SYMBOL(vchiq_release_service);
1566 
/*
 * Snapshot of one service's identity and use count, taken under the
 * suspend/resume lock by vchiq_dump_service_use_state() so the data can be
 * printed after the lock is dropped.
 */
struct service_data_struct {
	int fourcc;	/* service FourCC identifier */
	int clientid;	/* service client id */
	int use_count;	/* service_use_count at snapshot time */
};
1572 
/*
 * Log the use counts of all active services (plus the peer and overall
 * VideoCore counts).  Data is snapshotted under the locks and printed
 * afterwards, so logging cannot sleep while holding them.
 */
void
vchiq_dump_service_use_state(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	struct service_data_struct *service_data;
	int i, found = 0;
	/*
	 * If there's more than 64 services, only dump ones with
	 * non-zero counts
	 */
	int only_nonzero = 0;
	static const char *nz = "<-- preventing suspend";

	int peer_count;
	int vc_use_count;
	int active_services;

	if (!arm_state)
		return;

	/* Snapshot buffer; at most MAX_SERVICES entries are recorded. */
	service_data = kmalloc_array(MAX_SERVICES, sizeof(*service_data),
				     GFP_KERNEL);
	if (!service_data)
		return;

	read_lock_bh(&arm_state->susp_res_lock);
	peer_count = arm_state->peer_use_count;
	vc_use_count = arm_state->videocore_use_count;
	active_services = state->unused_service;
	if (active_services > MAX_SERVICES)
		only_nonzero = 1;

	/* Copy identity + use count of each live service into the buffer. */
	rcu_read_lock();
	for (i = 0; i < active_services; i++) {
		struct vchiq_service *service_ptr =
			rcu_dereference(state->services[i]);

		if (!service_ptr)
			continue;

		if (only_nonzero && !service_ptr->service_use_count)
			continue;

		if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)
			continue;

		service_data[found].fourcc = service_ptr->base.fourcc;
		service_data[found].clientid = service_ptr->client_id;
		service_data[found].use_count = service_ptr->service_use_count;
		found++;
		if (found >= MAX_SERVICES)
			break;
	}
	rcu_read_unlock();

	read_unlock_bh(&arm_state->susp_res_lock);

	/* Locks dropped: now safe to emit the (possibly slow) log output. */
	if (only_nonzero)
		dev_warn(state->dev,
			 "suspend: Too many active services (%d). Only dumping up to first %d services with non-zero use-count\n",
			 active_services, found);

	for (i = 0; i < found; i++) {
		dev_warn(state->dev,
			 "suspend: %p4cc:%d service count %d %s\n",
			 &service_data[i].fourcc,
			 service_data[i].clientid, service_data[i].use_count,
			 service_data[i].use_count ? nz : "");
	}
	dev_warn(state->dev, "suspend: VCHIQ use count %d\n", peer_count);
	dev_warn(state->dev, "suspend: Overall vchiq instance use count %d\n", vc_use_count);

	kfree(service_data);
}
1647 
/*
 * Verify that 'service' holds at least one use-count reference.  Returns 0
 * if so, -EINVAL otherwise (and dumps the full use-state for diagnosis).
 */
int
vchiq_check_service(struct vchiq_service *service)
{
	struct vchiq_arm_state *arm_state;
	int ret = -EINVAL;

	if (!service || !service->state)
		goto out;

	arm_state = vchiq_platform_get_arm_state(service->state);

	read_lock_bh(&arm_state->susp_res_lock);
	if (service->service_use_count)
		ret = 0;
	read_unlock_bh(&arm_state->susp_res_lock);

	if (ret) {
		/*
		 * NOTE(review): the counts printed here are read after the
		 * lock is dropped, so they may be slightly stale — this is
		 * diagnostic output only.
		 */
		dev_err(service->state->dev,
			"suspend: %s:  %p4cc:%d service count %d, state count %d\n",
			__func__, &service->base.fourcc, service->client_id,
			service->service_use_count, arm_state->videocore_use_count);
		vchiq_dump_service_use_state(service->state);
	}
out:
	return ret;
}
1674 
vchiq_platform_conn_state_changed(struct vchiq_state * state,enum vchiq_connstate oldstate,enum vchiq_connstate newstate)1675 void vchiq_platform_conn_state_changed(struct vchiq_state *state,
1676 				       enum vchiq_connstate oldstate,
1677 				       enum vchiq_connstate newstate)
1678 {
1679 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1680 	char threadname[16];
1681 
1682 	dev_dbg(state->dev, "suspend: %d: %s->%s\n",
1683 		state->id, get_conn_state_name(oldstate), get_conn_state_name(newstate));
1684 	if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
1685 		return;
1686 
1687 	write_lock_bh(&arm_state->susp_res_lock);
1688 	if (arm_state->first_connect) {
1689 		write_unlock_bh(&arm_state->susp_res_lock);
1690 		return;
1691 	}
1692 
1693 	arm_state->first_connect = 1;
1694 	write_unlock_bh(&arm_state->susp_res_lock);
1695 	snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
1696 		 state->id);
1697 	arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
1698 					      (void *)state,
1699 					      threadname);
1700 	if (IS_ERR(arm_state->ka_thread)) {
1701 		dev_err(state->dev, "suspend: Couldn't create thread %s\n",
1702 			threadname);
1703 	} else {
1704 		wake_up_process(arm_state->ka_thread);
1705 	}
1706 }
1707 
/* Devicetree match table; .data carries the per-SoC vchiq_drvdata. */
static const struct of_device_id vchiq_of_match[] = {
	{ .compatible = "brcm,bcm2835-vchiq", .data = &bcm2835_drvdata },
	{ .compatible = "brcm,bcm2836-vchiq", .data = &bcm2836_drvdata },
	{},
};
MODULE_DEVICE_TABLE(of, vchiq_of_match);
1714 
vchiq_probe(struct platform_device * pdev)1715 static int vchiq_probe(struct platform_device *pdev)
1716 {
1717 	struct device_node *fw_node;
1718 	const struct of_device_id *of_id;
1719 	struct vchiq_drvdata *drvdata;
1720 	int err;
1721 
1722 	of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
1723 	drvdata = (struct vchiq_drvdata *)of_id->data;
1724 	if (!drvdata)
1725 		return -EINVAL;
1726 
1727 	fw_node = of_find_compatible_node(NULL, NULL,
1728 					  "raspberrypi,bcm2835-firmware");
1729 	if (!fw_node) {
1730 		dev_err(&pdev->dev, "Missing firmware node\n");
1731 		return -ENOENT;
1732 	}
1733 
1734 	drvdata->fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
1735 	of_node_put(fw_node);
1736 	if (!drvdata->fw)
1737 		return -EPROBE_DEFER;
1738 
1739 	platform_set_drvdata(pdev, drvdata);
1740 
1741 	err = vchiq_platform_init(pdev, &g_state);
1742 	if (err)
1743 		goto failed_platform_init;
1744 
1745 	vchiq_debugfs_init();
1746 
1747 	dev_dbg(&pdev->dev, "arm: platform initialised - version %d (min %d)\n",
1748 		VCHIQ_VERSION, VCHIQ_VERSION_MIN);
1749 
1750 	/*
1751 	 * Simply exit on error since the function handles cleanup in
1752 	 * cases of failure.
1753 	 */
1754 	err = vchiq_register_chrdev(&pdev->dev);
1755 	if (err) {
1756 		dev_warn(&pdev->dev, "arm: Failed to initialize vchiq cdev\n");
1757 		goto error_exit;
1758 	}
1759 
1760 	bcm2835_audio = vchiq_device_register(&pdev->dev, "bcm2835-audio");
1761 	bcm2835_camera = vchiq_device_register(&pdev->dev, "bcm2835-camera");
1762 
1763 	return 0;
1764 
1765 failed_platform_init:
1766 	dev_warn(&pdev->dev, "arm: Could not initialize vchiq platform\n");
1767 error_exit:
1768 	return err;
1769 }
1770 
vchiq_remove(struct platform_device * pdev)1771 static void vchiq_remove(struct platform_device *pdev)
1772 {
1773 	vchiq_device_unregister(bcm2835_audio);
1774 	vchiq_device_unregister(bcm2835_camera);
1775 	vchiq_debugfs_deinit();
1776 	vchiq_deregister_chrdev();
1777 }
1778 
/* Platform driver glue; matched against vchiq_of_match above. */
static struct platform_driver vchiq_driver = {
	.driver = {
		.name = "bcm2835_vchiq",
		.of_match_table = vchiq_of_match,
	},
	.probe = vchiq_probe,
	.remove_new = vchiq_remove,
};
1787 
vchiq_driver_init(void)1788 static int __init vchiq_driver_init(void)
1789 {
1790 	int ret;
1791 
1792 	ret = bus_register(&vchiq_bus_type);
1793 	if (ret) {
1794 		pr_err("Failed to register %s\n", vchiq_bus_type.name);
1795 		return ret;
1796 	}
1797 
1798 	ret = platform_driver_register(&vchiq_driver);
1799 	if (ret) {
1800 		pr_err("Failed to register vchiq driver\n");
1801 		bus_unregister(&vchiq_bus_type);
1802 	}
1803 
1804 	return ret;
1805 }
1806 module_init(vchiq_driver_init);
1807 
vchiq_driver_exit(void)1808 static void __exit vchiq_driver_exit(void)
1809 {
1810 	bus_unregister(&vchiq_bus_type);
1811 	platform_driver_unregister(&vchiq_driver);
1812 }
1813 module_exit(vchiq_driver_exit);
1814 
1815 MODULE_LICENSE("Dual BSD/GPL");
1816 MODULE_DESCRIPTION("Videocore VCHIQ driver");
1817 MODULE_AUTHOR("Broadcom Corporation");
1818