1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3  * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
4  * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5  */
6 
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/sched/signal.h>
10 #include <linux/types.h>
11 #include <linux/errno.h>
12 #include <linux/cdev.h>
13 #include <linux/fs.h>
14 #include <linux/device.h>
15 #include <linux/device/bus.h>
16 #include <linux/mm.h>
17 #include <linux/pagemap.h>
18 #include <linux/bug.h>
19 #include <linux/completion.h>
20 #include <linux/list.h>
21 #include <linux/of.h>
22 #include <linux/platform_device.h>
23 #include <linux/compat.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/rcupdate.h>
26 #include <linux/delay.h>
27 #include <linux/slab.h>
28 #include <linux/interrupt.h>
29 #include <linux/io.h>
30 #include <linux/uaccess.h>
31 #include <soc/bcm2835/raspberrypi-firmware.h>
32 
33 #include "vchiq_core.h"
34 #include "vchiq_ioctl.h"
35 #include "vchiq_arm.h"
36 #include "vchiq_bus.h"
37 #include "vchiq_debugfs.h"
38 
39 #define DEVICE_NAME "vchiq"
40 
41 #define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
42 
43 #define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
44 
45 #define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
46 #define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX  1
47 
48 #define BELL0	0x00
49 
50 #define ARM_DS_ACTIVE	BIT(2)
51 
52 /* Override the default prefix, which would be vchiq_arm (from the filename) */
53 #undef MODULE_PARAM_PREFIX
54 #define MODULE_PARAM_PREFIX DEVICE_NAME "."
55 
56 #define KEEPALIVE_VER 1
57 #define KEEPALIVE_VER_MIN KEEPALIVE_VER
58 
/*
 * The devices implemented in the VCHIQ firmware are not discoverable,
 * so we need to maintain a list of them in order to register them with
 * the interface.
 */
/* Child devices registered on the vchiq bus once the stack is up */
static struct vchiq_device *bcm2835_audio;
static struct vchiq_device *bcm2835_camera;
66 
/*
 * Per-SoC platform data.  cache_line_size feeds the DMA fragment sizing
 * in vchiq_platform_init() (each fragment is two cache lines).
 */
static const struct vchiq_platform_info bcm2835_info = {
	.cache_line_size = 32,
};

static const struct vchiq_platform_info bcm2836_info = {
	.cache_line_size = 64,
};
74 
struct vchiq_arm_state {
	/* Keepalive-related data */
	struct task_struct *ka_thread;
	/* Kicked whenever the keepalive counters below have been updated */
	struct completion ka_evt;
	/* Pending "use" requests to be replayed by the keepalive thread */
	atomic_t ka_use_count;
	/* Count of "use" requests the keepalive thread has picked up */
	atomic_t ka_use_ack_count;
	/* Pending "release" requests to be replayed by the keepalive thread */
	atomic_t ka_release_count;

	/* Taken (write_lock_bh) around use/release accounting updates */
	rwlock_t susp_res_lock;

	/* Back-pointer to the owning core state */
	struct vchiq_state *state;

	/*
	 * Global use count for videocore.
	 * This is equal to the sum of the use counts for all services.  When
	 * this hits zero the videocore suspend procedure will be initiated.
	 */
	int videocore_use_count;

	/*
	 * Use count to track requests from videocore peer.
	 * This use count is not associated with a service, so needs to be
	 * tracked separately with the state.
	 */
	int peer_use_count;
};
101 
102 static int
103 vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle,
104 			     struct vchiq_bulk *bulk_params);
105 
106 static irqreturn_t
vchiq_doorbell_irq(int irq,void * dev_id)107 vchiq_doorbell_irq(int irq, void *dev_id)
108 {
109 	struct vchiq_state *state = dev_id;
110 	struct vchiq_drv_mgmt *mgmt;
111 	irqreturn_t ret = IRQ_NONE;
112 	unsigned int status;
113 
114 	mgmt = dev_get_drvdata(state->dev);
115 
116 	/* Read (and clear) the doorbell */
117 	status = readl(mgmt->regs + BELL0);
118 
119 	if (status & ARM_DS_ACTIVE) {  /* Was the doorbell rung? */
120 		remote_event_pollall(state);
121 		ret = IRQ_HANDLED;
122 	}
123 
124 	return ret;
125 }
126 
127 /*
128  * This function is called by the vchiq stack once it has been connected to
129  * the videocore and clients can start to use the stack.
130  */
vchiq_call_connected_callbacks(struct vchiq_drv_mgmt * drv_mgmt)131 static void vchiq_call_connected_callbacks(struct vchiq_drv_mgmt *drv_mgmt)
132 {
133 	int i;
134 
135 	if (mutex_lock_killable(&drv_mgmt->connected_mutex))
136 		return;
137 
138 	for (i = 0; i < drv_mgmt->num_deferred_callbacks; i++)
139 		drv_mgmt->deferred_callback[i]();
140 
141 	drv_mgmt->num_deferred_callbacks = 0;
142 	drv_mgmt->connected = true;
143 	mutex_unlock(&drv_mgmt->connected_mutex);
144 }
145 
146 /*
147  * This function is used to defer initialization until the vchiq stack is
148  * initialized. If the stack is already initialized, then the callback will
149  * be made immediately, otherwise it will be deferred until
150  * vchiq_call_connected_callbacks is called.
151  */
vchiq_add_connected_callback(struct vchiq_device * device,void (* callback)(void))152 void vchiq_add_connected_callback(struct vchiq_device *device, void (*callback)(void))
153 {
154 	struct vchiq_drv_mgmt *drv_mgmt = device->drv_mgmt;
155 
156 	if (mutex_lock_killable(&drv_mgmt->connected_mutex))
157 		return;
158 
159 	if (drv_mgmt->connected) {
160 		/* We're already connected. Call the callback immediately. */
161 		callback();
162 	} else {
163 		if (drv_mgmt->num_deferred_callbacks >= VCHIQ_DRV_MAX_CALLBACKS) {
164 			dev_err(&device->dev,
165 				"core: deferred callbacks(%d) exceeded the maximum limit(%d)\n",
166 				drv_mgmt->num_deferred_callbacks, VCHIQ_DRV_MAX_CALLBACKS);
167 		} else {
168 			drv_mgmt->deferred_callback[drv_mgmt->num_deferred_callbacks] =
169 				callback;
170 			drv_mgmt->num_deferred_callbacks++;
171 		}
172 	}
173 	mutex_unlock(&drv_mgmt->connected_mutex);
174 }
175 EXPORT_SYMBOL(vchiq_add_connected_callback);
176 
/*
 * One-time platform bring-up: allocate the shared slot memory and the
 * DMA scratch-fragment pool, initialise the core state, hook up the
 * doorbell IRQ and hand the slot base address to the VideoCore firmware.
 * Returns 0 on success or a negative errno.
 */
static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
{
	struct device *dev = &pdev->dev;
	struct vchiq_drv_mgmt *drv_mgmt = platform_get_drvdata(pdev);
	struct rpi_firmware *fw = drv_mgmt->fw;
	struct vchiq_slot_zero *vchiq_slot_zero;
	void *slot_mem;
	dma_addr_t slot_phys;
	u32 channelbase;
	int slot_mem_size, frag_mem_size;
	int err, irq, i;

	/*
	 * VCHI messages between the CPU and firmware use
	 * 32-bit bus addresses.
	 */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	if (err < 0)
		return err;

	/* Each fragment covers two cache lines (head + tail scratch) */
	drv_mgmt->fragments_size = 2 * drv_mgmt->info->cache_line_size;

	/* Allocate space for the channels in coherent memory */
	slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
	frag_mem_size = PAGE_ALIGN(drv_mgmt->fragments_size * MAX_FRAGMENTS);

	/* Single allocation: slots first, fragment pool appended after */
	slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
				       &slot_phys, GFP_KERNEL);
	if (!slot_mem) {
		dev_err(dev, "could not allocate DMA memory\n");
		return -ENOMEM;
	}

	WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);

	vchiq_slot_zero = vchiq_init_slots(dev, slot_mem, slot_mem_size);
	if (!vchiq_slot_zero)
		return -ENOMEM;

	/* Advertise the fragment pool's bus address and size to the VPU */
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
		(int)slot_phys + slot_mem_size;
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
		MAX_FRAGMENTS;

	drv_mgmt->fragments_base = (char *)slot_mem + slot_mem_size;

	/* Build the free list: each fragment's first word links to the next */
	drv_mgmt->free_fragments = drv_mgmt->fragments_base;
	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
		*(char **)&drv_mgmt->fragments_base[i * drv_mgmt->fragments_size] =
			&drv_mgmt->fragments_base[(i + 1) * drv_mgmt->fragments_size];
	}
	*(char **)&drv_mgmt->fragments_base[i * drv_mgmt->fragments_size] = NULL;
	sema_init(&drv_mgmt->free_fragments_sema, MAX_FRAGMENTS);
	sema_init(&drv_mgmt->free_fragments_mutex, 1);

	err = vchiq_init_state(state, vchiq_slot_zero, dev);
	if (err)
		return err;

	drv_mgmt->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(drv_mgmt->regs))
		return PTR_ERR(drv_mgmt->regs);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return irq;

	err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
			       "VCHIQ doorbell", state);
	if (err) {
		dev_err(dev, "failed to register irq=%d\n", irq);
		return err;
	}

	/* Send the base address of the slots to VideoCore */
	channelbase = slot_phys;
	err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
				    &channelbase, sizeof(channelbase));
	if (err) {
		dev_err(dev, "failed to send firmware property: %d\n", err);
		return err;
	}

	/* A non-zero response from the firmware indicates failure */
	if (channelbase) {
		dev_err(dev, "failed to set channelbase (response: %x)\n",
			channelbase);
		return -ENXIO;
	}

	dev_dbg(&pdev->dev, "arm: vchiq_init - done (slots %p, phys %pad)\n",
		vchiq_slot_zero, &slot_phys);

	mutex_init(&drv_mgmt->connected_mutex);
	vchiq_call_connected_callbacks(drv_mgmt);

	return 0;
}
275 
/* Recover the ARM-side platform state stashed by vchiq_platform_init_state() */
static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *state)
{
	return (struct vchiq_arm_state *)state->platform_state;
}
280 
/* Tear-down: stop the core worker threads, then the keepalive thread */
static void
vchiq_platform_uninit(struct vchiq_drv_mgmt *mgmt)
{
	struct vchiq_arm_state *arm_state;

	kthread_stop(mgmt->state.sync_thread);
	kthread_stop(mgmt->state.recycle_thread);
	kthread_stop(mgmt->state.slot_handler_thread);

	arm_state = vchiq_platform_get_arm_state(&mgmt->state);
	/* The keepalive thread may never have been created successfully */
	if (!IS_ERR_OR_NULL(arm_state->ka_thread))
		kthread_stop(arm_state->ka_thread);
}
294 
/* Identify the platform in the debugfs state dump */
void vchiq_dump_platform_state(struct seq_file *f)
{
	seq_puts(f, "  Platform: 2835 (VC master)\n");
}
299 
300 #define VCHIQ_INIT_RETRIES 10
vchiq_initialise(struct vchiq_state * state,struct vchiq_instance ** instance_out)301 int vchiq_initialise(struct vchiq_state *state, struct vchiq_instance **instance_out)
302 {
303 	struct vchiq_instance *instance = NULL;
304 	int i, ret;
305 
306 	/*
307 	 * VideoCore may not be ready due to boot up timing.
308 	 * It may never be ready if kernel and firmware are mismatched,so don't
309 	 * block forever.
310 	 */
311 	for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
312 		if (vchiq_remote_initialised(state))
313 			break;
314 		usleep_range(500, 600);
315 	}
316 	if (i == VCHIQ_INIT_RETRIES) {
317 		dev_err(state->dev, "core: %s: Videocore not initialized\n", __func__);
318 		ret = -ENOTCONN;
319 		goto failed;
320 	} else if (i > 0) {
321 		dev_warn(state->dev, "core: %s: videocore initialized after %d retries\n",
322 			 __func__, i);
323 	}
324 
325 	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
326 	if (!instance) {
327 		ret = -ENOMEM;
328 		goto failed;
329 	}
330 
331 	instance->connected = 0;
332 	instance->state = state;
333 	mutex_init(&instance->bulk_waiter_list_mutex);
334 	INIT_LIST_HEAD(&instance->bulk_waiter_list);
335 
336 	*instance_out = instance;
337 
338 	ret = 0;
339 
340 failed:
341 	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);
342 
343 	return ret;
344 }
345 EXPORT_SYMBOL(vchiq_initialise);
346 
free_bulk_waiter(struct vchiq_instance * instance)347 void free_bulk_waiter(struct vchiq_instance *instance)
348 {
349 	struct bulk_waiter_node *waiter, *next;
350 
351 	list_for_each_entry_safe(waiter, next,
352 				 &instance->bulk_waiter_list, list) {
353 		list_del(&waiter->list);
354 		dev_dbg(instance->state->dev,
355 			"arm: bulk_waiter - cleaned up %p for pid %d\n",
356 			waiter, waiter->pid);
357 		kfree(waiter);
358 	}
359 }
360 
vchiq_shutdown(struct vchiq_instance * instance)361 int vchiq_shutdown(struct vchiq_instance *instance)
362 {
363 	struct vchiq_state *state = instance->state;
364 	int ret = 0;
365 
366 	if (mutex_lock_killable(&state->mutex))
367 		return -EAGAIN;
368 
369 	/* Remove all services */
370 	vchiq_shutdown_internal(state, instance);
371 
372 	mutex_unlock(&state->mutex);
373 
374 	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);
375 
376 	free_bulk_waiter(instance);
377 	kfree(instance);
378 
379 	return ret;
380 }
381 EXPORT_SYMBOL(vchiq_shutdown);
382 
/* Non-zero once vchiq_connect() has succeeded for this instance */
static int vchiq_is_connected(struct vchiq_instance *instance)
{
	return instance->connected;
}
387 
vchiq_connect(struct vchiq_instance * instance)388 int vchiq_connect(struct vchiq_instance *instance)
389 {
390 	struct vchiq_state *state = instance->state;
391 	int ret;
392 
393 	if (mutex_lock_killable(&state->mutex)) {
394 		dev_dbg(state->dev,
395 			"core: call to mutex_lock failed\n");
396 		ret = -EAGAIN;
397 		goto failed;
398 	}
399 	ret = vchiq_connect_internal(state, instance);
400 
401 	if (!ret)
402 		instance->connected = 1;
403 
404 	mutex_unlock(&state->mutex);
405 
406 failed:
407 	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);
408 
409 	return ret;
410 }
411 EXPORT_SYMBOL(vchiq_connect);
412 
413 static int
vchiq_add_service(struct vchiq_instance * instance,const struct vchiq_service_params_kernel * params,unsigned int * phandle)414 vchiq_add_service(struct vchiq_instance *instance,
415 		  const struct vchiq_service_params_kernel *params,
416 		  unsigned int *phandle)
417 {
418 	struct vchiq_state *state = instance->state;
419 	struct vchiq_service *service = NULL;
420 	int srvstate, ret;
421 
422 	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
423 
424 	srvstate = vchiq_is_connected(instance)
425 		? VCHIQ_SRVSTATE_LISTENING
426 		: VCHIQ_SRVSTATE_HIDDEN;
427 
428 	service = vchiq_add_service_internal(state, params, srvstate, instance, NULL);
429 
430 	if (service) {
431 		*phandle = service->handle;
432 		ret = 0;
433 	} else {
434 		ret = -EINVAL;
435 	}
436 
437 	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);
438 
439 	return ret;
440 }
441 
442 int
vchiq_open_service(struct vchiq_instance * instance,const struct vchiq_service_params_kernel * params,unsigned int * phandle)443 vchiq_open_service(struct vchiq_instance *instance,
444 		   const struct vchiq_service_params_kernel *params,
445 		   unsigned int *phandle)
446 {
447 	struct vchiq_state   *state = instance->state;
448 	struct vchiq_service *service = NULL;
449 	int ret = -EINVAL;
450 
451 	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
452 
453 	if (!vchiq_is_connected(instance))
454 		goto failed;
455 
456 	service = vchiq_add_service_internal(state, params, VCHIQ_SRVSTATE_OPENING, instance, NULL);
457 
458 	if (service) {
459 		*phandle = service->handle;
460 		ret = vchiq_open_service_internal(service, current->pid);
461 		if (ret) {
462 			vchiq_remove_service(instance, service->handle);
463 			*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
464 		}
465 	}
466 
467 failed:
468 	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);
469 
470 	return ret;
471 }
472 EXPORT_SYMBOL(vchiq_open_service);
473 
474 int
vchiq_bulk_transmit(struct vchiq_instance * instance,unsigned int handle,const void * data,unsigned int size,void * userdata,enum vchiq_bulk_mode mode)475 vchiq_bulk_transmit(struct vchiq_instance *instance, unsigned int handle, const void *data,
476 		    unsigned int size, void *userdata, enum vchiq_bulk_mode mode)
477 {
478 	struct vchiq_bulk bulk_params = {};
479 	int ret;
480 
481 	switch (mode) {
482 	case VCHIQ_BULK_MODE_NOCALLBACK:
483 	case VCHIQ_BULK_MODE_CALLBACK:
484 
485 		bulk_params.offset = (void *)data;
486 		bulk_params.mode = mode;
487 		bulk_params.size = size;
488 		bulk_params.cb_data = userdata;
489 		bulk_params.dir = VCHIQ_BULK_TRANSMIT;
490 
491 		ret = vchiq_bulk_xfer_callback(instance, handle, &bulk_params);
492 		break;
493 	case VCHIQ_BULK_MODE_BLOCKING:
494 		bulk_params.offset = (void *)data;
495 		bulk_params.mode = mode;
496 		bulk_params.size = size;
497 		bulk_params.dir = VCHIQ_BULK_TRANSMIT;
498 
499 		ret = vchiq_blocking_bulk_transfer(instance, handle, &bulk_params);
500 		break;
501 	default:
502 		return -EINVAL;
503 	}
504 
505 	return ret;
506 }
507 EXPORT_SYMBOL(vchiq_bulk_transmit);
508 
vchiq_bulk_receive(struct vchiq_instance * instance,unsigned int handle,void * data,unsigned int size,void * userdata,enum vchiq_bulk_mode mode)509 int vchiq_bulk_receive(struct vchiq_instance *instance, unsigned int handle,
510 		       void *data, unsigned int size, void *userdata,
511 		       enum vchiq_bulk_mode mode)
512 {
513 	struct vchiq_bulk bulk_params = {};
514 	int ret;
515 
516 	switch (mode) {
517 	case VCHIQ_BULK_MODE_NOCALLBACK:
518 	case VCHIQ_BULK_MODE_CALLBACK:
519 
520 		bulk_params.offset = (void *)data;
521 		bulk_params.mode = mode;
522 		bulk_params.size = size;
523 		bulk_params.cb_data = userdata;
524 		bulk_params.dir = VCHIQ_BULK_RECEIVE;
525 
526 		ret = vchiq_bulk_xfer_callback(instance, handle, &bulk_params);
527 		break;
528 	case VCHIQ_BULK_MODE_BLOCKING:
529 		bulk_params.offset = (void *)data;
530 		bulk_params.mode = mode;
531 		bulk_params.size = size;
532 		bulk_params.dir = VCHIQ_BULK_RECEIVE;
533 
534 		ret = vchiq_blocking_bulk_transfer(instance, handle, &bulk_params);
535 		break;
536 	default:
537 		return -EINVAL;
538 	}
539 
540 	return ret;
541 }
542 EXPORT_SYMBOL(vchiq_bulk_receive);
543 
/*
 * Perform a bulk transfer, sleeping until it completes.  A per-pid
 * bulk_waiter_node is reused across retries of the same transfer; on
 * -EAGAIN with an outstanding bulk the waiter is parked on the
 * instance's list so a retry from the same thread can resume it.
 */
static int
vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle,
			     struct vchiq_bulk *bulk_params)
{
	struct vchiq_service *service;
	struct bulk_waiter_node *waiter = NULL, *iter;
	int ret;

	service = find_service_by_handle(instance, handle);
	if (!service)
		return -EINVAL;

	/* Only needed the lookup for validation; drop the reference now */
	vchiq_service_put(service);

	/* Look for a waiter left behind by an earlier interrupted attempt */
	mutex_lock(&instance->bulk_waiter_list_mutex);
	list_for_each_entry(iter, &instance->bulk_waiter_list, list) {
		if (iter->pid == current->pid) {
			list_del(&iter->list);
			waiter = iter;
			break;
		}
	}
	mutex_unlock(&instance->bulk_waiter_list_mutex);

	if (waiter) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* This thread has an outstanding bulk transfer. */
			/* FIXME: why compare a dma address to a pointer? */
			if ((bulk->dma_addr != (dma_addr_t)(uintptr_t)bulk_params->dma_addr) ||
			    (bulk->size != bulk_params->size)) {
				/*
				 * This is not a retry of the previous one.
				 * Cancel the signal when the transfer completes.
				 */
				spin_lock(&service->state->bulk_waiter_spinlock);
				bulk->waiter = NULL;
				spin_unlock(&service->state->bulk_waiter_spinlock);
			}
		}
	} else {
		waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
		if (!waiter)
			return -ENOMEM;
	}

	bulk_params->waiter = &waiter->bulk_waiter;

	ret = vchiq_bulk_xfer_blocking(instance, handle, bulk_params);
	if ((ret != -EAGAIN) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) {
		/* Done (or unrecoverable): detach from the bulk and free */
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* Cancel the signal when the transfer completes. */
			spin_lock(&service->state->bulk_waiter_spinlock);
			bulk->waiter = NULL;
			spin_unlock(&service->state->bulk_waiter_spinlock);
		}
		kfree(waiter);
	} else {
		/* Interrupted but still in flight - park the waiter for a retry */
		waiter->pid = current->pid;
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_add(&waiter->list, &instance->bulk_waiter_list);
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		dev_dbg(instance->state->dev, "arm: saved bulk_waiter %p for pid %d\n",
			waiter, current->pid);
	}

	return ret;
}
615 
/*
 * Append a completion record to the instance's completion ring for
 * delivery to the user-side client.  Waits (interruptibly) while the
 * ring is full.  Returns 0 on success or when the instance is closing,
 * -EAGAIN if the wait was interrupted.
 */
static int
add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
	       struct vchiq_header *header, struct user_service *user_service,
	       void *cb_data, void __user *cb_userdata)
{
	struct vchiq_completion_data_kernel *completion;
	struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(instance->state->dev);
	int insert;

	DEBUG_INITIALISE(mgmt->state.local);

	insert = instance->completion_insert;
	while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
		/* Out of space - wait for the client */
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		dev_dbg(instance->state->dev, "core: completion queue full\n");
		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
		if (wait_for_completion_interruptible(&instance->remove_event)) {
			dev_dbg(instance->state->dev, "arm: service_callback interrupted\n");
			return -EAGAIN;
		} else if (instance->closing) {
			dev_dbg(instance->state->dev, "arm: service_callback closing\n");
			return 0;
		}
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	}

	/* Masked index - assumes MAX_COMPLETIONS is a power of two */
	completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];

	completion->header = header;
	completion->reason = reason;
	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
	completion->service_userdata = user_service->service;
	completion->cb_data = cb_data;
	completion->cb_userdata = cb_userdata;

	if (reason == VCHIQ_SERVICE_CLOSED) {
		/*
		 * Take an extra reference, to be held until
		 * this CLOSED notification is delivered.
		 */
		vchiq_service_get(user_service->service);
		if (instance->use_close_delivered)
			user_service->close_pending = 1;
	}

	/*
	 * A write barrier is needed here to ensure that the entire completion
	 * record is written out before the insert point.
	 */
	wmb();

	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		user_service->message_available_pos = insert;

	insert++;
	instance->completion_insert = insert;

	complete(&instance->insert_event);

	return 0;
}
678 
/*
 * Called when the per-service message queue is full: make sure a
 * MESSAGE_AVAILABLE completion has been queued, then wait for the
 * client to remove a message.  Returns 0 when space should be
 * available, -EAGAIN if interrupted, -EINVAL if the instance is
 * closing, or the error from add_completion().
 */
static int
service_single_message(struct vchiq_instance *instance,
		       enum vchiq_reason reason, struct vchiq_service *service,
		       void *cb_data, void __user *cb_userdata)
{
	struct user_service *user_service;

	user_service = (struct user_service *)service->base.userdata;

	dev_dbg(service->state->dev, "arm: msg queue full\n");
	/*
	 * If there is no MESSAGE_AVAILABLE in the completion
	 * queue, add one
	 */
	if ((user_service->message_available_pos -
	     instance->completion_remove) < 0) {
		int ret;

		dev_dbg(instance->state->dev,
			"arm: Inserting extra MESSAGE_AVAILABLE\n");
		ret = add_completion(instance, reason, NULL, user_service,
				     cb_data, cb_userdata);
		if (ret)
			return ret;
	}

	/* Block until the client dequeues a message (or we're torn down) */
	if (wait_for_completion_interruptible(&user_service->remove_event)) {
		dev_dbg(instance->state->dev, "arm: interrupted\n");
		return -EAGAIN;
	} else if (instance->closing) {
		dev_dbg(instance->state->dev, "arm: closing\n");
		return -EINVAL;
	}

	return 0;
}
715 
/*
 * Core callback for services owned by user-mode clients.  Messages for
 * "vchi" services are copied into the per-service message queue;
 * everything else is reported via the instance's completion queue.
 * Returns 0, or a negative errno if an interruptible wait was broken.
 */
int
service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
		 struct vchiq_header *header, unsigned int handle,
		 void *cb_data, void __user *cb_userdata)
{
	/*
	 * How do we ensure the callback goes to the right client?
	 * The service_user data points to a user_service record
	 * containing the original callback and the user state structure, which
	 * contains a circular buffer for completion records.
	 */
	struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(instance->state->dev);
	struct user_service *user_service;
	struct vchiq_service *service;
	bool skip_completion = false;

	DEBUG_INITIALISE(mgmt->state.local);

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	rcu_read_lock();
	service = handle_to_service(instance, handle);
	if (WARN_ON(!service)) {
		rcu_read_unlock();
		return 0;
	}

	user_service = (struct user_service *)service->base.userdata;

	/* Drop events silently once the instance is shutting down */
	if (instance->closing) {
		rcu_read_unlock();
		return 0;
	}

	/*
	 * As hopping around different synchronization mechanism,
	 * taking an extra reference results in simpler implementation.
	 */
	vchiq_service_get(service);
	rcu_read_unlock();

	dev_dbg(service->state->dev,
		"arm: service %p(%d,%p), reason %d, header %p, instance %p, cb_data %p, cb_userdata %p\n",
		user_service, service->localport, user_service->userdata,
		reason, header, instance, cb_data, cb_userdata);

	if (header && user_service->is_vchi) {
		spin_lock(&service->state->msg_queue_spinlock);
		/* Queue full: drop the lock and wait for the client to drain it */
		while (user_service->msg_insert ==
			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
			int ret;

			spin_unlock(&service->state->msg_queue_spinlock);
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);

			ret = service_single_message(instance, reason, service,
						     cb_data, cb_userdata);
			if (ret) {
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				vchiq_service_put(service);
				return ret;
			}
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			spin_lock(&service->state->msg_queue_spinlock);
		}

		/* Masked index - assumes MSG_QUEUE_SIZE is a power of two */
		user_service->msg_queue[user_service->msg_insert &
			(MSG_QUEUE_SIZE - 1)] = header;
		user_service->msg_insert++;

		/*
		 * If there is a thread waiting in DEQUEUE_MESSAGE, or if
		 * there is a MESSAGE_AVAILABLE in the completion queue then
		 * bypass the completion queue.
		 */
		if (((user_service->message_available_pos -
			instance->completion_remove) >= 0) ||
			user_service->dequeue_pending) {
			user_service->dequeue_pending = 0;
			skip_completion = true;
		}

		spin_unlock(&service->state->msg_queue_spinlock);
		complete(&user_service->insert_event);

		/* The header now lives in the message queue, not the completion */
		header = NULL;
	}
	DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	vchiq_service_put(service);

	if (skip_completion)
		return 0;

	return add_completion(instance, reason, header, user_service,
			      cb_data, cb_userdata);
}
813 
/*
 * Debugfs dump of every user-mode instance.  There is no instance list,
 * so we scan the service table twice under RCU: first clearing each
 * instance's mark, then printing and marking each instance once.
 */
void vchiq_dump_platform_instances(struct vchiq_state *state, struct seq_file *f)
{
	int i;

	if (!vchiq_remote_initialised(state))
		return;

	/*
	 * There is no list of instances, so instead scan all services,
	 * marking those that have been dumped.
	 */

	rcu_read_lock();
	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;
		struct vchiq_instance *instance;

		/* Only services owned by user-mode clients have an instance */
		service = rcu_dereference(state->services[i]);
		if (!service || service->base.callback != service_callback)
			continue;

		instance = service->instance;
		if (instance)
			instance->mark = 0;
	}
	rcu_read_unlock();

	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;
		struct vchiq_instance *instance;

		rcu_read_lock();
		service = rcu_dereference(state->services[i]);
		if (!service || service->base.callback != service_callback) {
			rcu_read_unlock();
			continue;
		}

		/* Skip instances already printed in an earlier iteration */
		instance = service->instance;
		if (!instance || instance->mark) {
			rcu_read_unlock();
			continue;
		}
		rcu_read_unlock();

		seq_printf(f, "Instance %pK: pid %d,%s completions %d/%d\n",
			   instance, instance->pid,
			   instance->connected ? " connected, " :
			   "",
			   instance->completion_insert -
			   instance->completion_remove,
			   MAX_COMPLETIONS);
		instance->mark = 1;
	}
}
869 
vchiq_dump_platform_service_state(struct seq_file * f,struct vchiq_service * service)870 void vchiq_dump_platform_service_state(struct seq_file *f,
871 				       struct vchiq_service *service)
872 {
873 	struct user_service *user_service =
874 			(struct user_service *)service->base.userdata;
875 
876 	seq_printf(f, "  instance %pK", service->instance);
877 
878 	if ((service->base.callback == service_callback) && user_service->is_vchi) {
879 		seq_printf(f, ", %d/%d messages",
880 			   user_service->msg_insert - user_service->msg_remove,
881 			   MSG_QUEUE_SIZE);
882 
883 		if (user_service->dequeue_pending)
884 			seq_puts(f, " (dequeue pending)");
885 	}
886 
887 	seq_puts(f, "\n");
888 }
889 
890 /*
891  * Autosuspend related functionality
892  */
893 
/*
 * Callback for the keepalive "KEEP" service.  No events are expected on
 * this service, so any invocation is logged as an error.
 */
static int
vchiq_keepalive_vchiq_callback(struct vchiq_instance *instance,
			       enum vchiq_reason reason,
			       struct vchiq_header *header,
			       unsigned int service_user,
			       void *cb_data, void __user *cb_userdata)
{
	dev_err(instance->state->dev, "suspend: %s: callback reason %d\n",
		__func__, reason);
	return 0;
}
905 
/*
 * Keepalive thread: owns a private vchiq instance with a "KEEP" service
 * and drains the atomic ka_use_count/ka_release_count counters (bumped
 * elsewhere and signalled via ka_evt) into matching vchiq_use_service()
 * and vchiq_release_service() calls.
 */
static int
vchiq_keepalive_thread_func(void *v)
{
	struct vchiq_state *state = (struct vchiq_state *)v;
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	struct vchiq_instance *instance;
	unsigned int ka_handle;
	int ret;

	struct vchiq_service_params_kernel params = {
		.fourcc      = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
		.callback    = vchiq_keepalive_vchiq_callback,
		.version     = KEEPALIVE_VER,
		.version_min = KEEPALIVE_VER_MIN
	};

	ret = vchiq_initialise(state, &instance);
	if (ret) {
		dev_err(state->dev, "suspend: %s: vchiq_initialise failed %d\n", __func__, ret);
		goto exit;
	}

	ret = vchiq_connect(instance);
	if (ret) {
		dev_err(state->dev, "suspend: %s: vchiq_connect failed %d\n", __func__, ret);
		goto shutdown;
	}

	ret = vchiq_add_service(instance, &params, &ka_handle);
	if (ret) {
		dev_err(state->dev, "suspend: %s: vchiq_open_service failed %d\n",
			__func__, ret);
		goto shutdown;
	}

	while (!kthread_should_stop()) {
		long rc = 0, uc = 0;

		if (wait_for_completion_interruptible(&arm_state->ka_evt)) {
			dev_dbg(state->dev, "suspend: %s: interrupted\n", __func__);
			flush_signals(current);
			continue;
		}

		/*
		 * read and clear counters.  Do release_count then use_count to
		 * prevent getting more releases than uses
		 */
		rc = atomic_xchg(&arm_state->ka_release_count, 0);
		uc = atomic_xchg(&arm_state->ka_use_count, 0);

		/*
		 * Call use/release service the requisite number of times.
		 * Process use before release so use counts don't go negative
		 */
		while (uc--) {
			atomic_inc(&arm_state->ka_use_ack_count);
			ret = vchiq_use_service(instance, ka_handle);
			if (ret) {
				dev_err(state->dev, "suspend: %s: vchiq_use_service error %d\n",
					__func__, ret);
			}
		}
		while (rc--) {
			ret = vchiq_release_service(instance, ka_handle);
			if (ret) {
				dev_err(state->dev, "suspend: %s: vchiq_release_service error %d\n",
					__func__, ret);
			}
		}
	}

shutdown:
	vchiq_shutdown(instance);
exit:
	return 0;
}
983 
984 int
vchiq_platform_init_state(struct vchiq_state * state)985 vchiq_platform_init_state(struct vchiq_state *state)
986 {
987 	struct vchiq_arm_state *platform_state;
988 	char threadname[16];
989 
990 	platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL);
991 	if (!platform_state)
992 		return -ENOMEM;
993 
994 	snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
995 		 state->id);
996 	platform_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
997 						   (void *)state, threadname);
998 	if (IS_ERR(platform_state->ka_thread)) {
999 		dev_err(state->dev, "couldn't create thread %s\n", threadname);
1000 		return PTR_ERR(platform_state->ka_thread);
1001 	}
1002 
1003 	rwlock_init(&platform_state->susp_res_lock);
1004 
1005 	init_completion(&platform_state->ka_evt);
1006 	atomic_set(&platform_state->ka_use_count, 0);
1007 	atomic_set(&platform_state->ka_use_ack_count, 0);
1008 	atomic_set(&platform_state->ka_release_count, 0);
1009 
1010 	platform_state->state = state;
1011 
1012 	state->platform_state = (struct opaque_platform_state *)platform_state;
1013 
1014 	return 0;
1015 }
1016 
1017 int
vchiq_use_internal(struct vchiq_state * state,struct vchiq_service * service,enum USE_TYPE_E use_type)1018 vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
1019 		   enum USE_TYPE_E use_type)
1020 {
1021 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1022 	int ret = 0;
1023 	char entity[64];
1024 	int *entity_uc;
1025 	int local_uc;
1026 
1027 	if (!arm_state) {
1028 		ret = -EINVAL;
1029 		goto out;
1030 	}
1031 
1032 	if (use_type == USE_TYPE_VCHIQ) {
1033 		snprintf(entity, sizeof(entity), "VCHIQ:   ");
1034 		entity_uc = &arm_state->peer_use_count;
1035 	} else if (service) {
1036 		snprintf(entity, sizeof(entity), "%p4cc:%03d",
1037 			 &service->base.fourcc,
1038 			 service->client_id);
1039 		entity_uc = &service->service_use_count;
1040 	} else {
1041 		dev_err(state->dev, "suspend: %s: null service ptr\n", __func__);
1042 		ret = -EINVAL;
1043 		goto out;
1044 	}
1045 
1046 	write_lock_bh(&arm_state->susp_res_lock);
1047 	local_uc = ++arm_state->videocore_use_count;
1048 	++(*entity_uc);
1049 
1050 	dev_dbg(state->dev, "suspend: %s count %d, state count %d\n",
1051 		entity, *entity_uc, local_uc);
1052 
1053 	write_unlock_bh(&arm_state->susp_res_lock);
1054 
1055 	if (!ret) {
1056 		int ret = 0;
1057 		long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
1058 
1059 		while (ack_cnt && !ret) {
1060 			/* Send the use notify to videocore */
1061 			ret = vchiq_send_remote_use_active(state);
1062 			if (!ret)
1063 				ack_cnt--;
1064 			else
1065 				atomic_add(ack_cnt, &arm_state->ka_use_ack_count);
1066 		}
1067 	}
1068 
1069 out:
1070 	dev_dbg(state->dev, "suspend: exit %d\n", ret);
1071 	return ret;
1072 }
1073 
1074 int
vchiq_release_internal(struct vchiq_state * state,struct vchiq_service * service)1075 vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
1076 {
1077 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1078 	int ret = 0;
1079 	char entity[64];
1080 	int *entity_uc;
1081 
1082 	if (!arm_state) {
1083 		ret = -EINVAL;
1084 		goto out;
1085 	}
1086 
1087 	if (service) {
1088 		snprintf(entity, sizeof(entity), "%p4cc:%03d",
1089 			 &service->base.fourcc,
1090 			 service->client_id);
1091 		entity_uc = &service->service_use_count;
1092 	} else {
1093 		snprintf(entity, sizeof(entity), "PEER:   ");
1094 		entity_uc = &arm_state->peer_use_count;
1095 	}
1096 
1097 	write_lock_bh(&arm_state->susp_res_lock);
1098 	if (!arm_state->videocore_use_count || !(*entity_uc)) {
1099 		WARN_ON(!arm_state->videocore_use_count);
1100 		WARN_ON(!(*entity_uc));
1101 		ret = -EINVAL;
1102 		goto unlock;
1103 	}
1104 	--arm_state->videocore_use_count;
1105 	--(*entity_uc);
1106 
1107 	dev_dbg(state->dev, "suspend: %s count %d, state count %d\n",
1108 		entity, *entity_uc, arm_state->videocore_use_count);
1109 
1110 unlock:
1111 	write_unlock_bh(&arm_state->susp_res_lock);
1112 
1113 out:
1114 	dev_dbg(state->dev, "suspend: exit %d\n", ret);
1115 	return ret;
1116 }
1117 
/*
 * Remote "use" notification: record one pending use and wake the
 * keepalive thread, which converts the count into vchiq_use_service()
 * calls in process context.
 */
void
vchiq_on_remote_use(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	/* Publish the count before signalling the completion. */
	atomic_inc(&arm_state->ka_use_count);
	complete(&arm_state->ka_evt);
}
1126 
/*
 * Remote "release" notification: record one pending release and wake
 * the keepalive thread, which converts the count into
 * vchiq_release_service() calls in process context.
 */
void
vchiq_on_remote_release(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	/* Publish the count before signalling the completion. */
	atomic_inc(&arm_state->ka_release_count);
	complete(&arm_state->ka_evt);
}
1135 
/* Convenience wrapper: take a service-type use reference on @service. */
int
vchiq_use_service_internal(struct vchiq_service *service)
{
	return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
}
1141 
/* Convenience wrapper: drop a use reference held by @service. */
int
vchiq_release_service_internal(struct vchiq_service *service)
{
	return vchiq_release_internal(service->state, service);
}
1147 
/* Return the debugfs node embedded in @instance (address of a member,
 * so non-NULL for any valid instance). */
struct vchiq_debugfs_node *
vchiq_instance_get_debugfs_node(struct vchiq_instance *instance)
{
	return &instance->debugfs_node;
}
1153 
1154 int
vchiq_instance_get_use_count(struct vchiq_instance * instance)1155 vchiq_instance_get_use_count(struct vchiq_instance *instance)
1156 {
1157 	struct vchiq_service *service;
1158 	int use_count = 0, i;
1159 
1160 	i = 0;
1161 	rcu_read_lock();
1162 	while ((service = __next_service_by_instance(instance->state,
1163 						     instance, &i)))
1164 		use_count += service->service_use_count;
1165 	rcu_read_unlock();
1166 	return use_count;
1167 }
1168 
/* Return the pid recorded for @instance. */
int
vchiq_instance_get_pid(struct vchiq_instance *instance)
{
	return instance->pid;
}
1174 
/* Return the instance-level trace flag. */
int
vchiq_instance_get_trace(struct vchiq_instance *instance)
{
	return instance->trace;
}
1180 
1181 void
vchiq_instance_set_trace(struct vchiq_instance * instance,int trace)1182 vchiq_instance_set_trace(struct vchiq_instance *instance, int trace)
1183 {
1184 	struct vchiq_service *service;
1185 	int i;
1186 
1187 	i = 0;
1188 	rcu_read_lock();
1189 	while ((service = __next_service_by_instance(instance->state,
1190 						     instance, &i)))
1191 		service->trace = trace;
1192 	rcu_read_unlock();
1193 	instance->trace = (trace != 0);
1194 }
1195 
1196 int
vchiq_use_service(struct vchiq_instance * instance,unsigned int handle)1197 vchiq_use_service(struct vchiq_instance *instance, unsigned int handle)
1198 {
1199 	int ret = -EINVAL;
1200 	struct vchiq_service *service = find_service_by_handle(instance, handle);
1201 
1202 	if (service) {
1203 		ret = vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
1204 		vchiq_service_put(service);
1205 	}
1206 	return ret;
1207 }
1208 EXPORT_SYMBOL(vchiq_use_service);
1209 
1210 int
vchiq_release_service(struct vchiq_instance * instance,unsigned int handle)1211 vchiq_release_service(struct vchiq_instance *instance, unsigned int handle)
1212 {
1213 	int ret = -EINVAL;
1214 	struct vchiq_service *service = find_service_by_handle(instance, handle);
1215 
1216 	if (service) {
1217 		ret = vchiq_release_internal(service->state, service);
1218 		vchiq_service_put(service);
1219 	}
1220 	return ret;
1221 }
1222 EXPORT_SYMBOL(vchiq_release_service);
1223 
/*
 * Per-service snapshot taken under susp_res_lock so the use-state dump
 * can be printed after all locks are dropped.
 */
struct service_data_struct {
	int fourcc;	/* service fourcc identifier */
	int clientid;	/* client id of the service */
	int use_count;	/* sampled service_use_count */
};
1229 
/*
 * Log the use count of every live service, plus the peer and overall
 * videocore counts, to help diagnose what is blocking suspend.  Counts
 * are snapshotted into a temporary array under susp_res_lock + RCU and
 * printed only after both are released.
 */
void
vchiq_dump_service_use_state(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	struct service_data_struct *service_data;
	int i, found = 0;
	/*
	 * If there's more than 64 services, only dump ones with
	 * non-zero counts
	 */
	int only_nonzero = 0;
	static const char *nz = "<-- preventing suspend";

	int peer_count;
	int vc_use_count;
	int active_services;

	if (!arm_state)
		return;

	/* Allocate before taking any lock; GFP_KERNEL may sleep. */
	service_data = kmalloc_array(MAX_SERVICES, sizeof(*service_data),
				     GFP_KERNEL);
	if (!service_data)
		return;

	read_lock_bh(&arm_state->susp_res_lock);
	peer_count = arm_state->peer_use_count;
	vc_use_count = arm_state->videocore_use_count;
	active_services = state->unused_service;
	if (active_services > MAX_SERVICES)
		only_nonzero = 1;

	rcu_read_lock();
	for (i = 0; i < active_services; i++) {
		struct vchiq_service *service_ptr =
			rcu_dereference(state->services[i]);

		if (!service_ptr)
			continue;

		/* When over the cap, skip idle services to fit the array. */
		if (only_nonzero && !service_ptr->service_use_count)
			continue;

		if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)
			continue;

		service_data[found].fourcc = service_ptr->base.fourcc;
		service_data[found].clientid = service_ptr->client_id;
		service_data[found].use_count = service_ptr->service_use_count;
		found++;
		if (found >= MAX_SERVICES)
			break;
	}
	rcu_read_unlock();

	read_unlock_bh(&arm_state->susp_res_lock);

	/* All locks dropped: safe to print the snapshot now. */
	if (only_nonzero)
		dev_warn(state->dev,
			 "suspend: Too many active services (%d). Only dumping up to first %d services with non-zero use-count\n",
			 active_services, found);

	for (i = 0; i < found; i++) {
		dev_warn(state->dev,
			 "suspend: %p4cc:%d service count %d %s\n",
			 &service_data[i].fourcc,
			 service_data[i].clientid, service_data[i].use_count,
			 service_data[i].use_count ? nz : "");
	}
	dev_warn(state->dev, "suspend: VCHIQ use count %d\n", peer_count);
	dev_warn(state->dev, "suspend: Overall vchiq instance use count %d\n", vc_use_count);

	kfree(service_data);
}
1304 
1305 int
vchiq_check_service(struct vchiq_service * service)1306 vchiq_check_service(struct vchiq_service *service)
1307 {
1308 	struct vchiq_arm_state *arm_state;
1309 	int ret = -EINVAL;
1310 
1311 	if (!service || !service->state)
1312 		goto out;
1313 
1314 	arm_state = vchiq_platform_get_arm_state(service->state);
1315 
1316 	read_lock_bh(&arm_state->susp_res_lock);
1317 	if (service->service_use_count)
1318 		ret = 0;
1319 	read_unlock_bh(&arm_state->susp_res_lock);
1320 
1321 	if (ret) {
1322 		dev_err(service->state->dev,
1323 			"suspend: %s:  %p4cc:%d service count %d, state count %d\n",
1324 			__func__, &service->base.fourcc, service->client_id,
1325 			service->service_use_count, arm_state->videocore_use_count);
1326 		vchiq_dump_service_use_state(service->state);
1327 	}
1328 out:
1329 	return ret;
1330 }
1331 
/*
 * Called once the VCHIQ connection is up: wake the keepalive thread
 * created (but left sleeping) in vchiq_platform_init_state().
 */
void vchiq_platform_connected(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	wake_up_process(arm_state->ka_thread);
}
1338 
/* Trace-only hook: log connection state transitions at debug level. */
void vchiq_platform_conn_state_changed(struct vchiq_state *state,
				       enum vchiq_connstate oldstate,
				       enum vchiq_connstate newstate)
{
	dev_dbg(state->dev, "suspend: %d: %s->%s\n",
		state->id, get_conn_state_name(oldstate), get_conn_state_name(newstate));
}
1346 
/*
 * OF match table; the per-SoC .data selects the vchiq_platform_info
 * (e.g. cache line size) consumed during platform init.
 */
static const struct of_device_id vchiq_of_match[] = {
	{ .compatible = "brcm,bcm2835-vchiq", .data = &bcm2835_info },
	{ .compatible = "brcm,bcm2836-vchiq", .data = &bcm2836_info },
	{},
};
MODULE_DEVICE_TABLE(of, vchiq_of_match);
1353 
/*
 * Platform probe: locate the RPi firmware node, initialise the vchiq
 * platform state, expose the char device and debugfs entries, then
 * register the firmware-implemented child devices (audio, camera).
 */
static int vchiq_probe(struct platform_device *pdev)
{
	const struct vchiq_platform_info *info;
	struct vchiq_drv_mgmt *mgmt;
	int ret;

	info = of_device_get_match_data(&pdev->dev);
	if (!info)
		return -EINVAL;

	/* Node reference auto-dropped at scope exit via __free(device_node). */
	struct device_node *fw_node __free(device_node) =
		of_find_compatible_node(NULL, NULL, "raspberrypi,bcm2835-firmware");
	if (!fw_node) {
		dev_err(&pdev->dev, "Missing firmware node\n");
		return -ENOENT;
	}

	mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL);
	if (!mgmt)
		return -ENOMEM;

	/* Defer the probe until the firmware driver is available. */
	mgmt->fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
	if (!mgmt->fw)
		return -EPROBE_DEFER;

	mgmt->info = info;
	platform_set_drvdata(pdev, mgmt);

	ret = vchiq_platform_init(pdev, &mgmt->state);
	if (ret) {
		dev_err(&pdev->dev, "arm: Could not initialize vchiq platform\n");
		return ret;
	}

	dev_dbg(&pdev->dev, "arm: platform initialised - version %d (min %d)\n",
		VCHIQ_VERSION, VCHIQ_VERSION_MIN);

	/*
	 * Simply exit on error since the function handles cleanup in
	 * cases of failure.
	 */
	ret = vchiq_register_chrdev(&pdev->dev);
	if (ret) {
		dev_err(&pdev->dev, "arm: Failed to initialize vchiq cdev\n");
		vchiq_platform_uninit(mgmt);
		return ret;
	}

	vchiq_debugfs_init(&mgmt->state);

	/* Register the non-discoverable firmware devices (see file header). */
	bcm2835_audio = vchiq_device_register(&pdev->dev, "bcm2835-audio");
	bcm2835_camera = vchiq_device_register(&pdev->dev, "bcm2835-camera");

	return 0;
}
1409 
/* Platform remove: undo vchiq_probe() in reverse registration order. */
static void vchiq_remove(struct platform_device *pdev)
{
	struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(&pdev->dev);

	vchiq_device_unregister(bcm2835_audio);
	vchiq_device_unregister(bcm2835_camera);
	vchiq_debugfs_deinit();
	vchiq_deregister_chrdev();
	vchiq_platform_uninit(mgmt);
}
1420 
/* Platform driver glue; bound via the OF match table above. */
static struct platform_driver vchiq_driver = {
	.driver = {
		.name = "bcm2835_vchiq",
		.of_match_table = vchiq_of_match,
	},
	.probe = vchiq_probe,
	.remove = vchiq_remove,
};
1429 
vchiq_driver_init(void)1430 static int __init vchiq_driver_init(void)
1431 {
1432 	int ret;
1433 
1434 	ret = bus_register(&vchiq_bus_type);
1435 	if (ret) {
1436 		pr_err("Failed to register %s\n", vchiq_bus_type.name);
1437 		return ret;
1438 	}
1439 
1440 	ret = platform_driver_register(&vchiq_driver);
1441 	if (ret) {
1442 		pr_err("Failed to register vchiq driver\n");
1443 		bus_unregister(&vchiq_bus_type);
1444 	}
1445 
1446 	return ret;
1447 }
1448 module_init(vchiq_driver_init);
1449 
vchiq_driver_exit(void)1450 static void __exit vchiq_driver_exit(void)
1451 {
1452 	bus_unregister(&vchiq_bus_type);
1453 	platform_driver_unregister(&vchiq_driver);
1454 }
1455 module_exit(vchiq_driver_exit);
1456 
1457 MODULE_LICENSE("Dual BSD/GPL");
1458 MODULE_DESCRIPTION("Videocore VCHIQ driver");
1459 MODULE_AUTHOR("Broadcom Corporation");
1460