xref: /linux/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c (revision f61389a9cd26b424485acade726ccfff96c749de)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3  * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
4  * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5  */
6 
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/sched/signal.h>
10 #include <linux/types.h>
11 #include <linux/errno.h>
12 #include <linux/cdev.h>
13 #include <linux/fs.h>
14 #include <linux/device.h>
15 #include <linux/device/bus.h>
16 #include <linux/mm.h>
17 #include <linux/pagemap.h>
18 #include <linux/bug.h>
19 #include <linux/completion.h>
20 #include <linux/list.h>
21 #include <linux/of.h>
22 #include <linux/platform_device.h>
23 #include <linux/compat.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/rcupdate.h>
26 #include <linux/delay.h>
27 #include <linux/slab.h>
28 #include <linux/interrupt.h>
29 #include <linux/io.h>
30 #include <linux/uaccess.h>
31 #include <soc/bcm2835/raspberrypi-firmware.h>
32 
33 #include "vchiq_core.h"
34 #include "vchiq_ioctl.h"
35 #include "vchiq_arm.h"
36 #include "vchiq_bus.h"
37 #include "vchiq_debugfs.h"
38 
39 #define DEVICE_NAME "vchiq"
40 
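/* Slot zero plus the data slots (the 2 * 32 is assumed to be 32 slots per direction). */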
41 #define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
42 
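/* Scratch fragments, presumably for the misaligned head and tail of each in-flight bulk. */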
43 #define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
44 
45 #define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
46 #define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX  1
47 
48 #define BELL0	0x00
49 
50 #define ARM_DS_ACTIVE	BIT(2)
51 
52 /* Override the default prefix, which would be vchiq_arm (from the filename) */
53 #undef MODULE_PARAM_PREFIX
54 #define MODULE_PARAM_PREFIX DEVICE_NAME "."
55 
56 #define KEEPALIVE_VER 1
57 #define KEEPALIVE_VER_MIN KEEPALIVE_VER
58 
59 /*
60  * The devices implemented in the VCHIQ firmware are not discoverable,
61  * so we need to maintain a list of them in order to register them with
62  * the interface.
63  */
64 static struct vchiq_device *bcm2835_audio;
65 static struct vchiq_device *bcm2835_camera;
66 
67 static const struct vchiq_platform_info bcm2835_info = {
68 	.cache_line_size = 32,
69 };
70 
71 static const struct vchiq_platform_info bcm2836_info = {
72 	.cache_line_size = 64,
73 };
74 
75 struct vchiq_arm_state {
76 	/* Keepalive-related data */
77 	struct task_struct *ka_thread;
78 	struct completion ka_evt;
79 	atomic_t ka_use_count;
80 	atomic_t ka_use_ack_count;
81 	atomic_t ka_release_count;
82 
83 	rwlock_t susp_res_lock;
84 
85 	struct vchiq_state *state;
86 
87 	/*
88 	 * Global use count for videocore.
89 	 * This is equal to the sum of the use counts for all services.  When
90 	 * this hits zero the videocore suspend procedure will be initiated.
91 	 */
92 	int videocore_use_count;
93 
94 	/*
95 	 * Use count to track requests from videocore peer.
96 	 * This use count is not associated with a service, so it needs to be
97 	 * tracked separately in the state.
98 	 */
99 	int peer_use_count;
100 
101 	/*
102 	 * Flag to indicate that the first vchiq connect has made it through.
103 	 * This means that both sides should be fully ready, and we should
104 	 * be able to suspend after this point.
105 	 */
106 	int first_connect;
107 };
108 
109 static int
110 vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle,
111 			     struct vchiq_bulk *bulk_params);
112 
113 static irqreturn_t
114 vchiq_doorbell_irq(int irq, void *dev_id)
115 {
116 	struct vchiq_state *state = dev_id;
117 	struct vchiq_drv_mgmt *mgmt;
118 	irqreturn_t ret = IRQ_NONE;
119 	unsigned int status;
120 
121 	mgmt = dev_get_drvdata(state->dev);
122 
123 	/* Read (and clear) the doorbell */
124 	status = readl(mgmt->regs + BELL0);
125 
126 	if (status & ARM_DS_ACTIVE) {  /* Was the doorbell rung? */
127 		remote_event_pollall(state);
128 		ret = IRQ_HANDLED;
129 	}
130 
131 	return ret;
132 }
133 
134 /*
135  * This function is called by the vchiq stack once it has been connected to
136  * the videocore and clients can start to use the stack.
137  */
138 static void vchiq_call_connected_callbacks(struct vchiq_drv_mgmt *drv_mgmt)
139 {
140 	int i;
141 
142 	if (mutex_lock_killable(&drv_mgmt->connected_mutex))
143 		return;
144 
145 	for (i = 0; i < drv_mgmt->num_deferred_callbacks; i++)
146 		drv_mgmt->deferred_callback[i]();
147 
148 	drv_mgmt->num_deferred_callbacks = 0;
149 	drv_mgmt->connected = true;
150 	mutex_unlock(&drv_mgmt->connected_mutex);
151 }
152 
153 /*
154  * This function is used to defer initialization until the vchiq stack is
155  * initialized. If the stack is already initialized, then the callback will
156  * be made immediately, otherwise it will be deferred until
157  * vchiq_call_connected_callbacks is called.
158  */
159 void vchiq_add_connected_callback(struct vchiq_device *device, void (*callback)(void))
160 {
161 	struct vchiq_drv_mgmt *drv_mgmt = device->drv_mgmt;
162 
163 	if (mutex_lock_killable(&drv_mgmt->connected_mutex))
164 		return;
165 
166 	if (drv_mgmt->connected) {
167 		/* We're already connected. Call the callback immediately. */
168 		callback();
169 	} else {
170 		if (drv_mgmt->num_deferred_callbacks >= VCHIQ_DRV_MAX_CALLBACKS) {
171 			dev_err(&device->dev,
172 				"core: deferred callbacks(%d) exceeded the maximum limit(%d)\n",
173 				drv_mgmt->num_deferred_callbacks, VCHIQ_DRV_MAX_CALLBACKS);
174 		} else {
175 			drv_mgmt->deferred_callback[drv_mgmt->num_deferred_callbacks] =
176 				callback;
177 			drv_mgmt->num_deferred_callbacks++;
178 		}
179 	}
180 	mutex_unlock(&drv_mgmt->connected_mutex);
181 }
182 EXPORT_SYMBOL(vchiq_add_connected_callback);
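
/*
 * Usage sketch (hypothetical client driver), deferring firmware-side
 * setup until the stack is connected. The names my_connected() and
 * my_probe() are illustrative only:
 *
 *	static void my_connected(void)
 *	{
 *		... safe to create and open services from here on ...
 *	}
 *
 *	static int my_probe(struct vchiq_device *device)
 *	{
 *		vchiq_add_connected_callback(device, my_connected);
 *		return 0;
 *	}
 */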
183 
184 static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
185 {
186 	struct device *dev = &pdev->dev;
187 	struct vchiq_drv_mgmt *drv_mgmt = platform_get_drvdata(pdev);
188 	struct rpi_firmware *fw = drv_mgmt->fw;
189 	struct vchiq_slot_zero *vchiq_slot_zero;
190 	void *slot_mem;
191 	dma_addr_t slot_phys;
192 	u32 channelbase;
193 	int slot_mem_size, frag_mem_size;
194 	int err, irq, i;
195 
196 	/*
197 	 * VCHI messages between the CPU and firmware use
198 	 * 32-bit bus addresses.
199 	 */
200 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
201 
202 	if (err < 0)
203 		return err;
204 
205 	drv_mgmt->fragments_size = 2 * drv_mgmt->info->cache_line_size;
206 
207 	/* Allocate space for the channels in coherent memory */
208 	slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
209 	frag_mem_size = PAGE_ALIGN(drv_mgmt->fragments_size * MAX_FRAGMENTS);
210 
211 	slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
212 				       &slot_phys, GFP_KERNEL);
213 	if (!slot_mem) {
214 		dev_err(dev, "could not allocate DMA memory\n");
215 		return -ENOMEM;
216 	}
217 
218 	WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);
219 
220 	vchiq_slot_zero = vchiq_init_slots(dev, slot_mem, slot_mem_size);
221 	if (!vchiq_slot_zero)
222 		return -ENOMEM;
223 
224 	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
225 		(int)slot_phys + slot_mem_size;
226 	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
227 		MAX_FRAGMENTS;
228 
229 	drv_mgmt->fragments_base = (char *)slot_mem + slot_mem_size;
230 
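	/*
	 * Thread a singly-linked free list through the fragment buffers: the
	 * first pointer-sized bytes of each free fragment hold the address of
	 * the next free one, terminated with NULL. free_fragments_sema counts
	 * the fragments available.
	 */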
231 	drv_mgmt->free_fragments = drv_mgmt->fragments_base;
232 	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
233 		*(char **)&drv_mgmt->fragments_base[i * drv_mgmt->fragments_size] =
234 			&drv_mgmt->fragments_base[(i + 1) * drv_mgmt->fragments_size];
235 	}
236 	*(char **)&drv_mgmt->fragments_base[i * drv_mgmt->fragments_size] = NULL;
237 	sema_init(&drv_mgmt->free_fragments_sema, MAX_FRAGMENTS);
238 	sema_init(&drv_mgmt->free_fragments_mutex, 1);
239 
240 	err = vchiq_init_state(state, vchiq_slot_zero, dev);
241 	if (err)
242 		return err;
243 
244 	drv_mgmt->regs = devm_platform_ioremap_resource(pdev, 0);
245 	if (IS_ERR(drv_mgmt->regs))
246 		return PTR_ERR(drv_mgmt->regs);
247 
248 	irq = platform_get_irq(pdev, 0);
249 	if (irq <= 0)
250 		return irq;
251 
252 	err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
253 			       "VCHIQ doorbell", state);
254 	if (err) {
255 		dev_err(dev, "failed to register irq=%d\n", irq);
256 		return err;
257 	}
258 
259 	/* Send the base address of the slots to VideoCore */
260 	channelbase = slot_phys;
261 	err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
262 				    &channelbase, sizeof(channelbase));
263 	if (err) {
264 		dev_err(dev, "failed to send firmware property: %d\n", err);
265 		return err;
266 	}
267 
268 	if (channelbase) {
269 		dev_err(dev, "failed to set channelbase (response: %x)\n",
270 			channelbase);
271 		return -ENXIO;
272 	}
273 
274 	dev_dbg(&pdev->dev, "arm: vchiq_init - done (slots %p, phys %pad)\n",
275 		vchiq_slot_zero, &slot_phys);
276 
277 	mutex_init(&drv_mgmt->connected_mutex);
278 	vchiq_call_connected_callbacks(drv_mgmt);
279 
280 	return 0;
281 }
282 
283 int
284 vchiq_platform_init_state(struct vchiq_state *state)
285 {
286 	struct vchiq_arm_state *platform_state;
287 
288 	platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL);
289 	if (!platform_state)
290 		return -ENOMEM;
291 
292 	rwlock_init(&platform_state->susp_res_lock);
293 
294 	init_completion(&platform_state->ka_evt);
295 	atomic_set(&platform_state->ka_use_count, 0);
296 	atomic_set(&platform_state->ka_use_ack_count, 0);
297 	atomic_set(&platform_state->ka_release_count, 0);
298 
299 	platform_state->state = state;
300 
301 	state->platform_state = (struct opaque_platform_state *)platform_state;
302 
303 	return 0;
304 }
305 
306 static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *state)
307 {
308 	return (struct vchiq_arm_state *)state->platform_state;
309 }
310 
311 static void
312 vchiq_platform_uninit(struct vchiq_drv_mgmt *mgmt)
313 {
314 	struct vchiq_arm_state *arm_state;
315 
316 	kthread_stop(mgmt->state.sync_thread);
317 	kthread_stop(mgmt->state.recycle_thread);
318 	kthread_stop(mgmt->state.slot_handler_thread);
319 
320 	arm_state = vchiq_platform_get_arm_state(&mgmt->state);
321 	if (!IS_ERR_OR_NULL(arm_state->ka_thread))
322 		kthread_stop(arm_state->ka_thread);
323 }
324 
325 void vchiq_dump_platform_state(struct seq_file *f)
326 {
327 	seq_puts(f, "  Platform: 2835 (VC master)\n");
328 }
329 
330 #define VCHIQ_INIT_RETRIES 10
331 int vchiq_initialise(struct vchiq_state *state, struct vchiq_instance **instance_out)
332 {
333 	struct vchiq_instance *instance = NULL;
334 	int i, ret;
335 
336 	/*
337 	 * VideoCore may not be ready due to boot-up timing.
338 	 * It may never be ready if the kernel and firmware are mismatched, so don't
339 	 * block forever.
340 	 */
341 	for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
342 		if (vchiq_remote_initialised(state))
343 			break;
344 		usleep_range(500, 600);
345 	}
346 	if (i == VCHIQ_INIT_RETRIES) {
347 		dev_err(state->dev, "core: %s: Videocore not initialized\n", __func__);
348 		ret = -ENOTCONN;
349 		goto failed;
350 	} else if (i > 0) {
351 		dev_warn(state->dev, "core: %s: videocore initialized after %d retries\n",
352 			 __func__, i);
353 	}
354 
355 	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
356 	if (!instance) {
357 		ret = -ENOMEM;
358 		goto failed;
359 	}
360 
361 	instance->connected = 0;
362 	instance->state = state;
363 	mutex_init(&instance->bulk_waiter_list_mutex);
364 	INIT_LIST_HEAD(&instance->bulk_waiter_list);
365 
366 	*instance_out = instance;
367 
368 	ret = 0;
369 
370 failed:
371 	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);
372 
373 	return ret;
374 }
375 EXPORT_SYMBOL(vchiq_initialise);
376 
377 void free_bulk_waiter(struct vchiq_instance *instance)
378 {
379 	struct bulk_waiter_node *waiter, *next;
380 
381 	list_for_each_entry_safe(waiter, next,
382 				 &instance->bulk_waiter_list, list) {
383 		list_del(&waiter->list);
384 		dev_dbg(instance->state->dev,
385 			"arm: bulk_waiter - cleaned up %p for pid %d\n",
386 			waiter, waiter->pid);
387 		kfree(waiter);
388 	}
389 }
390 
391 int vchiq_shutdown(struct vchiq_instance *instance)
392 {
393 	struct vchiq_state *state = instance->state;
394 	int ret = 0;
395 
396 	mutex_lock(&state->mutex);
397 
398 	/* Remove all services */
399 	vchiq_shutdown_internal(state, instance);
400 
401 	mutex_unlock(&state->mutex);
402 
403 	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);
404 
405 	free_bulk_waiter(instance);
406 	kfree(instance);
407 
408 	return ret;
409 }
410 EXPORT_SYMBOL(vchiq_shutdown);
411 
412 static int vchiq_is_connected(struct vchiq_instance *instance)
413 {
414 	return instance->connected;
415 }
416 
417 int vchiq_connect(struct vchiq_instance *instance)
418 {
419 	struct vchiq_state *state = instance->state;
420 	int ret;
421 
422 	if (mutex_lock_killable(&state->mutex)) {
423 		dev_dbg(state->dev,
424 			"core: call to mutex_lock failed\n");
425 		ret = -EAGAIN;
426 		goto failed;
427 	}
428 	ret = vchiq_connect_internal(state, instance);
429 
430 	if (!ret)
431 		instance->connected = 1;
432 
433 	mutex_unlock(&state->mutex);
434 
435 failed:
436 	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);
437 
438 	return ret;
439 }
440 EXPORT_SYMBOL(vchiq_connect);
441 
442 static int
443 vchiq_add_service(struct vchiq_instance *instance,
444 		  const struct vchiq_service_params_kernel *params,
445 		  unsigned int *phandle)
446 {
447 	struct vchiq_state *state = instance->state;
448 	struct vchiq_service *service = NULL;
449 	int srvstate, ret;
450 
451 	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
452 
453 	srvstate = vchiq_is_connected(instance)
454 		? VCHIQ_SRVSTATE_LISTENING
455 		: VCHIQ_SRVSTATE_HIDDEN;
456 
457 	service = vchiq_add_service_internal(state, params, srvstate, instance, NULL);
458 
459 	if (service) {
460 		*phandle = service->handle;
461 		ret = 0;
462 	} else {
463 		ret = -EINVAL;
464 	}
465 
466 	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);
467 
468 	return ret;
469 }
470 
471 int
472 vchiq_open_service(struct vchiq_instance *instance,
473 		   const struct vchiq_service_params_kernel *params,
474 		   unsigned int *phandle)
475 {
476 	struct vchiq_state   *state = instance->state;
477 	struct vchiq_service *service = NULL;
478 	int ret = -EINVAL;
479 
480 	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
481 
482 	if (!vchiq_is_connected(instance))
483 		goto failed;
484 
485 	service = vchiq_add_service_internal(state, params, VCHIQ_SRVSTATE_OPENING, instance, NULL);
486 
487 	if (service) {
488 		*phandle = service->handle;
489 		ret = vchiq_open_service_internal(service, current->pid);
490 		if (ret) {
491 			vchiq_remove_service(instance, service->handle);
492 			*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
493 		}
494 	}
495 
496 failed:
497 	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);
498 
499 	return ret;
500 }
501 EXPORT_SYMBOL(vchiq_open_service);
502 
503 int
504 vchiq_bulk_transmit(struct vchiq_instance *instance, unsigned int handle, const void *data,
505 		    unsigned int size, void *userdata, enum vchiq_bulk_mode mode)
506 {
507 	struct vchiq_bulk bulk_params = {};
508 	int ret;
509 
510 	switch (mode) {
511 	case VCHIQ_BULK_MODE_NOCALLBACK:
512 	case VCHIQ_BULK_MODE_CALLBACK:
513 
514 		bulk_params.offset = (void *)data;
515 		bulk_params.mode = mode;
516 		bulk_params.size = size;
517 		bulk_params.cb_data = userdata;
518 		bulk_params.dir = VCHIQ_BULK_TRANSMIT;
519 
520 		ret = vchiq_bulk_xfer_callback(instance, handle, &bulk_params);
521 		break;
522 	case VCHIQ_BULK_MODE_BLOCKING:
523 		bulk_params.offset = (void *)data;
524 		bulk_params.mode = mode;
525 		bulk_params.size = size;
526 		bulk_params.dir = VCHIQ_BULK_TRANSMIT;
527 
528 		ret = vchiq_blocking_bulk_transfer(instance, handle, &bulk_params);
529 		break;
530 	default:
531 		return -EINVAL;
532 	}
533 
534 	return ret;
535 }
536 EXPORT_SYMBOL(vchiq_bulk_transmit);
537 
538 int vchiq_bulk_receive(struct vchiq_instance *instance, unsigned int handle,
539 		       void *data, unsigned int size, void *userdata,
540 		       enum vchiq_bulk_mode mode)
541 {
542 	struct vchiq_bulk bulk_params = {};
543 	int ret;
544 
545 	switch (mode) {
546 	case VCHIQ_BULK_MODE_NOCALLBACK:
547 	case VCHIQ_BULK_MODE_CALLBACK:
548 
549 		bulk_params.offset = (void *)data;
550 		bulk_params.mode = mode;
551 		bulk_params.size = size;
552 		bulk_params.cb_data = userdata;
553 		bulk_params.dir = VCHIQ_BULK_RECEIVE;
554 
555 		ret = vchiq_bulk_xfer_callback(instance, handle, &bulk_params);
556 		break;
557 	case VCHIQ_BULK_MODE_BLOCKING:
558 		bulk_params.offset = (void *)data;
559 		bulk_params.mode = mode;
560 		bulk_params.size = size;
561 		bulk_params.dir = VCHIQ_BULK_RECEIVE;
562 
563 		ret = vchiq_blocking_bulk_transfer(instance, handle, &bulk_params);
564 		break;
565 	default:
566 		return -EINVAL;
567 	}
568 
569 	return ret;
570 }
571 EXPORT_SYMBOL(vchiq_bulk_receive);
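
/*
 * Usage sketch (hypothetical): a blocking bulk receive into a driver
 * buffer; 'instance', 'handle' and 'buf' stand in for a connected
 * instance, an open service handle and a suitably sized buffer:
 *
 *	ret = vchiq_bulk_receive(instance, handle, buf, size,
 *				 NULL, VCHIQ_BULK_MODE_BLOCKING);
 *	if (ret)
 *		... failed, e.g. -EAGAIN if interrupted by a signal ...
 */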
572 
573 static int
574 vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle,
575 			     struct vchiq_bulk *bulk_params)
576 {
577 	struct vchiq_service *service;
578 	struct bulk_waiter_node *waiter = NULL, *iter;
579 	int ret;
580 
581 	service = find_service_by_handle(instance, handle);
582 	if (!service)
583 		return -EINVAL;
584 
585 	vchiq_service_put(service);
586 
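	/*
	 * A previous blocking transfer from this thread that was interrupted
	 * by a signal leaves its waiter on this per-instance list, keyed by
	 * pid; look for one so the outstanding transfer can be resumed.
	 */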
587 	mutex_lock(&instance->bulk_waiter_list_mutex);
588 	list_for_each_entry(iter, &instance->bulk_waiter_list, list) {
589 		if (iter->pid == current->pid) {
590 			list_del(&iter->list);
591 			waiter = iter;
592 			break;
593 		}
594 	}
595 	mutex_unlock(&instance->bulk_waiter_list_mutex);
596 
597 	if (waiter) {
598 		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;
599 
600 		if (bulk) {
601 			/* This thread has an outstanding bulk transfer. */
602 			/* FIXME: why compare a dma address to a pointer? */
603 			if ((bulk->dma_addr != (dma_addr_t)(uintptr_t)bulk_params->dma_addr) ||
604 			    (bulk->size != bulk_params->size)) {
605 				/*
606 				 * This is not a retry of the previous one.
607 				 * Cancel the signal when the transfer completes.
608 				 */
609 				spin_lock(&service->state->bulk_waiter_spinlock);
610 				bulk->waiter = NULL;
611 				spin_unlock(&service->state->bulk_waiter_spinlock);
612 			}
613 		}
614 	} else {
615 		waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
616 		if (!waiter)
617 			return -ENOMEM;
618 	}
619 
620 	bulk_params->waiter = &waiter->bulk_waiter;
621 
622 	ret = vchiq_bulk_xfer_blocking(instance, handle, bulk_params);
623 	if ((ret != -EAGAIN) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) {
624 		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;
625 
626 		if (bulk) {
627 			/* Cancel the signal when the transfer completes. */
628 			spin_lock(&service->state->bulk_waiter_spinlock);
629 			bulk->waiter = NULL;
630 			spin_unlock(&service->state->bulk_waiter_spinlock);
631 		}
632 		kfree(waiter);
633 	} else {
634 		waiter->pid = current->pid;
635 		mutex_lock(&instance->bulk_waiter_list_mutex);
636 		list_add(&waiter->list, &instance->bulk_waiter_list);
637 		mutex_unlock(&instance->bulk_waiter_list_mutex);
638 		dev_dbg(instance->state->dev, "arm: saved bulk_waiter %p for pid %d\n",
639 			waiter, current->pid);
640 	}
641 
642 	return ret;
643 }
644 
645 static int
646 add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
647 	       struct vchiq_header *header, struct user_service *user_service,
648 	       void *cb_data, void __user *cb_userdata)
649 {
650 	struct vchiq_completion_data_kernel *completion;
651 	struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(instance->state->dev);
652 	int insert;
653 
654 	DEBUG_INITIALISE(mgmt->state.local);
655 
656 	insert = instance->completion_insert;
657 	while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
658 		/* Out of space - wait for the client */
659 		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
660 		dev_dbg(instance->state->dev, "core: completion queue full\n");
661 		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
662 		if (wait_for_completion_interruptible(&instance->remove_event)) {
663 			dev_dbg(instance->state->dev, "arm: service_callback interrupted\n");
664 			return -EAGAIN;
665 		} else if (instance->closing) {
666 			dev_dbg(instance->state->dev, "arm: service_callback closing\n");
667 			return 0;
668 		}
669 		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
670 	}
671 
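	/*
	 * completion_insert and completion_remove are free-running counters;
	 * masking with MAX_COMPLETIONS - 1 (a power of two) maps them onto
	 * the ring.
	 */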
672 	completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];
673 
674 	completion->header = header;
675 	completion->reason = reason;
676 	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
677 	completion->service_userdata = user_service->service;
678 	completion->cb_data = cb_data;
679 	completion->cb_userdata = cb_userdata;
680 
681 	if (reason == VCHIQ_SERVICE_CLOSED) {
682 		/*
683 		 * Take an extra reference, to be held until
684 		 * this CLOSED notification is delivered.
685 		 */
686 		vchiq_service_get(user_service->service);
687 		if (instance->use_close_delivered)
688 			user_service->close_pending = 1;
689 	}
690 
691 	/*
692 	 * A write barrier is needed here to ensure that the entire completion
693 	 * record is written out before the insert point (the reader is expected to pair it with a read barrier).
694 	 */
695 	wmb();
696 
697 	if (reason == VCHIQ_MESSAGE_AVAILABLE)
698 		user_service->message_available_pos = insert;
699 
700 	insert++;
701 	instance->completion_insert = insert;
702 
703 	complete(&instance->insert_event);
704 
705 	return 0;
706 }
707 
708 static int
709 service_single_message(struct vchiq_instance *instance,
710 		       enum vchiq_reason reason, struct vchiq_service *service,
711 		       void *cb_data, void __user *cb_userdata)
712 {
713 	struct user_service *user_service;
714 
715 	user_service = (struct user_service *)service->base.userdata;
716 
717 	dev_dbg(service->state->dev, "arm: msg queue full\n");
718 	/*
719 	 * If there is no MESSAGE_AVAILABLE in the completion
720 	 * queue, add one
721 	 */
722 	if ((user_service->message_available_pos -
723 	     instance->completion_remove) < 0) {
724 		int ret;
725 
726 		dev_dbg(instance->state->dev,
727 			"arm: Inserting extra MESSAGE_AVAILABLE\n");
728 		ret = add_completion(instance, reason, NULL, user_service,
729 				     cb_data, cb_userdata);
730 		if (ret)
731 			return ret;
732 	}
733 
734 	if (wait_for_completion_interruptible(&user_service->remove_event)) {
735 		dev_dbg(instance->state->dev, "arm: interrupted\n");
736 		return -EAGAIN;
737 	} else if (instance->closing) {
738 		dev_dbg(instance->state->dev, "arm: closing\n");
739 		return -EINVAL;
740 	}
741 
742 	return 0;
743 }
744 
745 int
746 service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
747 		 struct vchiq_header *header, unsigned int handle,
748 		 void *cb_data, void __user *cb_userdata)
749 {
750 	/*
751 	 * How do we ensure the callback goes to the right client?
752 	 * The service userdata points to a user_service record
753 	 * containing the original callback and the user state structure, which
754 	 * contains a circular buffer for completion records.
755 	 */
756 	struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(instance->state->dev);
757 	struct user_service *user_service;
758 	struct vchiq_service *service;
759 	bool skip_completion = false;
760 
761 	DEBUG_INITIALISE(mgmt->state.local);
762 
763 	DEBUG_TRACE(SERVICE_CALLBACK_LINE);
764 
765 	rcu_read_lock();
766 	service = handle_to_service(instance, handle);
767 	if (WARN_ON(!service)) {
768 		rcu_read_unlock();
769 		return 0;
770 	}
771 
772 	user_service = (struct user_service *)service->base.userdata;
773 
774 	if (instance->closing) {
775 		rcu_read_unlock();
776 		return 0;
777 	}
778 
779 	/*
780 	 * Since we hop between different synchronization mechanisms,
781 	 * taking an extra reference keeps the implementation simpler.
782 	 */
783 	vchiq_service_get(service);
784 	rcu_read_unlock();
785 
786 	dev_dbg(service->state->dev,
787 		"arm: service %p(%d,%p), reason %d, header %p, instance %p, cb_data %p, cb_userdata %p\n",
788 		user_service, service->localport, user_service->userdata,
789 		reason, header, instance, cb_data, cb_userdata);
790 
791 	if (header && user_service->is_vchi) {
792 		spin_lock(&service->state->msg_queue_spinlock);
793 		while (user_service->msg_insert ==
794 			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
795 			int ret;
796 
797 			spin_unlock(&service->state->msg_queue_spinlock);
798 			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
799 			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
800 
801 			ret = service_single_message(instance, reason, service,
802 						     cb_data, cb_userdata);
803 			if (ret) {
804 				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
805 				vchiq_service_put(service);
806 				return ret;
807 			}
808 			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
809 			spin_lock(&service->state->msg_queue_spinlock);
810 		}
811 
812 		user_service->msg_queue[user_service->msg_insert &
813 			(MSG_QUEUE_SIZE - 1)] = header;
814 		user_service->msg_insert++;
815 
816 		/*
817 		 * If there is a thread waiting in DEQUEUE_MESSAGE, or if
818 		 * there is a MESSAGE_AVAILABLE in the completion queue then
819 		 * bypass the completion queue.
820 		 */
821 		if (((user_service->message_available_pos -
822 			instance->completion_remove) >= 0) ||
823 			user_service->dequeue_pending) {
824 			user_service->dequeue_pending = 0;
825 			skip_completion = true;
826 		}
827 
828 		spin_unlock(&service->state->msg_queue_spinlock);
829 		complete(&user_service->insert_event);
830 
831 		header = NULL;
832 	}
833 	DEBUG_TRACE(SERVICE_CALLBACK_LINE);
834 	vchiq_service_put(service);
835 
836 	if (skip_completion)
837 		return 0;
838 
839 	return add_completion(instance, reason, header, user_service,
840 			      cb_data, cb_userdata);
841 }
842 
843 void vchiq_dump_platform_instances(struct vchiq_state *state, struct seq_file *f)
844 {
845 	int i;
846 
847 	if (!vchiq_remote_initialised(state))
848 		return;
849 
850 	/*
851 	 * There is no list of instances, so instead scan all services,
852 	 * marking those that have been dumped.
853 	 */
854 
855 	rcu_read_lock();
856 	for (i = 0; i < state->unused_service; i++) {
857 		struct vchiq_service *service;
858 		struct vchiq_instance *instance;
859 
860 		service = rcu_dereference(state->services[i]);
861 		if (!service || service->base.callback != service_callback)
862 			continue;
863 
864 		instance = service->instance;
865 		if (instance)
866 			instance->mark = 0;
867 	}
868 	rcu_read_unlock();
869 
870 	for (i = 0; i < state->unused_service; i++) {
871 		struct vchiq_service *service;
872 		struct vchiq_instance *instance;
873 
874 		rcu_read_lock();
875 		service = rcu_dereference(state->services[i]);
876 		if (!service || service->base.callback != service_callback) {
877 			rcu_read_unlock();
878 			continue;
879 		}
880 
881 		instance = service->instance;
882 		if (!instance || instance->mark) {
883 			rcu_read_unlock();
884 			continue;
885 		}
886 		rcu_read_unlock();
887 
888 		seq_printf(f, "Instance %pK: pid %d,%s completions %d/%d\n",
889 			   instance, instance->pid,
890 			   instance->connected ? " connected, " :
891 			   "",
892 			   instance->completion_insert -
893 			   instance->completion_remove,
894 			   MAX_COMPLETIONS);
895 		instance->mark = 1;
896 	}
897 }
898 
899 void vchiq_dump_platform_service_state(struct seq_file *f,
900 				       struct vchiq_service *service)
901 {
902 	struct user_service *user_service =
903 			(struct user_service *)service->base.userdata;
904 
905 	seq_printf(f, "  instance %pK", service->instance);
906 
907 	if ((service->base.callback == service_callback) && user_service->is_vchi) {
908 		seq_printf(f, ", %d/%d messages",
909 			   user_service->msg_insert - user_service->msg_remove,
910 			   MSG_QUEUE_SIZE);
911 
912 		if (user_service->dequeue_pending)
913 			seq_puts(f, " (dequeue pending)");
914 	}
915 
916 	seq_puts(f, "\n");
917 }
918 
919 /*
920  * Autosuspend related functionality
921  */
922 
923 static int
924 vchiq_keepalive_vchiq_callback(struct vchiq_instance *instance,
925 			       enum vchiq_reason reason,
926 			       struct vchiq_header *header,
927 			       unsigned int service_user,
928 			       void *cb_data, void __user *cb_userdata)
929 {
930 	dev_err(instance->state->dev, "suspend: %s: callback reason %d\n",
931 		__func__, reason);
932 	return 0;
933 }
934 
935 static int
936 vchiq_keepalive_thread_func(void *v)
937 {
938 	struct vchiq_state *state = (struct vchiq_state *)v;
939 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
940 	struct vchiq_instance *instance;
941 	unsigned int ka_handle;
942 	int ret;
943 
944 	struct vchiq_service_params_kernel params = {
945 		.fourcc      = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
946 		.callback    = vchiq_keepalive_vchiq_callback,
947 		.version     = KEEPALIVE_VER,
948 		.version_min = KEEPALIVE_VER_MIN
949 	};
950 
951 	ret = vchiq_initialise(state, &instance);
952 	if (ret) {
953 		dev_err(state->dev, "suspend: %s: vchiq_initialise failed %d\n", __func__, ret);
954 		goto exit;
955 	}
956 
957 	ret = vchiq_connect(instance);
958 	if (ret) {
959 		dev_err(state->dev, "suspend: %s: vchiq_connect failed %d\n", __func__, ret);
960 		goto shutdown;
961 	}
962 
963 	ret = vchiq_add_service(instance, &params, &ka_handle);
964 	if (ret) {
965 		dev_err(state->dev, "suspend: %s: vchiq_open_service failed %d\n",
966 			__func__, ret);
967 		goto shutdown;
968 	}
969 
970 	while (!kthread_should_stop()) {
971 		long rc = 0, uc = 0;
972 
973 		if (wait_for_completion_interruptible(&arm_state->ka_evt)) {
974 			dev_dbg(state->dev, "suspend: %s: interrupted\n", __func__);
975 			flush_signals(current);
976 			continue;
977 		}
978 
979 		/*
980 		 * Read and clear the counters. Read release_count before use_count
981 		 * so we cannot see more releases than uses.
982 		 */
983 		rc = atomic_xchg(&arm_state->ka_release_count, 0);
984 		uc = atomic_xchg(&arm_state->ka_use_count, 0);
985 
986 		/*
987 		 * Call use/release service the requisite number of times.
988 		 * Process use before release so use counts don't go negative
989 		 */
990 		while (uc--) {
991 			atomic_inc(&arm_state->ka_use_ack_count);
992 			ret = vchiq_use_service(instance, ka_handle);
993 			if (ret) {
994 				dev_err(state->dev, "suspend: %s: vchiq_use_service error %d\n",
995 					__func__, ret);
996 			}
997 		}
998 		while (rc--) {
999 			ret = vchiq_release_service(instance, ka_handle);
1000 			if (ret) {
1001 				dev_err(state->dev, "suspend: %s: vchiq_release_service error %d\n",
1002 					__func__, ret);
1003 			}
1004 		}
1005 	}
1006 
1007 shutdown:
1008 	vchiq_shutdown(instance);
1009 exit:
1010 	return 0;
1011 }
1012 
1013 int
1014 vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
1015 		   enum USE_TYPE_E use_type)
1016 {
1017 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1018 	int ret = 0;
1019 	char entity[64];
1020 	int *entity_uc;
1021 	int local_uc;
1022 
1023 	if (!arm_state) {
1024 		ret = -EINVAL;
1025 		goto out;
1026 	}
1027 
1028 	if (use_type == USE_TYPE_VCHIQ) {
1029 		snprintf(entity, sizeof(entity), "VCHIQ:   ");
1030 		entity_uc = &arm_state->peer_use_count;
1031 	} else if (service) {
1032 		snprintf(entity, sizeof(entity), "%p4cc:%03d",
1033 			 &service->base.fourcc,
1034 			 service->client_id);
1035 		entity_uc = &service->service_use_count;
1036 	} else {
1037 		dev_err(state->dev, "suspend: %s: null service ptr\n", __func__);
1038 		ret = -EINVAL;
1039 		goto out;
1040 	}
1041 
1042 	write_lock_bh(&arm_state->susp_res_lock);
1043 	local_uc = ++arm_state->videocore_use_count;
1044 	++(*entity_uc);
1045 
1046 	dev_dbg(state->dev, "suspend: %s count %d, state count %d\n",
1047 		entity, *entity_uc, local_uc);
1048 
1049 	write_unlock_bh(&arm_state->susp_res_lock);
1050 
1051 	if (!ret) {
1052 		int ret = 0;
1053 		long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
1054 
1055 		while (ack_cnt && !ret) {
1056 			/* Send the use notify to videocore */
1057 			ret = vchiq_send_remote_use_active(state);
1058 			if (!ret)
1059 				ack_cnt--;
1060 			else
1061 				atomic_add(ack_cnt, &arm_state->ka_use_ack_count);
1062 		}
1063 	}
1064 
1065 out:
1066 	dev_dbg(state->dev, "suspend: exit %d\n", ret);
1067 	return ret;
1068 }
1069 
1070 int
1071 vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
1072 {
1073 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1074 	int ret = 0;
1075 	char entity[64];
1076 	int *entity_uc;
1077 
1078 	if (!arm_state) {
1079 		ret = -EINVAL;
1080 		goto out;
1081 	}
1082 
1083 	if (service) {
1084 		snprintf(entity, sizeof(entity), "%p4cc:%03d",
1085 			 &service->base.fourcc,
1086 			 service->client_id);
1087 		entity_uc = &service->service_use_count;
1088 	} else {
1089 		snprintf(entity, sizeof(entity), "PEER:   ");
1090 		entity_uc = &arm_state->peer_use_count;
1091 	}
1092 
1093 	write_lock_bh(&arm_state->susp_res_lock);
1094 	if (!arm_state->videocore_use_count || !(*entity_uc)) {
1095 		WARN_ON(!arm_state->videocore_use_count);
1096 		WARN_ON(!(*entity_uc));
1097 		ret = -EINVAL;
1098 		goto unlock;
1099 	}
1100 	--arm_state->videocore_use_count;
1101 	--(*entity_uc);
1102 
1103 	dev_dbg(state->dev, "suspend: %s count %d, state count %d\n",
1104 		entity, *entity_uc, arm_state->videocore_use_count);
1105 
1106 unlock:
1107 	write_unlock_bh(&arm_state->susp_res_lock);
1108 
1109 out:
1110 	dev_dbg(state->dev, "suspend: exit %d\n", ret);
1111 	return ret;
1112 }
1113 
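/*
 * Remote use/release notifications arrive in a context that should not
 * make service calls directly, so they are only counted here; the
 * keepalive thread is woken to turn them into vchiq_use_service() /
 * vchiq_release_service() calls on the KEEP service.
 */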
1114 void
1115 vchiq_on_remote_use(struct vchiq_state *state)
1116 {
1117 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1118 
1119 	atomic_inc(&arm_state->ka_use_count);
1120 	complete(&arm_state->ka_evt);
1121 }
1122 
1123 void
1124 vchiq_on_remote_release(struct vchiq_state *state)
1125 {
1126 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1127 
1128 	atomic_inc(&arm_state->ka_release_count);
1129 	complete(&arm_state->ka_evt);
1130 }
1131 
1132 int
1133 vchiq_use_service_internal(struct vchiq_service *service)
1134 {
1135 	return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
1136 }
1137 
1138 int
1139 vchiq_release_service_internal(struct vchiq_service *service)
1140 {
1141 	return vchiq_release_internal(service->state, service);
1142 }
1143 
1144 struct vchiq_debugfs_node *
1145 vchiq_instance_get_debugfs_node(struct vchiq_instance *instance)
1146 {
1147 	return &instance->debugfs_node;
1148 }
1149 
1150 int
1151 vchiq_instance_get_use_count(struct vchiq_instance *instance)
1152 {
1153 	struct vchiq_service *service;
1154 	int use_count = 0, i;
1155 
1156 	i = 0;
1157 	rcu_read_lock();
1158 	while ((service = __next_service_by_instance(instance->state,
1159 						     instance, &i)))
1160 		use_count += service->service_use_count;
1161 	rcu_read_unlock();
1162 	return use_count;
1163 }
1164 
1165 int
1166 vchiq_instance_get_pid(struct vchiq_instance *instance)
1167 {
1168 	return instance->pid;
1169 }
1170 
1171 int
1172 vchiq_instance_get_trace(struct vchiq_instance *instance)
1173 {
1174 	return instance->trace;
1175 }
1176 
1177 void
1178 vchiq_instance_set_trace(struct vchiq_instance *instance, int trace)
1179 {
1180 	struct vchiq_service *service;
1181 	int i;
1182 
1183 	i = 0;
1184 	rcu_read_lock();
1185 	while ((service = __next_service_by_instance(instance->state,
1186 						     instance, &i)))
1187 		service->trace = trace;
1188 	rcu_read_unlock();
1189 	instance->trace = (trace != 0);
1190 }
1191 
1192 int
1193 vchiq_use_service(struct vchiq_instance *instance, unsigned int handle)
1194 {
1195 	int ret = -EINVAL;
1196 	struct vchiq_service *service = find_service_by_handle(instance, handle);
1197 
1198 	if (service) {
1199 		ret = vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
1200 		vchiq_service_put(service);
1201 	}
1202 	return ret;
1203 }
1204 EXPORT_SYMBOL(vchiq_use_service);
1205 
1206 int
1207 vchiq_release_service(struct vchiq_instance *instance, unsigned int handle)
1208 {
1209 	int ret = -EINVAL;
1210 	struct vchiq_service *service = find_service_by_handle(instance, handle);
1211 
1212 	if (service) {
1213 		ret = vchiq_release_internal(service->state, service);
1214 		vchiq_service_put(service);
1215 	}
1216 	return ret;
1217 }
1218 EXPORT_SYMBOL(vchiq_release_service);
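
/*
 * Usage sketch (hypothetical): use counts must balance, so callers
 * bracket periods of VideoCore activity:
 *
 *	if (!vchiq_use_service(instance, handle)) {
 *		... exchange messages with the service ...
 *		vchiq_release_service(instance, handle);
 *	}
 */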
1219 
1220 struct service_data_struct {
1221 	int fourcc;
1222 	int clientid;
1223 	int use_count;
1224 };
1225 
1226 void
1227 vchiq_dump_service_use_state(struct vchiq_state *state)
1228 {
1229 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1230 	struct service_data_struct *service_data;
1231 	int i, found = 0;
1232 	/*
1233 	 * If there are more than 64 services, only dump the ones with
1234 	 * non-zero use counts.
1235 	 */
1236 	int only_nonzero = 0;
1237 	static const char *nz = "<-- preventing suspend";
1238 
1239 	int peer_count;
1240 	int vc_use_count;
1241 	int active_services;
1242 
1243 	if (!arm_state)
1244 		return;
1245 
1246 	service_data = kmalloc_array(MAX_SERVICES, sizeof(*service_data),
1247 				     GFP_KERNEL);
1248 	if (!service_data)
1249 		return;
1250 
1251 	read_lock_bh(&arm_state->susp_res_lock);
1252 	peer_count = arm_state->peer_use_count;
1253 	vc_use_count = arm_state->videocore_use_count;
1254 	active_services = state->unused_service;
1255 	if (active_services > MAX_SERVICES)
1256 		only_nonzero = 1;
1257 
1258 	rcu_read_lock();
1259 	for (i = 0; i < active_services; i++) {
1260 		struct vchiq_service *service_ptr =
1261 			rcu_dereference(state->services[i]);
1262 
1263 		if (!service_ptr)
1264 			continue;
1265 
1266 		if (only_nonzero && !service_ptr->service_use_count)
1267 			continue;
1268 
1269 		if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)
1270 			continue;
1271 
1272 		service_data[found].fourcc = service_ptr->base.fourcc;
1273 		service_data[found].clientid = service_ptr->client_id;
1274 		service_data[found].use_count = service_ptr->service_use_count;
1275 		found++;
1276 		if (found >= MAX_SERVICES)
1277 			break;
1278 	}
1279 	rcu_read_unlock();
1280 
1281 	read_unlock_bh(&arm_state->susp_res_lock);
1282 
1283 	if (only_nonzero)
1284 		dev_warn(state->dev,
1285 			 "suspend: Too many active services (%d). Only dumping up to first %d services with non-zero use-count\n",
1286 			 active_services, found);
1287 
1288 	for (i = 0; i < found; i++) {
1289 		dev_warn(state->dev,
1290 			 "suspend: %p4cc:%d service count %d %s\n",
1291 			 &service_data[i].fourcc,
1292 			 service_data[i].clientid, service_data[i].use_count,
1293 			 service_data[i].use_count ? nz : "");
1294 	}
1295 	dev_warn(state->dev, "suspend: VCHIQ use count %d\n", peer_count);
1296 	dev_warn(state->dev, "suspend: Overall vchiq instance use count %d\n", vc_use_count);
1297 
1298 	kfree(service_data);
1299 }
1300 
1301 int
1302 vchiq_check_service(struct vchiq_service *service)
1303 {
1304 	struct vchiq_arm_state *arm_state;
1305 	int ret = -EINVAL;
1306 
1307 	if (!service || !service->state)
1308 		goto out;
1309 
1310 	arm_state = vchiq_platform_get_arm_state(service->state);
1311 
1312 	read_lock_bh(&arm_state->susp_res_lock);
1313 	if (service->service_use_count)
1314 		ret = 0;
1315 	read_unlock_bh(&arm_state->susp_res_lock);
1316 
1317 	if (ret) {
1318 		dev_err(service->state->dev,
1319 			"suspend: %s:  %p4cc:%d service count %d, state count %d\n",
1320 			__func__, &service->base.fourcc, service->client_id,
1321 			service->service_use_count, arm_state->videocore_use_count);
1322 		vchiq_dump_service_use_state(service->state);
1323 	}
1324 out:
1325 	return ret;
1326 }
1327 
1328 void vchiq_platform_conn_state_changed(struct vchiq_state *state,
1329 				       enum vchiq_connstate oldstate,
1330 				       enum vchiq_connstate newstate)
1331 {
1332 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1333 	char threadname[16];
1334 
1335 	dev_dbg(state->dev, "suspend: %d: %s->%s\n",
1336 		state->id, get_conn_state_name(oldstate), get_conn_state_name(newstate));
1337 	if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
1338 		return;
1339 
1340 	write_lock_bh(&arm_state->susp_res_lock);
1341 	if (arm_state->first_connect) {
1342 		write_unlock_bh(&arm_state->susp_res_lock);
1343 		return;
1344 	}
1345 
1346 	arm_state->first_connect = 1;
1347 	write_unlock_bh(&arm_state->susp_res_lock);
1348 	snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
1349 		 state->id);
1350 	arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
1351 					      (void *)state,
1352 					      threadname);
1353 	if (IS_ERR(arm_state->ka_thread)) {
1354 		dev_err(state->dev, "suspend: Couldn't create thread %s\n",
1355 			threadname);
1356 	} else {
1357 		wake_up_process(arm_state->ka_thread);
1358 	}
1359 }
1360 
1361 static const struct of_device_id vchiq_of_match[] = {
1362 	{ .compatible = "brcm,bcm2835-vchiq", .data = &bcm2835_info },
1363 	{ .compatible = "brcm,bcm2836-vchiq", .data = &bcm2836_info },
1364 	{},
1365 };
1366 MODULE_DEVICE_TABLE(of, vchiq_of_match);
1367 
1368 static int vchiq_probe(struct platform_device *pdev)
1369 {
1370 	const struct vchiq_platform_info *info;
1371 	struct vchiq_drv_mgmt *mgmt;
1372 	int ret;
1373 
1374 	info = of_device_get_match_data(&pdev->dev);
1375 	if (!info)
1376 		return -EINVAL;
1377 
1378 	struct device_node *fw_node __free(device_node) =
1379 		of_find_compatible_node(NULL, NULL, "raspberrypi,bcm2835-firmware");
1380 	if (!fw_node) {
1381 		dev_err(&pdev->dev, "Missing firmware node\n");
1382 		return -ENOENT;
1383 	}
1384 
1385 	mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL);
1386 	if (!mgmt)
1387 		return -ENOMEM;
1388 
1389 	mgmt->fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
1390 	if (!mgmt->fw)
1391 		return -EPROBE_DEFER;
1392 
1393 	mgmt->info = info;
1394 	platform_set_drvdata(pdev, mgmt);
1395 
1396 	ret = vchiq_platform_init(pdev, &mgmt->state);
1397 	if (ret) {
1398 		dev_err(&pdev->dev, "arm: Could not initialize vchiq platform\n");
1399 		return ret;
1400 	}
1401 
1402 	dev_dbg(&pdev->dev, "arm: platform initialised - version %d (min %d)\n",
1403 		VCHIQ_VERSION, VCHIQ_VERSION_MIN);
1404 
1405 	/*
1406 	 * Simply exit on error, since vchiq_register_chrdev() cleans up
1407 	 * after itself on failure.
1408 	 */
1409 	ret = vchiq_register_chrdev(&pdev->dev);
1410 	if (ret) {
1411 		dev_err(&pdev->dev, "arm: Failed to initialize vchiq cdev\n");
1412 		vchiq_platform_uninit(mgmt);
1413 		return ret;
1414 	}
1415 
1416 	vchiq_debugfs_init(&mgmt->state);
1417 
1418 	bcm2835_audio = vchiq_device_register(&pdev->dev, "bcm2835-audio");
1419 	bcm2835_camera = vchiq_device_register(&pdev->dev, "bcm2835-camera");
1420 
1421 	return 0;
1422 }
1423 
1424 static void vchiq_remove(struct platform_device *pdev)
1425 {
1426 	struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(&pdev->dev);
1427 
1428 	vchiq_device_unregister(bcm2835_audio);
1429 	vchiq_device_unregister(bcm2835_camera);
1430 	vchiq_debugfs_deinit();
1431 	vchiq_deregister_chrdev();
1432 	vchiq_platform_uninit(mgmt);
1433 }
1434 
1435 static struct platform_driver vchiq_driver = {
1436 	.driver = {
1437 		.name = "bcm2835_vchiq",
1438 		.of_match_table = vchiq_of_match,
1439 	},
1440 	.probe = vchiq_probe,
1441 	.remove = vchiq_remove,
1442 };
1443 
1444 static int __init vchiq_driver_init(void)
1445 {
1446 	int ret;
1447 
1448 	ret = bus_register(&vchiq_bus_type);
1449 	if (ret) {
1450 		pr_err("Failed to register %s\n", vchiq_bus_type.name);
1451 		return ret;
1452 	}
1453 
1454 	ret = platform_driver_register(&vchiq_driver);
1455 	if (ret) {
1456 		pr_err("Failed to register vchiq driver\n");
1457 		bus_unregister(&vchiq_bus_type);
1458 	}
1459 
1460 	return ret;
1461 }
1462 module_init(vchiq_driver_init);
1463 
1464 static void __exit vchiq_driver_exit(void)
1465 {
1466 	bus_unregister(&vchiq_bus_type);
1467 	platform_driver_unregister(&vchiq_driver);
1468 }
1469 module_exit(vchiq_driver_exit);
1470 
1471 MODULE_LICENSE("Dual BSD/GPL");
1472 MODULE_DESCRIPTION("Videocore VCHIQ driver");
1473 MODULE_AUTHOR("Broadcom Corporation");
1474