1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3  * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
4  * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5  */
6 
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/sched/signal.h>
10 #include <linux/types.h>
11 #include <linux/errno.h>
12 #include <linux/cdev.h>
13 #include <linux/fs.h>
14 #include <linux/device.h>
15 #include <linux/mm.h>
16 #include <linux/highmem.h>
17 #include <linux/pagemap.h>
18 #include <linux/bug.h>
19 #include <linux/completion.h>
20 #include <linux/list.h>
21 #include <linux/of.h>
22 #include <linux/platform_device.h>
23 #include <linux/compat.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/rcupdate.h>
26 #include <linux/delay.h>
27 #include <linux/slab.h>
28 #include <soc/bcm2835/raspberrypi-firmware.h>
29 
30 #include "vchiq_core.h"
31 #include "vchiq_ioctl.h"
32 #include "vchiq_arm.h"
33 #include "vchiq_debugfs.h"
34 
35 #define DEVICE_NAME "vchiq"
36 
37 /* Override the default prefix, which would be vchiq_arm (from the filename) */
38 #undef MODULE_PARAM_PREFIX
39 #define MODULE_PARAM_PREFIX DEVICE_NAME "."
40 
41 /* Some per-instance constants */
42 #define MAX_COMPLETIONS 128
43 #define MAX_SERVICES 64
44 #define MAX_ELEMENTS 8
45 #define MSG_QUEUE_SIZE 128
46 
47 #define KEEPALIVE_VER 1
48 #define KEEPALIVE_VER_MIN KEEPALIVE_VER
49 
50 /* Run time control of log level, based on KERN_XXX level. */
51 int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
52 int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
53 
/*
 * Per-open-service state for a userspace client. Holds the userspace
 * cookie, a small ring of received message headers (used when the
 * service was created "vchi"-style) and the completions used to hand
 * messages between the VCHIQ callback and the ioctl paths.
 */
struct user_service {
	struct vchiq_service *service;	/* underlying core service */
	void __user *userdata;		/* opaque cookie owned by userspace */
	struct vchiq_instance *instance;	/* owning /dev/vchiq instance */
	char is_vchi;		/* non-zero: messages routed via msg_queue[] */
	char dequeue_pending;	/* a DEQUEUE_MESSAGE is waiting for a message */
	char close_pending;	/* CLOSED completion not yet acked by client */
	int message_available_pos;	/* completion index of last MESSAGE_AVAILABLE */
	int msg_insert;		/* msg_queue write index (free-running) */
	int msg_remove;		/* msg_queue read index (free-running) */
	struct completion insert_event;	/* signalled when a message is queued */
	struct completion remove_event;	/* signalled when a message is dequeued */
	struct completion close_event;	/* signalled when CLOSED is delivered */
	struct vchiq_header *msg_queue[MSG_QUEUE_SIZE];	/* power-of-2 ring */
};
69 
/*
 * A core bulk_waiter plus the bookkeeping needed to park it on an
 * instance's bulk_waiter_list, keyed by the waiting thread's pid, so a
 * later VCHIQ_BULK_MODE_WAITING call from the same thread can reclaim it.
 */
struct bulk_waiter_node {
	struct bulk_waiter bulk_waiter;
	int pid;		/* pid of the thread that started the transfer */
	struct list_head list;	/* entry in vchiq_instance.bulk_waiter_list */
};
75 
/*
 * Per-client instance state, created by vchiq_initialise() (kernel users)
 * or by opening the char device. Completions produced by service
 * callbacks are buffered in a power-of-2 ring until the client collects
 * them with AWAIT_COMPLETION.
 */
struct vchiq_instance {
	struct vchiq_state *state;	/* shared core state */
	struct vchiq_completion_data_kernel completions[MAX_COMPLETIONS];
	int completion_insert;	/* ring write index (free-running) */
	int completion_remove;	/* ring read index (free-running) */
	struct completion insert_event;	/* signalled when a completion is added */
	struct completion remove_event;	/* signalled when ring space is freed */
	struct mutex completion_mutex;

	int connected;		/* set once vchiq_connect() succeeds */
	int closing;		/* instance shutting down; callbacks bail out */
	int pid;		/* creator pid; used when opening services */
	int mark;
	int use_close_delivered;	/* client acks CLOSED via CLOSE_DELIVERED */
	int trace;

	struct list_head bulk_waiter_list;	/* parked blocking-bulk waiters */
	struct mutex bulk_waiter_list_mutex;	/* guards bulk_waiter_list */

	struct vchiq_debugfs_node debugfs_node;
};
97 
/*
 * Cursor state while formatting a state dump into a userspace buffer.
 * NOTE(review): field semantics inferred from names; the consumers are
 * outside this chunk — confirm against the dump helpers.
 */
struct dump_context {
	char __user *buf;	/* destination buffer in userspace */
	size_t actual;		/* presumably bytes produced so far */
	size_t space;		/* presumably bytes remaining in buf */
	loff_t offset;
};
104 
/* Char device bookkeeping for /dev/vchiq. */
static struct cdev    vchiq_cdev;
static dev_t          vchiq_devid;
static struct vchiq_state g_state;
static struct class  *vchiq_class;
/* Guards every user_service msg_queue[] and its insert/remove indices. */
static DEFINE_SPINLOCK(msg_queue_spinlock);
/* Child platform devices registered on top of vchiq. */
static struct platform_device *bcm2835_camera;
static struct platform_device *bcm2835_audio;

/* Per-SoC data: the VPU cache line size differs between BCM2835/2836. */
static struct vchiq_drvdata bcm2835_drvdata = {
	.cache_line_size = 32,
};

static struct vchiq_drvdata bcm2836_drvdata = {
	.cache_line_size = 64,
};
120 
/*
 * Human-readable ioctl names, indexed by ioctl number; must stay in
 * sync with the VCHIQ_IOC_* numbering (asserted below).
 */
static const char *const ioctl_names[] = {
	"CONNECT",
	"SHUTDOWN",
	"CREATE_SERVICE",
	"REMOVE_SERVICE",
	"QUEUE_MESSAGE",
	"QUEUE_BULK_TRANSMIT",
	"QUEUE_BULK_RECEIVE",
	"AWAIT_COMPLETION",
	"DEQUEUE_MESSAGE",
	"GET_CLIENT_ID",
	"GET_CONFIG",
	"CLOSE_SERVICE",
	"USE_SERVICE",
	"RELEASE_SERVICE",
	"SET_SERVICE_OPTION",
	"DUMP_PHYS_MEM",
	"LIB_VERSION",
	"CLOSE_DELIVERED"
};

/* Fail the build if the table above and VCHIQ_IOC_MAX drift apart. */
vchiq_static_assert(ARRAY_SIZE(ioctl_names) ==
		    (VCHIQ_IOC_MAX + 1));
144 
/* Worker for VCHIQ_BULK_MODE_BLOCKING transfers; defined below. */
static enum vchiq_status
vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
	unsigned int size, enum vchiq_bulk_dir dir);

/* Bounded wait for the firmware to come up (see vchiq_initialise()). */
#define VCHIQ_INIT_RETRIES 10
vchiq_initialise(struct vchiq_instance ** instance_out)150 enum vchiq_status vchiq_initialise(struct vchiq_instance **instance_out)
151 {
152 	enum vchiq_status status = VCHIQ_ERROR;
153 	struct vchiq_state *state;
154 	struct vchiq_instance *instance = NULL;
155 	int i;
156 
157 	vchiq_log_trace(vchiq_core_log_level, "%s called", __func__);
158 
159 	/* VideoCore may not be ready due to boot up timing.
160 	 * It may never be ready if kernel and firmware are mismatched,so don't
161 	 * block forever.
162 	 */
163 	for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
164 		state = vchiq_get_state();
165 		if (state)
166 			break;
167 		usleep_range(500, 600);
168 	}
169 	if (i == VCHIQ_INIT_RETRIES) {
170 		vchiq_log_error(vchiq_core_log_level,
171 			"%s: videocore not initialized\n", __func__);
172 		goto failed;
173 	} else if (i > 0) {
174 		vchiq_log_warning(vchiq_core_log_level,
175 			"%s: videocore initialized after %d retries\n",
176 			__func__, i);
177 	}
178 
179 	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
180 	if (!instance) {
181 		vchiq_log_error(vchiq_core_log_level,
182 			"%s: error allocating vchiq instance\n", __func__);
183 		goto failed;
184 	}
185 
186 	instance->connected = 0;
187 	instance->state = state;
188 	mutex_init(&instance->bulk_waiter_list_mutex);
189 	INIT_LIST_HEAD(&instance->bulk_waiter_list);
190 
191 	*instance_out = instance;
192 
193 	status = VCHIQ_SUCCESS;
194 
195 failed:
196 	vchiq_log_trace(vchiq_core_log_level,
197 		"%s(%p): returning %d", __func__, instance, status);
198 
199 	return status;
200 }
201 EXPORT_SYMBOL(vchiq_initialise);
202 
/*
 * vchiq_shutdown() - tear down an instance created by vchiq_initialise()
 * @instance: the instance to destroy
 *
 * Removes all of the instance's services under the state mutex. Only on
 * VCHIQ_SUCCESS are the leftover blocking-bulk waiters and the instance
 * itself freed; on VCHIQ_RETRY (killable lock interrupted) the caller
 * may try again.
 */
enum vchiq_status vchiq_shutdown(struct vchiq_instance *instance)
{
	enum vchiq_status status;
	struct vchiq_state *state = instance->state;

	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p) called", __func__, instance);

	/* Killable, so a dying process does not block here forever. */
	if (mutex_lock_killable(&state->mutex))
		return VCHIQ_RETRY;

	/* Remove all services */
	status = vchiq_shutdown_internal(state, instance);

	mutex_unlock(&state->mutex);

	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p): returning %d", __func__, instance, status);

	if (status == VCHIQ_SUCCESS) {
		struct bulk_waiter_node *waiter, *next;

		/* Reclaim waiters parked by blocking bulk transfers. */
		list_for_each_entry_safe(waiter, next,
					 &instance->bulk_waiter_list, list) {
			list_del(&waiter->list);
			vchiq_log_info(vchiq_arm_log_level,
					"bulk_waiter - cleaned up %pK for pid %d",
					waiter, waiter->pid);
			kfree(waiter);
		}
		kfree(instance);
	}

	return status;
}
EXPORT_SYMBOL(vchiq_shutdown);
239 
/* Report whether this instance has completed vchiq_connect(). */
static int vchiq_is_connected(struct vchiq_instance *instance)
{
	return instance->connected;
}
244 
vchiq_connect(struct vchiq_instance * instance)245 enum vchiq_status vchiq_connect(struct vchiq_instance *instance)
246 {
247 	enum vchiq_status status;
248 	struct vchiq_state *state = instance->state;
249 
250 	vchiq_log_trace(vchiq_core_log_level,
251 		"%s(%p) called", __func__, instance);
252 
253 	if (mutex_lock_killable(&state->mutex)) {
254 		vchiq_log_trace(vchiq_core_log_level,
255 			"%s: call to mutex_lock failed", __func__);
256 		status = VCHIQ_RETRY;
257 		goto failed;
258 	}
259 	status = vchiq_connect_internal(state, instance);
260 
261 	if (status == VCHIQ_SUCCESS)
262 		instance->connected = 1;
263 
264 	mutex_unlock(&state->mutex);
265 
266 failed:
267 	vchiq_log_trace(vchiq_core_log_level,
268 		"%s(%p): returning %d", __func__, instance, status);
269 
270 	return status;
271 }
272 EXPORT_SYMBOL(vchiq_connect);
273 
vchiq_add_service(struct vchiq_instance * instance,const struct vchiq_service_params_kernel * params,unsigned int * phandle)274 static enum vchiq_status vchiq_add_service(
275 	struct vchiq_instance             *instance,
276 	const struct vchiq_service_params_kernel *params,
277 	unsigned int       *phandle)
278 {
279 	enum vchiq_status status;
280 	struct vchiq_state *state = instance->state;
281 	struct vchiq_service *service = NULL;
282 	int srvstate;
283 
284 	vchiq_log_trace(vchiq_core_log_level,
285 		"%s(%p) called", __func__, instance);
286 
287 	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
288 
289 	srvstate = vchiq_is_connected(instance)
290 		? VCHIQ_SRVSTATE_LISTENING
291 		: VCHIQ_SRVSTATE_HIDDEN;
292 
293 	service = vchiq_add_service_internal(
294 		state,
295 		params,
296 		srvstate,
297 		instance,
298 		NULL);
299 
300 	if (service) {
301 		*phandle = service->handle;
302 		status = VCHIQ_SUCCESS;
303 	} else
304 		status = VCHIQ_ERROR;
305 
306 	vchiq_log_trace(vchiq_core_log_level,
307 		"%s(%p): returning %d", __func__, instance, status);
308 
309 	return status;
310 }
311 
/*
 * vchiq_open_service() - open a service towards the VideoCore
 * @instance: owning instance (must already be connected)
 * @params:   service parameters (fourcc, callback, versions)
 * @phandle:  receives the new service handle, or
 *            VCHIQ_SERVICE_HANDLE_INVALID on failure
 *
 * Creates the service in OPENING state and performs the open handshake;
 * on handshake failure the half-created service is removed again.
 */
enum vchiq_status vchiq_open_service(
	struct vchiq_instance             *instance,
	const struct vchiq_service_params_kernel *params,
	unsigned int       *phandle)
{
	enum vchiq_status   status = VCHIQ_ERROR;
	struct vchiq_state   *state = instance->state;
	struct vchiq_service *service = NULL;

	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p) called", __func__, instance);

	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;

	/* Opening requires a completed CONNECT handshake. */
	if (!vchiq_is_connected(instance))
		goto failed;

	service = vchiq_add_service_internal(state,
		params,
		VCHIQ_SRVSTATE_OPENING,
		instance,
		NULL);

	if (service) {
		*phandle = service->handle;
		status = vchiq_open_service_internal(service, current->pid);
		if (status != VCHIQ_SUCCESS) {
			/* Undo the add; leave the caller a clean failure. */
			vchiq_remove_service(service->handle);
			*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
		}
	}

failed:
	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p): returning %d", __func__, instance, status);

	return status;
}
EXPORT_SYMBOL(vchiq_open_service);
351 
352 enum vchiq_status
vchiq_bulk_transmit(unsigned int handle,const void * data,unsigned int size,void * userdata,enum vchiq_bulk_mode mode)353 vchiq_bulk_transmit(unsigned int handle, const void *data,
354 	unsigned int size, void *userdata, enum vchiq_bulk_mode mode)
355 {
356 	enum vchiq_status status;
357 
358 	while (1) {
359 		switch (mode) {
360 		case VCHIQ_BULK_MODE_NOCALLBACK:
361 		case VCHIQ_BULK_MODE_CALLBACK:
362 			status = vchiq_bulk_transfer(handle,
363 						     (void *)data, NULL,
364 						     size, userdata, mode,
365 						     VCHIQ_BULK_TRANSMIT);
366 			break;
367 		case VCHIQ_BULK_MODE_BLOCKING:
368 			status = vchiq_blocking_bulk_transfer(handle,
369 				(void *)data, size, VCHIQ_BULK_TRANSMIT);
370 			break;
371 		default:
372 			return VCHIQ_ERROR;
373 		}
374 
375 		/*
376 		 * vchiq_*_bulk_transfer() may return VCHIQ_RETRY, so we need
377 		 * to implement a retry mechanism since this function is
378 		 * supposed to block until queued
379 		 */
380 		if (status != VCHIQ_RETRY)
381 			break;
382 
383 		msleep(1);
384 	}
385 
386 	return status;
387 }
388 EXPORT_SYMBOL(vchiq_bulk_transmit);
389 
vchiq_bulk_receive(unsigned int handle,void * data,unsigned int size,void * userdata,enum vchiq_bulk_mode mode)390 enum vchiq_status vchiq_bulk_receive(unsigned int handle, void *data,
391 				     unsigned int size, void *userdata,
392 				     enum vchiq_bulk_mode mode)
393 {
394 	enum vchiq_status status;
395 
396 	while (1) {
397 		switch (mode) {
398 		case VCHIQ_BULK_MODE_NOCALLBACK:
399 		case VCHIQ_BULK_MODE_CALLBACK:
400 			status = vchiq_bulk_transfer(handle, data, NULL,
401 						     size, userdata,
402 						     mode, VCHIQ_BULK_RECEIVE);
403 			break;
404 		case VCHIQ_BULK_MODE_BLOCKING:
405 			status = vchiq_blocking_bulk_transfer(handle,
406 				(void *)data, size, VCHIQ_BULK_RECEIVE);
407 			break;
408 		default:
409 			return VCHIQ_ERROR;
410 		}
411 
412 		/*
413 		 * vchiq_*_bulk_transfer() may return VCHIQ_RETRY, so we need
414 		 * to implement a retry mechanism since this function is
415 		 * supposed to block until queued
416 		 */
417 		if (status != VCHIQ_RETRY)
418 			break;
419 
420 		msleep(1);
421 	}
422 
423 	return status;
424 }
425 EXPORT_SYMBOL(vchiq_bulk_receive);
426 
/*
 * vchiq_blocking_bulk_transfer() - perform a bulk transfer, sleeping
 * until it completes or a fatal signal arrives.
 *
 * A bulk_waiter_node is reused if this thread already parked one on the
 * instance (a previous attempt was interrupted); otherwise a fresh one
 * is allocated. If the transfer is interrupted again (VCHIQ_RETRY with
 * an outstanding bulk), the waiter is re-parked on the instance keyed by
 * pid so the retry can find it; otherwise it is freed here.
 */
static enum vchiq_status
vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
	unsigned int size, enum vchiq_bulk_dir dir)
{
	struct vchiq_instance *instance;
	struct vchiq_service *service;
	enum vchiq_status status;
	struct bulk_waiter_node *waiter = NULL;
	bool found = false;

	service = find_service_by_handle(handle);
	if (!service)
		return VCHIQ_ERROR;

	instance = service->instance;

	unlock_service(service);

	/* Look for a waiter this thread parked on an earlier attempt. */
	mutex_lock(&instance->bulk_waiter_list_mutex);
	list_for_each_entry(waiter, &instance->bulk_waiter_list, list) {
		if (waiter->pid == current->pid) {
			list_del(&waiter->list);
			found = true;
			break;
		}
	}
	mutex_unlock(&instance->bulk_waiter_list_mutex);

	if (found) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* This thread has an outstanding bulk transfer. */
			/* FIXME: why compare a dma address to a pointer? */
			if ((bulk->data != (dma_addr_t)(uintptr_t)data) ||
				(bulk->size != size)) {
				/* This is not a retry of the previous one.
				 * Cancel the signal when the transfer
				 * completes.
				 */
				spin_lock(&bulk_waiter_spinlock);
				bulk->userdata = NULL;
				spin_unlock(&bulk_waiter_spinlock);
			}
		}
	} else {
		waiter = kzalloc(sizeof(struct bulk_waiter_node), GFP_KERNEL);
		if (!waiter) {
			vchiq_log_error(vchiq_core_log_level,
				"%s - out of memory", __func__);
			return VCHIQ_ERROR;
		}
	}

	status = vchiq_bulk_transfer(handle, data, NULL, size,
				     &waiter->bulk_waiter,
				     VCHIQ_BULK_MODE_BLOCKING, dir);
	if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
		!waiter->bulk_waiter.bulk) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* Cancel the signal when the transfer
			 * completes.
			 */
			spin_lock(&bulk_waiter_spinlock);
			bulk->userdata = NULL;
			spin_unlock(&bulk_waiter_spinlock);
		}
		kfree(waiter);
	} else {
		/* Interrupted with the bulk still pending: park the waiter
		 * so a retry from this thread can pick it up again.
		 */
		waiter->pid = current->pid;
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_add(&waiter->list, &instance->bulk_waiter_list);
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		vchiq_log_info(vchiq_arm_log_level,
				"saved bulk_waiter %pK for pid %d",
				waiter, current->pid);
	}

	return status;
}
509 /****************************************************************************
510 *
511 *   add_completion
512 *
513 ***************************************************************************/
514 
/*
 * add_completion() - append a completion record to the instance's ring
 * for delivery to userspace via AWAIT_COMPLETION.
 *
 * Blocks (interruptibly) while the ring is full, unless the instance is
 * closing. For VCHIQ_SERVICE_CLOSED an extra service reference is taken
 * and held until the CLOSED record is actually delivered.
 */
static enum vchiq_status
add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
	       struct vchiq_header *header, struct user_service *user_service,
	       void *bulk_userdata)
{
	struct vchiq_completion_data_kernel *completion;
	int insert;

	DEBUG_INITIALISE(g_state.local)

	insert = instance->completion_insert;
	while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
		/* Out of space - wait for the client */
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		vchiq_log_trace(vchiq_arm_log_level,
			"%s - completion queue full", __func__);
		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
		if (wait_for_completion_interruptible(
					&instance->remove_event)) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback interrupted");
			return VCHIQ_RETRY;
		} else if (instance->closing) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback closing");
			return VCHIQ_SUCCESS;
		}
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	}

	/* MAX_COMPLETIONS is a power of 2, so masking wraps the index. */
	completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];

	completion->header = header;
	completion->reason = reason;
	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
	completion->service_userdata = user_service->service;
	completion->bulk_userdata = bulk_userdata;

	if (reason == VCHIQ_SERVICE_CLOSED) {
		/* Take an extra reference, to be held until
		   this CLOSED notification is delivered. */
		lock_service(user_service->service);
		if (instance->use_close_delivered)
			user_service->close_pending = 1;
	}

	/* A write barrier is needed here to ensure that the entire completion
		record is written out before the insert point. */
	wmb();

	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		user_service->message_available_pos = insert;

	insert++;
	instance->completion_insert = insert;

	complete(&instance->insert_event);

	return VCHIQ_SUCCESS;
}
575 
576 /****************************************************************************
577 *
578 *   service_callback
579 *
580 ***************************************************************************/
581 
/*
 * service_callback() - core-side callback for services owned by
 * userspace clients. Routes messages either into the per-service
 * msg_queue (vchi-style services) or into the instance's completion
 * ring; returns VCHIQ_RETRY if interrupted while waiting for space.
 */
static enum vchiq_status
service_callback(enum vchiq_reason reason, struct vchiq_header *header,
		 unsigned int handle, void *bulk_userdata)
{
	/* How do we ensure the callback goes to the right client?
	** The service_user data points to a user_service record
	** containing the original callback and the user state structure, which
	** contains a circular buffer for completion records.
	*/
	struct user_service *user_service;
	struct vchiq_service *service;
	struct vchiq_instance *instance;
	bool skip_completion = false;

	DEBUG_INITIALISE(g_state.local)

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	service = handle_to_service(handle);
	BUG_ON(!service);
	user_service = (struct user_service *)service->base.userdata;
	instance = user_service->instance;

	/* Drop callbacks silently while the instance is going away. */
	if (!instance || instance->closing)
		return VCHIQ_SUCCESS;

	vchiq_log_trace(vchiq_arm_log_level,
		"%s - service %lx(%d,%p), reason %d, header %lx, "
		"instance %lx, bulk_userdata %lx",
		__func__, (unsigned long)user_service,
		service->localport, user_service->userdata,
		reason, (unsigned long)header,
		(unsigned long)instance, (unsigned long)bulk_userdata);

	if (header && user_service->is_vchi) {
		spin_lock(&msg_queue_spinlock);
		/* Wait for ring space; the lock is dropped while sleeping
		 * and retaken before re-testing the condition.
		 */
		while (user_service->msg_insert ==
			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
			vchiq_log_trace(vchiq_arm_log_level,
				"service_callback - msg queue full");
			/* If there is no MESSAGE_AVAILABLE in the completion
			** queue, add one
			*/
			if ((user_service->message_available_pos -
				instance->completion_remove) < 0) {
				enum vchiq_status status;

				vchiq_log_info(vchiq_arm_log_level,
					"Inserting extra MESSAGE_AVAILABLE");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				status = add_completion(instance, reason,
					NULL, user_service, bulk_userdata);
				if (status != VCHIQ_SUCCESS) {
					DEBUG_TRACE(SERVICE_CALLBACK_LINE);
					return status;
				}
			}

			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			if (wait_for_completion_interruptible(
						&user_service->remove_event)) {
				vchiq_log_info(vchiq_arm_log_level,
					"%s interrupted", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_RETRY;
			} else if (instance->closing) {
				vchiq_log_info(vchiq_arm_log_level,
					"%s closing", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_ERROR;
			}
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			spin_lock(&msg_queue_spinlock);
		}

		user_service->msg_queue[user_service->msg_insert &
			(MSG_QUEUE_SIZE - 1)] = header;
		user_service->msg_insert++;

		/* If there is a thread waiting in DEQUEUE_MESSAGE, or if
		** there is a MESSAGE_AVAILABLE in the completion queue then
		** bypass the completion queue.
		*/
		if (((user_service->message_available_pos -
			instance->completion_remove) >= 0) ||
			user_service->dequeue_pending) {
			user_service->dequeue_pending = 0;
			skip_completion = true;
		}

		spin_unlock(&msg_queue_spinlock);
		complete(&user_service->insert_event);

		/* The header now lives in msg_queue; don't also complete it. */
		header = NULL;
	}
	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	if (skip_completion)
		return VCHIQ_SUCCESS;

	return add_completion(instance, reason, header, user_service,
		bulk_userdata);
}
688 
689 /****************************************************************************
690 *
691 *   user_service_free
692 *
693 ***************************************************************************/
/* userdata_term callback for services: frees the user_service record. */
static void
user_service_free(void *userdata)
{
	kfree(userdata);
}
699 
700 /****************************************************************************
701 *
702 *   close_delivered
703 *
704 ***************************************************************************/
/*
 * close_delivered() - acknowledge that a CLOSED completion reached the
 * client: drop the extra reference taken in add_completion() and wake
 * any thread blocked in close/remove-service.
 */
static void close_delivered(struct user_service *user_service)
{
	vchiq_log_info(vchiq_arm_log_level,
		"%s(handle=%x)",
		__func__, user_service->service->handle);

	if (user_service->close_pending) {
		/* Allow the underlying service to be culled */
		unlock_service(user_service->service);

		/* Wake the user-thread blocked in close_ or remove_service */
		complete(&user_service->close_event);

		user_service->close_pending = 0;
	}
}
721 
/*
 * Iteration state for vchiq_ioc_copy_element_data(): walks an array of
 * user-supplied elements, remembering how far into the current element
 * the copy has progressed.
 */
struct vchiq_io_copy_callback_context {
	struct vchiq_element *element;	/* current element */
	size_t element_offset;		/* bytes of 'element' already copied */
	unsigned long elements_to_go;	/* elements remaining, incl. current */
};
727 
/*
 * vchiq_ioc_copy_element_data() - copy callback used by QUEUE_MESSAGE:
 * gathers up to @maxsize bytes from the userspace element array into
 * @dest. Note @offset is unused; position is tracked in @context.
 *
 * Returns the number of bytes copied, or -EFAULT on a faulting copy.
 */
static ssize_t vchiq_ioc_copy_element_data(void *context, void *dest,
					   size_t offset, size_t maxsize)
{
	struct vchiq_io_copy_callback_context *cc = context;
	size_t total_bytes_copied = 0;
	size_t bytes_this_round;

	while (total_bytes_copied < maxsize) {
		if (!cc->elements_to_go)
			return total_bytes_copied;

		/* Skip empty elements without attempting a copy. */
		if (!cc->element->size) {
			cc->elements_to_go--;
			cc->element++;
			cc->element_offset = 0;
			continue;
		}

		bytes_this_round = min(cc->element->size - cc->element_offset,
				       maxsize - total_bytes_copied);

		if (copy_from_user(dest + total_bytes_copied,
				  cc->element->data + cc->element_offset,
				  bytes_this_round))
			return -EFAULT;

		cc->element_offset += bytes_this_round;
		total_bytes_copied += bytes_this_round;

		/* Advance to the next element once this one is drained. */
		if (cc->element_offset == cc->element->size) {
			cc->elements_to_go--;
			cc->element++;
			cc->element_offset = 0;
		}
	}

	return maxsize;
}
766 
767 /**************************************************************************
768  *
769  *   vchiq_ioc_queue_message
770  *
771  **************************************************************************/
772 static int
vchiq_ioc_queue_message(unsigned int handle,struct vchiq_element * elements,unsigned long count)773 vchiq_ioc_queue_message(unsigned int handle,
774 			struct vchiq_element *elements,
775 			unsigned long count)
776 {
777 	struct vchiq_io_copy_callback_context context;
778 	enum vchiq_status status = VCHIQ_SUCCESS;
779 	unsigned long i;
780 	size_t total_size = 0;
781 
782 	context.element = elements;
783 	context.element_offset = 0;
784 	context.elements_to_go = count;
785 
786 	for (i = 0; i < count; i++) {
787 		if (!elements[i].data && elements[i].size != 0)
788 			return -EFAULT;
789 
790 		total_size += elements[i].size;
791 	}
792 
793 	status = vchiq_queue_message(handle, vchiq_ioc_copy_element_data,
794 				     &context, total_size);
795 
796 	if (status == VCHIQ_ERROR)
797 		return -EIO;
798 	else if (status == VCHIQ_RETRY)
799 		return -EINTR;
800 	return 0;
801 }
802 
/*
 * vchiq_ioc_create_service() - CREATE_SERVICE ioctl worker.
 *
 * Allocates the user_service record and registers the underlying core
 * service with user_service_free as its userdata terminator; after
 * vchiq_add_service_internal() succeeds, the record is owned by the
 * service and must not be freed directly here (vchiq_remove_service()
 * on the open-failure path releases it via the terminator).
 */
static int vchiq_ioc_create_service(struct vchiq_instance *instance,
				    struct vchiq_create_service *args)
{
	struct user_service *user_service = NULL;
	struct vchiq_service *service;
	enum vchiq_status status = VCHIQ_SUCCESS;
	struct vchiq_service_params_kernel params;
	int srvstate;

	user_service = kmalloc(sizeof(*user_service), GFP_KERNEL);
	if (!user_service)
		return -ENOMEM;

	if (args->is_open) {
		/* Opening requires a connected instance. */
		if (!instance->connected) {
			kfree(user_service);
			return -ENOTCONN;
		}
		srvstate = VCHIQ_SRVSTATE_OPENING;
	} else {
		srvstate = instance->connected ?
			 VCHIQ_SRVSTATE_LISTENING : VCHIQ_SRVSTATE_HIDDEN;
	}

	params = (struct vchiq_service_params_kernel) {
		.fourcc   = args->params.fourcc,
		.callback = service_callback,
		.userdata = user_service,
		.version  = args->params.version,
		.version_min = args->params.version_min,
	};
	service = vchiq_add_service_internal(instance->state, &params,
					     srvstate, instance,
					     user_service_free);
	if (!service) {
		kfree(user_service);
		return -EEXIST;
	}

	user_service->service = service;
	user_service->userdata = args->params.userdata;
	user_service->instance = instance;
	user_service->is_vchi = (args->is_vchi != 0);
	user_service->dequeue_pending = 0;
	user_service->close_pending = 0;
	/* Start "behind" the completion ring so no message looks pending. */
	user_service->message_available_pos = instance->completion_remove - 1;
	user_service->msg_insert = 0;
	user_service->msg_remove = 0;
	init_completion(&user_service->insert_event);
	init_completion(&user_service->remove_event);
	init_completion(&user_service->close_event);

	if (args->is_open) {
		status = vchiq_open_service_internal(service, instance->pid);
		if (status != VCHIQ_SUCCESS) {
			vchiq_remove_service(service->handle);
			return (status == VCHIQ_RETRY) ?
				-EINTR : -EIO;
		}
	}
	args->handle = service->handle;

	return 0;
}
867 
/*
 * vchiq_ioc_dequeue_message() - DEQUEUE_MESSAGE ioctl worker for
 * vchi-style services: pop one header from the per-service msg_queue,
 * optionally blocking until one arrives, and copy it to userspace.
 *
 * Returns the message size on success, -EWOULDBLOCK for an empty queue
 * in non-blocking mode, -EINTR if interrupted, -EFAULT on a bad copy,
 * -ENOTCONN for a NULL (sentinel) header, or -EMSGSIZE if the supplied
 * buffer is too small.
 */
static int vchiq_ioc_dequeue_message(struct vchiq_instance *instance,
				     struct vchiq_dequeue_message *args)
{
	struct user_service *user_service;
	struct vchiq_service *service;
	struct vchiq_header *header;
	int ret;

	DEBUG_INITIALISE(g_state.local)
	DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
	service = find_service_for_instance(instance, args->handle);
	if (!service)
		return -EINVAL;

	user_service = (struct user_service *)service->base.userdata;
	if (user_service->is_vchi == 0) {
		ret = -EINVAL;
		goto out;
	}

	spin_lock(&msg_queue_spinlock);
	if (user_service->msg_remove == user_service->msg_insert) {
		if (!args->blocking) {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
			ret = -EWOULDBLOCK;
			goto out;
		}
		/* Tell service_callback a dequeuer is waiting. */
		user_service->dequeue_pending = 1;
		ret = 0;
		/* Sleep with the lock dropped; retake it before re-testing. */
		do {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
			if (wait_for_completion_interruptible(
				&user_service->insert_event)) {
				vchiq_log_info(vchiq_arm_log_level,
					"DEQUEUE_MESSAGE interrupted");
				ret = -EINTR;
				break;
			}
			spin_lock(&msg_queue_spinlock);
		} while (user_service->msg_remove ==
			user_service->msg_insert);

		if (ret)
			goto out;
	}

	BUG_ON((int)(user_service->msg_insert -
		user_service->msg_remove) < 0);

	header = user_service->msg_queue[user_service->msg_remove &
		(MSG_QUEUE_SIZE - 1)];
	user_service->msg_remove++;
	spin_unlock(&msg_queue_spinlock);

	/* Wake a callback blocked on a full msg_queue. */
	complete(&user_service->remove_event);
	if (!header) {
		ret = -ENOTCONN;
	} else if (header->size <= args->bufsize) {
		/* Copy to user space if msgbuf is not NULL */
		if (!args->buf || (copy_to_user(args->buf,
					header->data, header->size) == 0)) {
			ret = header->size;
			vchiq_release_message(service->handle, header);
		} else
			ret = -EFAULT;
	} else {
		vchiq_log_error(vchiq_arm_log_level,
			"header %pK: bufsize %x < size %x",
			header, args->bufsize, header->size);
		WARN(1, "invalid size\n");
		ret = -EMSGSIZE;
	}
	DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
out:
	unlock_service(service);
	return ret;
}
947 
/*
 * vchiq_irq_queue_bulk_tx_rx() - QUEUE_BULK_TRANSMIT/RECEIVE ioctl
 * worker. BLOCKING mode allocates a bulk waiter; WAITING mode reclaims
 * the waiter this pid parked earlier. If the transfer is interrupted
 * while still pending, the waiter is (re-)parked on the instance and
 * the caller's mode is rewritten to WAITING so userspace retries
 * correctly.
 */
static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
				      struct vchiq_queue_bulk_transfer *args,
				      enum vchiq_bulk_dir dir,
				      enum vchiq_bulk_mode __user *mode)
{
	struct vchiq_service *service;
	struct bulk_waiter_node *waiter = NULL;
	bool found = false;
	void *userdata = NULL;
	int status = 0;
	int ret;

	service = find_service_for_instance(instance, args->handle);
	if (!service)
		return -EINVAL;

	if (args->mode == VCHIQ_BULK_MODE_BLOCKING) {
		waiter = kzalloc(sizeof(struct bulk_waiter_node),
			GFP_KERNEL);
		if (!waiter) {
			ret = -ENOMEM;
			goto out;
		}

		userdata = &waiter->bulk_waiter;
	} else if (args->mode == VCHIQ_BULK_MODE_WAITING) {
		/* Find the waiter parked by this pid's earlier attempt. */
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_for_each_entry(waiter, &instance->bulk_waiter_list,
				    list) {
			if (waiter->pid == current->pid) {
				list_del(&waiter->list);
				found = true;
				break;
			}
		}
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		if (!found) {
			vchiq_log_error(vchiq_arm_log_level,
				"no bulk_waiter found for pid %d",
				current->pid);
			ret = -ESRCH;
			goto out;
		}
		vchiq_log_info(vchiq_arm_log_level,
			"found bulk_waiter %pK for pid %d", waiter,
			current->pid);
		userdata = &waiter->bulk_waiter;
	}

	/*
	 * FIXME address space mismatch:
	 * args->data may be interpreted as a kernel pointer
	 * in create_pagelist() called from vchiq_bulk_transfer(),
	 * accessing kernel data instead of user space, based on the
	 * address.
	 */
	status = vchiq_bulk_transfer(args->handle, NULL, args->data, args->size,
				     userdata, args->mode, dir);

	/* Callback/no-callback modes have no waiter to manage. */
	if (!waiter) {
		ret = 0;
		goto out;
	}

	if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
		!waiter->bulk_waiter.bulk) {
		if (waiter->bulk_waiter.bulk) {
			/* Cancel the signal when the transfer
			** completes. */
			spin_lock(&bulk_waiter_spinlock);
			waiter->bulk_waiter.bulk->userdata = NULL;
			spin_unlock(&bulk_waiter_spinlock);
		}
		kfree(waiter);
		ret = 0;
	} else {
		const enum vchiq_bulk_mode mode_waiting =
			VCHIQ_BULK_MODE_WAITING;
		waiter->pid = current->pid;
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_add(&waiter->list, &instance->bulk_waiter_list);
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		vchiq_log_info(vchiq_arm_log_level,
			"saved bulk_waiter %pK for pid %d",
			waiter, current->pid);

		/* Tell userspace to come back with MODE_WAITING. */
		ret = put_user(mode_waiting, mode);
	}
out:
	unlock_service(service);
	if (ret)
		return ret;
	else if (status == VCHIQ_ERROR)
		return -EIO;
	else if (status == VCHIQ_RETRY)
		return -EINTR;
	return 0;
}
1046 
1047 /* read a user pointer value from an array pointers in user space */
/*
 * Read entry @index from an array of user-space pointers located at @ubuf.
 * Transparently handles the 32-bit (compat) and native pointer widths.
 * On success, stores the fetched pointer in *buf and returns 0; otherwise
 * returns the error from get_user() (with *buf set from the zeroed value).
 */
static inline int vchiq_get_user_ptr(void __user **buf, void __user *ubuf, int index)
{
	if (in_compat_syscall()) {
		compat_uptr_t __user *uarray = ubuf;
		compat_uptr_t p32;
		int err = get_user(p32, uarray + index);

		*buf = compat_ptr(p32);
		return err;
	} else {
		uintptr_t __user *uarray = ubuf;
		uintptr_t p;
		int err = get_user(p, uarray + index);

		*buf = (void __user *)p;
		return err;
	}
}
1065 
/* 32-bit layout of struct vchiq_completion_data, as seen by compat tasks. */
struct vchiq_completion_data32 {
	enum vchiq_reason reason;
	compat_uptr_t header;
	compat_uptr_t service_userdata;
	compat_uptr_t bulk_userdata;
};
1072 
/*
 * Copy one completion record to slot @index of the user-space array @buf,
 * converting to the 32-bit layout when the caller is a compat task.
 * Returns 0 on success or -EFAULT if the user copy fails.
 */
static int vchiq_put_completion(struct vchiq_completion_data __user *buf,
				struct vchiq_completion_data *completion,
				int index)
{
	if (in_compat_syscall()) {
		struct vchiq_completion_data32 __user *buf32 =
			(struct vchiq_completion_data32 __user *)buf;
		/* Compound literal zero-fills padding before copy_to_user. */
		struct vchiq_completion_data32 tmp = {
			.reason		  = completion->reason,
			.header		  = ptr_to_compat(completion->header),
			.service_userdata = ptr_to_compat(completion->service_userdata),
			.bulk_userdata	  = ptr_to_compat(completion->bulk_userdata),
		};

		return copy_to_user(&buf32[index], &tmp, sizeof(tmp)) ?
			-EFAULT : 0;
	}

	return copy_to_user(&buf[index], completion, sizeof(*completion)) ?
		-EFAULT : 0;
}
1095 
/*
 * Handler for VCHIQ_IOC_AWAIT_COMPLETION.
 *
 * Blocks until at least one completion record is queued on @instance (or
 * the instance is closing), then copies up to args->count records to the
 * user-space array at args->buf.  A completion that carries a message has
 * the message copied into one of the caller-supplied buffers listed in
 * args->msgbufs; the number of unconsumed buffers is written back through
 * *msgbufcountp.  Returns the number of completions delivered (possibly 0
 * when the instance is closing), or a negative errno.
 */
static int vchiq_ioc_await_completion(struct vchiq_instance *instance,
				      struct vchiq_await_completion *args,
				      int __user *msgbufcountp)
{
	int msgbufcount;
	int remove;
	int ret;

	DEBUG_INITIALISE(g_state.local)

	DEBUG_TRACE(AWAIT_COMPLETION_LINE);
	if (!instance->connected) {
		return -ENOTCONN;
	}

	mutex_lock(&instance->completion_mutex);

	DEBUG_TRACE(AWAIT_COMPLETION_LINE);
	/* Wait for a completion to be posted, or for the instance to close. */
	while ((instance->completion_remove ==
		instance->completion_insert)
		&& !instance->closing) {
		int rc;

		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
		/* Drop the mutex while sleeping so producers can post. */
		mutex_unlock(&instance->completion_mutex);
		rc = wait_for_completion_interruptible(
					&instance->insert_event);
		mutex_lock(&instance->completion_mutex);
		if (rc) {
			DEBUG_TRACE(AWAIT_COMPLETION_LINE);
			vchiq_log_info(vchiq_arm_log_level,
				"AWAIT_COMPLETION interrupted");
			ret = -EINTR;
			goto out;
		}
	}
	DEBUG_TRACE(AWAIT_COMPLETION_LINE);

	msgbufcount = args->msgbufcount;
	remove = instance->completion_remove;

	/* ret doubles as the count of completions copied so far. */
	for (ret = 0; ret < args->count; ret++) {
		struct vchiq_completion_data_kernel *completion;
		struct vchiq_completion_data user_completion;
		struct vchiq_service *service;
		struct user_service *user_service;
		struct vchiq_header *header;

		if (remove == instance->completion_insert)
			break;

		completion = &instance->completions[
			remove & (MAX_COMPLETIONS - 1)];

		/*
		 * A read memory barrier is needed to stop
		 * prefetch of a stale completion record
		 */
		rmb();

		service = completion->service_userdata;
		user_service = service->base.userdata;

		memset(&user_completion, 0, sizeof(user_completion));
		user_completion = (struct vchiq_completion_data) {
			.reason = completion->reason,
			.service_userdata = user_service->userdata,
		};

		header = completion->header;
		if (header) {
			void __user *msgbuf;
			int msglen;

			msglen = header->size + sizeof(struct vchiq_header);
			/* This must be a VCHIQ-style service */
			if (args->msgbufsize < msglen) {
				vchiq_log_error(vchiq_arm_log_level,
					"header %pK: msgbufsize %x < msglen %x",
					header, args->msgbufsize, msglen);
				WARN(1, "invalid message size\n");
				if (ret == 0)
					ret = -EMSGSIZE;
				break;
			}
			if (msgbufcount <= 0)
				/* Stall here for lack of a
				** buffer for the message. */
				break;
			/* Get the pointer from user space */
			msgbufcount--;
			if (vchiq_get_user_ptr(&msgbuf, args->msgbufs,
						msgbufcount)) {
				if (ret == 0)
					ret = -EFAULT;
				break;
			}

			/* Copy the message to user space */
			if (copy_to_user(msgbuf, header, msglen)) {
				if (ret == 0)
					ret = -EFAULT;
				break;
			}

			/* Now it has been copied, the message
			** can be released. */
			vchiq_release_message(service->handle, header);

			/* The completion must point to the
			** msgbuf. */
			user_completion.header = msgbuf;
		}

		if ((completion->reason == VCHIQ_SERVICE_CLOSED) &&
		    !instance->use_close_delivered)
			unlock_service(service);

		/*
		 * FIXME: address space mismatch, does bulk_userdata
		 * actually point to user or kernel memory?
		 */
		user_completion.bulk_userdata = completion->bulk_userdata;

		if (vchiq_put_completion(args->buf, &user_completion, ret)) {
			if (ret == 0)
				ret = -EFAULT;
			break;
		}

		/*
		 * Ensure that the above copy has completed
		 * before advancing the remove pointer.
		 */
		mb();
		remove++;
		instance->completion_remove = remove;
	}

	/* Tell user space how many message buffers remain unused. */
	if (msgbufcount != args->msgbufcount) {
		if (put_user(msgbufcount, msgbufcountp))
			ret = -EFAULT;
	}
out:
	/* On error, wake the slot handler in case the queue was full. */
	if (ret)
		complete(&instance->remove_event);
	mutex_unlock(&instance->completion_mutex);
	DEBUG_TRACE(AWAIT_COMPLETION_LINE);

	return ret;
}
1247 
1248 /****************************************************************************
1249 *
1250 *   vchiq_ioctl
1251 *
1252 ***************************************************************************/
/*
 * Native (64-bit) ioctl dispatcher for /dev/vchiq.
 *
 * Per-command errors are returned through @ret; service-level failures are
 * accumulated in @status and translated to -EIO / -EINTR at the end.  Any
 * service looked up and left in @service is unlocked before returning.
 */
static long
vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct vchiq_instance *instance = file->private_data;
	enum vchiq_status status = VCHIQ_SUCCESS;
	struct vchiq_service *service = NULL;
	long ret = 0;
	int i, rc;

	vchiq_log_trace(vchiq_arm_log_level,
		"%s - instance %pK, cmd %s, arg %lx",
		__func__, instance,
		((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
		(_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
		ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);

	switch (cmd) {
	case VCHIQ_IOC_SHUTDOWN:
		if (!instance->connected)
			break;

		/* Remove all services */
		i = 0;
		while ((service = next_service_by_instance(instance->state,
			instance, &i))) {
			status = vchiq_remove_service(service->handle);
			unlock_service(service);
			if (status != VCHIQ_SUCCESS)
				break;
		}
		service = NULL;

		if (status == VCHIQ_SUCCESS) {
			/* Wake the completion thread and ask it to exit */
			instance->closing = 1;
			complete(&instance->insert_event);
		}

		break;

	case VCHIQ_IOC_CONNECT:
		if (instance->connected) {
			ret = -EINVAL;
			break;
		}
		rc = mutex_lock_killable(&instance->state->mutex);
		if (rc) {
			vchiq_log_error(vchiq_arm_log_level,
				"vchiq: connect: could not lock mutex for "
				"state %d: %d",
				instance->state->id, rc);
			ret = -EINTR;
			break;
		}
		status = vchiq_connect_internal(instance->state, instance);
		mutex_unlock(&instance->state->mutex);

		if (status == VCHIQ_SUCCESS)
			instance->connected = 1;
		else
			vchiq_log_error(vchiq_arm_log_level,
				"vchiq: could not connect: %d", status);
		break;

	case VCHIQ_IOC_CREATE_SERVICE: {
		struct vchiq_create_service __user *argp;
		struct vchiq_create_service args;

		argp = (void __user *)arg;
		if (copy_from_user(&args, argp, sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		ret = vchiq_ioc_create_service(instance, &args);
		if (ret < 0)
			break;

		/* Return the new handle; undo the create if that fails. */
		if (put_user(args.handle, &argp->handle)) {
			vchiq_remove_service(args.handle);
			ret = -EFAULT;
		}
	} break;

	case VCHIQ_IOC_CLOSE_SERVICE:
	case VCHIQ_IOC_REMOVE_SERVICE: {
		unsigned int handle = (unsigned int)arg;
		struct user_service *user_service;

		service = find_service_for_instance(instance, handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}

		user_service = service->base.userdata;

		/* close_pending is false on first entry, and when the
		   wait in vchiq_close_service has been interrupted. */
		if (!user_service->close_pending) {
			status = (cmd == VCHIQ_IOC_CLOSE_SERVICE) ?
				 vchiq_close_service(service->handle) :
				 vchiq_remove_service(service->handle);
			if (status != VCHIQ_SUCCESS)
				break;
		}

		/* close_pending is true once the underlying service
		   has been closed until the client library calls the
		   CLOSE_DELIVERED ioctl, signalling close_event. */
		if (user_service->close_pending &&
			wait_for_completion_interruptible(
				&user_service->close_event))
			status = VCHIQ_RETRY;
		break;
	}

	case VCHIQ_IOC_USE_SERVICE:
	case VCHIQ_IOC_RELEASE_SERVICE:	{
		unsigned int handle = (unsigned int)arg;

		service = find_service_for_instance(instance, handle);
		if (service) {
			/* Adjust the service's use count up or down. */
			status = (cmd == VCHIQ_IOC_USE_SERVICE)	?
				vchiq_use_service_internal(service) :
				vchiq_release_service_internal(service);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
					"%s: cmd %s returned error %d for "
					"service %c%c%c%c:%03d",
					__func__,
					(cmd == VCHIQ_IOC_USE_SERVICE) ?
						"VCHIQ_IOC_USE_SERVICE" :
						"VCHIQ_IOC_RELEASE_SERVICE",
					status,
					VCHIQ_FOURCC_AS_4CHARS(
						service->base.fourcc),
					service->client_id);
				ret = -EINVAL;
			}
		} else
			ret = -EINVAL;
	} break;

	case VCHIQ_IOC_QUEUE_MESSAGE: {
		struct vchiq_queue_message args;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		service = find_service_for_instance(instance, args.handle);

		if (service && (args.count <= MAX_ELEMENTS)) {
			/* Copy elements into kernel space */
			struct vchiq_element elements[MAX_ELEMENTS];

			if (copy_from_user(elements, args.elements,
				args.count * sizeof(struct vchiq_element)) == 0)
				ret = vchiq_ioc_queue_message(args.handle, elements,
							      args.count);
			else
				ret = -EFAULT;
		} else {
			ret = -EINVAL;
		}
	} break;

	case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
	case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
		struct vchiq_queue_bulk_transfer args;
		struct vchiq_queue_bulk_transfer __user *argp;

		enum vchiq_bulk_dir dir =
			(cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
			VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;

		argp = (void __user *)arg;
		if (copy_from_user(&args, argp, sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		/* &argp->mode lets the helper report BULK_MODE_WAITING back. */
		ret = vchiq_irq_queue_bulk_tx_rx(instance, &args,
						 dir, &argp->mode);
	} break;

	case VCHIQ_IOC_AWAIT_COMPLETION: {
		struct vchiq_await_completion args;
		struct vchiq_await_completion __user *argp;

		argp = (void __user *)arg;
		if (copy_from_user(&args, argp, sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		ret = vchiq_ioc_await_completion(instance, &args,
						 &argp->msgbufcount);
	} break;

	case VCHIQ_IOC_DEQUEUE_MESSAGE: {
		struct vchiq_dequeue_message args;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		ret = vchiq_ioc_dequeue_message(instance, &args);
	} break;

	case VCHIQ_IOC_GET_CLIENT_ID: {
		unsigned int handle = (unsigned int)arg;

		ret = vchiq_get_client_id(handle);
	} break;

	case VCHIQ_IOC_GET_CONFIG: {
		struct vchiq_get_config args;
		struct vchiq_config config;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}
		/* The caller may request a prefix of the config struct. */
		if (args.config_size > sizeof(config)) {
			ret = -EINVAL;
			break;
		}

		vchiq_get_config(&config);
		if (copy_to_user(args.pconfig, &config, args.config_size)) {
			ret = -EFAULT;
			break;
		}
	} break;

	case VCHIQ_IOC_SET_SERVICE_OPTION: {
		struct vchiq_set_service_option args;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		service = find_service_for_instance(instance, args.handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}

		status = vchiq_set_service_option(
				args.handle, args.option, args.value);
	} break;

	case VCHIQ_IOC_LIB_VERSION: {
		unsigned int lib_version = (unsigned int)arg;

		/* Newer client libraries opt in to CLOSE_DELIVERED handling. */
		if (lib_version < VCHIQ_VERSION_MIN)
			ret = -EINVAL;
		else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED)
			instance->use_close_delivered = 1;
	} break;

	case VCHIQ_IOC_CLOSE_DELIVERED: {
		unsigned int handle = (unsigned int)arg;

		service = find_closed_service_for_instance(instance, handle);
		if (service) {
			struct user_service *user_service =
				(struct user_service *)service->base.userdata;
			close_delivered(user_service);
		} else
			ret = -EINVAL;
	} break;

	default:
		ret = -ENOTTY;
		break;
	}

	if (service)
		unlock_service(service);

	/* Map accumulated service status onto an errno if nothing else failed. */
	if (ret == 0) {
		if (status == VCHIQ_ERROR)
			ret = -EIO;
		else if (status == VCHIQ_RETRY)
			ret = -EINTR;
	}

	/* Log genuine failures at info level, everything else as trace. */
	if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
		(ret != -EWOULDBLOCK))
		vchiq_log_info(vchiq_arm_log_level,
			"  ioctl instance %pK, cmd %s -> status %d, %ld",
			instance,
			(_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
				ioctl_names[_IOC_NR(cmd)] :
				"<invalid>",
			status, ret);
	else
		vchiq_log_trace(vchiq_arm_log_level,
			"  ioctl instance %pK, cmd %s -> status %d, %ld",
			instance,
			(_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
				ioctl_names[_IOC_NR(cmd)] :
				"<invalid>",
			status, ret);

	return ret;
}
1570 
1571 #if defined(CONFIG_COMPAT)
1572 
/* 32-bit layout of struct vchiq_service_params for compat ioctls. */
struct vchiq_service_params32 {
	int fourcc;
	compat_uptr_t callback;
	compat_uptr_t userdata;
	short version; /* Increment for non-trivial changes */
	short version_min; /* Update for incompatible changes */
};

/* 32-bit layout of struct vchiq_create_service for compat ioctls. */
struct vchiq_create_service32 {
	struct vchiq_service_params32 params;
	int is_open;
	int is_vchi;
	unsigned int handle; /* OUT */
};

#define VCHIQ_IOC_CREATE_SERVICE32 \
	_IOWR(VCHIQ_IOC_MAGIC, 2, struct vchiq_create_service32)
1590 
/*
 * Compat handler for VCHIQ_IOC_CREATE_SERVICE: widen the 32-bit argument
 * struct, create the service, and write the new handle back to user space.
 * The service is removed again if the handle cannot be returned.
 */
static long
vchiq_compat_ioctl_create_service(
	struct file *file,
	unsigned int cmd,
	struct vchiq_create_service32 __user *ptrargs32)
{
	struct vchiq_create_service args;
	struct vchiq_create_service32 args32;
	long ret;

	if (copy_from_user(&args32, ptrargs32, sizeof(args32)))
		return -EFAULT;

	args = (struct vchiq_create_service) {
		.params = {
			.fourcc	     = args32.params.fourcc,
			.callback    = compat_ptr(args32.params.callback),
			.userdata    = compat_ptr(args32.params.userdata),
			.version     = args32.params.version,
			.version_min = args32.params.version_min,
		},
		.is_open = args32.is_open,
		.is_vchi = args32.is_vchi,
		.handle  = args32.handle,
	};

	ret = vchiq_ioc_create_service(file->private_data, &args);
	if (ret < 0)
		return ret;

	/* Report the handle; undo the create if user memory is unwritable. */
	if (put_user(args.handle, &ptrargs32->handle)) {
		vchiq_remove_service(args.handle);
		return -EFAULT;
	}

	return 0;
}
1628 
/* 32-bit layout of struct vchiq_element for compat ioctls. */
struct vchiq_element32 {
	compat_uptr_t data;
	unsigned int size;
};

/* 32-bit layout of struct vchiq_queue_message for compat ioctls. */
struct vchiq_queue_message32 {
	unsigned int handle;
	unsigned int count;
	compat_uptr_t elements;
};

#define VCHIQ_IOC_QUEUE_MESSAGE32 \
	_IOW(VCHIQ_IOC_MAGIC,  4, struct vchiq_queue_message32)
1642 
1643 static long
vchiq_compat_ioctl_queue_message(struct file * file,unsigned int cmd,struct vchiq_queue_message32 __user * arg)1644 vchiq_compat_ioctl_queue_message(struct file *file,
1645 				 unsigned int cmd,
1646 				 struct vchiq_queue_message32 __user *arg)
1647 {
1648 	struct vchiq_queue_message args;
1649 	struct vchiq_queue_message32 args32;
1650 	struct vchiq_service *service;
1651 	int ret;
1652 
1653 	if (copy_from_user(&args32, arg, sizeof(args32)))
1654 		return -EFAULT;
1655 
1656 	args = (struct vchiq_queue_message) {
1657 		.handle   = args32.handle,
1658 		.count    = args32.count,
1659 		.elements = compat_ptr(args32.elements),
1660 	};
1661 
1662 	if (args32.count > MAX_ELEMENTS)
1663 		return -EINVAL;
1664 
1665 	service = find_service_for_instance(file->private_data, args.handle);
1666 	if (!service)
1667 		return -EINVAL;
1668 
1669 	if (args32.elements && args32.count) {
1670 		struct vchiq_element32 element32[MAX_ELEMENTS];
1671 		struct vchiq_element elements[MAX_ELEMENTS];
1672 		unsigned int count;
1673 
1674 		if (copy_from_user(&element32, args.elements,
1675 				   sizeof(element32))) {
1676 			unlock_service(service);
1677 			return -EFAULT;
1678 		}
1679 
1680 		for (count = 0; count < args32.count; count++) {
1681 			elements[count].data =
1682 				compat_ptr(element32[count].data);
1683 			elements[count].size = element32[count].size;
1684 		}
1685 		ret = vchiq_ioc_queue_message(args.handle, elements,
1686 					      args.count);
1687 	} else {
1688 		ret = -EINVAL;
1689 	}
1690 	unlock_service(service);
1691 
1692 	return ret;
1693 }
1694 
/* 32-bit layout of struct vchiq_queue_bulk_transfer for compat ioctls. */
struct vchiq_queue_bulk_transfer32 {
	unsigned int handle;
	compat_uptr_t data;
	unsigned int size;
	compat_uptr_t userdata;
	enum vchiq_bulk_mode mode;
};

#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT32 \
	_IOWR(VCHIQ_IOC_MAGIC, 5, struct vchiq_queue_bulk_transfer32)
#define VCHIQ_IOC_QUEUE_BULK_RECEIVE32 \
	_IOWR(VCHIQ_IOC_MAGIC, 6, struct vchiq_queue_bulk_transfer32)
1707 
1708 static long
vchiq_compat_ioctl_queue_bulk(struct file * file,unsigned int cmd,struct vchiq_queue_bulk_transfer32 __user * argp)1709 vchiq_compat_ioctl_queue_bulk(struct file *file,
1710 			      unsigned int cmd,
1711 			      struct vchiq_queue_bulk_transfer32 __user *argp)
1712 {
1713 	struct vchiq_queue_bulk_transfer32 args32;
1714 	struct vchiq_queue_bulk_transfer args;
1715 	enum vchiq_bulk_dir dir = (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
1716 				  VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
1717 
1718 	if (copy_from_user(&args32, argp, sizeof(args32)))
1719 		return -EFAULT;
1720 
1721 	args = (struct vchiq_queue_bulk_transfer) {
1722 		.handle   = args32.handle,
1723 		.data	  = compat_ptr(args32.data),
1724 		.size	  = args32.size,
1725 		.userdata = compat_ptr(args32.userdata),
1726 		.mode	  = args32.mode,
1727 	};
1728 
1729 	return vchiq_irq_queue_bulk_tx_rx(file->private_data, &args,
1730 					  dir, &argp->mode);
1731 }
1732 
/* 32-bit layout of struct vchiq_await_completion for compat ioctls. */
struct vchiq_await_completion32 {
	unsigned int count;
	compat_uptr_t buf;
	unsigned int msgbufsize;
	unsigned int msgbufcount; /* IN/OUT */
	compat_uptr_t msgbufs;
};

#define VCHIQ_IOC_AWAIT_COMPLETION32 \
	_IOWR(VCHIQ_IOC_MAGIC, 7, struct vchiq_await_completion32)
1743 
1744 static long
vchiq_compat_ioctl_await_completion(struct file * file,unsigned int cmd,struct vchiq_await_completion32 __user * argp)1745 vchiq_compat_ioctl_await_completion(struct file *file,
1746 				    unsigned int cmd,
1747 				    struct vchiq_await_completion32 __user *argp)
1748 {
1749 	struct vchiq_await_completion args;
1750 	struct vchiq_await_completion32 args32;
1751 
1752 	if (copy_from_user(&args32, argp, sizeof(args32)))
1753 		return -EFAULT;
1754 
1755 	args = (struct vchiq_await_completion) {
1756 		.count		= args32.count,
1757 		.buf		= compat_ptr(args32.buf),
1758 		.msgbufsize	= args32.msgbufsize,
1759 		.msgbufcount	= args32.msgbufcount,
1760 		.msgbufs	= compat_ptr(args32.msgbufs),
1761 	};
1762 
1763 	return vchiq_ioc_await_completion(file->private_data, &args,
1764 					  &argp->msgbufcount);
1765 }
1766 
/* 32-bit layout of struct vchiq_dequeue_message for compat ioctls. */
struct vchiq_dequeue_message32 {
	unsigned int handle;
	int blocking;
	unsigned int bufsize;
	compat_uptr_t buf;
};

#define VCHIQ_IOC_DEQUEUE_MESSAGE32 \
	_IOWR(VCHIQ_IOC_MAGIC, 8, struct vchiq_dequeue_message32)
1776 
1777 static long
vchiq_compat_ioctl_dequeue_message(struct file * file,unsigned int cmd,struct vchiq_dequeue_message32 __user * arg)1778 vchiq_compat_ioctl_dequeue_message(struct file *file,
1779 				   unsigned int cmd,
1780 				   struct vchiq_dequeue_message32 __user *arg)
1781 {
1782 	struct vchiq_dequeue_message32 args32;
1783 	struct vchiq_dequeue_message args;
1784 
1785 	if (copy_from_user(&args32, arg, sizeof(args32)))
1786 		return -EFAULT;
1787 
1788 	args = (struct vchiq_dequeue_message) {
1789 		.handle		= args32.handle,
1790 		.blocking	= args32.blocking,
1791 		.bufsize	= args32.bufsize,
1792 		.buf		= compat_ptr(args32.buf),
1793 	};
1794 
1795 	return vchiq_ioc_dequeue_message(file->private_data, &args);
1796 }
1797 
/* 32-bit layout of struct vchiq_get_config for compat ioctls. */
struct vchiq_get_config32 {
	unsigned int config_size;
	compat_uptr_t pconfig;
};

#define VCHIQ_IOC_GET_CONFIG32 \
	_IOWR(VCHIQ_IOC_MAGIC, 10, struct vchiq_get_config32)
1805 
1806 static long
vchiq_compat_ioctl_get_config(struct file * file,unsigned int cmd,struct vchiq_get_config32 __user * arg)1807 vchiq_compat_ioctl_get_config(struct file *file,
1808 			      unsigned int cmd,
1809 			      struct vchiq_get_config32 __user *arg)
1810 {
1811 	struct vchiq_get_config32 args32;
1812 	struct vchiq_config config;
1813 	void __user *ptr;
1814 
1815 	if (copy_from_user(&args32, arg, sizeof(args32)))
1816 		return -EFAULT;
1817 	if (args32.config_size > sizeof(config))
1818 		return -EINVAL;
1819 
1820 	vchiq_get_config(&config);
1821 	ptr = compat_ptr(args32.pconfig);
1822 	if (copy_to_user(ptr, &config, args32.config_size))
1823 		return -EFAULT;
1824 
1825 	return 0;
1826 }
1827 
/*
 * Compat ioctl entry point: route commands with a 32-bit argument layout
 * to their dedicated handlers, and pass pointer-layout-independent
 * commands straight to the native vchiq_ioctl().
 */
static long
vchiq_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	void __user *argp = compat_ptr(arg);
	switch (cmd) {
	case VCHIQ_IOC_CREATE_SERVICE32:
		return vchiq_compat_ioctl_create_service(file, cmd, argp);
	case VCHIQ_IOC_QUEUE_MESSAGE32:
		return vchiq_compat_ioctl_queue_message(file, cmd, argp);
	case VCHIQ_IOC_QUEUE_BULK_TRANSMIT32:
	case VCHIQ_IOC_QUEUE_BULK_RECEIVE32:
		return vchiq_compat_ioctl_queue_bulk(file, cmd, argp);
	case VCHIQ_IOC_AWAIT_COMPLETION32:
		return vchiq_compat_ioctl_await_completion(file, cmd, argp);
	case VCHIQ_IOC_DEQUEUE_MESSAGE32:
		return vchiq_compat_ioctl_dequeue_message(file, cmd, argp);
	case VCHIQ_IOC_GET_CONFIG32:
		return vchiq_compat_ioctl_get_config(file, cmd, argp);
	default:
		return vchiq_ioctl(file, cmd, (unsigned long)argp);
	}
}
1850 
1851 #endif
1852 
/*
 * open() handler for /dev/vchiq: allocate and initialise a per-open
 * vchiq_instance and stash it in file->private_data.  Fails with
 * -ENOTCONN if the VideoCore link is not up yet.
 */
static int vchiq_open(struct inode *inode, struct file *file)
{
	struct vchiq_state *state = vchiq_get_state();
	struct vchiq_instance *instance;

	vchiq_log_info(vchiq_arm_log_level, "vchiq_open");

	if (!state) {
		vchiq_log_error(vchiq_arm_log_level,
				"vchiq has no connection to VideoCore");
		return -ENOTCONN;
	}

	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
	if (!instance)
		return -ENOMEM;

	instance->state = state;
	instance->pid = current->tgid;

	vchiq_debugfs_add_instance(instance);

	init_completion(&instance->insert_event);
	init_completion(&instance->remove_event);
	mutex_init(&instance->completion_mutex);
	mutex_init(&instance->bulk_waiter_list_mutex);
	INIT_LIST_HEAD(&instance->bulk_waiter_list);

	file->private_data = instance;

	return 0;
}
1885 
/*
 * release() handler for /dev/vchiq: tear down the per-open instance.
 * Stops the completion thread, terminates every service still owned by
 * this instance and waits for it to reach FREE, drains queued messages
 * and completions, then frees leftover bulk waiters and the instance.
 */
static int vchiq_release(struct inode *inode, struct file *file)
{
	struct vchiq_instance *instance = file->private_data;
	struct vchiq_state *state = vchiq_get_state();
	struct vchiq_service *service;
	int ret = 0;
	int i;

	vchiq_log_info(vchiq_arm_log_level, "%s: instance=%lx", __func__,
		       (unsigned long)instance);

	if (!state) {
		ret = -EPERM;
		goto out;
	}

	/* Ensure videocore is awake to allow termination. */
	vchiq_use_internal(instance->state, NULL, USE_TYPE_VCHIQ);

	mutex_lock(&instance->completion_mutex);

	/* Wake the completion thread and ask it to exit */
	instance->closing = 1;
	complete(&instance->insert_event);

	mutex_unlock(&instance->completion_mutex);

	/* Wake the slot handler if the completion queue is full. */
	complete(&instance->remove_event);

	/* Mark all services for termination... */
	i = 0;
	while ((service = next_service_by_instance(state, instance, &i))) {
		struct user_service *user_service = service->base.userdata;

		/* Wake the slot handler if the msg queue is full. */
		complete(&user_service->remove_event);

		vchiq_terminate_service_internal(service);
		unlock_service(service);
	}

	/* ...and wait for them to die */
	i = 0;
	while ((service = next_service_by_instance(state, instance, &i))) {
		struct user_service *user_service = service->base.userdata;

		wait_for_completion(&service->remove_event);

		BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);

		spin_lock(&msg_queue_spinlock);

		/* Release every message still queued for this service. */
		while (user_service->msg_remove != user_service->msg_insert) {
			struct vchiq_header *header;
			int m = user_service->msg_remove & (MSG_QUEUE_SIZE - 1);

			header = user_service->msg_queue[m];
			user_service->msg_remove++;
			/* Drop the spinlock around the (sleepable) release. */
			spin_unlock(&msg_queue_spinlock);

			if (header)
				vchiq_release_message(service->handle, header);
			spin_lock(&msg_queue_spinlock);
		}

		spin_unlock(&msg_queue_spinlock);

		unlock_service(service);
	}

	/* Release any closed services */
	while (instance->completion_remove !=
		instance->completion_insert) {
		struct vchiq_completion_data_kernel *completion;
		struct vchiq_service *service;

		completion = &instance->completions[
			instance->completion_remove & (MAX_COMPLETIONS - 1)];
		service = completion->service_userdata;
		if (completion->reason == VCHIQ_SERVICE_CLOSED) {
			struct user_service *user_service =
							service->base.userdata;

			/* Wake any blocked user-thread */
			if (instance->use_close_delivered)
				complete(&user_service->close_event);
			unlock_service(service);
		}
		instance->completion_remove++;
	}

	/* Release the PEER service count. */
	vchiq_release_internal(instance->state, NULL);

	/* Free bulk waiters left behind by interrupted BLOCKING transfers. */
	{
		struct bulk_waiter_node *waiter, *next;

		list_for_each_entry_safe(waiter, next,
					 &instance->bulk_waiter_list, list) {
			list_del(&waiter->list);
			vchiq_log_info(vchiq_arm_log_level,
				"bulk_waiter - cleaned up %pK for pid %d",
				waiter, waiter->pid);
			kfree(waiter);
		}
	}

	vchiq_debugfs_remove_instance(instance);

	kfree(instance);
	file->private_data = NULL;

out:
	return ret;
}
2002 
2003 /****************************************************************************
2004 *
2005 *   vchiq_dump
2006 *
2007 ***************************************************************************/
2008 
/*
 * Append @len bytes of @str to the user buffer described by the dump
 * context.  Honours context->offset (bytes to skip before output starts)
 * and context->space (total user buffer size); output is silently
 * truncated when space runs out.  A trailing NUL counted in @len marks
 * end-of-line and is rewritten as '\n' in the user buffer.
 * Returns 0 on success or -EFAULT if the user copy fails.
 */
int vchiq_dump(void *dump_context, const char *str, int len)
{
	struct dump_context *context = (struct dump_context *)dump_context;
	int copy_bytes;

	/* Buffer already full - drop further output. */
	if (context->actual >= context->space)
		return 0;

	/* Consume the initial skip region (read offset into the dump). */
	if (context->offset > 0) {
		int skip_bytes = min_t(int, len, context->offset);

		str += skip_bytes;
		len -= skip_bytes;
		context->offset -= skip_bytes;
		if (context->offset > 0)
			return 0;
	}
	copy_bytes = min_t(int, len, context->space - context->actual);
	if (copy_bytes == 0)
		return 0;
	if (copy_to_user(context->buf + context->actual, str,
			 copy_bytes))
		return -EFAULT;
	context->actual += copy_bytes;
	len -= copy_bytes;

	/*
	 * If the terminating NUL is included in the length, then it
	 * marks the end of a line and should be replaced with a
	 * carriage return.
	 */
	if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
		char cr = '\n';

		if (copy_to_user(context->buf + context->actual - 1,
				 &cr, 1))
			return -EFAULT;
	}
	return 0;
}
2049 
/****************************************************************************
*
*   vchiq_dump_platform_instances
*
***************************************************************************/
2055 
vchiq_dump_platform_instances(void * dump_context)2056 int vchiq_dump_platform_instances(void *dump_context)
2057 {
2058 	struct vchiq_state *state = vchiq_get_state();
2059 	char buf[80];
2060 	int len;
2061 	int i;
2062 
2063 	/* There is no list of instances, so instead scan all services,
2064 		marking those that have been dumped. */
2065 
2066 	rcu_read_lock();
2067 	for (i = 0; i < state->unused_service; i++) {
2068 		struct vchiq_service *service;
2069 		struct vchiq_instance *instance;
2070 
2071 		service = rcu_dereference(state->services[i]);
2072 		if (!service || service->base.callback != service_callback)
2073 			continue;
2074 
2075 		instance = service->instance;
2076 		if (instance)
2077 			instance->mark = 0;
2078 	}
2079 	rcu_read_unlock();
2080 
2081 	for (i = 0; i < state->unused_service; i++) {
2082 		struct vchiq_service *service;
2083 		struct vchiq_instance *instance;
2084 		int err;
2085 
2086 		rcu_read_lock();
2087 		service = rcu_dereference(state->services[i]);
2088 		if (!service || service->base.callback != service_callback) {
2089 			rcu_read_unlock();
2090 			continue;
2091 		}
2092 
2093 		instance = service->instance;
2094 		if (!instance || instance->mark) {
2095 			rcu_read_unlock();
2096 			continue;
2097 		}
2098 		rcu_read_unlock();
2099 
2100 		len = snprintf(buf, sizeof(buf),
2101 			       "Instance %pK: pid %d,%s completions %d/%d",
2102 			       instance, instance->pid,
2103 			       instance->connected ? " connected, " :
2104 			       "",
2105 			       instance->completion_insert -
2106 			       instance->completion_remove,
2107 			       MAX_COMPLETIONS);
2108 		err = vchiq_dump(dump_context, buf, len + 1);
2109 		if (err)
2110 			return err;
2111 		instance->mark = 1;
2112 	}
2113 	return 0;
2114 }
2115 
2116 /****************************************************************************
2117 *
2118 *   vchiq_dump_platform_service_state
2119 *
2120 ***************************************************************************/
2121 
vchiq_dump_platform_service_state(void * dump_context,struct vchiq_service * service)2122 int vchiq_dump_platform_service_state(void *dump_context,
2123 				      struct vchiq_service *service)
2124 {
2125 	struct user_service *user_service =
2126 			(struct user_service *)service->base.userdata;
2127 	char buf[80];
2128 	int len;
2129 
2130 	len = scnprintf(buf, sizeof(buf), "  instance %pK", service->instance);
2131 
2132 	if ((service->base.callback == service_callback) &&
2133 		user_service->is_vchi) {
2134 		len += scnprintf(buf + len, sizeof(buf) - len,
2135 			", %d/%d messages",
2136 			user_service->msg_insert - user_service->msg_remove,
2137 			MSG_QUEUE_SIZE);
2138 
2139 		if (user_service->dequeue_pending)
2140 			len += scnprintf(buf + len, sizeof(buf) - len,
2141 				" (dequeue pending)");
2142 	}
2143 
2144 	return vchiq_dump(dump_context, buf, len + 1);
2145 }
2146 
2147 /****************************************************************************
2148 *
2149 *   vchiq_read
2150 *
2151 ***************************************************************************/
2152 
2153 static ssize_t
vchiq_read(struct file * file,char __user * buf,size_t count,loff_t * ppos)2154 vchiq_read(struct file *file, char __user *buf,
2155 	size_t count, loff_t *ppos)
2156 {
2157 	struct dump_context context;
2158 	int err;
2159 
2160 	context.buf = buf;
2161 	context.actual = 0;
2162 	context.space = count;
2163 	context.offset = *ppos;
2164 
2165 	err = vchiq_dump_state(&context, &g_state);
2166 	if (err)
2167 		return err;
2168 
2169 	*ppos += context.actual;
2170 
2171 	return context.actual;
2172 }
2173 
2174 struct vchiq_state *
vchiq_get_state(void)2175 vchiq_get_state(void)
2176 {
2177 
2178 	if (!g_state.remote)
2179 		printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
2180 	else if (g_state.remote->initialised != 1)
2181 		printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
2182 			__func__, g_state.remote->initialised);
2183 
2184 	return (g_state.remote &&
2185 		(g_state.remote->initialised == 1)) ? &g_state : NULL;
2186 }
2187 
/* Character-device entry points for /dev/vchiq. */
static const struct file_operations
vchiq_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = vchiq_ioctl,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vchiq_compat_ioctl,
#endif
	.open = vchiq_open,
	.release = vchiq_release,
	.read = vchiq_read
};
2199 
2200 /*
2201  * Autosuspend related functionality
2202  */
2203 
/*
 * Service callback for the keep-alive ("KEEP") service.  No traffic is
 * expected on this service, so any callback is logged as an error and
 * otherwise ignored.
 */
static enum vchiq_status
vchiq_keepalive_vchiq_callback(enum vchiq_reason reason,
	struct vchiq_header *header,
	unsigned int service_user,
	void *bulk_user)
{
	vchiq_log_error(vchiq_susp_log_level,
		"%s callback reason %d", __func__, reason);
	return 0;
}
2214 
/*
 * Kernel thread that services remote use/release requests from process
 * context.  It opens its own VCHIQ instance and a "KEEP" service, then
 * sleeps on ka_evt; each wakeup drains the ka_use_count/ka_release_count
 * counters posted by vchiq_on_remote_use()/vchiq_on_remote_release() and
 * issues the matching vchiq_use_service()/vchiq_release_service() calls.
 */
static int
vchiq_keepalive_thread_func(void *v)
{
	struct vchiq_state *state = (struct vchiq_state *)v;
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	enum vchiq_status status;
	struct vchiq_instance *instance;
	unsigned int ka_handle;

	struct vchiq_service_params_kernel params = {
		.fourcc      = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
		.callback    = vchiq_keepalive_vchiq_callback,
		.version     = KEEPALIVE_VER,
		.version_min = KEEPALIVE_VER_MIN
	};

	status = vchiq_initialise(&instance);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s vchiq_initialise failed %d", __func__, status);
		goto exit;
	}

	status = vchiq_connect(instance);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s vchiq_connect failed %d", __func__, status);
		goto shutdown;
	}

	status = vchiq_add_service(instance, &params, &ka_handle);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s vchiq_open_service failed %d", __func__, status);
		goto shutdown;
	}

	while (1) {
		long rc = 0, uc = 0;

		if (wait_for_completion_interruptible(&arm_state->ka_evt)) {
			vchiq_log_error(vchiq_susp_log_level,
				"%s interrupted", __func__);
			flush_signals(current);
			continue;
		}

		/* read and clear counters.  Do release_count then use_count to
		 * prevent getting more releases than uses */
		rc = atomic_xchg(&arm_state->ka_release_count, 0);
		uc = atomic_xchg(&arm_state->ka_use_count, 0);

		/* Call use/release service the requisite number of times.
		 * Process use before release so use counts don't go negative */
		while (uc--) {
			atomic_inc(&arm_state->ka_use_ack_count);
			status = vchiq_use_service(ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
					"%s vchiq_use_service error %d",
					__func__, status);
			}
		}
		while (rc--) {
			status = vchiq_release_service(ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
					"%s vchiq_release_service error %d",
					__func__, status);
			}
		}
	}

shutdown:
	vchiq_shutdown(instance);
exit:
	return 0;
}
2294 
2295 enum vchiq_status
vchiq_arm_init_state(struct vchiq_state * state,struct vchiq_arm_state * arm_state)2296 vchiq_arm_init_state(struct vchiq_state *state,
2297 		     struct vchiq_arm_state *arm_state)
2298 {
2299 	if (arm_state) {
2300 		rwlock_init(&arm_state->susp_res_lock);
2301 
2302 		init_completion(&arm_state->ka_evt);
2303 		atomic_set(&arm_state->ka_use_count, 0);
2304 		atomic_set(&arm_state->ka_use_ack_count, 0);
2305 		atomic_set(&arm_state->ka_release_count, 0);
2306 
2307 		arm_state->state = state;
2308 		arm_state->first_connect = 0;
2309 
2310 	}
2311 	return VCHIQ_SUCCESS;
2312 }
2313 
2314 enum vchiq_status
vchiq_use_internal(struct vchiq_state * state,struct vchiq_service * service,enum USE_TYPE_E use_type)2315 vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
2316 		   enum USE_TYPE_E use_type)
2317 {
2318 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2319 	enum vchiq_status ret = VCHIQ_SUCCESS;
2320 	char entity[16];
2321 	int *entity_uc;
2322 	int local_uc;
2323 
2324 	if (!arm_state)
2325 		goto out;
2326 
2327 	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2328 
2329 	if (use_type == USE_TYPE_VCHIQ) {
2330 		sprintf(entity, "VCHIQ:   ");
2331 		entity_uc = &arm_state->peer_use_count;
2332 	} else if (service) {
2333 		sprintf(entity, "%c%c%c%c:%03d",
2334 			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2335 			service->client_id);
2336 		entity_uc = &service->service_use_count;
2337 	} else {
2338 		vchiq_log_error(vchiq_susp_log_level, "%s null service "
2339 				"ptr", __func__);
2340 		ret = VCHIQ_ERROR;
2341 		goto out;
2342 	}
2343 
2344 	write_lock_bh(&arm_state->susp_res_lock);
2345 	local_uc = ++arm_state->videocore_use_count;
2346 	++(*entity_uc);
2347 
2348 	vchiq_log_trace(vchiq_susp_log_level,
2349 		"%s %s count %d, state count %d",
2350 		__func__, entity, *entity_uc, local_uc);
2351 
2352 	write_unlock_bh(&arm_state->susp_res_lock);
2353 
2354 	if (ret == VCHIQ_SUCCESS) {
2355 		enum vchiq_status status = VCHIQ_SUCCESS;
2356 		long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
2357 
2358 		while (ack_cnt && (status == VCHIQ_SUCCESS)) {
2359 			/* Send the use notify to videocore */
2360 			status = vchiq_send_remote_use_active(state);
2361 			if (status == VCHIQ_SUCCESS)
2362 				ack_cnt--;
2363 			else
2364 				atomic_add(ack_cnt,
2365 					&arm_state->ka_use_ack_count);
2366 		}
2367 	}
2368 
2369 out:
2370 	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
2371 	return ret;
2372 }
2373 
2374 enum vchiq_status
vchiq_release_internal(struct vchiq_state * state,struct vchiq_service * service)2375 vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
2376 {
2377 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2378 	enum vchiq_status ret = VCHIQ_SUCCESS;
2379 	char entity[16];
2380 	int *entity_uc;
2381 
2382 	if (!arm_state)
2383 		goto out;
2384 
2385 	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2386 
2387 	if (service) {
2388 		sprintf(entity, "%c%c%c%c:%03d",
2389 			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2390 			service->client_id);
2391 		entity_uc = &service->service_use_count;
2392 	} else {
2393 		sprintf(entity, "PEER:   ");
2394 		entity_uc = &arm_state->peer_use_count;
2395 	}
2396 
2397 	write_lock_bh(&arm_state->susp_res_lock);
2398 	if (!arm_state->videocore_use_count || !(*entity_uc)) {
2399 		/* Don't use BUG_ON - don't allow user thread to crash kernel */
2400 		WARN_ON(!arm_state->videocore_use_count);
2401 		WARN_ON(!(*entity_uc));
2402 		ret = VCHIQ_ERROR;
2403 		goto unlock;
2404 	}
2405 	--arm_state->videocore_use_count;
2406 	--(*entity_uc);
2407 
2408 	vchiq_log_trace(vchiq_susp_log_level,
2409 		"%s %s count %d, state count %d",
2410 		__func__, entity, *entity_uc,
2411 		arm_state->videocore_use_count);
2412 
2413 unlock:
2414 	write_unlock_bh(&arm_state->susp_res_lock);
2415 
2416 out:
2417 	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
2418 	return ret;
2419 }
2420 
/*
 * VideoCore has requested a use count.  Record it and wake the keep-alive
 * thread, which performs the matching vchiq_use_service() from process
 * context.  The counter must be bumped before completing the event.
 */
void
vchiq_on_remote_use(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
	atomic_inc(&arm_state->ka_use_count);
	complete(&arm_state->ka_evt);
}
2430 
/*
 * VideoCore has released a use count.  Record it and wake the keep-alive
 * thread, which performs the matching vchiq_release_service() from
 * process context.  The counter must be bumped before completing the event.
 */
void
vchiq_on_remote_release(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
	atomic_inc(&arm_state->ka_release_count);
	complete(&arm_state->ka_evt);
}
2440 
/* Convenience wrapper: take a use count on behalf of a service. */
enum vchiq_status
vchiq_use_service_internal(struct vchiq_service *service)
{
	return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
}
2446 
/* Convenience wrapper: drop a use count on behalf of a service. */
enum vchiq_status
vchiq_release_service_internal(struct vchiq_service *service)
{
	return vchiq_release_internal(service->state, service);
}
2452 
/* Accessor for the instance's debugfs node (used by vchiq_debugfs). */
struct vchiq_debugfs_node *
vchiq_instance_get_debugfs_node(struct vchiq_instance *instance)
{
	return &instance->debugfs_node;
}
2458 
2459 int
vchiq_instance_get_use_count(struct vchiq_instance * instance)2460 vchiq_instance_get_use_count(struct vchiq_instance *instance)
2461 {
2462 	struct vchiq_service *service;
2463 	int use_count = 0, i;
2464 
2465 	i = 0;
2466 	rcu_read_lock();
2467 	while ((service = __next_service_by_instance(instance->state,
2468 						     instance, &i)))
2469 		use_count += service->service_use_count;
2470 	rcu_read_unlock();
2471 	return use_count;
2472 }
2473 
/* Accessor: pid of the process that opened this instance. */
int
vchiq_instance_get_pid(struct vchiq_instance *instance)
{
	return instance->pid;
}
2479 
/* Accessor: the instance's trace flag (0 or 1). */
int
vchiq_instance_get_trace(struct vchiq_instance *instance)
{
	return instance->trace;
}
2485 
2486 void
vchiq_instance_set_trace(struct vchiq_instance * instance,int trace)2487 vchiq_instance_set_trace(struct vchiq_instance *instance, int trace)
2488 {
2489 	struct vchiq_service *service;
2490 	int i;
2491 
2492 	i = 0;
2493 	rcu_read_lock();
2494 	while ((service = __next_service_by_instance(instance->state,
2495 						     instance, &i)))
2496 		service->trace = trace;
2497 	rcu_read_unlock();
2498 	instance->trace = (trace != 0);
2499 }
2500 
2501 enum vchiq_status
vchiq_use_service(unsigned int handle)2502 vchiq_use_service(unsigned int handle)
2503 {
2504 	enum vchiq_status ret = VCHIQ_ERROR;
2505 	struct vchiq_service *service = find_service_by_handle(handle);
2506 
2507 	if (service) {
2508 		ret = vchiq_use_internal(service->state, service,
2509 				USE_TYPE_SERVICE);
2510 		unlock_service(service);
2511 	}
2512 	return ret;
2513 }
2514 EXPORT_SYMBOL(vchiq_use_service);
2515 
2516 enum vchiq_status
vchiq_release_service(unsigned int handle)2517 vchiq_release_service(unsigned int handle)
2518 {
2519 	enum vchiq_status ret = VCHIQ_ERROR;
2520 	struct vchiq_service *service = find_service_by_handle(handle);
2521 
2522 	if (service) {
2523 		ret = vchiq_release_internal(service->state, service);
2524 		unlock_service(service);
2525 	}
2526 	return ret;
2527 }
2528 EXPORT_SYMBOL(vchiq_release_service);
2529 
/*
 * Snapshot of one service's identity and use count, captured under the
 * suspend/resume lock so it can be logged after the lock is dropped.
 */
struct service_data_struct {
	int fourcc;	/* service fourcc identifier */
	int clientid;	/* client id of the service */
	int use_count;	/* service_use_count at snapshot time */
};
2535 
/*
 * Log the use counts of all active services, plus the peer and overall
 * VideoCore counts.  Data is snapshotted into a heap array under
 * susp_res_lock + RCU and logged only after both are dropped, so the
 * logging itself never runs with locks held.
 */
void
vchiq_dump_service_use_state(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	struct service_data_struct *service_data;
	int i, found = 0;
	/* If there's more than 64 services, only dump ones with
	 * non-zero counts */
	int only_nonzero = 0;
	static const char *nz = "<-- preventing suspend";

	int peer_count;
	int vc_use_count;
	int active_services;

	if (!arm_state)
		return;

	/* Snapshot buffer; silently give up on allocation failure. */
	service_data = kmalloc_array(MAX_SERVICES, sizeof(*service_data),
				     GFP_KERNEL);
	if (!service_data)
		return;

	read_lock_bh(&arm_state->susp_res_lock);
	peer_count = arm_state->peer_use_count;
	vc_use_count = arm_state->videocore_use_count;
	active_services = state->unused_service;
	if (active_services > MAX_SERVICES)
		only_nonzero = 1;

	rcu_read_lock();
	for (i = 0; i < active_services; i++) {
		struct vchiq_service *service_ptr =
			rcu_dereference(state->services[i]);

		if (!service_ptr)
			continue;

		if (only_nonzero && !service_ptr->service_use_count)
			continue;

		if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)
			continue;

		service_data[found].fourcc = service_ptr->base.fourcc;
		service_data[found].clientid = service_ptr->client_id;
		service_data[found].use_count = service_ptr->service_use_count;
		found++;
		if (found >= MAX_SERVICES)
			break;
	}
	rcu_read_unlock();

	read_unlock_bh(&arm_state->susp_res_lock);

	/* All locks dropped - safe to log at leisure now. */
	if (only_nonzero)
		vchiq_log_warning(vchiq_susp_log_level, "Too many active "
			"services (%d).  Only dumping up to first %d services "
			"with non-zero use-count", active_services, found);

	for (i = 0; i < found; i++) {
		vchiq_log_warning(vchiq_susp_log_level,
			"----- %c%c%c%c:%d service count %d %s",
			VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
			service_data[i].clientid,
			service_data[i].use_count,
			service_data[i].use_count ? nz : "");
	}
	vchiq_log_warning(vchiq_susp_log_level,
		"----- VCHIQ use count count %d", peer_count);
	vchiq_log_warning(vchiq_susp_log_level,
		"--- Overall vchiq instance use count %d", vc_use_count);

	kfree(service_data);
}
2611 
/*
 * Check that the service holds at least one use count (i.e. the caller has
 * balanced its use/release calls).  Returns VCHIQ_SUCCESS if it does;
 * otherwise logs the counts, dumps the overall use state, and returns
 * VCHIQ_ERROR.
 */
enum vchiq_status
vchiq_check_service(struct vchiq_service *service)
{
	struct vchiq_arm_state *arm_state;
	enum vchiq_status ret = VCHIQ_ERROR;

	if (!service || !service->state)
		goto out;

	vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);

	arm_state = vchiq_platform_get_arm_state(service->state);

	/* Only the count check itself needs the lock; logging runs unlocked. */
	read_lock_bh(&arm_state->susp_res_lock);
	if (service->service_use_count)
		ret = VCHIQ_SUCCESS;
	read_unlock_bh(&arm_state->susp_res_lock);

	if (ret == VCHIQ_ERROR) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s ERROR - %c%c%c%c:%d service count %d, "
			"state count %d", __func__,
			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
			service->client_id, service->service_use_count,
			arm_state->videocore_use_count);
		vchiq_dump_service_use_state(service->state);
	}
out:
	return ret;
}
2642 
/*
 * Connection-state notification from the core.  On the first transition to
 * CONNECTED, spawn the keep-alive thread; first_connect is tested and set
 * under susp_res_lock so the thread is created exactly once.
 */
void vchiq_platform_conn_state_changed(struct vchiq_state *state,
				       enum vchiq_connstate oldstate,
				       enum vchiq_connstate newstate)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	char threadname[16];

	vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
		get_conn_state_name(oldstate), get_conn_state_name(newstate));
	if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
		return;

	write_lock_bh(&arm_state->susp_res_lock);
	if (arm_state->first_connect) {
		/* Keep-alive thread already started on a prior connect. */
		write_unlock_bh(&arm_state->susp_res_lock);
		return;
	}

	arm_state->first_connect = 1;
	write_unlock_bh(&arm_state->susp_res_lock);
	snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
		 state->id);
	arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
					      (void *)state,
					      threadname);
	if (IS_ERR(arm_state->ka_thread)) {
		/* Non-fatal: VCHIQ still works, but keep-alives are lost. */
		vchiq_log_error(vchiq_susp_log_level,
				"vchiq: FATAL: couldn't create thread %s",
				threadname);
	} else {
		wake_up_process(arm_state->ka_thread);
	}
}
2676 
/* Devicetree match table; .data selects the per-SoC driver data. */
static const struct of_device_id vchiq_of_match[] = {
	{ .compatible = "brcm,bcm2835-vchiq", .data = &bcm2835_drvdata },
	{ .compatible = "brcm,bcm2836-vchiq", .data = &bcm2836_drvdata },
	{},
};
MODULE_DEVICE_TABLE(of, vchiq_of_match);
2683 
2684 static struct platform_device *
vchiq_register_child(struct platform_device * pdev,const char * name)2685 vchiq_register_child(struct platform_device *pdev, const char *name)
2686 {
2687 	struct platform_device_info pdevinfo;
2688 	struct platform_device *child;
2689 
2690 	memset(&pdevinfo, 0, sizeof(pdevinfo));
2691 
2692 	pdevinfo.parent = &pdev->dev;
2693 	pdevinfo.name = name;
2694 	pdevinfo.id = PLATFORM_DEVID_NONE;
2695 	pdevinfo.dma_mask = DMA_BIT_MASK(32);
2696 
2697 	child = platform_device_register_full(&pdevinfo);
2698 	if (IS_ERR(child)) {
2699 		dev_warn(&pdev->dev, "%s not registered\n", name);
2700 		child = NULL;
2701 	}
2702 
2703 	return child;
2704 }
2705 
/*
 * Platform probe: resolve per-SoC driver data, find the firmware node,
 * initialise the platform layer, then expose /dev/vchiq and register the
 * camera and audio child devices.
 */
static int vchiq_probe(struct platform_device *pdev)
{
	struct device_node *fw_node;
	const struct of_device_id *of_id;
	struct vchiq_drvdata *drvdata;
	struct device *vchiq_dev;
	int err;

	of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
	drvdata = (struct vchiq_drvdata *)of_id->data;
	if (!drvdata)
		return -EINVAL;

	/* The VPU is driven via the raspberrypi firmware mailbox interface. */
	fw_node = of_find_compatible_node(NULL, NULL,
					  "raspberrypi,bcm2835-firmware");
	if (!fw_node) {
		dev_err(&pdev->dev, "Missing firmware node\n");
		return -ENOENT;
	}

	/* Defer until the firmware driver has probed. */
	drvdata->fw = rpi_firmware_get(fw_node);
	of_node_put(fw_node);
	if (!drvdata->fw)
		return -EPROBE_DEFER;

	platform_set_drvdata(pdev, drvdata);

	err = vchiq_platform_init(pdev, &g_state);
	if (err)
		goto failed_platform_init;

	cdev_init(&vchiq_cdev, &vchiq_fops);
	vchiq_cdev.owner = THIS_MODULE;
	err = cdev_add(&vchiq_cdev, vchiq_devid, 1);
	if (err) {
		vchiq_log_error(vchiq_arm_log_level,
			"Unable to register device");
		goto failed_platform_init;
	}

	vchiq_dev = device_create(vchiq_class, &pdev->dev, vchiq_devid, NULL,
				  "vchiq");
	if (IS_ERR(vchiq_dev)) {
		err = PTR_ERR(vchiq_dev);
		goto failed_device_create;
	}

	vchiq_debugfs_init();

	vchiq_log_info(vchiq_arm_log_level,
		"vchiq: initialised - version %d (min %d), device %d.%d",
		VCHIQ_VERSION, VCHIQ_VERSION_MIN,
		MAJOR(vchiq_devid), MINOR(vchiq_devid));

	/* Child devices are best-effort; probe succeeds without them. */
	bcm2835_camera = vchiq_register_child(pdev, "bcm2835-camera");
	bcm2835_audio = vchiq_register_child(pdev, "bcm2835_audio");

	return 0;

	/*
	 * NOTE(review): these error paths do not undo vchiq_platform_init()
	 * or release the rpi_firmware reference obtained above - confirm
	 * whether explicit teardown (e.g. a firmware put) is required here.
	 */
failed_device_create:
	cdev_del(&vchiq_cdev);
failed_platform_init:
	vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
	return err;
}
2771 
/* Platform remove: tear down in the reverse order of vchiq_probe(). */
static int vchiq_remove(struct platform_device *pdev)
{
	platform_device_unregister(bcm2835_audio);
	platform_device_unregister(bcm2835_camera);
	vchiq_debugfs_deinit();
	device_destroy(vchiq_class, vchiq_devid);
	cdev_del(&vchiq_cdev);

	return 0;
}
2782 
/* Platform driver glue; bound via the devicetree match table above. */
static struct platform_driver vchiq_driver = {
	.driver = {
		.name = "bcm2835_vchiq",
		.of_match_table = vchiq_of_match,
	},
	.probe = vchiq_probe,
	.remove = vchiq_remove,
};
2791 
vchiq_driver_init(void)2792 static int __init vchiq_driver_init(void)
2793 {
2794 	int ret;
2795 
2796 	vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
2797 	if (IS_ERR(vchiq_class)) {
2798 		pr_err("Failed to create vchiq class\n");
2799 		return PTR_ERR(vchiq_class);
2800 	}
2801 
2802 	ret = alloc_chrdev_region(&vchiq_devid, 0, 1, DEVICE_NAME);
2803 	if (ret) {
2804 		pr_err("Failed to allocate vchiq's chrdev region\n");
2805 		goto class_destroy;
2806 	}
2807 
2808 	ret = platform_driver_register(&vchiq_driver);
2809 	if (ret) {
2810 		pr_err("Failed to register vchiq driver\n");
2811 		goto region_unregister;
2812 	}
2813 
2814 	return 0;
2815 
2816 region_unregister:
2817 	unregister_chrdev_region(vchiq_devid, 1);
2818 
2819 class_destroy:
2820 	class_destroy(vchiq_class);
2821 
2822 	return ret;
2823 }
2824 module_init(vchiq_driver_init);
2825 
/* Module exit: unwind vchiq_driver_init() in reverse order. */
static void __exit vchiq_driver_exit(void)
{
	platform_driver_unregister(&vchiq_driver);
	unregister_chrdev_region(vchiq_devid, 1);
	class_destroy(vchiq_class);
}
module_exit(vchiq_driver_exit);
2833 
/* Module metadata. */
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Videocore VCHIQ driver");
MODULE_AUTHOR("Broadcom Corporation");
2837