// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */

#include <linux/types.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/bitops.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/sched/signal.h>

#include "vchiq_core.h"

#define VCHIQ_SLOT_HANDLER_STACK 8192

#define HANDLE_STATE_SHIFT 12

#define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
#define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
#define SLOT_INDEX_FROM_DATA(state, data) \
	(((unsigned int)((char *)data - (char *)state->slot_data)) / \
	VCHIQ_SLOT_SIZE)
#define SLOT_INDEX_FROM_INFO(state, info) \
	((unsigned int)(info - state->slot_info))
#define SLOT_QUEUE_INDEX_FROM_POS(pos) \
	((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))

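/*
 * BULK_INDEX() maps a free-running bulk queue position onto an entry of the
 * per-service bulk array. VCHIQ_NUM_SERVICE_BULKS is asserted below to be a
 * power of two, so the mask is equivalent to a modulo; e.g. with a value of
 * 4, positions 0..5 map to indices 0, 1, 2, 3, 0, 1.
 */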
#define BULK_INDEX(x) (x & (VCHIQ_NUM_SERVICE_BULKS - 1))

#define SRVTRACE_LEVEL(srv) \
	(((srv) && (srv)->trace) ? VCHIQ_LOG_TRACE : vchiq_core_msg_log_level)
#define SRVTRACE_ENABLED(srv, lev) \
	(((srv) && (srv)->trace) || (vchiq_core_msg_log_level >= (lev)))

struct vchiq_open_payload {
	int fourcc;
	int client_id;
	short version;
	short version_min;
};

struct vchiq_openack_payload {
	short version;
};

enum {
	QMFLAGS_IS_BLOCKING     = BIT(0),
	QMFLAGS_NO_MUTEX_LOCK   = BIT(1),
	QMFLAGS_NO_MUTEX_UNLOCK = BIT(2)
};

/* we require this for consistency between endpoints */
vchiq_static_assert(sizeof(struct vchiq_header) == 8);
vchiq_static_assert(IS_POW2(sizeof(struct vchiq_header)));
vchiq_static_assert(IS_POW2(VCHIQ_NUM_CURRENT_BULKS));
vchiq_static_assert(IS_POW2(VCHIQ_NUM_SERVICE_BULKS));
vchiq_static_assert(IS_POW2(VCHIQ_MAX_SERVICES));
vchiq_static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);

/* Run time control of log level, based on KERN_XXX level. */
int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;

DEFINE_SPINLOCK(bulk_waiter_spinlock);
static DEFINE_SPINLOCK(quota_spinlock);

struct vchiq_state *vchiq_states[VCHIQ_MAX_STATES];
static unsigned int handle_seq;

static const char *const srvstate_names[] = {
	"FREE",
	"HIDDEN",
	"LISTENING",
	"OPENING",
	"OPEN",
	"OPENSYNC",
	"CLOSESENT",
	"CLOSERECVD",
	"CLOSEWAIT",
	"CLOSED"
};

static const char *const reason_names[] = {
	"SERVICE_OPENED",
	"SERVICE_CLOSED",
	"MESSAGE_AVAILABLE",
	"BULK_TRANSMIT_DONE",
	"BULK_RECEIVE_DONE",
	"BULK_TRANSMIT_ABORTED",
	"BULK_RECEIVE_ABORTED"
};

static const char *const conn_state_names[] = {
	"DISCONNECTED",
	"CONNECTING",
	"CONNECTED",
	"PAUSING",
	"PAUSE_SENT",
	"PAUSED",
	"RESUMING",
	"PAUSE_TIMEOUT",
	"RESUME_TIMEOUT"
};

static void
release_message_sync(struct vchiq_state *state, struct vchiq_header *header);

static const char *msg_type_str(unsigned int msg_type)
{
	switch (msg_type) {
	case VCHIQ_MSG_PADDING:       return "PADDING";
	case VCHIQ_MSG_CONNECT:       return "CONNECT";
	case VCHIQ_MSG_OPEN:          return "OPEN";
	case VCHIQ_MSG_OPENACK:       return "OPENACK";
	case VCHIQ_MSG_CLOSE:         return "CLOSE";
	case VCHIQ_MSG_DATA:          return "DATA";
	case VCHIQ_MSG_BULK_RX:       return "BULK_RX";
	case VCHIQ_MSG_BULK_TX:       return "BULK_TX";
	case VCHIQ_MSG_BULK_RX_DONE:  return "BULK_RX_DONE";
	case VCHIQ_MSG_BULK_TX_DONE:  return "BULK_TX_DONE";
	case VCHIQ_MSG_PAUSE:         return "PAUSE";
	case VCHIQ_MSG_RESUME:        return "RESUME";
	case VCHIQ_MSG_REMOTE_USE:    return "REMOTE_USE";
	case VCHIQ_MSG_REMOTE_RELEASE:      return "REMOTE_RELEASE";
	case VCHIQ_MSG_REMOTE_USE_ACTIVE:   return "REMOTE_USE_ACTIVE";
	}
	return "???";
}

static inline void
vchiq_set_service_state(struct vchiq_service *service, int newstate)
{
	vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
		service->state->id, service->localport,
		srvstate_names[service->srvstate],
		srvstate_names[newstate]);
	service->srvstate = newstate;
}

struct vchiq_service *
find_service_by_handle(unsigned int handle)
{
	struct vchiq_service *service;

	rcu_read_lock();
	service = handle_to_service(handle);
	if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
	    service->handle == handle &&
	    kref_get_unless_zero(&service->ref_count)) {
		service = rcu_pointer_handoff(service);
		rcu_read_unlock();
		return service;
	}
	rcu_read_unlock();
	vchiq_log_info(vchiq_core_log_level,
		       "Invalid service handle 0x%x", handle);
	return NULL;
}

struct vchiq_service *
find_service_by_port(struct vchiq_state *state, int localport)
{

	if ((unsigned int)localport <= VCHIQ_PORT_MAX) {
		struct vchiq_service *service;

		rcu_read_lock();
		service = rcu_dereference(state->services[localport]);
		if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
		    kref_get_unless_zero(&service->ref_count)) {
			service = rcu_pointer_handoff(service);
			rcu_read_unlock();
			return service;
		}
		rcu_read_unlock();
	}
	vchiq_log_info(vchiq_core_log_level,
		       "Invalid port %d", localport);
	return NULL;
}

struct vchiq_service *
find_service_for_instance(struct vchiq_instance *instance,
	unsigned int handle)
{
	struct vchiq_service *service;

	rcu_read_lock();
	service = handle_to_service(handle);
	if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
	    service->handle == handle &&
	    service->instance == instance &&
	    kref_get_unless_zero(&service->ref_count)) {
		service = rcu_pointer_handoff(service);
		rcu_read_unlock();
		return service;
	}
	rcu_read_unlock();
	vchiq_log_info(vchiq_core_log_level,
		       "Invalid service handle 0x%x", handle);
	return NULL;
}

struct vchiq_service *
find_closed_service_for_instance(struct vchiq_instance *instance,
	unsigned int handle)
{
	struct vchiq_service *service;

	rcu_read_lock();
	service = handle_to_service(handle);
	if (service &&
	    (service->srvstate == VCHIQ_SRVSTATE_FREE ||
	     service->srvstate == VCHIQ_SRVSTATE_CLOSED) &&
	    service->handle == handle &&
	    service->instance == instance &&
	    kref_get_unless_zero(&service->ref_count)) {
		service = rcu_pointer_handoff(service);
		rcu_read_unlock();
		return service;
	}
	rcu_read_unlock();
	vchiq_log_info(vchiq_core_log_level,
		       "Invalid service handle 0x%x", handle);
	return NULL;
}

struct vchiq_service *
__next_service_by_instance(struct vchiq_state *state,
			   struct vchiq_instance *instance,
			   int *pidx)
{
	struct vchiq_service *service = NULL;
	int idx = *pidx;

	while (idx < state->unused_service) {
		struct vchiq_service *srv;

		srv = rcu_dereference(state->services[idx++]);
		if (srv && srv->srvstate != VCHIQ_SRVSTATE_FREE &&
		    srv->instance == instance) {
			service = srv;
			break;
		}
	}

	*pidx = idx;
	return service;
}

struct vchiq_service *
next_service_by_instance(struct vchiq_state *state,
			 struct vchiq_instance *instance,
			 int *pidx)
{
	struct vchiq_service *service;

	rcu_read_lock();
	while (1) {
		service = __next_service_by_instance(state, instance, pidx);
		if (!service)
			break;
		if (kref_get_unless_zero(&service->ref_count)) {
			service = rcu_pointer_handoff(service);
			break;
		}
	}
	rcu_read_unlock();
	return service;
}

void
lock_service(struct vchiq_service *service)
{
	if (!service) {
		WARN(1, "%s service is NULL\n", __func__);
		return;
	}
	kref_get(&service->ref_count);
}

static void service_release(struct kref *kref)
{
	struct vchiq_service *service =
		container_of(kref, struct vchiq_service, ref_count);
	struct vchiq_state *state = service->state;

	WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
	rcu_assign_pointer(state->services[service->localport], NULL);
	if (service->userdata_term)
		service->userdata_term(service->base.userdata);
	kfree_rcu(service, rcu);
}

void
unlock_service(struct vchiq_service *service)
{
	if (!service) {
		WARN(1, "%s: service is NULL\n", __func__);
		return;
	}
	kref_put(&service->ref_count, service_release);
}

int
vchiq_get_client_id(unsigned int handle)
{
	struct vchiq_service *service;
	int id;

	rcu_read_lock();
	service = handle_to_service(handle);
	id = service ? service->client_id : 0;
	rcu_read_unlock();
	return id;
}

void *
vchiq_get_service_userdata(unsigned int handle)
{
	void *userdata;
	struct vchiq_service *service;

	rcu_read_lock();
	service = handle_to_service(handle);
	userdata = service ? service->base.userdata : NULL;
	rcu_read_unlock();
	return userdata;
}
EXPORT_SYMBOL(vchiq_get_service_userdata);

static void
mark_service_closing_internal(struct vchiq_service *service, int sh_thread)
{
	struct vchiq_state *state = service->state;
	struct vchiq_service_quota *service_quota;

	service->closing = 1;

	/* Synchronise with other threads. */
	mutex_lock(&state->recycle_mutex);
	mutex_unlock(&state->recycle_mutex);
	if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
		/* If we're pausing then the slot_mutex is held until resume
		 * by the slot handler.  Therefore don't try to acquire this
		 * mutex if we're the slot handler and in the pause sent state.
		 * We don't need to in this case anyway. */
		mutex_lock(&state->slot_mutex);
		mutex_unlock(&state->slot_mutex);
	}

	/* Unblock any sending thread. */
	service_quota = &state->service_quotas[service->localport];
	complete(&service_quota->quota_event);
}

static void
mark_service_closing(struct vchiq_service *service)
{
	mark_service_closing_internal(service, 0);
}

static inline enum vchiq_status
make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
		      struct vchiq_header *header, void *bulk_userdata)
{
	enum vchiq_status status;

	vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)",
		service->state->id, service->localport, reason_names[reason],
		header, bulk_userdata);
	status = service->base.callback(reason, header, service->handle,
		bulk_userdata);
	if (status == VCHIQ_ERROR) {
		vchiq_log_warning(vchiq_core_log_level,
			"%d: ignoring ERROR from callback to service %x",
			service->state->id, service->handle);
		status = VCHIQ_SUCCESS;
	}

	if (reason != VCHIQ_MESSAGE_AVAILABLE)
		vchiq_release_message(service->handle, header);

	return status;
}

inline void
vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate)
{
	enum vchiq_connstate oldstate = state->conn_state;

	vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id,
		conn_state_names[oldstate],
		conn_state_names[newstate]);
	state->conn_state = newstate;
	vchiq_platform_conn_state_changed(state, oldstate, newstate);
}

static inline void
remote_event_create(wait_queue_head_t *wq, struct remote_event *event)
{
	event->armed = 0;
	/* Don't clear the 'fired' flag because it may already have been set
	** by the other side. */
	init_waitqueue_head(wq);
}

/*
 * All the event waiting routines in VCHIQ used a custom semaphore
 * implementation that filtered most signals. This achieved a behaviour
 * similar to the "killable" family of functions. While cleaning up this code
 * all the routines were switched to the "interruptible" family of functions,
 * as the former was deemed unjustified and using "killable" put all of
 * VCHIQ's threads in the D state.
 */
static inline int
remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
{
	if (!event->fired) {
		event->armed = 1;
		dsb(sy);
		if (wait_event_interruptible(*wq, event->fired)) {
			event->armed = 0;
			return 0;
		}
		event->armed = 0;
		wmb();
	}

	event->fired = 0;
	return 1;
}

static inline void
remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event)
{
	event->fired = 1;
	event->armed = 0;
	wake_up_all(wq);
}

static inline void
remote_event_poll(wait_queue_head_t *wq, struct remote_event *event)
{
	if (event->fired && event->armed)
		remote_event_signal_local(wq, event);
}

void
remote_event_pollall(struct vchiq_state *state)
{
	remote_event_poll(&state->sync_trigger_event, &state->local->sync_trigger);
	remote_event_poll(&state->sync_release_event, &state->local->sync_release);
	remote_event_poll(&state->trigger_event, &state->local->trigger);
	remote_event_poll(&state->recycle_event, &state->local->recycle);
}

/* Round up message sizes so that any space at the end of a slot is always big
** enough for a header. This relies on header size being a power of two, which
** has been verified earlier by a static assertion. */

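/*
 * For example, with the 8-byte header asserted above, calc_stride(10)
 * reserves 10 + 8 = 18 bytes rounded up to 24, so every message starts on an
 * 8-byte boundary and any trailing gap in a slot can still hold a header.
 */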
static inline size_t
calc_stride(size_t size)
{
	/* Allow room for the header */
	size += sizeof(struct vchiq_header);

	/* Round up */
	return (size + sizeof(struct vchiq_header) - 1) &
		~(sizeof(struct vchiq_header) - 1);
}

/* Called by the slot handler thread */
static struct vchiq_service *
get_listening_service(struct vchiq_state *state, int fourcc)
{
	int i;

	WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);

	rcu_read_lock();
	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;

		service = rcu_dereference(state->services[i]);
		if (service &&
		    service->public_fourcc == fourcc &&
		    (service->srvstate == VCHIQ_SRVSTATE_LISTENING ||
		     (service->srvstate == VCHIQ_SRVSTATE_OPEN &&
		      service->remoteport == VCHIQ_PORT_FREE)) &&
		    kref_get_unless_zero(&service->ref_count)) {
			service = rcu_pointer_handoff(service);
			rcu_read_unlock();
			return service;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/* Called by the slot handler thread */
static struct vchiq_service *
get_connected_service(struct vchiq_state *state, unsigned int port)
{
	int i;

	rcu_read_lock();
	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service =
			rcu_dereference(state->services[i]);

		if (service && service->srvstate == VCHIQ_SRVSTATE_OPEN &&
		    service->remoteport == port &&
		    kref_get_unless_zero(&service->ref_count)) {
			service = rcu_pointer_handoff(service);
			rcu_read_unlock();
			return service;
		}
	}
	rcu_read_unlock();
	return NULL;
}

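/*
 * Record that a service needs attention from the slot handler. The request is
 * a bit in service->poll_flags plus a bit in the poll_services bitmap - e.g.
 * a poll for local port 37 sets bit 5 of poll_services[1] (37 >> 5 == 1,
 * 37 & 0x1f == 5) - which poll_services() later clears and acts upon.
 */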
inline void
request_poll(struct vchiq_state *state, struct vchiq_service *service,
	     int poll_type)
{
	u32 value;

	if (service) {
		do {
			value = atomic_read(&service->poll_flags);
		} while (atomic_cmpxchg(&service->poll_flags, value,
			value | BIT(poll_type)) != value);

		do {
			value = atomic_read(&state->poll_services[
				service->localport>>5]);
		} while (atomic_cmpxchg(
			&state->poll_services[service->localport>>5],
			value, value | BIT(service->localport & 0x1f))
			!= value);
	}

	state->poll_needed = 1;
	wmb();

	/* ... and ensure the slot handler runs. */
	remote_event_signal_local(&state->trigger_event, &state->local->trigger);
}

/* Called from queue_message, by the slot handler and application threads,
** with slot_mutex held */
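/*
 * Roughly: if the requested stride does not fit in the space left in the
 * current slot, that remainder becomes a PADDING message and the new message
 * is placed at the start of the next slot, waiting for a free slot first if
 * necessary (and if is_blocking permits).
 */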
static struct vchiq_header *
reserve_space(struct vchiq_state *state, size_t space, int is_blocking)
{
	struct vchiq_shared_state *local = state->local;
	int tx_pos = state->local_tx_pos;
	int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);

	if (space > slot_space) {
		struct vchiq_header *header;
		/* Fill the remaining space with padding */
		WARN_ON(!state->tx_data);
		header = (struct vchiq_header *)
			(state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
		header->msgid = VCHIQ_MSGID_PADDING;
		header->size = slot_space - sizeof(struct vchiq_header);

		tx_pos += slot_space;
	}

	/* If necessary, get the next slot. */
	if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
		int slot_index;

		/* If there is no free slot... */

		if (!try_wait_for_completion(&state->slot_available_event)) {
			/* ...wait for one. */

			VCHIQ_STATS_INC(state, slot_stalls);

			/* But first, flush through the last slot. */
			state->local_tx_pos = tx_pos;
			local->tx_pos = tx_pos;
			remote_event_signal(&state->remote->trigger);

			if (!is_blocking ||
				(wait_for_completion_interruptible(
				&state->slot_available_event)))
				return NULL; /* No space available */
		}

		if (tx_pos == (state->slot_queue_available * VCHIQ_SLOT_SIZE)) {
			complete(&state->slot_available_event);
			pr_warn("%s: invalid tx_pos: %d\n", __func__, tx_pos);
			return NULL;
		}

		slot_index = local->slot_queue[
			SLOT_QUEUE_INDEX_FROM_POS(tx_pos) &
			VCHIQ_SLOT_QUEUE_MASK];
		state->tx_data =
			(char *)SLOT_DATA_FROM_INDEX(state, slot_index);
	}

	state->local_tx_pos = tx_pos + space;

	return (struct vchiq_header *)(state->tx_data +
						(tx_pos & VCHIQ_SLOT_MASK));
}

/* Called by the recycle thread. */
static void
process_free_queue(struct vchiq_state *state, BITSET_T *service_found,
		   size_t length)
{
	struct vchiq_shared_state *local = state->local;
	int slot_queue_available;

	/* Find slots which have been freed by the other side, and return them
	** to the available queue. */
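	/* Each returned slot is scanned for DATA messages so that the owning
	** services' message and slot use counts can be decremented; threads
	** blocked in queue_message() on quota_event or data_quota_event are
	** woken once a count drops back below its quota. */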
	slot_queue_available = state->slot_queue_available;

	/*
	 * Use a memory barrier to ensure that any state that may have been
	 * modified by another thread is not masked by stale prefetched
	 * values.
	 */
	mb();

	while (slot_queue_available != local->slot_queue_recycle) {
		unsigned int pos;
		int slot_index = local->slot_queue[slot_queue_available++ &
			VCHIQ_SLOT_QUEUE_MASK];
		char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
		int data_found = 0;

		/*
		 * Beware of the address dependency - data is calculated
		 * using an index written by the other side.
		 */
		rmb();

		vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%pK %x %x",
			state->id, slot_index, data,
			local->slot_queue_recycle, slot_queue_available);

		/* Initialise the bitmask for services which have used this
		** slot */
		memset(service_found, 0, length);

		pos = 0;

		while (pos < VCHIQ_SLOT_SIZE) {
			struct vchiq_header *header =
				(struct vchiq_header *)(data + pos);
			int msgid = header->msgid;

			if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
				int port = VCHIQ_MSG_SRCPORT(msgid);
				struct vchiq_service_quota *service_quota =
					&state->service_quotas[port];
				int count;

				spin_lock(&quota_spinlock);
				count = service_quota->message_use_count;
				if (count > 0)
					service_quota->message_use_count =
						count - 1;
				spin_unlock(&quota_spinlock);

				if (count == service_quota->message_quota)
					/* Signal the service that it
					** has dropped below its quota
					*/
					complete(&service_quota->quota_event);
				else if (count == 0) {
					vchiq_log_error(vchiq_core_log_level,
						"service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
						port,
						service_quota->message_use_count,
						header, msgid, header->msgid,
						header->size);
					WARN(1, "invalid message use count\n");
				}
				if (!BITSET_IS_SET(service_found, port)) {
					/* Set the found bit for this service */
					BITSET_SET(service_found, port);

					spin_lock(&quota_spinlock);
					count = service_quota->slot_use_count;
					if (count > 0)
						service_quota->slot_use_count =
							count - 1;
					spin_unlock(&quota_spinlock);

					if (count > 0) {
						/* Signal the service in case
						** it has dropped below its
						** quota */
						complete(&service_quota->quota_event);
						vchiq_log_trace(
							vchiq_core_log_level,
							"%d: pfq:%d %x@%pK - slot_use->%d",
							state->id, port,
							header->size, header,
							count - 1);
					} else {
						vchiq_log_error(
							vchiq_core_log_level,
								"service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
							port, count, header,
							msgid, header->msgid,
							header->size);
						WARN(1, "bad slot use count\n");
					}
				}

				data_found = 1;
			}

			pos += calc_stride(header->size);
			if (pos > VCHIQ_SLOT_SIZE) {
				vchiq_log_error(vchiq_core_log_level,
					"pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
					pos, header, msgid, header->msgid,
					header->size);
				WARN(1, "invalid slot position\n");
			}
		}

		if (data_found) {
			int count;

			spin_lock(&quota_spinlock);
			count = state->data_use_count;
			if (count > 0)
				state->data_use_count =
					count - 1;
			spin_unlock(&quota_spinlock);
			if (count == state->data_quota)
				complete(&state->data_quota_event);
		}

		/*
		 * Don't allow the slot to be reused until we are no
		 * longer interested in it.
		 */
		mb();

		state->slot_queue_available = slot_queue_available;
		complete(&state->slot_available_event);
	}
}

static ssize_t
memcpy_copy_callback(
	void *context, void *dest,
	size_t offset, size_t maxsize)
{
	memcpy(dest + offset, context + offset, maxsize);
	return maxsize;
}

static ssize_t
copy_message_data(
	ssize_t (*copy_callback)(void *context, void *dest,
				 size_t offset, size_t maxsize),
	void *context,
	void *dest,
	size_t size)
{
	size_t pos = 0;

	while (pos < size) {
		ssize_t callback_result;
		size_t max_bytes = size - pos;

		callback_result =
			copy_callback(context, dest + pos,
				      pos, max_bytes);

		if (callback_result < 0)
			return callback_result;

		if (!callback_result)
			return -EIO;

		if (callback_result > max_bytes)
			return -EIO;

		pos += callback_result;
	}

	return size;
}

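/*
 * For example, callers in this file pass memcpy_copy_callback with the
 * payload as the context, so the whole payload is memcpy'd into header->data
 * in a single iteration of the loop above.
 */
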
/* Called by the slot handler and application threads */
static enum vchiq_status
queue_message(struct vchiq_state *state, struct vchiq_service *service,
	      int msgid,
	      ssize_t (*copy_callback)(void *context, void *dest,
				       size_t offset, size_t maxsize),
	      void *context, size_t size, int flags)
{
	struct vchiq_shared_state *local;
	struct vchiq_service_quota *service_quota = NULL;
	struct vchiq_header *header;
	int type = VCHIQ_MSG_TYPE(msgid);

	size_t stride;

	local = state->local;

	stride = calc_stride(size);

	WARN_ON(!(stride <= VCHIQ_SLOT_SIZE));

	if (!(flags & QMFLAGS_NO_MUTEX_LOCK) &&
	    mutex_lock_killable(&state->slot_mutex))
		return VCHIQ_RETRY;

	if (type == VCHIQ_MSG_DATA) {
		int tx_end_index;

		if (!service) {
			WARN(1, "%s: service is NULL\n", __func__);
			mutex_unlock(&state->slot_mutex);
			return VCHIQ_ERROR;
		}

		WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
				 QMFLAGS_NO_MUTEX_UNLOCK));

		if (service->closing) {
			/* The service has been closed */
			mutex_unlock(&state->slot_mutex);
			return VCHIQ_ERROR;
		}

		service_quota = &state->service_quotas[service->localport];

		spin_lock(&quota_spinlock);

		/* Ensure this service doesn't use more than its quota of
		** messages or slots */
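		/* For example, once this service has message_quota
		** unacknowledged DATA messages outstanding, or has used up
		** its slot quota, the second loop below waits on quota_event
		** until process_free_queue() accounts for some of them. */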
		tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
			state->local_tx_pos + stride - 1);

		/* Ensure data messages don't use more than their quota of
		** slots */
		while ((tx_end_index != state->previous_data_index) &&
			(state->data_use_count == state->data_quota)) {
			VCHIQ_STATS_INC(state, data_stalls);
			spin_unlock(&quota_spinlock);
			mutex_unlock(&state->slot_mutex);

			if (wait_for_completion_interruptible(
						&state->data_quota_event))
				return VCHIQ_RETRY;

			mutex_lock(&state->slot_mutex);
			spin_lock(&quota_spinlock);
			tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
				state->local_tx_pos + stride - 1);
			if ((tx_end_index == state->previous_data_index) ||
				(state->data_use_count < state->data_quota)) {
				/* Pass the signal on to other waiters */
				complete(&state->data_quota_event);
				break;
			}
		}

		while ((service_quota->message_use_count ==
				service_quota->message_quota) ||
			((tx_end_index != service_quota->previous_tx_index) &&
			(service_quota->slot_use_count ==
				service_quota->slot_quota))) {
			spin_unlock(&quota_spinlock);
			vchiq_log_trace(vchiq_core_log_level,
				"%d: qm:%d %s,%zx - quota stall "
				"(msg %d, slot %d)",
				state->id, service->localport,
				msg_type_str(type), size,
				service_quota->message_use_count,
				service_quota->slot_use_count);
			VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
			mutex_unlock(&state->slot_mutex);
			if (wait_for_completion_interruptible(
						&service_quota->quota_event))
				return VCHIQ_RETRY;
			if (service->closing)
				return VCHIQ_ERROR;
			if (mutex_lock_killable(&state->slot_mutex))
				return VCHIQ_RETRY;
			if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
				/* The service has been closed */
				mutex_unlock(&state->slot_mutex);
				return VCHIQ_ERROR;
			}
			spin_lock(&quota_spinlock);
			tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
				state->local_tx_pos + stride - 1);
		}

		spin_unlock(&quota_spinlock);
	}

	header = reserve_space(state, stride, flags & QMFLAGS_IS_BLOCKING);

	if (!header) {
		if (service)
			VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
		/* In the event of a failure, return the mutex to the
		   state it was in */
		if (!(flags & QMFLAGS_NO_MUTEX_LOCK))
			mutex_unlock(&state->slot_mutex);
		return VCHIQ_RETRY;
	}

	if (type == VCHIQ_MSG_DATA) {
		ssize_t callback_result;
		int tx_end_index;
		int slot_use_count;

		vchiq_log_info(vchiq_core_log_level,
			"%d: qm %s@%pK,%zx (%d->%d)",
			state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
			header, size, VCHIQ_MSG_SRCPORT(msgid),
			VCHIQ_MSG_DSTPORT(msgid));

		WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
				 QMFLAGS_NO_MUTEX_UNLOCK));

		callback_result =
			copy_message_data(copy_callback, context,
					  header->data, size);

		if (callback_result < 0) {
			mutex_unlock(&state->slot_mutex);
			VCHIQ_SERVICE_STATS_INC(service,
						error_count);
			return VCHIQ_ERROR;
		}

		if (SRVTRACE_ENABLED(service,
				     VCHIQ_LOG_INFO))
			vchiq_log_dump_mem("Sent", 0,
					   header->data,
					   min((size_t)16,
					       (size_t)callback_result));

		spin_lock(&quota_spinlock);
		service_quota->message_use_count++;

		tx_end_index =
			SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);

		/* If this transmission can't fit in the last slot used by any
		** service, the data_use_count must be increased. */
		if (tx_end_index != state->previous_data_index) {
			state->previous_data_index = tx_end_index;
			state->data_use_count++;
		}

		/* If this isn't the same slot last used by this service,
		** the service's slot_use_count must be increased. */
		if (tx_end_index != service_quota->previous_tx_index) {
			service_quota->previous_tx_index = tx_end_index;
			slot_use_count = ++service_quota->slot_use_count;
		} else {
			slot_use_count = 0;
		}

		spin_unlock(&quota_spinlock);

		if (slot_use_count)
			vchiq_log_trace(vchiq_core_log_level,
				"%d: qm:%d %s,%zx - slot_use->%d (hdr %p)",
				state->id, service->localport,
				msg_type_str(VCHIQ_MSG_TYPE(msgid)), size,
				slot_use_count, header);

		VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
		VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
	} else {
		vchiq_log_info(vchiq_core_log_level,
			"%d: qm %s@%pK,%zx (%d->%d)", state->id,
			msg_type_str(VCHIQ_MSG_TYPE(msgid)),
			header, size, VCHIQ_MSG_SRCPORT(msgid),
			VCHIQ_MSG_DSTPORT(msgid));
		if (size != 0) {
			/* It is assumed for now that this code path
			 * only happens from calls inside this file.
			 *
			 * External callers are through the vchiq_queue_message
			 * path which always sets the type to be VCHIQ_MSG_DATA
			 *
			 * At first glance this appears to be correct but
			 * more review is needed.
			 */
			copy_message_data(copy_callback, context,
					  header->data, size);
		}
		VCHIQ_STATS_INC(state, ctrl_tx_count);
	}

	header->msgid = msgid;
	header->size = size;

	{
		int svc_fourcc;

		svc_fourcc = service
			? service->base.fourcc
			: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');

		vchiq_log_info(SRVTRACE_LEVEL(service),
			"Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%zu",
			msg_type_str(VCHIQ_MSG_TYPE(msgid)),
			VCHIQ_MSG_TYPE(msgid),
			VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
			VCHIQ_MSG_SRCPORT(msgid),
			VCHIQ_MSG_DSTPORT(msgid),
			size);
	}

	/* Make sure the new header is visible to the peer. */
	wmb();

	/* Make the new tx_pos visible to the peer. */
	local->tx_pos = state->local_tx_pos;
	wmb();

	if (service && (type == VCHIQ_MSG_CLOSE))
		vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);

	if (!(flags & QMFLAGS_NO_MUTEX_UNLOCK))
		mutex_unlock(&state->slot_mutex);

	remote_event_signal(&state->remote->trigger);

	return VCHIQ_SUCCESS;
}

/* Called by the slot handler and application threads */
static enum vchiq_status
queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
		   int msgid,
		   ssize_t (*copy_callback)(void *context, void *dest,
					    size_t offset, size_t maxsize),
		   void *context, int size, int is_blocking)
{
	struct vchiq_shared_state *local;
	struct vchiq_header *header;
	ssize_t callback_result;

	local = state->local;

	if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME &&
	    mutex_lock_killable(&state->sync_mutex))
		return VCHIQ_RETRY;

	remote_event_wait(&state->sync_release_event, &local->sync_release);

	rmb();

	header = (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
		local->slot_sync);

	{
		int oldmsgid = header->msgid;

		if (oldmsgid != VCHIQ_MSGID_PADDING)
			vchiq_log_error(vchiq_core_log_level,
				"%d: qms - msgid %x, not PADDING",
				state->id, oldmsgid);
	}

	vchiq_log_info(vchiq_sync_log_level,
		       "%d: qms %s@%pK,%x (%d->%d)", state->id,
		       msg_type_str(VCHIQ_MSG_TYPE(msgid)),
		       header, size, VCHIQ_MSG_SRCPORT(msgid),
		       VCHIQ_MSG_DSTPORT(msgid));

	callback_result =
		copy_message_data(copy_callback, context,
				  header->data, size);

	if (callback_result < 0) {
		mutex_unlock(&state->slot_mutex);
		VCHIQ_SERVICE_STATS_INC(service,
					error_count);
		return VCHIQ_ERROR;
	}

	if (service) {
		if (SRVTRACE_ENABLED(service,
				     VCHIQ_LOG_INFO))
			vchiq_log_dump_mem("Sent", 0,
					   header->data,
					   min((size_t)16,
					       (size_t)callback_result));

		VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
		VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
	} else {
		VCHIQ_STATS_INC(state, ctrl_tx_count);
	}

	header->size = size;
	header->msgid = msgid;

	if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
		int svc_fourcc;

		svc_fourcc = service
			? service->base.fourcc
			: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');

		vchiq_log_trace(vchiq_sync_log_level,
			"Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
			msg_type_str(VCHIQ_MSG_TYPE(msgid)),
			VCHIQ_MSG_TYPE(msgid),
			VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
			VCHIQ_MSG_SRCPORT(msgid),
			VCHIQ_MSG_DSTPORT(msgid),
			size);
	}

	remote_event_signal(&state->remote->sync_trigger);

	if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
		mutex_unlock(&state->sync_mutex);

	return VCHIQ_SUCCESS;
}

static inline void
claim_slot(struct vchiq_slot_info *slot)
{
	slot->use_count++;
}

static void
release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
	     struct vchiq_header *header, struct vchiq_service *service)
{
	int release_count;

	mutex_lock(&state->recycle_mutex);

	if (header) {
		int msgid = header->msgid;

		if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) ||
			(service && service->closing)) {
			mutex_unlock(&state->recycle_mutex);
			return;
		}

		/* Rewrite the message header to prevent a double
		** release */
		header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
	}

	release_count = slot_info->release_count;
	slot_info->release_count = ++release_count;

	if (release_count == slot_info->use_count) {
		int slot_queue_recycle;
		/* Add to the freed queue */

		/* A read barrier is necessary here to prevent speculative
		** fetches of remote->slot_queue_recycle from overtaking the
		** mutex. */
		rmb();

		slot_queue_recycle = state->remote->slot_queue_recycle;
		state->remote->slot_queue[slot_queue_recycle &
			VCHIQ_SLOT_QUEUE_MASK] =
			SLOT_INDEX_FROM_INFO(state, slot_info);
		state->remote->slot_queue_recycle = slot_queue_recycle + 1;
		vchiq_log_info(vchiq_core_log_level,
			"%d: %s %d - recycle->%x", state->id, __func__,
			SLOT_INDEX_FROM_INFO(state, slot_info),
			state->remote->slot_queue_recycle);

		/* A write barrier is necessary, but remote_event_signal
		** contains one. */
		remote_event_signal(&state->remote->recycle);
	}

	mutex_unlock(&state->recycle_mutex);
}

/* Called by the slot handler - don't hold the bulk mutex */
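/*
 * Walk the completed bulk entries between 'remove' and the snapshot of
 * 'process', updating statistics and either completing a blocking waiter or
 * delivering a BULK_*_DONE/ABORTED callback for each one. If a callback
 * returns VCHIQ_RETRY and retry_poll is set, a poll is requested so the
 * remainder is handled later.
 */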
static enum vchiq_status
notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
	     int retry_poll)
{
	enum vchiq_status status = VCHIQ_SUCCESS;

	vchiq_log_trace(vchiq_core_log_level,
		"%d: nb:%d %cx - p=%x rn=%x r=%x",
		service->state->id, service->localport,
		(queue == &service->bulk_tx) ? 't' : 'r',
		queue->process, queue->remote_notify, queue->remove);

	queue->remote_notify = queue->process;

	if (status == VCHIQ_SUCCESS) {
		while (queue->remove != queue->remote_notify) {
			struct vchiq_bulk *bulk =
				&queue->bulks[BULK_INDEX(queue->remove)];

			/* Only generate callbacks for non-dummy bulk
			** requests, and non-terminated services */
			if (bulk->data && service->instance) {
				if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
					if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
						VCHIQ_SERVICE_STATS_INC(service,
							bulk_tx_count);
						VCHIQ_SERVICE_STATS_ADD(service,
							bulk_tx_bytes,
							bulk->actual);
					} else {
						VCHIQ_SERVICE_STATS_INC(service,
							bulk_rx_count);
						VCHIQ_SERVICE_STATS_ADD(service,
							bulk_rx_bytes,
							bulk->actual);
					}
				} else {
					VCHIQ_SERVICE_STATS_INC(service,
						bulk_aborted_count);
				}
				if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
					struct bulk_waiter *waiter;

					spin_lock(&bulk_waiter_spinlock);
					waiter = bulk->userdata;
					if (waiter) {
						waiter->actual = bulk->actual;
						complete(&waiter->event);
					}
					spin_unlock(&bulk_waiter_spinlock);
				} else if (bulk->mode ==
					VCHIQ_BULK_MODE_CALLBACK) {
					enum vchiq_reason reason = (bulk->dir ==
						VCHIQ_BULK_TRANSMIT) ?
						((bulk->actual ==
						VCHIQ_BULK_ACTUAL_ABORTED) ?
						VCHIQ_BULK_TRANSMIT_ABORTED :
						VCHIQ_BULK_TRANSMIT_DONE) :
						((bulk->actual ==
						VCHIQ_BULK_ACTUAL_ABORTED) ?
						VCHIQ_BULK_RECEIVE_ABORTED :
						VCHIQ_BULK_RECEIVE_DONE);
					status = make_service_callback(service,
						reason, NULL, bulk->userdata);
					if (status == VCHIQ_RETRY)
						break;
				}
			}

			queue->remove++;
			complete(&service->bulk_remove_event);
		}
		if (!retry_poll)
			status = VCHIQ_SUCCESS;
	}

	if (status == VCHIQ_RETRY)
		request_poll(service->state, service,
			(queue == &service->bulk_tx) ?
			VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);

	return status;
}

/* Called by the slot handler thread */
static void
poll_services(struct vchiq_state *state)
{
	int group, i;

	for (group = 0; group < BITSET_SIZE(state->unused_service); group++) {
		u32 flags;

		flags = atomic_xchg(&state->poll_services[group], 0);
		for (i = 0; flags; i++) {
			if (flags & BIT(i)) {
				struct vchiq_service *service =
					find_service_by_port(state,
						(group<<5) + i);
				u32 service_flags;

				flags &= ~BIT(i);
				if (!service)
					continue;
				service_flags =
					atomic_xchg(&service->poll_flags, 0);
				if (service_flags &
					BIT(VCHIQ_POLL_REMOVE)) {
					vchiq_log_info(vchiq_core_log_level,
						"%d: ps - remove %d<->%d",
						state->id, service->localport,
						service->remoteport);

					/* Make it look like a client, because
					   it must be removed and not left in
					   the LISTENING state. */
					service->public_fourcc =
						VCHIQ_FOURCC_INVALID;

					if (vchiq_close_service_internal(
						service, 0/*!close_recvd*/) !=
						VCHIQ_SUCCESS)
						request_poll(state, service,
							VCHIQ_POLL_REMOVE);
				} else if (service_flags &
					BIT(VCHIQ_POLL_TERMINATE)) {
					vchiq_log_info(vchiq_core_log_level,
						"%d: ps - terminate %d<->%d",
						state->id, service->localport,
						service->remoteport);
					if (vchiq_close_service_internal(
						service, 0/*!close_recvd*/) !=
						VCHIQ_SUCCESS)
						request_poll(state, service,
							VCHIQ_POLL_TERMINATE);
				}
				if (service_flags & BIT(VCHIQ_POLL_TXNOTIFY))
					notify_bulks(service,
						&service->bulk_tx,
						1/*retry_poll*/);
				if (service_flags & BIT(VCHIQ_POLL_RXNOTIFY))
					notify_bulks(service,
						&service->bulk_rx,
						1/*retry_poll*/);
				unlock_service(service);
			}
		}
	}
}

/* Called with the bulk_mutex held */
static void
abort_outstanding_bulks(struct vchiq_service *service,
			struct vchiq_bulk_queue *queue)
{
	int is_tx = (queue == &service->bulk_tx);

	vchiq_log_trace(vchiq_core_log_level,
		"%d: aob:%d %cx - li=%x ri=%x p=%x",
		service->state->id, service->localport, is_tx ? 't' : 'r',
		queue->local_insert, queue->remote_insert, queue->process);

	WARN_ON(!((int)(queue->local_insert - queue->process) >= 0));
	WARN_ON(!((int)(queue->remote_insert - queue->process) >= 0));

	while ((queue->process != queue->local_insert) ||
		(queue->process != queue->remote_insert)) {
		struct vchiq_bulk *bulk =
				&queue->bulks[BULK_INDEX(queue->process)];

		if (queue->process == queue->remote_insert) {
			/* fabricate a matching dummy bulk */
			bulk->remote_data = NULL;
			bulk->remote_size = 0;
			queue->remote_insert++;
		}

		if (queue->process != queue->local_insert) {
			vchiq_complete_bulk(bulk);

			vchiq_log_info(SRVTRACE_LEVEL(service),
				"%s %c%c%c%c d:%d ABORTED - tx len:%d, "
				"rx len:%d",
				is_tx ? "Send Bulk to" : "Recv Bulk from",
				VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
				service->remoteport,
				bulk->size,
				bulk->remote_size);
		} else {
			/* fabricate a matching dummy bulk */
			bulk->data = 0;
			bulk->size = 0;
			bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
			bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
				VCHIQ_BULK_RECEIVE;
			queue->local_insert++;
		}

		queue->process++;
	}
}

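/*
 * Handle an incoming OPEN request: look up a listening service with the same
 * fourcc, check that the two version ranges overlap, acknowledge with an
 * OPENACK carrying our version (via the sync slot for synchronous services)
 * and move the service to OPEN/OPENSYNC; otherwise reply with a CLOSE.
 * Returns 0 if the reply could not be queued and parsing must be retried.
 */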
static int
parse_open(struct vchiq_state *state, struct vchiq_header *header)
{
	struct vchiq_service *service = NULL;
	int msgid, size;
	unsigned int localport, remoteport;

	msgid = header->msgid;
	size = header->size;
	localport = VCHIQ_MSG_DSTPORT(msgid);
	remoteport = VCHIQ_MSG_SRCPORT(msgid);
	if (size >= sizeof(struct vchiq_open_payload)) {
		const struct vchiq_open_payload *payload =
			(struct vchiq_open_payload *)header->data;
		unsigned int fourcc;

		fourcc = payload->fourcc;
		vchiq_log_info(vchiq_core_log_level,
			"%d: prs OPEN@%pK (%d->'%c%c%c%c')",
			state->id, header, localport,
			VCHIQ_FOURCC_AS_4CHARS(fourcc));

		service = get_listening_service(state, fourcc);

		if (service) {
			/* A matching service exists */
			short version = payload->version;
			short version_min = payload->version_min;

			if ((service->version < version_min) ||
				(version < service->version_min)) {
				/* Version mismatch */
				vchiq_loud_error_header();
				vchiq_loud_error("%d: service %d (%c%c%c%c) "
					"version mismatch - local (%d, min %d)"
					" vs. remote (%d, min %d)",
					state->id, service->localport,
					VCHIQ_FOURCC_AS_4CHARS(fourcc),
					service->version, service->version_min,
					version, version_min);
				vchiq_loud_error_footer();
				unlock_service(service);
				service = NULL;
				goto fail_open;
			}
			service->peer_version = version;

			if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
				struct vchiq_openack_payload ack_payload = {
					service->version
				};

				if (state->version_common <
				    VCHIQ_VERSION_SYNCHRONOUS_MODE)
					service->sync = 0;

				/* Acknowledge the OPEN */
				if (service->sync) {
					if (queue_message_sync(
						state,
						NULL,
						VCHIQ_MAKE_MSG(
							VCHIQ_MSG_OPENACK,
							service->localport,
							remoteport),
						memcpy_copy_callback,
						&ack_payload,
						sizeof(ack_payload),
						0) == VCHIQ_RETRY)
						goto bail_not_ready;
				} else {
					if (queue_message(state,
							NULL,
							VCHIQ_MAKE_MSG(
							VCHIQ_MSG_OPENACK,
							service->localport,
							remoteport),
						memcpy_copy_callback,
						&ack_payload,
						sizeof(ack_payload),
						0) == VCHIQ_RETRY)
						goto bail_not_ready;
				}

				/* The service is now open */
				vchiq_set_service_state(service,
					service->sync ? VCHIQ_SRVSTATE_OPENSYNC
					: VCHIQ_SRVSTATE_OPEN);
			}

			/* Success - the message has been dealt with */
			unlock_service(service);
			return 1;
		}
	}

fail_open:
	/* No available service, or an invalid request - send a CLOSE */
	if (queue_message(state, NULL,
		VCHIQ_MAKE_MSG(VCHIQ_MSG_CLOSE, 0, VCHIQ_MSG_SRCPORT(msgid)),
		NULL, NULL, 0, 0) == VCHIQ_RETRY)
		goto bail_not_ready;

	return 1;

bail_not_ready:
	if (service)
		unlock_service(service);

	return 0;
}

/* Called by the slot handler thread */
static void
parse_rx_slots(struct vchiq_state *state)
{
	struct vchiq_shared_state *remote = state->remote;
	struct vchiq_service *service = NULL;
	int tx_pos;

	DEBUG_INITIALISE(state->local)

	tx_pos = remote->tx_pos;

	while (state->rx_pos != tx_pos) {
		struct vchiq_header *header;
		int msgid, size;
		int type;
		unsigned int localport, remoteport;

		DEBUG_TRACE(PARSE_LINE);
		if (!state->rx_data) {
			int rx_index;

			WARN_ON(!((state->rx_pos & VCHIQ_SLOT_MASK) == 0));
			rx_index = remote->slot_queue[
				SLOT_QUEUE_INDEX_FROM_POS(state->rx_pos) &
				VCHIQ_SLOT_QUEUE_MASK];
			state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
				rx_index);
			state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);

			/* Initialise use_count to one, and increment
			** release_count at the end of the slot to avoid
			** releasing the slot prematurely. */
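			/* For example, a slot carrying three claimed DATA
			** messages reaches use_count == 4 and is recycled
			** only after the end-of-slot release plus one
			** release per claimed message. */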
1552 			state->rx_info->use_count = 1;
1553 			state->rx_info->release_count = 0;
1554 		}
1555 
1556 		header = (struct vchiq_header *)(state->rx_data +
1557 			(state->rx_pos & VCHIQ_SLOT_MASK));
1558 		DEBUG_VALUE(PARSE_HEADER, (int)(long)header);
1559 		msgid = header->msgid;
1560 		DEBUG_VALUE(PARSE_MSGID, msgid);
1561 		size = header->size;
1562 		type = VCHIQ_MSG_TYPE(msgid);
1563 		localport = VCHIQ_MSG_DSTPORT(msgid);
1564 		remoteport = VCHIQ_MSG_SRCPORT(msgid);
1565 
1566 		if (type != VCHIQ_MSG_DATA)
1567 			VCHIQ_STATS_INC(state, ctrl_rx_count);
1568 
1569 		switch (type) {
1570 		case VCHIQ_MSG_OPENACK:
1571 		case VCHIQ_MSG_CLOSE:
1572 		case VCHIQ_MSG_DATA:
1573 		case VCHIQ_MSG_BULK_RX:
1574 		case VCHIQ_MSG_BULK_TX:
1575 		case VCHIQ_MSG_BULK_RX_DONE:
1576 		case VCHIQ_MSG_BULK_TX_DONE:
1577 			service = find_service_by_port(state, localport);
1578 			if ((!service ||
1579 			     ((service->remoteport != remoteport) &&
1580 			      (service->remoteport != VCHIQ_PORT_FREE))) &&
1581 			    (localport == 0) &&
1582 			    (type == VCHIQ_MSG_CLOSE)) {
1583 				/* This could be a CLOSE from a client which
1584 				   hadn't yet received the OPENACK - look for
1585 				   the connected service */
1586 				if (service)
1587 					unlock_service(service);
1588 				service = get_connected_service(state,
1589 					remoteport);
1590 				if (service)
1591 					vchiq_log_warning(vchiq_core_log_level,
1592 						"%d: prs %s@%pK (%d->%d) - found connected service %d",
1593 						state->id, msg_type_str(type),
1594 						header, remoteport, localport,
1595 						service->localport);
1596 			}
1597 
1598 			if (!service) {
1599 				vchiq_log_error(vchiq_core_log_level,
1600 					"%d: prs %s@%pK (%d->%d) - invalid/closed service %d",
1601 					state->id, msg_type_str(type),
1602 					header, remoteport, localport,
1603 					localport);
1604 				goto skip_message;
1605 			}
1606 			break;
1607 		default:
1608 			break;
1609 		}
1610 
1611 		if (SRVTRACE_ENABLED(service, VCHIQ_LOG_INFO)) {
1612 			int svc_fourcc;
1613 
1614 			svc_fourcc = service
1615 				? service->base.fourcc
1616 				: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1617 			vchiq_log_info(SRVTRACE_LEVEL(service),
1618 				"Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d "
1619 				"len:%d",
1620 				msg_type_str(type), type,
1621 				VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1622 				remoteport, localport, size);
1623 			if (size > 0)
1624 				vchiq_log_dump_mem("Rcvd", 0, header->data,
1625 					min(16, size));
1626 		}
1627 
1628 		if (((unsigned long)header & VCHIQ_SLOT_MASK) +
1629 		    calc_stride(size) > VCHIQ_SLOT_SIZE) {
1630 			vchiq_log_error(vchiq_core_log_level,
1631 				"header %pK (msgid %x) - size %x too big for slot",
1632 				header, (unsigned int)msgid,
1633 				(unsigned int)size);
1634 			WARN(1, "oversized for slot\n");
1635 		}
1636 
1637 		switch (type) {
1638 		case VCHIQ_MSG_OPEN:
1639 			WARN_ON(!(VCHIQ_MSG_DSTPORT(msgid) == 0));
1640 			if (!parse_open(state, header))
1641 				goto bail_not_ready;
1642 			break;
1643 		case VCHIQ_MSG_OPENACK:
1644 			if (size >= sizeof(struct vchiq_openack_payload)) {
1645 				const struct vchiq_openack_payload *payload =
1646 					(struct vchiq_openack_payload *)
1647 					header->data;
1648 				service->peer_version = payload->version;
1649 			}
1650 			vchiq_log_info(vchiq_core_log_level,
1651 				"%d: prs OPENACK@%pK,%x (%d->%d) v:%d",
1652 				state->id, header, size, remoteport, localport,
1653 				service->peer_version);
1654 			if (service->srvstate ==
1655 				VCHIQ_SRVSTATE_OPENING) {
1656 				service->remoteport = remoteport;
1657 				vchiq_set_service_state(service,
1658 					VCHIQ_SRVSTATE_OPEN);
1659 				complete(&service->remove_event);
1660 			} else
1661 				vchiq_log_error(vchiq_core_log_level,
1662 					"OPENACK received in state %s",
1663 					srvstate_names[service->srvstate]);
1664 			break;
1665 		case VCHIQ_MSG_CLOSE:
1666 			WARN_ON(size != 0); /* There should be no data */
1667 
1668 			vchiq_log_info(vchiq_core_log_level,
1669 				"%d: prs CLOSE@%pK (%d->%d)",
1670 				state->id, header, remoteport, localport);
1671 
1672 			mark_service_closing_internal(service, 1);
1673 
1674 			if (vchiq_close_service_internal(service,
1675 				1/*close_recvd*/) == VCHIQ_RETRY)
1676 				goto bail_not_ready;
1677 
1678 			vchiq_log_info(vchiq_core_log_level,
1679 				"Close Service %c%c%c%c s:%u d:%d",
1680 				VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1681 				service->localport,
1682 				service->remoteport);
1683 			break;
1684 		case VCHIQ_MSG_DATA:
1685 			vchiq_log_info(vchiq_core_log_level,
1686 				"%d: prs DATA@%pK,%x (%d->%d)",
1687 				state->id, header, size, remoteport, localport);
1688 
1689 			if ((service->remoteport == remoteport)
1690 				&& (service->srvstate ==
1691 				VCHIQ_SRVSTATE_OPEN)) {
1692 				header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
1693 				claim_slot(state->rx_info);
1694 				DEBUG_TRACE(PARSE_LINE);
1695 				if (make_service_callback(service,
1696 					VCHIQ_MESSAGE_AVAILABLE, header,
1697 					NULL) == VCHIQ_RETRY) {
1698 					DEBUG_TRACE(PARSE_LINE);
1699 					goto bail_not_ready;
1700 				}
1701 				VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
1702 				VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes,
1703 					size);
1704 			} else {
1705 				VCHIQ_STATS_INC(state, error_count);
1706 			}
1707 			break;
1708 		case VCHIQ_MSG_CONNECT:
1709 			vchiq_log_info(vchiq_core_log_level,
1710 				"%d: prs CONNECT@%pK", state->id, header);
1711 			state->version_common =	((struct vchiq_slot_zero *)
1712 						 state->slot_data)->version;
1713 			complete(&state->connect);
1714 			break;
1715 		case VCHIQ_MSG_BULK_RX:
1716 		case VCHIQ_MSG_BULK_TX:
1717 			/*
1718 			 * We should never receive a bulk request from the
1719 			 * other side since we're not setup to perform as the
1720 			 * master.
1721 			 */
1722 			WARN_ON(1);
1723 			break;
1724 		case VCHIQ_MSG_BULK_RX_DONE:
1725 		case VCHIQ_MSG_BULK_TX_DONE:
1726 			if ((service->remoteport == remoteport)
1727 				&& (service->srvstate !=
1728 				VCHIQ_SRVSTATE_FREE)) {
1729 				struct vchiq_bulk_queue *queue;
1730 				struct vchiq_bulk *bulk;
1731 
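				/*
				 * A *_DONE message completes the oldest
				 * outstanding bulk on the matching queue.
				 */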
1732 				queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
1733 					&service->bulk_rx : &service->bulk_tx;
1734 
1735 				DEBUG_TRACE(PARSE_LINE);
1736 				if (mutex_lock_killable(&service->bulk_mutex)) {
1737 					DEBUG_TRACE(PARSE_LINE);
1738 					goto bail_not_ready;
1739 				}
1740 				if ((int)(queue->remote_insert -
1741 					queue->local_insert) >= 0) {
1742 					vchiq_log_error(vchiq_core_log_level,
						"%d: prs %s@%pK (%d->%d) unexpected (ri=%d,li=%d)",
1745 						state->id, msg_type_str(type),
1746 						header, remoteport, localport,
1747 						queue->remote_insert,
1748 						queue->local_insert);
1749 					mutex_unlock(&service->bulk_mutex);
1750 					break;
1751 				}
1752 				if (queue->process != queue->remote_insert) {
1753 					pr_err("%s: p %x != ri %x\n",
1754 					       __func__,
1755 					       queue->process,
1756 					       queue->remote_insert);
1757 					mutex_unlock(&service->bulk_mutex);
1758 					goto bail_not_ready;
1759 				}
1760 
1761 				bulk = &queue->bulks[
1762 					BULK_INDEX(queue->remote_insert)];
1763 				bulk->actual = *(int *)header->data;
1764 				queue->remote_insert++;
1765 
1766 				vchiq_log_info(vchiq_core_log_level,
1767 					"%d: prs %s@%pK (%d->%d) %x@%pad",
1768 					state->id, msg_type_str(type),
1769 					header, remoteport, localport,
1770 					bulk->actual, &bulk->data);
1771 
1772 				vchiq_log_trace(vchiq_core_log_level,
1773 					"%d: prs:%d %cx li=%x ri=%x p=%x",
1774 					state->id, localport,
1775 					(type == VCHIQ_MSG_BULK_RX_DONE) ?
1776 						'r' : 't',
1777 					queue->local_insert,
1778 					queue->remote_insert, queue->process);
1779 
1780 				DEBUG_TRACE(PARSE_LINE);
1781 				WARN_ON(queue->process == queue->local_insert);
1782 				vchiq_complete_bulk(bulk);
1783 				queue->process++;
1784 				mutex_unlock(&service->bulk_mutex);
1785 				DEBUG_TRACE(PARSE_LINE);
1786 				notify_bulks(service, queue, 1/*retry_poll*/);
1787 				DEBUG_TRACE(PARSE_LINE);
1788 			}
1789 			break;
1790 		case VCHIQ_MSG_PADDING:
1791 			vchiq_log_trace(vchiq_core_log_level,
1792 				"%d: prs PADDING@%pK,%x",
1793 				state->id, header, size);
1794 			break;
1795 		case VCHIQ_MSG_PAUSE:
1796 			/* If initiated, signal the application thread */
1797 			vchiq_log_trace(vchiq_core_log_level,
1798 				"%d: prs PAUSE@%pK,%x",
1799 				state->id, header, size);
1800 			if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
1801 				vchiq_log_error(vchiq_core_log_level,
1802 					"%d: PAUSE received in state PAUSED",
1803 					state->id);
1804 				break;
1805 			}
1806 			if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
1807 				/* Send a PAUSE in response */
1808 				if (queue_message(state, NULL,
1809 					VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
1810 					NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK)
1811 				    == VCHIQ_RETRY)
1812 					goto bail_not_ready;
1813 			}
1814 			/* At this point slot_mutex is held */
1815 			vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
1816 			break;
1817 		case VCHIQ_MSG_RESUME:
1818 			vchiq_log_trace(vchiq_core_log_level,
1819 				"%d: prs RESUME@%pK,%x",
1820 				state->id, header, size);
1821 			/* Release the slot mutex */
1822 			mutex_unlock(&state->slot_mutex);
1823 			vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
1824 			break;
1825 
1826 		case VCHIQ_MSG_REMOTE_USE:
1827 			vchiq_on_remote_use(state);
1828 			break;
1829 		case VCHIQ_MSG_REMOTE_RELEASE:
1830 			vchiq_on_remote_release(state);
1831 			break;
1832 		case VCHIQ_MSG_REMOTE_USE_ACTIVE:
1833 			break;
1834 
1835 		default:
1836 			vchiq_log_error(vchiq_core_log_level,
1837 				"%d: prs invalid msgid %x@%pK,%x",
1838 				state->id, msgid, header, size);
1839 			WARN(1, "invalid message\n");
1840 			break;
1841 		}
1842 
1843 skip_message:
1844 		if (service) {
1845 			unlock_service(service);
1846 			service = NULL;
1847 		}
1848 
1849 		state->rx_pos += calc_stride(size);
1850 
1851 		DEBUG_TRACE(PARSE_LINE);
		/*
		 * Perform some housekeeping when the end of the slot is
		 * reached.
		 */
1854 		if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
1855 			/* Remove the extra reference count. */
1856 			release_slot(state, state->rx_info, NULL, NULL);
1857 			state->rx_data = NULL;
1858 		}
1859 	}
1860 
1861 bail_not_ready:
1862 	if (service)
1863 		unlock_service(service);
1864 }
1865 
1866 /* Called by the slot handler thread */
1867 static int
slot_handler_func(void *v)
1869 {
1870 	struct vchiq_state *state = v;
1871 	struct vchiq_shared_state *local = state->local;
1872 
1873 	DEBUG_INITIALISE(local)
1874 
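	/*
	 * Each wakeup handles any pending poll requests and then parses any
	 * newly arrived messages in the rx slots.
	 */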
1875 	while (1) {
1876 		DEBUG_COUNT(SLOT_HANDLER_COUNT);
1877 		DEBUG_TRACE(SLOT_HANDLER_LINE);
1878 		remote_event_wait(&state->trigger_event, &local->trigger);
1879 
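		/*
		 * Ensure any writes the remote made to shared memory are
		 * visible before acting on the trigger.
		 */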
1880 		rmb();
1881 
1882 		DEBUG_TRACE(SLOT_HANDLER_LINE);
1883 		if (state->poll_needed) {
1884 
1885 			state->poll_needed = 0;
1886 
			/*
			 * Handle service polling and other rare conditions
			 * here, out of the mainline code.
			 */
1889 			switch (state->conn_state) {
1890 			case VCHIQ_CONNSTATE_CONNECTED:
1891 				/* Poll the services as requested */
1892 				poll_services(state);
1893 				break;
1894 
1895 			case VCHIQ_CONNSTATE_PAUSING:
1896 				if (queue_message(state, NULL,
1897 					VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
1898 					NULL, NULL, 0,
1899 					QMFLAGS_NO_MUTEX_UNLOCK)
1900 				    != VCHIQ_RETRY) {
1901 					vchiq_set_conn_state(state,
1902 						VCHIQ_CONNSTATE_PAUSE_SENT);
1903 				} else {
1904 					/* Retry later */
1905 					state->poll_needed = 1;
1906 				}
1907 				break;
1908 
1909 			case VCHIQ_CONNSTATE_RESUMING:
1910 				if (queue_message(state, NULL,
1911 					VCHIQ_MAKE_MSG(VCHIQ_MSG_RESUME, 0, 0),
1912 					NULL, NULL, 0, QMFLAGS_NO_MUTEX_LOCK)
1913 					!= VCHIQ_RETRY) {
1914 					vchiq_set_conn_state(state,
1915 						VCHIQ_CONNSTATE_CONNECTED);
1916 				} else {
					/*
					 * This should really be impossible,
					 * since the PAUSE should have flushed
					 * through outstanding messages.
					 */
					vchiq_log_error(vchiq_core_log_level,
						"Failed to send RESUME message");
1923 				}
1924 				break;
1925 			default:
1926 				break;
1927 			}
1928 
1929 		}
1930 
1931 		DEBUG_TRACE(SLOT_HANDLER_LINE);
1932 		parse_rx_slots(state);
1933 	}
1934 	return 0;
1935 }
1936 
1937 /* Called by the recycle thread */
1938 static int
recycle_func(void *v)
1940 {
1941 	struct vchiq_state *state = v;
1942 	struct vchiq_shared_state *local = state->local;
1943 	BITSET_T *found;
1944 	size_t length;
1945 
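	/*
	 * Scratch bitmap with one bit per possible service, passed to
	 * process_free_queue() as per-service state while it scans freed
	 * slots.
	 */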
1946 	length = sizeof(*found) * BITSET_SIZE(VCHIQ_MAX_SERVICES);
1947 
1948 	found = kmalloc_array(BITSET_SIZE(VCHIQ_MAX_SERVICES), sizeof(*found),
1949 			      GFP_KERNEL);
1950 	if (!found)
1951 		return -ENOMEM;
1952 
1953 	while (1) {
1954 		remote_event_wait(&state->recycle_event, &local->recycle);
1955 
1956 		process_free_queue(state, found, length);
1957 	}
1958 	return 0;
1959 }
1960 
1961 /* Called by the sync thread */
1962 static int
sync_func(void *v)
1964 {
1965 	struct vchiq_state *state = v;
1966 	struct vchiq_shared_state *local = state->local;
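	/*
	 * Synchronous messages always arrive in the remote's single sync
	 * slot, so the header location can be computed once up front.
	 */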
1967 	struct vchiq_header *header =
1968 		(struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
1969 			state->remote->slot_sync);
1970 
1971 	while (1) {
1972 		struct vchiq_service *service;
1973 		int msgid, size;
1974 		int type;
1975 		unsigned int localport, remoteport;
1976 
1977 		remote_event_wait(&state->sync_trigger_event, &local->sync_trigger);
1978 
1979 		rmb();
1980 
1981 		msgid = header->msgid;
1982 		size = header->size;
1983 		type = VCHIQ_MSG_TYPE(msgid);
1984 		localport = VCHIQ_MSG_DSTPORT(msgid);
1985 		remoteport = VCHIQ_MSG_SRCPORT(msgid);
1986 
1987 		service = find_service_by_port(state, localport);
1988 
1989 		if (!service) {
1990 			vchiq_log_error(vchiq_sync_log_level,
1991 				"%d: sf %s@%pK (%d->%d) - invalid/closed service %d",
1992 				state->id, msg_type_str(type),
1993 				header, remoteport, localport, localport);
1994 			release_message_sync(state, header);
1995 			continue;
1996 		}
1997 
1998 		if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
1999 			int svc_fourcc;
2000 
2001 			svc_fourcc = service
2002 				? service->base.fourcc
2003 				: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
2004 			vchiq_log_trace(vchiq_sync_log_level,
2005 				"Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
2006 				msg_type_str(type),
2007 				VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
2008 				remoteport, localport, size);
2009 			if (size > 0)
2010 				vchiq_log_dump_mem("Rcvd", 0, header->data,
2011 					min(16, size));
2012 		}
2013 
2014 		switch (type) {
2015 		case VCHIQ_MSG_OPENACK:
2016 			if (size >= sizeof(struct vchiq_openack_payload)) {
2017 				const struct vchiq_openack_payload *payload =
2018 					(struct vchiq_openack_payload *)
2019 					header->data;
2020 				service->peer_version = payload->version;
2021 			}
2022 			vchiq_log_info(vchiq_sync_log_level,
2023 				"%d: sf OPENACK@%pK,%x (%d->%d) v:%d",
2024 				state->id, header, size, remoteport, localport,
2025 				service->peer_version);
2026 			if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
2027 				service->remoteport = remoteport;
2028 				vchiq_set_service_state(service,
2029 					VCHIQ_SRVSTATE_OPENSYNC);
2030 				service->sync = 1;
2031 				complete(&service->remove_event);
2032 			}
2033 			release_message_sync(state, header);
2034 			break;
2035 
2036 		case VCHIQ_MSG_DATA:
2037 			vchiq_log_trace(vchiq_sync_log_level,
2038 				"%d: sf DATA@%pK,%x (%d->%d)",
2039 				state->id, header, size, remoteport, localport);
2040 
2041 			if ((service->remoteport == remoteport) &&
2042 				(service->srvstate ==
2043 				VCHIQ_SRVSTATE_OPENSYNC)) {
2044 				if (make_service_callback(service,
2045 					VCHIQ_MESSAGE_AVAILABLE, header,
2046 					NULL) == VCHIQ_RETRY)
2047 					vchiq_log_error(vchiq_sync_log_level,
						"synchronous callback to service %d returns VCHIQ_RETRY",
2051 						localport);
2052 			}
2053 			break;
2054 
2055 		default:
2056 			vchiq_log_error(vchiq_sync_log_level,
2057 				"%d: sf unexpected msgid %x@%pK,%x",
2058 				state->id, msgid, header, size);
2059 			release_message_sync(state, header);
2060 			break;
2061 		}
2062 
2063 		unlock_service(service);
2064 	}
2065 
2066 	return 0;
2067 }
2068 
2069 static void
init_bulk_queue(struct vchiq_bulk_queue *queue)
2071 {
2072 	queue->local_insert = 0;
2073 	queue->remote_insert = 0;
2074 	queue->process = 0;
2075 	queue->remote_notify = 0;
2076 	queue->remove = 0;
2077 }
2078 
2079 inline const char *
get_conn_state_name(enum vchiq_connstate conn_state)
2081 {
2082 	return conn_state_names[conn_state];
2083 }
2084 
2085 struct vchiq_slot_zero *
vchiq_init_slots(void *mem_base, int mem_size)
2087 {
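	/*
	 * Round the base address up to a slot boundary; any bytes before the
	 * first slot are unused.
	 */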
2088 	int mem_align =
2089 		(int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK);
2090 	struct vchiq_slot_zero *slot_zero =
2091 		(struct vchiq_slot_zero *)(mem_base + mem_align);
2092 	int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE;
2093 	int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
2094 
	/* Ensure there is enough memory to run an absolute minimum system */
2096 	num_slots -= first_data_slot;
2097 
2098 	if (num_slots < 4) {
2099 		vchiq_log_error(vchiq_core_log_level,
2100 			"%s - insufficient memory %x bytes",
2101 			__func__, mem_size);
2102 		return NULL;
2103 	}
2104 
2105 	memset(slot_zero, 0, sizeof(struct vchiq_slot_zero));
2106 
2107 	slot_zero->magic = VCHIQ_MAGIC;
2108 	slot_zero->version = VCHIQ_VERSION;
2109 	slot_zero->version_min = VCHIQ_VERSION_MIN;
2110 	slot_zero->slot_zero_size = sizeof(struct vchiq_slot_zero);
2111 	slot_zero->slot_size = VCHIQ_SLOT_SIZE;
2112 	slot_zero->max_slots = VCHIQ_MAX_SLOTS;
2113 	slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
2114 
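	/*
	 * Each side gets a sync slot followed by its half of the remaining
	 * data slots.
	 */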
2115 	slot_zero->master.slot_sync = first_data_slot;
2116 	slot_zero->master.slot_first = first_data_slot + 1;
2117 	slot_zero->master.slot_last = first_data_slot + (num_slots/2) - 1;
2118 	slot_zero->slave.slot_sync = first_data_slot + (num_slots/2);
2119 	slot_zero->slave.slot_first = first_data_slot + (num_slots/2) + 1;
2120 	slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
2121 
2122 	return slot_zero;
2123 }
2124 
2125 enum vchiq_status
vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
2127 {
2128 	struct vchiq_shared_state *local;
2129 	struct vchiq_shared_state *remote;
2130 	enum vchiq_status status;
2131 	char threadname[16];
2132 	int i;
2133 
2134 	if (vchiq_states[0]) {
2135 		pr_err("%s: VCHIQ state already initialized\n", __func__);
2136 		return VCHIQ_ERROR;
2137 	}
2138 
2139 	local = &slot_zero->slave;
2140 	remote = &slot_zero->master;
2141 
2142 	if (local->initialised) {
2143 		vchiq_loud_error_header();
		if (remote->initialised)
			vchiq_loud_error("local state has already been initialised");
		else
			vchiq_loud_error("master/slave mismatch - two slaves");
2149 		vchiq_loud_error_footer();
2150 		return VCHIQ_ERROR;
2151 	}
2152 
2153 	memset(state, 0, sizeof(struct vchiq_state));
2154 
	/* Initialise shared state pointers */
2158 
2159 	state->local = local;
2160 	state->remote = remote;
2161 	state->slot_data = (struct vchiq_slot *)slot_zero;
2162 
	/* Initialise events and mutexes */
2166 
2167 	init_completion(&state->connect);
2168 	mutex_init(&state->mutex);
2169 	mutex_init(&state->slot_mutex);
2170 	mutex_init(&state->recycle_mutex);
2171 	mutex_init(&state->sync_mutex);
2172 	mutex_init(&state->bulk_transfer_mutex);
2173 
2174 	init_completion(&state->slot_available_event);
2175 	init_completion(&state->slot_remove_event);
2176 	init_completion(&state->data_quota_event);
2177 
2178 	state->slot_queue_available = 0;
2179 
2180 	for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
2181 		struct vchiq_service_quota *service_quota =
2182 			&state->service_quotas[i];
2183 		init_completion(&service_quota->quota_event);
2184 	}
2185 
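	/* Publish every local slot as available, signalling once per slot */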
2186 	for (i = local->slot_first; i <= local->slot_last; i++) {
2187 		local->slot_queue[state->slot_queue_available++] = i;
2188 		complete(&state->slot_available_event);
2189 	}
2190 
2191 	state->default_slot_quota = state->slot_queue_available/2;
2192 	state->default_message_quota =
2193 		min((unsigned short)(state->default_slot_quota * 256),
2194 		(unsigned short)~0);
2195 
2196 	state->previous_data_index = -1;
2197 	state->data_use_count = 0;
2198 	state->data_quota = state->slot_queue_available - 1;
2199 
2200 	remote_event_create(&state->trigger_event, &local->trigger);
2201 	local->tx_pos = 0;
2202 	remote_event_create(&state->recycle_event, &local->recycle);
2203 	local->slot_queue_recycle = state->slot_queue_available;
2204 	remote_event_create(&state->sync_trigger_event, &local->sync_trigger);
2205 	remote_event_create(&state->sync_release_event, &local->sync_release);
2206 
2207 	/* At start-of-day, the slot is empty and available */
2208 	((struct vchiq_header *)
2209 		SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid =
2210 							VCHIQ_MSGID_PADDING;
2211 	remote_event_signal_local(&state->sync_release_event, &local->sync_release);
2212 
2213 	local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
2214 
2215 	status = vchiq_platform_init_state(state);
2216 	if (status != VCHIQ_SUCCESS)
2217 		return VCHIQ_ERROR;
2218 
	/* Bring up the slot handler thread */
2222 	snprintf(threadname, sizeof(threadname), "vchiq-slot/%d", state->id);
2223 	state->slot_handler_thread = kthread_create(&slot_handler_func,
2224 		(void *)state,
2225 		threadname);
2226 
2227 	if (IS_ERR(state->slot_handler_thread)) {
2228 		vchiq_loud_error_header();
2229 		vchiq_loud_error("couldn't create thread %s", threadname);
2230 		vchiq_loud_error_footer();
2231 		return VCHIQ_ERROR;
2232 	}
2233 	set_user_nice(state->slot_handler_thread, -19);
2234 
2235 	snprintf(threadname, sizeof(threadname), "vchiq-recy/%d", state->id);
2236 	state->recycle_thread = kthread_create(&recycle_func,
2237 		(void *)state,
2238 		threadname);
2239 	if (IS_ERR(state->recycle_thread)) {
2240 		vchiq_loud_error_header();
2241 		vchiq_loud_error("couldn't create thread %s", threadname);
2242 		vchiq_loud_error_footer();
2243 		goto fail_free_handler_thread;
2244 	}
2245 	set_user_nice(state->recycle_thread, -19);
2246 
2247 	snprintf(threadname, sizeof(threadname), "vchiq-sync/%d", state->id);
2248 	state->sync_thread = kthread_create(&sync_func,
2249 		(void *)state,
2250 		threadname);
2251 	if (IS_ERR(state->sync_thread)) {
2252 		vchiq_loud_error_header();
2253 		vchiq_loud_error("couldn't create thread %s", threadname);
2254 		vchiq_loud_error_footer();
2255 		goto fail_free_recycle_thread;
2256 	}
2257 	set_user_nice(state->sync_thread, -20);
2258 
2259 	wake_up_process(state->slot_handler_thread);
2260 	wake_up_process(state->recycle_thread);
2261 	wake_up_process(state->sync_thread);
2262 
2263 	vchiq_states[0] = state;
2264 
2265 	/* Indicate readiness to the other side */
2266 	local->initialised = 1;
2267 
2268 	return status;
2269 
2270 fail_free_recycle_thread:
2271 	kthread_stop(state->recycle_thread);
2272 fail_free_handler_thread:
2273 	kthread_stop(state->slot_handler_thread);
2274 
2275 	return VCHIQ_ERROR;
2276 }
2277 
void vchiq_msg_queue_push(unsigned int handle, struct vchiq_header *header)
2279 {
2280 	struct vchiq_service *service = find_service_by_handle(handle);
	int pos;

	if (!service)
		return;
2282 
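	/*
	 * Block while the per-service message ring is full; each pop
	 * completion frees one entry.
	 */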
2283 	while (service->msg_queue_write == service->msg_queue_read +
2284 		VCHIQ_MAX_SLOTS) {
2285 		if (wait_for_completion_interruptible(&service->msg_queue_pop))
2286 			flush_signals(current);
2287 	}
2288 
2289 	pos = service->msg_queue_write++ & (VCHIQ_MAX_SLOTS - 1);
2290 	service->msg_queue[pos] = header;
2291 
2292 	complete(&service->msg_queue_push);
2293 }
2294 EXPORT_SYMBOL(vchiq_msg_queue_push);
2295 
struct vchiq_header *vchiq_msg_hold(unsigned int handle)
2297 {
2298 	struct vchiq_service *service = find_service_by_handle(handle);
2299 	struct vchiq_header *header;
2300 	int pos;
2301 
	if (!service)
		return NULL;

	/* Non-blocking: return NULL if no message has been pushed yet */
	if (service->msg_queue_write == service->msg_queue_read)
		return NULL;
2309 
2310 	pos = service->msg_queue_read++ & (VCHIQ_MAX_SLOTS - 1);
2311 	header = service->msg_queue[pos];
2312 
2313 	complete(&service->msg_queue_pop);
2314 
2315 	return header;
2316 }
2317 EXPORT_SYMBOL(vchiq_msg_hold);
2318 
static int vchiq_validate_params(const struct vchiq_service_params_kernel *params)
2320 {
2321 	if (!params->callback || !params->fourcc) {
2322 		vchiq_loud_error("Can't add service, invalid params\n");
2323 		return -EINVAL;
2324 	}
2325 
2326 	return 0;
2327 }
2328 
2329 /* Called from application thread when a client or server service is created. */
2330 struct vchiq_service *
vchiq_add_service_internal(struct vchiq_state *state,
2332 			   const struct vchiq_service_params_kernel *params,
2333 			   int srvstate, struct vchiq_instance *instance,
2334 			   vchiq_userdata_term userdata_term)
2335 {
2336 	struct vchiq_service *service;
2337 	struct vchiq_service __rcu **pservice = NULL;
2338 	struct vchiq_service_quota *service_quota;
2339 	int ret;
2340 	int i;
2341 
2342 	ret = vchiq_validate_params(params);
2343 	if (ret)
2344 		return NULL;
2345 
2346 	service = kmalloc(sizeof(*service), GFP_KERNEL);
2347 	if (!service)
2348 		return service;
2349 
2350 	service->base.fourcc   = params->fourcc;
2351 	service->base.callback = params->callback;
2352 	service->base.userdata = params->userdata;
2353 	service->handle        = VCHIQ_SERVICE_HANDLE_INVALID;
2354 	kref_init(&service->ref_count);
2355 	service->srvstate      = VCHIQ_SRVSTATE_FREE;
2356 	service->userdata_term = userdata_term;
2357 	service->localport     = VCHIQ_PORT_FREE;
2358 	service->remoteport    = VCHIQ_PORT_FREE;
2359 
2360 	service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
2361 		VCHIQ_FOURCC_INVALID : params->fourcc;
2362 	service->client_id     = 0;
2363 	service->auto_close    = 1;
2364 	service->sync          = 0;
2365 	service->closing       = 0;
2366 	service->trace         = 0;
2367 	atomic_set(&service->poll_flags, 0);
2368 	service->version       = params->version;
2369 	service->version_min   = params->version_min;
2370 	service->state         = state;
2371 	service->instance      = instance;
2372 	service->service_use_count = 0;
2373 	service->msg_queue_read = 0;
2374 	service->msg_queue_write = 0;
2375 	init_bulk_queue(&service->bulk_tx);
2376 	init_bulk_queue(&service->bulk_rx);
2377 	init_completion(&service->remove_event);
2378 	init_completion(&service->bulk_remove_event);
2379 	init_completion(&service->msg_queue_pop);
2380 	init_completion(&service->msg_queue_push);
2381 	mutex_init(&service->bulk_mutex);
2382 	memset(&service->stats, 0, sizeof(service->stats));
2383 	memset(&service->msg_queue, 0, sizeof(service->msg_queue));
2384 
	/*
	 * Although it is perfectly possible to use a spinlock
	 * to protect the creation of services, it is overkill as it
	 * disables interrupts while the array is searched.
	 * The only danger is of another thread trying to create a
	 * service - service deletion is safe.
	 * Therefore it is preferable to use state->mutex which,
	 * although slower to claim, doesn't block interrupts while
	 * it is held.
	 */
2394 
2395 	mutex_lock(&state->mutex);
2396 
2397 	/* Prepare to use a previously unused service */
2398 	if (state->unused_service < VCHIQ_MAX_SERVICES)
2399 		pservice = &state->services[state->unused_service];
2400 
2401 	if (srvstate == VCHIQ_SRVSTATE_OPENING) {
2402 		for (i = 0; i < state->unused_service; i++) {
2403 			if (!rcu_access_pointer(state->services[i])) {
2404 				pservice = &state->services[i];
2405 				break;
2406 			}
2407 		}
2408 	} else {
2409 		rcu_read_lock();
2410 		for (i = (state->unused_service - 1); i >= 0; i--) {
2411 			struct vchiq_service *srv;
2412 
2413 			srv = rcu_dereference(state->services[i]);
2414 			if (!srv)
2415 				pservice = &state->services[i];
2416 			else if ((srv->public_fourcc == params->fourcc)
2417 				&& ((srv->instance != instance) ||
2418 				(srv->base.callback !=
2419 				params->callback))) {
				/*
				 * Another server is already using this
				 * fourcc with a different instance or
				 * callback - refuse to add a duplicate.
				 */
2422 				pservice = NULL;
2423 				break;
2424 			}
2425 		}
2426 		rcu_read_unlock();
2427 	}
2428 
2429 	if (pservice) {
2430 		service->localport = (pservice - state->services);
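		/*
		 * Construct a handle that encodes the owning state and local
		 * port; handle_seq makes handles unique across service reuse.
		 */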
2431 		if (!handle_seq)
2432 			handle_seq = VCHIQ_MAX_STATES *
2433 				 VCHIQ_MAX_SERVICES;
2434 		service->handle = handle_seq |
2435 			(state->id * VCHIQ_MAX_SERVICES) |
2436 			service->localport;
2437 		handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
2438 		rcu_assign_pointer(*pservice, service);
2439 		if (pservice == &state->services[state->unused_service])
2440 			state->unused_service++;
2441 	}
2442 
2443 	mutex_unlock(&state->mutex);
2444 
2445 	if (!pservice) {
2446 		kfree(service);
2447 		return NULL;
2448 	}
2449 
2450 	service_quota = &state->service_quotas[service->localport];
2451 	service_quota->slot_quota = state->default_slot_quota;
2452 	service_quota->message_quota = state->default_message_quota;
2453 	if (service_quota->slot_use_count == 0)
2454 		service_quota->previous_tx_index =
2455 			SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
2456 			- 1;
2457 
2458 	/* Bring this service online */
2459 	vchiq_set_service_state(service, srvstate);
2460 
2461 	vchiq_log_info(vchiq_core_msg_log_level,
2462 		"%s Service %c%c%c%c SrcPort:%d",
2463 		(srvstate == VCHIQ_SRVSTATE_OPENING)
2464 		? "Open" : "Add",
2465 		VCHIQ_FOURCC_AS_4CHARS(params->fourcc),
2466 		service->localport);
2467 
2468 	/* Don't unlock the service - leave it with a ref_count of 1. */
2469 
2470 	return service;
2471 }
2472 
2473 enum vchiq_status
vchiq_open_service_internal(struct vchiq_service *service, int client_id)
2475 {
2476 	struct vchiq_open_payload payload = {
2477 		service->base.fourcc,
2478 		client_id,
2479 		service->version,
2480 		service->version_min
2481 	};
2482 	enum vchiq_status status = VCHIQ_SUCCESS;
2483 
2484 	service->client_id = client_id;
2485 	vchiq_use_service_internal(service);
2486 	status = queue_message(service->state,
2487 			       NULL,
2488 			       VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN,
2489 					      service->localport,
2490 					      0),
2491 			       memcpy_copy_callback,
2492 			       &payload,
2493 			       sizeof(payload),
2494 			       QMFLAGS_IS_BLOCKING);
2495 	if (status == VCHIQ_SUCCESS) {
2496 		/* Wait for the ACK/NAK */
2497 		if (wait_for_completion_interruptible(&service->remove_event)) {
2498 			status = VCHIQ_RETRY;
2499 			vchiq_release_service_internal(service);
2500 		} else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
2501 			   (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
2502 			if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
2503 				vchiq_log_error(vchiq_core_log_level,
2504 						"%d: osi - srvstate = %s (ref %u)",
2505 						service->state->id,
2506 						srvstate_names[service->srvstate],
2507 						kref_read(&service->ref_count));
2508 			status = VCHIQ_ERROR;
2509 			VCHIQ_SERVICE_STATS_INC(service, error_count);
2510 			vchiq_release_service_internal(service);
2511 		}
2512 	}
2513 	return status;
2514 }
2515 
2516 static void
release_service_messages(struct vchiq_service *service)
2518 {
2519 	struct vchiq_state *state = service->state;
2520 	int slot_last = state->remote->slot_last;
2521 	int i;
2522 
2523 	/* Release any claimed messages aimed at this service */
2524 
2525 	if (service->sync) {
2526 		struct vchiq_header *header =
2527 			(struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2528 						state->remote->slot_sync);
2529 		if (VCHIQ_MSG_DSTPORT(header->msgid) == service->localport)
2530 			release_message_sync(state, header);
2531 
2532 		return;
2533 	}
2534 
2535 	for (i = state->remote->slot_first; i <= slot_last; i++) {
2536 		struct vchiq_slot_info *slot_info =
2537 			SLOT_INFO_FROM_INDEX(state, i);
2538 		if (slot_info->release_count != slot_info->use_count) {
2539 			char *data =
2540 				(char *)SLOT_DATA_FROM_INDEX(state, i);
2541 			unsigned int pos, end;
2542 
2543 			end = VCHIQ_SLOT_SIZE;
2544 			if (data == state->rx_data)
				/*
				 * This buffer is still being read from - stop
				 * at the current read position.
				 */
2547 				end = state->rx_pos & VCHIQ_SLOT_MASK;
2548 
2549 			pos = 0;
2550 
2551 			while (pos < end) {
2552 				struct vchiq_header *header =
2553 					(struct vchiq_header *)(data + pos);
2554 				int msgid = header->msgid;
2555 				int port = VCHIQ_MSG_DSTPORT(msgid);
2556 
2557 				if ((port == service->localport) &&
2558 					(msgid & VCHIQ_MSGID_CLAIMED)) {
2559 					vchiq_log_info(vchiq_core_log_level,
2560 						"  fsi - hdr %pK", header);
2561 					release_slot(state, slot_info, header,
2562 						NULL);
2563 				}
2564 				pos += calc_stride(header->size);
2565 				if (pos > VCHIQ_SLOT_SIZE) {
2566 					vchiq_log_error(vchiq_core_log_level,
2567 						"fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
2568 						pos, header, msgid,
2569 						header->msgid, header->size);
2570 					WARN(1, "invalid slot position\n");
2571 				}
2572 			}
2573 		}
2574 	}
2575 }
2576 
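/*
 * Abort all outstanding bulk transfers on a service and notify their
 * completion. Returns nonzero on success, zero if the mutex wait was
 * interrupted or a notification could not be delivered.
 */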
2577 static int
do_abort_bulks(struct vchiq_service *service)
2579 {
2580 	enum vchiq_status status;
2581 
2582 	/* Abort any outstanding bulk transfers */
2583 	if (mutex_lock_killable(&service->bulk_mutex))
2584 		return 0;
2585 	abort_outstanding_bulks(service, &service->bulk_tx);
2586 	abort_outstanding_bulks(service, &service->bulk_rx);
2587 	mutex_unlock(&service->bulk_mutex);
2588 
2589 	status = notify_bulks(service, &service->bulk_tx, 0/*!retry_poll*/);
2590 	if (status == VCHIQ_SUCCESS)
2591 		status = notify_bulks(service, &service->bulk_rx,
2592 			0/*!retry_poll*/);
2593 	return (status == VCHIQ_SUCCESS);
2594 }
2595 
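/*
 * Finish closing a service: move it to its final state, deliver the
 * SERVICE_CLOSED callback and drop any outstanding use counts. If the
 * callback asks for a retry, the service is left in 'failstate'.
 */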
2596 static enum vchiq_status
close_service_complete(struct vchiq_service *service, int failstate)
2598 {
2599 	enum vchiq_status status;
2600 	int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2601 	int newstate;
2602 
2603 	switch (service->srvstate) {
2604 	case VCHIQ_SRVSTATE_OPEN:
2605 	case VCHIQ_SRVSTATE_CLOSESENT:
2606 	case VCHIQ_SRVSTATE_CLOSERECVD:
2607 		if (is_server) {
2608 			if (service->auto_close) {
2609 				service->client_id = 0;
2610 				service->remoteport = VCHIQ_PORT_FREE;
2611 				newstate = VCHIQ_SRVSTATE_LISTENING;
2612 			} else
2613 				newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
2614 		} else
2615 			newstate = VCHIQ_SRVSTATE_CLOSED;
2616 		vchiq_set_service_state(service, newstate);
2617 		break;
2618 	case VCHIQ_SRVSTATE_LISTENING:
2619 		break;
2620 	default:
2621 		vchiq_log_error(vchiq_core_log_level,
2622 			"%s(%x) called in state %s", __func__,
2623 			service->handle, srvstate_names[service->srvstate]);
2624 		WARN(1, "%s in unexpected state\n", __func__);
2625 		return VCHIQ_ERROR;
2626 	}
2627 
2628 	status = make_service_callback(service,
2629 		VCHIQ_SERVICE_CLOSED, NULL, NULL);
2630 
2631 	if (status != VCHIQ_RETRY) {
2632 		int uc = service->service_use_count;
2633 		int i;
2634 		/* Complete the close process */
2635 		for (i = 0; i < uc; i++)
			/*
			 * Cater for cases where close is forced and the
			 * client may not close all its handles.
			 */
2638 			vchiq_release_service_internal(service);
2639 
2640 		service->client_id = 0;
2641 		service->remoteport = VCHIQ_PORT_FREE;
2642 
2643 		if (service->srvstate == VCHIQ_SRVSTATE_CLOSED)
2644 			vchiq_free_service_internal(service);
2645 		else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
2646 			if (is_server)
2647 				service->closing = 0;
2648 
2649 			complete(&service->remove_event);
2650 		}
2651 	} else
2652 		vchiq_set_service_state(service, failstate);
2653 
2654 	return status;
2655 }
2656 
2657 /* Called by the slot handler */
2658 enum vchiq_status
vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
2660 {
2661 	struct vchiq_state *state = service->state;
2662 	enum vchiq_status status = VCHIQ_SUCCESS;
2663 	int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2664 
2665 	vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
2666 		service->state->id, service->localport, close_recvd,
2667 		srvstate_names[service->srvstate]);
2668 
2669 	switch (service->srvstate) {
2670 	case VCHIQ_SRVSTATE_CLOSED:
2671 	case VCHIQ_SRVSTATE_HIDDEN:
2672 	case VCHIQ_SRVSTATE_LISTENING:
2673 	case VCHIQ_SRVSTATE_CLOSEWAIT:
2674 		if (close_recvd)
2675 			vchiq_log_error(vchiq_core_log_level,
				"%s(1) called in state %s",
2678 				__func__, srvstate_names[service->srvstate]);
2679 		else if (is_server) {
2680 			if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
2681 				status = VCHIQ_ERROR;
2682 			} else {
2683 				service->client_id = 0;
2684 				service->remoteport = VCHIQ_PORT_FREE;
2685 				if (service->srvstate ==
2686 					VCHIQ_SRVSTATE_CLOSEWAIT)
2687 					vchiq_set_service_state(service,
2688 						VCHIQ_SRVSTATE_LISTENING);
2689 			}
2690 			complete(&service->remove_event);
2691 		} else
2692 			vchiq_free_service_internal(service);
2693 		break;
2694 	case VCHIQ_SRVSTATE_OPENING:
2695 		if (close_recvd) {
2696 			/* The open was rejected - tell the user */
2697 			vchiq_set_service_state(service,
2698 				VCHIQ_SRVSTATE_CLOSEWAIT);
2699 			complete(&service->remove_event);
2700 		} else {
2701 			/* Shutdown mid-open - let the other side know */
2702 			status = queue_message(state, service,
2703 				VCHIQ_MAKE_MSG
2704 				(VCHIQ_MSG_CLOSE,
2705 				service->localport,
2706 				VCHIQ_MSG_DSTPORT(service->remoteport)),
2707 				NULL, NULL, 0, 0);
2708 		}
2709 		break;
2710 
2711 	case VCHIQ_SRVSTATE_OPENSYNC:
2712 		mutex_lock(&state->sync_mutex);
2713 		fallthrough;
2714 	case VCHIQ_SRVSTATE_OPEN:
2715 		if (close_recvd) {
2716 			if (!do_abort_bulks(service))
2717 				status = VCHIQ_RETRY;
2718 		}
2719 
2720 		release_service_messages(service);
2721 
2722 		if (status == VCHIQ_SUCCESS)
2723 			status = queue_message(state, service,
2724 				VCHIQ_MAKE_MSG
2725 				(VCHIQ_MSG_CLOSE,
2726 				service->localport,
2727 				VCHIQ_MSG_DSTPORT(service->remoteport)),
2728 				NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK);
2729 
2730 		if (status == VCHIQ_SUCCESS) {
2731 			if (!close_recvd) {
2732 				/* Change the state while the mutex is
2733 				   still held */
2734 				vchiq_set_service_state(service,
2735 							VCHIQ_SRVSTATE_CLOSESENT);
2736 				mutex_unlock(&state->slot_mutex);
2737 				if (service->sync)
2738 					mutex_unlock(&state->sync_mutex);
2739 				break;
2740 			}
2741 		} else if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC) {
2742 			mutex_unlock(&state->sync_mutex);
2743 			break;
2744 		} else
2745 			break;
2746 
2747 		/* Change the state while the mutex is still held */
2748 		vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSERECVD);
2749 		mutex_unlock(&state->slot_mutex);
2750 		if (service->sync)
2751 			mutex_unlock(&state->sync_mutex);
2752 
2753 		status = close_service_complete(service,
2754 				VCHIQ_SRVSTATE_CLOSERECVD);
2755 		break;
2756 
2757 	case VCHIQ_SRVSTATE_CLOSESENT:
2758 		if (!close_recvd)
2759 			/* This happens when a process is killed mid-close */
2760 			break;
2761 
2762 		if (!do_abort_bulks(service)) {
2763 			status = VCHIQ_RETRY;
2764 			break;
2765 		}
2766 
2767 		if (status == VCHIQ_SUCCESS)
2768 			status = close_service_complete(service,
2769 				VCHIQ_SRVSTATE_CLOSERECVD);
2770 		break;
2771 
2772 	case VCHIQ_SRVSTATE_CLOSERECVD:
2773 		if (!close_recvd && is_server)
2774 			/* Force into LISTENING mode */
2775 			vchiq_set_service_state(service,
2776 				VCHIQ_SRVSTATE_LISTENING);
2777 		status = close_service_complete(service,
2778 			VCHIQ_SRVSTATE_CLOSERECVD);
2779 		break;
2780 
2781 	default:
2782 		vchiq_log_error(vchiq_core_log_level,
2783 			"%s(%d) called in state %s", __func__,
2784 			close_recvd, srvstate_names[service->srvstate]);
2785 		break;
2786 	}
2787 
2788 	return status;
2789 }
2790 
2791 /* Called from the application process upon process death */
2792 void
vchiq_terminate_service_internal(struct vchiq_service *service)
2794 {
2795 	struct vchiq_state *state = service->state;
2796 
2797 	vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)",
2798 		state->id, service->localport, service->remoteport);
2799 
2800 	mark_service_closing(service);
2801 
2802 	/* Mark the service for removal by the slot handler */
2803 	request_poll(state, service, VCHIQ_POLL_REMOVE);
2804 }
2805 
2806 /* Called from the slot handler */
2807 void
vchiq_free_service_internal(struct vchiq_service *service)
2809 {
2810 	struct vchiq_state *state = service->state;
2811 
2812 	vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)",
2813 		state->id, service->localport);
2814 
2815 	switch (service->srvstate) {
2816 	case VCHIQ_SRVSTATE_OPENING:
2817 	case VCHIQ_SRVSTATE_CLOSED:
2818 	case VCHIQ_SRVSTATE_HIDDEN:
2819 	case VCHIQ_SRVSTATE_LISTENING:
2820 	case VCHIQ_SRVSTATE_CLOSEWAIT:
2821 		break;
2822 	default:
2823 		vchiq_log_error(vchiq_core_log_level,
2824 			"%d: fsi - (%d) in state %s",
2825 			state->id, service->localport,
2826 			srvstate_names[service->srvstate]);
2827 		return;
2828 	}
2829 
2830 	vchiq_set_service_state(service, VCHIQ_SRVSTATE_FREE);
2831 
2832 	complete(&service->remove_event);
2833 
2834 	/* Release the initial lock */
2835 	unlock_service(service);
2836 }
2837 
2838 enum vchiq_status
vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance)
2840 {
2841 	struct vchiq_service *service;
2842 	int i;
2843 
2844 	/* Find all services registered to this client and enable them. */
2845 	i = 0;
2846 	while ((service = next_service_by_instance(state, instance,
2847 		&i)) !=	NULL) {
2848 		if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
2849 			vchiq_set_service_state(service,
2850 				VCHIQ_SRVSTATE_LISTENING);
2851 		unlock_service(service);
2852 	}
2853 
2854 	if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
2855 		if (queue_message(state, NULL,
2856 			VCHIQ_MAKE_MSG(VCHIQ_MSG_CONNECT, 0, 0), NULL, NULL,
2857 			0, QMFLAGS_IS_BLOCKING) == VCHIQ_RETRY)
2858 			return VCHIQ_RETRY;
2859 
2860 		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
2861 	}
2862 
2863 	if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
2864 		if (wait_for_completion_interruptible(&state->connect))
2865 			return VCHIQ_RETRY;
2866 
2867 		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
2868 		complete(&state->connect);
2869 	}
2870 
2871 	return VCHIQ_SUCCESS;
2872 }
2873 
2874 enum vchiq_status
vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance)
2876 {
2877 	struct vchiq_service *service;
2878 	int i;
2879 
	/* Find all services registered to this client and remove them. */
2881 	i = 0;
2882 	while ((service = next_service_by_instance(state, instance,
2883 		&i)) !=	NULL) {
2884 		(void)vchiq_remove_service(service->handle);
2885 		unlock_service(service);
2886 	}
2887 
2888 	return VCHIQ_SUCCESS;
2889 }
2890 
2891 enum vchiq_status
vchiq_close_service(unsigned int handle)
2893 {
2894 	/* Unregister the service */
2895 	struct vchiq_service *service = find_service_by_handle(handle);
2896 	enum vchiq_status status = VCHIQ_SUCCESS;
2897 
2898 	if (!service)
2899 		return VCHIQ_ERROR;
2900 
2901 	vchiq_log_info(vchiq_core_log_level,
2902 		"%d: close_service:%d",
2903 		service->state->id, service->localport);
2904 
2905 	if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2906 		(service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
2907 		(service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
2908 		unlock_service(service);
2909 		return VCHIQ_ERROR;
2910 	}
2911 
2912 	mark_service_closing(service);
2913 
2914 	if (current == service->state->slot_handler_thread) {
2915 		status = vchiq_close_service_internal(service,
2916 			0/*!close_recvd*/);
2917 		WARN_ON(status == VCHIQ_RETRY);
2918 	} else {
		/* Mark the service for termination by the slot handler */
2920 		request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
2921 	}
2922 
2923 	while (1) {
2924 		if (wait_for_completion_interruptible(&service->remove_event)) {
2925 			status = VCHIQ_RETRY;
2926 			break;
2927 		}
2928 
2929 		if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2930 			(service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
2931 			(service->srvstate == VCHIQ_SRVSTATE_OPEN))
2932 			break;
2933 
2934 		vchiq_log_warning(vchiq_core_log_level,
2935 			"%d: close_service:%d - waiting in state %s",
2936 			service->state->id, service->localport,
2937 			srvstate_names[service->srvstate]);
2938 	}
2939 
2940 	if ((status == VCHIQ_SUCCESS) &&
2941 		(service->srvstate != VCHIQ_SRVSTATE_FREE) &&
2942 		(service->srvstate != VCHIQ_SRVSTATE_LISTENING))
2943 		status = VCHIQ_ERROR;
2944 
2945 	unlock_service(service);
2946 
2947 	return status;
2948 }
2949 EXPORT_SYMBOL(vchiq_close_service);
2950 
2951 enum vchiq_status
vchiq_remove_service(unsigned int handle)
2953 {
2954 	/* Unregister the service */
2955 	struct vchiq_service *service = find_service_by_handle(handle);
2956 	enum vchiq_status status = VCHIQ_SUCCESS;
2957 
2958 	if (!service)
2959 		return VCHIQ_ERROR;
2960 
2961 	vchiq_log_info(vchiq_core_log_level,
2962 		"%d: remove_service:%d",
2963 		service->state->id, service->localport);
2964 
2965 	if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
2966 		unlock_service(service);
2967 		return VCHIQ_ERROR;
2968 	}
2969 
2970 	mark_service_closing(service);
2971 
2972 	if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
2973 		(current == service->state->slot_handler_thread)) {
2974 		/* Make it look like a client, because it must be removed and
2975 		   not left in the LISTENING state. */
2976 		service->public_fourcc = VCHIQ_FOURCC_INVALID;
2977 
2978 		status = vchiq_close_service_internal(service,
2979 			0/*!close_recvd*/);
2980 		WARN_ON(status == VCHIQ_RETRY);
2981 	} else {
2982 		/* Mark the service for removal by the slot handler */
2983 		request_poll(service->state, service, VCHIQ_POLL_REMOVE);
2984 	}
2985 	while (1) {
2986 		if (wait_for_completion_interruptible(&service->remove_event)) {
2987 			status = VCHIQ_RETRY;
2988 			break;
2989 		}
2990 
2991 		if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2992 			(service->srvstate == VCHIQ_SRVSTATE_OPEN))
2993 			break;
2994 
2995 		vchiq_log_warning(vchiq_core_log_level,
2996 			"%d: remove_service:%d - waiting in state %s",
2997 			service->state->id, service->localport,
2998 			srvstate_names[service->srvstate]);
2999 	}
3000 
3001 	if ((status == VCHIQ_SUCCESS) &&
3002 		(service->srvstate != VCHIQ_SRVSTATE_FREE))
3003 		status = VCHIQ_ERROR;
3004 
3005 	unlock_service(service);
3006 
3007 	return status;
3008 }
3009 
3010 /* This function may be called by kernel threads or user threads.
3011  * User threads may receive VCHIQ_RETRY to indicate that a signal has been
3012  * received and the call should be retried after being returned to user
3013  * context.
3014  * When called in blocking mode, the userdata field points to a bulk_waiter
3015  * structure.
3016  */
enum vchiq_status vchiq_bulk_transfer(unsigned int handle,
3018 				   void *offset, void __user *uoffset,
3019 				   int size, void *userdata,
3020 				   enum vchiq_bulk_mode mode,
3021 				   enum vchiq_bulk_dir dir)
3022 {
3023 	struct vchiq_service *service = find_service_by_handle(handle);
3024 	struct vchiq_bulk_queue *queue;
3025 	struct vchiq_bulk *bulk;
3026 	struct vchiq_state *state;
3027 	struct bulk_waiter *bulk_waiter = NULL;
3028 	const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
3029 	const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
3030 		VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
3031 	enum vchiq_status status = VCHIQ_ERROR;
3032 	int payload[2];
3033 
3034 	if (!service || service->srvstate != VCHIQ_SRVSTATE_OPEN ||
3035 	    (!offset && !uoffset) ||
3036 	    vchiq_check_service(service) != VCHIQ_SUCCESS)
3037 		goto error_exit;
3038 
3039 	switch (mode) {
3040 	case VCHIQ_BULK_MODE_NOCALLBACK:
3041 	case VCHIQ_BULK_MODE_CALLBACK:
3042 		break;
3043 	case VCHIQ_BULK_MODE_BLOCKING:
3044 		bulk_waiter = userdata;
3045 		init_completion(&bulk_waiter->event);
3046 		bulk_waiter->actual = 0;
3047 		bulk_waiter->bulk = NULL;
3048 		break;
3049 	case VCHIQ_BULK_MODE_WAITING:
3050 		bulk_waiter = userdata;
3051 		bulk = bulk_waiter->bulk;
3052 		goto waiting;
3053 	default:
3054 		goto error_exit;
3055 	}
3056 
3057 	state = service->state;
3058 
3059 	queue = (dir == VCHIQ_BULK_TRANSMIT) ?
3060 		&service->bulk_tx : &service->bulk_rx;
3061 
3062 	if (mutex_lock_killable(&service->bulk_mutex)) {
3063 		status = VCHIQ_RETRY;
3064 		goto error_exit;
3065 	}
3066 
3067 	if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
3068 		VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
3069 		do {
3070 			mutex_unlock(&service->bulk_mutex);
3071 			if (wait_for_completion_interruptible(
3072 						&service->bulk_remove_event)) {
3073 				status = VCHIQ_RETRY;
3074 				goto error_exit;
3075 			}
3076 			if (mutex_lock_killable(&service->bulk_mutex)) {
3077 				status = VCHIQ_RETRY;
3078 				goto error_exit;
3079 			}
3080 		} while (queue->local_insert == queue->remove +
3081 				VCHIQ_NUM_SERVICE_BULKS);
3082 	}
3083 
3084 	bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
3085 
3086 	bulk->mode = mode;
3087 	bulk->dir = dir;
3088 	bulk->userdata = userdata;
3089 	bulk->size = size;
3090 	bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
3091 
3092 	if (vchiq_prepare_bulk_data(bulk, offset, uoffset, size, dir)
3093 			!= VCHIQ_SUCCESS)
3094 		goto unlock_error_exit;
3095 
3096 	wmb();
3097 
3098 	vchiq_log_info(vchiq_core_log_level,
3099 		"%d: bt (%d->%d) %cx %x@%pad %pK",
3100 		state->id, service->localport, service->remoteport, dir_char,
3101 		size, &bulk->data, userdata);
3102 
3103 	/* The slot mutex must be held when the service is being closed, so
3104 	   claim it here to ensure that isn't happening */
3105 	if (mutex_lock_killable(&state->slot_mutex)) {
3106 		status = VCHIQ_RETRY;
3107 		goto cancel_bulk_error_exit;
3108 	}
3109 
3110 	if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3111 		goto unlock_both_error_exit;
3112 
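	/*
	 * The message body carries the low 32 bits of the bulk data address
	 * and its length.
	 */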
3113 	payload[0] = lower_32_bits(bulk->data);
3114 	payload[1] = bulk->size;
3115 	status = queue_message(state,
3116 			       NULL,
3117 			       VCHIQ_MAKE_MSG(dir_msgtype,
3118 					      service->localport,
3119 					      service->remoteport),
3120 			       memcpy_copy_callback,
3121 			       &payload,
3122 			       sizeof(payload),
3123 			       QMFLAGS_IS_BLOCKING |
3124 			       QMFLAGS_NO_MUTEX_LOCK |
3125 			       QMFLAGS_NO_MUTEX_UNLOCK);
3126 	if (status != VCHIQ_SUCCESS)
3127 		goto unlock_both_error_exit;
3128 
3129 	queue->local_insert++;
3130 
3131 	mutex_unlock(&state->slot_mutex);
3132 	mutex_unlock(&service->bulk_mutex);
3133 
3134 	vchiq_log_trace(vchiq_core_log_level,
3135 		"%d: bt:%d %cx li=%x ri=%x p=%x",
3136 		state->id,
3137 		service->localport, dir_char,
3138 		queue->local_insert, queue->remote_insert, queue->process);
3139 
3140 waiting:
3141 	unlock_service(service);
3142 
3143 	status = VCHIQ_SUCCESS;
3144 
3145 	if (bulk_waiter) {
3146 		bulk_waiter->bulk = bulk;
3147 		if (wait_for_completion_interruptible(&bulk_waiter->event))
3148 			status = VCHIQ_RETRY;
3149 		else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
3150 			status = VCHIQ_ERROR;
3151 	}
3152 
3153 	return status;
3154 
3155 unlock_both_error_exit:
3156 	mutex_unlock(&state->slot_mutex);
3157 cancel_bulk_error_exit:
3158 	vchiq_complete_bulk(bulk);
3159 unlock_error_exit:
3160 	mutex_unlock(&service->bulk_mutex);
3161 
3162 error_exit:
3163 	if (service)
3164 		unlock_service(service);
3165 	return status;
3166 }
3167 
3168 enum vchiq_status
vchiq_queue_message(unsigned int handle,
3170 		    ssize_t (*copy_callback)(void *context, void *dest,
3171 					     size_t offset, size_t maxsize),
3172 		    void *context,
3173 		    size_t size)
3174 {
3175 	struct vchiq_service *service = find_service_by_handle(handle);
3176 	enum vchiq_status status = VCHIQ_ERROR;
3177 
3178 	if (!service ||
3179 		(vchiq_check_service(service) != VCHIQ_SUCCESS))
3180 		goto error_exit;
3181 
3182 	if (!size) {
3183 		VCHIQ_SERVICE_STATS_INC(service, error_count);
3184 		goto error_exit;
3185 
3186 	}
3187 
3188 	if (size > VCHIQ_MAX_MSG_SIZE) {
3189 		VCHIQ_SERVICE_STATS_INC(service, error_count);
3190 		goto error_exit;
3191 	}
3192 
3193 	switch (service->srvstate) {
3194 	case VCHIQ_SRVSTATE_OPEN:
3195 		status = queue_message(service->state, service,
3196 				VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
3197 					service->localport,
3198 					service->remoteport),
3199 				copy_callback, context, size, 1);
3200 		break;
3201 	case VCHIQ_SRVSTATE_OPENSYNC:
3202 		status = queue_message_sync(service->state, service,
3203 				VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
3204 					service->localport,
3205 					service->remoteport),
3206 				copy_callback, context, size, 1);
3207 		break;
3208 	default:
3209 		status = VCHIQ_ERROR;
3210 		break;
3211 	}
3212 
3213 error_exit:
3214 	if (service)
3215 		unlock_service(service);
3216 
3217 	return status;
3218 }
3219 
int vchiq_queue_kernel_message(unsigned int handle, void *data, unsigned int size)
3221 {
3222 	enum vchiq_status status;
3223 
3224 	while (1) {
3225 		status = vchiq_queue_message(handle, memcpy_copy_callback,
3226 					     data, size);
3227 
3228 		/*
3229 		 * vchiq_queue_message() may return VCHIQ_RETRY, so we need to
3230 		 * implement a retry mechanism since this function is supposed
3231 		 * to block until queued
3232 		 */
3233 		if (status != VCHIQ_RETRY)
3234 			break;
3235 
3236 		msleep(1);
3237 	}
3238 
3239 	return status;
3240 }
3241 EXPORT_SYMBOL(vchiq_queue_kernel_message);
3242 
3243 void
vchiq_release_message(unsigned int handle,
3245 		      struct vchiq_header *header)
3246 {
3247 	struct vchiq_service *service = find_service_by_handle(handle);
3248 	struct vchiq_shared_state *remote;
3249 	struct vchiq_state *state;
3250 	int slot_index;
3251 
3252 	if (!service)
3253 		return;
3254 
3255 	state = service->state;
3256 	remote = state->remote;
3257 
3258 	slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
3259 
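	/*
	 * Messages in the remote's data slots are released by dropping the
	 * slot claim; messages in the sync slot are released by signalling
	 * the remote directly.
	 */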
3260 	if ((slot_index >= remote->slot_first) &&
3261 		(slot_index <= remote->slot_last)) {
3262 		int msgid = header->msgid;
3263 
3264 		if (msgid & VCHIQ_MSGID_CLAIMED) {
3265 			struct vchiq_slot_info *slot_info =
3266 				SLOT_INFO_FROM_INDEX(state, slot_index);
3267 
3268 			release_slot(state, slot_info, header, service);
3269 		}
3270 	} else if (slot_index == remote->slot_sync)
3271 		release_message_sync(state, header);
3272 
3273 	unlock_service(service);
3274 }
3275 EXPORT_SYMBOL(vchiq_release_message);
3276 
3277 static void
release_message_sync(struct vchiq_state *state, struct vchiq_header *header)
3279 {
3280 	header->msgid = VCHIQ_MSGID_PADDING;
3281 	remote_event_signal(&state->remote->sync_release);
3282 }
3283 
3284 enum vchiq_status
vchiq_get_peer_version(unsigned int handle, short *peer_version)
3286 {
3287 	enum vchiq_status status = VCHIQ_ERROR;
3288 	struct vchiq_service *service = find_service_by_handle(handle);
3289 
3290 	if (!service ||
3291 	    (vchiq_check_service(service) != VCHIQ_SUCCESS) ||
3292 	    !peer_version)
3293 		goto exit;
3294 	*peer_version = service->peer_version;
3295 	status = VCHIQ_SUCCESS;
3296 
3297 exit:
3298 	if (service)
3299 		unlock_service(service);
3300 	return status;
3301 }
3302 EXPORT_SYMBOL(vchiq_get_peer_version);
3303 
void vchiq_get_config(struct vchiq_config *config)
3305 {
3306 	config->max_msg_size           = VCHIQ_MAX_MSG_SIZE;
3307 	config->bulk_threshold         = VCHIQ_MAX_MSG_SIZE;
3308 	config->max_outstanding_bulks  = VCHIQ_NUM_SERVICE_BULKS;
3309 	config->max_services           = VCHIQ_MAX_SERVICES;
3310 	config->version                = VCHIQ_VERSION;
3311 	config->version_min            = VCHIQ_VERSION_MIN;
3312 }
3313 
3314 enum vchiq_status
vchiq_set_service_option(unsigned int handle,
3316 	enum vchiq_service_option option, int value)
3317 {
3318 	struct vchiq_service *service = find_service_by_handle(handle);
3319 	enum vchiq_status status = VCHIQ_ERROR;
3320 
3321 	if (service) {
3322 		switch (option) {
3323 		case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
3324 			service->auto_close = value;
3325 			status = VCHIQ_SUCCESS;
3326 			break;
3327 
3328 		case VCHIQ_SERVICE_OPTION_SLOT_QUOTA: {
3329 			struct vchiq_service_quota *service_quota =
3330 				&service->state->service_quotas[
3331 					service->localport];
3332 			if (value == 0)
3333 				value = service->state->default_slot_quota;
3334 			if ((value >= service_quota->slot_use_count) &&
3335 				 (value < (unsigned short)~0)) {
3336 				service_quota->slot_quota = value;
3337 				if ((value >= service_quota->slot_use_count) &&
3338 					(service_quota->message_quota >=
3339 					 service_quota->message_use_count)) {
					/*
					 * Signal the service that it may have
					 * dropped below its quota.
					 */
3342 					complete(&service_quota->quota_event);
3343 				}
3344 				status = VCHIQ_SUCCESS;
3345 			}
3346 		} break;
3347 
3348 		case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA: {
3349 			struct vchiq_service_quota *service_quota =
3350 				&service->state->service_quotas[
3351 					service->localport];
3352 			if (value == 0)
3353 				value = service->state->default_message_quota;
3354 			if ((value >= service_quota->message_use_count) &&
3355 				 (value < (unsigned short)~0)) {
3356 				service_quota->message_quota = value;
3357 				if ((value >=
3358 					service_quota->message_use_count) &&
3359 					(service_quota->slot_quota >=
3360 					service_quota->slot_use_count))
					/*
					 * Signal the service that it may have
					 * dropped below its quota.
					 */
3363 					complete(&service_quota->quota_event);
3364 				status = VCHIQ_SUCCESS;
3365 			}
3366 		} break;
3367 
3368 		case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
3369 			if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
3370 				(service->srvstate ==
3371 				VCHIQ_SRVSTATE_LISTENING)) {
3372 				service->sync = value;
3373 				status = VCHIQ_SUCCESS;
3374 			}
3375 			break;
3376 
3377 		case VCHIQ_SERVICE_OPTION_TRACE:
3378 			service->trace = value;
3379 			status = VCHIQ_SUCCESS;
3380 			break;
3381 
3382 		default:
3383 			break;
3384 		}
3385 		unlock_service(service);
3386 	}
3387 
3388 	return status;
3389 }
3390 
3391 static int
vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
3393 			struct vchiq_shared_state *shared, const char *label)
3394 {
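	/*
	 * Names for the shared debug[] counters; index 0 holds the number of
	 * valid entries.
	 */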
3395 	static const char *const debug_names[] = {
3396 		"<entries>",
3397 		"SLOT_HANDLER_COUNT",
3398 		"SLOT_HANDLER_LINE",
3399 		"PARSE_LINE",
3400 		"PARSE_HEADER",
3401 		"PARSE_MSGID",
3402 		"AWAIT_COMPLETION_LINE",
3403 		"DEQUEUE_MESSAGE_LINE",
3404 		"SERVICE_CALLBACK_LINE",
3405 		"MSG_QUEUE_FULL_COUNT",
3406 		"COMPLETION_QUEUE_FULL_COUNT"
3407 	};
3408 	int i;
3409 	char buf[80];
3410 	int len;
3411 	int err;
3412 
3413 	len = scnprintf(buf, sizeof(buf),
3414 		"  %s: slots %d-%d tx_pos=%x recycle=%x",
3415 		label, shared->slot_first, shared->slot_last,
3416 		shared->tx_pos, shared->slot_queue_recycle);
3417 	err = vchiq_dump(dump_context, buf, len + 1);
3418 	if (err)
3419 		return err;
3420 
3421 	len = scnprintf(buf, sizeof(buf),
3422 		"    Slots claimed:");
3423 	err = vchiq_dump(dump_context, buf, len + 1);
3424 	if (err)
3425 		return err;
3426 
3427 	for (i = shared->slot_first; i <= shared->slot_last; i++) {
3428 		struct vchiq_slot_info slot_info =
3429 						*SLOT_INFO_FROM_INDEX(state, i);
3430 		if (slot_info.use_count != slot_info.release_count) {
3431 			len = scnprintf(buf, sizeof(buf),
3432 				"      %d: %d/%d", i, slot_info.use_count,
3433 				slot_info.release_count);
3434 			err = vchiq_dump(dump_context, buf, len + 1);
3435 			if (err)
3436 				return err;
3437 		}
3438 	}
3439 
3440 	for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
3441 		len = scnprintf(buf, sizeof(buf), "    DEBUG: %s = %d(%x)",
3442 			debug_names[i], shared->debug[i], shared->debug[i]);
3443 		err = vchiq_dump(dump_context, buf, len + 1);
3444 		if (err)
3445 			return err;
3446 	}
3447 	return 0;
3448 }
3449 
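/*
 * Write a human-readable summary of the whole vchiq state via
 * vchiq_dump(): connection state (the first line reads e.g. "State 0:
 * CONNECTED"), tx/rx positions, protocol version, optional statistics,
 * slot availability, both shared-state halves, and finally every
 * service with an allocated port.
 */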
int vchiq_dump_state(void *dump_context, struct vchiq_state *state)
{
	char buf[80];
	int len;
	int i;
	int err;

	len = scnprintf(buf, sizeof(buf), "State %d: %s", state->id,
		conn_state_names[state->conn_state]);
	err = vchiq_dump(dump_context, buf, len + 1);
	if (err)
		return err;

	len = scnprintf(buf, sizeof(buf),
		"  tx_pos=%x(@%pK), rx_pos=%x(@%pK)",
		state->local->tx_pos,
		state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
		state->rx_pos,
		state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
	err = vchiq_dump(dump_context, buf, len + 1);
	if (err)
		return err;

	len = scnprintf(buf, sizeof(buf),
		"  Version: %d (min %d)",
		VCHIQ_VERSION, VCHIQ_VERSION_MIN);
	err = vchiq_dump(dump_context, buf, len + 1);
	if (err)
		return err;

	if (VCHIQ_ENABLE_STATS) {
		len = scnprintf(buf, sizeof(buf),
			"  Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, error_count=%d",
			state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
			state->stats.error_count);
		err = vchiq_dump(dump_context, buf, len + 1);
		if (err)
			return err;
	}

	len = scnprintf(buf, sizeof(buf),
		"  Slots: %d available (%d data), %d recyclable, %d stalls (%d data)",
		((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
			state->local_tx_pos) / VCHIQ_SLOT_SIZE,
		state->data_quota - state->data_use_count,
		state->local->slot_queue_recycle - state->slot_queue_available,
		state->stats.slot_stalls, state->stats.data_stalls);
	err = vchiq_dump(dump_context, buf, len + 1);
	if (err)
		return err;

	err = vchiq_dump_platform_state(dump_context);
	if (err)
		return err;

	err = vchiq_dump_shared_state(dump_context,
				      state,
				      state->local,
				      "Local");
	if (err)
		return err;
	err = vchiq_dump_shared_state(dump_context,
				      state,
				      state->remote,
				      "Remote");
	if (err)
		return err;

	err = vchiq_dump_platform_instances(dump_context);
	if (err)
		return err;

	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service = find_service_by_port(state, i);

		if (service) {
			err = vchiq_dump_service_state(dump_context, service);
			unlock_service(service);
			if (err)
				return err;
		}
	}
	return 0;
}

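/*
 * Dump a single service: local port, service state and reference count
 * (minus the reference taken by the caller), then, for non-FREE
 * services, the fourcc, remote port, message/slot quota usage, pending
 * bulk transfers and, when statistics are enabled, the control/bulk
 * traffic and stall counters.
 */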
int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
{
	char buf[80];
	int len;
	int err;
	unsigned int ref_count;

	/* Don't include the lock just taken */
	ref_count = kref_read(&service->ref_count) - 1;
	len = scnprintf(buf, sizeof(buf), "Service %u: %s (ref %u)",
			service->localport, srvstate_names[service->srvstate],
			ref_count);

	if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
		char remoteport[30];
		struct vchiq_service_quota *service_quota =
			&service->state->service_quotas[service->localport];
		int fourcc = service->base.fourcc;
		int tx_pending, rx_pending;

		if (service->remoteport != VCHIQ_PORT_FREE) {
			int len2 = scnprintf(remoteport, sizeof(remoteport),
				"%u", service->remoteport);

			if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
				scnprintf(remoteport + len2,
					sizeof(remoteport) - len2,
					" (client %x)", service->client_id);
		} else {
			strcpy(remoteport, "n/a");
		}

		len += scnprintf(buf + len, sizeof(buf) - len,
			" '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
			VCHIQ_FOURCC_AS_4CHARS(fourcc),
			remoteport,
			service_quota->message_use_count,
			service_quota->message_quota,
			service_quota->slot_use_count,
			service_quota->slot_quota);

		err = vchiq_dump(dump_context, buf, len + 1);
		if (err)
			return err;

		tx_pending = service->bulk_tx.local_insert -
			service->bulk_tx.remote_insert;

		rx_pending = service->bulk_rx.local_insert -
			service->bulk_rx.remote_insert;

		len = scnprintf(buf, sizeof(buf),
			"  Bulk: tx_pending=%d (size %d), rx_pending=%d (size %d)",
			tx_pending,
			tx_pending ? service->bulk_tx.bulks[
			BULK_INDEX(service->bulk_tx.remove)].size : 0,
			rx_pending,
			rx_pending ? service->bulk_rx.bulks[
			BULK_INDEX(service->bulk_rx.remove)].size : 0);

		if (VCHIQ_ENABLE_STATS) {
			err = vchiq_dump(dump_context, buf, len + 1);
			if (err)
				return err;

			len = scnprintf(buf, sizeof(buf),
				"  Ctrl: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
				service->stats.ctrl_tx_count,
				service->stats.ctrl_tx_bytes,
				service->stats.ctrl_rx_count,
				service->stats.ctrl_rx_bytes);
			err = vchiq_dump(dump_context, buf, len + 1);
			if (err)
				return err;

			len = scnprintf(buf, sizeof(buf),
				"  Bulk: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
				service->stats.bulk_tx_count,
				service->stats.bulk_tx_bytes,
				service->stats.bulk_rx_count,
				service->stats.bulk_rx_bytes);
			err = vchiq_dump(dump_context, buf, len + 1);
			if (err)
				return err;

			len = scnprintf(buf, sizeof(buf),
				"  %d quota stalls, %d slot stalls, %d bulk stalls, %d aborted, %d errors",
				service->stats.quota_stalls,
				service->stats.slot_stalls,
				service->stats.bulk_stalls,
				service->stats.bulk_aborted_count,
				service->stats.error_count);
		}
	}

	err = vchiq_dump(dump_context, buf, len + 1);
	if (err)
		return err;

	if (service->srvstate != VCHIQ_SRVSTATE_FREE)
		err = vchiq_dump_platform_service_state(dump_context, service);
	return err;
}

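/*
 * Print an attention-grabbing banner before a serious error report;
 * intended to be paired with vchiq_loud_error_footer() below.
 */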
void
vchiq_loud_error_header(void)
{
	vchiq_log_error(vchiq_core_log_level,
		"============================================================"
		"================");
	vchiq_log_error(vchiq_core_log_level,
		"============================================================"
		"================");
	vchiq_log_error(vchiq_core_log_level, "=====");
}

void
vchiq_loud_error_footer(void)
{
	vchiq_log_error(vchiq_core_log_level, "=====");
	vchiq_log_error(vchiq_core_log_level,
		"============================================================"
		"================");
	vchiq_log_error(vchiq_core_log_level,
		"============================================================"
		"================");
}

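/*
 * Queue a REMOTE_USE message to the peer. Returns VCHIQ_RETRY while the
 * state is still disconnected, otherwise whatever queue_message()
 * reports.
 */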
enum vchiq_status vchiq_send_remote_use(struct vchiq_state *state)
{
	enum vchiq_status status = VCHIQ_RETRY;

	if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
		status = queue_message(state, NULL,
			VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE, 0, 0),
			NULL, NULL, 0, 0);
	return status;
}

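/*
 * As vchiq_send_remote_use(), but queues a REMOTE_USE_ACTIVE message
 * instead.
 */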
enum vchiq_status vchiq_send_remote_use_active(struct vchiq_state *state)
{
	enum vchiq_status status = VCHIQ_RETRY;

	if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
		status = queue_message(state, NULL,
			VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE_ACTIVE, 0, 0),
			NULL, NULL, 0, 0);
	return status;
}

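/*
 * Trace-level hex/ASCII dump of num_bytes starting at void_mem, 16
 * bytes per line. 'addr' is only used to label each line and advances
 * by 16 per line; it need not match the virtual address in void_mem.
 *
 * Illustrative call (buffer and length are hypothetical):
 *	vchiq_log_dump_mem("slot", 0, slot_data, 64);
 */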
void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem,
	size_t num_bytes)
{
	const u8  *mem = void_mem;
	size_t          offset;
	char            line_buf[100];
	char           *s;

	while (num_bytes > 0) {
		s = line_buf;

		/* Hex portion: up to 16 bytes, padded with spaces */
		for (offset = 0; offset < 16; offset++) {
			if (offset < num_bytes)
				s += scnprintf(s, 4, "%02x ", mem[offset]);
			else
				s += scnprintf(s, 4, "   ");
		}

		/* ASCII portion: non-printable bytes shown as '.' */
		for (offset = 0; offset < 16; offset++) {
			if (offset < num_bytes) {
				u8 ch = mem[offset];

				if ((ch < ' ') || (ch > '~'))
					ch = '.';
				*s++ = (char)ch;
			}
		}
		*s++ = '\0';

		if (label && (*label != '\0'))
			vchiq_log_trace(VCHIQ_LOG_TRACE,
				"%s: %08x: %s", label, addr, line_buf);
		else
			vchiq_log_trace(VCHIQ_LOG_TRACE,
				"%08x: %s", addr, line_buf);

		addr += 16;
		mem += 16;
		if (num_bytes > 16)
			num_bytes -= 16;
		else
			num_bytes = 0;
	}
}