1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
3
4 #include <linux/types.h>
5 #include <linux/completion.h>
6 #include <linux/mutex.h>
7 #include <linux/bitops.h>
8 #include <linux/kthread.h>
9 #include <linux/wait.h>
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/kref.h>
13 #include <linux/rcupdate.h>
14 #include <linux/sched/signal.h>
15
16 #include "vchiq_arm.h"
17 #include "vchiq_core.h"
18
19 #define VCHIQ_SLOT_HANDLER_STACK 8192
20
21 #define VCHIQ_MSG_PADDING 0 /* - */
22 #define VCHIQ_MSG_CONNECT 1 /* - */
23 #define VCHIQ_MSG_OPEN 2 /* + (srcport, -), fourcc, client_id */
24 #define VCHIQ_MSG_OPENACK 3 /* + (srcport, dstport) */
25 #define VCHIQ_MSG_CLOSE 4 /* + (srcport, dstport) */
26 #define VCHIQ_MSG_DATA 5 /* + (srcport, dstport) */
27 #define VCHIQ_MSG_BULK_RX 6 /* + (srcport, dstport), data, size */
28 #define VCHIQ_MSG_BULK_TX 7 /* + (srcport, dstport), data, size */
29 #define VCHIQ_MSG_BULK_RX_DONE 8 /* + (srcport, dstport), actual */
30 #define VCHIQ_MSG_BULK_TX_DONE 9 /* + (srcport, dstport), actual */
31 #define VCHIQ_MSG_PAUSE 10 /* - */
32 #define VCHIQ_MSG_RESUME 11 /* - */
33 #define VCHIQ_MSG_REMOTE_USE 12 /* - */
34 #define VCHIQ_MSG_REMOTE_RELEASE 13 /* - */
35 #define VCHIQ_MSG_REMOTE_USE_ACTIVE 14 /* - */
36
37 #define TYPE_SHIFT 24
38
39 #define VCHIQ_PORT_MAX (VCHIQ_MAX_SERVICES - 1)
40 #define VCHIQ_PORT_FREE 0x1000
41 #define VCHIQ_PORT_IS_VALID(port) ((port) < VCHIQ_PORT_FREE)
42 #define VCHIQ_MAKE_MSG(type, srcport, dstport) \
43 (((type) << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
44 #define VCHIQ_MSG_TYPE(msgid) ((unsigned int)(msgid) >> TYPE_SHIFT)
45 #define VCHIQ_MSG_SRCPORT(msgid) \
46 (unsigned short)(((unsigned int)(msgid) >> 12) & 0xfff)
47 #define VCHIQ_MSG_DSTPORT(msgid) \
48 ((unsigned short)(msgid) & 0xfff)
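/*
 * For example, VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA, 3, 7) packs the type into bits
 * 31:24, the source port into bits 23:12 and the destination port into bits
 * 11:0, giving 0x05003007; VCHIQ_MSG_TYPE(), VCHIQ_MSG_SRCPORT() and
 * VCHIQ_MSG_DSTPORT() recover 5, 3 and 7 from that value.
 */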
49
50 #define MAKE_CONNECT (VCHIQ_MSG_CONNECT << TYPE_SHIFT)
51 #define MAKE_OPEN(srcport) \
52 ((VCHIQ_MSG_OPEN << TYPE_SHIFT) | ((srcport) << 12))
53 #define MAKE_OPENACK(srcport, dstport) \
54 ((VCHIQ_MSG_OPENACK << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
55 #define MAKE_CLOSE(srcport, dstport) \
56 ((VCHIQ_MSG_CLOSE << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
57 #define MAKE_DATA(srcport, dstport) \
58 ((VCHIQ_MSG_DATA << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
59 #define MAKE_PAUSE (VCHIQ_MSG_PAUSE << TYPE_SHIFT)
60 #define MAKE_RESUME (VCHIQ_MSG_RESUME << TYPE_SHIFT)
61 #define MAKE_REMOTE_USE (VCHIQ_MSG_REMOTE_USE << TYPE_SHIFT)
62 #define MAKE_REMOTE_USE_ACTIVE (VCHIQ_MSG_REMOTE_USE_ACTIVE << TYPE_SHIFT)
63
64 /* Ensure the fields are wide enough */
65 static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX))
66 == 0);
67 static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0);
68 static_assert((unsigned int)VCHIQ_PORT_MAX <
69 (unsigned int)VCHIQ_PORT_FREE);
70
71 #define VCHIQ_MSGID_PADDING VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0)
72 #define VCHIQ_MSGID_CLAIMED 0x40000000
73
74 #define VCHIQ_FOURCC_INVALID 0x00000000
75 #define VCHIQ_FOURCC_IS_LEGAL(fourcc) ((fourcc) != VCHIQ_FOURCC_INVALID)
76
77 #define VCHIQ_BULK_ACTUAL_ABORTED -1
78
79 #if VCHIQ_ENABLE_STATS
80 #define VCHIQ_STATS_INC(state, stat) (state->stats. stat++)
81 #define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats. stat++)
82 #define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \
83 (service->stats. stat += addend)
84 #else
85 #define VCHIQ_STATS_INC(state, stat) ((void)0)
86 #define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0)
87 #define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0)
88 #endif
89
90 #define HANDLE_STATE_SHIFT 12
91
92 #define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
93 #define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
94 #define SLOT_INDEX_FROM_DATA(state, data) \
95 (((unsigned int)((char *)data - (char *)state->slot_data)) / \
96 VCHIQ_SLOT_SIZE)
97 #define SLOT_INDEX_FROM_INFO(state, info) \
98 ((unsigned int)(info - state->slot_info))
99 #define SLOT_QUEUE_INDEX_FROM_POS(pos) \
100 ((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
101 #define SLOT_QUEUE_INDEX_FROM_POS_MASKED(pos) \
102 (SLOT_QUEUE_INDEX_FROM_POS(pos) & VCHIQ_SLOT_QUEUE_MASK)
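/*
 * As an illustration, assuming the usual 4096-byte VCHIQ_SLOT_SIZE (defined
 * in vchiq_core.h), SLOT_QUEUE_INDEX_FROM_POS(0x2010) == 2, and the MASKED
 * variant additionally wraps that index into the circular slot queue.
 */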
103
104 #define BULK_INDEX(x) ((x) & (VCHIQ_NUM_SERVICE_BULKS - 1))
105
106 #define NO_CLOSE_RECVD 0
107 #define CLOSE_RECVD 1
108
109 #define NO_RETRY_POLL 0
110 #define RETRY_POLL 1
111
112 struct vchiq_open_payload {
113 int fourcc;
114 int client_id;
115 short version;
116 short version_min;
117 };
118
119 struct vchiq_openack_payload {
120 short version;
121 };
122
123 enum {
124 QMFLAGS_IS_BLOCKING = BIT(0),
125 QMFLAGS_NO_MUTEX_LOCK = BIT(1),
126 QMFLAGS_NO_MUTEX_UNLOCK = BIT(2)
127 };
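/*
 * queue_message() flag summary: QMFLAGS_IS_BLOCKING makes reserve_space()
 * wait for a free slot; QMFLAGS_NO_MUTEX_LOCK skips taking slot_mutex (the
 * caller is expected to hold it already); QMFLAGS_NO_MUTEX_UNLOCK leaves
 * slot_mutex held on return, e.g. when replying to a PAUSE, where the mutex
 * must stay held until resume.
 */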
128
129 enum {
130 VCHIQ_POLL_TERMINATE,
131 VCHIQ_POLL_REMOVE,
132 VCHIQ_POLL_TXNOTIFY,
133 VCHIQ_POLL_RXNOTIFY,
134 VCHIQ_POLL_COUNT
135 };
136
137 /* we require this for consistency between endpoints */
138 static_assert(sizeof(struct vchiq_header) == 8);
139 static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
140
141 static inline void check_sizes(void)
142 {
143 BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_SLOT_SIZE);
144 BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SLOTS);
145 BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SLOTS_PER_SIDE);
146 BUILD_BUG_ON_NOT_POWER_OF_2(sizeof(struct vchiq_header));
147 BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_NUM_CURRENT_BULKS);
148 BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_NUM_SERVICE_BULKS);
149 BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SERVICES);
150 }
151
152 DEFINE_SPINLOCK(bulk_waiter_spinlock);
153 static DEFINE_SPINLOCK(quota_spinlock);
154
155 static unsigned int handle_seq;
156
157 static const char *const srvstate_names[] = {
158 "FREE",
159 "HIDDEN",
160 "LISTENING",
161 "OPENING",
162 "OPEN",
163 "OPENSYNC",
164 "CLOSESENT",
165 "CLOSERECVD",
166 "CLOSEWAIT",
167 "CLOSED"
168 };
169
170 static const char *const reason_names[] = {
171 "SERVICE_OPENED",
172 "SERVICE_CLOSED",
173 "MESSAGE_AVAILABLE",
174 "BULK_TRANSMIT_DONE",
175 "BULK_RECEIVE_DONE",
176 "BULK_TRANSMIT_ABORTED",
177 "BULK_RECEIVE_ABORTED"
178 };
179
180 static const char *const conn_state_names[] = {
181 "DISCONNECTED",
182 "CONNECTING",
183 "CONNECTED",
184 "PAUSING",
185 "PAUSE_SENT",
186 "PAUSED",
187 "RESUMING",
188 "PAUSE_TIMEOUT",
189 "RESUME_TIMEOUT"
190 };
191
192 static void
193 release_message_sync(struct vchiq_state *state, struct vchiq_header *header);
194
195 static const char *msg_type_str(unsigned int msg_type)
196 {
197 switch (msg_type) {
198 case VCHIQ_MSG_PADDING: return "PADDING";
199 case VCHIQ_MSG_CONNECT: return "CONNECT";
200 case VCHIQ_MSG_OPEN: return "OPEN";
201 case VCHIQ_MSG_OPENACK: return "OPENACK";
202 case VCHIQ_MSG_CLOSE: return "CLOSE";
203 case VCHIQ_MSG_DATA: return "DATA";
204 case VCHIQ_MSG_BULK_RX: return "BULK_RX";
205 case VCHIQ_MSG_BULK_TX: return "BULK_TX";
206 case VCHIQ_MSG_BULK_RX_DONE: return "BULK_RX_DONE";
207 case VCHIQ_MSG_BULK_TX_DONE: return "BULK_TX_DONE";
208 case VCHIQ_MSG_PAUSE: return "PAUSE";
209 case VCHIQ_MSG_RESUME: return "RESUME";
210 case VCHIQ_MSG_REMOTE_USE: return "REMOTE_USE";
211 case VCHIQ_MSG_REMOTE_RELEASE: return "REMOTE_RELEASE";
212 case VCHIQ_MSG_REMOTE_USE_ACTIVE: return "REMOTE_USE_ACTIVE";
213 }
214 return "???";
215 }
216
217 static inline void
218 set_service_state(struct vchiq_service *service, int newstate)
219 {
220 dev_dbg(service->state->dev, "core: %d: srv:%d %s->%s\n",
221 service->state->id, service->localport,
222 srvstate_names[service->srvstate],
223 srvstate_names[newstate]);
224 service->srvstate = newstate;
225 }
226
227 struct vchiq_service *handle_to_service(struct vchiq_instance *instance, unsigned int handle)
228 {
229 int idx = handle & (VCHIQ_MAX_SERVICES - 1);
230
231 return rcu_dereference(instance->state->services[idx]);
232 }
233 struct vchiq_service *
234 find_service_by_handle(struct vchiq_instance *instance, unsigned int handle)
235 {
236 struct vchiq_service *service;
237
238 rcu_read_lock();
239 service = handle_to_service(instance, handle);
240 if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
241 service->handle == handle &&
242 kref_get_unless_zero(&service->ref_count)) {
243 service = rcu_pointer_handoff(service);
244 rcu_read_unlock();
245 return service;
246 }
247 rcu_read_unlock();
248 dev_dbg(instance->state->dev, "core: Invalid service handle 0x%x\n", handle);
249 return NULL;
250 }
251
252 struct vchiq_service *
253 find_service_by_port(struct vchiq_state *state, unsigned int localport)
254 {
255 if (localport <= VCHIQ_PORT_MAX) {
256 struct vchiq_service *service;
257
258 rcu_read_lock();
259 service = rcu_dereference(state->services[localport]);
260 if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
261 kref_get_unless_zero(&service->ref_count)) {
262 service = rcu_pointer_handoff(service);
263 rcu_read_unlock();
264 return service;
265 }
266 rcu_read_unlock();
267 }
268 dev_dbg(state->dev, "core: Invalid port %u\n", localport);
269 return NULL;
270 }
271
272 struct vchiq_service *
273 find_service_for_instance(struct vchiq_instance *instance, unsigned int handle)
274 {
275 struct vchiq_service *service;
276
277 rcu_read_lock();
278 service = handle_to_service(instance, handle);
279 if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
280 service->handle == handle &&
281 service->instance == instance &&
282 kref_get_unless_zero(&service->ref_count)) {
283 service = rcu_pointer_handoff(service);
284 rcu_read_unlock();
285 return service;
286 }
287 rcu_read_unlock();
288 dev_dbg(instance->state->dev, "core: Invalid service handle 0x%x\n", handle);
289 return NULL;
290 }
291
292 struct vchiq_service *
293 find_closed_service_for_instance(struct vchiq_instance *instance, unsigned int handle)
294 {
295 struct vchiq_service *service;
296
297 rcu_read_lock();
298 service = handle_to_service(instance, handle);
299 if (service &&
300 (service->srvstate == VCHIQ_SRVSTATE_FREE ||
301 service->srvstate == VCHIQ_SRVSTATE_CLOSED) &&
302 service->handle == handle &&
303 service->instance == instance &&
304 kref_get_unless_zero(&service->ref_count)) {
305 service = rcu_pointer_handoff(service);
306 rcu_read_unlock();
307 return service;
308 }
309 rcu_read_unlock();
310 dev_dbg(instance->state->dev, "core: Invalid service handle 0x%x\n", handle);
311 return service;
312 }
313
314 struct vchiq_service *
315 __next_service_by_instance(struct vchiq_state *state,
316 struct vchiq_instance *instance,
317 int *pidx)
318 {
319 struct vchiq_service *service = NULL;
320 int idx = *pidx;
321
322 while (idx < state->unused_service) {
323 struct vchiq_service *srv;
324
325 srv = rcu_dereference(state->services[idx]);
326 idx++;
327 if (srv && srv->srvstate != VCHIQ_SRVSTATE_FREE &&
328 srv->instance == instance) {
329 service = srv;
330 break;
331 }
332 }
333
334 *pidx = idx;
335 return service;
336 }
337
338 struct vchiq_service *
339 next_service_by_instance(struct vchiq_state *state,
340 struct vchiq_instance *instance,
341 int *pidx)
342 {
343 struct vchiq_service *service;
344
345 rcu_read_lock();
346 while (1) {
347 service = __next_service_by_instance(state, instance, pidx);
348 if (!service)
349 break;
350 if (kref_get_unless_zero(&service->ref_count)) {
351 service = rcu_pointer_handoff(service);
352 break;
353 }
354 }
355 rcu_read_unlock();
356 return service;
357 }
358
359 void
360 vchiq_service_get(struct vchiq_service *service)
361 {
362 if (!service) {
363 WARN(1, "%s service is NULL\n", __func__);
364 return;
365 }
366 kref_get(&service->ref_count);
367 }
368
369 static void service_release(struct kref *kref)
370 {
371 struct vchiq_service *service =
372 container_of(kref, struct vchiq_service, ref_count);
373 struct vchiq_state *state = service->state;
374
375 WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
376 rcu_assign_pointer(state->services[service->localport], NULL);
377 if (service->userdata_term)
378 service->userdata_term(service->base.userdata);
379 kfree_rcu(service, rcu);
380 }
381
382 void
383 vchiq_service_put(struct vchiq_service *service)
384 {
385 if (!service) {
386 WARN(1, "%s: service is NULL\n", __func__);
387 return;
388 }
389 kref_put(&service->ref_count, service_release);
390 }
391
392 int
393 vchiq_get_client_id(struct vchiq_instance *instance, unsigned int handle)
394 {
395 struct vchiq_service *service;
396 int id;
397
398 rcu_read_lock();
399 service = handle_to_service(instance, handle);
400 id = service ? service->client_id : 0;
401 rcu_read_unlock();
402 return id;
403 }
404
405 void *
406 vchiq_get_service_userdata(struct vchiq_instance *instance, unsigned int handle)
407 {
408 void *userdata;
409 struct vchiq_service *service;
410
411 rcu_read_lock();
412 service = handle_to_service(instance, handle);
413 userdata = service ? service->base.userdata : NULL;
414 rcu_read_unlock();
415 return userdata;
416 }
417 EXPORT_SYMBOL(vchiq_get_service_userdata);
418
419 static void
420 mark_service_closing_internal(struct vchiq_service *service, int sh_thread)
421 {
422 struct vchiq_state *state = service->state;
423 struct vchiq_service_quota *quota;
424
425 service->closing = 1;
426
427 /* Synchronise with other threads. */
428 mutex_lock(&state->recycle_mutex);
429 mutex_unlock(&state->recycle_mutex);
430 if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
431 /*
432 * If we're pausing then the slot_mutex is held until resume
433 * by the slot handler. Therefore don't try to acquire this
434 * mutex if we're the slot handler and in the pause sent state.
435 * We don't need to in this case anyway.
436 */
437 mutex_lock(&state->slot_mutex);
438 mutex_unlock(&state->slot_mutex);
439 }
440
441 /* Unblock any sending thread. */
442 quota = &state->service_quotas[service->localport];
443 complete(&quota->quota_event);
444 }
445
446 static void
447 mark_service_closing(struct vchiq_service *service)
448 {
449 mark_service_closing_internal(service, 0);
450 }
451
452 static inline int
453 make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
454 struct vchiq_header *header, void *bulk_userdata)
455 {
456 int status;
457
458 dev_dbg(service->state->dev, "core: %d: callback:%d (%s, %pK, %pK)\n",
459 service->state->id, service->localport, reason_names[reason],
460 header, bulk_userdata);
461 status = service->base.callback(service->instance, reason, header, service->handle,
462 bulk_userdata);
463 if (status && (status != -EAGAIN)) {
464 dev_warn(service->state->dev,
465 "core: %d: ignoring ERROR from callback to service %x\n",
466 service->state->id, service->handle);
467 status = 0;
468 }
469
470 if (reason != VCHIQ_MESSAGE_AVAILABLE)
471 vchiq_release_message(service->instance, service->handle, header);
472
473 return status;
474 }
475
476 inline void
477 vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate)
478 {
479 enum vchiq_connstate oldstate = state->conn_state;
480
481 dev_dbg(state->dev, "core: %d: %s->%s\n",
482 state->id, conn_state_names[oldstate], conn_state_names[newstate]);
483 state->conn_state = newstate;
484 vchiq_platform_conn_state_changed(state, oldstate, newstate);
485 }
486
487 /* This initialises a single remote_event, and the associated wait_queue. */
488 static inline void
489 remote_event_create(wait_queue_head_t *wq, struct remote_event *event)
490 {
491 event->armed = 0;
492 /*
493 * Don't clear the 'fired' flag because it may already have been set
494 * by the other side.
495 */
496 init_waitqueue_head(wq);
497 }
498
499 /*
500 * All the event waiting routines in VCHIQ used a custom semaphore
501 * implementation that filtered most signals. This achieved a behaviour similar
502  * to the "killable" family of functions. While cleaning up this code, all the
503  * routines were switched to the "interruptible" family of functions, as the
504  * former was deemed unjustified and using the "killable" variants put all of
505  * VCHIQ's threads in the D state.
506 */
507 static inline int
508 remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
509 {
510 if (!event->fired) {
511 event->armed = 1;
512 dsb(sy);
513 if (wait_event_interruptible(*wq, event->fired)) {
514 event->armed = 0;
515 return 0;
516 }
517 event->armed = 0;
518 /* Ensure that the peer sees that we are not waiting (armed == 0). */
519 wmb();
520 }
521
522 event->fired = 0;
523 return 1;
524 }
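/*
 * A rough sketch of the armed/fired handshake used here: the waiter sets
 * 'armed' before sleeping so the signalling side knows a wake-up (doorbell)
 * is needed, the signaller sets 'fired' and wakes the queue, and the waiter
 * clears both flags before consuming the event. 'fired' is deliberately left
 * alone by remote_event_create() in case the peer has already signalled.
 */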
525
526 /*
527 * Acknowledge that the event has been signalled, and wake any waiters. Usually
528 * called as a result of the doorbell being rung.
529 */
530 static inline void
531 remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event)
532 {
533 event->fired = 1;
534 event->armed = 0;
535 wake_up_all(wq);
536 }
537
538 /* Check if a single event has been signalled, waking the waiters if it has. */
539 static inline void
540 remote_event_poll(wait_queue_head_t *wq, struct remote_event *event)
541 {
542 if (event->fired && event->armed)
543 remote_event_signal_local(wq, event);
544 }
545
546 /*
547  * VCHIQ uses a small, fixed number of remote events. It is simplest to
548 * enumerate them here for polling.
549 */
550 void
551 remote_event_pollall(struct vchiq_state *state)
552 {
553 remote_event_poll(&state->sync_trigger_event, &state->local->sync_trigger);
554 remote_event_poll(&state->sync_release_event, &state->local->sync_release);
555 remote_event_poll(&state->trigger_event, &state->local->trigger);
556 remote_event_poll(&state->recycle_event, &state->local->recycle);
557 }
558
559 /*
560 * Round up message sizes so that any space at the end of a slot is always big
561 * enough for a header. This relies on header size being a power of two, which
562 * has been verified earlier by a static assertion.
563 */
564
565 static inline size_t
566 calc_stride(size_t size)
567 {
568 /* Allow room for the header */
569 size += sizeof(struct vchiq_header);
570
571 /* Round up */
572 return (size + sizeof(struct vchiq_header) - 1) &
573 ~(sizeof(struct vchiq_header) - 1);
574 }
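/*
 * For example, with the 8-byte header asserted above, calc_stride(0) == 8
 * and calc_stride(5) == 16: the payload plus header is rounded up to the
 * next multiple of sizeof(struct vchiq_header).
 */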
575
576 /* Called by the slot handler thread */
577 static struct vchiq_service *
578 get_listening_service(struct vchiq_state *state, int fourcc)
579 {
580 int i;
581
582 WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
583
584 rcu_read_lock();
585 for (i = 0; i < state->unused_service; i++) {
586 struct vchiq_service *service;
587
588 service = rcu_dereference(state->services[i]);
589 if (service &&
590 service->public_fourcc == fourcc &&
591 (service->srvstate == VCHIQ_SRVSTATE_LISTENING ||
592 (service->srvstate == VCHIQ_SRVSTATE_OPEN &&
593 service->remoteport == VCHIQ_PORT_FREE)) &&
594 kref_get_unless_zero(&service->ref_count)) {
595 service = rcu_pointer_handoff(service);
596 rcu_read_unlock();
597 return service;
598 }
599 }
600 rcu_read_unlock();
601 return NULL;
602 }
603
604 /* Called by the slot handler thread */
605 static struct vchiq_service *
606 get_connected_service(struct vchiq_state *state, unsigned int port)
607 {
608 int i;
609
610 rcu_read_lock();
611 for (i = 0; i < state->unused_service; i++) {
612 struct vchiq_service *service =
613 rcu_dereference(state->services[i]);
614
615 if (service && service->srvstate == VCHIQ_SRVSTATE_OPEN &&
616 service->remoteport == port &&
617 kref_get_unless_zero(&service->ref_count)) {
618 service = rcu_pointer_handoff(service);
619 rcu_read_unlock();
620 return service;
621 }
622 }
623 rcu_read_unlock();
624 return NULL;
625 }
626
627 inline void
628 request_poll(struct vchiq_state *state, struct vchiq_service *service,
629 int poll_type)
630 {
631 u32 value;
632 int index;
633
634 if (!service)
635 goto skip_service;
636
637 do {
638 value = atomic_read(&service->poll_flags);
639 } while (atomic_cmpxchg(&service->poll_flags, value,
640 value | BIT(poll_type)) != value);
641
642 index = BITSET_WORD(service->localport);
643 do {
644 value = atomic_read(&state->poll_services[index]);
645 } while (atomic_cmpxchg(&state->poll_services[index],
646 value, value | BIT(service->localport & 0x1f)) != value);
647
648 skip_service:
649 state->poll_needed = 1;
650 /* Ensure the slot handler thread sees the poll_needed flag. */
651 wmb();
652
653 /* ... and ensure the slot handler runs. */
654 remote_event_signal_local(&state->trigger_event, &state->local->trigger);
655 }
656
657 /*
658 * Called from queue_message, by the slot handler and application threads,
659 * with slot_mutex held
660 */
661 static struct vchiq_header *
662 reserve_space(struct vchiq_state *state, size_t space, int is_blocking)
663 {
664 struct vchiq_shared_state *local = state->local;
665 int tx_pos = state->local_tx_pos;
666 int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);
667
668 if (space > slot_space) {
669 struct vchiq_header *header;
670 /* Fill the remaining space with padding */
671 WARN_ON(!state->tx_data);
672 header = (struct vchiq_header *)
673 (state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
674 header->msgid = VCHIQ_MSGID_PADDING;
675 header->size = slot_space - sizeof(struct vchiq_header);
676
677 tx_pos += slot_space;
678 }
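/*
 * Because calc_stride() keeps every message a multiple of
 * sizeof(struct vchiq_header), tx_pos stays header-aligned and any
 * remaining slot_space is at least one header in size, so the padding
 * header above always fits.
 */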
679
680 /* If necessary, get the next slot. */
681 if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
682 int slot_index;
683
684 /* If there is no free slot... */
685
686 if (!try_wait_for_completion(&state->slot_available_event)) {
687 /* ...wait for one. */
688
689 VCHIQ_STATS_INC(state, slot_stalls);
690
691 /* But first, flush through the last slot. */
692 state->local_tx_pos = tx_pos;
693 local->tx_pos = tx_pos;
694 remote_event_signal(&state->remote->trigger);
695
696 if (!is_blocking ||
697 (wait_for_completion_interruptible(&state->slot_available_event)))
698 return NULL; /* No space available */
699 }
700
701 if (tx_pos == (state->slot_queue_available * VCHIQ_SLOT_SIZE)) {
702 complete(&state->slot_available_event);
703 pr_warn("%s: invalid tx_pos: %d\n", __func__, tx_pos);
704 return NULL;
705 }
706
707 slot_index = local->slot_queue[SLOT_QUEUE_INDEX_FROM_POS_MASKED(tx_pos)];
708 state->tx_data =
709 (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
710 }
711
712 state->local_tx_pos = tx_pos + space;
713
714 return (struct vchiq_header *)(state->tx_data +
715 (tx_pos & VCHIQ_SLOT_MASK));
716 }
717
718 static void
719 process_free_data_message(struct vchiq_state *state, u32 *service_found,
720 struct vchiq_header *header)
721 {
722 int msgid = header->msgid;
723 int port = VCHIQ_MSG_SRCPORT(msgid);
724 struct vchiq_service_quota *quota = &state->service_quotas[port];
725 int count;
726
727 spin_lock(&quota_spinlock);
728 count = quota->message_use_count;
729 if (count > 0)
730 quota->message_use_count = count - 1;
731 spin_unlock(&quota_spinlock);
732
733 if (count == quota->message_quota) {
734 /*
735 * Signal the service that it
736 * has dropped below its quota
737 */
738 complete(&quota->quota_event);
739 } else if (count == 0) {
740 dev_err(state->dev,
741 "core: service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)\n",
742 port, quota->message_use_count, header, msgid,
743 header->msgid, header->size);
744 WARN(1, "invalid message use count\n");
745 }
746 if (!BITSET_IS_SET(service_found, port)) {
747 /* Set the found bit for this service */
748 BITSET_SET(service_found, port);
749
750 spin_lock(&quota_spinlock);
751 count = quota->slot_use_count;
752 if (count > 0)
753 quota->slot_use_count = count - 1;
754 spin_unlock(&quota_spinlock);
755
756 if (count > 0) {
757 /*
758 * Signal the service in case
759 * it has dropped below its quota
760 */
761 complete(&quota->quota_event);
762 dev_dbg(state->dev, "core: %d: pfq:%d %x@%pK - slot_use->%d\n",
763 state->id, port, header->size, header, count - 1);
764 } else {
765 dev_err(state->dev,
766 "core: service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)\n",
767 port, count, header, msgid, header->msgid, header->size);
768 WARN(1, "bad slot use count\n");
769 }
770 }
771 }
772
773 /* Called by the recycle thread. */
774 static void
775 process_free_queue(struct vchiq_state *state, u32 *service_found,
776 size_t length)
777 {
778 struct vchiq_shared_state *local = state->local;
779 int slot_queue_available;
780
781 /*
782 * Find slots which have been freed by the other side, and return them
783 * to the available queue.
784 */
785 slot_queue_available = state->slot_queue_available;
786
787 /*
788 * Use a memory barrier to ensure that any state that may have been
789 * modified by another thread is not masked by stale prefetched
790 * values.
791 */
792 mb();
793
794 while (slot_queue_available != local->slot_queue_recycle) {
795 unsigned int pos;
796 int slot_index = local->slot_queue[slot_queue_available &
797 VCHIQ_SLOT_QUEUE_MASK];
798 char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
799 int data_found = 0;
800
801 slot_queue_available++;
802 /*
803 * Beware of the address dependency - data is calculated
804 * using an index written by the other side.
805 */
806 rmb();
807
808 dev_dbg(state->dev, "core: %d: pfq %d=%pK %x %x\n",
809 state->id, slot_index, data, local->slot_queue_recycle,
810 slot_queue_available);
811
812 /* Initialise the bitmask for services which have used this slot */
813 memset(service_found, 0, length);
814
815 pos = 0;
816
817 while (pos < VCHIQ_SLOT_SIZE) {
818 struct vchiq_header *header =
819 (struct vchiq_header *)(data + pos);
820 int msgid = header->msgid;
821
822 if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
823 process_free_data_message(state, service_found,
824 header);
825 data_found = 1;
826 }
827
828 pos += calc_stride(header->size);
829 if (pos > VCHIQ_SLOT_SIZE) {
830 dev_err(state->dev,
831 "core: pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x\n",
832 pos, header, msgid, header->msgid, header->size);
833 WARN(1, "invalid slot position\n");
834 }
835 }
836
837 if (data_found) {
838 int count;
839
840 spin_lock(&quota_spinlock);
841 count = state->data_use_count;
842 if (count > 0)
843 state->data_use_count = count - 1;
844 spin_unlock(&quota_spinlock);
845 if (count == state->data_quota)
846 complete(&state->data_quota_event);
847 }
848
849 /*
850 * Don't allow the slot to be reused until we are no
851 * longer interested in it.
852 */
853 mb();
854
855 state->slot_queue_available = slot_queue_available;
856 complete(&state->slot_available_event);
857 }
858 }
859
860 static ssize_t
861 memcpy_copy_callback(void *context, void *dest, size_t offset, size_t maxsize)
862 {
863 memcpy(dest + offset, context + offset, maxsize);
864 return maxsize;
865 }
866
867 static ssize_t
868 copy_message_data(ssize_t (*copy_callback)(void *context, void *dest, size_t offset,
869 size_t maxsize),
870 void *context,
871 void *dest,
872 size_t size)
873 {
874 size_t pos = 0;
875
876 while (pos < size) {
877 ssize_t callback_result;
878 size_t max_bytes = size - pos;
879
880 callback_result = copy_callback(context, dest + pos, pos,
881 max_bytes);
882
883 if (callback_result < 0)
884 return callback_result;
885
886 if (!callback_result)
887 return -EIO;
888
889 if (callback_result > max_bytes)
890 return -EIO;
891
892 pos += callback_result;
893 }
894
895 return size;
896 }
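/*
 * Illustrative pairing: with memcpy_copy_callback() as the callback (as
 * parse_open() does via queue_message()), copy_message_data() simply copies
 * 'size' bytes from a source buffer into header->data; other callers can
 * supply a callback that sources the data elsewhere.
 */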
897
898 /* Called by the slot handler and application threads */
899 static int
900 queue_message(struct vchiq_state *state, struct vchiq_service *service,
901 int msgid,
902 ssize_t (*copy_callback)(void *context, void *dest,
903 size_t offset, size_t maxsize),
904 void *context, size_t size, int flags)
905 {
906 struct vchiq_shared_state *local;
907 struct vchiq_service_quota *quota = NULL;
908 struct vchiq_header *header;
909 int type = VCHIQ_MSG_TYPE(msgid);
910
911 size_t stride;
912
913 local = state->local;
914
915 stride = calc_stride(size);
916
917 WARN_ON(stride > VCHIQ_SLOT_SIZE);
918
919 if (!(flags & QMFLAGS_NO_MUTEX_LOCK) &&
920 mutex_lock_killable(&state->slot_mutex))
921 return -EAGAIN;
922
923 if (type == VCHIQ_MSG_DATA) {
924 int tx_end_index;
925
926 if (!service) {
927 WARN(1, "%s: service is NULL\n", __func__);
928 mutex_unlock(&state->slot_mutex);
929 return -EINVAL;
930 }
931
932 WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
933 QMFLAGS_NO_MUTEX_UNLOCK));
934
935 if (service->closing) {
936 /* The service has been closed */
937 mutex_unlock(&state->slot_mutex);
938 return -EHOSTDOWN;
939 }
940
941 quota = &state->service_quotas[service->localport];
942
943 spin_lock(&quota_spinlock);
944
945 /*
946 * Ensure this service doesn't use more than its quota of
947 * messages or slots
948 */
949 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1);
950
951 /*
952 * Ensure data messages don't use more than their quota of
953 * slots
954 */
955 while ((tx_end_index != state->previous_data_index) &&
956 (state->data_use_count == state->data_quota)) {
957 VCHIQ_STATS_INC(state, data_stalls);
958 spin_unlock(&quota_spinlock);
959 mutex_unlock(&state->slot_mutex);
960
961 if (wait_for_completion_interruptible(&state->data_quota_event))
962 return -EAGAIN;
963
964 mutex_lock(&state->slot_mutex);
965 spin_lock(&quota_spinlock);
966 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1);
967 if ((tx_end_index == state->previous_data_index) ||
968 (state->data_use_count < state->data_quota)) {
969 /* Pass the signal on to other waiters */
970 complete(&state->data_quota_event);
971 break;
972 }
973 }
974
975 while ((quota->message_use_count == quota->message_quota) ||
976 ((tx_end_index != quota->previous_tx_index) &&
977 (quota->slot_use_count == quota->slot_quota))) {
978 spin_unlock(&quota_spinlock);
979 dev_dbg(state->dev,
980 "core: %d: qm:%d %s,%zx - quota stall (msg %d, slot %d)\n",
981 state->id, service->localport, msg_type_str(type), size,
982 quota->message_use_count, quota->slot_use_count);
983 VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
984 mutex_unlock(&state->slot_mutex);
985 if (wait_for_completion_interruptible(&quota->quota_event))
986 return -EAGAIN;
987 if (service->closing)
988 return -EHOSTDOWN;
989 if (mutex_lock_killable(&state->slot_mutex))
990 return -EAGAIN;
991 if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
992 /* The service has been closed */
993 mutex_unlock(&state->slot_mutex);
994 return -EHOSTDOWN;
995 }
996 spin_lock(&quota_spinlock);
997 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1);
998 }
999
1000 spin_unlock(&quota_spinlock);
1001 }
1002
1003 header = reserve_space(state, stride, flags & QMFLAGS_IS_BLOCKING);
1004
1005 if (!header) {
1006 if (service)
1007 VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
1008 /*
1009 * In the event of a failure, return the mutex to the
1010 * state it was in
1011 */
1012 if (!(flags & QMFLAGS_NO_MUTEX_LOCK))
1013 mutex_unlock(&state->slot_mutex);
1014 return -EAGAIN;
1015 }
1016
1017 if (type == VCHIQ_MSG_DATA) {
1018 ssize_t callback_result;
1019 int tx_end_index;
1020 int slot_use_count;
1021
1022 dev_dbg(state->dev, "core: %d: qm %s@%pK,%zx (%d->%d)\n",
1023 state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
1024 VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
1025
1026 WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
1027 QMFLAGS_NO_MUTEX_UNLOCK));
1028
1029 callback_result =
1030 copy_message_data(copy_callback, context,
1031 header->data, size);
1032
1033 if (callback_result < 0) {
1034 mutex_unlock(&state->slot_mutex);
1035 VCHIQ_SERVICE_STATS_INC(service, error_count);
1036 return -EINVAL;
1037 }
1038
1039 vchiq_log_dump_mem(state->dev, "Sent", 0,
1040 header->data,
1041 min_t(size_t, 16, callback_result));
1042
1043 spin_lock(&quota_spinlock);
1044 quota->message_use_count++;
1045
1046 tx_end_index =
1047 SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
1048
1049 /*
1050 * If this transmission can't fit in the last slot used by any
1051 * service, the data_use_count must be increased.
1052 */
1053 if (tx_end_index != state->previous_data_index) {
1054 state->previous_data_index = tx_end_index;
1055 state->data_use_count++;
1056 }
1057
1058 /*
1059 * If this isn't the same slot last used by this service,
1060 * the service's slot_use_count must be increased.
1061 */
1062 if (tx_end_index != quota->previous_tx_index) {
1063 quota->previous_tx_index = tx_end_index;
1064 slot_use_count = ++quota->slot_use_count;
1065 } else {
1066 slot_use_count = 0;
1067 }
1068
1069 spin_unlock(&quota_spinlock);
1070
1071 if (slot_use_count)
1072 dev_dbg(state->dev, "core: %d: qm:%d %s,%zx - slot_use->%d (hdr %p)\n",
1073 state->id, service->localport, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1074 size, slot_use_count, header);
1075
1076 VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1077 VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1078 } else {
1079 dev_dbg(state->dev, "core: %d: qm %s@%pK,%zx (%d->%d)\n",
1080 state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
1081 VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
1082 if (size != 0) {
1083 /*
1084 * It is assumed for now that this code path
1085 * only happens from calls inside this file.
1086 *
1087	 * External callers go through the vchiq_queue_message
1088	 * path, which always sets the type to VCHIQ_MSG_DATA.
1089 *
1090 * At first glance this appears to be correct but
1091 * more review is needed.
1092 */
1093 copy_message_data(copy_callback, context,
1094 header->data, size);
1095 }
1096 VCHIQ_STATS_INC(state, ctrl_tx_count);
1097 }
1098
1099 header->msgid = msgid;
1100 header->size = size;
1101
1102 {
1103 int svc_fourcc;
1104
1105 svc_fourcc = service
1106 ? service->base.fourcc
1107 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1108
1109 dev_dbg(state->dev, "core_msg: Sent Msg %s(%u) to %p4cc s:%u d:%d len:%zu\n",
1110 msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid),
1111 &svc_fourcc, VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid), size);
1112 }
1113
1114 /* Make sure the new header is visible to the peer. */
1115 wmb();
1116
1117 /* Make the new tx_pos visible to the peer. */
1118 local->tx_pos = state->local_tx_pos;
1119 wmb();
1120
1121 if (service && (type == VCHIQ_MSG_CLOSE))
1122 set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
1123
1124 if (!(flags & QMFLAGS_NO_MUTEX_UNLOCK))
1125 mutex_unlock(&state->slot_mutex);
1126
1127 remote_event_signal(&state->remote->trigger);
1128
1129 return 0;
1130 }
1131
1132 /* Called by the slot handler and application threads */
1133 static int
1134 queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
1135 int msgid,
1136 ssize_t (*copy_callback)(void *context, void *dest,
1137 size_t offset, size_t maxsize),
1138 void *context, int size, int is_blocking)
1139 {
1140 struct vchiq_shared_state *local;
1141 struct vchiq_header *header;
1142 ssize_t callback_result;
1143 int svc_fourcc;
1144
1145 local = state->local;
1146
1147 if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME &&
1148 mutex_lock_killable(&state->sync_mutex))
1149 return -EAGAIN;
1150
1151 remote_event_wait(&state->sync_release_event, &local->sync_release);
1152
1153 /* Ensure that reads don't overtake the remote_event_wait. */
1154 rmb();
1155
1156 header = (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
1157 local->slot_sync);
1158
1159 {
1160 int oldmsgid = header->msgid;
1161
1162 if (oldmsgid != VCHIQ_MSGID_PADDING)
1163 dev_err(state->dev, "core: %d: qms - msgid %x, not PADDING\n",
1164 state->id, oldmsgid);
1165 }
1166
1167 dev_dbg(state->dev, "sync: %d: qms %s@%pK,%x (%d->%d)\n",
1168 state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
1169 VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
1170
1171 callback_result =
1172 copy_message_data(copy_callback, context,
1173 header->data, size);
1174
1175 if (callback_result < 0) {
1176 mutex_unlock(&state->sync_mutex);
1177 VCHIQ_SERVICE_STATS_INC(service, error_count);
1178 return -EINVAL;
1179 }
1180
1181 if (service) {
1182 vchiq_log_dump_mem(state->dev, "Sent", 0,
1183 header->data,
1184 min_t(size_t, 16, callback_result));
1185
1186 VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1187 VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1188 } else {
1189 VCHIQ_STATS_INC(state, ctrl_tx_count);
1190 }
1191
1192 header->size = size;
1193 header->msgid = msgid;
1194
1195
1196 svc_fourcc = service ? service->base.fourcc
1197 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1198
1199 dev_dbg(state->dev,
1200 "sync: Sent Sync Msg %s(%u) to %p4cc s:%u d:%d len:%d\n",
1201 msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid),
1202 &svc_fourcc, VCHIQ_MSG_SRCPORT(msgid),
1203 VCHIQ_MSG_DSTPORT(msgid), size);
1204
1205 remote_event_signal(&state->remote->sync_trigger);
1206
1207 if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
1208 mutex_unlock(&state->sync_mutex);
1209
1210 return 0;
1211 }
1212
1213 static inline void
1214 claim_slot(struct vchiq_slot_info *slot)
1215 {
1216 slot->use_count++;
1217 }
1218
1219 static void
1220 release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
1221 struct vchiq_header *header, struct vchiq_service *service)
1222 {
1223 mutex_lock(&state->recycle_mutex);
1224
1225 if (header) {
1226 int msgid = header->msgid;
1227
1228 if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) || (service && service->closing)) {
1229 mutex_unlock(&state->recycle_mutex);
1230 return;
1231 }
1232
1233 /* Rewrite the message header to prevent a double release */
1234 header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
1235 }
1236
1237 slot_info->release_count++;
1238
1239 if (slot_info->release_count == slot_info->use_count) {
1240 int slot_queue_recycle;
1241 /* Add to the freed queue */
1242
1243 /*
1244 * A read barrier is necessary here to prevent speculative
1245 * fetches of remote->slot_queue_recycle from overtaking the
1246 * mutex.
1247 */
1248 rmb();
1249
1250 slot_queue_recycle = state->remote->slot_queue_recycle;
1251 state->remote->slot_queue[slot_queue_recycle &
1252 VCHIQ_SLOT_QUEUE_MASK] =
1253 SLOT_INDEX_FROM_INFO(state, slot_info);
1254 state->remote->slot_queue_recycle = slot_queue_recycle + 1;
1255 dev_dbg(state->dev, "core: %d: %d - recycle->%x\n",
1256 state->id, SLOT_INDEX_FROM_INFO(state, slot_info),
1257 state->remote->slot_queue_recycle);
1258
1259 /*
1260 * A write barrier is necessary, but remote_event_signal
1261 * contains one.
1262 */
1263 remote_event_signal(&state->remote->recycle);
1264 }
1265
1266 mutex_unlock(&state->recycle_mutex);
1267 }
1268
1269 static inline enum vchiq_reason
1270 get_bulk_reason(struct vchiq_bulk *bulk)
1271 {
1272 if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
1273 if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED)
1274 return VCHIQ_BULK_TRANSMIT_ABORTED;
1275
1276 return VCHIQ_BULK_TRANSMIT_DONE;
1277 }
1278
1279 if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED)
1280 return VCHIQ_BULK_RECEIVE_ABORTED;
1281
1282 return VCHIQ_BULK_RECEIVE_DONE;
1283 }
1284
1285 /* Called by the slot handler - don't hold the bulk mutex */
1286 static int
1287 notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
1288 int retry_poll)
1289 {
1290 int status = 0;
1291
1292 dev_dbg(service->state->dev,
1293 "core: %d: nb:%d %cx - p=%x rn=%x r=%x\n",
1294 service->state->id, service->localport,
1295 (queue == &service->bulk_tx) ? 't' : 'r',
1296 queue->process, queue->remote_notify, queue->remove);
1297
1298 queue->remote_notify = queue->process;
1299
1300 while (queue->remove != queue->remote_notify) {
1301 struct vchiq_bulk *bulk =
1302 &queue->bulks[BULK_INDEX(queue->remove)];
1303
1304 /*
1305 * Only generate callbacks for non-dummy bulk
1306 * requests, and non-terminated services
1307 */
1308 if (bulk->data && service->instance) {
1309 if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
1310 if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
1311 VCHIQ_SERVICE_STATS_INC(service, bulk_tx_count);
1312 VCHIQ_SERVICE_STATS_ADD(service, bulk_tx_bytes,
1313 bulk->actual);
1314 } else {
1315 VCHIQ_SERVICE_STATS_INC(service, bulk_rx_count);
1316 VCHIQ_SERVICE_STATS_ADD(service, bulk_rx_bytes,
1317 bulk->actual);
1318 }
1319 } else {
1320 VCHIQ_SERVICE_STATS_INC(service, bulk_aborted_count);
1321 }
1322 if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
1323 struct bulk_waiter *waiter;
1324
1325 spin_lock(&bulk_waiter_spinlock);
1326 waiter = bulk->userdata;
1327 if (waiter) {
1328 waiter->actual = bulk->actual;
1329 complete(&waiter->event);
1330 }
1331 spin_unlock(&bulk_waiter_spinlock);
1332 } else if (bulk->mode == VCHIQ_BULK_MODE_CALLBACK) {
1333 enum vchiq_reason reason =
1334 get_bulk_reason(bulk);
1335 status = make_service_callback(service, reason, NULL,
1336 bulk->userdata);
1337 if (status == -EAGAIN)
1338 break;
1339 }
1340 }
1341
1342 queue->remove++;
1343 complete(&service->bulk_remove_event);
1344 }
1345 if (!retry_poll)
1346 status = 0;
1347
1348 if (status == -EAGAIN)
1349 request_poll(service->state, service, (queue == &service->bulk_tx) ?
1350 VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
1351
1352 return status;
1353 }
1354
1355 static void
1356 poll_services_of_group(struct vchiq_state *state, int group)
1357 {
1358 u32 flags = atomic_xchg(&state->poll_services[group], 0);
1359 int i;
1360
1361 for (i = 0; flags; i++) {
1362 struct vchiq_service *service;
1363 u32 service_flags;
1364
1365 if ((flags & BIT(i)) == 0)
1366 continue;
1367
1368 service = find_service_by_port(state, (group << 5) + i);
1369 flags &= ~BIT(i);
1370
1371 if (!service)
1372 continue;
1373
1374 service_flags = atomic_xchg(&service->poll_flags, 0);
1375 if (service_flags & BIT(VCHIQ_POLL_REMOVE)) {
1376 dev_dbg(state->dev, "core: %d: ps - remove %d<->%d\n",
1377 state->id, service->localport, service->remoteport);
1378
1379 /*
1380 * Make it look like a client, because
1381 * it must be removed and not left in
1382 * the LISTENING state.
1383 */
1384 service->public_fourcc = VCHIQ_FOURCC_INVALID;
1385
1386 if (vchiq_close_service_internal(service, NO_CLOSE_RECVD))
1387 request_poll(state, service, VCHIQ_POLL_REMOVE);
1388 } else if (service_flags & BIT(VCHIQ_POLL_TERMINATE)) {
1389 dev_dbg(state->dev, "core: %d: ps - terminate %d<->%d\n",
1390 state->id, service->localport, service->remoteport);
1391 if (vchiq_close_service_internal(service, NO_CLOSE_RECVD))
1392 request_poll(state, service, VCHIQ_POLL_TERMINATE);
1393 }
1394 if (service_flags & BIT(VCHIQ_POLL_TXNOTIFY))
1395 notify_bulks(service, &service->bulk_tx, RETRY_POLL);
1396 if (service_flags & BIT(VCHIQ_POLL_RXNOTIFY))
1397 notify_bulks(service, &service->bulk_rx, RETRY_POLL);
1398 vchiq_service_put(service);
1399 }
1400 }
1401
1402 /* Called by the slot handler thread */
1403 static void
1404 poll_services(struct vchiq_state *state)
1405 {
1406 int group;
1407
1408 for (group = 0; group < BITSET_SIZE(state->unused_service); group++)
1409 poll_services_of_group(state, group);
1410 }
1411
1412 /* Called with the bulk_mutex held */
1413 static void
1414 abort_outstanding_bulks(struct vchiq_service *service,
1415 struct vchiq_bulk_queue *queue)
1416 {
1417 int is_tx = (queue == &service->bulk_tx);
1418
1419 dev_dbg(service->state->dev,
1420 "core: %d: aob:%d %cx - li=%x ri=%x p=%x\n",
1421 service->state->id, service->localport,
1422 is_tx ? 't' : 'r', queue->local_insert,
1423 queue->remote_insert, queue->process);
1424
1425 WARN_ON((int)(queue->local_insert - queue->process) < 0);
1426 WARN_ON((int)(queue->remote_insert - queue->process) < 0);
1427
1428 while ((queue->process != queue->local_insert) ||
1429 (queue->process != queue->remote_insert)) {
1430 struct vchiq_bulk *bulk = &queue->bulks[BULK_INDEX(queue->process)];
1431
1432 if (queue->process == queue->remote_insert) {
1433 /* fabricate a matching dummy bulk */
1434 bulk->remote_data = NULL;
1435 bulk->remote_size = 0;
1436 queue->remote_insert++;
1437 }
1438
1439 if (queue->process != queue->local_insert) {
1440 vchiq_complete_bulk(service->instance, bulk);
1441
1442 dev_dbg(service->state->dev,
1443 "core_msg: %s %p4cc d:%d ABORTED - tx len:%d, rx len:%d\n",
1444 is_tx ? "Send Bulk to" : "Recv Bulk from",
1445 &service->base.fourcc,
1446 service->remoteport, bulk->size, bulk->remote_size);
1447 } else {
1448 /* fabricate a matching dummy bulk */
1449 bulk->data = 0;
1450 bulk->size = 0;
1451 bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
1452 bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
1453 VCHIQ_BULK_RECEIVE;
1454 queue->local_insert++;
1455 }
1456
1457 queue->process++;
1458 }
1459 }
1460
1461 static int
1462 parse_open(struct vchiq_state *state, struct vchiq_header *header)
1463 {
1464 const struct vchiq_open_payload *payload;
1465 struct vchiq_service *service = NULL;
1466 int msgid, size;
1467 unsigned int localport, remoteport, fourcc;
1468 short version, version_min;
1469
1470 msgid = header->msgid;
1471 size = header->size;
1472 localport = VCHIQ_MSG_DSTPORT(msgid);
1473 remoteport = VCHIQ_MSG_SRCPORT(msgid);
1474 if (size < sizeof(struct vchiq_open_payload))
1475 goto fail_open;
1476
1477 payload = (struct vchiq_open_payload *)header->data;
1478 fourcc = payload->fourcc;
1479 dev_dbg(state->dev, "core: %d: prs OPEN@%pK (%d->'%p4cc')\n",
1480 state->id, header, localport, &fourcc);
1481
1482 service = get_listening_service(state, fourcc);
1483 if (!service)
1484 goto fail_open;
1485
1486 /* A matching service exists */
1487 version = payload->version;
1488 version_min = payload->version_min;
1489
1490 if ((service->version < version_min) || (version < service->version_min)) {
1491 /* Version mismatch */
1492 dev_err(state->dev, "%d: service %d (%p4cc) version mismatch - local (%d, min %d) vs. remote (%d, min %d)",
1493 state->id, service->localport, &fourcc,
1494 service->version, service->version_min, version, version_min);
1495 vchiq_service_put(service);
1496 service = NULL;
1497 goto fail_open;
1498 }
1499 service->peer_version = version;
1500
1501 if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
1502 struct vchiq_openack_payload ack_payload = {
1503 service->version
1504 };
1505 int openack_id = MAKE_OPENACK(service->localport, remoteport);
1506
1507 if (state->version_common <
1508 VCHIQ_VERSION_SYNCHRONOUS_MODE)
1509 service->sync = 0;
1510
1511 /* Acknowledge the OPEN */
1512 if (service->sync) {
1513 if (queue_message_sync(state, NULL, openack_id, memcpy_copy_callback,
1514 &ack_payload, sizeof(ack_payload), 0) == -EAGAIN)
1515 goto bail_not_ready;
1516
1517 /* The service is now open */
1518 set_service_state(service, VCHIQ_SRVSTATE_OPENSYNC);
1519 } else {
1520 if (queue_message(state, NULL, openack_id, memcpy_copy_callback,
1521 &ack_payload, sizeof(ack_payload), 0) == -EAGAIN)
1522 goto bail_not_ready;
1523
1524 /* The service is now open */
1525 set_service_state(service, VCHIQ_SRVSTATE_OPEN);
1526 }
1527 }
1528
1529 /* Success - the message has been dealt with */
1530 vchiq_service_put(service);
1531 return 1;
1532
1533 fail_open:
1534 /* No available service, or an invalid request - send a CLOSE */
1535 if (queue_message(state, NULL, MAKE_CLOSE(0, VCHIQ_MSG_SRCPORT(msgid)),
1536 NULL, NULL, 0, 0) == -EAGAIN)
1537 goto bail_not_ready;
1538
1539 return 1;
1540
1541 bail_not_ready:
1542 if (service)
1543 vchiq_service_put(service);
1544
1545 return 0;
1546 }
1547
1548 /**
1549 * parse_message() - parses a single message from the rx slot
1550 * @state: vchiq state struct
1551 * @header: message header
1552 *
1553 * Context: Process context
1554 *
1555 * Return:
1556 * * >= 0 - size of the parsed message payload (without header)
1557 * * -EINVAL - fatal error occurred, bail out is required
1558 */
1559 static int
1560 parse_message(struct vchiq_state *state, struct vchiq_header *header)
1561 {
1562 struct vchiq_service *service = NULL;
1563 unsigned int localport, remoteport;
1564 int msgid, size, type, ret = -EINVAL;
1565 int svc_fourcc;
1566
1567 DEBUG_INITIALISE(state->local);
1568
1569 DEBUG_VALUE(PARSE_HEADER, (int)(long)header);
1570 msgid = header->msgid;
1571 DEBUG_VALUE(PARSE_MSGID, msgid);
1572 size = header->size;
1573 type = VCHIQ_MSG_TYPE(msgid);
1574 localport = VCHIQ_MSG_DSTPORT(msgid);
1575 remoteport = VCHIQ_MSG_SRCPORT(msgid);
1576
1577 if (type != VCHIQ_MSG_DATA)
1578 VCHIQ_STATS_INC(state, ctrl_rx_count);
1579
1580 switch (type) {
1581 case VCHIQ_MSG_OPENACK:
1582 case VCHIQ_MSG_CLOSE:
1583 case VCHIQ_MSG_DATA:
1584 case VCHIQ_MSG_BULK_RX:
1585 case VCHIQ_MSG_BULK_TX:
1586 case VCHIQ_MSG_BULK_RX_DONE:
1587 case VCHIQ_MSG_BULK_TX_DONE:
1588 service = find_service_by_port(state, localport);
1589 if ((!service ||
1590 ((service->remoteport != remoteport) &&
1591 (service->remoteport != VCHIQ_PORT_FREE))) &&
1592 (localport == 0) &&
1593 (type == VCHIQ_MSG_CLOSE)) {
1594 /*
1595 * This could be a CLOSE from a client which
1596 * hadn't yet received the OPENACK - look for
1597 * the connected service
1598 */
1599 if (service)
1600 vchiq_service_put(service);
1601 service = get_connected_service(state, remoteport);
1602 if (service)
1603 dev_warn(state->dev,
1604 "core: %d: prs %s@%pK (%d->%d) - found connected service %d\n",
1605 state->id, msg_type_str(type), header,
1606 remoteport, localport, service->localport);
1607 }
1608
1609 if (!service) {
1610 dev_err(state->dev,
1611 "core: %d: prs %s@%pK (%d->%d) - invalid/closed service %d\n",
1612 state->id, msg_type_str(type), header, remoteport,
1613 localport, localport);
1614 goto skip_message;
1615 }
1616 break;
1617 default:
1618 break;
1619 }
1620
1621
1622 svc_fourcc = service ? service->base.fourcc
1623 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1624
1625 dev_dbg(state->dev, "core_msg: Rcvd Msg %s(%u) from %p4cc s:%d d:%d len:%d\n",
1626 msg_type_str(type), type, &svc_fourcc, remoteport, localport, size);
1627 if (size > 0)
1628 vchiq_log_dump_mem(state->dev, "Rcvd", 0, header->data, min(16, size));
1629
1630 if (((unsigned long)header & VCHIQ_SLOT_MASK) +
1631 calc_stride(size) > VCHIQ_SLOT_SIZE) {
1632 dev_err(state->dev, "core: header %pK (msgid %x) - size %x too big for slot\n",
1633 header, (unsigned int)msgid, (unsigned int)size);
1634 WARN(1, "oversized for slot\n");
1635 }
1636
1637 switch (type) {
1638 case VCHIQ_MSG_OPEN:
1639 WARN_ON(VCHIQ_MSG_DSTPORT(msgid));
1640 if (!parse_open(state, header))
1641 goto bail_not_ready;
1642 break;
1643 case VCHIQ_MSG_OPENACK:
1644 if (size >= sizeof(struct vchiq_openack_payload)) {
1645 const struct vchiq_openack_payload *payload =
1646 (struct vchiq_openack_payload *)
1647 header->data;
1648 service->peer_version = payload->version;
1649 }
1650 dev_dbg(state->dev,
1651 "core: %d: prs OPENACK@%pK,%x (%d->%d) v:%d\n",
1652 state->id, header, size, remoteport, localport,
1653 service->peer_version);
1654 if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
1655 service->remoteport = remoteport;
1656 set_service_state(service, VCHIQ_SRVSTATE_OPEN);
1657 complete(&service->remove_event);
1658 } else {
1659 dev_err(state->dev, "core: OPENACK received in state %s\n",
1660 srvstate_names[service->srvstate]);
1661 }
1662 break;
1663 case VCHIQ_MSG_CLOSE:
1664 WARN_ON(size); /* There should be no data */
1665
1666 dev_dbg(state->dev, "core: %d: prs CLOSE@%pK (%d->%d)\n",
1667 state->id, header, remoteport, localport);
1668
1669 mark_service_closing_internal(service, 1);
1670
1671 if (vchiq_close_service_internal(service, CLOSE_RECVD) == -EAGAIN)
1672 goto bail_not_ready;
1673
1674 dev_dbg(state->dev, "core: Close Service %p4cc s:%u d:%d\n",
1675 &service->base.fourcc, service->localport, service->remoteport);
1676 break;
1677 case VCHIQ_MSG_DATA:
1678 dev_dbg(state->dev, "core: %d: prs DATA@%pK,%x (%d->%d)\n",
1679 state->id, header, size, remoteport, localport);
1680
1681 if ((service->remoteport == remoteport) &&
1682 (service->srvstate == VCHIQ_SRVSTATE_OPEN)) {
1683 header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
1684 claim_slot(state->rx_info);
1685 DEBUG_TRACE(PARSE_LINE);
1686 if (make_service_callback(service, VCHIQ_MESSAGE_AVAILABLE, header,
1687 NULL) == -EAGAIN) {
1688 DEBUG_TRACE(PARSE_LINE);
1689 goto bail_not_ready;
1690 }
1691 VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
1692 VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes, size);
1693 } else {
1694 VCHIQ_STATS_INC(state, error_count);
1695 }
1696 break;
1697 case VCHIQ_MSG_CONNECT:
1698 dev_dbg(state->dev, "core: %d: prs CONNECT@%pK\n",
1699 state->id, header);
1700 state->version_common = ((struct vchiq_slot_zero *)
1701 state->slot_data)->version;
1702 complete(&state->connect);
1703 break;
1704 case VCHIQ_MSG_BULK_RX:
1705 case VCHIQ_MSG_BULK_TX:
1706 /*
1707 * We should never receive a bulk request from the
1708 * other side since we're not setup to perform as the
1709 * master.
1710 */
1711 WARN_ON(1);
1712 break;
1713 case VCHIQ_MSG_BULK_RX_DONE:
1714 case VCHIQ_MSG_BULK_TX_DONE:
1715 if ((service->remoteport == remoteport) &&
1716 (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
1717 struct vchiq_bulk_queue *queue;
1718 struct vchiq_bulk *bulk;
1719
1720 queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
1721 &service->bulk_rx : &service->bulk_tx;
1722
1723 DEBUG_TRACE(PARSE_LINE);
1724 if (mutex_lock_killable(&service->bulk_mutex)) {
1725 DEBUG_TRACE(PARSE_LINE);
1726 goto bail_not_ready;
1727 }
1728 if ((int)(queue->remote_insert -
1729 queue->local_insert) >= 0) {
1730 dev_err(state->dev,
1731 "core: %d: prs %s@%pK (%d->%d) unexpected (ri=%d,li=%d)\n",
1732 state->id, msg_type_str(type), header, remoteport,
1733 localport, queue->remote_insert, queue->local_insert);
1734 mutex_unlock(&service->bulk_mutex);
1735 break;
1736 }
1737 if (queue->process != queue->remote_insert) {
1738 pr_err("%s: p %x != ri %x\n",
1739 __func__,
1740 queue->process,
1741 queue->remote_insert);
1742 mutex_unlock(&service->bulk_mutex);
1743 goto bail_not_ready;
1744 }
1745
1746 bulk = &queue->bulks[BULK_INDEX(queue->remote_insert)];
1747 bulk->actual = *(int *)header->data;
1748 queue->remote_insert++;
1749
1750 dev_dbg(state->dev, "core: %d: prs %s@%pK (%d->%d) %x@%pad\n",
1751 state->id, msg_type_str(type), header, remoteport,
1752 localport, bulk->actual, &bulk->data);
1753
1754 dev_dbg(state->dev, "core: %d: prs:%d %cx li=%x ri=%x p=%x\n",
1755 state->id, localport,
1756 (type == VCHIQ_MSG_BULK_RX_DONE) ? 'r' : 't',
1757 queue->local_insert, queue->remote_insert, queue->process);
1758
1759 DEBUG_TRACE(PARSE_LINE);
1760 WARN_ON(queue->process == queue->local_insert);
1761 vchiq_complete_bulk(service->instance, bulk);
1762 queue->process++;
1763 mutex_unlock(&service->bulk_mutex);
1764 DEBUG_TRACE(PARSE_LINE);
1765 notify_bulks(service, queue, RETRY_POLL);
1766 DEBUG_TRACE(PARSE_LINE);
1767 }
1768 break;
1769 case VCHIQ_MSG_PADDING:
1770 dev_dbg(state->dev, "core: %d: prs PADDING@%pK,%x\n",
1771 state->id, header, size);
1772 break;
1773 case VCHIQ_MSG_PAUSE:
1774 /* If initiated, signal the application thread */
1775 dev_dbg(state->dev, "core: %d: prs PAUSE@%pK,%x\n",
1776 state->id, header, size);
1777 if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
1778 dev_err(state->dev, "core: %d: PAUSE received in state PAUSED\n",
1779 state->id);
1780 break;
1781 }
1782 if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
1783 /* Send a PAUSE in response */
1784 if (queue_message(state, NULL, MAKE_PAUSE, NULL, NULL, 0,
1785 QMFLAGS_NO_MUTEX_UNLOCK) == -EAGAIN)
1786 goto bail_not_ready;
1787 }
1788 /* At this point slot_mutex is held */
1789 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
1790 break;
1791 case VCHIQ_MSG_RESUME:
1792 dev_dbg(state->dev, "core: %d: prs RESUME@%pK,%x\n",
1793 state->id, header, size);
1794 /* Release the slot mutex */
1795 mutex_unlock(&state->slot_mutex);
1796 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
1797 break;
1798
1799 case VCHIQ_MSG_REMOTE_USE:
1800 vchiq_on_remote_use(state);
1801 break;
1802 case VCHIQ_MSG_REMOTE_RELEASE:
1803 vchiq_on_remote_release(state);
1804 break;
1805 case VCHIQ_MSG_REMOTE_USE_ACTIVE:
1806 break;
1807
1808 default:
1809 dev_err(state->dev, "core: %d: prs invalid msgid %x@%pK,%x\n",
1810 state->id, msgid, header, size);
1811 WARN(1, "invalid message\n");
1812 break;
1813 }
1814
1815 skip_message:
1816 ret = size;
1817
1818 bail_not_ready:
1819 if (service)
1820 vchiq_service_put(service);
1821
1822 return ret;
1823 }
1824
1825 /* Called by the slot handler thread */
1826 static void
1827 parse_rx_slots(struct vchiq_state *state)
1828 {
1829 struct vchiq_shared_state *remote = state->remote;
1830 int tx_pos;
1831
1832 DEBUG_INITIALISE(state->local);
1833
1834 tx_pos = remote->tx_pos;
1835
1836 while (state->rx_pos != tx_pos) {
1837 struct vchiq_header *header;
1838 int size;
1839
1840 DEBUG_TRACE(PARSE_LINE);
1841 if (!state->rx_data) {
1842 int rx_index;
1843
1844 WARN_ON(state->rx_pos & VCHIQ_SLOT_MASK);
1845 rx_index = remote->slot_queue[
1846 SLOT_QUEUE_INDEX_FROM_POS_MASKED(state->rx_pos)];
1847 state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
1848 rx_index);
1849 state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
1850
1851 /*
1852 * Initialise use_count to one, and increment
1853 * release_count at the end of the slot to avoid
1854 * releasing the slot prematurely.
1855 */
1856 state->rx_info->use_count = 1;
1857 state->rx_info->release_count = 0;
1858 }
1859
1860 header = (struct vchiq_header *)(state->rx_data +
1861 (state->rx_pos & VCHIQ_SLOT_MASK));
1862 size = parse_message(state, header);
1863 if (size < 0)
1864 return;
1865
1866 state->rx_pos += calc_stride(size);
1867
1868 DEBUG_TRACE(PARSE_LINE);
1869 /*
1870 * Perform some housekeeping when the end of the slot is
1871 * reached.
1872 */
1873 if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
1874 /* Remove the extra reference count. */
1875 release_slot(state, state->rx_info, NULL, NULL);
1876 state->rx_data = NULL;
1877 }
1878 }
1879 }
1880
1881 /**
1882 * handle_poll() - handle service polling and other rare conditions
1883 * @state: vchiq state struct
1884 *
1885 * Context: Process context
1886 *
1887 * Return:
1888  * 0 - poll handled successfully
1889 * * -EAGAIN - retry later
1890 */
1891 static int
1892 handle_poll(struct vchiq_state *state)
1893 {
1894 switch (state->conn_state) {
1895 case VCHIQ_CONNSTATE_CONNECTED:
1896 /* Poll the services as requested */
1897 poll_services(state);
1898 break;
1899
1900 case VCHIQ_CONNSTATE_PAUSING:
1901 if (queue_message(state, NULL, MAKE_PAUSE, NULL, NULL, 0,
1902 QMFLAGS_NO_MUTEX_UNLOCK) != -EAGAIN) {
1903 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSE_SENT);
1904 } else {
1905 /* Retry later */
1906 return -EAGAIN;
1907 }
1908 break;
1909
1910 case VCHIQ_CONNSTATE_RESUMING:
1911 if (queue_message(state, NULL, MAKE_RESUME, NULL, NULL, 0,
1912 QMFLAGS_NO_MUTEX_LOCK) != -EAGAIN) {
1913 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
1914 } else {
1915 /*
1916 * This should really be impossible,
1917 * since the PAUSE should have flushed
1918 * through outstanding messages.
1919 */
1920 dev_err(state->dev, "core: Failed to send RESUME message\n");
1921 }
1922 break;
1923 default:
1924 break;
1925 }
1926
1927 return 0;
1928 }
1929
1930 /* Called by the slot handler thread */
1931 static int
1932 slot_handler_func(void *v)
1933 {
1934 struct vchiq_state *state = v;
1935 struct vchiq_shared_state *local = state->local;
1936
1937 DEBUG_INITIALISE(local);
1938
1939 while (1) {
1940 DEBUG_COUNT(SLOT_HANDLER_COUNT);
1941 DEBUG_TRACE(SLOT_HANDLER_LINE);
1942 remote_event_wait(&state->trigger_event, &local->trigger);
1943
1944 /* Ensure that reads don't overtake the remote_event_wait. */
1945 rmb();
1946
1947 DEBUG_TRACE(SLOT_HANDLER_LINE);
1948 if (state->poll_needed) {
1949 state->poll_needed = 0;
1950
1951 /*
1952 * Handle service polling and other rare conditions here
1953 * out of the mainline code
1954 */
1955 if (handle_poll(state) == -EAGAIN)
1956 state->poll_needed = 1;
1957 }
1958
1959 DEBUG_TRACE(SLOT_HANDLER_LINE);
1960 parse_rx_slots(state);
1961 }
1962 return 0;
1963 }
1964
1965 /* Called by the recycle thread */
1966 static int
1967 recycle_func(void *v)
1968 {
1969 struct vchiq_state *state = v;
1970 struct vchiq_shared_state *local = state->local;
1971 u32 *found;
1972 size_t length;
1973
1974 length = sizeof(*found) * BITSET_SIZE(VCHIQ_MAX_SERVICES);
1975
1976 found = kmalloc_array(BITSET_SIZE(VCHIQ_MAX_SERVICES), sizeof(*found),
1977 GFP_KERNEL);
1978 if (!found)
1979 return -ENOMEM;
1980
1981 while (1) {
1982 remote_event_wait(&state->recycle_event, &local->recycle);
1983
1984 process_free_queue(state, found, length);
1985 }
1986 return 0;
1987 }
1988
1989 /* Called by the sync thread */
1990 static int
1991 sync_func(void *v)
1992 {
1993 struct vchiq_state *state = v;
1994 struct vchiq_shared_state *local = state->local;
1995 struct vchiq_header *header =
1996 (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
1997 state->remote->slot_sync);
1998 int svc_fourcc;
1999
2000 while (1) {
2001 struct vchiq_service *service;
2002 int msgid, size;
2003 int type;
2004 unsigned int localport, remoteport;
2005
2006 remote_event_wait(&state->sync_trigger_event, &local->sync_trigger);
2007
2008 /* Ensure that reads don't overtake the remote_event_wait. */
2009 rmb();
2010
2011 msgid = header->msgid;
2012 size = header->size;
2013 type = VCHIQ_MSG_TYPE(msgid);
2014 localport = VCHIQ_MSG_DSTPORT(msgid);
2015 remoteport = VCHIQ_MSG_SRCPORT(msgid);
2016
2017 service = find_service_by_port(state, localport);
2018
2019 if (!service) {
2020 dev_err(state->dev,
2021 "sync: %d: sf %s@%pK (%d->%d) - invalid/closed service %d\n",
2022 state->id, msg_type_str(type), header, remoteport,
2023 localport, localport);
2024 release_message_sync(state, header);
2025 continue;
2026 }
2027
2028 svc_fourcc = service->base.fourcc;
2029
2030 dev_dbg(state->dev, "sync: Rcvd Msg %s from %p4cc s:%d d:%d len:%d\n",
2031 msg_type_str(type), &svc_fourcc, remoteport, localport, size);
2032 if (size > 0)
2033 vchiq_log_dump_mem(state->dev, "Rcvd", 0, header->data, min(16, size));
2034
2035 switch (type) {
2036 case VCHIQ_MSG_OPENACK:
2037 if (size >= sizeof(struct vchiq_openack_payload)) {
2038 const struct vchiq_openack_payload *payload =
2039 (struct vchiq_openack_payload *)
2040 header->data;
2041 service->peer_version = payload->version;
2042 }
2043 dev_err(state->dev, "sync: %d: sf OPENACK@%pK,%x (%d->%d) v:%d\n",
2044 state->id, header, size, remoteport, localport,
2045 service->peer_version);
2046 if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
2047 service->remoteport = remoteport;
2048 set_service_state(service, VCHIQ_SRVSTATE_OPENSYNC);
2049 service->sync = 1;
2050 complete(&service->remove_event);
2051 }
2052 release_message_sync(state, header);
2053 break;
2054
2055 case VCHIQ_MSG_DATA:
2056 dev_dbg(state->dev, "sync: %d: sf DATA@%pK,%x (%d->%d)\n",
2057 state->id, header, size, remoteport, localport);
2058
2059 if ((service->remoteport == remoteport) &&
2060 (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)) {
2061 if (make_service_callback(service, VCHIQ_MESSAGE_AVAILABLE, header,
2062 NULL) == -EAGAIN)
2063 dev_err(state->dev,
2064 "sync: error: synchronous callback to service %d returns -EAGAIN\n",
2065 localport);
2066 }
2067 break;
2068
2069 default:
2070 dev_err(state->dev, "sync: error: %d: sf unexpected msgid %x@%pK,%x\n",
2071 state->id, msgid, header, size);
2072 release_message_sync(state, header);
2073 break;
2074 }
2075
2076 vchiq_service_put(service);
2077 }
2078
2079 return 0;
2080 }
2081
2082 inline const char *
2083 get_conn_state_name(enum vchiq_connstate conn_state)
2084 {
2085 return conn_state_names[conn_state];
2086 }
2087
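/*
 * Carve up the shared memory region: align the base to a slot boundary,
 * place slot_zero at the start and split the remaining data slots evenly
 * between the master and slave sides (each side also gets a sync slot).
 * Returns NULL if fewer than four data slots would remain after the
 * slot_zero slots.
 */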
2088 struct vchiq_slot_zero *
2089 vchiq_init_slots(struct device *dev, void *mem_base, int mem_size)
2090 {
2091 int mem_align =
2092 (int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK);
2093 struct vchiq_slot_zero *slot_zero =
2094 (struct vchiq_slot_zero *)(mem_base + mem_align);
2095 int num_slots = (mem_size - mem_align) / VCHIQ_SLOT_SIZE;
2096 int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
2097
2098 check_sizes();
2099
2100 /* Ensure there is enough memory to run an absolutely minimum system */
2101 num_slots -= first_data_slot;
2102
2103 if (num_slots < 4) {
2104 dev_err(dev, "core: %s: Insufficient memory %x bytes\n",
2105 __func__, mem_size);
2106 return NULL;
2107 }
2108
2109 memset(slot_zero, 0, sizeof(struct vchiq_slot_zero));
2110
2111 slot_zero->magic = VCHIQ_MAGIC;
2112 slot_zero->version = VCHIQ_VERSION;
2113 slot_zero->version_min = VCHIQ_VERSION_MIN;
2114 slot_zero->slot_zero_size = sizeof(struct vchiq_slot_zero);
2115 slot_zero->slot_size = VCHIQ_SLOT_SIZE;
2116 slot_zero->max_slots = VCHIQ_MAX_SLOTS;
2117 slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
2118
2119 slot_zero->master.slot_sync = first_data_slot;
2120 slot_zero->master.slot_first = first_data_slot + 1;
2121 slot_zero->master.slot_last = first_data_slot + (num_slots / 2) - 1;
2122 slot_zero->slave.slot_sync = first_data_slot + (num_slots / 2);
2123 slot_zero->slave.slot_first = first_data_slot + (num_slots / 2) + 1;
2124 slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
2125
2126 return slot_zero;
2127 }
2128
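/*
 * Initialise the slave side of the shared state: set up the mutexes,
 * completions and per-service quota events, populate the local slot
 * queue, create the remote events and then start the slot handler,
 * recycle and sync kernel threads before advertising the state as
 * initialised to the peer.
 */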
2129 int
2130 vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero, struct device *dev)
2131 {
2132 struct vchiq_shared_state *local;
2133 struct vchiq_shared_state *remote;
2134 char threadname[16];
2135 int i, ret;
2136
2137 local = &slot_zero->slave;
2138 remote = &slot_zero->master;
2139
2140 if (local->initialised) {
2141 if (remote->initialised)
2142 dev_err(dev, "local state has already been initialised\n");
2143 else
2144 dev_err(dev, "master/slave mismatch two slaves\n");
2145
2146 return -EINVAL;
2147 }
2148
2149 memset(state, 0, sizeof(struct vchiq_state));
2150
2151 state->dev = dev;
2152
2153 /*
2154 * initialize shared state pointers
2155 */
2156
2157 state->local = local;
2158 state->remote = remote;
2159 state->slot_data = (struct vchiq_slot *)slot_zero;
2160
2161 /*
2162 * initialize events and mutexes
2163 */
2164
2165 init_completion(&state->connect);
2166 mutex_init(&state->mutex);
2167 mutex_init(&state->slot_mutex);
2168 mutex_init(&state->recycle_mutex);
2169 mutex_init(&state->sync_mutex);
2170 mutex_init(&state->bulk_transfer_mutex);
2171
2172 init_completion(&state->slot_available_event);
2173 init_completion(&state->slot_remove_event);
2174 init_completion(&state->data_quota_event);
2175
2176 state->slot_queue_available = 0;
2177
2178 for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
2179 struct vchiq_service_quota *quota = &state->service_quotas[i];
2180 		init_completion(&quota->quota_event);
2181 }
2182
2183 for (i = local->slot_first; i <= local->slot_last; i++) {
2184 local->slot_queue[state->slot_queue_available] = i;
2185 state->slot_queue_available++;
2186 complete(&state->slot_available_event);
2187 }
2188
2189 state->default_slot_quota = state->slot_queue_available / 2;
2190 state->default_message_quota =
2191 min_t(unsigned short, state->default_slot_quota * 256, ~0);
2192
2193 state->previous_data_index = -1;
2194 state->data_use_count = 0;
2195 state->data_quota = state->slot_queue_available - 1;
2196
2197 remote_event_create(&state->trigger_event, &local->trigger);
2198 local->tx_pos = 0;
2199 remote_event_create(&state->recycle_event, &local->recycle);
2200 local->slot_queue_recycle = state->slot_queue_available;
2201 remote_event_create(&state->sync_trigger_event, &local->sync_trigger);
2202 remote_event_create(&state->sync_release_event, &local->sync_release);
2203
2204 /* At start-of-day, the slot is empty and available */
2205 ((struct vchiq_header *)
2206 SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid =
2207 VCHIQ_MSGID_PADDING;
2208 remote_event_signal_local(&state->sync_release_event, &local->sync_release);
2209
2210 local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
2211
2212 ret = vchiq_platform_init_state(state);
2213 if (ret)
2214 return ret;
2215
2216 /*
2217 * bring up slot handler thread
2218 */
2219 snprintf(threadname, sizeof(threadname), "vchiq-slot/%d", state->id);
2220 state->slot_handler_thread = kthread_create(&slot_handler_func, (void *)state, threadname);
2221
2222 if (IS_ERR(state->slot_handler_thread)) {
2223 dev_err(state->dev, "couldn't create thread %s\n", threadname);
2224 return PTR_ERR(state->slot_handler_thread);
2225 }
2226 set_user_nice(state->slot_handler_thread, -19);
2227
2228 snprintf(threadname, sizeof(threadname), "vchiq-recy/%d", state->id);
2229 state->recycle_thread = kthread_create(&recycle_func, (void *)state, threadname);
2230 if (IS_ERR(state->recycle_thread)) {
2231 dev_err(state->dev, "couldn't create thread %s\n", threadname);
2232 ret = PTR_ERR(state->recycle_thread);
2233 goto fail_free_handler_thread;
2234 }
2235 set_user_nice(state->recycle_thread, -19);
2236
2237 snprintf(threadname, sizeof(threadname), "vchiq-sync/%d", state->id);
2238 state->sync_thread = kthread_create(&sync_func, (void *)state, threadname);
2239 if (IS_ERR(state->sync_thread)) {
2240 dev_err(state->dev, "couldn't create thread %s\n", threadname);
2241 ret = PTR_ERR(state->sync_thread);
2242 goto fail_free_recycle_thread;
2243 }
2244 set_user_nice(state->sync_thread, -20);
2245
2246 wake_up_process(state->slot_handler_thread);
2247 wake_up_process(state->recycle_thread);
2248 wake_up_process(state->sync_thread);
2249
2250 /* Indicate readiness to the other side */
2251 local->initialised = 1;
2252
2253 return 0;
2254
2255 fail_free_recycle_thread:
2256 kthread_stop(state->recycle_thread);
2257 fail_free_handler_thread:
2258 kthread_stop(state->slot_handler_thread);
2259
2260 return ret;
2261 }
2262
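/*
 * Append a held message header to the service's message queue for later
 * retrieval with vchiq_msg_hold(). Blocks (flushing any signals) while
 * the queue is full, then signals msg_queue_push to wake a waiting reader.
 */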
2263 void vchiq_msg_queue_push(struct vchiq_instance *instance, unsigned int handle,
2264 struct vchiq_header *header)
2265 {
2266 struct vchiq_service *service = find_service_by_handle(instance, handle);
2267 int pos;
2268
2269 if (!service)
2270 return;
2271
2272 while (service->msg_queue_write == service->msg_queue_read +
2273 VCHIQ_MAX_SLOTS) {
2274 if (wait_for_completion_interruptible(&service->msg_queue_pop))
2275 flush_signals(current);
2276 }
2277
2278 pos = service->msg_queue_write & (VCHIQ_MAX_SLOTS - 1);
2279 service->msg_queue_write++;
2280 service->msg_queue[pos] = header;
2281
2282 complete(&service->msg_queue_push);
2283 }
2284 EXPORT_SYMBOL(vchiq_msg_queue_push);
2285
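/*
 * Remove and return the oldest header from the service's message queue,
 * or NULL if the queue is empty. Signals msg_queue_pop so that a blocked
 * vchiq_msg_queue_push() can make progress.
 */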
2286 struct vchiq_header *vchiq_msg_hold(struct vchiq_instance *instance, unsigned int handle)
2287 {
2288 struct vchiq_service *service = find_service_by_handle(instance, handle);
2289 struct vchiq_header *header;
2290 int pos;
2291
2292 if (!service)
2293 return NULL;
2294
2295 if (service->msg_queue_write == service->msg_queue_read)
2296 return NULL;
2297
2298 while (service->msg_queue_write == service->msg_queue_read) {
2299 if (wait_for_completion_interruptible(&service->msg_queue_push))
2300 flush_signals(current);
2301 }
2302
2303 pos = service->msg_queue_read & (VCHIQ_MAX_SLOTS - 1);
2304 service->msg_queue_read++;
2305 header = service->msg_queue[pos];
2306
2307 complete(&service->msg_queue_pop);
2308
2309 return header;
2310 }
2311 EXPORT_SYMBOL(vchiq_msg_hold);
2312
2313 static int vchiq_validate_params(struct vchiq_state *state,
2314 const struct vchiq_service_params_kernel *params)
2315 {
2316 if (!params->callback || !params->fourcc) {
2317 dev_err(state->dev, "Can't add service, invalid params\n");
2318 return -EINVAL;
2319 }
2320
2321 return 0;
2322 }
2323
2324 /* Called from application thread when a client or server service is created. */
2325 struct vchiq_service *
2326 vchiq_add_service_internal(struct vchiq_state *state,
2327 const struct vchiq_service_params_kernel *params,
2328 int srvstate, struct vchiq_instance *instance,
2329 void (*userdata_term)(void *userdata))
2330 {
2331 struct vchiq_service *service;
2332 struct vchiq_service __rcu **pservice = NULL;
2333 struct vchiq_service_quota *quota;
2334 int ret;
2335 int i;
2336
2337 ret = vchiq_validate_params(state, params);
2338 if (ret)
2339 return NULL;
2340
2341 service = kzalloc(sizeof(*service), GFP_KERNEL);
2342 if (!service)
2343 return service;
2344
2345 service->base.fourcc = params->fourcc;
2346 service->base.callback = params->callback;
2347 service->base.userdata = params->userdata;
2348 service->handle = VCHIQ_SERVICE_HANDLE_INVALID;
2349 kref_init(&service->ref_count);
2350 service->srvstate = VCHIQ_SRVSTATE_FREE;
2351 service->userdata_term = userdata_term;
2352 service->localport = VCHIQ_PORT_FREE;
2353 service->remoteport = VCHIQ_PORT_FREE;
2354
2355 service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
2356 VCHIQ_FOURCC_INVALID : params->fourcc;
2357 service->auto_close = 1;
2358 atomic_set(&service->poll_flags, 0);
2359 service->version = params->version;
2360 service->version_min = params->version_min;
2361 service->state = state;
2362 service->instance = instance;
2363 init_completion(&service->remove_event);
2364 init_completion(&service->bulk_remove_event);
2365 init_completion(&service->msg_queue_pop);
2366 init_completion(&service->msg_queue_push);
2367 mutex_init(&service->bulk_mutex);
2368
2369 /*
2370 * Although it is perfectly possible to use a spinlock
2371 * to protect the creation of services, it is overkill as it
2372 * disables interrupts while the array is searched.
2373 * The only danger is of another thread trying to create a
2374 * service - service deletion is safe.
2375 * Therefore it is preferable to use state->mutex which,
2376 * although slower to claim, doesn't block interrupts while
2377 * it is held.
2378 */
2379
2380 mutex_lock(&state->mutex);
2381
2382 /* Prepare to use a previously unused service */
2383 if (state->unused_service < VCHIQ_MAX_SERVICES)
2384 pservice = &state->services[state->unused_service];
2385
2386 if (srvstate == VCHIQ_SRVSTATE_OPENING) {
2387 for (i = 0; i < state->unused_service; i++) {
2388 if (!rcu_access_pointer(state->services[i])) {
2389 pservice = &state->services[i];
2390 break;
2391 }
2392 }
2393 } else {
2394 rcu_read_lock();
2395 for (i = (state->unused_service - 1); i >= 0; i--) {
2396 struct vchiq_service *srv;
2397
2398 srv = rcu_dereference(state->services[i]);
2399 if (!srv) {
2400 pservice = &state->services[i];
2401 } else if ((srv->public_fourcc == params->fourcc) &&
2402 ((srv->instance != instance) ||
2403 (srv->base.callback != params->callback))) {
2404 /*
2405 * There is another server using this
2406 * fourcc which doesn't match.
2407 */
2408 pservice = NULL;
2409 break;
2410 }
2411 }
2412 rcu_read_unlock();
2413 }
2414
2415 if (pservice) {
2416 service->localport = (pservice - state->services);
2417 if (!handle_seq)
2418 handle_seq = VCHIQ_MAX_STATES *
2419 VCHIQ_MAX_SERVICES;
2420 service->handle = handle_seq |
2421 (state->id * VCHIQ_MAX_SERVICES) |
2422 service->localport;
2423 handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
2424 rcu_assign_pointer(*pservice, service);
2425 if (pservice == &state->services[state->unused_service])
2426 state->unused_service++;
2427 }
2428
2429 mutex_unlock(&state->mutex);
2430
2431 if (!pservice) {
2432 kfree(service);
2433 return NULL;
2434 }
2435
2436 quota = &state->service_quotas[service->localport];
2437 quota->slot_quota = state->default_slot_quota;
2438 quota->message_quota = state->default_message_quota;
2439 if (quota->slot_use_count == 0)
2440 quota->previous_tx_index =
2441 SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
2442 - 1;
2443
2444 /* Bring this service online */
2445 set_service_state(service, srvstate);
2446
2447 dev_dbg(state->dev, "core_msg: %s Service %p4cc SrcPort:%d\n",
2448 (srvstate == VCHIQ_SRVSTATE_OPENING) ? "Open" : "Add",
2449 		&params->fourcc, service->localport);
2450
2451 /* Don't unlock the service - leave it with a ref_count of 1. */
2452
2453 return service;
2454 }
2455
2456 int
2457 vchiq_open_service_internal(struct vchiq_service *service, int client_id)
2458 {
2459 struct vchiq_open_payload payload = {
2460 service->base.fourcc,
2461 client_id,
2462 service->version,
2463 service->version_min
2464 };
2465 int status = 0;
2466
2467 service->client_id = client_id;
2468 vchiq_use_service_internal(service);
2469 status = queue_message(service->state,
2470 NULL, MAKE_OPEN(service->localport),
2471 memcpy_copy_callback,
2472 &payload,
2473 sizeof(payload),
2474 QMFLAGS_IS_BLOCKING);
2475
2476 if (status)
2477 return status;
2478
2479 /* Wait for the ACK/NAK */
2480 if (wait_for_completion_interruptible(&service->remove_event)) {
2481 status = -EAGAIN;
2482 vchiq_release_service_internal(service);
2483 } else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
2484 (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
2485 if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
2486 dev_err(service->state->dev,
2487 "core: %d: osi - srvstate = %s (ref %u)\n",
2488 service->state->id, srvstate_names[service->srvstate],
2489 kref_read(&service->ref_count));
2490 status = -EINVAL;
2491 VCHIQ_SERVICE_STATS_INC(service, error_count);
2492 vchiq_release_service_internal(service);
2493 }
2494
2495 return status;
2496 }
2497
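/*
 * Walk the remote slots and release any messages still claimed by this
 * service, so the slots can be recycled once the service goes away. For
 * a synchronous service only the sync slot needs to be checked.
 */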
2498 static void
2499 release_service_messages(struct vchiq_service *service)
2500 {
2501 struct vchiq_state *state = service->state;
2502 int slot_last = state->remote->slot_last;
2503 int i;
2504
2505 /* Release any claimed messages aimed at this service */
2506
2507 if (service->sync) {
2508 struct vchiq_header *header =
2509 (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2510 state->remote->slot_sync);
2511 if (VCHIQ_MSG_DSTPORT(header->msgid) == service->localport)
2512 release_message_sync(state, header);
2513
2514 return;
2515 }
2516
2517 for (i = state->remote->slot_first; i <= slot_last; i++) {
2518 struct vchiq_slot_info *slot_info =
2519 SLOT_INFO_FROM_INDEX(state, i);
2520 unsigned int pos, end;
2521 char *data;
2522
2523 if (slot_info->release_count == slot_info->use_count)
2524 continue;
2525
2526 data = (char *)SLOT_DATA_FROM_INDEX(state, i);
2527 end = VCHIQ_SLOT_SIZE;
2528 if (data == state->rx_data)
2529 /*
2530 * This buffer is still being read from - stop
2531 * at the current read position
2532 */
2533 end = state->rx_pos & VCHIQ_SLOT_MASK;
2534
2535 pos = 0;
2536
2537 while (pos < end) {
2538 struct vchiq_header *header =
2539 (struct vchiq_header *)(data + pos);
2540 int msgid = header->msgid;
2541 int port = VCHIQ_MSG_DSTPORT(msgid);
2542
2543 if ((port == service->localport) && (msgid & VCHIQ_MSGID_CLAIMED)) {
2544 dev_dbg(state->dev, "core: fsi - hdr %pK\n", header);
2545 release_slot(state, slot_info, header, NULL);
2546 }
2547 pos += calc_stride(header->size);
2548 if (pos > VCHIQ_SLOT_SIZE) {
2549 dev_err(state->dev,
2550 "core: fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x\n",
2551 pos, header, msgid, header->msgid, header->size);
2552 WARN(1, "invalid slot position\n");
2553 }
2554 }
2555 }
2556 }
2557
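/*
 * Abort any outstanding bulk transfers on both queues and notify their
 * completion. Returns non-zero on success, zero if the abort could not
 * be completed and should be retried.
 */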
2558 static int
2559 do_abort_bulks(struct vchiq_service *service)
2560 {
2561 int status;
2562
2563 /* Abort any outstanding bulk transfers */
2564 if (mutex_lock_killable(&service->bulk_mutex))
2565 return 0;
2566 abort_outstanding_bulks(service, &service->bulk_tx);
2567 abort_outstanding_bulks(service, &service->bulk_rx);
2568 mutex_unlock(&service->bulk_mutex);
2569
2570 status = notify_bulks(service, &service->bulk_tx, NO_RETRY_POLL);
2571 if (status)
2572 return 0;
2573
2574 status = notify_bulks(service, &service->bulk_rx, NO_RETRY_POLL);
2575 return !status;
2576 }
2577
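/*
 * Complete the close handshake for a service: move it to LISTENING,
 * CLOSEWAIT or CLOSED as appropriate, deliver the VCHIQ_SERVICE_CLOSED
 * callback and drop any remaining use counts. If the callback returns
 * -EAGAIN the service is parked in failstate so the close can be retried.
 */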
2578 static int
2579 close_service_complete(struct vchiq_service *service, int failstate)
2580 {
2581 int status;
2582 int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2583 int newstate;
2584
2585 switch (service->srvstate) {
2586 case VCHIQ_SRVSTATE_OPEN:
2587 case VCHIQ_SRVSTATE_CLOSESENT:
2588 case VCHIQ_SRVSTATE_CLOSERECVD:
2589 if (is_server) {
2590 if (service->auto_close) {
2591 service->client_id = 0;
2592 service->remoteport = VCHIQ_PORT_FREE;
2593 newstate = VCHIQ_SRVSTATE_LISTENING;
2594 } else {
2595 newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
2596 }
2597 } else {
2598 newstate = VCHIQ_SRVSTATE_CLOSED;
2599 }
2600 set_service_state(service, newstate);
2601 break;
2602 case VCHIQ_SRVSTATE_LISTENING:
2603 break;
2604 default:
2605 dev_err(service->state->dev, "core: (%x) called in state %s\n",
2606 service->handle, srvstate_names[service->srvstate]);
2607 WARN(1, "%s in unexpected state\n", __func__);
2608 return -EINVAL;
2609 }
2610
2611 status = make_service_callback(service, VCHIQ_SERVICE_CLOSED, NULL, NULL);
2612
2613 if (status != -EAGAIN) {
2614 int uc = service->service_use_count;
2615 int i;
2616 /* Complete the close process */
2617 for (i = 0; i < uc; i++)
2618 /*
2619 * cater for cases where close is forced and the
2620 			 * client may not close all its handles
2621 */
2622 vchiq_release_service_internal(service);
2623
2624 service->client_id = 0;
2625 service->remoteport = VCHIQ_PORT_FREE;
2626
2627 if (service->srvstate == VCHIQ_SRVSTATE_CLOSED) {
2628 vchiq_free_service_internal(service);
2629 } else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
2630 if (is_server)
2631 service->closing = 0;
2632
2633 complete(&service->remove_event);
2634 }
2635 } else {
2636 set_service_state(service, failstate);
2637 }
2638
2639 return status;
2640 }
2641
2642 /* Called by the slot handler */
2643 int
2644 vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
2645 {
2646 struct vchiq_state *state = service->state;
2647 int status = 0;
2648 int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2649 int close_id = MAKE_CLOSE(service->localport,
2650 VCHIQ_MSG_DSTPORT(service->remoteport));
2651
2652 dev_dbg(state->dev, "core: %d: csi:%d,%d (%s)\n",
2653 service->state->id, service->localport, close_recvd,
2654 srvstate_names[service->srvstate]);
2655
2656 switch (service->srvstate) {
2657 case VCHIQ_SRVSTATE_CLOSED:
2658 case VCHIQ_SRVSTATE_HIDDEN:
2659 case VCHIQ_SRVSTATE_LISTENING:
2660 case VCHIQ_SRVSTATE_CLOSEWAIT:
2661 if (close_recvd) {
2662 dev_err(state->dev, "core: (1) called in state %s\n",
2663 srvstate_names[service->srvstate]);
2664 } else if (is_server) {
2665 if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
2666 status = -EINVAL;
2667 } else {
2668 service->client_id = 0;
2669 service->remoteport = VCHIQ_PORT_FREE;
2670 if (service->srvstate == VCHIQ_SRVSTATE_CLOSEWAIT)
2671 set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
2672 }
2673 complete(&service->remove_event);
2674 } else {
2675 vchiq_free_service_internal(service);
2676 }
2677 break;
2678 case VCHIQ_SRVSTATE_OPENING:
2679 if (close_recvd) {
2680 /* The open was rejected - tell the user */
2681 set_service_state(service, VCHIQ_SRVSTATE_CLOSEWAIT);
2682 complete(&service->remove_event);
2683 } else {
2684 /* Shutdown mid-open - let the other side know */
2685 status = queue_message(state, service, close_id, NULL, NULL, 0, 0);
2686 }
2687 break;
2688
2689 case VCHIQ_SRVSTATE_OPENSYNC:
2690 mutex_lock(&state->sync_mutex);
2691 fallthrough;
2692 case VCHIQ_SRVSTATE_OPEN:
2693 if (close_recvd) {
2694 if (!do_abort_bulks(service))
2695 status = -EAGAIN;
2696 }
2697
2698 release_service_messages(service);
2699
2700 if (!status)
2701 status = queue_message(state, service, close_id, NULL,
2702 NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK);
2703
2704 if (status) {
2705 if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)
2706 mutex_unlock(&state->sync_mutex);
2707 break;
2708 }
2709
2710 if (!close_recvd) {
2711 /* Change the state while the mutex is still held */
2712 set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
2713 mutex_unlock(&state->slot_mutex);
2714 if (service->sync)
2715 mutex_unlock(&state->sync_mutex);
2716 break;
2717 }
2718
2719 /* Change the state while the mutex is still held */
2720 set_service_state(service, VCHIQ_SRVSTATE_CLOSERECVD);
2721 mutex_unlock(&state->slot_mutex);
2722 if (service->sync)
2723 mutex_unlock(&state->sync_mutex);
2724
2725 status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
2726 break;
2727
2728 case VCHIQ_SRVSTATE_CLOSESENT:
2729 if (!close_recvd)
2730 /* This happens when a process is killed mid-close */
2731 break;
2732
2733 if (!do_abort_bulks(service)) {
2734 status = -EAGAIN;
2735 break;
2736 }
2737
2738 if (!status)
2739 status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
2740 break;
2741
2742 case VCHIQ_SRVSTATE_CLOSERECVD:
2743 if (!close_recvd && is_server)
2744 /* Force into LISTENING mode */
2745 set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
2746 status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
2747 break;
2748
2749 default:
2750 dev_err(state->dev, "core: (%d) called in state %s\n",
2751 close_recvd, srvstate_names[service->srvstate]);
2752 break;
2753 }
2754
2755 return status;
2756 }
2757
2758 /* Called from the application process upon process death */
2759 void
2760 vchiq_terminate_service_internal(struct vchiq_service *service)
2761 {
2762 struct vchiq_state *state = service->state;
2763
2764 dev_dbg(state->dev, "core: %d: tsi - (%d<->%d)\n",
2765 state->id, service->localport, service->remoteport);
2766
2767 mark_service_closing(service);
2768
2769 /* Mark the service for removal by the slot handler */
2770 request_poll(state, service, VCHIQ_POLL_REMOVE);
2771 }
2772
2773 /* Called from the slot handler */
2774 void
2775 vchiq_free_service_internal(struct vchiq_service *service)
2776 {
2777 struct vchiq_state *state = service->state;
2778
2779 dev_dbg(state->dev, "core: %d: fsi - (%d)\n", state->id, service->localport);
2780
2781 switch (service->srvstate) {
2782 case VCHIQ_SRVSTATE_OPENING:
2783 case VCHIQ_SRVSTATE_CLOSED:
2784 case VCHIQ_SRVSTATE_HIDDEN:
2785 case VCHIQ_SRVSTATE_LISTENING:
2786 case VCHIQ_SRVSTATE_CLOSEWAIT:
2787 break;
2788 default:
2789 dev_err(state->dev, "core: %d: fsi - (%d) in state %s\n",
2790 state->id, service->localport, srvstate_names[service->srvstate]);
2791 return;
2792 }
2793
2794 set_service_state(service, VCHIQ_SRVSTATE_FREE);
2795
2796 complete(&service->remove_event);
2797
2798 /* Release the initial lock */
2799 vchiq_service_put(service);
2800 }
2801
2802 int
2803 vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance)
2804 {
2805 struct vchiq_service *service;
2806 int i;
2807
2808 /* Find all services registered to this client and enable them. */
2809 i = 0;
2810 while ((service = next_service_by_instance(state, instance, &i)) != NULL) {
2811 if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
2812 set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
2813 vchiq_service_put(service);
2814 }
2815
2816 if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
2817 if (queue_message(state, NULL, MAKE_CONNECT, NULL, NULL, 0,
2818 QMFLAGS_IS_BLOCKING) == -EAGAIN)
2819 return -EAGAIN;
2820
2821 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
2822 }
2823
2824 if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
2825 if (wait_for_completion_interruptible(&state->connect))
2826 return -EAGAIN;
2827
2828 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
2829 complete(&state->connect);
2830 }
2831
2832 return 0;
2833 }
2834
2835 void
2836 vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance)
2837 {
2838 struct vchiq_service *service;
2839 int i;
2840
2841 /* Find all services registered to this client and remove them. */
2842 i = 0;
2843 while ((service = next_service_by_instance(state, instance, &i)) != NULL) {
2844 (void)vchiq_remove_service(instance, service->handle);
2845 vchiq_service_put(service);
2846 }
2847 }
2848
2849 int
2850 vchiq_close_service(struct vchiq_instance *instance, unsigned int handle)
2851 {
2852 /* Unregister the service */
2853 struct vchiq_service *service = find_service_by_handle(instance, handle);
2854 int status = 0;
2855
2856 if (!service)
2857 return -EINVAL;
2858
2859 dev_dbg(service->state->dev, "core: %d: close_service:%d\n",
2860 service->state->id, service->localport);
2861
2862 if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2863 (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
2864 (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
2865 vchiq_service_put(service);
2866 return -EINVAL;
2867 }
2868
2869 mark_service_closing(service);
2870
2871 if (current == service->state->slot_handler_thread) {
2872 status = vchiq_close_service_internal(service, NO_CLOSE_RECVD);
2873 WARN_ON(status == -EAGAIN);
2874 } else {
2875 /* Mark the service for termination by the slot handler */
2876 request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
2877 }
2878
2879 while (1) {
2880 if (wait_for_completion_interruptible(&service->remove_event)) {
2881 status = -EAGAIN;
2882 break;
2883 }
2884
2885 if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2886 (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
2887 (service->srvstate == VCHIQ_SRVSTATE_OPEN))
2888 break;
2889
2890 dev_warn(service->state->dev,
2891 "core: %d: close_service:%d - waiting in state %s\n",
2892 service->state->id, service->localport,
2893 srvstate_names[service->srvstate]);
2894 }
2895
2896 if (!status &&
2897 (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
2898 (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
2899 status = -EINVAL;
2900
2901 vchiq_service_put(service);
2902
2903 return status;
2904 }
2905 EXPORT_SYMBOL(vchiq_close_service);
2906
2907 int
2908 vchiq_remove_service(struct vchiq_instance *instance, unsigned int handle)
2909 {
2910 /* Unregister the service */
2911 struct vchiq_service *service = find_service_by_handle(instance, handle);
2912 int status = 0;
2913
2914 if (!service)
2915 return -EINVAL;
2916
2917 dev_dbg(service->state->dev, "core: %d: remove_service:%d\n",
2918 service->state->id, service->localport);
2919
2920 if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
2921 vchiq_service_put(service);
2922 return -EINVAL;
2923 }
2924
2925 mark_service_closing(service);
2926
2927 if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
2928 (current == service->state->slot_handler_thread)) {
2929 /*
2930 * Make it look like a client, because it must be removed and
2931 * not left in the LISTENING state.
2932 */
2933 service->public_fourcc = VCHIQ_FOURCC_INVALID;
2934
2935 status = vchiq_close_service_internal(service, NO_CLOSE_RECVD);
2936 WARN_ON(status == -EAGAIN);
2937 } else {
2938 /* Mark the service for removal by the slot handler */
2939 request_poll(service->state, service, VCHIQ_POLL_REMOVE);
2940 }
2941 while (1) {
2942 if (wait_for_completion_interruptible(&service->remove_event)) {
2943 status = -EAGAIN;
2944 break;
2945 }
2946
2947 if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2948 (service->srvstate == VCHIQ_SRVSTATE_OPEN))
2949 break;
2950
2951 dev_warn(service->state->dev,
2952 "core: %d: remove_service:%d - waiting in state %s\n",
2953 service->state->id, service->localport,
2954 srvstate_names[service->srvstate]);
2955 }
2956
2957 if (!status && (service->srvstate != VCHIQ_SRVSTATE_FREE))
2958 status = -EINVAL;
2959
2960 vchiq_service_put(service);
2961
2962 return status;
2963 }
2964
2965 /*
2966 * This function may be called by kernel threads or user threads.
2967 * User threads may receive -EAGAIN to indicate that a signal has been
2968 * received and the call should be retried after being returned to user
2969 * context.
2970 * When called in blocking mode, the userdata field points to a bulk_waiter
2971 * structure.
2972 */
2973 int vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle,
2974 void *offset, void __user *uoffset, int size, void *userdata,
2975 enum vchiq_bulk_mode mode, enum vchiq_bulk_dir dir)
2976 {
2977 struct vchiq_service *service = find_service_by_handle(instance, handle);
2978 struct vchiq_bulk_queue *queue;
2979 struct vchiq_bulk *bulk;
2980 struct vchiq_state *state;
2981 struct bulk_waiter *bulk_waiter = NULL;
2982 const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
2983 const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
2984 VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
2985 int status = -EINVAL;
2986 int payload[2];
2987
2988 if (!service)
2989 goto error_exit;
2990
2991 if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
2992 goto error_exit;
2993
2994 if (!offset && !uoffset)
2995 goto error_exit;
2996
2997 if (vchiq_check_service(service))
2998 goto error_exit;
2999
3000 switch (mode) {
3001 case VCHIQ_BULK_MODE_NOCALLBACK:
3002 case VCHIQ_BULK_MODE_CALLBACK:
3003 break;
3004 case VCHIQ_BULK_MODE_BLOCKING:
3005 bulk_waiter = userdata;
3006 init_completion(&bulk_waiter->event);
3007 bulk_waiter->actual = 0;
3008 bulk_waiter->bulk = NULL;
3009 break;
3010 case VCHIQ_BULK_MODE_WAITING:
3011 bulk_waiter = userdata;
3012 bulk = bulk_waiter->bulk;
3013 goto waiting;
3014 default:
3015 goto error_exit;
3016 }
3017
3018 state = service->state;
3019
3020 queue = (dir == VCHIQ_BULK_TRANSMIT) ?
3021 &service->bulk_tx : &service->bulk_rx;
3022
3023 if (mutex_lock_killable(&service->bulk_mutex)) {
3024 status = -EAGAIN;
3025 goto error_exit;
3026 }
3027
3028 if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
3029 VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
3030 do {
3031 mutex_unlock(&service->bulk_mutex);
3032 if (wait_for_completion_interruptible(&service->bulk_remove_event)) {
3033 status = -EAGAIN;
3034 goto error_exit;
3035 }
3036 if (mutex_lock_killable(&service->bulk_mutex)) {
3037 status = -EAGAIN;
3038 goto error_exit;
3039 }
3040 } while (queue->local_insert == queue->remove +
3041 VCHIQ_NUM_SERVICE_BULKS);
3042 }
3043
3044 bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
3045
3046 bulk->mode = mode;
3047 bulk->dir = dir;
3048 bulk->userdata = userdata;
3049 bulk->size = size;
3050 bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
3051
3052 if (vchiq_prepare_bulk_data(instance, bulk, offset, uoffset, size, dir))
3053 goto unlock_error_exit;
3054
3055 /*
3056 * Ensure that the bulk data record is visible to the peer
3057 * before proceeding.
3058 */
3059 wmb();
3060
3061 dev_dbg(state->dev, "core: %d: bt (%d->%d) %cx %x@%pad %pK\n",
3062 state->id, service->localport, service->remoteport,
3063 dir_char, size, &bulk->data, userdata);
3064
3065 /*
3066 * The slot mutex must be held when the service is being closed, so
3067 * claim it here to ensure that isn't happening
3068 */
3069 if (mutex_lock_killable(&state->slot_mutex)) {
3070 status = -EAGAIN;
3071 goto cancel_bulk_error_exit;
3072 }
3073
3074 if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3075 goto unlock_both_error_exit;
3076
3077 payload[0] = lower_32_bits(bulk->data);
3078 payload[1] = bulk->size;
3079 status = queue_message(state,
3080 NULL,
3081 VCHIQ_MAKE_MSG(dir_msgtype,
3082 service->localport,
3083 service->remoteport),
3084 memcpy_copy_callback,
3085 &payload,
3086 sizeof(payload),
3087 QMFLAGS_IS_BLOCKING |
3088 QMFLAGS_NO_MUTEX_LOCK |
3089 QMFLAGS_NO_MUTEX_UNLOCK);
3090 if (status)
3091 goto unlock_both_error_exit;
3092
3093 queue->local_insert++;
3094
3095 mutex_unlock(&state->slot_mutex);
3096 mutex_unlock(&service->bulk_mutex);
3097
3098 dev_dbg(state->dev, "core: %d: bt:%d %cx li=%x ri=%x p=%x\n",
3099 state->id, service->localport, dir_char, queue->local_insert,
3100 queue->remote_insert, queue->process);
3101
3102 waiting:
3103 vchiq_service_put(service);
3104
3105 status = 0;
3106
3107 if (bulk_waiter) {
3108 bulk_waiter->bulk = bulk;
3109 if (wait_for_completion_interruptible(&bulk_waiter->event))
3110 status = -EAGAIN;
3111 else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
3112 status = -EINVAL;
3113 }
3114
3115 return status;
3116
3117 unlock_both_error_exit:
3118 mutex_unlock(&state->slot_mutex);
3119 cancel_bulk_error_exit:
3120 vchiq_complete_bulk(service->instance, bulk);
3121 unlock_error_exit:
3122 mutex_unlock(&service->bulk_mutex);
3123
3124 error_exit:
3125 if (service)
3126 vchiq_service_put(service);
3127 return status;
3128 }
3129
3130 int
3131 vchiq_queue_message(struct vchiq_instance *instance, unsigned int handle,
3132 ssize_t (*copy_callback)(void *context, void *dest,
3133 size_t offset, size_t maxsize),
3134 void *context,
3135 size_t size)
3136 {
3137 struct vchiq_service *service = find_service_by_handle(instance, handle);
3138 int status = -EINVAL;
3139 int data_id;
3140
3141 if (!service)
3142 goto error_exit;
3143
3144 if (vchiq_check_service(service))
3145 goto error_exit;
3146
3147 if (!size) {
3148 VCHIQ_SERVICE_STATS_INC(service, error_count);
3149 goto error_exit;
3150 }
3151
3152 if (size > VCHIQ_MAX_MSG_SIZE) {
3153 VCHIQ_SERVICE_STATS_INC(service, error_count);
3154 goto error_exit;
3155 }
3156
3157 data_id = MAKE_DATA(service->localport, service->remoteport);
3158
3159 switch (service->srvstate) {
3160 case VCHIQ_SRVSTATE_OPEN:
3161 status = queue_message(service->state, service, data_id,
3162 copy_callback, context, size, 1);
3163 break;
3164 case VCHIQ_SRVSTATE_OPENSYNC:
3165 status = queue_message_sync(service->state, service, data_id,
3166 copy_callback, context, size, 1);
3167 break;
3168 default:
3169 status = -EINVAL;
3170 break;
3171 }
3172
3173 error_exit:
3174 if (service)
3175 vchiq_service_put(service);
3176
3177 return status;
3178 }
3179
3180 int vchiq_queue_kernel_message(struct vchiq_instance *instance, unsigned int handle, void *data,
3181 unsigned int size)
3182 {
3183 int status;
3184
3185 while (1) {
3186 status = vchiq_queue_message(instance, handle, memcpy_copy_callback,
3187 data, size);
3188
3189 /*
3190 * vchiq_queue_message() may return -EAGAIN, so we need to
3191 * implement a retry mechanism since this function is supposed
3192 * to block until queued
3193 */
3194 if (status != -EAGAIN)
3195 break;
3196
3197 msleep(1);
3198 }
3199
3200 return status;
3201 }
3202 EXPORT_SYMBOL(vchiq_queue_kernel_message);
3203
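/*
 * Release a message header previously passed to a service callback.
 * Messages from ordinary remote slots drop their slot claim via
 * release_slot(); messages from the remote sync slot are released with
 * release_message_sync().
 */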
3204 void
3205 vchiq_release_message(struct vchiq_instance *instance, unsigned int handle,
3206 struct vchiq_header *header)
3207 {
3208 struct vchiq_service *service = find_service_by_handle(instance, handle);
3209 struct vchiq_shared_state *remote;
3210 struct vchiq_state *state;
3211 int slot_index;
3212
3213 if (!service)
3214 return;
3215
3216 state = service->state;
3217 remote = state->remote;
3218
3219 slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
3220
3221 if ((slot_index >= remote->slot_first) &&
3222 (slot_index <= remote->slot_last)) {
3223 int msgid = header->msgid;
3224
3225 if (msgid & VCHIQ_MSGID_CLAIMED) {
3226 struct vchiq_slot_info *slot_info =
3227 SLOT_INFO_FROM_INDEX(state, slot_index);
3228
3229 release_slot(state, slot_info, header, service);
3230 }
3231 } else if (slot_index == remote->slot_sync) {
3232 release_message_sync(state, header);
3233 }
3234
3235 vchiq_service_put(service);
3236 }
3237 EXPORT_SYMBOL(vchiq_release_message);
3238
3239 static void
3240 release_message_sync(struct vchiq_state *state, struct vchiq_header *header)
3241 {
3242 header->msgid = VCHIQ_MSGID_PADDING;
3243 remote_event_signal(&state->remote->sync_release);
3244 }
3245
3246 int
3247 vchiq_get_peer_version(struct vchiq_instance *instance, unsigned int handle, short *peer_version)
3248 {
3249 int status = -EINVAL;
3250 struct vchiq_service *service = find_service_by_handle(instance, handle);
3251
3252 if (!service)
3253 goto exit;
3254
3255 if (vchiq_check_service(service))
3256 goto exit;
3257
3258 if (!peer_version)
3259 goto exit;
3260
3261 *peer_version = service->peer_version;
3262 status = 0;
3263
3264 exit:
3265 if (service)
3266 vchiq_service_put(service);
3267 return status;
3268 }
3269 EXPORT_SYMBOL(vchiq_get_peer_version);
3270
3271 void vchiq_get_config(struct vchiq_config *config)
3272 {
3273 config->max_msg_size = VCHIQ_MAX_MSG_SIZE;
3274 config->bulk_threshold = VCHIQ_MAX_MSG_SIZE;
3275 config->max_outstanding_bulks = VCHIQ_NUM_SERVICE_BULKS;
3276 config->max_services = VCHIQ_MAX_SERVICES;
3277 config->version = VCHIQ_VERSION;
3278 config->version_min = VCHIQ_VERSION_MIN;
3279 }
3280
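/*
 * Adjust a per-service option: autoclose behaviour, slot/message quotas,
 * synchronous mode or message tracing. Setting a quota at or above the
 * current usage signals the service's quota_event in case a sender is
 * blocked waiting for quota.
 */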
3281 int
3282 vchiq_set_service_option(struct vchiq_instance *instance, unsigned int handle,
3283 enum vchiq_service_option option, int value)
3284 {
3285 struct vchiq_service *service = find_service_by_handle(instance, handle);
3286 struct vchiq_service_quota *quota;
3287 int ret = -EINVAL;
3288
3289 if (!service)
3290 return -EINVAL;
3291
3292 switch (option) {
3293 case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
3294 service->auto_close = value;
3295 ret = 0;
3296 break;
3297
3298 case VCHIQ_SERVICE_OPTION_SLOT_QUOTA:
3299 quota = &service->state->service_quotas[service->localport];
3300 if (value == 0)
3301 value = service->state->default_slot_quota;
3302 if ((value >= quota->slot_use_count) &&
3303 (value < (unsigned short)~0)) {
3304 quota->slot_quota = value;
3305 if ((value >= quota->slot_use_count) &&
3306 (quota->message_quota >= quota->message_use_count))
3307 /*
3308 * Signal the service that it may have
3309 * dropped below its quota
3310 */
3311 				complete(&quota->quota_event);
3312 ret = 0;
3313 }
3314 break;
3315
3316 case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA:
3317 quota = &service->state->service_quotas[service->localport];
3318 if (value == 0)
3319 value = service->state->default_message_quota;
3320 if ((value >= quota->message_use_count) &&
3321 (value < (unsigned short)~0)) {
3322 quota->message_quota = value;
3323 if ((value >= quota->message_use_count) &&
3324 (quota->slot_quota >= quota->slot_use_count))
3325 /*
3326 * Signal the service that it may have
3327 * dropped below its quota
3328 */
3329 				complete(&quota->quota_event);
3330 ret = 0;
3331 }
3332 break;
3333
3334 case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
3335 if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
3336 (service->srvstate == VCHIQ_SRVSTATE_LISTENING)) {
3337 service->sync = value;
3338 ret = 0;
3339 }
3340 break;
3341
3342 case VCHIQ_SERVICE_OPTION_TRACE:
3343 service->trace = value;
3344 ret = 0;
3345 break;
3346
3347 default:
3348 break;
3349 }
3350 vchiq_service_put(service);
3351
3352 return ret;
3353 }
3354
3355 static void
3356 vchiq_dump_shared_state(struct seq_file *f, struct vchiq_state *state,
3357 struct vchiq_shared_state *shared, const char *label)
3358 {
3359 static const char *const debug_names[] = {
3360 "<entries>",
3361 "SLOT_HANDLER_COUNT",
3362 "SLOT_HANDLER_LINE",
3363 "PARSE_LINE",
3364 "PARSE_HEADER",
3365 "PARSE_MSGID",
3366 "AWAIT_COMPLETION_LINE",
3367 "DEQUEUE_MESSAGE_LINE",
3368 "SERVICE_CALLBACK_LINE",
3369 "MSG_QUEUE_FULL_COUNT",
3370 "COMPLETION_QUEUE_FULL_COUNT"
3371 };
3372 int i;
3373
3374 seq_printf(f, " %s: slots %d-%d tx_pos=%x recycle=%x\n",
3375 label, shared->slot_first, shared->slot_last,
3376 shared->tx_pos, shared->slot_queue_recycle);
3377
3378 seq_puts(f, " Slots claimed:\n");
3379
3380 for (i = shared->slot_first; i <= shared->slot_last; i++) {
3381 struct vchiq_slot_info slot_info =
3382 *SLOT_INFO_FROM_INDEX(state, i);
3383 if (slot_info.use_count != slot_info.release_count) {
3384 seq_printf(f, " %d: %d/%d\n", i, slot_info.use_count,
3385 slot_info.release_count);
3386 }
3387 }
3388
3389 for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
3390 seq_printf(f, " DEBUG: %s = %d(%x)\n",
3391 debug_names[i], shared->debug[i], shared->debug[i]);
3392 }
3393 }
3394
3395 static void
3396 vchiq_dump_service_state(struct seq_file *f, struct vchiq_service *service)
3397 {
3398 unsigned int ref_count;
3399
3400 	/* Don't include the lock just taken */
3401 ref_count = kref_read(&service->ref_count) - 1;
3402 seq_printf(f, "Service %u: %s (ref %u)", service->localport,
3403 srvstate_names[service->srvstate], ref_count);
3404
3405 if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
3406 char remoteport[30];
3407 struct vchiq_service_quota *quota =
3408 &service->state->service_quotas[service->localport];
3409 int fourcc = service->base.fourcc;
3410 int tx_pending, rx_pending, tx_size = 0, rx_size = 0;
3411
3412 if (service->remoteport != VCHIQ_PORT_FREE) {
3413 int len2 = scnprintf(remoteport, sizeof(remoteport),
3414 "%u", service->remoteport);
3415
3416 if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
3417 scnprintf(remoteport + len2, sizeof(remoteport) - len2,
3418 " (client %x)", service->client_id);
3419 } else {
3420 strscpy(remoteport, "n/a", sizeof(remoteport));
3421 }
3422
3423 seq_printf(f, " '%p4cc' remote %s (msg use %d/%d, slot use %d/%d)\n",
3424 &fourcc, remoteport,
3425 quota->message_use_count, quota->message_quota,
3426 quota->slot_use_count, quota->slot_quota);
3427
3428 tx_pending = service->bulk_tx.local_insert -
3429 service->bulk_tx.remote_insert;
3430 if (tx_pending) {
3431 unsigned int i = BULK_INDEX(service->bulk_tx.remove);
3432
3433 tx_size = service->bulk_tx.bulks[i].size;
3434 }
3435
3436 rx_pending = service->bulk_rx.local_insert -
3437 service->bulk_rx.remote_insert;
3438 if (rx_pending) {
3439 unsigned int i = BULK_INDEX(service->bulk_rx.remove);
3440
3441 rx_size = service->bulk_rx.bulks[i].size;
3442 }
3443
3444 seq_printf(f, " Bulk: tx_pending=%d (size %d), rx_pending=%d (size %d)\n",
3445 tx_pending, tx_size, rx_pending, rx_size);
3446
3447 if (VCHIQ_ENABLE_STATS) {
3448 seq_printf(f, " Ctrl: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu\n",
3449 service->stats.ctrl_tx_count,
3450 service->stats.ctrl_tx_bytes,
3451 service->stats.ctrl_rx_count,
3452 service->stats.ctrl_rx_bytes);
3453
3454 seq_printf(f, " Bulk: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu\n",
3455 service->stats.bulk_tx_count,
3456 service->stats.bulk_tx_bytes,
3457 service->stats.bulk_rx_count,
3458 service->stats.bulk_rx_bytes);
3459
3460 seq_printf(f, " %d quota stalls, %d slot stalls, %d bulk stalls, %d aborted, %d errors\n",
3461 service->stats.quota_stalls,
3462 service->stats.slot_stalls,
3463 service->stats.bulk_stalls,
3464 service->stats.bulk_aborted_count,
3465 service->stats.error_count);
3466 }
3467 }
3468
3469 vchiq_dump_platform_service_state(f, service);
3470 }
3471
3472 void vchiq_dump_state(struct seq_file *f, struct vchiq_state *state)
3473 {
3474 int i;
3475
3476 seq_printf(f, "State %d: %s\n", state->id,
3477 conn_state_names[state->conn_state]);
3478
3479 seq_printf(f, " tx_pos=%x(@%pK), rx_pos=%x(@%pK)\n",
3480 state->local->tx_pos,
3481 state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
3482 state->rx_pos,
3483 state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
3484
3485 seq_printf(f, " Version: %d (min %d)\n", VCHIQ_VERSION,
3486 VCHIQ_VERSION_MIN);
3487
3488 if (VCHIQ_ENABLE_STATS) {
3489 seq_printf(f, " Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, error_count=%d\n",
3490 state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
3491 state->stats.error_count);
3492 }
3493
3494 seq_printf(f, " Slots: %d available (%d data), %d recyclable, %d stalls (%d data)\n",
3495 ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
3496 state->local_tx_pos) / VCHIQ_SLOT_SIZE,
3497 state->data_quota - state->data_use_count,
3498 state->local->slot_queue_recycle - state->slot_queue_available,
3499 state->stats.slot_stalls, state->stats.data_stalls);
3500
3501 vchiq_dump_platform_state(f);
3502
3503 vchiq_dump_shared_state(f, state, state->local, "Local");
3504
3505 vchiq_dump_shared_state(f, state, state->remote, "Remote");
3506
3507 vchiq_dump_platform_instances(f);
3508
3509 for (i = 0; i < state->unused_service; i++) {
3510 struct vchiq_service *service = find_service_by_port(state, i);
3511
3512 if (service) {
3513 vchiq_dump_service_state(f, service);
3514 vchiq_service_put(service);
3515 }
3516 }
3517 }
3518
3519 int vchiq_send_remote_use(struct vchiq_state *state)
3520 {
3521 if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
3522 return -ENOTCONN;
3523
3524 return queue_message(state, NULL, MAKE_REMOTE_USE, NULL, NULL, 0, 0);
3525 }
3526
3527 int vchiq_send_remote_use_active(struct vchiq_state *state)
3528 {
3529 if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
3530 return -ENOTCONN;
3531
3532 return queue_message(state, NULL, MAKE_REMOTE_USE_ACTIVE,
3533 NULL, NULL, 0, 0);
3534 }
3535
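/*
 * Dump a memory region to the debug log as hex and printable ASCII,
 * 16 bytes per line, prefixed with the given label and address.
 */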
3536 void vchiq_log_dump_mem(struct device *dev, const char *label, u32 addr,
3537 const void *void_mem, size_t num_bytes)
3538 {
3539 const u8 *mem = void_mem;
3540 size_t offset;
3541 char line_buf[100];
3542 char *s;
3543
3544 while (num_bytes > 0) {
3545 s = line_buf;
3546
3547 for (offset = 0; offset < 16; offset++) {
3548 if (offset < num_bytes)
3549 s += scnprintf(s, 4, "%02x ", mem[offset]);
3550 else
3551 s += scnprintf(s, 4, " ");
3552 }
3553
3554 for (offset = 0; offset < 16; offset++) {
3555 if (offset < num_bytes) {
3556 u8 ch = mem[offset];
3557
3558 if ((ch < ' ') || (ch > '~'))
3559 ch = '.';
3560 *s++ = (char)ch;
3561 }
3562 }
3563 *s++ = '\0';
3564
3565 		if (label && (*label != '\0'))
3566 			dev_dbg(dev, "core: %s: %08x: %s\n", label, addr, line_buf);
3567 		else
3568 			dev_dbg(dev, "core: %08x: %s\n", addr, line_buf);
3569
3570 addr += 16;
3571 mem += 16;
3572 if (num_bytes > 16)
3573 num_bytes -= 16;
3574 else
3575 num_bytes = 0;
3576 }
3577 }
3578