// SPDX-License-Identifier: GPL-2.0-only
/*
 * Mailbox: Common code for Mailbox controllers and users
 *
 * Copyright (C) 2013-2014 Linaro Ltd.
 * Author: Jassi Brar <jassisinghbrar@gmail.com>
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/spinlock.h>

#include "mailbox.h"

static LIST_HEAD(mbox_cons);
static DEFINE_MUTEX(con_mutex);

static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
{
	int idx;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	/* See if there is any space left */
	if (chan->msg_count == MBOX_TX_QUEUE_LEN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -ENOBUFS;
	}

	idx = chan->msg_free;
	chan->msg_data[idx] = mssg;
	chan->msg_count++;

	if (idx == MBOX_TX_QUEUE_LEN - 1)
		chan->msg_free = 0;
	else
		chan->msg_free++;

	spin_unlock_irqrestore(&chan->lock, flags);

	return idx;
}

static void msg_submit(struct mbox_chan *chan)
{
	unsigned int count, idx;
	unsigned long flags;
	void *data;
	int err = -EBUSY;

	spin_lock_irqsave(&chan->lock, flags);

	if (!chan->msg_count || chan->active_req)
		goto exit;

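	/*
	 * Derive the index of the oldest queued message: msg_free points
	 * at the next free slot, so step back msg_count entries and wrap
	 * around the ring buffer if needed.
	 */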
	count = chan->msg_count;
	idx = chan->msg_free;
	if (idx >= count)
		idx -= count;
	else
		idx += MBOX_TX_QUEUE_LEN - count;

	data = chan->msg_data[idx];

	if (chan->cl->tx_prepare)
		chan->cl->tx_prepare(chan->cl, data);
	/* Try to submit a message to the MBOX controller */
	err = chan->mbox->ops->send_data(chan, data);
	if (!err) {
		chan->active_req = data;
		chan->msg_count--;
	}
exit:
	spin_unlock_irqrestore(&chan->lock, flags);

	if (!err && (chan->txdone_method & TXDONE_BY_POLL)) {
		/* kick start the timer immediately to avoid delays */
		spin_lock_irqsave(&chan->mbox->poll_hrt_lock, flags);
		hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
		spin_unlock_irqrestore(&chan->mbox->poll_hrt_lock, flags);
	}
}

static void tx_tick(struct mbox_chan *chan, int r)
{
	unsigned long flags;
	void *mssg;

	spin_lock_irqsave(&chan->lock, flags);
	mssg = chan->active_req;
	chan->active_req = NULL;
	spin_unlock_irqrestore(&chan->lock, flags);

	/* Submit next message */
	msg_submit(chan);

	if (!mssg)
		return;

	/* Notify the client */
	if (chan->cl->tx_done)
		chan->cl->tx_done(chan->cl, mssg, r);

	if (r != -ETIME && chan->cl->tx_block)
		complete(&chan->tx_complete);
}

static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
{
	struct mbox_controller *mbox =
		container_of(hrtimer, struct mbox_controller, poll_hrt);
	bool txdone, resched = false;
	int i;
	unsigned long flags;

	for (i = 0; i < mbox->num_chans; i++) {
		struct mbox_chan *chan = &mbox->chans[i];

		if (chan->active_req && chan->cl) {
			txdone = chan->mbox->ops->last_tx_done(chan);
			if (txdone)
				tx_tick(chan, 0);
			else
				resched = true;
		}
	}

	if (resched) {
		spin_lock_irqsave(&mbox->poll_hrt_lock, flags);
		if (!hrtimer_is_queued(hrtimer))
			hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
		spin_unlock_irqrestore(&mbox->poll_hrt_lock, flags);

		return HRTIMER_RESTART;
	}
	return HRTIMER_NORESTART;
}
/**
 * mbox_chan_received_data - A way for the controller driver to push data
 *				received from the remote to the upper layer.
 * @chan: Pointer to the mailbox channel on which RX happened.
 * @mssg: Client specific message typecasted as void *
 *
 * After startup and before shutdown any data received on the chan
 * is passed on to the API via atomic mbox_chan_received_data().
 * The controller should ACK the RX only after this call returns.
 */
void mbox_chan_received_data(struct mbox_chan *chan, void *mssg)
{
	/* The received data is not buffered */
	if (chan->cl->rx_callback)
		chan->cl->rx_callback(chan->cl, mssg);
}
EXPORT_SYMBOL_GPL(mbox_chan_received_data);
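
/*
 * Example (illustrative sketch, not part of this file): a controller
 * driver would typically call this from its RX interrupt handler. The
 * foo_* names and FOO_RX_REG are hypothetical.
 *
 *	static irqreturn_t foo_mbox_rx_irq(int irq, void *p)
 *	{
 *		struct mbox_chan *chan = p;
 *		u32 msg = readl(foo_mmio_base + FOO_RX_REG);
 *
 *		mbox_chan_received_data(chan, &msg);
 *		return IRQ_HANDLED;
 *	}
 */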

/**
 * mbox_chan_txdone - A way for the controller driver to notify the
 *			framework that the last TX has completed.
 * @chan: Pointer to the mailbox chan on which TX happened.
 * @r: Status of last TX - OK or ERROR
 *
 * A controller that has an IRQ for TX ACK calls this atomic API
 * to tick the TX state machine. It works only if txdone_irq
 * is set by the controller.
 */
void mbox_chan_txdone(struct mbox_chan *chan, int r)
{
	if (unlikely(!(chan->txdone_method & TXDONE_BY_IRQ))) {
		dev_err(chan->mbox->dev,
			"Controller can't run the TX ticker\n");
		return;
	}

	tx_tick(chan, r);
}
EXPORT_SYMBOL_GPL(mbox_chan_txdone);
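
/*
 * Example (sketch): a controller with a TX-done interrupt sets
 * 'txdone_irq' in its struct mbox_controller and ticks the state
 * machine from the handler. foo_mbox_txdone_irq() is hypothetical.
 *
 *	static irqreturn_t foo_mbox_txdone_irq(int irq, void *p)
 *	{
 *		struct mbox_chan *chan = p;
 *
 *		mbox_chan_txdone(chan, 0);
 *		return IRQ_HANDLED;
 *	}
 */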

/**
 * mbox_client_txdone - The way for a client to run the TX state machine.
 * @chan: Mailbox channel assigned to this client.
 * @r: Success status of last transmission.
 *
 * The client/protocol has received some 'ACK' packet and it notifies
 * the API that the last packet was sent successfully. This only works
 * if the controller can't sense TX-Done.
 */
void mbox_client_txdone(struct mbox_chan *chan, int r)
{
	if (unlikely(!(chan->txdone_method & TXDONE_BY_ACK))) {
		dev_err(chan->mbox->dev, "Client can't run the TX ticker\n");
		return;
	}

	tx_tick(chan, r);
}
EXPORT_SYMBOL_GPL(mbox_client_txdone);
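
/*
 * Example (sketch): a client whose protocol carries its own ACKs sets
 * 'knows_txdone' and reports completion itself. The
 * foo_wait_for_protocol_ack() call is hypothetical.
 *
 *	cl->knows_txdone = true;
 *	...
 *	mbox_send_message(chan, &req);
 *	if (foo_wait_for_protocol_ack() == 0)
 *		mbox_client_txdone(chan, 0);
 */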

/**
 * mbox_client_peek_data - A way for the client driver to pull data
 *			received from the remote by the controller.
 * @chan: Mailbox channel assigned to this client.
 *
 * Prompt the controller driver to check for any received data.
 * The data is actually passed on to the client via
 * mbox_chan_received_data().
 * The call can be made from atomic context, so the controller's
 * implementation of peek_data() must not sleep.
 *
 * Return: True, if the controller has data and is going to push it
 *          to the client after this call.
 *         False, if the controller has no data to be read.
 */
bool mbox_client_peek_data(struct mbox_chan *chan)
{
	if (chan->mbox->ops->peek_data)
		return chan->mbox->ops->peek_data(chan);

	return false;
}
EXPORT_SYMBOL_GPL(mbox_client_peek_data);

/**
 * mbox_send_message -	For client to submit a message to be
 *				sent to the remote.
 * @chan: Mailbox channel assigned to this client.
 * @mssg: Client specific message typecasted as void *
 *
 * For the client to submit data to the controller destined for a remote
 * processor. If the client has set 'tx_block', the call will return
 * either when the remote receives the data or when 'tx_tout' millisecs
 * run out.
 * In non-blocking mode, the requests are buffered by the API and a
 * non-negative token is returned for each queued request. If the request
 * is not queued, a negative token is returned. Upon failure or successful
 * TX, the API calls 'tx_done' from atomic context, from which the client
 * could submit yet another request.
 * The pointer to the message should be preserved until it is sent
 * over the chan, i.e. until tx_done() is invoked.
 * This function could be called from atomic context as it simply
 * queues the data and returns a token against the request.
 *
 * Return: Non-negative integer for successful submission (non-blocking mode)
 *	or transmission over chan (blocking mode).
 *	Negative value denotes failure.
 */
int mbox_send_message(struct mbox_chan *chan, void *mssg)
{
	int t;

	if (!chan || !chan->cl)
		return -EINVAL;

	t = add_to_rbuf(chan, mssg);
	if (t < 0) {
		dev_err(chan->mbox->dev, "Try increasing MBOX_TX_QUEUE_LEN\n");
		return t;
	}

	msg_submit(chan);

	if (chan->cl->tx_block) {
		unsigned long wait;
		int ret;

		if (!chan->cl->tx_tout) /* "wait forever", i.e. up to an hour */
			wait = msecs_to_jiffies(3600000);
		else
			wait = msecs_to_jiffies(chan->cl->tx_tout);

		ret = wait_for_completion_timeout(&chan->tx_complete, wait);
		if (ret == 0) {
			t = -ETIME;
			tx_tick(chan, t);
		}
	}

	return t;
}
EXPORT_SYMBOL_GPL(mbox_send_message);
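
/*
 * Example (illustrative sketch only): a minimal blocking client. The
 * foo_* names are hypothetical; a real client would also save 'chan'
 * and release it with mbox_free_channel() in its remove path.
 *
 *	static void foo_rx_callback(struct mbox_client *cl, void *mssg)
 *	{
 *		dev_info(cl->dev, "reply: 0x%x\n", *(u32 *)mssg);
 *	}
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct mbox_client *cl;
 *		struct mbox_chan *chan;
 *		u32 msg = 0x1;
 *
 *		cl = devm_kzalloc(&pdev->dev, sizeof(*cl), GFP_KERNEL);
 *		if (!cl)
 *			return -ENOMEM;
 *
 *		cl->dev = &pdev->dev;
 *		cl->rx_callback = foo_rx_callback;
 *		cl->tx_block = true;
 *		cl->tx_tout = 500;
 *
 *		chan = mbox_request_channel(cl, 0);
 *		if (IS_ERR(chan))
 *			return PTR_ERR(chan);
 *
 *		return mbox_send_message(chan, &msg) < 0 ? -EIO : 0;
 *	}
 */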

/**
 * mbox_flush - flush a mailbox channel
 * @chan: mailbox channel to flush
 * @timeout: time, in milliseconds, to allow the flush operation to succeed
 *
 * Mailbox controllers that need to work in atomic context can implement the
 * ->flush() callback to busy loop until a transmission has been completed.
 * The implementation must call mbox_chan_txdone() upon success. Clients can
 * call the mbox_flush() function at any time after mbox_send_message() to
 * flush the transmission. After the function returns success, the mailbox
 * transmission is guaranteed to have completed.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int mbox_flush(struct mbox_chan *chan, unsigned long timeout)
{
	int ret;

	if (!chan->mbox->ops->flush)
		return -ENOTSUPP;

	ret = chan->mbox->ops->flush(chan, timeout);
	if (ret < 0)
		tx_tick(chan, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(mbox_flush);
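
/*
 * Example (sketch): an atomic-context user might pair the two calls to
 * guarantee the message has left the controller before returning; the
 * 100 ms timeout is an arbitrary choice for illustration.
 *
 *	mbox_send_message(chan, &msg);
 *	ret = mbox_flush(chan, 100);
 */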

static int __mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
{
	struct device *dev = cl->dev;
	unsigned long flags;
	int ret;

	if (chan->cl || !try_module_get(chan->mbox->dev->driver->owner)) {
		dev_dbg(dev, "%s: mailbox not free\n", __func__);
		return -EBUSY;
	}

	spin_lock_irqsave(&chan->lock, flags);
	chan->msg_free = 0;
	chan->msg_count = 0;
	chan->active_req = NULL;
	chan->cl = cl;
	init_completion(&chan->tx_complete);

	if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
		chan->txdone_method = TXDONE_BY_ACK;

	spin_unlock_irqrestore(&chan->lock, flags);

	if (chan->mbox->ops->startup) {
		ret = chan->mbox->ops->startup(chan);

		if (ret) {
			dev_err(dev, "Unable to startup the chan (%d)\n", ret);
			mbox_free_channel(chan);
			return ret;
		}
	}

	return 0;
}

/**
 * mbox_bind_client - Request a mailbox channel.
 * @chan: The mailbox channel to bind the client to.
 * @cl: Identity of the client requesting the channel.
 *
 * The client specifies its requirements and capabilities while asking for
 * a mailbox channel. This function can't be called from atomic context.
 * The channel is exclusively allocated and can't be used by another
 * client before the owner calls mbox_free_channel().
 * After assignment, any packet received on this channel will be
 * handed over to the client via the 'rx_callback'.
 * The framework holds a reference to the client, so the mbox_client
 * structure shouldn't be modified until mbox_free_channel() returns.
 *
 * Return: 0 if the channel was assigned to the client successfully.
 *         <0 for request failure.
 */
int mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
{
	int ret;

	mutex_lock(&con_mutex);
	ret = __mbox_bind_client(chan, cl);
	mutex_unlock(&con_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mbox_bind_client);

/**
 * mbox_request_channel - Request a mailbox channel.
 * @cl: Identity of the client requesting the channel.
 * @index: Index of mailbox specifier in 'mboxes' property.
 *
 * The client specifies its requirements and capabilities while asking for
 * a mailbox channel. This function can't be called from atomic context.
 * The channel is exclusively allocated and can't be used by another
 * client before the owner calls mbox_free_channel().
 * After assignment, any packet received on this channel will be
 * handed over to the client via the 'rx_callback'.
 * The framework holds a reference to the client, so the mbox_client
 * structure shouldn't be modified until mbox_free_channel() returns.
 *
 * Return: Pointer to the channel assigned to the client if successful.
 *		ERR_PTR for request failure.
 */
struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
{
	struct device *dev = cl->dev;
	struct mbox_controller *mbox;
	struct of_phandle_args spec;
	struct mbox_chan *chan;
	int ret;

	if (!dev || !dev->of_node) {
		pr_debug("%s: No owner device node\n", __func__);
		return ERR_PTR(-ENODEV);
	}

	ret = of_parse_phandle_with_args(dev->of_node, "mboxes", "#mbox-cells",
					 index, &spec);
	if (ret) {
		dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
		return ERR_PTR(ret);
	}

	mutex_lock(&con_mutex);

	chan = ERR_PTR(-EPROBE_DEFER);
	list_for_each_entry(mbox, &mbox_cons, node)
		if (mbox->dev->of_node == spec.np) {
			chan = mbox->of_xlate(mbox, &spec);
			if (!IS_ERR(chan))
				break;
		}

	of_node_put(spec.np);

	if (IS_ERR(chan)) {
		mutex_unlock(&con_mutex);
		return chan;
	}

	ret = __mbox_bind_client(chan, cl);
	if (ret)
		chan = ERR_PTR(ret);

	mutex_unlock(&con_mutex);
	return chan;
}
EXPORT_SYMBOL_GPL(mbox_request_channel);

struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
					      const char *name)
{
	struct device_node *np = cl->dev->of_node;
	int index;

	if (!np) {
		dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	index = of_property_match_string(np, "mbox-names", name);
	if (index < 0) {
		dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
			__func__, name);
		return ERR_PTR(-EINVAL);
	}
	return mbox_request_channel(cl, index);
}
EXPORT_SYMBOL_GPL(mbox_request_channel_byname);
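
/*
 * Example (sketch): the name is resolved through the "mbox-names" DT
 * property. Given a client node like:
 *
 *	client@0 {
 *		mboxes = <&mailbox 0>, <&mailbox 1>;
 *		mbox-names = "tx", "rx";
 *	};
 *
 * mbox_request_channel_byname(cl, "rx") maps to index 1 and hence to
 * the second specifier in 'mboxes'.
 */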

/**
 * mbox_free_channel - The client relinquishes control of a mailbox
 *			channel by this call.
 * @chan: The mailbox channel to be freed.
 */
void mbox_free_channel(struct mbox_chan *chan)
{
	unsigned long flags;

	if (!chan || !chan->cl)
		return;

	if (chan->mbox->ops->shutdown)
		chan->mbox->ops->shutdown(chan);

	/* The queued TX requests are simply aborted, no callbacks are made */
	spin_lock_irqsave(&chan->lock, flags);
	chan->cl = NULL;
	chan->active_req = NULL;
	if (chan->txdone_method == TXDONE_BY_ACK)
		chan->txdone_method = TXDONE_BY_POLL;

	module_put(chan->mbox->dev->driver->owner);
	spin_unlock_irqrestore(&chan->lock, flags);
}
EXPORT_SYMBOL_GPL(mbox_free_channel);

static struct mbox_chan *
of_mbox_index_xlate(struct mbox_controller *mbox,
		    const struct of_phandle_args *sp)
{
	int ind = sp->args[0];

	if (ind >= mbox->num_chans)
		return ERR_PTR(-EINVAL);

	return &mbox->chans[ind];
}

/**
 * mbox_controller_register - Register the mailbox controller
 * @mbox:	Pointer to the mailbox controller.
 *
 * The controller driver registers its communication channels.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int mbox_controller_register(struct mbox_controller *mbox)
{
	int i, txdone;

	/* Sanity check */
	if (!mbox || !mbox->dev || !mbox->ops || !mbox->num_chans)
		return -EINVAL;

	if (mbox->txdone_irq)
		txdone = TXDONE_BY_IRQ;
	else if (mbox->txdone_poll)
		txdone = TXDONE_BY_POLL;
	else /* It has to be ACK then */
		txdone = TXDONE_BY_ACK;

	if (txdone == TXDONE_BY_POLL) {
		if (!mbox->ops->last_tx_done) {
			dev_err(mbox->dev, "last_tx_done method is absent\n");
			return -EINVAL;
		}

		hrtimer_setup(&mbox->poll_hrt, txdone_hrtimer, CLOCK_MONOTONIC,
			      HRTIMER_MODE_REL);
		spin_lock_init(&mbox->poll_hrt_lock);
	}

	for (i = 0; i < mbox->num_chans; i++) {
		struct mbox_chan *chan = &mbox->chans[i];

		chan->cl = NULL;
		chan->mbox = mbox;
		chan->txdone_method = txdone;
		spin_lock_init(&chan->lock);
	}

	if (!mbox->of_xlate)
		mbox->of_xlate = of_mbox_index_xlate;

	mutex_lock(&con_mutex);
	list_add_tail(&mbox->node, &mbox_cons);
	mutex_unlock(&con_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(mbox_controller_register);
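
/*
 * Example (illustrative sketch): a minimal polling-mode controller
 * driver fills in its ops and channel array before registering. The
 * foo_* names are hypothetical; txpoll_period is in milliseconds.
 *
 *	static const struct mbox_chan_ops foo_ops = {
 *		.send_data	= foo_send_data,
 *		.last_tx_done	= foo_last_tx_done,
 *	};
 *
 *	mbox->dev = &pdev->dev;
 *	mbox->ops = &foo_ops;
 *	mbox->chans = foo_chans;
 *	mbox->num_chans = ARRAY_SIZE(foo_chans);
 *	mbox->txdone_poll = true;
 *	mbox->txpoll_period = 10;
 *	ret = mbox_controller_register(mbox);
 */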

/**
 * mbox_controller_unregister - Unregister the mailbox controller
 * @mbox:	Pointer to the mailbox controller.
 */
void mbox_controller_unregister(struct mbox_controller *mbox)
{
	int i;

	if (!mbox)
		return;

	mutex_lock(&con_mutex);

	list_del(&mbox->node);

	for (i = 0; i < mbox->num_chans; i++)
		mbox_free_channel(&mbox->chans[i]);

	if (mbox->txdone_poll)
		hrtimer_cancel(&mbox->poll_hrt);

	mutex_unlock(&con_mutex);
}
EXPORT_SYMBOL_GPL(mbox_controller_unregister);

static void __devm_mbox_controller_unregister(struct device *dev, void *res)
{
	struct mbox_controller **mbox = res;

	mbox_controller_unregister(*mbox);
}

static int devm_mbox_controller_match(struct device *dev, void *res, void *data)
{
	struct mbox_controller **mbox = res;

	if (WARN_ON(!mbox || !*mbox))
		return 0;

	return *mbox == data;
}

/**
 * devm_mbox_controller_register() - managed mbox_controller_register()
 * @dev: device owning the mailbox controller being registered
 * @mbox: mailbox controller being registered
 *
 * This function adds a device-managed resource that will make sure that the
 * mailbox controller, which is registered using mbox_controller_register()
 * as part of this function, will be unregistered along with the rest of
 * device-managed resources upon driver probe failure or driver removal.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int devm_mbox_controller_register(struct device *dev,
				  struct mbox_controller *mbox)
{
	struct mbox_controller **ptr;
	int err;

	ptr = devres_alloc(__devm_mbox_controller_unregister, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	err = mbox_controller_register(mbox);
	if (err < 0) {
		devres_free(ptr);
		return err;
	}

	devres_add(dev, ptr);
	*ptr = mbox;

	return 0;
}
EXPORT_SYMBOL_GPL(devm_mbox_controller_register);
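
/*
 * Example (sketch): in a probe path the managed variant removes the
 * need to unregister on the error and remove paths:
 *
 *	ret = devm_mbox_controller_register(&pdev->dev, mbox);
 *	if (ret)
 *		return dev_err_probe(&pdev->dev, ret,
 *				     "failed to register mailbox\n");
 */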

/**
 * devm_mbox_controller_unregister() - managed mbox_controller_unregister()
 * @dev: device owning the mailbox controller being unregistered
 * @mbox: mailbox controller being unregistered
 *
 * This function unregisters the mailbox controller and removes the device-
 * managed resource that was set up to automatically unregister the mailbox
 * controller on driver probe failure or driver removal. It's typically not
 * necessary to call this function.
 */
void devm_mbox_controller_unregister(struct device *dev,
				     struct mbox_controller *mbox)
{
	WARN_ON(devres_release(dev, __devm_mbox_controller_unregister,
			       devm_mbox_controller_match, mbox));
}
EXPORT_SYMBOL_GPL(devm_mbox_controller_unregister);