// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2020 Samsung Electronics Co., Ltd.
 * Copyright 2020 Google LLC.
 * Copyright 2024 Linaro Ltd.
 */

#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/container_of.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/firmware/samsung/exynos-acpm-protocol.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/mailbox/exynos-message.h>
#include <linux/mailbox_client.h>
#include <linux/math.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "exynos-acpm.h"
#include "exynos-acpm-pmic.h"
#define ACPM_PROTOCOL_SEQNUM		GENMASK(21, 16)

/* The poll counter ticks every 20 us: 5000 * 20 us = 100 ms. */
#define ACPM_POLL_TIMEOUT		5000
/* Timeout for a TX queue slot to become available, in microseconds. */
#define ACPM_TX_TIMEOUT_US		500000

#define ACPM_GS101_INITDATA_BASE	0xa000

/**
 * struct acpm_shmem - shared memory configuration information.
 * @reserved:	unused fields.
 * @chans:	offset to array of struct acpm_chan_shmem.
 * @reserved1:	unused fields.
 * @num_chans:	number of channels.
 */
struct acpm_shmem {
	u32 reserved[2];
	u32 chans;
	u32 reserved1[3];
	u32 num_chans;
};

/**
 * struct acpm_chan_shmem - descriptor of a shared memory channel.
 *
 * @id:			channel ID.
 * @reserved:		unused fields.
 * @rx_rear:		rear pointer of APM RX queue (TX for AP).
 * @rx_front:		front pointer of APM RX queue (TX for AP).
 * @rx_base:		base address of APM RX queue (TX for AP).
 * @reserved1:		unused fields.
 * @tx_rear:		rear pointer of APM TX queue (RX for AP).
 * @tx_front:		front pointer of APM TX queue (RX for AP).
 * @tx_base:		base address of APM TX queue (RX for AP).
 * @qlen:		queue length. Applies to both TX/RX queues.
 * @mlen:		message length. Applies to both TX/RX queues.
 * @reserved2:		unused fields.
 * @poll_completion:	true when the channel operates in polling mode.
 */
struct acpm_chan_shmem {
	u32 id;
	u32 reserved[3];
	u32 rx_rear;
	u32 rx_front;
	u32 rx_base;
	u32 reserved1[3];
	u32 tx_rear;
	u32 tx_front;
	u32 tx_base;
	u32 qlen;
	u32 mlen;
	u32 reserved2[2];
	u32 poll_completion;
};

/**
 * struct acpm_queue - exynos acpm queue.
 *
 * @rear:	rear address of the queue.
 * @front:	front address of the queue.
 * @base:	base address of the queue.
 */
struct acpm_queue {
	void __iomem *rear;
	void __iomem *front;
	void __iomem *base;
};
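
/*
 * Each queue is used as a ring buffer: the producer advances the front
 * index and the consumer advances the rear index, both modulo the queue
 * length. The queue is empty when front == rear, and the TX path keeps at
 * least one slot free (see acpm_wait_for_queue_slots()) so that the full
 * and empty states remain distinguishable.
 */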

/**
 * struct acpm_rx_data - RX queue data.
 *
 * @cmd:	pointer to where the data shall be saved.
 * @n_cmd:	number of 32-bit commands.
 * @response:	true if the client expects the RX data.
 */
struct acpm_rx_data {
	u32 *cmd;
	size_t n_cmd;
	bool response;
};

#define ACPM_SEQNUM_MAX    64
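
/*
 * Sequence numbers cycle through 1..63 and are never 0 (see
 * acpm_prepare_xfer()); a seqnum of 0 read back from the RX queue is
 * treated as an error in acpm_get_rx(). Sequence number N is tracked by
 * bit N - 1 of a channel's bitmap_seqnum.
 */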

/**
 * struct acpm_chan - driver internal representation of a channel.
 * @cl:		mailbox client.
 * @chan:	mailbox channel.
 * @acpm:	pointer to driver private data.
 * @tx:		TX queue. The enqueue is done by the host.
 *			- front index is written by the host.
 *			- rear index is written by the firmware.
 *
 * @rx:		RX queue. The enqueue is done by the firmware.
 *			- front index is written by the firmware.
 *			- rear index is written by the host.
 * @tx_lock:	protects TX queue.
 * @rx_lock:	protects RX queue.
 * @qlen:	queue length. Applies to both TX/RX queues.
 * @mlen:	message length. Applies to both TX/RX queues.
 * @seqnum:	sequence number of the last message enqueued on TX queue.
 * @id:		channel ID.
 * @poll_completion:	indicates if the transfer needs to be polled for
 *			completion or interrupt mode is used.
 * @bitmap_seqnum: bitmap that tracks the messages on the TX/RX queues.
 * @rx_data:	internal buffer used to drain the RX queue.
 */
struct acpm_chan {
	struct mbox_client cl;
	struct mbox_chan *chan;
	struct acpm_info *acpm;
	struct acpm_queue tx;
	struct acpm_queue rx;
	struct mutex tx_lock;
	struct mutex rx_lock;

	unsigned int qlen;
	unsigned int mlen;
	u8 seqnum;
	u8 id;
	bool poll_completion;

	DECLARE_BITMAP(bitmap_seqnum, ACPM_SEQNUM_MAX - 1);
	struct acpm_rx_data rx_data[ACPM_SEQNUM_MAX];
};

/**
 * struct acpm_info - driver's private data.
 * @shmem:	pointer to the SRAM configuration data.
 * @sram_base:	base address of SRAM.
 * @chans:	pointer to the ACPM channel parameters retrieved from SRAM.
 * @dev:	pointer to the exynos-acpm device.
 * @handle:	instance of acpm_handle to send to clients.
 * @num_chans:	number of channels available for this controller.
 */
struct acpm_info {
	struct acpm_shmem __iomem *shmem;
	void __iomem *sram_base;
	struct acpm_chan *chans;
	struct device *dev;
	struct acpm_handle handle;
	u32 num_chans;
};

/**
 * struct acpm_match_data - of_device_id data.
 * @initdata_base:	offset in SRAM where the channel configuration resides.
 */
struct acpm_match_data {
	loff_t initdata_base;
};

#define client_to_acpm_chan(c) container_of(c, struct acpm_chan, cl)
#define handle_to_acpm_info(h) container_of(h, struct acpm_info, handle)

/**
 * acpm_get_saved_rx() - get the response if it was already saved.
 * @achan:	ACPM channel info.
 * @xfer:	reference to the transfer to get response for.
 * @tx_seqnum:	xfer TX sequence number.
 */
static void acpm_get_saved_rx(struct acpm_chan *achan,
			      const struct acpm_xfer *xfer, u32 tx_seqnum)
{
	const struct acpm_rx_data *rx_data = &achan->rx_data[tx_seqnum - 1];
	u32 rx_seqnum;

	if (!rx_data->response)
		return;

	rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, rx_data->cmd[0]);

	if (rx_seqnum == tx_seqnum) {
		memcpy(xfer->rxd, rx_data->cmd, xfer->rxlen);
		clear_bit(rx_seqnum - 1, achan->bitmap_seqnum);
	}
}

/**
 * acpm_get_rx() - get response from RX queue.
 * @achan:	ACPM channel info.
 * @xfer:	reference to the transfer to get response for.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int acpm_get_rx(struct acpm_chan *achan, const struct acpm_xfer *xfer)
{
	u32 rx_front, rx_seqnum, tx_seqnum, seqnum;
	const void __iomem *base, *addr;
	struct acpm_rx_data *rx_data;
	u32 i, val, mlen;
	bool rx_set = false;

	guard(mutex)(&achan->rx_lock);

	rx_front = readl(achan->rx.front);
	i = readl(achan->rx.rear);

	tx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]);

	if (i == rx_front) {
		acpm_get_saved_rx(achan, xfer, tx_seqnum);
		return 0;
	}

	base = achan->rx.base;
	mlen = achan->mlen;

	/* Drain RX queue. */
	do {
		/* Read RX seqnum. */
		addr = base + mlen * i;
		val = readl(addr);

		rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, val);
		if (!rx_seqnum)
			return -EIO;
		/*
		 * Message seqnum starts at 1, whereas the driver stores the
		 * first message at index 0.
		 */
		seqnum = rx_seqnum - 1;
		rx_data = &achan->rx_data[seqnum];

		if (rx_data->response) {
			if (rx_seqnum == tx_seqnum) {
				__ioread32_copy(xfer->rxd, addr,
						xfer->rxlen / 4);
				rx_set = true;
				clear_bit(seqnum, achan->bitmap_seqnum);
			} else {
				/*
				 * The RX data corresponds to another request.
				 * Save the data to drain the queue, but don't
				 * clear the bitmap yet; it is cleared once the
				 * response is copied to that request. Copy the
				 * full slot (n_cmd words), since this
				 * transfer's rxlen need not match the other
				 * request's response length.
				 */
				__ioread32_copy(rx_data->cmd, addr,
						rx_data->n_cmd);
			}
		} else {
			clear_bit(seqnum, achan->bitmap_seqnum);
		}

		i = (i + 1) % achan->qlen;
	} while (i != rx_front);

	/* We saved all responses, mark RX empty. */
	writel(rx_front, achan->rx.rear);

	/*
	 * If the response was not in this iteration of the queue, check if the
	 * RX data was previously saved.
	 */
	if (!rx_set)
		acpm_get_saved_rx(achan, xfer, tx_seqnum);

	return 0;
}

/**
 * acpm_dequeue_by_polling() - RX dequeue by polling.
 * @achan:	ACPM channel info.
 * @xfer:	reference to the transfer being waited for.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int acpm_dequeue_by_polling(struct acpm_chan *achan,
				   const struct acpm_xfer *xfer)
{
	struct device *dev = achan->acpm->dev;
	unsigned int cnt_20us = 0;
	u32 seqnum;
	int ret;

	seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, xfer->txd[0]);

	do {
		ret = acpm_get_rx(achan, xfer);
		if (ret)
			return ret;

		if (!test_bit(seqnum - 1, achan->bitmap_seqnum))
			return 0;

		/* Determined experimentally. */
		usleep_range(20, 30);
		cnt_20us++;
	} while (cnt_20us < ACPM_POLL_TIMEOUT);

	dev_err(dev, "Timeout! ch:%u s:%u bitmap:%lx, cnt_20us = %d.\n",
		achan->id, seqnum, achan->bitmap_seqnum[0], cnt_20us);

	return -ETIME;
}

/**
 * acpm_wait_for_queue_slots() - wait for queue slots.
 *
 * @achan:		ACPM channel info.
 * @next_tx_front:	next front index of the TX queue.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int acpm_wait_for_queue_slots(struct acpm_chan *achan, u32 next_tx_front)
{
	u32 val;
	int ret;

	/*
	 * Wait for RX front to keep up with TX front. Make sure there's at
	 * least one element between them.
	 */
	ret = readl_poll_timeout(achan->rx.front, val, next_tx_front != val, 0,
				 ACPM_TX_TIMEOUT_US);
	if (ret) {
		dev_err(achan->acpm->dev, "RX front can not keep up with TX front.\n");
		return ret;
	}

	ret = readl_poll_timeout(achan->tx.rear, val, next_tx_front != val, 0,
				 ACPM_TX_TIMEOUT_US);
	if (ret)
		dev_err(achan->acpm->dev, "TX queue is full.\n");

	return ret;
}

/**
 * acpm_prepare_xfer() - prepare a transfer before writing the message to the
 * TX queue.
 * @achan:	ACPM channel info.
 * @xfer:	reference to the transfer being prepared.
 */
static void acpm_prepare_xfer(struct acpm_chan *achan,
			      const struct acpm_xfer *xfer)
{
	struct acpm_rx_data *rx_data;
	u32 *txd = (u32 *)xfer->txd;

	/* Prevent achan->seqnum from being reused. */
	do {
		if (++achan->seqnum == ACPM_SEQNUM_MAX)
			achan->seqnum = 1;
	} while (test_bit(achan->seqnum - 1, achan->bitmap_seqnum));

	txd[0] |= FIELD_PREP(ACPM_PROTOCOL_SEQNUM, achan->seqnum);

	/* Clear data for upcoming responses */
	rx_data = &achan->rx_data[achan->seqnum - 1];
	memset(rx_data->cmd, 0, sizeof(*rx_data->cmd) * rx_data->n_cmd);
	if (xfer->rxd)
		rx_data->response = true;

	/* Flag the index based on seqnum. (seqnum: 1~63, bitmap: 0~62) */
	set_bit(achan->seqnum - 1, achan->bitmap_seqnum);
}

/**
 * acpm_wait_for_message_response() - helper grouping all possible ways of
 * waiting for a synchronous message response.
 *
 * @achan:	ACPM channel info.
 * @xfer:	reference to the transfer being waited for.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int acpm_wait_for_message_response(struct acpm_chan *achan,
					  const struct acpm_xfer *xfer)
{
	/* Only polling mode is supported for now. */
	return acpm_dequeue_by_polling(achan, xfer);
}

/**
 * acpm_do_xfer() - do one transfer.
 * @handle:	pointer to the acpm handle.
 * @xfer:	transfer to initiate and wait for response.
 *
 * Return: 0 on success, -errno otherwise.
 */
int acpm_do_xfer(const struct acpm_handle *handle, const struct acpm_xfer *xfer)
{
	struct acpm_info *acpm = handle_to_acpm_info(handle);
	struct exynos_mbox_msg msg;
	struct acpm_chan *achan;
	u32 idx, tx_front;
	int ret;

	if (xfer->acpm_chan_id >= acpm->num_chans)
		return -EINVAL;

	achan = &acpm->chans[xfer->acpm_chan_id];

	if (!xfer->txd || xfer->txlen > achan->mlen || xfer->rxlen > achan->mlen)
		return -EINVAL;

	if (!achan->poll_completion) {
		dev_err(achan->acpm->dev, "Interrupt mode not supported\n");
		return -EOPNOTSUPP;
	}

	scoped_guard(mutex, &achan->tx_lock) {
		tx_front = readl(achan->tx.front);
		idx = (tx_front + 1) % achan->qlen;

		ret = acpm_wait_for_queue_slots(achan, idx);
		if (ret)
			return ret;

		acpm_prepare_xfer(achan, xfer);

		/* Write TX command. */
		__iowrite32_copy(achan->tx.base + achan->mlen * tx_front,
				 xfer->txd, xfer->txlen / 4);

		/* Advance TX front. */
		writel(idx, achan->tx.front);
	}

	msg.chan_id = xfer->acpm_chan_id;
	msg.chan_type = EXYNOS_MBOX_CHAN_TYPE_DOORBELL;
	ret = mbox_send_message(achan->chan, (void *)&msg);
	if (ret < 0)
		return ret;

	ret = acpm_wait_for_message_response(achan, xfer);

	/*
	 * NOTE: we might prefer not to need the mailbox ticker to manage the
	 * transfer queueing since the protocol layer queues things by itself.
	 * Unfortunately, we have to kick the mailbox framework after we have
	 * received our message.
	 */
	mbox_client_txdone(achan->chan, ret);

	return ret;
}
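
/*
 * Example (sketch only): a synchronous request/response transfer on
 * channel 0. The struct acpm_xfer fields below follow their usage in this
 * file; the command encoding of txd[] is firmware-specific, so the values
 * here are placeholders, and "handle" is assumed to have been obtained
 * via devm_acpm_get_by_phandle().
 *
 *	u32 txd[4] = { 0 };	(seqnum is filled in by acpm_prepare_xfer())
 *	u32 rxd[4] = { 0 };
 *	struct acpm_xfer xfer = {
 *		.txd = txd,
 *		.rxd = rxd,
 *		.txlen = sizeof(txd),
 *		.rxlen = sizeof(rxd),
 *		.acpm_chan_id = 0,
 *	};
 *	int ret = acpm_do_xfer(handle, &xfer);
 *
 * acpm_do_xfer() polls until the matching seqnum is dequeued; if .rxd is
 * set, the response payload is copied back into rxd[].
 */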

/**
 * acpm_chan_shmem_get_params() - get channel parameters and addresses of the
 * TX/RX queues.
 * @achan:	ACPM channel info.
 * @chan_shmem:	__iomem pointer to a channel described in shared memory.
 */
static void acpm_chan_shmem_get_params(struct acpm_chan *achan,
				struct acpm_chan_shmem __iomem *chan_shmem)
{
	void __iomem *base = achan->acpm->sram_base;
	struct acpm_queue *rx = &achan->rx;
	struct acpm_queue *tx = &achan->tx;

	achan->mlen = readl(&chan_shmem->mlen);
	achan->poll_completion = readl(&chan_shmem->poll_completion);
	achan->id = readl(&chan_shmem->id);
	achan->qlen = readl(&chan_shmem->qlen);

	/* The AP's TX queue is the APM's RX queue, and vice versa. */
	tx->base = base + readl(&chan_shmem->rx_base);
	tx->rear = base + readl(&chan_shmem->rx_rear);
	tx->front = base + readl(&chan_shmem->rx_front);

	rx->base = base + readl(&chan_shmem->tx_base);
	rx->rear = base + readl(&chan_shmem->tx_rear);
	rx->front = base + readl(&chan_shmem->tx_front);

	dev_vdbg(achan->acpm->dev, "ID = %d poll = %d, mlen = %d, qlen = %d\n",
		 achan->id, achan->poll_completion, achan->mlen, achan->qlen);
}

/**
 * acpm_achan_alloc_cmds() - allocate buffers for retrieving data from the ACPM
 * firmware.
 * @achan:	ACPM channel info.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int acpm_achan_alloc_cmds(struct acpm_chan *achan)
{
	struct device *dev = achan->acpm->dev;
	struct acpm_rx_data *rx_data;
	size_t cmd_size, n_cmd;
	int i;

	if (achan->mlen == 0)
		return 0;

	cmd_size = sizeof(*(achan->rx_data[0].cmd));
	n_cmd = DIV_ROUND_UP_ULL(achan->mlen, cmd_size);

	for (i = 0; i < ACPM_SEQNUM_MAX; i++) {
		rx_data = &achan->rx_data[i];
		rx_data->n_cmd = n_cmd;
		rx_data->cmd = devm_kcalloc(dev, n_cmd, cmd_size, GFP_KERNEL);
		if (!rx_data->cmd)
			return -ENOMEM;
	}

	return 0;
}

/**
 * acpm_free_mbox_chans() - free mailbox channels.
 * @acpm:	pointer to driver data.
 */
static void acpm_free_mbox_chans(struct acpm_info *acpm)
{
	int i;

	for (i = 0; i < acpm->num_chans; i++)
		if (!IS_ERR_OR_NULL(acpm->chans[i].chan))
			mbox_free_channel(acpm->chans[i].chan);
}

/**
 * acpm_channels_init() - initialize channels based on the configuration data
 * in the shared memory.
 * @acpm:	pointer to driver data.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int acpm_channels_init(struct acpm_info *acpm)
{
	struct acpm_shmem __iomem *shmem = acpm->shmem;
	struct acpm_chan_shmem __iomem *chans_shmem;
	struct device *dev = acpm->dev;
	int i, ret;

	acpm->num_chans = readl(&shmem->num_chans);
	acpm->chans = devm_kcalloc(dev, acpm->num_chans, sizeof(*acpm->chans),
				   GFP_KERNEL);
	if (!acpm->chans)
		return -ENOMEM;

	chans_shmem = acpm->sram_base + readl(&shmem->chans);

	for (i = 0; i < acpm->num_chans; i++) {
		struct acpm_chan_shmem __iomem *chan_shmem = &chans_shmem[i];
		struct acpm_chan *achan = &acpm->chans[i];
		struct mbox_client *cl = &achan->cl;

		achan->acpm = acpm;

		acpm_chan_shmem_get_params(achan, chan_shmem);

		ret = acpm_achan_alloc_cmds(achan);
		if (ret)
			return ret;

		mutex_init(&achan->rx_lock);
		mutex_init(&achan->tx_lock);

		cl->dev = dev;

		achan->chan = mbox_request_channel(cl, 0);
		if (IS_ERR(achan->chan)) {
			acpm_free_mbox_chans(acpm);
			return PTR_ERR(achan->chan);
		}
	}

	return 0;
}

/**
 * acpm_setup_ops() - setup the operations structures.
 * @acpm:	pointer to the driver data.
 */
static void acpm_setup_ops(struct acpm_info *acpm)
{
	struct acpm_pmic_ops *pmic_ops = &acpm->handle.ops.pmic_ops;

	pmic_ops->read_reg = acpm_pmic_read_reg;
	pmic_ops->bulk_read = acpm_pmic_bulk_read;
	pmic_ops->write_reg = acpm_pmic_write_reg;
	pmic_ops->bulk_write = acpm_pmic_bulk_write;
	pmic_ops->update_reg = acpm_pmic_update_reg;
}

static int acpm_probe(struct platform_device *pdev)
{
	const struct acpm_match_data *match_data;
	struct device *dev = &pdev->dev;
	struct device_node *shmem;
	struct acpm_info *acpm;
	resource_size_t size;
	struct resource res;
	int ret;

	acpm = devm_kzalloc(dev, sizeof(*acpm), GFP_KERNEL);
	if (!acpm)
		return -ENOMEM;

	shmem = of_parse_phandle(dev->of_node, "shmem", 0);
	ret = of_address_to_resource(shmem, 0, &res);
	of_node_put(shmem);
	if (ret)
		return dev_err_probe(dev, ret,
				     "Failed to get shared memory.\n");

	size = resource_size(&res);
	acpm->sram_base = devm_ioremap(dev, res.start, size);
	if (!acpm->sram_base)
		return dev_err_probe(dev, -ENOMEM,
				     "Failed to ioremap shared memory.\n");

	match_data = of_device_get_match_data(dev);
	if (!match_data)
		return dev_err_probe(dev, -EINVAL,
				     "Failed to get match data.\n");

	acpm->shmem = acpm->sram_base + match_data->initdata_base;
	acpm->dev = dev;

	ret = acpm_channels_init(acpm);
	if (ret)
		return ret;

	acpm_setup_ops(acpm);

	platform_set_drvdata(pdev, acpm);

	return 0;
}

/**
 * acpm_handle_put() - release the handle acquired by acpm_get_by_phandle.
 * @handle:	Handle acquired by acpm_get_by_phandle.
 */
static void acpm_handle_put(const struct acpm_handle *handle)
{
	struct acpm_info *acpm = handle_to_acpm_info(handle);
	struct device *dev = acpm->dev;

	module_put(dev->driver->owner);
	/* Drop reference taken with of_find_device_by_node(). */
	put_device(dev);
}

/**
 * devm_acpm_release() - devres release method.
 * @dev: pointer to device.
 * @res: pointer to resource.
 */
static void devm_acpm_release(struct device *dev, void *res)
{
	acpm_handle_put(*(struct acpm_handle **)res);
}

/**
 * acpm_get_by_phandle() - get the ACPM handle using DT phandle.
 * @dev:        device pointer requesting ACPM handle.
 * @property:   property name containing phandle on ACPM node.
 *
 * Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
 */
static const struct acpm_handle *acpm_get_by_phandle(struct device *dev,
						     const char *property)
{
	struct platform_device *pdev;
	struct device_node *acpm_np;
	struct device_link *link;
	struct acpm_info *acpm;

	acpm_np = of_parse_phandle(dev->of_node, property, 0);
	if (!acpm_np)
		return ERR_PTR(-ENODEV);

	pdev = of_find_device_by_node(acpm_np);
	if (!pdev) {
		dev_err(dev, "Cannot find device node %s\n", acpm_np->name);
		of_node_put(acpm_np);
		return ERR_PTR(-EPROBE_DEFER);
	}

	of_node_put(acpm_np);

	acpm = platform_get_drvdata(pdev);
	if (!acpm) {
		dev_err(dev, "Cannot get drvdata from %s\n",
			dev_name(&pdev->dev));
		platform_device_put(pdev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	if (!try_module_get(pdev->dev.driver->owner)) {
		dev_err(dev, "Cannot get module reference.\n");
		platform_device_put(pdev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	link = device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER);
	if (!link) {
		dev_err(&pdev->dev,
			"Failed to create device link to consumer %s.\n",
			dev_name(dev));
		platform_device_put(pdev);
		module_put(pdev->dev.driver->owner);
		return ERR_PTR(-EINVAL);
	}

	return &acpm->handle;
}

/**
 * devm_acpm_get_by_phandle() - managed get handle using phandle.
 * @dev:        device pointer requesting ACPM handle.
 * @property:   property name containing phandle on ACPM node.
 *
 * Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
 */
const struct acpm_handle *devm_acpm_get_by_phandle(struct device *dev,
						   const char *property)
{
	const struct acpm_handle **ptr, *handle;

	ptr = devres_alloc(devm_acpm_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	handle = acpm_get_by_phandle(dev, property);
	if (!IS_ERR(handle)) {
		*ptr = handle;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return handle;
}
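
/*
 * Example (sketch only): a consumer driver obtaining the ACPM handle from
 * its probe routine. The "exynos,acpm-ipc" property name is illustrative;
 * the actual name comes from the consumer's DT binding.
 *
 *	const struct acpm_handle *handle;
 *
 *	handle = devm_acpm_get_by_phandle(dev, "exynos,acpm-ipc");
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *
 * The PMIC accessors are then reachable through handle->ops.pmic_ops, as
 * wired up in acpm_setup_ops().
 */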

static const struct acpm_match_data acpm_gs101 = {
	.initdata_base = ACPM_GS101_INITDATA_BASE,
};

static const struct of_device_id acpm_match[] = {
	{
		.compatible = "google,gs101-acpm-ipc",
		.data = &acpm_gs101,
	},
	{},
};
MODULE_DEVICE_TABLE(of, acpm_match);

static struct platform_driver acpm_driver = {
	.probe	= acpm_probe,
	.driver	= {
		.name = "exynos-acpm-protocol",
		.of_match_table	= acpm_match,
	},
};
module_platform_driver(acpm_driver);

MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@linaro.org>");
MODULE_DESCRIPTION("Samsung Exynos ACPM mailbox protocol driver");
MODULE_LICENSE("GPL");