// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2017 Broadcom

/*
 * Broadcom FlexRM Mailbox Driver
 *
 * Each Broadcom FlexSparx4 offload engine is implemented as an
 * extension to the Broadcom FlexRM ring manager. The FlexRM ring
 * manager provides a set of rings which can be used to submit
 * work to a FlexSparx4 offload engine.
 *
 * This driver creates a mailbox controller using a set of FlexRM
 * rings, where each mailbox channel represents a separate FlexRM ring.
 */
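/*
 * Illustrative device tree usage (node names and cell values below are
 * examples only, not taken from this file): with the
 * "brcm,iproc-flexrm-mbox" binding, the client specifier has three
 * cells, which flexrm_mbox_of_xlate() below decodes as
 * <ring-number msi-count-threshold msi-timer-value>, e.g.:
 *
 *	client {
 *		mboxes = <&flexrm_mbox 0 0x1 0xffff>;
 *	};
 */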

#include <asm/barrier.h>
#include <asm/byteorder.h>
#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>

/* ====== FlexRM register defines ===== */

/* FlexRM configuration */
#define RING_REGS_SIZE 0x10000
#define RING_DESC_SIZE 8
#define RING_DESC_INDEX(offset) \
	((offset) / RING_DESC_SIZE)
#define RING_DESC_OFFSET(index) \
	((index) * RING_DESC_SIZE)
#define RING_MAX_REQ_COUNT 1024
#define RING_BD_ALIGN_ORDER 12
#define RING_BD_ALIGN_CHECK(addr) \
	(!((addr) & ((0x1 << RING_BD_ALIGN_ORDER) - 1)))
#define RING_BD_TOGGLE_INVALID(offset) \
	(((offset) >> RING_BD_ALIGN_ORDER) & 0x1)
#define RING_BD_TOGGLE_VALID(offset) \
	(!RING_BD_TOGGLE_INVALID(offset))
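/*
 * Note: since RING_BD_ALIGN_ORDER is 12, the toggle value that the
 * hardware treats as valid flips for every 4KB window of BD ring
 * offset; RING_BD_TOGGLE_VALID()/RING_BD_TOGGLE_INVALID() derive that
 * expected toggle state from a BD offset.
 */
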
#define RING_BD_DESC_PER_REQ 32
#define RING_BD_DESC_COUNT \
	(RING_MAX_REQ_COUNT * RING_BD_DESC_PER_REQ)
#define RING_BD_SIZE \
	(RING_BD_DESC_COUNT * RING_DESC_SIZE)
#define RING_CMPL_ALIGN_ORDER 13
#define RING_CMPL_DESC_COUNT RING_MAX_REQ_COUNT
#define RING_CMPL_SIZE \
	(RING_CMPL_DESC_COUNT * RING_DESC_SIZE)
#define RING_VER_MAGIC 0x76303031

/* Per-Ring register offsets */
#define RING_VER 0x000
#define RING_BD_START_ADDR 0x004
#define RING_BD_READ_PTR 0x008
#define RING_BD_WRITE_PTR 0x00c
#define RING_BD_READ_PTR_DDR_LS 0x010
#define RING_BD_READ_PTR_DDR_MS 0x014
#define RING_CMPL_START_ADDR 0x018
#define RING_CMPL_WRITE_PTR 0x01c
#define RING_NUM_REQ_RECV_LS 0x020
#define RING_NUM_REQ_RECV_MS 0x024
#define RING_NUM_REQ_TRANS_LS 0x028
#define RING_NUM_REQ_TRANS_MS 0x02c
#define RING_NUM_REQ_OUTSTAND 0x030
#define RING_CONTROL 0x034
#define RING_FLUSH_DONE 0x038
#define RING_MSI_ADDR_LS 0x03c
#define RING_MSI_ADDR_MS 0x040
#define RING_MSI_CONTROL 0x048
#define RING_BD_READ_PTR_DDR_CONTROL 0x04c
#define RING_MSI_DATA_VALUE 0x064

/* Register RING_BD_START_ADDR fields */
#define BD_LAST_UPDATE_HW_SHIFT 28
#define BD_LAST_UPDATE_HW_MASK 0x1
#define BD_START_ADDR_VALUE(pa) \
	((u32)((((dma_addr_t)(pa)) >> RING_BD_ALIGN_ORDER) & 0x0fffffff))
#define BD_START_ADDR_DECODE(val) \
	((dma_addr_t)((val) & 0x0fffffff) << RING_BD_ALIGN_ORDER)

/* Register RING_CMPL_START_ADDR fields */
#define CMPL_START_ADDR_VALUE(pa) \
	((u32)((((u64)(pa)) >> RING_CMPL_ALIGN_ORDER) & 0x07ffffff))

/* Register RING_CONTROL fields */
#define CONTROL_MASK_DISABLE_CONTROL 12
#define CONTROL_FLUSH_SHIFT 5
#define CONTROL_ACTIVE_SHIFT 4
#define CONTROL_RATE_ADAPT_MASK 0xf
#define CONTROL_RATE_DYNAMIC 0x0
#define CONTROL_RATE_FAST 0x8
#define CONTROL_RATE_MEDIUM 0x9
#define CONTROL_RATE_SLOW 0xa
#define CONTROL_RATE_IDLE 0xb

/* Register RING_FLUSH_DONE fields */
#define FLUSH_DONE_MASK 0x1

/* Register RING_MSI_CONTROL fields */
#define MSI_TIMER_VAL_SHIFT 16
#define MSI_TIMER_VAL_MASK 0xffff
#define MSI_ENABLE_SHIFT 15
#define MSI_ENABLE_MASK 0x1
#define MSI_COUNT_SHIFT 0
#define MSI_COUNT_MASK 0x3ff

/* Register RING_BD_READ_PTR_DDR_CONTROL fields */
#define BD_READ_PTR_DDR_TIMER_VAL_SHIFT 16
#define BD_READ_PTR_DDR_TIMER_VAL_MASK 0xffff
#define BD_READ_PTR_DDR_ENABLE_SHIFT 15
#define BD_READ_PTR_DDR_ENABLE_MASK 0x1

/* ====== FlexRM ring descriptor defines ===== */

/* Completion descriptor format */
#define CMPL_OPAQUE_SHIFT 0
#define CMPL_OPAQUE_MASK 0xffff
#define CMPL_ENGINE_STATUS_SHIFT 16
#define CMPL_ENGINE_STATUS_MASK 0xffff
#define CMPL_DME_STATUS_SHIFT 32
#define CMPL_DME_STATUS_MASK 0xffff
#define CMPL_RM_STATUS_SHIFT 48
#define CMPL_RM_STATUS_MASK 0xffff
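
/*
 * Worked example (illustrative value): a completion descriptor of
 * 0x0000000100000042 decodes, using the shifts and masks above, as
 * RM status = 0x0 (good), DME status = 0x1 (DME_STATUS_MEM_COR_ERR),
 * engine status = 0x0 and OPAQUE (request id) = 0x42.
 */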

/* Completion DME status code */
#define DME_STATUS_MEM_COR_ERR BIT(0)
#define DME_STATUS_MEM_UCOR_ERR BIT(1)
#define DME_STATUS_FIFO_UNDERFLOW BIT(2)
#define DME_STATUS_FIFO_OVERFLOW BIT(3)
#define DME_STATUS_RRESP_ERR BIT(4)
#define DME_STATUS_BRESP_ERR BIT(5)
#define DME_STATUS_ERROR_MASK (DME_STATUS_MEM_COR_ERR | \
			       DME_STATUS_MEM_UCOR_ERR | \
			       DME_STATUS_FIFO_UNDERFLOW | \
			       DME_STATUS_FIFO_OVERFLOW | \
			       DME_STATUS_RRESP_ERR | \
			       DME_STATUS_BRESP_ERR)

/* Completion RM status code */
#define RM_STATUS_CODE_SHIFT 0
#define RM_STATUS_CODE_MASK 0x3ff
#define RM_STATUS_CODE_GOOD 0x0
#define RM_STATUS_CODE_AE_TIMEOUT 0x3ff

/* General descriptor format */
#define DESC_TYPE_SHIFT 60
#define DESC_TYPE_MASK 0xf
#define DESC_PAYLOAD_SHIFT 0
#define DESC_PAYLOAD_MASK 0x0fffffffffffffff

/* Null descriptor format */
#define NULL_TYPE 0
#define NULL_TOGGLE_SHIFT 58
#define NULL_TOGGLE_MASK 0x1

/* Header descriptor format */
#define HEADER_TYPE 1
#define HEADER_TOGGLE_SHIFT 58
#define HEADER_TOGGLE_MASK 0x1
#define HEADER_ENDPKT_SHIFT 57
#define HEADER_ENDPKT_MASK 0x1
#define HEADER_STARTPKT_SHIFT 56
#define HEADER_STARTPKT_MASK 0x1
#define HEADER_BDCOUNT_SHIFT 36
#define HEADER_BDCOUNT_MASK 0x1f
#define HEADER_BDCOUNT_MAX HEADER_BDCOUNT_MASK
#define HEADER_FLAGS_SHIFT 16
#define HEADER_FLAGS_MASK 0xffff
#define HEADER_OPAQUE_SHIFT 0
#define HEADER_OPAQUE_MASK 0xffff
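
/*
 * The defines above imply the following HEADER descriptor bit layout:
 * [63:60] type (0x1), [58] toggle, [57] endpkt, [56] startpkt,
 * [40:36] bdcount, [31:16] flags, [15:0] opaque (request id).
 */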

/* Source (SRC) descriptor format */
#define SRC_TYPE 2
#define SRC_LENGTH_SHIFT 44
#define SRC_LENGTH_MASK 0xffff
#define SRC_ADDR_SHIFT 0
#define SRC_ADDR_MASK 0x00000fffffffffff

/* Destination (DST) descriptor format */
#define DST_TYPE 3
#define DST_LENGTH_SHIFT 44
#define DST_LENGTH_MASK 0xffff
#define DST_ADDR_SHIFT 0
#define DST_ADDR_MASK 0x00000fffffffffff

/* Immediate (IMM) descriptor format */
#define IMM_TYPE 4
#define IMM_DATA_SHIFT 0
#define IMM_DATA_MASK 0x0fffffffffffffff

/* Next pointer (NPTR) descriptor format */
#define NPTR_TYPE 5
#define NPTR_TOGGLE_SHIFT 58
#define NPTR_TOGGLE_MASK 0x1
#define NPTR_ADDR_SHIFT 0
#define NPTR_ADDR_MASK 0x00000fffffffffff

/* Mega source (MSRC) descriptor format */
#define MSRC_TYPE 6
#define MSRC_LENGTH_SHIFT 44
#define MSRC_LENGTH_MASK 0xffff
#define MSRC_ADDR_SHIFT 0
#define MSRC_ADDR_MASK 0x00000fffffffffff

/* Mega destination (MDST) descriptor format */
#define MDST_TYPE 7
#define MDST_LENGTH_SHIFT 44
#define MDST_LENGTH_MASK 0xffff
#define MDST_ADDR_SHIFT 0
#define MDST_ADDR_MASK 0x00000fffffffffff

/* Source with tlast (SRCT) descriptor format */
#define SRCT_TYPE 8
#define SRCT_LENGTH_SHIFT 44
#define SRCT_LENGTH_MASK 0xffff
#define SRCT_ADDR_SHIFT 0
#define SRCT_ADDR_MASK 0x00000fffffffffff

/* Destination with tlast (DSTT) descriptor format */
#define DSTT_TYPE 9
#define DSTT_LENGTH_SHIFT 44
#define DSTT_LENGTH_MASK 0xffff
#define DSTT_ADDR_SHIFT 0
#define DSTT_ADDR_MASK 0x00000fffffffffff

/* Immediate with tlast (IMMT) descriptor format */
#define IMMT_TYPE 10
#define IMMT_DATA_SHIFT 0
#define IMMT_DATA_MASK 0x0fffffffffffffff

/* Descriptor helper macros */
#define DESC_DEC(_d, _s, _m) (((_d) >> (_s)) & (_m))
#define DESC_ENC(_d, _v, _s, _m) \
	do { \
		(_d) &= ~((u64)(_m) << (_s)); \
		(_d) |= (((u64)(_v) & (_m)) << (_s)); \
	} while (0)
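
/*
 * Worked example: DESC_ENC(d, HEADER_TYPE, DESC_TYPE_SHIFT,
 * DESC_TYPE_MASK) clears bits [63:60] of 'd' and ORs in
 * ((u64)0x1 << 60), while DESC_DEC(d, DESC_TYPE_SHIFT, DESC_TYPE_MASK)
 * recovers that 4-bit type field.
 */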

/* ====== FlexRM data structures ===== */

struct flexrm_ring {
	/* Unprotected members */
	int num;
	struct flexrm_mbox *mbox;
	void __iomem *regs;
	bool irq_requested;
	unsigned int irq;
	cpumask_t irq_aff_hint;
	unsigned int msi_timer_val;
	unsigned int msi_count_threshold;
	struct brcm_message *requests[RING_MAX_REQ_COUNT];
	void *bd_base;
	dma_addr_t bd_dma_base;
	u32 bd_write_offset;
	void *cmpl_base;
	dma_addr_t cmpl_dma_base;
	/* Atomic stats */
	atomic_t msg_send_count;
	atomic_t msg_cmpl_count;
	/* Protected members */
	spinlock_t lock;
	DECLARE_BITMAP(requests_bmap, RING_MAX_REQ_COUNT);
	u32 cmpl_read_offset;
};

struct flexrm_mbox {
	struct device *dev;
	void __iomem *regs;
	u32 num_rings;
	struct flexrm_ring *rings;
	struct dma_pool *bd_pool;
	struct dma_pool *cmpl_pool;
	struct dentry *root;
	struct mbox_controller controller;
};

/* ====== FlexRM ring descriptor helper routines ===== */

static u64 flexrm_read_desc(void *desc_ptr)
{
	return le64_to_cpu(*((u64 *)desc_ptr));
}

static void flexrm_write_desc(void *desc_ptr, u64 desc)
{
	*((u64 *)desc_ptr) = cpu_to_le64(desc);
}

static u32 flexrm_cmpl_desc_to_reqid(u64 cmpl_desc)
{
	return (u32)(cmpl_desc & CMPL_OPAQUE_MASK);
}

static int flexrm_cmpl_desc_to_error(u64 cmpl_desc)
{
	u32 status;

	status = DESC_DEC(cmpl_desc, CMPL_DME_STATUS_SHIFT,
			  CMPL_DME_STATUS_MASK);
	if (status & DME_STATUS_ERROR_MASK)
		return -EIO;

	status = DESC_DEC(cmpl_desc, CMPL_RM_STATUS_SHIFT,
			  CMPL_RM_STATUS_MASK);
	status &= RM_STATUS_CODE_MASK;
	if (status == RM_STATUS_CODE_AE_TIMEOUT)
		return -ETIMEDOUT;

	return 0;
}

static bool flexrm_is_next_table_desc(void *desc_ptr)
{
	u64 desc = flexrm_read_desc(desc_ptr);
	u32 type = DESC_DEC(desc, DESC_TYPE_SHIFT, DESC_TYPE_MASK);

	return (type == NPTR_TYPE) ? true : false;
}

static u64 flexrm_next_table_desc(u32 toggle, dma_addr_t next_addr)
{
	u64 desc = 0;

	DESC_ENC(desc, NPTR_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, toggle, NPTR_TOGGLE_SHIFT, NPTR_TOGGLE_MASK);
	DESC_ENC(desc, next_addr, NPTR_ADDR_SHIFT, NPTR_ADDR_MASK);

	return desc;
}

static u64 flexrm_null_desc(u32 toggle)
{
	u64 desc = 0;

	DESC_ENC(desc, NULL_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, toggle, NULL_TOGGLE_SHIFT, NULL_TOGGLE_MASK);

	return desc;
}

static u32 flexrm_estimate_header_desc_count(u32 nhcnt)
{
	u32 hcnt = nhcnt / HEADER_BDCOUNT_MAX;

	if (nhcnt % HEADER_BDCOUNT_MAX)
		hcnt += 1;

	return hcnt;
}
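
/*
 * For example, the above gives 1 HEADER descriptor for nhcnt = 10 or
 * nhcnt = 31, and 2 HEADER descriptors for nhcnt = 32 (one HEADER per
 * HEADER_BDCOUNT_MAX non-HEADER descriptors, rounded up).
 */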

static void flexrm_flip_header_toggle(void *desc_ptr)
{
	u64 desc = flexrm_read_desc(desc_ptr);

	if (desc & ((u64)0x1 << HEADER_TOGGLE_SHIFT))
		desc &= ~((u64)0x1 << HEADER_TOGGLE_SHIFT);
	else
		desc |= ((u64)0x1 << HEADER_TOGGLE_SHIFT);

	flexrm_write_desc(desc_ptr, desc);
}

static u64 flexrm_header_desc(u32 toggle, u32 startpkt, u32 endpkt,
			      u32 bdcount, u32 flags, u32 opaque)
{
	u64 desc = 0;

	DESC_ENC(desc, HEADER_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, toggle, HEADER_TOGGLE_SHIFT, HEADER_TOGGLE_MASK);
	DESC_ENC(desc, startpkt, HEADER_STARTPKT_SHIFT, HEADER_STARTPKT_MASK);
	DESC_ENC(desc, endpkt, HEADER_ENDPKT_SHIFT, HEADER_ENDPKT_MASK);
	DESC_ENC(desc, bdcount, HEADER_BDCOUNT_SHIFT, HEADER_BDCOUNT_MASK);
	DESC_ENC(desc, flags, HEADER_FLAGS_SHIFT, HEADER_FLAGS_MASK);
	DESC_ENC(desc, opaque, HEADER_OPAQUE_SHIFT, HEADER_OPAQUE_MASK);

	return desc;
}

static void flexrm_enqueue_desc(u32 nhpos, u32 nhcnt, u32 reqid,
				u64 desc, void **desc_ptr, u32 *toggle,
				void *start_desc, void *end_desc)
{
	u64 d;
	u32 nhavail, _toggle, _startpkt, _endpkt, _bdcount;

	/* Sanity check */
	if (nhcnt <= nhpos)
		return;

	/*
	 * Each request (or packet) starts with a HEADER descriptor
	 * followed by one or more non-HEADER descriptors (SRC, SRCT,
	 * MSRC, DST, DSTT, MDST, IMM, and IMMT). The number of
	 * non-HEADER descriptors following a HEADER descriptor is
	 * given by the BDCOUNT field of the HEADER descriptor. The
	 * max value of the BDCOUNT field is 31, which means at most
	 * 31 non-HEADER descriptors can follow one HEADER descriptor.
	 *
	 * In general, the number of non-HEADER descriptors can easily
	 * go beyond 31. To handle this, the HEADER descriptor provides
	 * packet (or request) extension bits (STARTPKT and ENDPKT).
	 *
	 * When packet extension is used, the first HEADER descriptor
	 * of a request (or packet) has STARTPKT=1 and ENDPKT=0, any
	 * intermediate HEADER descriptors have STARTPKT=0 and ENDPKT=0,
	 * and the last HEADER descriptor has STARTPKT=0 and ENDPKT=1.
	 * Also, the TOGGLE bit of the first HEADER is set to the
	 * invalid state to ensure that FlexRM does not start fetching
	 * descriptors until all descriptors are enqueued. The caller of
	 * this function flips the TOGGLE bit of the first HEADER after
	 * all descriptors have been enqueued.
	 */
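
	/*
	 * Worked example: for nhcnt = 70 non-HEADER descriptors, HEADER
	 * descriptors are inserted before nhpos 0, 31 and 62. The first
	 * HEADER has STARTPKT=1, ENDPKT=0, BDCOUNT=31 (and an inverted
	 * TOGGLE), the second has STARTPKT=0, ENDPKT=0, BDCOUNT=31, and
	 * the last has STARTPKT=0, ENDPKT=1, BDCOUNT=8.
	 */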

	if ((nhpos % HEADER_BDCOUNT_MAX == 0) && (nhcnt - nhpos)) {
		/* Prepare the header descriptor */
		nhavail = (nhcnt - nhpos);
		_toggle = (nhpos == 0) ? !(*toggle) : (*toggle);
		_startpkt = (nhpos == 0) ? 0x1 : 0x0;
		_endpkt = (nhavail <= HEADER_BDCOUNT_MAX) ? 0x1 : 0x0;
		_bdcount = (nhavail <= HEADER_BDCOUNT_MAX) ?
			   nhavail : HEADER_BDCOUNT_MAX;
		d = flexrm_header_desc(_toggle, _startpkt, _endpkt,
				       _bdcount, 0x0, reqid);

		/* Write header descriptor */
		flexrm_write_desc(*desc_ptr, d);

		/* Point to next descriptor */
		*desc_ptr += sizeof(desc);
		if (*desc_ptr == end_desc)
			*desc_ptr = start_desc;

		/* Skip next pointer descriptors */
		while (flexrm_is_next_table_desc(*desc_ptr)) {
			*toggle = (*toggle) ? 0 : 1;
			*desc_ptr += sizeof(desc);
			if (*desc_ptr == end_desc)
				*desc_ptr = start_desc;
		}
	}

	/* Write desired descriptor */
	flexrm_write_desc(*desc_ptr, desc);

	/* Point to next descriptor */
	*desc_ptr += sizeof(desc);
	if (*desc_ptr == end_desc)
		*desc_ptr = start_desc;

	/* Skip next pointer descriptors */
	while (flexrm_is_next_table_desc(*desc_ptr)) {
		*toggle = (*toggle) ? 0 : 1;
		*desc_ptr += sizeof(desc);
		if (*desc_ptr == end_desc)
			*desc_ptr = start_desc;
	}
}

static u64 flexrm_src_desc(dma_addr_t addr, unsigned int length)
{
	u64 desc = 0;

	DESC_ENC(desc, SRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, length, SRC_LENGTH_SHIFT, SRC_LENGTH_MASK);
	DESC_ENC(desc, addr, SRC_ADDR_SHIFT, SRC_ADDR_MASK);

	return desc;
}

static u64 flexrm_msrc_desc(dma_addr_t addr, unsigned int length_div_16)
{
	u64 desc = 0;

	DESC_ENC(desc, MSRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, length_div_16, MSRC_LENGTH_SHIFT, MSRC_LENGTH_MASK);
	DESC_ENC(desc, addr, MSRC_ADDR_SHIFT, MSRC_ADDR_MASK);

	return desc;
}

static u64 flexrm_dst_desc(dma_addr_t addr, unsigned int length)
{
	u64 desc = 0;

	DESC_ENC(desc, DST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, length, DST_LENGTH_SHIFT, DST_LENGTH_MASK);
	DESC_ENC(desc, addr, DST_ADDR_SHIFT, DST_ADDR_MASK);

	return desc;
}

static u64 flexrm_mdst_desc(dma_addr_t addr, unsigned int length_div_16)
{
	u64 desc = 0;

	DESC_ENC(desc, MDST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, length_div_16, MDST_LENGTH_SHIFT, MDST_LENGTH_MASK);
	DESC_ENC(desc, addr, MDST_ADDR_SHIFT, MDST_ADDR_MASK);

	return desc;
}

static u64 flexrm_imm_desc(u64 data)
{
	u64 desc = 0;

	DESC_ENC(desc, IMM_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, data, IMM_DATA_SHIFT, IMM_DATA_MASK);

	return desc;
}

static u64 flexrm_srct_desc(dma_addr_t addr, unsigned int length)
{
	u64 desc = 0;

	DESC_ENC(desc, SRCT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, length, SRCT_LENGTH_SHIFT, SRCT_LENGTH_MASK);
	DESC_ENC(desc, addr, SRCT_ADDR_SHIFT, SRCT_ADDR_MASK);

	return desc;
}

static u64 flexrm_dstt_desc(dma_addr_t addr, unsigned int length)
{
	u64 desc = 0;

	DESC_ENC(desc, DSTT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, length, DSTT_LENGTH_SHIFT, DSTT_LENGTH_MASK);
	DESC_ENC(desc, addr, DSTT_ADDR_SHIFT, DSTT_ADDR_MASK);

	return desc;
}

static u64 flexrm_immt_desc(u64 data)
{
	u64 desc = 0;

	DESC_ENC(desc, IMMT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, data, IMMT_DATA_SHIFT, IMMT_DATA_MASK);

	return desc;
}

static bool flexrm_spu_sanity_check(struct brcm_message *msg)
{
	struct scatterlist *sg;

	if (!msg->spu.src || !msg->spu.dst)
		return false;
	for (sg = msg->spu.src; sg; sg = sg_next(sg)) {
		if (sg->length & 0xf) {
			if (sg->length > SRC_LENGTH_MASK)
				return false;
		} else {
			if (sg->length > (MSRC_LENGTH_MASK * 16))
				return false;
		}
	}
	for (sg = msg->spu.dst; sg; sg = sg_next(sg)) {
		if (sg->length & 0xf) {
			if (sg->length > DST_LENGTH_MASK)
				return false;
		} else {
			if (sg->length > (MDST_LENGTH_MASK * 16))
				return false;
		}
	}

	return true;
}

static u32 flexrm_spu_estimate_nonheader_desc_count(struct brcm_message *msg)
{
	u32 cnt = 0;
	unsigned int dst_target = 0;
	struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst;

	while (src_sg || dst_sg) {
		if (src_sg) {
			cnt++;
			dst_target = src_sg->length;
			src_sg = sg_next(src_sg);
		} else
			dst_target = UINT_MAX;

		while (dst_target && dst_sg) {
			cnt++;
			if (dst_sg->length < dst_target)
				dst_target -= dst_sg->length;
			else
				dst_target = 0;
			dst_sg = sg_next(dst_sg);
		}
	}

	return cnt;
}
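
/*
 * Worked example: for source SG lengths {64, 32} and destination SG
 * lengths {32, 32, 32}, the walk above counts 1 (src 64) + 2 (two dst
 * 32s covering those 64 bytes) + 1 (src 32) + 1 (dst 32), i.e. 5
 * non-HEADER descriptors.
 */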

static int flexrm_spu_dma_map(struct device *dev, struct brcm_message *msg)
{
	int rc;

	rc = dma_map_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
			DMA_TO_DEVICE);
	if (!rc)
		return -EIO;

	rc = dma_map_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
			DMA_FROM_DEVICE);
	if (!rc) {
		dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
			     DMA_TO_DEVICE);
		return -EIO;
	}

	return 0;
}

static void flexrm_spu_dma_unmap(struct device *dev, struct brcm_message *msg)
{
	dma_unmap_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
		     DMA_FROM_DEVICE);
	dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
		     DMA_TO_DEVICE);
}

static void *flexrm_spu_write_descs(struct brcm_message *msg, u32 nhcnt,
				    u32 reqid, void *desc_ptr, u32 toggle,
				    void *start_desc, void *end_desc)
{
	u64 d;
	u32 nhpos = 0;
	void *orig_desc_ptr = desc_ptr;
	unsigned int dst_target = 0;
	struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst;

	while (src_sg || dst_sg) {
		if (src_sg) {
			if (sg_dma_len(src_sg) & 0xf)
				d = flexrm_src_desc(sg_dma_address(src_sg),
						    sg_dma_len(src_sg));
			else
				d = flexrm_msrc_desc(sg_dma_address(src_sg),
						     sg_dma_len(src_sg)/16);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
			dst_target = sg_dma_len(src_sg);
			src_sg = sg_next(src_sg);
		} else
			dst_target = UINT_MAX;

		while (dst_target && dst_sg) {
			if (sg_dma_len(dst_sg) & 0xf)
				d = flexrm_dst_desc(sg_dma_address(dst_sg),
						    sg_dma_len(dst_sg));
			else
				d = flexrm_mdst_desc(sg_dma_address(dst_sg),
						     sg_dma_len(dst_sg)/16);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
			if (sg_dma_len(dst_sg) < dst_target)
				dst_target -= sg_dma_len(dst_sg);
			else
				dst_target = 0;
			dst_sg = sg_next(dst_sg);
		}
	}

	/* Null descriptor with invalid toggle bit */
	flexrm_write_desc(desc_ptr, flexrm_null_desc(!toggle));

	/* Ensure that descriptors have been written to memory */
	wmb();

	/* Flip toggle bit in header */
	flexrm_flip_header_toggle(orig_desc_ptr);

	return desc_ptr;
}

static bool flexrm_sba_sanity_check(struct brcm_message *msg)
{
	u32 i;

	if (!msg->sba.cmds || !msg->sba.cmds_count)
		return false;

	for (i = 0; i < msg->sba.cmds_count; i++) {
		if (((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) ||
		     (msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C)) &&
		    (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT))
			return false;
		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) &&
		    (msg->sba.cmds[i].data_len > SRCT_LENGTH_MASK))
			return false;
		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C) &&
		    (msg->sba.cmds[i].data_len > SRCT_LENGTH_MASK))
			return false;
		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_RESP) &&
		    (msg->sba.cmds[i].resp_len > DSTT_LENGTH_MASK))
			return false;
		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT) &&
		    (msg->sba.cmds[i].data_len > DSTT_LENGTH_MASK))
			return false;
	}

	return true;
}

static u32 flexrm_sba_estimate_nonheader_desc_count(struct brcm_message *msg)
{
	u32 i, cnt;

	cnt = 0;
	for (i = 0; i < msg->sba.cmds_count; i++) {
		cnt++;

		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) ||
		    (msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C))
			cnt++;

		if (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_RESP)
			cnt++;

		if (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT)
			cnt++;
	}

	return cnt;
}

static void *flexrm_sba_write_descs(struct brcm_message *msg, u32 nhcnt,
				    u32 reqid, void *desc_ptr, u32 toggle,
				    void *start_desc, void *end_desc)
{
	u64 d;
	u32 i, nhpos = 0;
	struct brcm_sba_command *c;
	void *orig_desc_ptr = desc_ptr;

	/* Convert SBA commands into descriptors */
	for (i = 0; i < msg->sba.cmds_count; i++) {
		c = &msg->sba.cmds[i];

		if ((c->flags & BRCM_SBA_CMD_HAS_RESP) &&
		    (c->flags & BRCM_SBA_CMD_HAS_OUTPUT)) {
			/* Destination response descriptor */
			d = flexrm_dst_desc(c->resp, c->resp_len);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
		} else if (c->flags & BRCM_SBA_CMD_HAS_RESP) {
			/* Destination response with tlast descriptor */
			d = flexrm_dstt_desc(c->resp, c->resp_len);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
		}

		if (c->flags & BRCM_SBA_CMD_HAS_OUTPUT) {
			/* Destination with tlast descriptor */
			d = flexrm_dstt_desc(c->data, c->data_len);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
		}

		if (c->flags & BRCM_SBA_CMD_TYPE_B) {
			/* Command as immediate descriptor */
			d = flexrm_imm_desc(c->cmd);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
		} else {
			/* Command as immediate descriptor with tlast */
			d = flexrm_immt_desc(c->cmd);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
		}

		if ((c->flags & BRCM_SBA_CMD_TYPE_B) ||
		    (c->flags & BRCM_SBA_CMD_TYPE_C)) {
			/* Source with tlast descriptor */
			d = flexrm_srct_desc(c->data, c->data_len);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					    d, &desc_ptr, &toggle,
					    start_desc, end_desc);
			nhpos++;
		}
	}

	/* Null descriptor with invalid toggle bit */
	flexrm_write_desc(desc_ptr, flexrm_null_desc(!toggle));

	/* Ensure that descriptors have been written to memory */
	wmb();

	/* Flip toggle bit in header */
	flexrm_flip_header_toggle(orig_desc_ptr);

	return desc_ptr;
}

static bool flexrm_sanity_check(struct brcm_message *msg)
{
	if (!msg)
		return false;

	switch (msg->type) {
	case BRCM_MESSAGE_SPU:
		return flexrm_spu_sanity_check(msg);
	case BRCM_MESSAGE_SBA:
		return flexrm_sba_sanity_check(msg);
	default:
		return false;
	};
}

static u32 flexrm_estimate_nonheader_desc_count(struct brcm_message *msg)
{
	if (!msg)
		return 0;

	switch (msg->type) {
	case BRCM_MESSAGE_SPU:
		return flexrm_spu_estimate_nonheader_desc_count(msg);
	case BRCM_MESSAGE_SBA:
		return flexrm_sba_estimate_nonheader_desc_count(msg);
	default:
		return 0;
	};
}

static int flexrm_dma_map(struct device *dev, struct brcm_message *msg)
{
	if (!dev || !msg)
		return -EINVAL;

	switch (msg->type) {
	case BRCM_MESSAGE_SPU:
		return flexrm_spu_dma_map(dev, msg);
	default:
		break;
	}

	return 0;
}

static void flexrm_dma_unmap(struct device *dev, struct brcm_message *msg)
{
	if (!dev || !msg)
		return;

	switch (msg->type) {
	case BRCM_MESSAGE_SPU:
		flexrm_spu_dma_unmap(dev, msg);
		break;
	default:
		break;
	}
}

static void *flexrm_write_descs(struct brcm_message *msg, u32 nhcnt,
				u32 reqid, void *desc_ptr, u32 toggle,
				void *start_desc, void *end_desc)
{
	if (!msg || !desc_ptr || !start_desc || !end_desc)
		return ERR_PTR(-ENOTSUPP);

	if ((desc_ptr < start_desc) || (end_desc <= desc_ptr))
		return ERR_PTR(-ERANGE);

	switch (msg->type) {
	case BRCM_MESSAGE_SPU:
		return flexrm_spu_write_descs(msg, nhcnt, reqid,
					      desc_ptr, toggle,
					      start_desc, end_desc);
	case BRCM_MESSAGE_SBA:
		return flexrm_sba_write_descs(msg, nhcnt, reqid,
					      desc_ptr, toggle,
					      start_desc, end_desc);
	default:
		return ERR_PTR(-ENOTSUPP);
	};
}

/* ====== FlexRM driver helper routines ===== */

static void flexrm_write_config_in_seqfile(struct flexrm_mbox *mbox,
					   struct seq_file *file)
{
	int i;
	const char *state;
	struct flexrm_ring *ring;

	seq_printf(file, "%-5s %-9s %-18s %-10s %-18s %-10s\n",
		   "Ring#", "State", "BD_Addr", "BD_Size",
		   "Cmpl_Addr", "Cmpl_Size");

	for (i = 0; i < mbox->num_rings; i++) {
		ring = &mbox->rings[i];
		if (readl(ring->regs + RING_CONTROL) &
		    BIT(CONTROL_ACTIVE_SHIFT))
			state = "active";
		else
			state = "inactive";
		seq_printf(file,
			   "%-5d %-9s 0x%016llx 0x%08x 0x%016llx 0x%08x\n",
			   ring->num, state,
			   (unsigned long long)ring->bd_dma_base,
			   (u32)RING_BD_SIZE,
			   (unsigned long long)ring->cmpl_dma_base,
			   (u32)RING_CMPL_SIZE);
	}
}

static void flexrm_write_stats_in_seqfile(struct flexrm_mbox *mbox,
					  struct seq_file *file)
{
	int i;
	u32 val, bd_read_offset;
	struct flexrm_ring *ring;

	seq_printf(file, "%-5s %-10s %-10s %-10s %-11s %-11s\n",
		   "Ring#", "BD_Read", "BD_Write",
		   "Cmpl_Read", "Submitted", "Completed");

	for (i = 0; i < mbox->num_rings; i++) {
		ring = &mbox->rings[i];
		bd_read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
		val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
		bd_read_offset *= RING_DESC_SIZE;
		bd_read_offset += (u32)(BD_START_ADDR_DECODE(val) -
					ring->bd_dma_base);
		seq_printf(file, "%-5d 0x%08x 0x%08x 0x%08x %-11d %-11d\n",
			   ring->num,
			   (u32)bd_read_offset,
			   (u32)ring->bd_write_offset,
			   (u32)ring->cmpl_read_offset,
			   (u32)atomic_read(&ring->msg_send_count),
			   (u32)atomic_read(&ring->msg_cmpl_count));
	}
}

static int flexrm_new_request(struct flexrm_ring *ring,
			      struct brcm_message *batch_msg,
			      struct brcm_message *msg)
{
	void *next;
	unsigned long flags;
	u32 val, count, nhcnt;
	u32 read_offset, write_offset;
	bool exit_cleanup = false;
	int ret = 0, reqid;

	/* Do sanity check on message */
	if (!flexrm_sanity_check(msg))
		return -EIO;
	msg->error = 0;

	/* Reserve a request id; if the ring is full then return -ENOSPC. */
	spin_lock_irqsave(&ring->lock, flags);
	reqid = bitmap_find_free_region(ring->requests_bmap,
					RING_MAX_REQ_COUNT, 0);
	spin_unlock_irqrestore(&ring->lock, flags);
	if (reqid < 0)
		return -ENOSPC;
	ring->requests[reqid] = msg;

	/* Do DMA mappings for the message */
	ret = flexrm_dma_map(ring->mbox->dev, msg);
	if (ret < 0) {
		ring->requests[reqid] = NULL;
		spin_lock_irqsave(&ring->lock, flags);
		bitmap_release_region(ring->requests_bmap, reqid, 0);
		spin_unlock_irqrestore(&ring->lock, flags);
		return ret;
	}

	/* Determine current HW BD read offset */
	read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
	val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
	read_offset *= RING_DESC_SIZE;
	read_offset += (u32)(BD_START_ADDR_DECODE(val) - ring->bd_dma_base);

	/*
	 * Number of required descriptors = number of non-header descriptors +
	 *				    number of header descriptors +
	 *				    1x null descriptor
	 */
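	/*
	 * For example, a message needing 5 non-header descriptors needs
	 * 1 header descriptor and 1 null descriptor, i.e. count = 7.
	 */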
	nhcnt = flexrm_estimate_nonheader_desc_count(msg);
	count = flexrm_estimate_header_desc_count(nhcnt) + nhcnt + 1;

	/* Check for available descriptor space. */
	write_offset = ring->bd_write_offset;
	while (count) {
		if (!flexrm_is_next_table_desc(ring->bd_base + write_offset))
			count--;
		write_offset += RING_DESC_SIZE;
		if (write_offset == RING_BD_SIZE)
			write_offset = 0x0;
		if (write_offset == read_offset)
			break;
	}
	if (count) {
		ret = -ENOSPC;
		exit_cleanup = true;
		goto exit;
	}

	/* Write descriptors to ring */
	next = flexrm_write_descs(msg, nhcnt, reqid,
				  ring->bd_base + ring->bd_write_offset,
				  RING_BD_TOGGLE_VALID(ring->bd_write_offset),
				  ring->bd_base, ring->bd_base + RING_BD_SIZE);
	if (IS_ERR(next)) {
		ret = PTR_ERR(next);
		exit_cleanup = true;
		goto exit;
	}

	/* Save ring BD write offset */
	ring->bd_write_offset = (unsigned long)(next - ring->bd_base);

	/* Increment number of messages sent */
	atomic_inc_return(&ring->msg_send_count);

exit:
	/* Update error status in message */
	msg->error = ret;

	/* Cleanup if we failed */
	if (exit_cleanup) {
		flexrm_dma_unmap(ring->mbox->dev, msg);
		ring->requests[reqid] = NULL;
		spin_lock_irqsave(&ring->lock, flags);
		bitmap_release_region(ring->requests_bmap, reqid, 0);
		spin_unlock_irqrestore(&ring->lock, flags);
	}

	return ret;
}

static int flexrm_process_completions(struct flexrm_ring *ring)
{
	u64 desc;
	int err, count = 0;
	unsigned long flags;
	struct brcm_message *msg = NULL;
	u32 reqid, cmpl_read_offset, cmpl_write_offset;
	struct mbox_chan *chan = &ring->mbox->controller.chans[ring->num];

	spin_lock_irqsave(&ring->lock, flags);

	/*
	 * Get current completion read and write offsets.
	 *
	 * Note: We should read the completion write pointer at least once
	 * after we get an MSI interrupt because HW maintains an internal
	 * MSI status which will allow the next MSI interrupt only after
	 * the completion write pointer is read.
	 */
	cmpl_write_offset = readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
	cmpl_write_offset *= RING_DESC_SIZE;
	cmpl_read_offset = ring->cmpl_read_offset;
	ring->cmpl_read_offset = cmpl_write_offset;

	spin_unlock_irqrestore(&ring->lock, flags);

	/* For each completed request notify mailbox clients */
	reqid = 0;
	while (cmpl_read_offset != cmpl_write_offset) {
		/* Dequeue next completion descriptor */
		desc = *((u64 *)(ring->cmpl_base + cmpl_read_offset));

		/* Next read offset */
		cmpl_read_offset += RING_DESC_SIZE;
		if (cmpl_read_offset == RING_CMPL_SIZE)
			cmpl_read_offset = 0;

		/* Decode error from completion descriptor */
		err = flexrm_cmpl_desc_to_error(desc);
		if (err < 0) {
			dev_warn(ring->mbox->dev,
				 "ring%d got completion desc=0x%lx with error %d\n",
				 ring->num, (unsigned long)desc, err);
		}

		/* Determine request id from completion descriptor */
		reqid = flexrm_cmpl_desc_to_reqid(desc);

		/* Determine message pointer based on reqid */
		msg = ring->requests[reqid];
		if (!msg) {
			dev_warn(ring->mbox->dev,
				 "ring%d null msg pointer for completion desc=0x%lx\n",
				 ring->num, (unsigned long)desc);
			continue;
		}

		/* Release reqid for recycling */
		ring->requests[reqid] = NULL;
		spin_lock_irqsave(&ring->lock, flags);
		bitmap_release_region(ring->requests_bmap, reqid, 0);
		spin_unlock_irqrestore(&ring->lock, flags);

		/* Unmap DMA mappings */
		flexrm_dma_unmap(ring->mbox->dev, msg);

		/* Give-back message to mailbox client */
		msg->error = err;
		mbox_chan_received_data(chan, msg);

		/* Increment number of completions processed */
		atomic_inc_return(&ring->msg_cmpl_count);
		count++;
	}

	return count;
}

/* ====== FlexRM Debugfs callbacks ====== */

static int flexrm_debugfs_conf_show(struct seq_file *file, void *offset)
{
	struct flexrm_mbox *mbox = dev_get_drvdata(file->private);

	/* Write config in file */
	flexrm_write_config_in_seqfile(mbox, file);

	return 0;
}

static int flexrm_debugfs_stats_show(struct seq_file *file, void *offset)
{
	struct flexrm_mbox *mbox = dev_get_drvdata(file->private);

	/* Write stats in file */
	flexrm_write_stats_in_seqfile(mbox, file);

	return 0;
}

/* ====== FlexRM interrupt handler ===== */

static irqreturn_t flexrm_irq_thread(int irq, void *dev_id)
{
	flexrm_process_completions(dev_id);

	return IRQ_HANDLED;
}

/* ====== FlexRM mailbox callbacks ===== */

static int flexrm_send_data(struct mbox_chan *chan, void *data)
{
	int i, rc;
	struct flexrm_ring *ring = chan->con_priv;
	struct brcm_message *msg = data;

	if (msg->type == BRCM_MESSAGE_BATCH) {
		for (i = msg->batch.msgs_queued;
		     i < msg->batch.msgs_count; i++) {
			rc = flexrm_new_request(ring, msg,
						&msg->batch.msgs[i]);
			if (rc) {
				msg->error = rc;
				return rc;
			}
			msg->batch.msgs_queued++;
		}
		return 0;
	}

	return flexrm_new_request(ring, NULL, data);
}

static bool flexrm_peek_data(struct mbox_chan *chan)
{
	int cnt = flexrm_process_completions(chan->con_priv);

	return (cnt > 0) ? true : false;
}

static int flexrm_startup(struct mbox_chan *chan)
{
	u64 d;
	u32 val, off;
	int ret = 0;
	dma_addr_t next_addr;
	struct flexrm_ring *ring = chan->con_priv;

	/* Allocate BD memory */
	ring->bd_base = dma_pool_alloc(ring->mbox->bd_pool,
				       GFP_KERNEL, &ring->bd_dma_base);
	if (!ring->bd_base) {
		dev_err(ring->mbox->dev,
			"can't allocate BD memory for ring%d\n",
			ring->num);
		ret = -ENOMEM;
		goto fail;
	}

	/* Configure next table pointer entries in BD memory */
	for (off = 0; off < RING_BD_SIZE; off += RING_DESC_SIZE) {
		next_addr = off + RING_DESC_SIZE;
		if (next_addr == RING_BD_SIZE)
			next_addr = 0;
		next_addr += ring->bd_dma_base;
		if (RING_BD_ALIGN_CHECK(next_addr))
			d = flexrm_next_table_desc(RING_BD_TOGGLE_VALID(off),
						   next_addr);
		else
			d = flexrm_null_desc(RING_BD_TOGGLE_INVALID(off));
		flexrm_write_desc(ring->bd_base + off, d);
	}

	/* Allocate completion memory */
	ring->cmpl_base = dma_pool_zalloc(ring->mbox->cmpl_pool,
					  GFP_KERNEL, &ring->cmpl_dma_base);
	if (!ring->cmpl_base) {
		dev_err(ring->mbox->dev,
			"can't allocate completion memory for ring%d\n",
			ring->num);
		ret = -ENOMEM;
		goto fail_free_bd_memory;
	}

	/* Request IRQ */
	if (ring->irq == UINT_MAX) {
		dev_err(ring->mbox->dev,
			"ring%d IRQ not available\n", ring->num);
		ret = -ENODEV;
		goto fail_free_cmpl_memory;
	}
	ret = request_threaded_irq(ring->irq, NULL, flexrm_irq_thread,
				   IRQF_ONESHOT, dev_name(ring->mbox->dev), ring);
	if (ret) {
		dev_err(ring->mbox->dev,
			"failed to request ring%d IRQ\n", ring->num);
		goto fail_free_cmpl_memory;
	}
	ring->irq_requested = true;

	/* Set IRQ affinity hint */
	ring->irq_aff_hint = CPU_MASK_NONE;
	val = ring->mbox->num_rings;
	val = (num_online_cpus() < val) ? val / num_online_cpus() : 1;
	cpumask_set_cpu((ring->num / val) % num_online_cpus(),
			&ring->irq_aff_hint);
	ret = irq_update_affinity_hint(ring->irq, &ring->irq_aff_hint);
	if (ret) {
		dev_err(ring->mbox->dev,
			"failed to set IRQ affinity hint for ring%d\n",
			ring->num);
		goto fail_free_irq;
	}

	/* Disable/inactivate ring */
	writel_relaxed(0x0, ring->regs + RING_CONTROL);

	/* Program BD start address */
	val = BD_START_ADDR_VALUE(ring->bd_dma_base);
	writel_relaxed(val, ring->regs + RING_BD_START_ADDR);

	/* BD write offset will be the same as the HW write pointer */
	ring->bd_write_offset =
		readl_relaxed(ring->regs + RING_BD_WRITE_PTR);
	ring->bd_write_offset *= RING_DESC_SIZE;

	/* Program completion start address */
	val = CMPL_START_ADDR_VALUE(ring->cmpl_dma_base);
	writel_relaxed(val, ring->regs + RING_CMPL_START_ADDR);

	/* Completion read offset will be the same as the HW write pointer */
	ring->cmpl_read_offset =
		readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
	ring->cmpl_read_offset *= RING_DESC_SIZE;

	/* Read ring Tx, Rx, and Outstanding counts to clear */
	readl_relaxed(ring->regs + RING_NUM_REQ_RECV_LS);
	readl_relaxed(ring->regs + RING_NUM_REQ_RECV_MS);
	readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_LS);
	readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_MS);
	readl_relaxed(ring->regs + RING_NUM_REQ_OUTSTAND);

	/* Configure RING_MSI_CONTROL */
	val = 0;
	val |= (ring->msi_timer_val << MSI_TIMER_VAL_SHIFT);
	val |= BIT(MSI_ENABLE_SHIFT);
	val |= (ring->msi_count_threshold & MSI_COUNT_MASK) << MSI_COUNT_SHIFT;
	writel_relaxed(val, ring->regs + RING_MSI_CONTROL);

	/* Enable/activate ring */
	val = BIT(CONTROL_ACTIVE_SHIFT);
	writel_relaxed(val, ring->regs + RING_CONTROL);

	/* Reset stats to zero */
	atomic_set(&ring->msg_send_count, 0);
	atomic_set(&ring->msg_cmpl_count, 0);

	return 0;

fail_free_irq:
	free_irq(ring->irq, ring);
	ring->irq_requested = false;
fail_free_cmpl_memory:
	dma_pool_free(ring->mbox->cmpl_pool,
		      ring->cmpl_base, ring->cmpl_dma_base);
	ring->cmpl_base = NULL;
fail_free_bd_memory:
	dma_pool_free(ring->mbox->bd_pool,
		      ring->bd_base, ring->bd_dma_base);
	ring->bd_base = NULL;
fail:
	return ret;
}

static void flexrm_shutdown(struct mbox_chan *chan)
{
	u32 reqid;
	unsigned int timeout;
	struct brcm_message *msg;
	struct flexrm_ring *ring = chan->con_priv;

	/* Disable/inactivate ring */
	writel_relaxed(0x0, ring->regs + RING_CONTROL);

	/* Set ring flush state */
	timeout = 1000; /* timeout of 1s */
	writel_relaxed(BIT(CONTROL_FLUSH_SHIFT),
		       ring->regs + RING_CONTROL);
	do {
		if (readl_relaxed(ring->regs + RING_FLUSH_DONE) &
		    FLUSH_DONE_MASK)
			break;
		mdelay(1);
	} while (--timeout);
	if (!timeout)
		dev_err(ring->mbox->dev,
			"setting ring%d flush state timed out\n", ring->num);

	/* Clear ring flush state */
	timeout = 1000; /* timeout of 1s */
	writel_relaxed(0x0, ring->regs + RING_CONTROL);
	do {
		if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) &
		      FLUSH_DONE_MASK))
			break;
		mdelay(1);
	} while (--timeout);
	if (!timeout)
		dev_err(ring->mbox->dev,
			"clearing ring%d flush state timed out\n", ring->num);

	/* Abort all in-flight requests */
	for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) {
		msg = ring->requests[reqid];
		if (!msg)
			continue;

		/* Release reqid for recycling */
		ring->requests[reqid] = NULL;

		/* Unmap DMA mappings */
		flexrm_dma_unmap(ring->mbox->dev, msg);

		/* Give-back message to mailbox client */
		msg->error = -EIO;
		mbox_chan_received_data(chan, msg);
	}

	/* Clear requests bitmap */
	bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT);

	/* Release IRQ */
	if (ring->irq_requested) {
		irq_update_affinity_hint(ring->irq, NULL);
		free_irq(ring->irq, ring);
		ring->irq_requested = false;
	}

	/* Free-up completion descriptor ring */
	if (ring->cmpl_base) {
		dma_pool_free(ring->mbox->cmpl_pool,
			      ring->cmpl_base, ring->cmpl_dma_base);
		ring->cmpl_base = NULL;
	}

	/* Free-up BD descriptor ring */
	if (ring->bd_base) {
		dma_pool_free(ring->mbox->bd_pool,
			      ring->bd_base, ring->bd_dma_base);
		ring->bd_base = NULL;
	}
}

static const struct mbox_chan_ops flexrm_mbox_chan_ops = {
	.send_data = flexrm_send_data,
	.startup = flexrm_startup,
	.shutdown = flexrm_shutdown,
	.peek_data = flexrm_peek_data,
};

static struct mbox_chan *flexrm_mbox_of_xlate(struct mbox_controller *cntlr,
					      const struct of_phandle_args *pa)
{
	struct mbox_chan *chan;
	struct flexrm_ring *ring;

	if (pa->args_count < 3)
		return ERR_PTR(-EINVAL);

	if (pa->args[0] >= cntlr->num_chans)
		return ERR_PTR(-ENOENT);

	if (pa->args[1] > MSI_COUNT_MASK)
		return ERR_PTR(-EINVAL);

	if (pa->args[2] > MSI_TIMER_VAL_MASK)
		return ERR_PTR(-EINVAL);

	chan = &cntlr->chans[pa->args[0]];
	ring = chan->con_priv;
	ring->msi_count_threshold = pa->args[1];
	ring->msi_timer_val = pa->args[2];

	return chan;
}

/* ====== FlexRM platform driver ===== */

static void flexrm_mbox_msi_write(struct msi_desc *desc, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct flexrm_mbox *mbox = dev_get_drvdata(dev);
	struct flexrm_ring *ring = &mbox->rings[desc->msi_index];

	/* Configure per-Ring MSI registers */
	writel_relaxed(msg->address_lo, ring->regs + RING_MSI_ADDR_LS);
	writel_relaxed(msg->address_hi, ring->regs + RING_MSI_ADDR_MS);
	writel_relaxed(msg->data, ring->regs + RING_MSI_DATA_VALUE);
}

static int flexrm_mbox_probe(struct platform_device *pdev)
{
	int index, ret = 0;
	void __iomem *regs;
	void __iomem *regs_end;
	struct resource *iomem;
	struct flexrm_ring *ring;
	struct flexrm_mbox *mbox;
	struct device *dev = &pdev->dev;

	/* Allocate driver mailbox struct */
	mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
	if (!mbox) {
		ret = -ENOMEM;
		goto fail;
	}
	mbox->dev = dev;
	platform_set_drvdata(pdev, mbox);

	/* Get resource for registers and map registers of all rings */
	mbox->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &iomem);
	if (!iomem || (resource_size(iomem) < RING_REGS_SIZE)) {
		ret = -ENODEV;
		goto fail;
	} else if (IS_ERR(mbox->regs)) {
		ret = PTR_ERR(mbox->regs);
		goto fail;
	}
	regs_end = mbox->regs + resource_size(iomem);

	/* Scan and count available rings */
	mbox->num_rings = 0;
	for (regs = mbox->regs; regs < regs_end; regs += RING_REGS_SIZE) {
		if (readl_relaxed(regs + RING_VER) == RING_VER_MAGIC)
			mbox->num_rings++;
	}
	if (!mbox->num_rings) {
		ret = -ENODEV;
		goto fail;
	}

	/* Allocate driver ring structs */
	ring = devm_kcalloc(dev, mbox->num_rings, sizeof(*ring), GFP_KERNEL);
	if (!ring) {
		ret = -ENOMEM;
		goto fail;
	}
	mbox->rings = ring;

	/* Initialize members of driver ring structs */
	regs = mbox->regs;
	for (index = 0; index < mbox->num_rings; index++) {
		ring = &mbox->rings[index];
		ring->num = index;
		ring->mbox = mbox;
		while ((regs < regs_end) &&
		       (readl_relaxed(regs + RING_VER) != RING_VER_MAGIC))
			regs += RING_REGS_SIZE;
		if (regs_end <= regs) {
			ret = -ENODEV;
			goto fail;
		}
		ring->regs = regs;
		regs += RING_REGS_SIZE;
		ring->irq = UINT_MAX;
		ring->irq_requested = false;
		ring->msi_timer_val = MSI_TIMER_VAL_MASK;
		ring->msi_count_threshold = 0x1;
		memset(ring->requests, 0, sizeof(ring->requests));
		ring->bd_base = NULL;
		ring->bd_dma_base = 0;
		ring->cmpl_base = NULL;
		ring->cmpl_dma_base = 0;
		atomic_set(&ring->msg_send_count, 0);
		atomic_set(&ring->msg_cmpl_count, 0);
		spin_lock_init(&ring->lock);
		bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT);
		ring->cmpl_read_offset = 0;
	}

	/* FlexRM is capable of 40-bit physical addresses only */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
	if (ret) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret)
			goto fail;
	}

	/* Create DMA pool for ring BD memory */
	mbox->bd_pool = dma_pool_create("bd", dev, RING_BD_SIZE,
					1 << RING_BD_ALIGN_ORDER, 0);
	if (!mbox->bd_pool) {
		ret = -ENOMEM;
		goto fail;
	}

	/* Create DMA pool for ring completion memory */
	mbox->cmpl_pool = dma_pool_create("cmpl", dev, RING_CMPL_SIZE,
					  1 << RING_CMPL_ALIGN_ORDER, 0);
	if (!mbox->cmpl_pool) {
		ret = -ENOMEM;
		goto fail_destroy_bd_pool;
	}

	/* Allocate platform MSIs for each ring */
	ret = platform_device_msi_init_and_alloc_irqs(dev, mbox->num_rings,
						      flexrm_mbox_msi_write);
	if (ret)
		goto fail_destroy_cmpl_pool;

	/* Save allocated IRQ numbers for each ring */
	for (index = 0; index < mbox->num_rings; index++)
		mbox->rings[index].irq = msi_get_virq(dev, index);

	/* Check availability of debugfs */
	if (!debugfs_initialized())
		goto skip_debugfs;

	/* Create debugfs root entry */
	mbox->root = debugfs_create_dir(dev_name(mbox->dev), NULL);

	/* Create debugfs config entry */
	debugfs_create_devm_seqfile(mbox->dev, "config", mbox->root,
				    flexrm_debugfs_conf_show);

	/* Create debugfs stats entry */
	debugfs_create_devm_seqfile(mbox->dev, "stats", mbox->root,
				    flexrm_debugfs_stats_show);

skip_debugfs:

	/* Initialize mailbox controller */
	mbox->controller.txdone_irq = false;
	mbox->controller.txdone_poll = false;
	mbox->controller.ops = &flexrm_mbox_chan_ops;
	mbox->controller.dev = dev;
	mbox->controller.num_chans = mbox->num_rings;
	mbox->controller.of_xlate = flexrm_mbox_of_xlate;
	mbox->controller.chans = devm_kcalloc(dev, mbox->num_rings,
				sizeof(*mbox->controller.chans), GFP_KERNEL);
	if (!mbox->controller.chans) {
		ret = -ENOMEM;
		goto fail_free_debugfs_root;
	}
	for (index = 0; index < mbox->num_rings; index++)
		mbox->controller.chans[index].con_priv = &mbox->rings[index];

	/* Register mailbox controller */
	ret = devm_mbox_controller_register(dev, &mbox->controller);
	if (ret)
		goto fail_free_debugfs_root;

	dev_info(dev, "registered flexrm mailbox with %d channels\n",
		 mbox->controller.num_chans);

	return 0;

fail_free_debugfs_root:
	debugfs_remove_recursive(mbox->root);
	platform_device_msi_free_irqs_all(dev);
fail_destroy_cmpl_pool:
	dma_pool_destroy(mbox->cmpl_pool);
fail_destroy_bd_pool:
	dma_pool_destroy(mbox->bd_pool);
fail:
	return ret;
}

static void flexrm_mbox_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct flexrm_mbox *mbox = platform_get_drvdata(pdev);

	debugfs_remove_recursive(mbox->root);

	platform_device_msi_free_irqs_all(dev);

	dma_pool_destroy(mbox->cmpl_pool);
	dma_pool_destroy(mbox->bd_pool);
}

static const struct of_device_id flexrm_mbox_of_match[] = {
	{ .compatible = "brcm,iproc-flexrm-mbox", },
	{},
};
MODULE_DEVICE_TABLE(of, flexrm_mbox_of_match);

static struct platform_driver flexrm_mbox_driver = {
	.driver = {
		.name = "brcm-flexrm-mbox",
		.of_match_table = flexrm_mbox_of_match,
	},
	.probe = flexrm_mbox_probe,
	.remove = flexrm_mbox_remove,
};
module_platform_driver(flexrm_mbox_driver);

MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>");
MODULE_DESCRIPTION("Broadcom FlexRM mailbox driver");
MODULE_LICENSE("GPL v2");