1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Silvaco dual-role I3C master driver
4 *
5 * Copyright (C) 2020 Silvaco
6 * Author: Miquel RAYNAL <miquel.raynal@bootlin.com>
7 * Based on a work from: Conor Culhane <conor.culhane@silvaco.com>
8 */
9
10 #include <linux/bitfield.h>
11 #include <linux/clk.h>
12 #include <linux/completion.h>
13 #include <linux/errno.h>
14 #include <linux/i3c/master.h>
15 #include <linux/interrupt.h>
16 #include <linux/iopoll.h>
17 #include <linux/list.h>
18 #include <linux/module.h>
19 #include <linux/of.h>
20 #include <linux/pinctrl/consumer.h>
21 #include <linux/platform_device.h>
22 #include <linux/pm_runtime.h>
23
24 /* Master Mode Registers */
25 #define SVC_I3C_MCONFIG 0x000
26 #define SVC_I3C_MCONFIG_MASTER_EN BIT(0)
27 #define SVC_I3C_MCONFIG_DISTO(x) FIELD_PREP(BIT(3), (x))
28 #define SVC_I3C_MCONFIG_HKEEP(x) FIELD_PREP(GENMASK(5, 4), (x))
29 #define SVC_I3C_MCONFIG_ODSTOP(x) FIELD_PREP(BIT(6), (x))
30 #define SVC_I3C_MCONFIG_PPBAUD(x) FIELD_PREP(GENMASK(11, 8), (x))
31 #define SVC_I3C_MCONFIG_PPLOW(x) FIELD_PREP(GENMASK(15, 12), (x))
32 #define SVC_I3C_MCONFIG_ODBAUD(x) FIELD_PREP(GENMASK(23, 16), (x))
33 #define SVC_I3C_MCONFIG_ODHPP(x) FIELD_PREP(BIT(24), (x))
34 #define SVC_I3C_MCONFIG_SKEW(x) FIELD_PREP(GENMASK(27, 25), (x))
35 #define SVC_I3C_MCONFIG_SKEW_MASK GENMASK(27, 25)
36 #define SVC_I3C_MCONFIG_I2CBAUD(x) FIELD_PREP(GENMASK(31, 28), (x))
37
38 #define SVC_I3C_MCTRL 0x084
39 #define SVC_I3C_MCTRL_REQUEST_MASK GENMASK(2, 0)
40 #define SVC_I3C_MCTRL_REQUEST_NONE 0
41 #define SVC_I3C_MCTRL_REQUEST_START_ADDR 1
42 #define SVC_I3C_MCTRL_REQUEST_STOP 2
43 #define SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK 3
44 #define SVC_I3C_MCTRL_REQUEST_PROC_DAA 4
45 #define SVC_I3C_MCTRL_REQUEST_AUTO_IBI 7
46 #define SVC_I3C_MCTRL_TYPE_I3C 0
47 #define SVC_I3C_MCTRL_TYPE_I2C BIT(4)
48 #define SVC_I3C_MCTRL_IBIRESP_AUTO 0
49 #define SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE 0
50 #define SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE BIT(7)
51 #define SVC_I3C_MCTRL_IBIRESP_NACK BIT(6)
52 #define SVC_I3C_MCTRL_IBIRESP_MANUAL GENMASK(7, 6)
53 #define SVC_I3C_MCTRL_DIR(x) FIELD_PREP(BIT(8), (x))
54 #define SVC_I3C_MCTRL_DIR_WRITE 0
55 #define SVC_I3C_MCTRL_DIR_READ 1
56 #define SVC_I3C_MCTRL_ADDR(x) FIELD_PREP(GENMASK(15, 9), (x))
57 #define SVC_I3C_MCTRL_RDTERM(x) FIELD_PREP(GENMASK(23, 16), (x))
58
59 #define SVC_I3C_MSTATUS 0x088
60 #define SVC_I3C_MSTATUS_STATE(x) FIELD_GET(GENMASK(2, 0), (x))
61 #define SVC_I3C_MSTATUS_STATE_DAA(x) (SVC_I3C_MSTATUS_STATE(x) == 5)
62 #define SVC_I3C_MSTATUS_STATE_SLVREQ(x) (SVC_I3C_MSTATUS_STATE(x) == 1)
63 #define SVC_I3C_MSTATUS_STATE_IDLE(x) (SVC_I3C_MSTATUS_STATE(x) == 0)
64 #define SVC_I3C_MSTATUS_BETWEEN(x) FIELD_GET(BIT(4), (x))
65 #define SVC_I3C_MSTATUS_NACKED(x) FIELD_GET(BIT(5), (x))
66 #define SVC_I3C_MSTATUS_IBITYPE(x) FIELD_GET(GENMASK(7, 6), (x))
67 #define SVC_I3C_MSTATUS_IBITYPE_IBI 1
68 #define SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST 2
69 #define SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN 3
70 #define SVC_I3C_MINT_SLVSTART BIT(8)
71 #define SVC_I3C_MINT_MCTRLDONE BIT(9)
72 #define SVC_I3C_MINT_COMPLETE BIT(10)
73 #define SVC_I3C_MINT_RXPEND BIT(11)
74 #define SVC_I3C_MINT_TXNOTFULL BIT(12)
75 #define SVC_I3C_MINT_IBIWON BIT(13)
76 #define SVC_I3C_MINT_ERRWARN BIT(15)
77 #define SVC_I3C_MSTATUS_SLVSTART(x) FIELD_GET(SVC_I3C_MINT_SLVSTART, (x))
78 #define SVC_I3C_MSTATUS_MCTRLDONE(x) FIELD_GET(SVC_I3C_MINT_MCTRLDONE, (x))
79 #define SVC_I3C_MSTATUS_COMPLETE(x) FIELD_GET(SVC_I3C_MINT_COMPLETE, (x))
80 #define SVC_I3C_MSTATUS_RXPEND(x) FIELD_GET(SVC_I3C_MINT_RXPEND, (x))
81 #define SVC_I3C_MSTATUS_TXNOTFULL(x) FIELD_GET(SVC_I3C_MINT_TXNOTFULL, (x))
82 #define SVC_I3C_MSTATUS_IBIWON(x) FIELD_GET(SVC_I3C_MINT_IBIWON, (x))
83 #define SVC_I3C_MSTATUS_ERRWARN(x) FIELD_GET(SVC_I3C_MINT_ERRWARN, (x))
84 #define SVC_I3C_MSTATUS_IBIADDR(x) FIELD_GET(GENMASK(30, 24), (x))
85
86 #define SVC_I3C_IBIRULES 0x08C
87 #define SVC_I3C_IBIRULES_ADDR(slot, addr) FIELD_PREP(GENMASK(29, 0), \
88 ((addr) & 0x3F) << ((slot) * 6))
89 #define SVC_I3C_IBIRULES_ADDRS 5
90 #define SVC_I3C_IBIRULES_MSB0 BIT(30)
91 #define SVC_I3C_IBIRULES_NOBYTE BIT(31)
92 #define SVC_I3C_IBIRULES_MANDBYTE 0
93 #define SVC_I3C_MINTSET 0x090
94 #define SVC_I3C_MINTCLR 0x094
95 #define SVC_I3C_MINTMASKED 0x098
96 #define SVC_I3C_MERRWARN 0x09C
97 #define SVC_I3C_MERRWARN_NACK BIT(2)
98 #define SVC_I3C_MERRWARN_TIMEOUT BIT(20)
99 #define SVC_I3C_MDMACTRL 0x0A0
100 #define SVC_I3C_MDATACTRL 0x0AC
101 #define SVC_I3C_MDATACTRL_FLUSHTB BIT(0)
102 #define SVC_I3C_MDATACTRL_FLUSHRB BIT(1)
103 #define SVC_I3C_MDATACTRL_UNLOCK_TRIG BIT(3)
104 #define SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL GENMASK(5, 4)
105 #define SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY 0
106 #define SVC_I3C_MDATACTRL_RXCOUNT(x) FIELD_GET(GENMASK(28, 24), (x))
107 #define SVC_I3C_MDATACTRL_TXCOUNT(x) FIELD_GET(GENMASK(20, 16), (x))
108 #define SVC_I3C_MDATACTRL_TXFULL BIT(30)
109 #define SVC_I3C_MDATACTRL_RXEMPTY BIT(31)
110
111 #define SVC_I3C_MWDATAB 0x0B0
112 #define SVC_I3C_MWDATAB_END BIT(8)
113
114 #define SVC_I3C_MWDATABE 0x0B4
115 #define SVC_I3C_MWDATAH 0x0B8
116 #define SVC_I3C_MWDATAHE 0x0BC
117 #define SVC_I3C_MRDATAB 0x0C0
118 #define SVC_I3C_MRDATAH 0x0C8
119 #define SVC_I3C_MWDATAB1 0x0CC
120 #define SVC_I3C_MWMSG_SDR 0x0D0
121 #define SVC_I3C_MRMSG_SDR 0x0D4
122 #define SVC_I3C_MWMSG_DDR 0x0D8
123 #define SVC_I3C_MRMSG_DDR 0x0DC
124
125 #define SVC_I3C_MDYNADDR 0x0E4
126 #define SVC_MDYNADDR_VALID BIT(0)
127 #define SVC_MDYNADDR_ADDR(x) FIELD_PREP(GENMASK(7, 1), (x))
128
129 #define SVC_I3C_MAX_DEVS 32
130 #define SVC_I3C_PM_TIMEOUT_MS 1000
131
132 /* This parameter depends on the implementation and may be tuned */
133 #define SVC_I3C_FIFO_SIZE 16
134 #define SVC_I3C_PPBAUD_MAX 15
135 #define SVC_I3C_QUICK_I2C_CLK 4170000
136
137 #define SVC_I3C_EVENT_IBI GENMASK(7, 0)
138 #define SVC_I3C_EVENT_HOTJOIN BIT(31)
139
140 /*
141 * SVC_I3C_QUIRK_FIFO_EMPTY:
142 * I3C HW stalls the write transfer if the transmit FIFO becomes empty,
143 * when new data is written to FIFO, I3C HW resumes the transfer but
144 * the first transmitted data bit may have the wrong value.
145 * Workaround:
146 * Fill the FIFO in advance to prevent FIFO from becoming empty.
147 */
148 #define SVC_I3C_QUIRK_FIFO_EMPTY BIT(0)
149 /*
 * SVC_I3C_QUIRK_FALSE_SLVSTART:
151 * I3C HW may generate an invalid SlvStart event when emitting a STOP.
152 * If it is a true SlvStart, the MSTATUS state is SLVREQ.
153 */
154 #define SVC_I3C_QUIRK_FALSE_SLVSTART BIT(1)
155 /*
156 * SVC_I3C_QUIRK_DAA_CORRUPT:
157 * When MCONFIG.SKEW=0 and MCONFIG.ODHPP=0, the ENTDAA transaction gets
 * corrupted and results in no repeated-start condition at the end of
159 * address assignment.
160 * Workaround:
161 * Set MCONFIG.SKEW to 1 before initiating the DAA process. After the DAA
162 * process is completed, return MCONFIG.SKEW to its previous value.
163 */
164 #define SVC_I3C_QUIRK_DAA_CORRUPT BIT(2)
165
/* One command within a queued transfer. */
struct svc_i3c_cmd {
	u8 addr;			/* Target address */
	bool rnw;			/* True for a read, false for a write */
	u8 *in;				/* Input buffer (read direction) */
	const void *out;		/* Output buffer (write direction) */
	unsigned int len;		/* Requested length in bytes */
	unsigned int actual_len;	/* Length actually transferred */
	struct i3c_priv_xfer *xfer;	/* Originating private transfer, if any */
	bool continued;			/* Presumably: more commands follow (no STOP yet) — confirm against xfer path */
};
176
/* A queued transfer: a batch of commands completed as a unit. */
struct svc_i3c_xfer {
	struct list_head node;		/* Link in the master's xferqueue.list */
	struct completion comp;		/* Signalled when the transfer finishes */
	int ret;			/* Transfer result (0 or negative errno) */
	unsigned int type;		/* Bus type — presumably SVC_I3C_MCTRL_TYPE_I3C/_I2C; confirm at use site */
	unsigned int ncmds;		/* Number of entries in @cmds */
	struct svc_i3c_cmd cmds[] __counted_by(ncmds);	/* Commands to execute in order */
};
185
/* Register values saved/restored across a power-management cycle. */
struct svc_i3c_regs_save {
	u32 mconfig;	/* SVC_I3C_MCONFIG contents */
	u32 mdynaddr;	/* SVC_I3C_MDYNADDR contents */
};
190
/* Per-compatible driver data: bitmask of SVC_I3C_QUIRK_* flags. */
struct svc_i3c_drvdata {
	u32 quirks;
};
194
195 /**
196 * struct svc_i3c_master - Silvaco I3C Master structure
197 * @base: I3C master controller
198 * @dev: Corresponding device
199 * @regs: Memory mapping
200 * @saved_regs: Volatile values for PM operations
201 * @free_slots: Bit array of available slots
202 * @addrs: Array containing the dynamic addresses of each attached device
203 * @descs: Array of descriptors, one per attached device
204 * @hj_work: Hot-join work
205 * @irq: Main interrupt
 * @num_clks: Number of I3C clocks
207 * @fclk: Fast clock (bus)
208 * @clks: I3C clock array
209 * @xferqueue: Transfer queue structure
210 * @xferqueue.list: List member
211 * @xferqueue.cur: Current ongoing transfer
212 * @xferqueue.lock: Queue lock
213 * @ibi: IBI structure
214 * @ibi.num_slots: Number of slots available in @ibi.slots
215 * @ibi.slots: Available IBI slots
216 * @ibi.tbq_slot: To be queued IBI slot
217 * @ibi.lock: IBI lock
218 * @lock: Transfer lock, protect between IBI work thread and callbacks from master
219 * @drvdata: Driver data
220 * @enabled_events: Bit masks for enable events (IBI, HotJoin).
221 * @mctrl_config: Configuration value in SVC_I3C_MCTRL for setting speed back.
222 */
/* Members are documented in the kernel-doc comment above. */
struct svc_i3c_master {
	struct i3c_master_controller base;
	struct device *dev;
	void __iomem *regs;
	struct svc_i3c_regs_save saved_regs;
	u32 free_slots;		/* bit i set => slot i is free */
	u8 addrs[SVC_I3C_MAX_DEVS];
	struct i3c_dev_desc *descs[SVC_I3C_MAX_DEVS];
	struct work_struct hj_work;
	int irq;
	int num_clks;
	struct clk *fclk;
	struct clk_bulk_data *clks;
	struct {
		struct list_head list;
		struct svc_i3c_xfer *cur;
		/* Prevent races between transfers */
		spinlock_t lock;
	} xferqueue;
	struct {
		unsigned int num_slots;
		struct i3c_dev_desc **slots;
		struct i3c_ibi_slot *tbq_slot;
		/* Prevent races within IBI handlers */
		spinlock_t lock;
	} ibi;
	struct mutex lock;
	const struct svc_i3c_drvdata *drvdata;
	u32 enabled_events;	/* SVC_I3C_EVENT_IBI / SVC_I3C_EVENT_HOTJOIN bits */
	u32 mctrl_config;	/* MCONFIG value computed at bus init */
};
254
255 /**
256 * struct svc_i3c_i2c_dev_data - Device specific data
257 * @index: Index in the master tables corresponding to this device
258 * @ibi: IBI slot index in the master structure
259 * @ibi_pool: IBI pool associated to this device
260 */
/* Members are documented in the kernel-doc comment above. */
struct svc_i3c_i2c_dev_data {
	u8 index;	/* Slot index in master->addrs[] / master->descs[] */
	int ibi;	/* IBI slot index, -1 when IBIs are not set up */
	struct i3c_generic_ibi_pool *ibi_pool;
};
266
svc_has_quirk(struct svc_i3c_master * master,u32 quirk)267 static inline bool svc_has_quirk(struct svc_i3c_master *master, u32 quirk)
268 {
269 return (master->drvdata->quirks & quirk);
270 }
271
svc_has_daa_corrupt(struct svc_i3c_master * master)272 static inline bool svc_has_daa_corrupt(struct svc_i3c_master *master)
273 {
274 return ((master->drvdata->quirks & SVC_I3C_QUIRK_DAA_CORRUPT) &&
275 !(master->mctrl_config &
276 (SVC_I3C_MCONFIG_SKEW_MASK | SVC_I3C_MCONFIG_ODHPP(1))));
277 }
278
is_events_enabled(struct svc_i3c_master * master,u32 mask)279 static inline bool is_events_enabled(struct svc_i3c_master *master, u32 mask)
280 {
281 return !!(master->enabled_events & mask);
282 }
283
svc_i3c_master_error(struct svc_i3c_master * master)284 static bool svc_i3c_master_error(struct svc_i3c_master *master)
285 {
286 u32 mstatus, merrwarn;
287
288 mstatus = readl(master->regs + SVC_I3C_MSTATUS);
289 if (SVC_I3C_MSTATUS_ERRWARN(mstatus)) {
290 merrwarn = readl(master->regs + SVC_I3C_MERRWARN);
291 writel(merrwarn, master->regs + SVC_I3C_MERRWARN);
292
293 /* Ignore timeout error */
294 if (merrwarn & SVC_I3C_MERRWARN_TIMEOUT) {
295 dev_dbg(master->dev, "Warning condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
296 mstatus, merrwarn);
297 return false;
298 }
299
300 dev_err(master->dev,
301 "Error condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
302 mstatus, merrwarn);
303
304 return true;
305 }
306
307 return false;
308 }
309
/* Unmask the interrupt sources selected by @mask. */
static void svc_i3c_master_enable_interrupts(struct svc_i3c_master *master, u32 mask)
{
	void __iomem *mintset = master->regs + SVC_I3C_MINTSET;

	writel(mask, mintset);
}
314
svc_i3c_master_disable_interrupts(struct svc_i3c_master * master)315 static void svc_i3c_master_disable_interrupts(struct svc_i3c_master *master)
316 {
317 u32 mask = readl(master->regs + SVC_I3C_MINTSET);
318
319 writel(mask, master->regs + SVC_I3C_MINTCLR);
320 }
321
svc_i3c_master_clear_merrwarn(struct svc_i3c_master * master)322 static void svc_i3c_master_clear_merrwarn(struct svc_i3c_master *master)
323 {
324 /* Clear pending warnings */
325 writel(readl(master->regs + SVC_I3C_MERRWARN),
326 master->regs + SVC_I3C_MERRWARN);
327 }
328
svc_i3c_master_flush_fifo(struct svc_i3c_master * master)329 static void svc_i3c_master_flush_fifo(struct svc_i3c_master *master)
330 {
331 /* Flush FIFOs */
332 writel(SVC_I3C_MDATACTRL_FLUSHTB | SVC_I3C_MDATACTRL_FLUSHRB,
333 master->regs + SVC_I3C_MDATACTRL);
334 }
335
svc_i3c_master_reset_fifo_trigger(struct svc_i3c_master * master)336 static void svc_i3c_master_reset_fifo_trigger(struct svc_i3c_master *master)
337 {
338 u32 reg;
339
340 /* Set RX and TX tigger levels, flush FIFOs */
341 reg = SVC_I3C_MDATACTRL_FLUSHTB |
342 SVC_I3C_MDATACTRL_FLUSHRB |
343 SVC_I3C_MDATACTRL_UNLOCK_TRIG |
344 SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL |
345 SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY;
346 writel(reg, master->regs + SVC_I3C_MDATACTRL);
347 }
348
/*
 * Bring the controller back to a quiescent state: clear latched
 * error/warning flags, reset the FIFO triggers (flushing both FIFOs)
 * and mask all currently enabled interrupts.
 */
static void svc_i3c_master_reset(struct svc_i3c_master *master)
{
	svc_i3c_master_clear_merrwarn(master);
	svc_i3c_master_reset_fifo_trigger(master);
	svc_i3c_master_disable_interrupts(master);
}
355
/* Retrieve the driver-private structure embedding @master. */
static inline struct svc_i3c_master *
to_svc_i3c_master(struct i3c_master_controller *master)
{
	return container_of(master, struct svc_i3c_master, base);
}
361
svc_i3c_master_hj_work(struct work_struct * work)362 static void svc_i3c_master_hj_work(struct work_struct *work)
363 {
364 struct svc_i3c_master *master;
365
366 master = container_of(work, struct svc_i3c_master, hj_work);
367 i3c_master_do_daa(&master->base);
368 }
369
370 static struct i3c_dev_desc *
svc_i3c_master_dev_from_addr(struct svc_i3c_master * master,unsigned int ibiaddr)371 svc_i3c_master_dev_from_addr(struct svc_i3c_master *master,
372 unsigned int ibiaddr)
373 {
374 int i;
375
376 for (i = 0; i < SVC_I3C_MAX_DEVS; i++)
377 if (master->addrs[i] == ibiaddr)
378 break;
379
380 if (i == SVC_I3C_MAX_DEVS)
381 return NULL;
382
383 return master->descs[i];
384 }
385
/* Request a STOP condition on the bus and let it settle. */
static void svc_i3c_master_emit_stop(struct svc_i3c_master *master)
{
	writel(SVC_I3C_MCTRL_REQUEST_STOP, master->regs + SVC_I3C_MCTRL);

	/*
	 * This delay is necessary after the emission of a stop, otherwise eg.
	 * repeating IBIs do not get detected. There is a note in the manual
	 * about it, stating that the stop condition might not be settled
	 * correctly if a start condition follows too rapidly.
	 */
	udelay(1);
}
398
/*
 * Receive the payload of an in-band interrupt into a free slot of the
 * device's IBI pool. The filled slot is parked in master->ibi.tbq_slot;
 * it is queued to the core later, once the exchange terminated cleanly.
 *
 * Returns 0 on success, -ENOSPC when no slot is free, or the poll
 * timeout error when the transfer never completed.
 */
static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
				     struct i3c_dev_desc *dev)
{
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	struct i3c_ibi_slot *slot;
	unsigned int count;
	u32 mdatactrl;
	int ret, val;
	u8 *buf;

	slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
	if (!slot)
		return -ENOSPC;

	slot->len = 0;
	buf = slot->data;

	/* Wait for the IBI exchange to complete before draining the FIFO */
	ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
					 SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
	if (ret) {
		dev_err(master->dev, "Timeout when polling for COMPLETE\n");
		/* Give the unused slot back to the pool */
		i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
		return ret;
	}

	/* Drain the RX FIFO, bounded by one FIFO's worth of payload */
	while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS)) &&
	       slot->len < SVC_I3C_FIFO_SIZE) {
		mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
		count = SVC_I3C_MDATACTRL_RXCOUNT(mdatactrl);
		readsb(master->regs + SVC_I3C_MRDATAB, buf, count);
		slot->len += count;
		buf += count;
	}

	/* Hand the slot over to the non-critical completion path */
	master->ibi.tbq_slot = slot;

	return 0;
}
437
/*
 * ACK the pending IBI, optionally accepting the mandatory data byte,
 * then wait for the controller to process the request.
 */
static int svc_i3c_master_ack_ibi(struct svc_i3c_master *master,
				  bool mandatory_byte)
{
	u32 request, reg;

	request = SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK |
		  (mandatory_byte ? SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE :
				    SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE);

	writel(request, master->regs + SVC_I3C_MCTRL);

	return readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS, reg,
					 SVC_I3C_MSTATUS_MCTRLDONE(reg), 1, 1000);
}
456
svc_i3c_master_nack_ibi(struct svc_i3c_master * master)457 static int svc_i3c_master_nack_ibi(struct svc_i3c_master *master)
458 {
459 int ret;
460 u32 reg;
461
462 writel(SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK |
463 SVC_I3C_MCTRL_IBIRESP_NACK,
464 master->regs + SVC_I3C_MCTRL);
465
466 ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS, reg,
467 SVC_I3C_MSTATUS_MCTRLDONE(reg), 1, 1000);
468 return ret;
469 }
470
/*
 * Called when arbitration was lost to an IBI during a transfer:
 * acknowledge the IBIWON flag and NACK the request types the hardware
 * cannot NACK by itself.
 */
static int svc_i3c_master_handle_ibi_won(struct svc_i3c_master *master, u32 mstatus)
{
	u32 ibitype = SVC_I3C_MSTATUS_IBITYPE(mstatus);

	/* IBIWON is write-1-to-clear */
	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);

	/* Hardware can't auto emit NACK for hot join and master request */
	if (ibitype == SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN ||
	    ibitype == SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST)
		return svc_i3c_master_nack_ibi(master);

	return 0;
}
489
/*
 * Service a slave start request: win arbitration by emitting the
 * broadcast address, identify the IBI type, ACK/NACK it, then perform
 * the follow-up work (queue the payload, schedule DAA, emit STOP).
 * Runs in hard-IRQ context under the xferqueue lock.
 */
static void svc_i3c_master_ibi_isr(struct svc_i3c_master *master)
{
	struct svc_i3c_i2c_dev_data *data;
	unsigned int ibitype, ibiaddr;
	struct i3c_dev_desc *dev;
	u32 status, val;
	int ret;

	/*
	 * According to I3C spec ver 1.1, 09-Jun-2021, section 5.1.2.5:
	 *
	 * The I3C Controller shall hold SCL low while the Bus is in ACK/NACK Phase of I3C/I2C
	 * transfer. But maximum stall time is 100us. The IRQs have to be disabled to prevent
	 * schedule during the whole I3C transaction, otherwise, the I3C bus timeout may happen if
	 * any irq or schedule happen during transaction.
	 */
	guard(spinlock)(&master->xferqueue.lock);

	/*
	 * IBIWON may be set before SVC_I3C_MCTRL_REQUEST_AUTO_IBI, causing
	 * readl_relaxed_poll_timeout() to return immediately. Consequently,
	 * ibitype will be 0 since it was last updated only after the 8th SCL
	 * cycle, leading to missed client IBI handlers.
	 *
	 * A typical scenario is when IBIWON occurs and bus arbitration is lost
	 * at svc_i3c_master_priv_xfers().
	 *
	 * Clear SVC_I3C_MINT_IBIWON before sending SVC_I3C_MCTRL_REQUEST_AUTO_IBI.
	 */
	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);

	/*
	 * Write REQUEST_START_ADDR request to emit broadcast address for arbitration,
	 * instead of using AUTO_IBI.
	 *
	 * Using AutoIBI request may cause controller to remain in AutoIBI state when
	 * there is a glitch on SDA line (high->low->high).
	 * 1. SDA high->low, raising an interrupt to execute IBI isr.
	 * 2. SDA low->high.
	 * 3. IBI isr writes an AutoIBI request.
	 * 4. The controller will not start AutoIBI process because SDA is not low.
	 * 5. IBIWON polling times out.
	 * 6. Controller remains in AutoIBI state and doesn't accept EmitStop request.
	 */
	writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
	       SVC_I3C_MCTRL_TYPE_I3C |
	       SVC_I3C_MCTRL_IBIRESP_MANUAL |
	       SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE) |
	       SVC_I3C_MCTRL_ADDR(I3C_BROADCAST_ADDR),
	       master->regs + SVC_I3C_MCTRL);

	/* Wait for IBIWON, should take approximately 100us */
	ret = readl_relaxed_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS, val,
						SVC_I3C_MSTATUS_IBIWON(val), 0, 100);
	if (ret) {
		dev_err(master->dev, "Timeout when polling for IBIWON\n");
		svc_i3c_master_emit_stop(master);
		return;
	}

	status = readl(master->regs + SVC_I3C_MSTATUS);
	ibitype = SVC_I3C_MSTATUS_IBITYPE(status);
	ibiaddr = SVC_I3C_MSTATUS_IBIADDR(status);

	/* Handle the critical responses to IBI's */
	switch (ibitype) {
	case SVC_I3C_MSTATUS_IBITYPE_IBI:
		dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
		if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI)) {
			svc_i3c_master_nack_ibi(master);
		} else {
			/* ACK with a mandatory byte when the BCR advertises a payload */
			if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD)
				svc_i3c_master_ack_ibi(master, true);
			else
				svc_i3c_master_ack_ibi(master, false);
			svc_i3c_master_handle_ibi(master, dev);
		}
		break;
	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
		if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
			svc_i3c_master_ack_ibi(master, false);
		else
			svc_i3c_master_nack_ibi(master);
		break;
	case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
		svc_i3c_master_nack_ibi(master);
		break;
	default:
		break;
	}

	/*
	 * If an error happened, we probably got interrupted and the exchange
	 * timedout. In this case we just drop everything, emit a stop and wait
	 * for the slave to interrupt again.
	 */
	if (svc_i3c_master_error(master)) {
		if (master->ibi.tbq_slot) {
			/* tbq_slot is only set on the IBI path above, so dev is valid here */
			data = i3c_dev_get_master_data(dev);
			i3c_generic_ibi_recycle_slot(data->ibi_pool,
						     master->ibi.tbq_slot);
			master->ibi.tbq_slot = NULL;
		}

		svc_i3c_master_emit_stop(master);

		return;
	}

	/* Handle the non critical tasks */
	switch (ibitype) {
	case SVC_I3C_MSTATUS_IBITYPE_IBI:
		svc_i3c_master_emit_stop(master);
		if (dev) {
			i3c_master_queue_ibi(dev, master->ibi.tbq_slot);
			master->ibi.tbq_slot = NULL;
		}
		break;
	case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
		svc_i3c_master_emit_stop(master);
		if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
			queue_work(master->base.wq, &master->hj_work);
		break;
	case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
		svc_i3c_master_emit_stop(master);
		break;
	default:
		break;
	}
}
620
svc_i3c_master_irq_handler(int irq,void * dev_id)621 static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
622 {
623 struct svc_i3c_master *master = (struct svc_i3c_master *)dev_id;
624 u32 active = readl(master->regs + SVC_I3C_MSTATUS);
625
626 if (!SVC_I3C_MSTATUS_SLVSTART(active))
627 return IRQ_NONE;
628
629 /* Clear the interrupt status */
630 writel(SVC_I3C_MINT_SLVSTART, master->regs + SVC_I3C_MSTATUS);
631
632 /* Ignore the false event */
633 if (svc_has_quirk(master, SVC_I3C_QUIRK_FALSE_SLVSTART) &&
634 !SVC_I3C_MSTATUS_STATE_SLVREQ(active))
635 return IRQ_HANDLED;
636
637 /*
638 * The SDA line remains low until the request is processed.
639 * Receive the request in the interrupt context to respond promptly
640 * and restore the bus to idle state.
641 */
642 svc_i3c_master_ibi_isr(master);
643
644 return IRQ_HANDLED;
645 }
646
/*
 * Adjust the open-drain timings. I3C_OPEN_DRAIN_SLOW_SPEED programs an
 * I2C-compatible 50% duty cycle so the first broadcast address is seen
 * by legacy I2C devices; I3C_OPEN_DRAIN_NORMAL_SPEED restores the
 * configuration cached at bus init time.
 *
 * Returns 0 on success or a negative error code (runtime PM failure,
 * or -EINVAL when the fast clock rate is unknown).
 */
static int svc_i3c_master_set_speed(struct i3c_master_controller *m,
				    enum i3c_open_drain_speed speed)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct i3c_bus *bus = i3c_master_get_bus(&master->base);
	u32 ppbaud, odbaud, odhpp, mconfig;
	unsigned long fclk_rate;
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return ret;
	}

	switch (speed) {
	case I3C_OPEN_DRAIN_SLOW_SPEED:
		fclk_rate = clk_get_rate(master->fclk);
		if (!fclk_rate) {
			ret = -EINVAL;
			goto rpm_out;
		}
		/*
		 * Set 50% duty-cycle I2C speed to I3C OPEN-DRAIN mode, so the first
		 * broadcast address is visible to all I2C/I3C devices on the I3C bus.
		 * I3C device working as a I2C device will turn off its 50ns Spike
		 * Filter to change to I3C mode.
		 */
		mconfig = master->mctrl_config;
		/* GENMASK(11, 8) is the PPBAUD field of MCONFIG */
		ppbaud = FIELD_GET(GENMASK(11, 8), mconfig);
		odhpp = 0;
		odbaud = DIV_ROUND_UP(fclk_rate, bus->scl_rate.i2c * (2 + 2 * ppbaud)) - 1;
		/* Clear ODBAUD and ODHPP (bits 24:16) before inserting the new values */
		mconfig &= ~GENMASK(24, 16);
		mconfig |= SVC_I3C_MCONFIG_ODBAUD(odbaud) | SVC_I3C_MCONFIG_ODHPP(odhpp);
		writel(mconfig, master->regs + SVC_I3C_MCONFIG);
		break;
	case I3C_OPEN_DRAIN_NORMAL_SPEED:
		writel(master->mctrl_config, master->regs + SVC_I3C_MCONFIG);
		break;
	}

rpm_out:
	pm_runtime_put_autosuspend(master->dev);

	return ret;
}
693
/*
 * Configure MCONFIG from the target bus rates: derive the push-pull and
 * open-drain baud dividers from the fast clock for the configured bus
 * mode, then grab a free dynamic address and register the master.
 */
static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct i3c_bus *bus = i3c_master_get_bus(m);
	struct i3c_device_info info = {};
	unsigned long fclk_rate, fclk_period_ns;
	unsigned long i2c_period_ns, i2c_scl_rate, i3c_scl_rate;
	unsigned int high_period_ns, od_low_period_ns;
	u32 ppbaud, pplow, odhpp, odbaud, odstop, i2cbaud, reg;
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev,
			"<%s> cannot resume i3c bus master, err: %d\n",
			__func__, ret);
		return ret;
	}

	/* Timings derivation */
	fclk_rate = clk_get_rate(master->fclk);
	if (!fclk_rate) {
		ret = -EINVAL;
		goto rpm_out;
	}

	fclk_period_ns = DIV_ROUND_UP(1000000000, fclk_rate);
	i2c_period_ns = DIV_ROUND_UP(1000000000, bus->scl_rate.i2c);
	i2c_scl_rate = bus->scl_rate.i2c;
	i3c_scl_rate = bus->scl_rate.i3c;

	/*
	 * Using I3C Push-Pull mode, target is 12.5MHz/80ns period.
	 * Simplest configuration is using a 50% duty-cycle of 40ns.
	 */
	ppbaud = DIV_ROUND_UP(fclk_rate / 2, i3c_scl_rate) - 1;
	pplow = 0;

	/*
	 * Using I3C Open-Drain mode, target is 4.17MHz/240ns with a
	 * duty-cycle tuned so that high levels are filtered out by
	 * the 50ns filter (target being 40ns).
	 */
	odhpp = 1;
	high_period_ns = (ppbaud + 1) * fclk_period_ns;
	odbaud = DIV_ROUND_UP(fclk_rate, SVC_I3C_QUICK_I2C_CLK * (1 + ppbaud)) - 2;
	od_low_period_ns = (odbaud + 1) * high_period_ns;

	switch (bus->mode) {
	case I3C_BUS_MODE_PURE:
		/* No I2C devices on the bus: I2C timings are irrelevant */
		i2cbaud = 0;
		odstop = 0;
		break;
	case I3C_BUS_MODE_MIXED_FAST:
		/*
		 * Using I2C Fm+ mode, target is 1MHz/1000ns, the difference
		 * between the high and low period does not really matter.
		 */
		i2cbaud = DIV_ROUND_UP(i2c_period_ns, od_low_period_ns) - 2;
		odstop = 1;
		break;
	case I3C_BUS_MODE_MIXED_LIMITED:
	case I3C_BUS_MODE_MIXED_SLOW:
		/* I3C PP + I3C OP + I2C OP both use i2c clk rate */
		if (ppbaud > SVC_I3C_PPBAUD_MAX) {
			/* Cap PPBAUD and stretch the low period instead */
			ppbaud = SVC_I3C_PPBAUD_MAX;
			pplow = DIV_ROUND_UP(fclk_rate, i3c_scl_rate) - (2 + 2 * ppbaud);
		}

		high_period_ns = (ppbaud + 1) * fclk_period_ns;
		odhpp = 0;
		odbaud = DIV_ROUND_UP(fclk_rate, i2c_scl_rate * (2 + 2 * ppbaud)) - 1;

		od_low_period_ns = (odbaud + 1) * high_period_ns;
		i2cbaud = DIV_ROUND_UP(i2c_period_ns, od_low_period_ns) - 2;
		odstop = 1;
		break;
	default:
		/*
		 * NOTE(review): ret is still 0 here, so an unknown bus mode
		 * bails out reporting success — confirm this is intended.
		 */
		goto rpm_out;
	}

	reg = SVC_I3C_MCONFIG_MASTER_EN |
	      SVC_I3C_MCONFIG_DISTO(0) |
	      SVC_I3C_MCONFIG_HKEEP(0) |
	      SVC_I3C_MCONFIG_ODSTOP(odstop) |
	      SVC_I3C_MCONFIG_PPBAUD(ppbaud) |
	      SVC_I3C_MCONFIG_PPLOW(pplow) |
	      SVC_I3C_MCONFIG_ODBAUD(odbaud) |
	      SVC_I3C_MCONFIG_ODHPP(odhpp) |
	      SVC_I3C_MCONFIG_SKEW(0) |
	      SVC_I3C_MCONFIG_I2CBAUD(i2cbaud);
	writel(reg, master->regs + SVC_I3C_MCONFIG);

	/* Keep a copy so the open-drain speed can be restored later */
	master->mctrl_config = reg;
	/* Master core's registration */
	ret = i3c_master_get_free_addr(m, 0);
	if (ret < 0)
		goto rpm_out;

	info.dyn_addr = ret;

	writel(SVC_MDYNADDR_VALID | SVC_MDYNADDR_ADDR(info.dyn_addr),
	       master->regs + SVC_I3C_MDYNADDR);

	ret = i3c_master_set_info(&master->base, &info);
	if (ret)
		goto rpm_out;

rpm_out:
	pm_runtime_put_autosuspend(master->dev);

	return ret;
}
807
/* Tear the bus down: mask all interrupts and clear MASTER_EN in MCONFIG. */
static void svc_i3c_master_bus_cleanup(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return;
	}

	svc_i3c_master_disable_interrupts(master);

	/* Disable master */
	writel(0, master->regs + SVC_I3C_MCONFIG);

	pm_runtime_put_autosuspend(master->dev);
}
826
svc_i3c_master_reserve_slot(struct svc_i3c_master * master)827 static int svc_i3c_master_reserve_slot(struct svc_i3c_master *master)
828 {
829 unsigned int slot;
830
831 if (!(master->free_slots & GENMASK(SVC_I3C_MAX_DEVS - 1, 0)))
832 return -ENOSPC;
833
834 slot = ffs(master->free_slots) - 1;
835
836 master->free_slots &= ~BIT(slot);
837
838 return slot;
839 }
840
svc_i3c_master_release_slot(struct svc_i3c_master * master,unsigned int slot)841 static void svc_i3c_master_release_slot(struct svc_i3c_master *master,
842 unsigned int slot)
843 {
844 master->free_slots |= BIT(slot);
845 }
846
svc_i3c_master_attach_i3c_dev(struct i3c_dev_desc * dev)847 static int svc_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
848 {
849 struct i3c_master_controller *m = i3c_dev_get_master(dev);
850 struct svc_i3c_master *master = to_svc_i3c_master(m);
851 struct svc_i3c_i2c_dev_data *data;
852 int slot;
853
854 slot = svc_i3c_master_reserve_slot(master);
855 if (slot < 0)
856 return slot;
857
858 data = kzalloc(sizeof(*data), GFP_KERNEL);
859 if (!data) {
860 svc_i3c_master_release_slot(master, slot);
861 return -ENOMEM;
862 }
863
864 data->ibi = -1;
865 data->index = slot;
866 master->addrs[slot] = dev->info.dyn_addr ? dev->info.dyn_addr :
867 dev->info.static_addr;
868 master->descs[slot] = dev;
869
870 i3c_dev_set_master_data(dev, data);
871
872 return 0;
873 }
874
/* Refresh the recorded address after a dynamic address change. */
static int svc_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
					   u8 old_dyn_addr)
{
	struct svc_i3c_master *master = to_svc_i3c_master(i3c_dev_get_master(dev));
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);

	master->addrs[data->index] = dev->info.dyn_addr ?: dev->info.static_addr;

	return 0;
}
887
svc_i3c_master_detach_i3c_dev(struct i3c_dev_desc * dev)888 static void svc_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
889 {
890 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
891 struct i3c_master_controller *m = i3c_dev_get_master(dev);
892 struct svc_i3c_master *master = to_svc_i3c_master(m);
893
894 master->addrs[data->index] = 0;
895 svc_i3c_master_release_slot(master, data->index);
896
897 kfree(data);
898 }
899
svc_i3c_master_attach_i2c_dev(struct i2c_dev_desc * dev)900 static int svc_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
901 {
902 struct i3c_master_controller *m = i2c_dev_get_master(dev);
903 struct svc_i3c_master *master = to_svc_i3c_master(m);
904 struct svc_i3c_i2c_dev_data *data;
905 int slot;
906
907 slot = svc_i3c_master_reserve_slot(master);
908 if (slot < 0)
909 return slot;
910
911 data = kzalloc(sizeof(*data), GFP_KERNEL);
912 if (!data) {
913 svc_i3c_master_release_slot(master, slot);
914 return -ENOMEM;
915 }
916
917 data->index = slot;
918 master->addrs[slot] = dev->addr;
919
920 i2c_dev_set_master_data(dev, data);
921
922 return 0;
923 }
924
svc_i3c_master_detach_i2c_dev(struct i2c_dev_desc * dev)925 static void svc_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
926 {
927 struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
928 struct i3c_master_controller *m = i2c_dev_get_master(dev);
929 struct svc_i3c_master *master = to_svc_i3c_master(m);
930
931 svc_i3c_master_release_slot(master, data->index);
932
933 kfree(data);
934 }
935
/*
 * Read @len bytes one at a time from the RX FIFO into @dst, waiting
 * for each byte to become available. Returns 0 or a poll timeout error.
 */
static int svc_i3c_master_readb(struct svc_i3c_master *master, u8 *dst,
				unsigned int len)
{
	unsigned int i;
	u32 reg;
	int ret;

	for (i = 0; i < len; i++) {
		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
						reg,
						SVC_I3C_MSTATUS_RXPEND(reg),
						0, 1000);
		if (ret)
			return ret;

		dst[i] = readl(master->regs + SVC_I3C_MRDATAB);
	}

	return 0;
}
955
/*
 * svc_i3c_master_do_daa_locked() - run the hardware ENTDAA loop.
 * @master: controller context
 * @addrs: filled with the dynamic address assigned to each discovered device
 * @count: on success, number of devices that acked an address
 *
 * Must be called with the xferqueue lock held (uses the *_atomic pollers).
 * Returns 0 on success (including the "no device answered" case), a negative
 * errno on poll timeout, dynamic-address exhaustion, or a device nacking its
 * address twice in a row.
 */
static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
					u8 *addrs, unsigned int *count)
{
	u64 prov_id[SVC_I3C_MAX_DEVS] = {}, nacking_prov_id = 0;
	unsigned int dev_nb = 0, last_addr = 0, dyn_addr = 0;
	u32 reg;
	int ret, i;

	svc_i3c_master_flush_fifo(master);

	while (true) {
		/* clean SVC_I3C_MINT_IBIWON w1c bits */
		writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);

		/* SVC_I3C_MCTRL_REQUEST_PROC_DAA have two mode, ENTER DAA or PROCESS DAA.
		 *
		 * ENTER DAA:
		 *   1 will issue START, 7E, ENTDAA, and then emits 7E/R to process first target.
		 *   2 Stops just before the new Dynamic Address (DA) is to be emitted.
		 *
		 * PROCESS DAA:
		 *   1 The DA is written using MWDATAB or ADDR bits 6:0.
		 *   2 ProcessDAA is requested again to write the new address, and then starts the
		 *     next (START, 7E, ENTDAA) unless marked to STOP; an MSTATUS indicating NACK
		 *     means DA was not accepted (e.g. parity error). If PROCESSDAA is NACKed on the
		 *     7E/R, which means no more Slaves need a DA, then a COMPLETE will be signaled
		 *     (along with DONE), and a STOP issued automatically.
		 */
		writel(SVC_I3C_MCTRL_REQUEST_PROC_DAA |
		       SVC_I3C_MCTRL_TYPE_I3C |
		       SVC_I3C_MCTRL_IBIRESP_NACK |
		       SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE),
		       master->regs + SVC_I3C_MCTRL);

		/*
		 * Either one slave will send its ID, or the assignment process
		 * is done.
		 */
		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
						reg,
						SVC_I3C_MSTATUS_RXPEND(reg) |
						SVC_I3C_MSTATUS_MCTRLDONE(reg),
						1, 1000);
		if (ret)
			break;

		if (SVC_I3C_MSTATUS_RXPEND(reg)) {
			u8 data[6];

			/*
			 * One slave sends its ID to request for address assignment,
			 * prefilling the dynamic address can reduce SCL clock stalls
			 * and also fix the SVC_I3C_QUIRK_FIFO_EMPTY quirk.
			 *
			 * Ideally, prefilling before the processDAA command is better.
			 * However, it requires an additional check to write the dyn_addr
			 * at the right time because the driver needs to write the processDAA
			 * command twice for one assignment.
			 * Prefilling here is safe and efficient because the FIFO starts
			 * filling within a few hundred nanoseconds, which is significantly
			 * faster compared to the 64 SCL clock cycles.
			 */
			ret = i3c_master_get_free_addr(&master->base, last_addr + 1);
			if (ret < 0)
				break;

			dyn_addr = ret;
			writel(dyn_addr, master->regs + SVC_I3C_MWDATAB);

			/*
			 * We only care about the 48-bit provisioned ID yet to
			 * be sure a device does not nack an address twice.
			 * Otherwise, we would just need to flush the RX FIFO.
			 */
			ret = svc_i3c_master_readb(master, data, 6);
			if (ret)
				break;

			/* Big-endian: first received byte is the ID's MSB. */
			for (i = 0; i < 6; i++)
				prov_id[dev_nb] |= (u64)(data[i]) << (8 * (5 - i));

			/* We do not care about the BCR and DCR yet */
			ret = svc_i3c_master_readb(master, data, 2);
			if (ret)
				break;
		} else if (SVC_I3C_MSTATUS_IBIWON(reg)) {
			/* An IBI arbitrated the address header: service it first. */
			ret = svc_i3c_master_handle_ibi_won(master, reg);
			if (ret)
				break;
			continue;
		} else if (SVC_I3C_MSTATUS_MCTRLDONE(reg)) {
			if (SVC_I3C_MSTATUS_STATE_IDLE(reg) &&
			    SVC_I3C_MSTATUS_COMPLETE(reg)) {
				/*
				 * All devices received and acked they dynamic
				 * address, this is the natural end of the DAA
				 * procedure.
				 *
				 * Hardware will auto emit STOP at this case.
				 */
				*count = dev_nb;
				return 0;

			} else if (SVC_I3C_MSTATUS_NACKED(reg)) {
				/* No I3C devices attached */
				if (dev_nb == 0) {
					/*
					 * Hardware can't treat first NACK for ENTAA as normal
					 * COMPLETE. So need manual emit STOP.
					 */
					ret = 0;
					*count = 0;
					break;
				}

				/*
				 * A slave device nacked the address, this is
				 * allowed only once, DAA will be stopped and
				 * then resumed. The same device is supposed to
				 * answer again immediately and shall ack the
				 * address this time.
				 */
				if (prov_id[dev_nb] == nacking_prov_id) {
					ret = -EIO;
					break;
				}

				/* Roll back one device and remember who nacked. */
				dev_nb--;
				nacking_prov_id = prov_id[dev_nb];
				svc_i3c_master_emit_stop(master);

				continue;
			} else {
				break;
			}
		}

		/* Wait for the slave to be ready to receive its address */
		ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
						reg,
						SVC_I3C_MSTATUS_MCTRLDONE(reg) &&
						SVC_I3C_MSTATUS_STATE_DAA(reg) &&
						SVC_I3C_MSTATUS_BETWEEN(reg),
						0, 1000);
		if (ret)
			break;

		addrs[dev_nb] = dyn_addr;
		dev_dbg(master->dev, "DAA: device %d assigned to 0x%02x\n",
			dev_nb, addrs[dev_nb]);
		last_addr = addrs[dev_nb++];
	}

	/* Need manual issue STOP except for Complete condition */
	svc_i3c_master_emit_stop(master);
	svc_i3c_master_flush_fifo(master);

	return ret;
}
1115
/*
 * svc_i3c_update_ibirules() - program the hardware IBI address rules.
 *
 * The controller can auto-respond to IBIs only for a small set of addresses
 * (SVC_I3C_IBIRULES_ADDRS slots) that must all share the same payload
 * behavior. Build both candidate rule sets (devices with a mandatory byte
 * vs. devices without) and program whichever the hardware can express.
 * Returns -ERANGE when neither list fits the hardware constraints.
 */
static int svc_i3c_update_ibirules(struct svc_i3c_master *master)
{
	struct i3c_dev_desc *dev;
	u32 reg_mbyte = 0, reg_nobyte = SVC_I3C_IBIRULES_NOBYTE;
	unsigned int mbyte_addr_ok = 0, mbyte_addr_ko = 0, nobyte_addr_ok = 0,
		nobyte_addr_ko = 0;
	bool list_mbyte = false, list_nobyte = false;

	/* Create the IBIRULES register for both cases */
	i3c_bus_for_each_i3cdev(&master->base.bus, dev) {
		/* Devices that never raise IBIs need no rule. */
		if (!(dev->info.bcr & I3C_BCR_IBI_REQ_CAP))
			continue;

		if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) {
			/*
			 * NOTE: the address is accumulated at the current "ok"
			 * slot even for MSb=1 devices; that is harmless because
			 * a non-zero *_ko count disqualifies the whole list.
			 */
			reg_mbyte |= SVC_I3C_IBIRULES_ADDR(mbyte_addr_ok,
							   dev->info.dyn_addr);

			/* IBI rules cannot be applied to devices with MSb=1 */
			if (dev->info.dyn_addr & BIT(7))
				mbyte_addr_ko++;
			else
				mbyte_addr_ok++;
		} else {
			reg_nobyte |= SVC_I3C_IBIRULES_ADDR(nobyte_addr_ok,
							    dev->info.dyn_addr);

			/* IBI rules cannot be applied to devices with MSb=1 */
			if (dev->info.dyn_addr & BIT(7))
				nobyte_addr_ko++;
			else
				nobyte_addr_ok++;
		}
	}

	/* Device list cannot be handled by hardware */
	if (!mbyte_addr_ko && mbyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
		list_mbyte = true;

	if (!nobyte_addr_ko && nobyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
		list_nobyte = true;

	/* No list can be properly handled, return an error */
	if (!list_mbyte && !list_nobyte)
		return -ERANGE;

	/* Pick the first list that can be handled by hardware, randomly */
	if (list_mbyte)
		writel(reg_mbyte, master->regs + SVC_I3C_IBIRULES);
	else
		writel(reg_nobyte, master->regs + SVC_I3C_IBIRULES);

	return 0;
}
1169
/*
 * svc_i3c_master_do_daa() - .do_daa hook: run DAA and register found devices.
 *
 * Takes a runtime-PM reference for the bus activity, runs the low-level DAA
 * loop under the xferqueue lock (with the SKEW workaround applied on parts
 * that have the DAA-corrupt erratum), then registers every discovered device
 * with the I3C core and reprograms the hardware IBI rules.
 */
static int svc_i3c_master_do_daa(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	u8 addrs[SVC_I3C_MAX_DEVS];
	unsigned long flags;
	unsigned int dev_nb;
	int ret, i;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return ret;
	}

	spin_lock_irqsave(&master->xferqueue.lock, flags);

	/* Erratum workaround: force SKEW=1 for the duration of DAA. */
	if (svc_has_daa_corrupt(master))
		writel(master->mctrl_config | SVC_I3C_MCONFIG_SKEW(1),
		       master->regs + SVC_I3C_MCONFIG);

	ret = svc_i3c_master_do_daa_locked(master, addrs, &dev_nb);

	/* Restore the original MCONFIG once DAA is over. */
	if (svc_has_daa_corrupt(master))
		writel(master->mctrl_config, master->regs + SVC_I3C_MCONFIG);

	spin_unlock_irqrestore(&master->xferqueue.lock, flags);

	svc_i3c_master_clear_merrwarn(master);
	if (ret)
		goto rpm_out;

	/*
	 * Register all devices who participated to the core
	 *
	 * If two devices (A and B) are detected in DAA and address 0xa is assigned to
	 * device A and 0xb to device B, a failure in i3c_master_add_i3c_dev_locked()
	 * for device A (addr: 0xa) could prevent device B (addr: 0xb) from being
	 * registered on the bus. The I3C stack might still consider 0xb a free
	 * address. If a subsequent Hotjoin occurs, 0xb might be assigned to Device A,
	 * causing both devices A and B to use the same address 0xb, violating the I3C
	 * specification.
	 *
	 * The return value for i3c_master_add_i3c_dev_locked() should not be checked
	 * because subsequent steps will scan the entire I3C bus, independent of
	 * whether i3c_master_add_i3c_dev_locked() returns success.
	 *
	 * If device A registration fails, there is still a chance to register device
	 * B. i3c_master_add_i3c_dev_locked() can reset DAA if a failure occurs while
	 * retrieving device information.
	 */
	for (i = 0; i < dev_nb; i++)
		i3c_master_add_i3c_dev_locked(m, addrs[i]);

	/* Configure IBI auto-rules */
	ret = svc_i3c_update_ibirules(master);
	if (ret)
		dev_err(master->dev, "Cannot handle such a list of devices");

rpm_out:
	pm_runtime_put_autosuspend(master->dev);

	return ret;
}
1233
/*
 * Drain the RX FIFO into @in until the controller reports COMPLETE.
 * Returns the number of bytes received, or a negative errno on timeout
 * or if the device pushes more data than @len allows.
 */
static int svc_i3c_master_read(struct svc_i3c_master *master,
			       u8 *in, unsigned int len)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(1000);
	unsigned int rx_count, i;
	u32 status, dctrl;
	int pos = 0;

	for (;;) {
		bool done;

		/*
		 * Sample COMPLETE *before* draining: any byte landing in
		 * between is still picked up by the drain below, and the
		 * loop only exits after that final drain.
		 */
		status = readl(master->regs + SVC_I3C_MSTATUS);
		done = SVC_I3C_MSTATUS_COMPLETE(status) != 0;

		if (time_after(jiffies, deadline)) {
			dev_dbg(master->dev, "I3C read timeout\n");
			return -ETIMEDOUT;
		}

		dctrl = readl(master->regs + SVC_I3C_MDATACTRL);
		rx_count = SVC_I3C_MDATACTRL_RXCOUNT(dctrl);
		if (pos + rx_count > len) {
			dev_err(master->dev, "I3C receive length too long!\n");
			return -EINVAL;
		}
		for (i = 0; i < rx_count; i++)
			in[pos + i] = readl(master->regs + SVC_I3C_MRDATAB);

		pos += rx_count;
		if (done)
			break;
	}

	return pos;
}
1267
svc_i3c_master_write(struct svc_i3c_master * master,const u8 * out,unsigned int len)1268 static int svc_i3c_master_write(struct svc_i3c_master *master,
1269 const u8 *out, unsigned int len)
1270 {
1271 int offset = 0, ret;
1272 u32 mdctrl;
1273
1274 while (offset < len) {
1275 ret = readl_poll_timeout(master->regs + SVC_I3C_MDATACTRL,
1276 mdctrl,
1277 !(mdctrl & SVC_I3C_MDATACTRL_TXFULL),
1278 0, 1000);
1279 if (ret)
1280 return ret;
1281
1282 /*
1283 * The last byte to be sent over the bus must either have the
1284 * "end" bit set or be written in MWDATABE.
1285 */
1286 if (likely(offset < (len - 1)))
1287 writel(out[offset++], master->regs + SVC_I3C_MWDATAB);
1288 else
1289 writel(out[offset++], master->regs + SVC_I3C_MWDATABE);
1290 }
1291
1292 return 0;
1293 }
1294
/*
 * svc_i3c_master_xfer() - run one (repeated-)START + data phase on the bus.
 * @master: controller context
 * @rnw: true for a read, false for a write
 * @xfer_type: SVC_I3C_MCTRL_TYPE_I3C or SVC_I3C_MCTRL_TYPE_I2C framing
 * @addr: target address (0x7e for broadcast)
 * @in/@out: read destination / write source buffer
 * @xfer_len: number of bytes to move
 * @actual_len: in: read termination count (RDTERM); out: bytes actually read
 * @continued: true to leave the bus without STOP (repeated START follows)
 * @repeat_start: true when this command follows a previous one in the frame
 *
 * A first START may lose arbitration to an IBI or be nacked (spec 5.1.2.2.3),
 * hence the retry: one extra attempt for an initial START, none after a
 * repeated START (which cannot be arbitrated). On any failure the bus is
 * stopped and the FIFOs/error flags are cleaned.
 */
static int svc_i3c_master_xfer(struct svc_i3c_master *master,
			       bool rnw, unsigned int xfer_type, u8 addr,
			       u8 *in, const u8 *out, unsigned int xfer_len,
			       unsigned int *actual_len, bool continued, bool repeat_start)
{
	int retry = repeat_start ? 1 : 2;
	u32 reg;
	int ret;

	/* clean SVC_I3C_MINT_IBIWON w1c bits */
	writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);


	while (retry--) {
		writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
		       xfer_type |
		       SVC_I3C_MCTRL_IBIRESP_NACK |
		       SVC_I3C_MCTRL_DIR(rnw) |
		       SVC_I3C_MCTRL_ADDR(addr) |
		       SVC_I3C_MCTRL_RDTERM(*actual_len),
		       master->regs + SVC_I3C_MCTRL);

		/*
		 * The entire transaction can consist of multiple write transfers.
		 * Prefilling before EmitStartAddr causes the data to be emitted
		 * immediately, becoming part of the previous transfer.
		 * The only way to work around this hardware issue is to let the
		 * FIFO start filling as soon as possible after EmitStartAddr.
		 */
		if (svc_has_quirk(master, SVC_I3C_QUIRK_FIFO_EMPTY) && !rnw && xfer_len) {
			u32 space, end, len;

			reg = readl(master->regs + SVC_I3C_MDATACTRL);
			space = SVC_I3C_FIFO_SIZE - SVC_I3C_MDATACTRL_TXCOUNT(reg);
			if (space) {
				/* Only flag END if the whole message fits. */
				end = xfer_len > space ? 0 : SVC_I3C_MWDATAB_END;
				len = min_t(u32, xfer_len, space);
				writesb(master->regs + SVC_I3C_MWDATAB1, out, len - 1);
				/* Mark END bit if this is the last byte */
				writel(out[len - 1] | end, master->regs + SVC_I3C_MWDATAB);
				xfer_len -= len;
				out += len;
			}
		}

		ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
					 SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000);
		if (ret)
			goto emit_stop;

		/*
		 * According to I3C spec ver 1.1.1, 5.1.2.2.3 Consequence of Controller Starting a
		 * Frame with I3C Target Address.
		 *
		 * The I3C Controller normally should start a Frame, the Address may be arbitrated,
		 * and so the Controller shall monitor to see whether an In-Band Interrupt request,
		 * a Controller Role Request (i.e., Secondary Controller requests to become the
		 * Active Controller), or a Hot-Join Request has been made.
		 *
		 * If missed IBIWON check, the wrong data will be return. When IBIWON happen, issue
		 * repeat start. Address arbitrate only happen at START, never happen at REPEAT
		 * start.
		 */
		if (SVC_I3C_MSTATUS_IBIWON(reg)) {
			ret = svc_i3c_master_handle_ibi_won(master, reg);
			if (ret)
				goto emit_stop;
			continue;
		}

		if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) {
			/*
			 * According to I3C Spec 1.1.1, 11-Jun-2021, section: 5.1.2.2.3.
			 * If the Controller chooses to start an I3C Message with an I3C Dynamic
			 * Address, then special provisions shall be made because that same I3C
			 * Target may be initiating an IBI or a Controller Role Request. So, one of
			 * three things may happen: (skip 1, 2)
			 *
			 * 3. The Addresses match and the RnW bits also match, and so neither
			 * Controller nor Target will ACK since both are expecting the other side to
			 * provide ACK. As a result, each side might think it had "won" arbitration,
			 * but neither side would continue, as each would subsequently see that the
			 * other did not provide ACK.
			 * ...
			 * For either value of RnW: Due to the NACK, the Controller shall defer the
			 * Private Write or Private Read, and should typically transmit the Target
			 * Address again after a Repeated START (i.e., the next one or any one prior
			 * to a STOP in the Frame). Since the Address Header following a Repeated
			 * START is not arbitrated, the Controller will always win (see Section
			 * 5.1.2.2.4).
			 */
			if (retry && addr != 0x7e) {
				/* Clear the NACK and retry the address. */
				writel(SVC_I3C_MERRWARN_NACK, master->regs + SVC_I3C_MERRWARN);
			} else {
				ret = -ENXIO;
				*actual_len = 0;
				goto emit_stop;
			}
		} else {
			break;
		}
	}

	if (rnw)
		ret = svc_i3c_master_read(master, in, xfer_len);
	else
		ret = svc_i3c_master_write(master, out, xfer_len);
	if (ret < 0)
		goto emit_stop;

	/* svc_i3c_master_read() returns the byte count on success. */
	if (rnw)
		*actual_len = ret;

	ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
				 SVC_I3C_MSTATUS_COMPLETE(reg), 0, 1000);
	if (ret)
		goto emit_stop;

	writel(SVC_I3C_MINT_COMPLETE, master->regs + SVC_I3C_MSTATUS);

	if (!continued) {
		svc_i3c_master_emit_stop(master);

		/* Wait idle if stop is sent. */
		readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
				   SVC_I3C_MSTATUS_STATE_IDLE(reg), 0, 1000);
	}

	return 0;

emit_stop:
	svc_i3c_master_emit_stop(master);
	svc_i3c_master_clear_merrwarn(master);
	svc_i3c_master_flush_fifo(master);

	return ret;
}
1432
1433 static struct svc_i3c_xfer *
svc_i3c_master_alloc_xfer(struct svc_i3c_master * master,unsigned int ncmds)1434 svc_i3c_master_alloc_xfer(struct svc_i3c_master *master, unsigned int ncmds)
1435 {
1436 struct svc_i3c_xfer *xfer;
1437
1438 xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
1439 if (!xfer)
1440 return NULL;
1441
1442 INIT_LIST_HEAD(&xfer->node);
1443 xfer->ncmds = ncmds;
1444 xfer->ret = -ETIMEDOUT;
1445
1446 return xfer;
1447 }
1448
/* Commands are allocated inline (struct_size), so a single kfree suffices. */
static void svc_i3c_master_free_xfer(struct svc_i3c_xfer *xfer)
{
	kfree(xfer);
}
1453
svc_i3c_master_dequeue_xfer_locked(struct svc_i3c_master * master,struct svc_i3c_xfer * xfer)1454 static void svc_i3c_master_dequeue_xfer_locked(struct svc_i3c_master *master,
1455 struct svc_i3c_xfer *xfer)
1456 {
1457 if (master->xferqueue.cur == xfer)
1458 master->xferqueue.cur = NULL;
1459 else
1460 list_del_init(&xfer->node);
1461 }
1462
/* Lock-taking wrapper around svc_i3c_master_dequeue_xfer_locked(). */
static void svc_i3c_master_dequeue_xfer(struct svc_i3c_master *master,
					struct svc_i3c_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&master->xferqueue.lock, flags);
	svc_i3c_master_dequeue_xfer_locked(master, xfer);
	spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}
1472
svc_i3c_master_start_xfer_locked(struct svc_i3c_master * master)1473 static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
1474 {
1475 struct svc_i3c_xfer *xfer = master->xferqueue.cur;
1476 int ret, i;
1477
1478 if (!xfer)
1479 return;
1480
1481 svc_i3c_master_clear_merrwarn(master);
1482 svc_i3c_master_flush_fifo(master);
1483
1484 for (i = 0; i < xfer->ncmds; i++) {
1485 struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1486
1487 ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type,
1488 cmd->addr, cmd->in, cmd->out,
1489 cmd->len, &cmd->actual_len,
1490 cmd->continued, i > 0);
1491 /* cmd->xfer is NULL if I2C or CCC transfer */
1492 if (cmd->xfer)
1493 cmd->xfer->actual_len = cmd->actual_len;
1494
1495 if (ret)
1496 break;
1497 }
1498
1499 xfer->ret = ret;
1500 complete(&xfer->comp);
1501
1502 if (ret < 0)
1503 svc_i3c_master_dequeue_xfer_locked(master, xfer);
1504
1505 xfer = list_first_entry_or_null(&master->xferqueue.list,
1506 struct svc_i3c_xfer,
1507 node);
1508 if (xfer)
1509 list_del_init(&xfer->node);
1510
1511 master->xferqueue.cur = xfer;
1512 svc_i3c_master_start_xfer_locked(master);
1513 }
1514
svc_i3c_master_enqueue_xfer(struct svc_i3c_master * master,struct svc_i3c_xfer * xfer)1515 static void svc_i3c_master_enqueue_xfer(struct svc_i3c_master *master,
1516 struct svc_i3c_xfer *xfer)
1517 {
1518 unsigned long flags;
1519 int ret;
1520
1521 ret = pm_runtime_resume_and_get(master->dev);
1522 if (ret < 0) {
1523 dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1524 return;
1525 }
1526
1527 init_completion(&xfer->comp);
1528 spin_lock_irqsave(&master->xferqueue.lock, flags);
1529 if (master->xferqueue.cur) {
1530 list_add_tail(&xfer->node, &master->xferqueue.list);
1531 } else {
1532 master->xferqueue.cur = xfer;
1533 svc_i3c_master_start_xfer_locked(master);
1534 }
1535 spin_unlock_irqrestore(&master->xferqueue.lock, flags);
1536
1537 pm_runtime_put_autosuspend(master->dev);
1538 }
1539
1540 static bool
svc_i3c_master_supports_ccc_cmd(struct i3c_master_controller * master,const struct i3c_ccc_cmd * cmd)1541 svc_i3c_master_supports_ccc_cmd(struct i3c_master_controller *master,
1542 const struct i3c_ccc_cmd *cmd)
1543 {
1544 /* No software support for CCC commands targeting more than one slave */
1545 return (cmd->ndests == 1);
1546 }
1547
/*
 * Send a broadcast CCC as one write frame: the CCC ID is prepended to the
 * payload in a bounce buffer (hence the +1), the transfer is queued and the
 * caller sleeps (up to 1s) until the state machine completes it.
 */
static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
					      struct i3c_ccc_cmd *ccc)
{
	unsigned int xfer_len = ccc->dests[0].payload.len + 1;
	struct svc_i3c_xfer *xfer;
	struct svc_i3c_cmd *cmd;
	u8 *buf;
	int ret;

	xfer = svc_i3c_master_alloc_xfer(master, 1);
	if (!xfer)
		return -ENOMEM;

	buf = kmalloc(xfer_len, GFP_KERNEL);
	if (!buf) {
		svc_i3c_master_free_xfer(xfer);
		return -ENOMEM;
	}

	/* Frame layout: CCC ID first, then the caller's payload. */
	buf[0] = ccc->id;
	memcpy(&buf[1], ccc->dests[0].payload.data, ccc->dests[0].payload.len);

	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;

	cmd = &xfer->cmds[0];
	cmd->addr = ccc->dests[0].addr;
	cmd->rnw = ccc->rnw;
	cmd->in = NULL;
	cmd->out = buf;
	cmd->len = xfer_len;
	cmd->actual_len = 0;
	cmd->continued = false;

	/* On timeout the transfer is pulled off the queue; xfer->ret keeps
	 * its -ETIMEDOUT default in that case.
	 */
	mutex_lock(&master->lock);
	svc_i3c_master_enqueue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
		svc_i3c_master_dequeue_xfer(master, xfer);
	mutex_unlock(&master->lock);

	ret = xfer->ret;
	kfree(buf);
	svc_i3c_master_free_xfer(xfer);

	return ret;
}
1593
/*
 * Send a direct CCC as a two-command frame: a broadcast write of the CCC ID,
 * then (after a repeated START) the directed read or write to the target.
 * The payload length is shrunk to the actual byte count if the target
 * terminated a read early.
 */
static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
					      struct i3c_ccc_cmd *ccc)
{
	unsigned int xfer_len = ccc->dests[0].payload.len;
	/* For reads this seeds RDTERM with the expected length. */
	unsigned int actual_len = ccc->rnw ? xfer_len : 0;
	struct svc_i3c_xfer *xfer;
	struct svc_i3c_cmd *cmd;
	int ret;

	xfer = svc_i3c_master_alloc_xfer(master, 2);
	if (!xfer)
		return -ENOMEM;

	xfer->type = SVC_I3C_MCTRL_TYPE_I3C;

	/* Broadcasted message */
	cmd = &xfer->cmds[0];
	cmd->addr = I3C_BROADCAST_ADDR;
	cmd->rnw = 0;
	cmd->in = NULL;
	cmd->out = &ccc->id;
	cmd->len = 1;
	cmd->actual_len = 0;
	cmd->continued = true;

	/* Directed message */
	cmd = &xfer->cmds[1];
	cmd->addr = ccc->dests[0].addr;
	cmd->rnw = ccc->rnw;
	cmd->in = ccc->rnw ? ccc->dests[0].payload.data : NULL;
	cmd->out = ccc->rnw ? NULL : ccc->dests[0].payload.data;
	cmd->len = xfer_len;
	cmd->actual_len = actual_len;
	cmd->continued = false;

	mutex_lock(&master->lock);
	svc_i3c_master_enqueue_xfer(master, xfer);
	if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
		svc_i3c_master_dequeue_xfer(master, xfer);
	mutex_unlock(&master->lock);

	/* Report a short read back to the caller. */
	if (cmd->actual_len != xfer_len)
		ccc->dests[0].payload.len = cmd->actual_len;

	ret = xfer->ret;
	svc_i3c_master_free_xfer(xfer);

	return ret;
}
1643
svc_i3c_master_send_ccc_cmd(struct i3c_master_controller * m,struct i3c_ccc_cmd * cmd)1644 static int svc_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
1645 struct i3c_ccc_cmd *cmd)
1646 {
1647 struct svc_i3c_master *master = to_svc_i3c_master(m);
1648 bool broadcast = cmd->id < 0x80;
1649 int ret;
1650
1651 if (broadcast)
1652 ret = svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
1653 else
1654 ret = svc_i3c_master_send_direct_ccc_cmd(master, cmd);
1655
1656 if (ret)
1657 cmd->err = I3C_ERROR_M2;
1658
1659 return ret;
1660 }
1661
svc_i3c_master_priv_xfers(struct i3c_dev_desc * dev,struct i3c_priv_xfer * xfers,int nxfers)1662 static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
1663 struct i3c_priv_xfer *xfers,
1664 int nxfers)
1665 {
1666 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1667 struct svc_i3c_master *master = to_svc_i3c_master(m);
1668 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1669 struct svc_i3c_xfer *xfer;
1670 int ret, i;
1671
1672 xfer = svc_i3c_master_alloc_xfer(master, nxfers);
1673 if (!xfer)
1674 return -ENOMEM;
1675
1676 xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1677
1678 for (i = 0; i < nxfers; i++) {
1679 struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1680
1681 cmd->xfer = &xfers[i];
1682 cmd->addr = master->addrs[data->index];
1683 cmd->rnw = xfers[i].rnw;
1684 cmd->in = xfers[i].rnw ? xfers[i].data.in : NULL;
1685 cmd->out = xfers[i].rnw ? NULL : xfers[i].data.out;
1686 cmd->len = xfers[i].len;
1687 cmd->actual_len = xfers[i].rnw ? xfers[i].len : 0;
1688 cmd->continued = (i + 1) < nxfers;
1689 }
1690
1691 mutex_lock(&master->lock);
1692 svc_i3c_master_enqueue_xfer(master, xfer);
1693 if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1694 svc_i3c_master_dequeue_xfer(master, xfer);
1695 mutex_unlock(&master->lock);
1696
1697 ret = xfer->ret;
1698 svc_i3c_master_free_xfer(xfer);
1699
1700 return ret;
1701 }
1702
svc_i3c_master_i2c_xfers(struct i2c_dev_desc * dev,struct i2c_msg * xfers,int nxfers)1703 static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
1704 struct i2c_msg *xfers,
1705 int nxfers)
1706 {
1707 struct i3c_master_controller *m = i2c_dev_get_master(dev);
1708 struct svc_i3c_master *master = to_svc_i3c_master(m);
1709 struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
1710 struct svc_i3c_xfer *xfer;
1711 int ret, i;
1712
1713 xfer = svc_i3c_master_alloc_xfer(master, nxfers);
1714 if (!xfer)
1715 return -ENOMEM;
1716
1717 xfer->type = SVC_I3C_MCTRL_TYPE_I2C;
1718
1719 for (i = 0; i < nxfers; i++) {
1720 struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1721
1722 cmd->addr = master->addrs[data->index];
1723 cmd->rnw = xfers[i].flags & I2C_M_RD;
1724 cmd->in = cmd->rnw ? xfers[i].buf : NULL;
1725 cmd->out = cmd->rnw ? NULL : xfers[i].buf;
1726 cmd->len = xfers[i].len;
1727 cmd->actual_len = cmd->rnw ? xfers[i].len : 0;
1728 cmd->continued = (i + 1 < nxfers);
1729 }
1730
1731 mutex_lock(&master->lock);
1732 svc_i3c_master_enqueue_xfer(master, xfer);
1733 if (!wait_for_completion_timeout(&xfer->comp, m->i2c.timeout))
1734 svc_i3c_master_dequeue_xfer(master, xfer);
1735 mutex_unlock(&master->lock);
1736
1737 ret = xfer->ret;
1738 svc_i3c_master_free_xfer(xfer);
1739
1740 return ret;
1741 }
1742
svc_i3c_master_request_ibi(struct i3c_dev_desc * dev,const struct i3c_ibi_setup * req)1743 static int svc_i3c_master_request_ibi(struct i3c_dev_desc *dev,
1744 const struct i3c_ibi_setup *req)
1745 {
1746 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1747 struct svc_i3c_master *master = to_svc_i3c_master(m);
1748 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1749 unsigned long flags;
1750 unsigned int i;
1751
1752 if (dev->ibi->max_payload_len > SVC_I3C_FIFO_SIZE) {
1753 dev_err(master->dev, "IBI max payload %d should be < %d\n",
1754 dev->ibi->max_payload_len, SVC_I3C_FIFO_SIZE);
1755 return -ERANGE;
1756 }
1757
1758 data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
1759 if (IS_ERR(data->ibi_pool))
1760 return PTR_ERR(data->ibi_pool);
1761
1762 spin_lock_irqsave(&master->ibi.lock, flags);
1763 for (i = 0; i < master->ibi.num_slots; i++) {
1764 if (!master->ibi.slots[i]) {
1765 data->ibi = i;
1766 master->ibi.slots[i] = dev;
1767 break;
1768 }
1769 }
1770 spin_unlock_irqrestore(&master->ibi.lock, flags);
1771
1772 if (i < master->ibi.num_slots)
1773 return 0;
1774
1775 i3c_generic_ibi_free_pool(data->ibi_pool);
1776 data->ibi_pool = NULL;
1777
1778 return -ENOSPC;
1779 }
1780
svc_i3c_master_free_ibi(struct i3c_dev_desc * dev)1781 static void svc_i3c_master_free_ibi(struct i3c_dev_desc *dev)
1782 {
1783 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1784 struct svc_i3c_master *master = to_svc_i3c_master(m);
1785 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1786 unsigned long flags;
1787
1788 spin_lock_irqsave(&master->ibi.lock, flags);
1789 master->ibi.slots[data->ibi] = NULL;
1790 data->ibi = -1;
1791 spin_unlock_irqrestore(&master->ibi.lock, flags);
1792
1793 i3c_generic_ibi_free_pool(data->ibi_pool);
1794 }
1795
svc_i3c_master_enable_ibi(struct i3c_dev_desc * dev)1796 static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
1797 {
1798 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1799 struct svc_i3c_master *master = to_svc_i3c_master(m);
1800 int ret;
1801
1802 ret = pm_runtime_resume_and_get(master->dev);
1803 if (ret < 0) {
1804 dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1805 return ret;
1806 }
1807
1808 master->enabled_events++;
1809 svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
1810
1811 return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
1812 }
1813
/*
 * svc_i3c_master_disable_ibi() - .disable_ibi hook: stop SIR from @dev.
 *
 * Drops the event count (masking interrupts once nobody listens), sends
 * DISEC to the target and releases the runtime-PM reference taken by
 * svc_i3c_master_enable_ibi().
 */
static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	int ret;

	master->enabled_events--;
	if (!master->enabled_events)
		svc_i3c_master_disable_interrupts(master);

	ret = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);

	pm_runtime_put_autosuspend(master->dev);

	return ret;
}
1830
/*
 * svc_i3c_master_enable_hotjoin() - .enable_hotjoin hook.
 *
 * Records the hot-join interest in the event mask and arms the SLVSTART
 * interrupt so incoming hot-join requests wake the IRQ handler. Holds a
 * runtime-PM reference until hot-join is disabled again.
 */
static int svc_i3c_master_enable_hotjoin(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return ret;
	}

	master->enabled_events |= SVC_I3C_EVENT_HOTJOIN;

	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);

	return 0;
}
1848
/*
 * svc_i3c_master_disable_hotjoin() - .disable_hotjoin hook.
 *
 * Clears the hot-join interest, masks interrupts if no event source is
 * left, and releases the runtime-PM reference taken on enable.
 */
static int svc_i3c_master_disable_hotjoin(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);

	master->enabled_events &= ~SVC_I3C_EVENT_HOTJOIN;

	if (!master->enabled_events)
		svc_i3c_master_disable_interrupts(master);

	pm_runtime_put_autosuspend(master->dev);

	return 0;
}
1862
svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc * dev,struct i3c_ibi_slot * slot)1863 static void svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
1864 struct i3c_ibi_slot *slot)
1865 {
1866 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1867
1868 i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
1869 }
1870
/* Controller operations exposed to the I3C core. */
static const struct i3c_master_controller_ops svc_i3c_master_ops = {
	.bus_init = svc_i3c_master_bus_init,
	.bus_cleanup = svc_i3c_master_bus_cleanup,
	.attach_i3c_dev = svc_i3c_master_attach_i3c_dev,
	.detach_i3c_dev = svc_i3c_master_detach_i3c_dev,
	.reattach_i3c_dev = svc_i3c_master_reattach_i3c_dev,
	.attach_i2c_dev = svc_i3c_master_attach_i2c_dev,
	.detach_i2c_dev = svc_i3c_master_detach_i2c_dev,
	.do_daa = svc_i3c_master_do_daa,
	.supports_ccc_cmd = svc_i3c_master_supports_ccc_cmd,
	.send_ccc_cmd = svc_i3c_master_send_ccc_cmd,
	.priv_xfers = svc_i3c_master_priv_xfers,
	.i2c_xfers = svc_i3c_master_i2c_xfers,
	.request_ibi = svc_i3c_master_request_ibi,
	.free_ibi = svc_i3c_master_free_ibi,
	.recycle_ibi_slot = svc_i3c_master_recycle_ibi_slot,
	.enable_ibi = svc_i3c_master_enable_ibi,
	.disable_ibi = svc_i3c_master_disable_ibi,
	.enable_hotjoin = svc_i3c_master_enable_hotjoin,
	.disable_hotjoin = svc_i3c_master_disable_hotjoin,
	.set_speed = svc_i3c_master_set_speed,
};
1893
svc_i3c_master_probe(struct platform_device * pdev)1894 static int svc_i3c_master_probe(struct platform_device *pdev)
1895 {
1896 struct device *dev = &pdev->dev;
1897 struct svc_i3c_master *master;
1898 int ret, i;
1899
1900 master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
1901 if (!master)
1902 return -ENOMEM;
1903
1904 master->drvdata = of_device_get_match_data(dev);
1905 if (!master->drvdata)
1906 return -EINVAL;
1907
1908 master->regs = devm_platform_ioremap_resource(pdev, 0);
1909 if (IS_ERR(master->regs))
1910 return PTR_ERR(master->regs);
1911
1912 master->num_clks = devm_clk_bulk_get_all(dev, &master->clks);
1913 if (master->num_clks < 0)
1914 return dev_err_probe(dev, -EINVAL, "can't get I3C clocks\n");
1915
1916 for (i = 0; i < master->num_clks; i++) {
1917 if (!strcmp(master->clks[i].id, "fast_clk"))
1918 break;
1919 }
1920
1921 if (i == master->num_clks)
1922 return dev_err_probe(dev, -EINVAL,
1923 "can't get I3C peripheral clock\n");
1924
1925 master->fclk = master->clks[i].clk;
1926 if (IS_ERR(master->fclk))
1927 return PTR_ERR(master->fclk);
1928
1929 master->irq = platform_get_irq(pdev, 0);
1930 if (master->irq < 0)
1931 return master->irq;
1932
1933 master->dev = dev;
1934 ret = clk_bulk_prepare_enable(master->num_clks, master->clks);
1935 if (ret)
1936 return dev_err_probe(dev, ret, "can't enable I3C clocks\n");
1937
1938 INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
1939 mutex_init(&master->lock);
1940
1941 ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
1942 IRQF_NO_SUSPEND, "svc-i3c-irq", master);
1943 if (ret)
1944 goto err_disable_clks;
1945
1946 master->free_slots = GENMASK(SVC_I3C_MAX_DEVS - 1, 0);
1947
1948 spin_lock_init(&master->xferqueue.lock);
1949 INIT_LIST_HEAD(&master->xferqueue.list);
1950
1951 spin_lock_init(&master->ibi.lock);
1952 master->ibi.num_slots = SVC_I3C_MAX_DEVS;
1953 master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
1954 sizeof(*master->ibi.slots),
1955 GFP_KERNEL);
1956 if (!master->ibi.slots) {
1957 ret = -ENOMEM;
1958 goto err_disable_clks;
1959 }
1960
1961 platform_set_drvdata(pdev, master);
1962
1963 pm_runtime_set_autosuspend_delay(&pdev->dev, SVC_I3C_PM_TIMEOUT_MS);
1964 pm_runtime_use_autosuspend(&pdev->dev);
1965 pm_runtime_get_noresume(&pdev->dev);
1966 pm_runtime_set_active(&pdev->dev);
1967 pm_runtime_enable(&pdev->dev);
1968
1969 svc_i3c_master_reset(master);
1970
1971 /* Register the master */
1972 ret = i3c_master_register(&master->base, &pdev->dev,
1973 &svc_i3c_master_ops, false);
1974 if (ret)
1975 goto rpm_disable;
1976
1977 pm_runtime_put_autosuspend(&pdev->dev);
1978
1979 return 0;
1980
1981 rpm_disable:
1982 pm_runtime_dont_use_autosuspend(&pdev->dev);
1983 pm_runtime_put_noidle(&pdev->dev);
1984 pm_runtime_disable(&pdev->dev);
1985 pm_runtime_set_suspended(&pdev->dev);
1986
1987 err_disable_clks:
1988 clk_bulk_disable_unprepare(master->num_clks, master->clks);
1989
1990 return ret;
1991 }
1992
svc_i3c_master_remove(struct platform_device * pdev)1993 static void svc_i3c_master_remove(struct platform_device *pdev)
1994 {
1995 struct svc_i3c_master *master = platform_get_drvdata(pdev);
1996
1997 cancel_work_sync(&master->hj_work);
1998 i3c_master_unregister(&master->base);
1999
2000 pm_runtime_dont_use_autosuspend(&pdev->dev);
2001 pm_runtime_disable(&pdev->dev);
2002 }
2003
/*
 * Snapshot the registers that are lost when the controller is power-gated:
 * the bus configuration (MCONFIG) and our own dynamic address (MDYNADDR).
 * Must run while the controller clocks are still enabled.
 */
static void svc_i3c_save_regs(struct svc_i3c_master *master)
{
	master->saved_regs.mconfig = readl(master->regs + SVC_I3C_MCONFIG);
	master->saved_regs.mdynaddr = readl(master->regs + SVC_I3C_MDYNADDR);
}
2009
/*
 * Restore the saved registers after a power-down, but only if MDYNADDR no
 * longer matches the saved value — i.e. the register state was actually
 * lost. MCONFIG is written first so the controller is configured before
 * its dynamic address is reinstated.
 */
static void svc_i3c_restore_regs(struct svc_i3c_master *master)
{
	if (readl(master->regs + SVC_I3C_MDYNADDR) !=
	    master->saved_regs.mdynaddr) {
		writel(master->saved_regs.mconfig,
		       master->regs + SVC_I3C_MCONFIG);
		writel(master->saved_regs.mdynaddr,
		       master->regs + SVC_I3C_MDYNADDR);
	}
}
2020
/*
 * Runtime suspend: registers must be saved while the clocks are still
 * running, hence the strict save -> clock-gate -> pinctrl-sleep order.
 */
static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
{
	struct svc_i3c_master *master = dev_get_drvdata(dev);

	svc_i3c_save_regs(master);
	clk_bulk_disable_unprepare(master->num_clks, master->clks);
	pinctrl_pm_select_sleep_state(dev);

	return 0;
}
2031
/*
 * Runtime resume: mirror of runtime suspend — restore pinmux, re-enable
 * clocks, then restore registers (which requires the clocks to be on).
 */
static int __maybe_unused svc_i3c_runtime_resume(struct device *dev)
{
	struct svc_i3c_master *master = dev_get_drvdata(dev);
	int ret;

	pinctrl_pm_select_default_state(dev);
	ret = clk_bulk_prepare_enable(master->num_clks, master->clks);
	if (ret)
		return ret;

	svc_i3c_restore_regs(master);

	return 0;
}
2046
/*
 * System sleep reuses the runtime PM handlers via force_suspend/resume in
 * the noirq phase; runtime PM uses the dedicated suspend/resume callbacks.
 */
static const struct dev_pm_ops svc_i3c_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				      pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(svc_i3c_runtime_suspend,
			   svc_i3c_runtime_resume, NULL)
};
2053
/*
 * Nuvoton NPCM845 integration of the Silvaco IP needs workarounds for
 * several silicon erratas (quirk flags are defined earlier in this file).
 */
static const struct svc_i3c_drvdata npcm845_drvdata = {
	.quirks = SVC_I3C_QUIRK_FIFO_EMPTY |
		SVC_I3C_QUIRK_FALSE_SLVSTART |
		SVC_I3C_QUIRK_DAA_CORRUPT,
};
2059
/* The stock Silvaco IP needs no quirks. */
static const struct svc_i3c_drvdata svc_default_drvdata = {};

/* Devicetree match table; .data selects the per-SoC quirk set. */
static const struct of_device_id svc_i3c_master_of_match_tbl[] = {
	{ .compatible = "nuvoton,npcm845-i3c", .data = &npcm845_drvdata },
	{ .compatible = "silvaco,i3c-master-v1", .data = &svc_default_drvdata },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, svc_i3c_master_of_match_tbl);
2068
/* Platform driver glue: probe/remove plus PM ops and OF matching. */
static struct platform_driver svc_i3c_master = {
	.probe = svc_i3c_master_probe,
	.remove = svc_i3c_master_remove,
	.driver = {
		.name = "silvaco-i3c-master",
		.of_match_table = svc_i3c_master_of_match_tbl,
		.pm = &svc_i3c_pm_ops,
	},
};
module_platform_driver(svc_i3c_master);
2079
/* Module metadata. */
MODULE_AUTHOR("Conor Culhane <conor.culhane@silvaco.com>");
MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
MODULE_DESCRIPTION("Silvaco dual-role I3C master driver");
MODULE_LICENSE("GPL v2");
2084