1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Silvaco dual-role I3C master driver
4 *
5 * Copyright (C) 2020 Silvaco
6 * Author: Miquel RAYNAL <miquel.raynal@bootlin.com>
7 * Based on a work from: Conor Culhane <conor.culhane@silvaco.com>
8 */
9
10 #include <linux/bitfield.h>
11 #include <linux/clk.h>
12 #include <linux/completion.h>
13 #include <linux/errno.h>
14 #include <linux/i3c/master.h>
15 #include <linux/interrupt.h>
16 #include <linux/iopoll.h>
17 #include <linux/list.h>
18 #include <linux/module.h>
19 #include <linux/of.h>
20 #include <linux/pinctrl/consumer.h>
21 #include <linux/platform_device.h>
22 #include <linux/pm_runtime.h>
23
24 /* Master Mode Registers */
25 #define SVC_I3C_MCONFIG 0x000
26 #define SVC_I3C_MCONFIG_MASTER_EN BIT(0)
27 #define SVC_I3C_MCONFIG_DISTO(x) FIELD_PREP(BIT(3), (x))
28 #define SVC_I3C_MCONFIG_HKEEP(x) FIELD_PREP(GENMASK(5, 4), (x))
29 #define SVC_I3C_MCONFIG_ODSTOP(x) FIELD_PREP(BIT(6), (x))
30 #define SVC_I3C_MCONFIG_PPBAUD(x) FIELD_PREP(GENMASK(11, 8), (x))
31 #define SVC_I3C_MCONFIG_PPLOW(x) FIELD_PREP(GENMASK(15, 12), (x))
32 #define SVC_I3C_MCONFIG_ODBAUD(x) FIELD_PREP(GENMASK(23, 16), (x))
33 #define SVC_I3C_MCONFIG_ODHPP(x) FIELD_PREP(BIT(24), (x))
34 #define SVC_I3C_MCONFIG_SKEW(x) FIELD_PREP(GENMASK(27, 25), (x))
35 #define SVC_I3C_MCONFIG_SKEW_MASK GENMASK(27, 25)
36 #define SVC_I3C_MCONFIG_I2CBAUD(x) FIELD_PREP(GENMASK(31, 28), (x))
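/*
 * The bus timing fields above (PPBAUD, PPLOW, ODBAUD, ODHPP, I2CBAUD) are
 * derived from the fclk rate in svc_i3c_master_bus_init() and the resulting
 * MCONFIG value is cached in master->mctrl_config so that the open-drain
 * speed can be restored later.
 */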
37
38 #define SVC_I3C_MCTRL 0x084
39 #define SVC_I3C_MCTRL_REQUEST_MASK GENMASK(2, 0)
40 #define SVC_I3C_MCTRL_REQUEST_NONE 0
41 #define SVC_I3C_MCTRL_REQUEST_START_ADDR 1
42 #define SVC_I3C_MCTRL_REQUEST_STOP 2
43 #define SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK 3
44 #define SVC_I3C_MCTRL_REQUEST_PROC_DAA 4
45 #define SVC_I3C_MCTRL_REQUEST_AUTO_IBI 7
46 #define SVC_I3C_MCTRL_TYPE_I3C 0
47 #define SVC_I3C_MCTRL_TYPE_I2C BIT(4)
48 #define SVC_I3C_MCTRL_IBIRESP_AUTO 0
49 #define SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE 0
50 #define SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE BIT(7)
51 #define SVC_I3C_MCTRL_IBIRESP_NACK BIT(6)
52 #define SVC_I3C_MCTRL_IBIRESP_MANUAL GENMASK(7, 6)
53 #define SVC_I3C_MCTRL_DIR(x) FIELD_PREP(BIT(8), (x))
54 #define SVC_I3C_MCTRL_DIR_WRITE 0
55 #define SVC_I3C_MCTRL_DIR_READ 1
56 #define SVC_I3C_MCTRL_ADDR(x) FIELD_PREP(GENMASK(15, 9), (x))
57 #define SVC_I3C_MCTRL_RDTERM(x) FIELD_PREP(GENMASK(23, 16), (x))
58
59 #define SVC_I3C_MSTATUS 0x088
60 #define SVC_I3C_MSTATUS_STATE(x) FIELD_GET(GENMASK(2, 0), (x))
61 #define SVC_I3C_MSTATUS_STATE_DAA(x) (SVC_I3C_MSTATUS_STATE(x) == 5)
62 #define SVC_I3C_MSTATUS_STATE_SLVREQ(x) (SVC_I3C_MSTATUS_STATE(x) == 1)
63 #define SVC_I3C_MSTATUS_STATE_IDLE(x) (SVC_I3C_MSTATUS_STATE(x) == 0)
64 #define SVC_I3C_MSTATUS_BETWEEN(x) FIELD_GET(BIT(4), (x))
65 #define SVC_I3C_MSTATUS_NACKED(x) FIELD_GET(BIT(5), (x))
66 #define SVC_I3C_MSTATUS_IBITYPE(x) FIELD_GET(GENMASK(7, 6), (x))
67 #define SVC_I3C_MSTATUS_IBITYPE_IBI 1
68 #define SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST 2
69 #define SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN 3
70 #define SVC_I3C_MINT_SLVSTART BIT(8)
71 #define SVC_I3C_MINT_MCTRLDONE BIT(9)
72 #define SVC_I3C_MINT_COMPLETE BIT(10)
73 #define SVC_I3C_MINT_RXPEND BIT(11)
74 #define SVC_I3C_MINT_TXNOTFULL BIT(12)
75 #define SVC_I3C_MINT_IBIWON BIT(13)
76 #define SVC_I3C_MINT_ERRWARN BIT(15)
77 #define SVC_I3C_MSTATUS_SLVSTART(x) FIELD_GET(SVC_I3C_MINT_SLVSTART, (x))
78 #define SVC_I3C_MSTATUS_MCTRLDONE(x) FIELD_GET(SVC_I3C_MINT_MCTRLDONE, (x))
79 #define SVC_I3C_MSTATUS_COMPLETE(x) FIELD_GET(SVC_I3C_MINT_COMPLETE, (x))
80 #define SVC_I3C_MSTATUS_RXPEND(x) FIELD_GET(SVC_I3C_MINT_RXPEND, (x))
81 #define SVC_I3C_MSTATUS_TXNOTFULL(x) FIELD_GET(SVC_I3C_MINT_TXNOTFULL, (x))
82 #define SVC_I3C_MSTATUS_IBIWON(x) FIELD_GET(SVC_I3C_MINT_IBIWON, (x))
83 #define SVC_I3C_MSTATUS_ERRWARN(x) FIELD_GET(SVC_I3C_MINT_ERRWARN, (x))
84 #define SVC_I3C_MSTATUS_IBIADDR(x) FIELD_GET(GENMASK(30, 24), (x))
85
86 #define SVC_I3C_IBIRULES 0x08C
87 #define SVC_I3C_IBIRULES_ADDR(slot, addr) FIELD_PREP(GENMASK(29, 0), \
88 ((addr) & 0x3F) << ((slot) * 6))
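/*
 * Each IBIRULES slot holds a 6-bit address. Illustrative example:
 * SVC_I3C_IBIRULES_ADDR(1, 0x09) places address 0x09 in bits 11:6,
 * i.e. a register value of 0x240.
 */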
89 #define SVC_I3C_IBIRULES_ADDRS 5
90 #define SVC_I3C_IBIRULES_MSB0 BIT(30)
91 #define SVC_I3C_IBIRULES_NOBYTE BIT(31)
92 #define SVC_I3C_IBIRULES_MANDBYTE 0
93 #define SVC_I3C_MINTSET 0x090
94 #define SVC_I3C_MINTCLR 0x094
95 #define SVC_I3C_MINTMASKED 0x098
96 #define SVC_I3C_MERRWARN 0x09C
97 #define SVC_I3C_MERRWARN_NACK BIT(2)
98 #define SVC_I3C_MERRWARN_TIMEOUT BIT(20)
99 #define SVC_I3C_MDMACTRL 0x0A0
100 #define SVC_I3C_MDATACTRL 0x0AC
101 #define SVC_I3C_MDATACTRL_FLUSHTB BIT(0)
102 #define SVC_I3C_MDATACTRL_FLUSHRB BIT(1)
103 #define SVC_I3C_MDATACTRL_UNLOCK_TRIG BIT(3)
104 #define SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL GENMASK(5, 4)
105 #define SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY 0
106 #define SVC_I3C_MDATACTRL_RXCOUNT(x) FIELD_GET(GENMASK(28, 24), (x))
107 #define SVC_I3C_MDATACTRL_TXFULL BIT(30)
108 #define SVC_I3C_MDATACTRL_RXEMPTY BIT(31)
109
110 #define SVC_I3C_MWDATAB 0x0B0
111 #define SVC_I3C_MWDATAB_END BIT(8)
112
113 #define SVC_I3C_MWDATABE 0x0B4
114 #define SVC_I3C_MWDATAH 0x0B8
115 #define SVC_I3C_MWDATAHE 0x0BC
116 #define SVC_I3C_MRDATAB 0x0C0
117 #define SVC_I3C_MRDATAH 0x0C8
118 #define SVC_I3C_MWDATAB1 0x0CC
119 #define SVC_I3C_MWMSG_SDR 0x0D0
120 #define SVC_I3C_MRMSG_SDR 0x0D4
121 #define SVC_I3C_MWMSG_DDR 0x0D8
122 #define SVC_I3C_MRMSG_DDR 0x0DC
123
124 #define SVC_I3C_MDYNADDR 0x0E4
125 #define SVC_MDYNADDR_VALID BIT(0)
126 #define SVC_MDYNADDR_ADDR(x) FIELD_PREP(GENMASK(7, 1), (x))
127
128 #define SVC_I3C_MAX_DEVS 32
129 #define SVC_I3C_PM_TIMEOUT_MS 1000
130
131 /* This parameter depends on the implementation and may be tuned */
132 #define SVC_I3C_FIFO_SIZE 16
133 #define SVC_I3C_PPBAUD_MAX 15
134 #define SVC_I3C_QUICK_I2C_CLK 4170000
135
136 #define SVC_I3C_EVENT_IBI GENMASK(7, 0)
137 #define SVC_I3C_EVENT_HOTJOIN BIT(31)
138
139 /*
140 * SVC_I3C_QUIRK_FIFO_EMPTY:
 * The I3C HW stalls the write transfer if the transmit FIFO becomes empty.
 * When new data is written to the FIFO, the I3C HW resumes the transfer, but
 * the first transmitted data bit may have the wrong value.
144 * Workaround:
145 * Fill the FIFO in advance to prevent FIFO from becoming empty.
146 */
147 #define SVC_I3C_QUIRK_FIFO_EMPTY BIT(0)
148 /*
 * SVC_I3C_QUIRK_FALSE_SLVSTART:
150 * I3C HW may generate an invalid SlvStart event when emitting a STOP.
151 * If it is a true SlvStart, the MSTATUS state is SLVREQ.
152 */
153 #define SVC_I3C_QUIRK_FALSE_SLVSTART BIT(1)
154 /*
155 * SVC_I3C_QUIRK_DAA_CORRUPT:
156 * When MCONFIG.SKEW=0 and MCONFIG.ODHPP=0, the ENTDAA transaction gets
 * corrupted and results in no repeated-start condition at the end of
158 * address assignment.
159 * Workaround:
160 * Set MCONFIG.SKEW to 1 before initiating the DAA process. After the DAA
161 * process is completed, return MCONFIG.SKEW to its previous value.
162 */
163 #define SVC_I3C_QUIRK_DAA_CORRUPT BIT(2)
164
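/**
 * struct svc_i3c_cmd - Single command within a transfer
 * @addr: Target address
 * @rnw: True for a read, false for a write
 * @in: Input buffer for read transfers
 * @out: Output buffer for write transfers
 * @len: Requested transfer length in bytes
 * @actual_len: Number of bytes actually transferred
 * @xfer: I3C private transfer this command belongs to, NULL for I2C/CCC
 * @continued: True if the next command follows with a repeated START
 */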
165 struct svc_i3c_cmd {
166 u8 addr;
167 bool rnw;
168 u8 *in;
169 const void *out;
170 unsigned int len;
171 unsigned int actual_len;
172 struct i3c_priv_xfer *xfer;
173 bool continued;
174 };
175
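/**
 * struct svc_i3c_xfer - Transfer queued on the controller
 * @node: Entry in the controller transfer queue
 * @comp: Completion signaled once all commands are done
 * @ret: Result of the transfer
 * @type: SVC_I3C_MCTRL_TYPE_I3C or SVC_I3C_MCTRL_TYPE_I2C
 * @ncmds: Number of commands in @cmds
 * @cmds: Commands composing this transfer
 */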
176 struct svc_i3c_xfer {
177 struct list_head node;
178 struct completion comp;
179 int ret;
180 unsigned int type;
181 unsigned int ncmds;
182 struct svc_i3c_cmd cmds[] __counted_by(ncmds);
183 };
184
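/**
 * struct svc_i3c_regs_save - Register values saved across PM transitions
 * @mconfig: Saved SVC_I3C_MCONFIG value
 * @mdynaddr: Saved SVC_I3C_MDYNADDR value
 */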
185 struct svc_i3c_regs_save {
186 u32 mconfig;
187 u32 mdynaddr;
188 };
189
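/**
 * struct svc_i3c_drvdata - Controller-specific driver data
 * @quirks: Bitmask of SVC_I3C_QUIRK_* flags for this controller variant
 */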
190 struct svc_i3c_drvdata {
191 u32 quirks;
192 };
193
194 /**
195 * struct svc_i3c_master - Silvaco I3C Master structure
196 * @base: I3C master controller
197 * @dev: Corresponding device
198 * @regs: Memory mapping
199 * @saved_regs: Volatile values for PM operations
200 * @free_slots: Bit array of available slots
201 * @addrs: Array containing the dynamic addresses of each attached device
202 * @descs: Array of descriptors, one per attached device
203 * @hj_work: Hot-join work
204 * @ibi_work: IBI work
205 * @irq: Main interrupt
206 * @pclk: System clock
207 * @fclk: Fast clock (bus)
208 * @sclk: Slow clock (other events)
209 * @xferqueue: Transfer queue structure
210 * @xferqueue.list: List member
211 * @xferqueue.cur: Current ongoing transfer
212 * @xferqueue.lock: Queue lock
213 * @ibi: IBI structure
214 * @ibi.num_slots: Number of slots available in @ibi.slots
215 * @ibi.slots: Available IBI slots
216 * @ibi.tbq_slot: To be queued IBI slot
217 * @ibi.lock: IBI lock
218 * @lock: Transfer lock, protect between IBI work thread and callbacks from master
219 * @drvdata: Driver data
 * @enabled_events: Bit mask of enabled events (IBI, HotJoin).
 * @mctrl_config: Configuration value in SVC_I3C_MCONFIG for setting the speed back.
222 */
223 struct svc_i3c_master {
224 struct i3c_master_controller base;
225 struct device *dev;
226 void __iomem *regs;
227 struct svc_i3c_regs_save saved_regs;
228 u32 free_slots;
229 u8 addrs[SVC_I3C_MAX_DEVS];
230 struct i3c_dev_desc *descs[SVC_I3C_MAX_DEVS];
231 struct work_struct hj_work;
232 struct work_struct ibi_work;
233 int irq;
234 struct clk *pclk;
235 struct clk *fclk;
236 struct clk *sclk;
237 struct {
238 struct list_head list;
239 struct svc_i3c_xfer *cur;
240 /* Prevent races between transfers */
241 spinlock_t lock;
242 } xferqueue;
243 struct {
244 unsigned int num_slots;
245 struct i3c_dev_desc **slots;
246 struct i3c_ibi_slot *tbq_slot;
247 /* Prevent races within IBI handlers */
248 spinlock_t lock;
249 } ibi;
250 struct mutex lock;
251 const struct svc_i3c_drvdata *drvdata;
252 u32 enabled_events;
253 u32 mctrl_config;
254 };
255
256 /**
257 * struct svc_i3c_i2c_dev_data - Device specific data
258 * @index: Index in the master tables corresponding to this device
259 * @ibi: IBI slot index in the master structure
260 * @ibi_pool: IBI pool associated to this device
261 */
262 struct svc_i3c_i2c_dev_data {
263 u8 index;
264 int ibi;
265 struct i3c_generic_ibi_pool *ibi_pool;
266 };
267
static inline bool svc_has_quirk(struct svc_i3c_master *master, u32 quirk)
269 {
270 return (master->drvdata->quirks & quirk);
271 }
272
static inline bool svc_has_daa_corrupt(struct svc_i3c_master *master)
274 {
275 return ((master->drvdata->quirks & SVC_I3C_QUIRK_DAA_CORRUPT) &&
276 !(master->mctrl_config &
277 (SVC_I3C_MCONFIG_SKEW_MASK | SVC_I3C_MCONFIG_ODHPP(1))));
278 }
279
static inline bool is_events_enabled(struct svc_i3c_master *master, u32 mask)
281 {
282 return !!(master->enabled_events & mask);
283 }
284
static bool svc_i3c_master_error(struct svc_i3c_master *master)
286 {
287 u32 mstatus, merrwarn;
288
289 mstatus = readl(master->regs + SVC_I3C_MSTATUS);
290 if (SVC_I3C_MSTATUS_ERRWARN(mstatus)) {
291 merrwarn = readl(master->regs + SVC_I3C_MERRWARN);
292 writel(merrwarn, master->regs + SVC_I3C_MERRWARN);
293
294 /* Ignore timeout error */
295 if (merrwarn & SVC_I3C_MERRWARN_TIMEOUT) {
296 dev_dbg(master->dev, "Warning condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
297 mstatus, merrwarn);
298 return false;
299 }
300
301 dev_err(master->dev,
302 "Error condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
303 mstatus, merrwarn);
304
305 return true;
306 }
307
308 return false;
309 }
310
static void svc_i3c_master_enable_interrupts(struct svc_i3c_master *master, u32 mask)
312 {
313 writel(mask, master->regs + SVC_I3C_MINTSET);
314 }
315
static void svc_i3c_master_disable_interrupts(struct svc_i3c_master *master)
317 {
318 u32 mask = readl(master->regs + SVC_I3C_MINTSET);
319
320 writel(mask, master->regs + SVC_I3C_MINTCLR);
321 }
322
static void svc_i3c_master_clear_merrwarn(struct svc_i3c_master *master)
324 {
325 /* Clear pending warnings */
326 writel(readl(master->regs + SVC_I3C_MERRWARN),
327 master->regs + SVC_I3C_MERRWARN);
328 }
329
static void svc_i3c_master_flush_fifo(struct svc_i3c_master *master)
331 {
332 /* Flush FIFOs */
333 writel(SVC_I3C_MDATACTRL_FLUSHTB | SVC_I3C_MDATACTRL_FLUSHRB,
334 master->regs + SVC_I3C_MDATACTRL);
335 }
336
static void svc_i3c_master_reset_fifo_trigger(struct svc_i3c_master *master)
338 {
339 u32 reg;
340
/* Set RX and TX trigger levels, flush FIFOs */
342 reg = SVC_I3C_MDATACTRL_FLUSHTB |
343 SVC_I3C_MDATACTRL_FLUSHRB |
344 SVC_I3C_MDATACTRL_UNLOCK_TRIG |
345 SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL |
346 SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY;
347 writel(reg, master->regs + SVC_I3C_MDATACTRL);
348 }
349
static void svc_i3c_master_reset(struct svc_i3c_master *master)
351 {
352 svc_i3c_master_clear_merrwarn(master);
353 svc_i3c_master_reset_fifo_trigger(master);
354 svc_i3c_master_disable_interrupts(master);
355 }
356
357 static inline struct svc_i3c_master *
to_svc_i3c_master(struct i3c_master_controller *master)
359 {
360 return container_of(master, struct svc_i3c_master, base);
361 }
362
static void svc_i3c_master_hj_work(struct work_struct *work)
364 {
365 struct svc_i3c_master *master;
366
367 master = container_of(work, struct svc_i3c_master, hj_work);
368 i3c_master_do_daa(&master->base);
369 }
370
371 static struct i3c_dev_desc *
svc_i3c_master_dev_from_addr(struct svc_i3c_master *master,
373 unsigned int ibiaddr)
374 {
375 int i;
376
377 for (i = 0; i < SVC_I3C_MAX_DEVS; i++)
378 if (master->addrs[i] == ibiaddr)
379 break;
380
381 if (i == SVC_I3C_MAX_DEVS)
382 return NULL;
383
384 return master->descs[i];
385 }
386
static void svc_i3c_master_emit_stop(struct svc_i3c_master *master)
388 {
389 writel(SVC_I3C_MCTRL_REQUEST_STOP, master->regs + SVC_I3C_MCTRL);
390
391 /*
 * This delay is necessary after the emission of a stop, otherwise e.g.
393 * repeating IBIs do not get detected. There is a note in the manual
394 * about it, stating that the stop condition might not be settled
395 * correctly if a start condition follows too rapidly.
396 */
397 udelay(1);
398 }
399
static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
401 struct i3c_dev_desc *dev)
402 {
403 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
404 struct i3c_ibi_slot *slot;
405 unsigned int count;
406 u32 mdatactrl;
407 int ret, val;
408 u8 *buf;
409
410 slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
411 if (!slot)
412 return -ENOSPC;
413
414 slot->len = 0;
415 buf = slot->data;
416
417 ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
418 SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
419 if (ret) {
420 dev_err(master->dev, "Timeout when polling for COMPLETE\n");
421 return ret;
422 }
423
424 while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS)) &&
425 slot->len < SVC_I3C_FIFO_SIZE) {
426 mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
427 count = SVC_I3C_MDATACTRL_RXCOUNT(mdatactrl);
428 readsb(master->regs + SVC_I3C_MRDATAB, buf, count);
429 slot->len += count;
430 buf += count;
431 }
432
433 master->ibi.tbq_slot = slot;
434
435 return 0;
436 }
437
static int svc_i3c_master_ack_ibi(struct svc_i3c_master *master,
439 bool mandatory_byte)
440 {
441 unsigned int ibi_ack_nack;
442 u32 reg;
443
444 ibi_ack_nack = SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK;
445 if (mandatory_byte)
446 ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE;
447 else
448 ibi_ack_nack |= SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE;
449
450 writel(ibi_ack_nack, master->regs + SVC_I3C_MCTRL);
451
452 return readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS, reg,
453 SVC_I3C_MSTATUS_MCTRLDONE(reg), 1, 1000);
454
455 }
456
static int svc_i3c_master_nack_ibi(struct svc_i3c_master *master)
458 {
459 int ret;
460 u32 reg;
461
462 writel(SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK |
463 SVC_I3C_MCTRL_IBIRESP_NACK,
464 master->regs + SVC_I3C_MCTRL);
465
466 ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS, reg,
467 SVC_I3C_MSTATUS_MCTRLDONE(reg), 1, 1000);
468 return ret;
469 }
470
static int svc_i3c_master_handle_ibi_won(struct svc_i3c_master *master, u32 mstatus)
472 {
473 u32 ibitype;
474 int ret = 0;
475
476 ibitype = SVC_I3C_MSTATUS_IBITYPE(mstatus);
477
478 writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
479
/* Hardware can't automatically NACK a hot join or master request */
481 switch (ibitype) {
482 case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
483 case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
484 ret = svc_i3c_master_nack_ibi(master);
485 }
486
487 return ret;
488 }
489
static void svc_i3c_master_ibi_work(struct work_struct *work)
491 {
492 struct svc_i3c_master *master = container_of(work, struct svc_i3c_master, ibi_work);
493 struct svc_i3c_i2c_dev_data *data;
494 unsigned int ibitype, ibiaddr;
495 struct i3c_dev_desc *dev;
496 u32 status, val;
497 int ret;
498
499 /*
500 * According to I3C spec ver 1.1, 09-Jun-2021, section 5.1.2.5:
501 *
502 * The I3C Controller shall hold SCL low while the Bus is in ACK/NACK Phase of I3C/I2C
 * transfer, but the maximum stall time is 100us. IRQs have to be disabled to prevent
 * scheduling during the whole I3C transaction, otherwise the I3C bus may time out if
 * any IRQ or scheduling event happens during the transaction.
506 */
507 guard(spinlock_irqsave)(&master->xferqueue.lock);
508
509 /*
510 * IBIWON may be set before SVC_I3C_MCTRL_REQUEST_AUTO_IBI, causing
511 * readl_relaxed_poll_timeout() to return immediately. Consequently,
512 * ibitype will be 0 since it was last updated only after the 8th SCL
513 * cycle, leading to missed client IBI handlers.
514 *
515 * A typical scenario is when IBIWON occurs and bus arbitration is lost
516 * at svc_i3c_master_priv_xfers().
517 *
518 * Clear SVC_I3C_MINT_IBIWON before sending SVC_I3C_MCTRL_REQUEST_AUTO_IBI.
519 */
520 writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
521
522 /* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
523 writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
524 SVC_I3C_MCTRL_IBIRESP_AUTO,
525 master->regs + SVC_I3C_MCTRL);
526
527 /* Wait for IBIWON, should take approximately 100us */
528 ret = readl_relaxed_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS, val,
529 SVC_I3C_MSTATUS_IBIWON(val), 0, 100);
530 if (ret) {
531 dev_err(master->dev, "Timeout when polling for IBIWON\n");
532 svc_i3c_master_emit_stop(master);
533 goto reenable_ibis;
534 }
535
536 status = readl(master->regs + SVC_I3C_MSTATUS);
537 ibitype = SVC_I3C_MSTATUS_IBITYPE(status);
538 ibiaddr = SVC_I3C_MSTATUS_IBIADDR(status);
539
/* Handle the critical responses to IBIs */
541 switch (ibitype) {
542 case SVC_I3C_MSTATUS_IBITYPE_IBI:
543 dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
544 if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI))
545 svc_i3c_master_nack_ibi(master);
546 else
547 svc_i3c_master_handle_ibi(master, dev);
548 break;
549 case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
550 if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
551 svc_i3c_master_ack_ibi(master, false);
552 else
553 svc_i3c_master_nack_ibi(master);
554 break;
555 case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
556 svc_i3c_master_nack_ibi(master);
557 break;
558 default:
559 break;
560 }
561
562 /*
563 * If an error happened, we probably got interrupted and the exchange
 * timed out. In this case we just drop everything, emit a stop and wait
565 * for the slave to interrupt again.
566 */
567 if (svc_i3c_master_error(master)) {
568 if (master->ibi.tbq_slot) {
569 data = i3c_dev_get_master_data(dev);
570 i3c_generic_ibi_recycle_slot(data->ibi_pool,
571 master->ibi.tbq_slot);
572 master->ibi.tbq_slot = NULL;
573 }
574
575 svc_i3c_master_emit_stop(master);
576
577 goto reenable_ibis;
578 }
579
/* Handle the non-critical tasks */
581 switch (ibitype) {
582 case SVC_I3C_MSTATUS_IBITYPE_IBI:
583 if (dev) {
584 i3c_master_queue_ibi(dev, master->ibi.tbq_slot);
585 master->ibi.tbq_slot = NULL;
586 }
587 svc_i3c_master_emit_stop(master);
588 break;
589 case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
590 svc_i3c_master_emit_stop(master);
591 if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
592 queue_work(master->base.wq, &master->hj_work);
593 break;
594 case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST:
595 svc_i3c_master_emit_stop(master);
596 break;
597 default:
598 break;
599 }
600
601 reenable_ibis:
602 svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
603 }
604
static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
606 {
607 struct svc_i3c_master *master = (struct svc_i3c_master *)dev_id;
608 u32 active = readl(master->regs + SVC_I3C_MSTATUS);
609
610 if (!SVC_I3C_MSTATUS_SLVSTART(active))
611 return IRQ_NONE;
612
613 /* Clear the interrupt status */
614 writel(SVC_I3C_MINT_SLVSTART, master->regs + SVC_I3C_MSTATUS);
615
616 /* Ignore the false event */
617 if (svc_has_quirk(master, SVC_I3C_QUIRK_FALSE_SLVSTART) &&
618 !SVC_I3C_MSTATUS_STATE_SLVREQ(active))
619 return IRQ_HANDLED;
620
621 svc_i3c_master_disable_interrupts(master);
622
623 /* Handle the interrupt in a non atomic context */
624 queue_work(master->base.wq, &master->ibi_work);
625
626 return IRQ_HANDLED;
627 }
628
static int svc_i3c_master_set_speed(struct i3c_master_controller *m,
630 enum i3c_open_drain_speed speed)
631 {
632 struct svc_i3c_master *master = to_svc_i3c_master(m);
633 struct i3c_bus *bus = i3c_master_get_bus(&master->base);
634 u32 ppbaud, odbaud, odhpp, mconfig;
635 unsigned long fclk_rate;
636 int ret;
637
638 ret = pm_runtime_resume_and_get(master->dev);
639 if (ret < 0) {
640 dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
641 return ret;
642 }
643
644 switch (speed) {
645 case I3C_OPEN_DRAIN_SLOW_SPEED:
646 fclk_rate = clk_get_rate(master->fclk);
647 if (!fclk_rate) {
648 ret = -EINVAL;
649 goto rpm_out;
650 }
651 /*
 * Set a 50% duty-cycle I2C speed for I3C open-drain mode, so that the first
 * broadcast address is visible to all I2C/I3C devices on the I3C bus.
 * An I3C device working as an I2C device will turn off its 50ns spike
 * filter to switch to I3C mode.
656 */
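/*
 * Illustrative numbers only (assuming fclk_rate = 100 MHz, ppbaud = 3 and
 * scl_rate.i2c = 100 kHz): odbaud = DIV_ROUND_UP(100000000, 100000 * 8) - 1
 * = 124, so each open-drain half-period is (124 + 1) * (3 + 1) * 10ns = 5us,
 * i.e. a 100 kHz SCL with a 50% duty-cycle.
 */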
657 mconfig = master->mctrl_config;
658 ppbaud = FIELD_GET(GENMASK(11, 8), mconfig);
659 odhpp = 0;
660 odbaud = DIV_ROUND_UP(fclk_rate, bus->scl_rate.i2c * (2 + 2 * ppbaud)) - 1;
661 mconfig &= ~GENMASK(24, 16);
662 mconfig |= SVC_I3C_MCONFIG_ODBAUD(odbaud) | SVC_I3C_MCONFIG_ODHPP(odhpp);
663 writel(mconfig, master->regs + SVC_I3C_MCONFIG);
664 break;
665 case I3C_OPEN_DRAIN_NORMAL_SPEED:
666 writel(master->mctrl_config, master->regs + SVC_I3C_MCONFIG);
667 break;
668 }
669
670 rpm_out:
671 pm_runtime_mark_last_busy(master->dev);
672 pm_runtime_put_autosuspend(master->dev);
673
674 return ret;
675 }
676
static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
678 {
679 struct svc_i3c_master *master = to_svc_i3c_master(m);
680 struct i3c_bus *bus = i3c_master_get_bus(m);
681 struct i3c_device_info info = {};
682 unsigned long fclk_rate, fclk_period_ns;
683 unsigned long i2c_period_ns, i2c_scl_rate, i3c_scl_rate;
684 unsigned int high_period_ns, od_low_period_ns;
685 u32 ppbaud, pplow, odhpp, odbaud, odstop, i2cbaud, reg;
686 int ret;
687
688 ret = pm_runtime_resume_and_get(master->dev);
689 if (ret < 0) {
690 dev_err(master->dev,
691 "<%s> cannot resume i3c bus master, err: %d\n",
692 __func__, ret);
693 return ret;
694 }
695
696 /* Timings derivation */
697 fclk_rate = clk_get_rate(master->fclk);
698 if (!fclk_rate) {
699 ret = -EINVAL;
700 goto rpm_out;
701 }
702
703 fclk_period_ns = DIV_ROUND_UP(1000000000, fclk_rate);
704 i2c_period_ns = DIV_ROUND_UP(1000000000, bus->scl_rate.i2c);
705 i2c_scl_rate = bus->scl_rate.i2c;
706 i3c_scl_rate = bus->scl_rate.i3c;
707
708 /*
709 * Using I3C Push-Pull mode, target is 12.5MHz/80ns period.
710 * Simplest configuration is using a 50% duty-cycle of 40ns.
711 */
712 ppbaud = DIV_ROUND_UP(fclk_rate / 2, i3c_scl_rate) - 1;
713 pplow = 0;
714
715 /*
716 * Using I3C Open-Drain mode, target is 4.17MHz/240ns with a
 * duty-cycle tuned so that high levels are filtered out by
718 * the 50ns filter (target being 40ns).
719 */
720 odhpp = 1;
721 high_period_ns = (ppbaud + 1) * fclk_period_ns;
722 odbaud = DIV_ROUND_UP(fclk_rate, SVC_I3C_QUICK_I2C_CLK * (1 + ppbaud)) - 2;
723 od_low_period_ns = (odbaud + 1) * high_period_ns;
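/*
 * Illustrative numbers only: assuming fclk_rate = 100 MHz (10ns period)
 * and i3c_scl_rate = 12.5 MHz, ppbaud = 3, so the push-pull high/low
 * periods are 40ns each (12.5 MHz SCL). For open-drain,
 * odbaud = DIV_ROUND_UP(100000000, 4170000 * 4) - 2 = 4, giving an
 * open-drain low period of (4 + 1) * 40ns = 200ns and, with odhpp = 1,
 * a 40ns high period, i.e. the targeted ~240ns/4.17 MHz.
 */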
724
725 switch (bus->mode) {
726 case I3C_BUS_MODE_PURE:
727 i2cbaud = 0;
728 odstop = 0;
729 break;
730 case I3C_BUS_MODE_MIXED_FAST:
731 /*
732 * Using I2C Fm+ mode, target is 1MHz/1000ns, the difference
733 * between the high and low period does not really matter.
734 */
735 i2cbaud = DIV_ROUND_UP(i2c_period_ns, od_low_period_ns) - 2;
736 odstop = 1;
737 break;
738 case I3C_BUS_MODE_MIXED_LIMITED:
739 case I3C_BUS_MODE_MIXED_SLOW:
/* I3C PP + I3C OP + I2C OP all use the i2c clk rate */
741 if (ppbaud > SVC_I3C_PPBAUD_MAX) {
742 ppbaud = SVC_I3C_PPBAUD_MAX;
743 pplow = DIV_ROUND_UP(fclk_rate, i3c_scl_rate) - (2 + 2 * ppbaud);
744 }
745
746 high_period_ns = (ppbaud + 1) * fclk_period_ns;
747 odhpp = 0;
748 odbaud = DIV_ROUND_UP(fclk_rate, i2c_scl_rate * (2 + 2 * ppbaud)) - 1;
749
750 od_low_period_ns = (odbaud + 1) * high_period_ns;
751 i2cbaud = DIV_ROUND_UP(i2c_period_ns, od_low_period_ns) - 2;
752 odstop = 1;
753 break;
754 default:
755 goto rpm_out;
756 }
757
758 reg = SVC_I3C_MCONFIG_MASTER_EN |
759 SVC_I3C_MCONFIG_DISTO(0) |
760 SVC_I3C_MCONFIG_HKEEP(0) |
761 SVC_I3C_MCONFIG_ODSTOP(odstop) |
762 SVC_I3C_MCONFIG_PPBAUD(ppbaud) |
763 SVC_I3C_MCONFIG_PPLOW(pplow) |
764 SVC_I3C_MCONFIG_ODBAUD(odbaud) |
765 SVC_I3C_MCONFIG_ODHPP(odhpp) |
766 SVC_I3C_MCONFIG_SKEW(0) |
767 SVC_I3C_MCONFIG_I2CBAUD(i2cbaud);
768 writel(reg, master->regs + SVC_I3C_MCONFIG);
769
770 master->mctrl_config = reg;
771 /* Master core's registration */
772 ret = i3c_master_get_free_addr(m, 0);
773 if (ret < 0)
774 goto rpm_out;
775
776 info.dyn_addr = ret;
777
778 writel(SVC_MDYNADDR_VALID | SVC_MDYNADDR_ADDR(info.dyn_addr),
779 master->regs + SVC_I3C_MDYNADDR);
780
781 ret = i3c_master_set_info(&master->base, &info);
782 if (ret)
783 goto rpm_out;
784
785 rpm_out:
786 pm_runtime_mark_last_busy(master->dev);
787 pm_runtime_put_autosuspend(master->dev);
788
789 return ret;
790 }
791
static void svc_i3c_master_bus_cleanup(struct i3c_master_controller *m)
793 {
794 struct svc_i3c_master *master = to_svc_i3c_master(m);
795 int ret;
796
797 ret = pm_runtime_resume_and_get(master->dev);
798 if (ret < 0) {
799 dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
800 return;
801 }
802
803 svc_i3c_master_disable_interrupts(master);
804
805 /* Disable master */
806 writel(0, master->regs + SVC_I3C_MCONFIG);
807
808 pm_runtime_mark_last_busy(master->dev);
809 pm_runtime_put_autosuspend(master->dev);
810 }
811
static int svc_i3c_master_reserve_slot(struct svc_i3c_master *master)
813 {
814 unsigned int slot;
815
816 if (!(master->free_slots & GENMASK(SVC_I3C_MAX_DEVS - 1, 0)))
817 return -ENOSPC;
818
819 slot = ffs(master->free_slots) - 1;
820
821 master->free_slots &= ~BIT(slot);
822
823 return slot;
824 }
825
static void svc_i3c_master_release_slot(struct svc_i3c_master *master,
827 unsigned int slot)
828 {
829 master->free_slots |= BIT(slot);
830 }
831
static int svc_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
833 {
834 struct i3c_master_controller *m = i3c_dev_get_master(dev);
835 struct svc_i3c_master *master = to_svc_i3c_master(m);
836 struct svc_i3c_i2c_dev_data *data;
837 int slot;
838
839 slot = svc_i3c_master_reserve_slot(master);
840 if (slot < 0)
841 return slot;
842
843 data = kzalloc(sizeof(*data), GFP_KERNEL);
844 if (!data) {
845 svc_i3c_master_release_slot(master, slot);
846 return -ENOMEM;
847 }
848
849 data->ibi = -1;
850 data->index = slot;
851 master->addrs[slot] = dev->info.dyn_addr ? dev->info.dyn_addr :
852 dev->info.static_addr;
853 master->descs[slot] = dev;
854
855 i3c_dev_set_master_data(dev, data);
856
857 return 0;
858 }
859
static int svc_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
861 u8 old_dyn_addr)
862 {
863 struct i3c_master_controller *m = i3c_dev_get_master(dev);
864 struct svc_i3c_master *master = to_svc_i3c_master(m);
865 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
866
867 master->addrs[data->index] = dev->info.dyn_addr ? dev->info.dyn_addr :
868 dev->info.static_addr;
869
870 return 0;
871 }
872
static void svc_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
874 {
875 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
876 struct i3c_master_controller *m = i3c_dev_get_master(dev);
877 struct svc_i3c_master *master = to_svc_i3c_master(m);
878
879 master->addrs[data->index] = 0;
880 svc_i3c_master_release_slot(master, data->index);
881
882 kfree(data);
883 }
884
static int svc_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
886 {
887 struct i3c_master_controller *m = i2c_dev_get_master(dev);
888 struct svc_i3c_master *master = to_svc_i3c_master(m);
889 struct svc_i3c_i2c_dev_data *data;
890 int slot;
891
892 slot = svc_i3c_master_reserve_slot(master);
893 if (slot < 0)
894 return slot;
895
896 data = kzalloc(sizeof(*data), GFP_KERNEL);
897 if (!data) {
898 svc_i3c_master_release_slot(master, slot);
899 return -ENOMEM;
900 }
901
902 data->index = slot;
903 master->addrs[slot] = dev->addr;
904
905 i2c_dev_set_master_data(dev, data);
906
907 return 0;
908 }
909
static void svc_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
911 {
912 struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
913 struct i3c_master_controller *m = i2c_dev_get_master(dev);
914 struct svc_i3c_master *master = to_svc_i3c_master(m);
915
916 svc_i3c_master_release_slot(master, data->index);
917
918 kfree(data);
919 }
920
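/*
 * Read len bytes one at a time from the byte-wide MRDATAB register,
 * polling MSTATUS.RXPEND before each access.
 */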
static int svc_i3c_master_readb(struct svc_i3c_master *master, u8 *dst,
922 unsigned int len)
923 {
924 int ret, i;
925 u32 reg;
926
927 for (i = 0; i < len; i++) {
928 ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
929 reg,
930 SVC_I3C_MSTATUS_RXPEND(reg),
931 0, 1000);
932 if (ret)
933 return ret;
934
935 dst[i] = readl(master->regs + SVC_I3C_MRDATAB);
936 }
937
938 return 0;
939 }
940
static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
942 u8 *addrs, unsigned int *count)
943 {
944 u64 prov_id[SVC_I3C_MAX_DEVS] = {}, nacking_prov_id = 0;
945 unsigned int dev_nb = 0, last_addr = 0, dyn_addr = 0;
946 u32 reg;
947 int ret, i;
948
949 svc_i3c_master_flush_fifo(master);
950
951 while (true) {
952 /* clean SVC_I3C_MINT_IBIWON w1c bits */
953 writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
954
/* SVC_I3C_MCTRL_REQUEST_PROC_DAA has two modes, ENTER DAA or PROCESS DAA.
 *
 * ENTER DAA:
 * 1 Will issue START, 7E, ENTDAA, and then emit 7E/R to process the first target.
959 * 2 Stops just before the new Dynamic Address (DA) is to be emitted.
960 *
961 * PROCESS DAA:
962 * 1 The DA is written using MWDATAB or ADDR bits 6:0.
963 * 2 ProcessDAA is requested again to write the new address, and then starts the
964 * next (START, 7E, ENTDAA) unless marked to STOP; an MSTATUS indicating NACK
965 * means DA was not accepted (e.g. parity error). If PROCESSDAA is NACKed on the
966 * 7E/R, which means no more Slaves need a DA, then a COMPLETE will be signaled
967 * (along with DONE), and a STOP issued automatically.
968 */
969 writel(SVC_I3C_MCTRL_REQUEST_PROC_DAA |
970 SVC_I3C_MCTRL_TYPE_I3C |
971 SVC_I3C_MCTRL_IBIRESP_NACK |
972 SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE),
973 master->regs + SVC_I3C_MCTRL);
974
975 /*
976 * Either one slave will send its ID, or the assignment process
977 * is done.
978 */
979 ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
980 reg,
981 SVC_I3C_MSTATUS_RXPEND(reg) |
982 SVC_I3C_MSTATUS_MCTRLDONE(reg),
983 1, 1000);
984 if (ret)
985 break;
986
987 if (SVC_I3C_MSTATUS_RXPEND(reg)) {
988 u8 data[6];
989
990 /*
 * One slave sends its ID to request address assignment;
 * prefilling the dynamic address can reduce SCL clock stalls
 * and also works around the SVC_I3C_QUIRK_FIFO_EMPTY quirk.
994 *
995 * Ideally, prefilling before the processDAA command is better.
996 * However, it requires an additional check to write the dyn_addr
997 * at the right time because the driver needs to write the processDAA
998 * command twice for one assignment.
999 * Prefilling here is safe and efficient because the FIFO starts
1000 * filling within a few hundred nanoseconds, which is significantly
1001 * faster compared to the 64 SCL clock cycles.
1002 */
1003 ret = i3c_master_get_free_addr(&master->base, last_addr + 1);
1004 if (ret < 0)
1005 break;
1006
1007 dyn_addr = ret;
1008 writel(dyn_addr, master->regs + SVC_I3C_MWDATAB);
1009
1010 /*
 * We only care about the 48-bit provisioned ID here, so that
 * we can be sure a device does not nack an address twice.
1013 * Otherwise, we would just need to flush the RX FIFO.
1014 */
1015 ret = svc_i3c_master_readb(master, data, 6);
1016 if (ret)
1017 break;
1018
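/* Assemble the 48-bit Provisioned ID, most significant byte first */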
1019 for (i = 0; i < 6; i++)
1020 prov_id[dev_nb] |= (u64)(data[i]) << (8 * (5 - i));
1021
1022 /* We do not care about the BCR and DCR yet */
1023 ret = svc_i3c_master_readb(master, data, 2);
1024 if (ret)
1025 break;
1026 } else if (SVC_I3C_MSTATUS_IBIWON(reg)) {
1027 ret = svc_i3c_master_handle_ibi_won(master, reg);
1028 if (ret)
1029 break;
1030 continue;
1031 } else if (SVC_I3C_MSTATUS_MCTRLDONE(reg)) {
1032 if (SVC_I3C_MSTATUS_STATE_IDLE(reg) &&
1033 SVC_I3C_MSTATUS_COMPLETE(reg)) {
1034 /*
 * All devices received and acked their dynamic
 * address; this is the natural end of the DAA
 * procedure.
 *
 * Hardware will automatically emit a STOP in this case.
1040 */
1041 *count = dev_nb;
1042 return 0;
1043
1044 } else if (SVC_I3C_MSTATUS_NACKED(reg)) {
1045 /* No I3C devices attached */
1046 if (dev_nb == 0) {
1047 /*
 * Hardware can't treat the first NACK of ENTDAA as a normal
 * COMPLETE, so a STOP must be emitted manually.
1050 */
1051 ret = 0;
1052 *count = 0;
1053 break;
1054 }
1055
1056 /*
 * A slave device nacked the address. This is
 * allowed only once: DAA will be stopped and
1059 * then resumed. The same device is supposed to
1060 * answer again immediately and shall ack the
1061 * address this time.
1062 */
1063 if (prov_id[dev_nb] == nacking_prov_id) {
1064 ret = -EIO;
1065 break;
1066 }
1067
1068 dev_nb--;
1069 nacking_prov_id = prov_id[dev_nb];
1070 svc_i3c_master_emit_stop(master);
1071
1072 continue;
1073 } else {
1074 break;
1075 }
1076 }
1077
1078 /* Wait for the slave to be ready to receive its address */
1079 ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
1080 reg,
1081 SVC_I3C_MSTATUS_MCTRLDONE(reg) &&
1082 SVC_I3C_MSTATUS_STATE_DAA(reg) &&
1083 SVC_I3C_MSTATUS_BETWEEN(reg),
1084 0, 1000);
1085 if (ret)
1086 break;
1087
1088 addrs[dev_nb] = dyn_addr;
1089 dev_dbg(master->dev, "DAA: device %d assigned to 0x%02x\n",
1090 dev_nb, addrs[dev_nb]);
1091 last_addr = addrs[dev_nb++];
1092 }
1093
/* Need to manually issue a STOP, except in the Complete case */
1095 svc_i3c_master_emit_stop(master);
1096 svc_i3c_master_flush_fifo(master);
1097
1098 return ret;
1099 }
1100
static int svc_i3c_update_ibirules(struct svc_i3c_master *master)
1102 {
1103 struct i3c_dev_desc *dev;
1104 u32 reg_mbyte = 0, reg_nobyte = SVC_I3C_IBIRULES_NOBYTE;
1105 unsigned int mbyte_addr_ok = 0, mbyte_addr_ko = 0, nobyte_addr_ok = 0,
1106 nobyte_addr_ko = 0;
1107 bool list_mbyte = false, list_nobyte = false;
1108
1109 /* Create the IBIRULES register for both cases */
1110 i3c_bus_for_each_i3cdev(&master->base.bus, dev) {
1111 if (!(dev->info.bcr & I3C_BCR_IBI_REQ_CAP))
1112 continue;
1113
1114 if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) {
1115 reg_mbyte |= SVC_I3C_IBIRULES_ADDR(mbyte_addr_ok,
1116 dev->info.dyn_addr);
1117
1118 /* IBI rules cannot be applied to devices with MSb=1 */
1119 if (dev->info.dyn_addr & BIT(7))
1120 mbyte_addr_ko++;
1121 else
1122 mbyte_addr_ok++;
1123 } else {
1124 reg_nobyte |= SVC_I3C_IBIRULES_ADDR(nobyte_addr_ok,
1125 dev->info.dyn_addr);
1126
1127 /* IBI rules cannot be applied to devices with MSb=1 */
1128 if (dev->info.dyn_addr & BIT(7))
1129 nobyte_addr_ko++;
1130 else
1131 nobyte_addr_ok++;
1132 }
1133 }
1134
/* Check whether each device list can be handled by the hardware */
1136 if (!mbyte_addr_ko && mbyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
1137 list_mbyte = true;
1138
1139 if (!nobyte_addr_ko && nobyte_addr_ok <= SVC_I3C_IBIRULES_ADDRS)
1140 list_nobyte = true;
1141
1142 /* No list can be properly handled, return an error */
1143 if (!list_mbyte && !list_nobyte)
1144 return -ERANGE;
1145
/* Arbitrarily pick the first list that can be handled by the hardware */
1147 if (list_mbyte)
1148 writel(reg_mbyte, master->regs + SVC_I3C_IBIRULES);
1149 else
1150 writel(reg_nobyte, master->regs + SVC_I3C_IBIRULES);
1151
1152 return 0;
1153 }
1154
static int svc_i3c_master_do_daa(struct i3c_master_controller *m)
1156 {
1157 struct svc_i3c_master *master = to_svc_i3c_master(m);
1158 u8 addrs[SVC_I3C_MAX_DEVS];
1159 unsigned long flags;
1160 unsigned int dev_nb;
1161 int ret, i;
1162
1163 ret = pm_runtime_resume_and_get(master->dev);
1164 if (ret < 0) {
1165 dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1166 return ret;
1167 }
1168
1169 spin_lock_irqsave(&master->xferqueue.lock, flags);
1170
1171 if (svc_has_daa_corrupt(master))
1172 writel(master->mctrl_config | SVC_I3C_MCONFIG_SKEW(1),
1173 master->regs + SVC_I3C_MCONFIG);
1174
1175 ret = svc_i3c_master_do_daa_locked(master, addrs, &dev_nb);
1176
1177 if (svc_has_daa_corrupt(master))
1178 writel(master->mctrl_config, master->regs + SVC_I3C_MCONFIG);
1179
1180 spin_unlock_irqrestore(&master->xferqueue.lock, flags);
1181
1182 svc_i3c_master_clear_merrwarn(master);
1183 if (ret)
1184 goto rpm_out;
1185
1186 /*
 * Register with the core all devices that participated in DAA
1188 *
1189 * If two devices (A and B) are detected in DAA and address 0xa is assigned to
1190 * device A and 0xb to device B, a failure in i3c_master_add_i3c_dev_locked()
1191 * for device A (addr: 0xa) could prevent device B (addr: 0xb) from being
1192 * registered on the bus. The I3C stack might still consider 0xb a free
1193 * address. If a subsequent Hotjoin occurs, 0xb might be assigned to Device A,
1194 * causing both devices A and B to use the same address 0xb, violating the I3C
1195 * specification.
1196 *
1197 * The return value for i3c_master_add_i3c_dev_locked() should not be checked
1198 * because subsequent steps will scan the entire I3C bus, independent of
1199 * whether i3c_master_add_i3c_dev_locked() returns success.
1200 *
1201 * If device A registration fails, there is still a chance to register device
1202 * B. i3c_master_add_i3c_dev_locked() can reset DAA if a failure occurs while
1203 * retrieving device information.
1204 */
1205 for (i = 0; i < dev_nb; i++)
1206 i3c_master_add_i3c_dev_locked(m, addrs[i]);
1207
1208 /* Configure IBI auto-rules */
1209 ret = svc_i3c_update_ibirules(master);
1210 if (ret)
1211 dev_err(master->dev, "Cannot handle such a list of devices");
1212
1213 rpm_out:
1214 pm_runtime_mark_last_busy(master->dev);
1215 pm_runtime_put_autosuspend(master->dev);
1216
1217 return ret;
1218 }
1219
static int svc_i3c_master_read(struct svc_i3c_master *master,
1221 u8 *in, unsigned int len)
1222 {
1223 int offset = 0, i;
1224 u32 mdctrl, mstatus;
1225 bool completed = false;
1226 unsigned int count;
1227 unsigned long start = jiffies;
1228
1229 while (!completed) {
1230 mstatus = readl(master->regs + SVC_I3C_MSTATUS);
1231 if (SVC_I3C_MSTATUS_COMPLETE(mstatus) != 0)
1232 completed = true;
1233
1234 if (time_after(jiffies, start + msecs_to_jiffies(1000))) {
1235 dev_dbg(master->dev, "I3C read timeout\n");
1236 return -ETIMEDOUT;
1237 }
1238
1239 mdctrl = readl(master->regs + SVC_I3C_MDATACTRL);
1240 count = SVC_I3C_MDATACTRL_RXCOUNT(mdctrl);
1241 if (offset + count > len) {
1242 dev_err(master->dev, "I3C receive length too long!\n");
1243 return -EINVAL;
1244 }
1245 for (i = 0; i < count; i++)
1246 in[offset + i] = readl(master->regs + SVC_I3C_MRDATAB);
1247
1248 offset += count;
1249 }
1250
1251 return offset;
1252 }
1253
static int svc_i3c_master_write(struct svc_i3c_master *master,
1255 const u8 *out, unsigned int len)
1256 {
1257 int offset = 0, ret;
1258 u32 mdctrl;
1259
1260 while (offset < len) {
1261 ret = readl_poll_timeout(master->regs + SVC_I3C_MDATACTRL,
1262 mdctrl,
1263 !(mdctrl & SVC_I3C_MDATACTRL_TXFULL),
1264 0, 1000);
1265 if (ret)
1266 return ret;
1267
1268 /*
1269 * The last byte to be sent over the bus must either have the
1270 * "end" bit set or be written in MWDATABE.
1271 */
1272 if (likely(offset < (len - 1)))
1273 writel(out[offset++], master->regs + SVC_I3C_MWDATAB);
1274 else
1275 writel(out[offset++], master->regs + SVC_I3C_MWDATABE);
1276 }
1277
1278 return 0;
1279 }
1280
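/*
 * Perform one START/repeated-START + address + data phase on the bus.
 * rnw selects read vs write, xfer_type selects I3C vs legacy I2C framing,
 * actual_len returns the number of bytes really transferred (and is used
 * as RDTERM for reads), and continued indicates that another command
 * follows with a repeated START instead of a STOP.
 */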
static int svc_i3c_master_xfer(struct svc_i3c_master *master,
1282 bool rnw, unsigned int xfer_type, u8 addr,
1283 u8 *in, const u8 *out, unsigned int xfer_len,
1284 unsigned int *actual_len, bool continued)
1285 {
1286 int retry = 2;
1287 u32 reg;
1288 int ret;
1289
1290 /* clean SVC_I3C_MINT_IBIWON w1c bits */
1291 writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
1292
1293
1294 while (retry--) {
1295 writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
1296 xfer_type |
1297 SVC_I3C_MCTRL_IBIRESP_NACK |
1298 SVC_I3C_MCTRL_DIR(rnw) |
1299 SVC_I3C_MCTRL_ADDR(addr) |
1300 SVC_I3C_MCTRL_RDTERM(*actual_len),
1301 master->regs + SVC_I3C_MCTRL);
1302
1303 /*
1304 * The entire transaction can consist of multiple write transfers.
1305 * Prefilling before EmitStartAddr causes the data to be emitted
1306 * immediately, becoming part of the previous transfer.
1307 * The only way to work around this hardware issue is to let the
1308 * FIFO start filling as soon as possible after EmitStartAddr.
1309 */
1310 if (svc_has_quirk(master, SVC_I3C_QUIRK_FIFO_EMPTY) && !rnw && xfer_len) {
1311 u32 end = xfer_len > SVC_I3C_FIFO_SIZE ? 0 : SVC_I3C_MWDATAB_END;
1312 u32 len = min_t(u32, xfer_len, SVC_I3C_FIFO_SIZE);
1313
1314 writesb(master->regs + SVC_I3C_MWDATAB1, out, len - 1);
1315 /* Mark END bit if this is the last byte */
1316 writel(out[len - 1] | end, master->regs + SVC_I3C_MWDATAB);
1317 xfer_len -= len;
1318 out += len;
1319 }
1320
1321 ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
1322 SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000);
1323 if (ret)
1324 goto emit_stop;
1325
1326 /*
1327 * According to I3C spec ver 1.1.1, 5.1.2.2.3 Consequence of Controller Starting a
1328 * Frame with I3C Target Address.
1329 *
1330 * The I3C Controller normally should start a Frame, the Address may be arbitrated,
1331 * and so the Controller shall monitor to see whether an In-Band Interrupt request,
1332 * a Controller Role Request (i.e., Secondary Controller requests to become the
1333 * Active Controller), or a Hot-Join Request has been made.
1334 *
 * If the IBIWON check is missed, wrong data will be returned. When IBIWON
 * happens, issue a repeated START. Address arbitration only happens at START,
 * never at repeated START.
1338 */
1339 if (SVC_I3C_MSTATUS_IBIWON(reg)) {
1340 ret = svc_i3c_master_handle_ibi_won(master, reg);
1341 if (ret)
1342 goto emit_stop;
1343 continue;
1344 }
1345
1346 if (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_NACK) {
1347 /*
1348 * According to I3C Spec 1.1.1, 11-Jun-2021, section: 5.1.2.2.3.
1349 * If the Controller chooses to start an I3C Message with an I3C Dynamic
1350 * Address, then special provisions shall be made because that same I3C
1351 * Target may be initiating an IBI or a Controller Role Request. So, one of
1352 * three things may happen: (skip 1, 2)
1353 *
1354 * 3. The Addresses match and the RnW bits also match, and so neither
1355 * Controller nor Target will ACK since both are expecting the other side to
1356 * provide ACK. As a result, each side might think it had "won" arbitration,
1357 * but neither side would continue, as each would subsequently see that the
1358 * other did not provide ACK.
1359 * ...
1360 * For either value of RnW: Due to the NACK, the Controller shall defer the
1361 * Private Write or Private Read, and should typically transmit the Target
1362 * Address again after a Repeated START (i.e., the next one or any one prior
1363 * to a STOP in the Frame). Since the Address Header following a Repeated
1364 * START is not arbitrated, the Controller will always win (see Section
1365 * 5.1.2.2.4).
1366 */
1367 if (retry && addr != 0x7e) {
1368 writel(SVC_I3C_MERRWARN_NACK, master->regs + SVC_I3C_MERRWARN);
1369 } else {
1370 ret = -ENXIO;
1371 *actual_len = 0;
1372 goto emit_stop;
1373 }
1374 } else {
1375 break;
1376 }
1377 }
1378
1379 if (rnw)
1380 ret = svc_i3c_master_read(master, in, xfer_len);
1381 else
1382 ret = svc_i3c_master_write(master, out, xfer_len);
1383 if (ret < 0)
1384 goto emit_stop;
1385
1386 if (rnw)
1387 *actual_len = ret;
1388
1389 ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
1390 SVC_I3C_MSTATUS_COMPLETE(reg), 0, 1000);
1391 if (ret)
1392 goto emit_stop;
1393
1394 writel(SVC_I3C_MINT_COMPLETE, master->regs + SVC_I3C_MSTATUS);
1395
1396 if (!continued) {
1397 svc_i3c_master_emit_stop(master);
1398
/* Wait for the idle state if a STOP was sent. */
1400 readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
1401 SVC_I3C_MSTATUS_STATE_IDLE(reg), 0, 1000);
1402 }
1403
1404 return 0;
1405
1406 emit_stop:
1407 svc_i3c_master_emit_stop(master);
1408 svc_i3c_master_clear_merrwarn(master);
1409 svc_i3c_master_flush_fifo(master);
1410
1411 return ret;
1412 }
1413
1414 static struct svc_i3c_xfer *
svc_i3c_master_alloc_xfer(struct svc_i3c_master *master, unsigned int ncmds)
1416 {
1417 struct svc_i3c_xfer *xfer;
1418
1419 xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
1420 if (!xfer)
1421 return NULL;
1422
1423 INIT_LIST_HEAD(&xfer->node);
1424 xfer->ncmds = ncmds;
1425 xfer->ret = -ETIMEDOUT;
1426
1427 return xfer;
1428 }
1429
static void svc_i3c_master_free_xfer(struct svc_i3c_xfer *xfer)
1431 {
1432 kfree(xfer);
1433 }
1434
static void svc_i3c_master_dequeue_xfer_locked(struct svc_i3c_master *master,
1436 struct svc_i3c_xfer *xfer)
1437 {
1438 if (master->xferqueue.cur == xfer)
1439 master->xferqueue.cur = NULL;
1440 else
1441 list_del_init(&xfer->node);
1442 }
1443
static void svc_i3c_master_dequeue_xfer(struct svc_i3c_master *master,
1445 struct svc_i3c_xfer *xfer)
1446 {
1447 unsigned long flags;
1448
1449 spin_lock_irqsave(&master->xferqueue.lock, flags);
1450 svc_i3c_master_dequeue_xfer_locked(master, xfer);
1451 spin_unlock_irqrestore(&master->xferqueue.lock, flags);
1452 }
1453
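/* Must be called with the xferqueue lock held */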
static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
1455 {
1456 struct svc_i3c_xfer *xfer = master->xferqueue.cur;
1457 int ret, i;
1458
1459 if (!xfer)
1460 return;
1461
1462 svc_i3c_master_clear_merrwarn(master);
1463 svc_i3c_master_flush_fifo(master);
1464
1465 for (i = 0; i < xfer->ncmds; i++) {
1466 struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1467
1468 ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type,
1469 cmd->addr, cmd->in, cmd->out,
1470 cmd->len, &cmd->actual_len,
1471 cmd->continued);
/* cmd->xfer is NULL for I2C or CCC transfers */
1473 if (cmd->xfer)
1474 cmd->xfer->actual_len = cmd->actual_len;
1475
1476 if (ret)
1477 break;
1478 }
1479
1480 xfer->ret = ret;
1481 complete(&xfer->comp);
1482
1483 if (ret < 0)
1484 svc_i3c_master_dequeue_xfer_locked(master, xfer);
1485
1486 xfer = list_first_entry_or_null(&master->xferqueue.list,
1487 struct svc_i3c_xfer,
1488 node);
1489 if (xfer)
1490 list_del_init(&xfer->node);
1491
1492 master->xferqueue.cur = xfer;
1493 svc_i3c_master_start_xfer_locked(master);
1494 }
1495
static void svc_i3c_master_enqueue_xfer(struct svc_i3c_master *master,
1497 struct svc_i3c_xfer *xfer)
1498 {
1499 unsigned long flags;
1500 int ret;
1501
1502 ret = pm_runtime_resume_and_get(master->dev);
1503 if (ret < 0) {
1504 dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
1505 return;
1506 }
1507
1508 init_completion(&xfer->comp);
1509 spin_lock_irqsave(&master->xferqueue.lock, flags);
1510 if (master->xferqueue.cur) {
1511 list_add_tail(&xfer->node, &master->xferqueue.list);
1512 } else {
1513 master->xferqueue.cur = xfer;
1514 svc_i3c_master_start_xfer_locked(master);
1515 }
1516 spin_unlock_irqrestore(&master->xferqueue.lock, flags);
1517
1518 pm_runtime_mark_last_busy(master->dev);
1519 pm_runtime_put_autosuspend(master->dev);
1520 }
1521
1522 static bool
svc_i3c_master_supports_ccc_cmd(struct i3c_master_controller *master,
1524 const struct i3c_ccc_cmd *cmd)
1525 {
1526 /* No software support for CCC commands targeting more than one slave */
1527 return (cmd->ndests == 1);
1528 }
1529
static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
1531 struct i3c_ccc_cmd *ccc)
1532 {
1533 unsigned int xfer_len = ccc->dests[0].payload.len + 1;
1534 struct svc_i3c_xfer *xfer;
1535 struct svc_i3c_cmd *cmd;
1536 u8 *buf;
1537 int ret;
1538
1539 xfer = svc_i3c_master_alloc_xfer(master, 1);
1540 if (!xfer)
1541 return -ENOMEM;
1542
1543 buf = kmalloc(xfer_len, GFP_KERNEL);
1544 if (!buf) {
1545 svc_i3c_master_free_xfer(xfer);
1546 return -ENOMEM;
1547 }
1548
1549 buf[0] = ccc->id;
1550 memcpy(&buf[1], ccc->dests[0].payload.data, ccc->dests[0].payload.len);
1551
1552 xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1553
1554 cmd = &xfer->cmds[0];
1555 cmd->addr = ccc->dests[0].addr;
1556 cmd->rnw = ccc->rnw;
1557 cmd->in = NULL;
1558 cmd->out = buf;
1559 cmd->len = xfer_len;
1560 cmd->actual_len = 0;
1561 cmd->continued = false;
1562
1563 mutex_lock(&master->lock);
1564 svc_i3c_master_enqueue_xfer(master, xfer);
1565 if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1566 svc_i3c_master_dequeue_xfer(master, xfer);
1567 mutex_unlock(&master->lock);
1568
1569 ret = xfer->ret;
1570 kfree(buf);
1571 svc_i3c_master_free_xfer(xfer);
1572
1573 return ret;
1574 }
1575
static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
1577 struct i3c_ccc_cmd *ccc)
1578 {
1579 unsigned int xfer_len = ccc->dests[0].payload.len;
1580 unsigned int actual_len = ccc->rnw ? xfer_len : 0;
1581 struct svc_i3c_xfer *xfer;
1582 struct svc_i3c_cmd *cmd;
1583 int ret;
1584
1585 xfer = svc_i3c_master_alloc_xfer(master, 2);
1586 if (!xfer)
1587 return -ENOMEM;
1588
1589 xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1590
1591 /* Broadcasted message */
1592 cmd = &xfer->cmds[0];
1593 cmd->addr = I3C_BROADCAST_ADDR;
1594 cmd->rnw = 0;
1595 cmd->in = NULL;
1596 cmd->out = &ccc->id;
1597 cmd->len = 1;
1598 cmd->actual_len = 0;
1599 cmd->continued = true;
1600
1601 /* Directed message */
1602 cmd = &xfer->cmds[1];
1603 cmd->addr = ccc->dests[0].addr;
1604 cmd->rnw = ccc->rnw;
1605 cmd->in = ccc->rnw ? ccc->dests[0].payload.data : NULL;
1606 cmd->out = ccc->rnw ? NULL : ccc->dests[0].payload.data;
1607 cmd->len = xfer_len;
1608 cmd->actual_len = actual_len;
1609 cmd->continued = false;
1610
1611 mutex_lock(&master->lock);
1612 svc_i3c_master_enqueue_xfer(master, xfer);
1613 if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1614 svc_i3c_master_dequeue_xfer(master, xfer);
1615 mutex_unlock(&master->lock);
1616
1617 if (cmd->actual_len != xfer_len)
1618 ccc->dests[0].payload.len = cmd->actual_len;
1619
1620 ret = xfer->ret;
1621 svc_i3c_master_free_xfer(xfer);
1622
1623 return ret;
1624 }
1625
static int svc_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
1627 struct i3c_ccc_cmd *cmd)
1628 {
1629 struct svc_i3c_master *master = to_svc_i3c_master(m);
1630 bool broadcast = cmd->id < 0x80;
1631 int ret;
1632
1633 if (broadcast)
1634 ret = svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
1635 else
1636 ret = svc_i3c_master_send_direct_ccc_cmd(master, cmd);
1637
1638 if (ret)
1639 cmd->err = I3C_ERROR_M2;
1640
1641 return ret;
1642 }
1643
static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
1645 struct i3c_priv_xfer *xfers,
1646 int nxfers)
1647 {
1648 struct i3c_master_controller *m = i3c_dev_get_master(dev);
1649 struct svc_i3c_master *master = to_svc_i3c_master(m);
1650 struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
1651 struct svc_i3c_xfer *xfer;
1652 int ret, i;
1653
1654 xfer = svc_i3c_master_alloc_xfer(master, nxfers);
1655 if (!xfer)
1656 return -ENOMEM;
1657
1658 xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
1659
1660 for (i = 0; i < nxfers; i++) {
1661 struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1662
1663 cmd->xfer = &xfers[i];
1664 cmd->addr = master->addrs[data->index];
1665 cmd->rnw = xfers[i].rnw;
1666 cmd->in = xfers[i].rnw ? xfers[i].data.in : NULL;
1667 cmd->out = xfers[i].rnw ? NULL : xfers[i].data.out;
1668 cmd->len = xfers[i].len;
1669 cmd->actual_len = xfers[i].rnw ? xfers[i].len : 0;
1670 cmd->continued = (i + 1) < nxfers;
1671 }
1672
1673 mutex_lock(&master->lock);
1674 svc_i3c_master_enqueue_xfer(master, xfer);
1675 if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1676 svc_i3c_master_dequeue_xfer(master, xfer);
1677 mutex_unlock(&master->lock);
1678
1679 ret = xfer->ret;
1680 svc_i3c_master_free_xfer(xfer);
1681
1682 return ret;
1683 }
1684
static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
1686 struct i2c_msg *xfers,
1687 int nxfers)
1688 {
1689 struct i3c_master_controller *m = i2c_dev_get_master(dev);
1690 struct svc_i3c_master *master = to_svc_i3c_master(m);
1691 struct svc_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
1692 struct svc_i3c_xfer *xfer;
1693 int ret, i;
1694
1695 xfer = svc_i3c_master_alloc_xfer(master, nxfers);
1696 if (!xfer)
1697 return -ENOMEM;
1698
1699 xfer->type = SVC_I3C_MCTRL_TYPE_I2C;
1700
1701 for (i = 0; i < nxfers; i++) {
1702 struct svc_i3c_cmd *cmd = &xfer->cmds[i];
1703
1704 cmd->addr = master->addrs[data->index];
1705 cmd->rnw = xfers[i].flags & I2C_M_RD;
1706 cmd->in = cmd->rnw ? xfers[i].buf : NULL;
1707 cmd->out = cmd->rnw ? NULL : xfers[i].buf;
1708 cmd->len = xfers[i].len;
1709 cmd->actual_len = cmd->rnw ? xfers[i].len : 0;
1710 cmd->continued = (i + 1 < nxfers);
1711 }
1712
1713 mutex_lock(&master->lock);
1714 svc_i3c_master_enqueue_xfer(master, xfer);
1715 if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
1716 svc_i3c_master_dequeue_xfer(master, xfer);
1717 mutex_unlock(&master->lock);
1718
1719 ret = xfer->ret;
1720 svc_i3c_master_free_xfer(xfer);
1721
1722 return ret;
1723 }
1724
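/*
 * Allocate the generic IBI payload pool and reserve a free IBI slot for the
 * device. The maximum IBI payload is bounded by the controller FIFO size.
 */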
static int svc_i3c_master_request_ibi(struct i3c_dev_desc *dev,
				      const struct i3c_ibi_setup *req)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;
	unsigned int i;

	if (dev->ibi->max_payload_len > SVC_I3C_FIFO_SIZE) {
		dev_err(master->dev, "IBI max payload %d should be < %d\n",
			dev->ibi->max_payload_len, SVC_I3C_FIFO_SIZE);
		return -ERANGE;
	}

	data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
	if (IS_ERR(data->ibi_pool))
		return PTR_ERR(data->ibi_pool);

	spin_lock_irqsave(&master->ibi.lock, flags);
	for (i = 0; i < master->ibi.num_slots; i++) {
		if (!master->ibi.slots[i]) {
			data->ibi = i;
			master->ibi.slots[i] = dev;
			break;
		}
	}
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	if (i < master->ibi.num_slots)
		return 0;

	i3c_generic_ibi_free_pool(data->ibi_pool);
	data->ibi_pool = NULL;

	return -ENOSPC;
}

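/* Release the device's IBI slot and free its payload pool. */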
static void svc_i3c_master_free_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
	unsigned long flags;

	spin_lock_irqsave(&master->ibi.lock, flags);
	master->ibi.slots[data->ibi] = NULL;
	data->ibi = -1;
	spin_unlock_irqrestore(&master->ibi.lock, flags);

	i3c_generic_ibi_free_pool(data->ibi_pool);
}

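/*
 * Keep the controller awake through runtime PM, arm the SLVSTART interrupt
 * and allow the target to emit SIRs via an ENEC CCC.
 */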
static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return ret;
	}

	master->enabled_events++;
	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);

	return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
}

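/*
 * Mirror of svc_i3c_master_enable_ibi(): disable target SIRs with a DISEC
 * CCC, mask interrupts once no event source remains, and drop the runtime
 * PM reference.
 */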
static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	int ret;

	master->enabled_events--;
	if (!master->enabled_events)
		svc_i3c_master_disable_interrupts(master);

	ret = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);

	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);

	return ret;
}

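/* Start accepting Hot-Join requests by arming the SLVSTART interrupt. */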
static int svc_i3c_master_enable_hotjoin(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);
	int ret;

	ret = pm_runtime_resume_and_get(master->dev);
	if (ret < 0) {
		dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
		return ret;
	}

	master->enabled_events |= SVC_I3C_EVENT_HOTJOIN;

	svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);

	return 0;
}

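/* Stop handling Hot-Join events and release the runtime PM reference. */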
static int svc_i3c_master_disable_hotjoin(struct i3c_master_controller *m)
{
	struct svc_i3c_master *master = to_svc_i3c_master(m);

	master->enabled_events &= ~SVC_I3C_EVENT_HOTJOIN;

	if (!master->enabled_events)
		svc_i3c_master_disable_interrupts(master);

	pm_runtime_mark_last_busy(master->dev);
	pm_runtime_put_autosuspend(master->dev);

	return 0;
}

static void svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
					    struct i3c_ibi_slot *slot)
{
	struct svc_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);

	i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
}

static const struct i3c_master_controller_ops svc_i3c_master_ops = {
	.bus_init = svc_i3c_master_bus_init,
	.bus_cleanup = svc_i3c_master_bus_cleanup,
	.attach_i3c_dev = svc_i3c_master_attach_i3c_dev,
	.detach_i3c_dev = svc_i3c_master_detach_i3c_dev,
	.reattach_i3c_dev = svc_i3c_master_reattach_i3c_dev,
	.attach_i2c_dev = svc_i3c_master_attach_i2c_dev,
	.detach_i2c_dev = svc_i3c_master_detach_i2c_dev,
	.do_daa = svc_i3c_master_do_daa,
	.supports_ccc_cmd = svc_i3c_master_supports_ccc_cmd,
	.send_ccc_cmd = svc_i3c_master_send_ccc_cmd,
	.priv_xfers = svc_i3c_master_priv_xfers,
	.i2c_xfers = svc_i3c_master_i2c_xfers,
	.request_ibi = svc_i3c_master_request_ibi,
	.free_ibi = svc_i3c_master_free_ibi,
	.recycle_ibi_slot = svc_i3c_master_recycle_ibi_slot,
	.enable_ibi = svc_i3c_master_enable_ibi,
	.disable_ibi = svc_i3c_master_disable_ibi,
	.enable_hotjoin = svc_i3c_master_enable_hotjoin,
	.disable_hotjoin = svc_i3c_master_disable_hotjoin,
	.set_speed = svc_i3c_master_set_speed,
};

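/* Prepare and enable the controller clocks (pclk, fclk, sclk), unwinding on failure. */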
static int svc_i3c_master_prepare_clks(struct svc_i3c_master *master)
{
	int ret = 0;

	ret = clk_prepare_enable(master->pclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(master->fclk);
	if (ret) {
		clk_disable_unprepare(master->pclk);
		return ret;
	}

	ret = clk_prepare_enable(master->sclk);
	if (ret) {
		clk_disable_unprepare(master->pclk);
		clk_disable_unprepare(master->fclk);
		return ret;
	}

	return 0;
}

static void svc_i3c_master_unprepare_clks(struct svc_i3c_master *master)
{
	clk_disable_unprepare(master->pclk);
	clk_disable_unprepare(master->fclk);
	clk_disable_unprepare(master->sclk);
}

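/*
 * Probe: map the register space, grab clocks and the interrupt, initialize
 * the transfer queue and IBI slots, enable runtime PM and register the
 * controller with the I3C core.
 */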
static int svc_i3c_master_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct svc_i3c_master *master;
	int ret;

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->drvdata = of_device_get_match_data(dev);
	if (!master->drvdata)
		return -EINVAL;

	master->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(master->regs))
		return PTR_ERR(master->regs);

	master->pclk = devm_clk_get(dev, "pclk");
	if (IS_ERR(master->pclk))
		return PTR_ERR(master->pclk);

	master->fclk = devm_clk_get(dev, "fast_clk");
	if (IS_ERR(master->fclk))
		return PTR_ERR(master->fclk);

	master->sclk = devm_clk_get(dev, "slow_clk");
	if (IS_ERR(master->sclk))
		return PTR_ERR(master->sclk);

	master->irq = platform_get_irq(pdev, 0);
	if (master->irq < 0)
		return master->irq;

	master->dev = dev;

	ret = svc_i3c_master_prepare_clks(master);
	if (ret)
		return ret;

	INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
	INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
	mutex_init(&master->lock);

	ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
			       IRQF_NO_SUSPEND, "svc-i3c-irq", master);
	if (ret)
		goto err_disable_clks;

	master->free_slots = GENMASK(SVC_I3C_MAX_DEVS - 1, 0);

	spin_lock_init(&master->xferqueue.lock);
	INIT_LIST_HEAD(&master->xferqueue.list);

	spin_lock_init(&master->ibi.lock);
	master->ibi.num_slots = SVC_I3C_MAX_DEVS;
	master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
					 sizeof(*master->ibi.slots),
					 GFP_KERNEL);
	if (!master->ibi.slots) {
		ret = -ENOMEM;
		goto err_disable_clks;
	}

	platform_set_drvdata(pdev, master);

	pm_runtime_set_autosuspend_delay(&pdev->dev, SVC_I3C_PM_TIMEOUT_MS);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	svc_i3c_master_reset(master);

	/* Register the master */
	ret = i3c_master_register(&master->base, &pdev->dev,
				  &svc_i3c_master_ops, false);
	if (ret)
		goto rpm_disable;

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

rpm_disable:
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);

err_disable_clks:
	svc_i3c_master_unprepare_clks(master);

	return ret;
}

static void svc_i3c_master_remove(struct platform_device *pdev)
{
	struct svc_i3c_master *master = platform_get_drvdata(pdev);

	cancel_work_sync(&master->hj_work);
	i3c_master_unregister(&master->base);

	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}

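/* Save the registers that must survive runtime suspend. */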
static void svc_i3c_save_regs(struct svc_i3c_master *master)
{
	master->saved_regs.mconfig = readl(master->regs + SVC_I3C_MCONFIG);
	master->saved_regs.mdynaddr = readl(master->regs + SVC_I3C_MDYNADDR);
}

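/*
 * Only rewrite MCONFIG/MDYNADDR when the dynamic address register no longer
 * matches the saved value, i.e. the controller actually lost its state.
 */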
static void svc_i3c_restore_regs(struct svc_i3c_master *master)
{
	if (readl(master->regs + SVC_I3C_MDYNADDR) !=
	    master->saved_regs.mdynaddr) {
		writel(master->saved_regs.mconfig,
		       master->regs + SVC_I3C_MCONFIG);
		writel(master->saved_regs.mdynaddr,
		       master->regs + SVC_I3C_MDYNADDR);
	}
}

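/* Runtime suspend: save state, gate the clocks and switch pins to sleep. */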
static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
{
	struct svc_i3c_master *master = dev_get_drvdata(dev);

	svc_i3c_save_regs(master);
	svc_i3c_master_unprepare_clks(master);
	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

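/* Runtime resume: restore the pinmux, re-enable clocks and restore registers. */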
static int __maybe_unused svc_i3c_runtime_resume(struct device *dev)
{
	struct svc_i3c_master *master = dev_get_drvdata(dev);

	pinctrl_pm_select_default_state(dev);
	svc_i3c_master_prepare_clks(master);

	svc_i3c_restore_regs(master);

	return 0;
}

static const struct dev_pm_ops svc_i3c_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				      pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(svc_i3c_runtime_suspend,
			   svc_i3c_runtime_resume, NULL)
};

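/* The Nuvoton NPCM845 integration of this controller needs several quirks. */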
static const struct svc_i3c_drvdata npcm845_drvdata = {
	.quirks = SVC_I3C_QUIRK_FIFO_EMPTY |
		SVC_I3C_QUIRK_FALSE_SLVSTART |
		SVC_I3C_QUIRK_DAA_CORRUPT,
};

static const struct svc_i3c_drvdata svc_default_drvdata = {};

static const struct of_device_id svc_i3c_master_of_match_tbl[] = {
	{ .compatible = "nuvoton,npcm845-i3c", .data = &npcm845_drvdata },
	{ .compatible = "silvaco,i3c-master-v1", .data = &svc_default_drvdata },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, svc_i3c_master_of_match_tbl);

static struct platform_driver svc_i3c_master = {
	.probe = svc_i3c_master_probe,
	.remove = svc_i3c_master_remove,
	.driver = {
		.name = "silvaco-i3c-master",
		.of_match_table = svc_i3c_master_of_match_tbl,
		.pm = &svc_i3c_pm_ops,
	},
};
module_platform_driver(svc_i3c_master);

MODULE_AUTHOR("Conor Culhane <conor.culhane@silvaco.com>");
MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
MODULE_DESCRIPTION("Silvaco dual-role I3C master driver");
MODULE_LICENSE("GPL v2");