// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
// Copyright (c) 2017-2022 Linaro Limited.

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#define CCI_HW_VERSION				0x0
#define CCI_RESET_CMD				0x004
#define CCI_RESET_CMD_MASK			0x0f73f3f7
#define CCI_RESET_CMD_M0_MASK			0x000003f1
#define CCI_RESET_CMD_M1_MASK			0x0003f001
#define CCI_QUEUE_START				0x008
#define CCI_HALT_REQ				0x034
#define CCI_HALT_REQ_I2C_M0_Q0Q1		BIT(0)
#define CCI_HALT_REQ_I2C_M1_Q0Q1		BIT(1)

#define CCI_I2C_Mm_SCL_CTL(m)			(0x100 + 0x100 * (m))
#define CCI_I2C_Mm_SDA_CTL_0(m)			(0x104 + 0x100 * (m))
#define CCI_I2C_Mm_SDA_CTL_1(m)			(0x108 + 0x100 * (m))
#define CCI_I2C_Mm_SDA_CTL_2(m)			(0x10c + 0x100 * (m))
#define CCI_I2C_Mm_MISC_CTL(m)			(0x110 + 0x100 * (m))

#define CCI_I2C_Mm_READ_DATA(m)			(0x118 + 0x100 * (m))
#define CCI_I2C_Mm_READ_BUF_LEVEL(m)		(0x11c + 0x100 * (m))
#define CCI_I2C_Mm_Qn_EXEC_WORD_CNT(m, n)	(0x300 + 0x200 * (m) + 0x100 * (n))
#define CCI_I2C_Mm_Qn_CUR_WORD_CNT(m, n)	(0x304 + 0x200 * (m) + 0x100 * (n))
#define CCI_I2C_Mm_Qn_CUR_CMD(m, n)		(0x308 + 0x200 * (m) + 0x100 * (n))
#define CCI_I2C_Mm_Qn_REPORT_STATUS(m, n)	(0x30c + 0x200 * (m) + 0x100 * (n))
#define CCI_I2C_Mm_Qn_LOAD_DATA(m, n)		(0x310 + 0x200 * (m) + 0x100 * (n))

#define CCI_IRQ_GLOBAL_CLEAR_CMD		0xc00
#define CCI_IRQ_MASK_0				0xc04
#define CCI_IRQ_MASK_0_I2C_M0_RD_DONE		BIT(0)
#define CCI_IRQ_MASK_0_I2C_M0_Q0_REPORT		BIT(4)
#define CCI_IRQ_MASK_0_I2C_M0_Q1_REPORT		BIT(8)
#define CCI_IRQ_MASK_0_I2C_M1_RD_DONE		BIT(12)
#define CCI_IRQ_MASK_0_I2C_M1_Q0_REPORT		BIT(16)
#define CCI_IRQ_MASK_0_I2C_M1_Q1_REPORT		BIT(20)
#define CCI_IRQ_MASK_0_RST_DONE_ACK		BIT(24)
#define CCI_IRQ_MASK_0_I2C_M0_Q0Q1_HALT_ACK	BIT(25)
#define CCI_IRQ_MASK_0_I2C_M1_Q0Q1_HALT_ACK	BIT(26)
#define CCI_IRQ_MASK_0_I2C_M0_ERROR		0x18000ee6
#define CCI_IRQ_MASK_0_I2C_M1_ERROR		0x60ee6000
#define CCI_IRQ_CLEAR_0				0xc08
#define CCI_IRQ_STATUS_0			0xc0c
#define CCI_IRQ_STATUS_0_I2C_M0_RD_DONE		BIT(0)
#define CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT	BIT(4)
#define CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT	BIT(8)
#define CCI_IRQ_STATUS_0_I2C_M1_RD_DONE		BIT(12)
#define CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT	BIT(16)
#define CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT	BIT(20)
#define CCI_IRQ_STATUS_0_RST_DONE_ACK		BIT(24)
#define CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK	BIT(25)
#define CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK	BIT(26)
#define CCI_IRQ_STATUS_0_I2C_M0_Q0_NACK_ERR	BIT(27)
#define CCI_IRQ_STATUS_0_I2C_M0_Q1_NACK_ERR	BIT(28)
#define CCI_IRQ_STATUS_0_I2C_M1_Q0_NACK_ERR	BIT(29)
#define CCI_IRQ_STATUS_0_I2C_M1_Q1_NACK_ERR	BIT(30)
#define CCI_IRQ_STATUS_0_I2C_M0_ERROR		0x18000ee6
#define CCI_IRQ_STATUS_0_I2C_M1_ERROR		0x60ee6000

#define CCI_TIMEOUT				(msecs_to_jiffies(100))
#define NUM_MASTERS				2
#define NUM_QUEUES				2

/* Max number of resources + 1 for a NULL terminator */
#define CCI_RES_MAX				6

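/*
 * I2C command codes written into a queue's LOAD_DATA FIFO. Bits [3:0] carry
 * the command and the upper bits its payload (slave address for SET_PARAM,
 * transfer length for WRITE/READ), as assembled in cci_i2c_read() and
 * cci_i2c_write() below.
 */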
#define CCI_I2C_SET_PARAM			1
#define CCI_I2C_REPORT				8
#define CCI_I2C_WRITE				9
#define CCI_I2C_READ				10

#define CCI_I2C_REPORT_IRQ_EN			BIT(8)

enum {
	I2C_MODE_STANDARD,
	I2C_MODE_FAST,
	I2C_MODE_FAST_PLUS,
};

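/*
 * Each master has two command queues; this driver issues writes on QUEUE_0
 * and reads on QUEUE_1.
 */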
enum cci_i2c_queue_t {
	QUEUE_0,
	QUEUE_1
};

struct hw_params {
	u16 thigh; /* HIGH period of the SCL clock in clock ticks */
	u16 tlow; /* LOW period of the SCL clock */
	u16 tsu_sto; /* set-up time for STOP condition */
	u16 tsu_sta; /* set-up time for a repeated START condition */
	u16 thd_dat; /* data hold time */
	u16 thd_sta; /* hold time (repeated) START condition */
	u16 tbuf; /* bus free time between a STOP and START condition */
	u8 scl_stretch_en;
	u16 trdhld;
	u16 tsp; /* pulse width of spikes suppressed by the input filter */
};

struct cci;

struct cci_master {
	struct i2c_adapter adap;
	u16 master;
	u8 mode;
	int status;
	struct completion irq_complete;
	struct cci *cci;
};

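/*
 * Per-SoC configuration: number of masters, adapter quirks, depth of each
 * command queue (in 32-bit words) and timing parameters for the supported
 * I2C modes.
 */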
struct cci_data {
	unsigned int num_masters;
	struct i2c_adapter_quirks quirks;
	u16 queue_size[NUM_QUEUES];
	struct hw_params params[3];
};

struct cci {
	struct device *dev;
	void __iomem *base;
	unsigned int irq;
	const struct cci_data *data;
	struct clk_bulk_data *clocks;
	int nclocks;
	struct cci_master master[NUM_MASTERS];
};

static irqreturn_t cci_isr(int irq, void *dev)
{
	struct cci *cci = dev;
	u32 val, reset = 0;
	int ret = IRQ_NONE;

	val = readl(cci->base + CCI_IRQ_STATUS_0);
	writel(val, cci->base + CCI_IRQ_CLEAR_0);
	writel(0x1, cci->base + CCI_IRQ_GLOBAL_CLEAR_CMD);

	if (val & CCI_IRQ_STATUS_0_RST_DONE_ACK) {
		complete(&cci->master[0].irq_complete);
		if (cci->master[1].master)
			complete(&cci->master[1].irq_complete);
		ret = IRQ_HANDLED;
	}

	if (val & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE ||
	    val & CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT ||
	    val & CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT) {
		cci->master[0].status = 0;
		complete(&cci->master[0].irq_complete);
		ret = IRQ_HANDLED;
	}

	if (val & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE ||
	    val & CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT ||
	    val & CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT) {
		cci->master[1].status = 0;
		complete(&cci->master[1].irq_complete);
		ret = IRQ_HANDLED;
	}

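	/*
	 * A halt ack follows a halt request issued by the error handling
	 * below; finish the recovery by resetting the affected master.
	 */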
	if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK)) {
		reset = CCI_RESET_CMD_M0_MASK;
		ret = IRQ_HANDLED;
	}

	if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK)) {
		reset = CCI_RESET_CMD_M1_MASK;
		ret = IRQ_HANDLED;
	}

	if (unlikely(reset))
		writel(reset, cci->base + CCI_RESET_CMD);

	if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M0_ERROR)) {
		if (val & CCI_IRQ_STATUS_0_I2C_M0_Q0_NACK_ERR ||
		    val & CCI_IRQ_STATUS_0_I2C_M0_Q1_NACK_ERR)
			cci->master[0].status = -ENXIO;
		else
			cci->master[0].status = -EIO;

		writel(CCI_HALT_REQ_I2C_M0_Q0Q1, cci->base + CCI_HALT_REQ);
		ret = IRQ_HANDLED;
	}

	if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M1_ERROR)) {
		if (val & CCI_IRQ_STATUS_0_I2C_M1_Q0_NACK_ERR ||
		    val & CCI_IRQ_STATUS_0_I2C_M1_Q1_NACK_ERR)
			cci->master[1].status = -ENXIO;
		else
			cci->master[1].status = -EIO;

		writel(CCI_HALT_REQ_I2C_M1_Q0Q1, cci->base + CCI_HALT_REQ);
		ret = IRQ_HANDLED;
	}

	return ret;
}

static int cci_halt(struct cci *cci, u8 master_num)
{
	struct cci_master *master;
	u32 val;

	if (master_num >= cci->data->num_masters) {
		dev_err(cci->dev, "Unsupported master idx (%u)\n", master_num);
		return -EINVAL;
	}

	val = BIT(master_num);
	master = &cci->master[master_num];

	reinit_completion(&master->irq_complete);
	writel(val, cci->base + CCI_HALT_REQ);

	if (!wait_for_completion_timeout(&master->irq_complete, CCI_TIMEOUT)) {
		dev_err(cci->dev, "CCI halt timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int cci_reset(struct cci *cci)
{
	/*
	 * Reset the whole controller here and, for simplicity, wait for
	 * completion on master[0].irq_complete.
	 */
	reinit_completion(&cci->master[0].irq_complete);
	writel(CCI_RESET_CMD_MASK, cci->base + CCI_RESET_CMD);

	if (!wait_for_completion_timeout(&cci->master[0].irq_complete,
					 CCI_TIMEOUT)) {
		dev_err(cci->dev, "CCI reset timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int cci_init(struct cci *cci)
{
	u32 val = CCI_IRQ_MASK_0_I2C_M0_RD_DONE |
		  CCI_IRQ_MASK_0_I2C_M0_Q0_REPORT |
		  CCI_IRQ_MASK_0_I2C_M0_Q1_REPORT |
		  CCI_IRQ_MASK_0_I2C_M1_RD_DONE |
		  CCI_IRQ_MASK_0_I2C_M1_Q0_REPORT |
		  CCI_IRQ_MASK_0_I2C_M1_Q1_REPORT |
		  CCI_IRQ_MASK_0_RST_DONE_ACK |
		  CCI_IRQ_MASK_0_I2C_M0_Q0Q1_HALT_ACK |
		  CCI_IRQ_MASK_0_I2C_M1_Q0Q1_HALT_ACK |
		  CCI_IRQ_MASK_0_I2C_M0_ERROR |
		  CCI_IRQ_MASK_0_I2C_M1_ERROR;
	int i;

	writel(val, cci->base + CCI_IRQ_MASK_0);

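	/* Program the per-master SCL/SDA timings for the configured I2C mode. */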
	for (i = 0; i < cci->data->num_masters; i++) {
		int mode = cci->master[i].mode;
		const struct hw_params *hw;

		if (!cci->master[i].cci)
			continue;

		hw = &cci->data->params[mode];

		val = hw->thigh << 16 | hw->tlow;
		writel(val, cci->base + CCI_I2C_Mm_SCL_CTL(i));

		val = hw->tsu_sto << 16 | hw->tsu_sta;
		writel(val, cci->base + CCI_I2C_Mm_SDA_CTL_0(i));

		val = hw->thd_dat << 16 | hw->thd_sta;
		writel(val, cci->base + CCI_I2C_Mm_SDA_CTL_1(i));

		val = hw->tbuf;
		writel(val, cci->base + CCI_I2C_Mm_SDA_CTL_2(i));

		val = hw->scl_stretch_en << 8 | hw->trdhld << 4 | hw->tsp;
		writel(val, cci->base + CCI_I2C_Mm_MISC_CTL(i));
	}

	return 0;
}

static int cci_run_queue(struct cci *cci, u8 master, u8 queue)
{
	u32 val;

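	/*
	 * Tell the hardware to execute every command word currently queued,
	 * then kick the queue via the QUEUE_START doorbell.
	 */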
	val = readl(cci->base + CCI_I2C_Mm_Qn_CUR_WORD_CNT(master, queue));
	writel(val, cci->base + CCI_I2C_Mm_Qn_EXEC_WORD_CNT(master, queue));

	reinit_completion(&cci->master[master].irq_complete);
	val = BIT(master * 2 + queue);
	writel(val, cci->base + CCI_QUEUE_START);

	if (!wait_for_completion_timeout(&cci->master[master].irq_complete,
					 CCI_TIMEOUT)) {
		dev_err(cci->dev, "master %d queue %d timeout\n",
			master, queue);
		cci_reset(cci);
		cci_init(cci);
		return -ETIMEDOUT;
	}

	return cci->master[master].status;
}

static int cci_validate_queue(struct cci *cci, u8 master, u8 queue)
{
	u32 val;

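	/*
	 * A full queue is an error and an empty queue needs no action;
	 * otherwise flush what is pending by appending a REPORT command
	 * and running the queue.
	 */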
	val = readl(cci->base + CCI_I2C_Mm_Qn_CUR_WORD_CNT(master, queue));
	if (val == cci->data->queue_size[queue])
		return -EINVAL;

	if (!val)
		return 0;

	val = CCI_I2C_REPORT | CCI_I2C_REPORT_IRQ_EN;
	writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));

	return cci_run_queue(cci, master, queue);
}

static int cci_i2c_read(struct cci *cci, u16 master,
			u16 addr, u8 *buf, u16 len)
{
	u32 val, words_read, words_exp;
	u8 queue = QUEUE_1;
	int i, index = 0, ret;
	bool first = true;

	/*
	 * Call validate queue to make sure queue is empty before starting.
	 * This is to avoid overflow / underflow of queue.
	 */
	ret = cci_validate_queue(cci, master, queue);
	if (ret < 0)
		return ret;

	val = CCI_I2C_SET_PARAM | (addr & 0x7f) << 4;
	writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));

	val = CCI_I2C_READ | len << 4;
	writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));

	ret = cci_run_queue(cci, master, queue);
	if (ret < 0)
		return ret;

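	/*
	 * The read FIFO packs four bytes per 32-bit word; the first byte of
	 * the first word is skipped in the unpacking loop below, hence
	 * len / 4 + 1 words are expected.
	 */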
	words_read = readl(cci->base + CCI_I2C_Mm_READ_BUF_LEVEL(master));
	words_exp = len / 4 + 1;
	if (words_read != words_exp) {
		dev_err(cci->dev, "words read = %d, words expected = %d\n",
			words_read, words_exp);
		return -EIO;
	}

	do {
		val = readl(cci->base + CCI_I2C_Mm_READ_DATA(master));

		for (i = 0; i < 4 && index < len; i++) {
			if (first) {
				/* The LS byte of this register represents the
				 * first byte read from the slave during a read
				 * access.
				 */
				first = false;
				continue;
			}
			buf[index++] = (val >> (i * 8)) & 0xff;
		}
	} while (--words_read);

	return 0;
}

static int cci_i2c_write(struct cci *cci, u16 master,
			 u16 addr, u8 *buf, u16 len)
{
	u8 queue = QUEUE_0;
	u8 load[12] = { 0 };
	int i = 0, j, ret;
	u32 val;

	/*
	 * Call validate queue to make sure queue is empty before starting.
	 * This is to avoid overflow / underflow of queue.
	 */
	ret = cci_validate_queue(cci, master, queue);
	if (ret < 0)
		return ret;

	val = CCI_I2C_SET_PARAM | (addr & 0x7f) << 4;
	writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));

	load[i++] = CCI_I2C_WRITE | len << 4;

	for (j = 0; j < len; j++)
		load[i++] = buf[j];

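	/* Pack the command byte and payload LSB-first into 32-bit queue words. */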
	for (j = 0; j < i; j += 4) {
		val = load[j];
		val |= load[j + 1] << 8;
		val |= load[j + 2] << 16;
		val |= load[j + 3] << 24;
		writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));
	}

	val = CCI_I2C_REPORT | CCI_I2C_REPORT_IRQ_EN;
	writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));

	return cci_run_queue(cci, master, queue);
}

static int cci_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
	struct cci_master *cci_master = i2c_get_adapdata(adap);
	struct cci *cci = cci_master->cci;
	int i, ret;

	ret = pm_runtime_get_sync(cci->dev);
	if (ret < 0)
		goto err;

	for (i = 0; i < num; i++) {
		if (msgs[i].flags & I2C_M_RD)
			ret = cci_i2c_read(cci, cci_master->master,
					   msgs[i].addr, msgs[i].buf,
					   msgs[i].len);
		else
			ret = cci_i2c_write(cci, cci_master->master,
					    msgs[i].addr, msgs[i].buf,
					    msgs[i].len);

		if (ret < 0)
			break;
	}

	if (!ret)
		ret = num;

err:
	pm_runtime_put_autosuspend(cci->dev);

	return ret;
}

static u32 cci_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm cci_algo = {
	.xfer = cci_xfer,
	.functionality = cci_func,
};

static int cci_enable_clocks(struct cci *cci)
{
	return clk_bulk_prepare_enable(cci->nclocks, cci->clocks);
}

static void cci_disable_clocks(struct cci *cci)
{
	clk_bulk_disable_unprepare(cci->nclocks, cci->clocks);
}

static int __maybe_unused cci_suspend_runtime(struct device *dev)
{
	struct cci *cci = dev_get_drvdata(dev);

	cci_disable_clocks(cci);
	return 0;
}

static int __maybe_unused cci_resume_runtime(struct device *dev)
{
	struct cci *cci = dev_get_drvdata(dev);
	int ret;

	ret = cci_enable_clocks(cci);
	if (ret)
		return ret;

	cci_init(cci);
	return 0;
}

static int __maybe_unused cci_suspend(struct device *dev)
{
	if (!pm_runtime_suspended(dev))
		return cci_suspend_runtime(dev);

	return 0;
}

static int __maybe_unused cci_resume(struct device *dev)
{
	cci_resume_runtime(dev);
	pm_request_autosuspend(dev);

	return 0;
}

static const struct dev_pm_ops qcom_cci_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(cci_suspend, cci_resume)
	SET_RUNTIME_PM_OPS(cci_suspend_runtime, cci_resume_runtime, NULL)
};

static int cci_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *child;
	struct resource *r;
	struct cci *cci;
	int ret, i;
	u32 val;

	cci = devm_kzalloc(dev, sizeof(*cci), GFP_KERNEL);
	if (!cci)
		return -ENOMEM;

	cci->dev = dev;
	platform_set_drvdata(pdev, cci);
	cci->data = device_get_match_data(dev);
	if (!cci->data)
		return -ENOENT;

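	/*
	 * Each enabled child node describes one bus master: "reg" selects
	 * the master index and "clock-frequency" the bus speed.
	 */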
	for_each_available_child_of_node(dev->of_node, child) {
		struct cci_master *master;
		u32 idx;

		ret = of_property_read_u32(child, "reg", &idx);
		if (ret) {
			dev_err(dev, "%pOF invalid 'reg' property", child);
			continue;
		}

		if (idx >= cci->data->num_masters) {
			dev_err(dev, "%pOF invalid 'reg' value: %u (max is %u)",
				child, idx, cci->data->num_masters - 1);
			continue;
		}

		master = &cci->master[idx];
		master->adap.quirks = &cci->data->quirks;
		master->adap.algo = &cci_algo;
		master->adap.dev.parent = dev;
		master->adap.dev.of_node = of_node_get(child);
		master->master = idx;
		master->cci = cci;

		i2c_set_adapdata(&master->adap, master);
		snprintf(master->adap.name, sizeof(master->adap.name), "Qualcomm-CCI");

		master->mode = I2C_MODE_STANDARD;
		ret = of_property_read_u32(child, "clock-frequency", &val);
		if (!ret) {
			if (val == I2C_MAX_FAST_MODE_FREQ)
				master->mode = I2C_MODE_FAST;
			else if (val == I2C_MAX_FAST_MODE_PLUS_FREQ)
				master->mode = I2C_MODE_FAST_PLUS;
		}

		init_completion(&master->irq_complete);
	}

	/* Memory */

	cci->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
	if (IS_ERR(cci->base))
		return PTR_ERR(cci->base);

	/* Clocks */

	ret = devm_clk_bulk_get_all(dev, &cci->clocks);
	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to get clocks\n");
	else if (!ret)
		return dev_err_probe(dev, -EINVAL, "not enough clocks in DT\n");
	cci->nclocks = ret;

	ret = cci_enable_clocks(cci);
	if (ret < 0)
		return ret;

	/* Interrupt */

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto disable_clocks;
	cci->irq = ret;

	ret = devm_request_irq(dev, cci->irq, cci_isr, 0, dev_name(dev), cci);
	if (ret < 0) {
		dev_err(dev, "request_irq failed, ret: %d\n", ret);
		goto disable_clocks;
	}

	val = readl(cci->base + CCI_HW_VERSION);
	dev_dbg(dev, "CCI HW version = 0x%08x", val);

	ret = cci_reset(cci);
	if (ret < 0)
		goto error;

	ret = cci_init(cci);
	if (ret < 0)
		goto error;

	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	for (i = 0; i < cci->data->num_masters; i++) {
		if (!cci->master[i].cci)
			continue;

		ret = i2c_add_adapter(&cci->master[i].adap);
		if (ret < 0) {
			of_node_put(cci->master[i].adap.dev.of_node);
			goto error_i2c;
		}
	}

	return 0;

error_i2c:
	pm_runtime_disable(dev);
	pm_runtime_dont_use_autosuspend(dev);

	for (--i; i >= 0; i--) {
		if (cci->master[i].cci) {
			i2c_del_adapter(&cci->master[i].adap);
			of_node_put(cci->master[i].adap.dev.of_node);
		}
	}
error:
	disable_irq(cci->irq);
disable_clocks:
	cci_disable_clocks(cci);

	return ret;
}

static void cci_remove(struct platform_device *pdev)
{
	struct cci *cci = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < cci->data->num_masters; i++) {
		if (cci->master[i].cci) {
			i2c_del_adapter(&cci->master[i].adap);
			of_node_put(cci->master[i].adap.dev.of_node);
		}
		cci_halt(cci, i);
	}

	disable_irq(cci->irq);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
}

static const struct cci_data cci_v1_data = {
	.num_masters = 1,
	.queue_size = { 64, 16 },
	.quirks = {
		.max_write_len = 10,
		.max_read_len = 12,
	},
	.params[I2C_MODE_STANDARD] = {
		.thigh = 78,
		.tlow = 114,
		.tsu_sto = 28,
		.tsu_sta = 28,
		.thd_dat = 10,
		.thd_sta = 77,
		.tbuf = 118,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 1
	},
	.params[I2C_MODE_FAST] = {
		.thigh = 20,
		.tlow = 28,
		.tsu_sto = 21,
		.tsu_sta = 21,
		.thd_dat = 13,
		.thd_sta = 18,
		.tbuf = 32,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 3
	},
};

static const struct cci_data cci_v1_5_data = {
	.num_masters = 2,
	.queue_size = { 64, 16 },
	.quirks = {
		.max_write_len = 10,
		.max_read_len = 12,
	},
	.params[I2C_MODE_STANDARD] = {
		.thigh = 78,
		.tlow = 114,
		.tsu_sto = 28,
		.tsu_sta = 28,
		.thd_dat = 10,
		.thd_sta = 77,
		.tbuf = 118,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 1
	},
	.params[I2C_MODE_FAST] = {
		.thigh = 20,
		.tlow = 28,
		.tsu_sto = 21,
		.tsu_sta = 21,
		.thd_dat = 13,
		.thd_sta = 18,
		.tbuf = 32,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 3
	},
};

static const struct cci_data cci_v2_data = {
	.num_masters = 2,
	.queue_size = { 64, 16 },
	.quirks = {
		.max_write_len = 11,
		.max_read_len = 12,
	},
	.params[I2C_MODE_STANDARD] = {
		.thigh = 201,
		.tlow = 174,
		.tsu_sto = 204,
		.tsu_sta = 231,
		.thd_dat = 22,
		.thd_sta = 162,
		.tbuf = 227,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 3
	},
	.params[I2C_MODE_FAST] = {
		.thigh = 38,
		.tlow = 56,
		.tsu_sto = 40,
		.tsu_sta = 40,
		.thd_dat = 22,
		.thd_sta = 35,
		.tbuf = 62,
		.scl_stretch_en = 0,
		.trdhld = 6,
		.tsp = 3
	},
	.params[I2C_MODE_FAST_PLUS] = {
		.thigh = 16,
		.tlow = 22,
		.tsu_sto = 17,
		.tsu_sta = 18,
		.thd_dat = 16,
		.thd_sta = 15,
		.tbuf = 24,
		.scl_stretch_en = 0,
		.trdhld = 3,
		.tsp = 3
	},
};

static const struct of_device_id cci_dt_match[] = {
	{ .compatible = "qcom,msm8226-cci", .data = &cci_v1_data},
	{ .compatible = "qcom,msm8974-cci", .data = &cci_v1_5_data},
	{ .compatible = "qcom,msm8996-cci", .data = &cci_v2_data},

	/*
	 * Legacy compatibles kept for backwards compatibility.
	 * Do not add any new ones unless they introduce a new config
	 */
	{ .compatible = "qcom,msm8916-cci", .data = &cci_v1_data},
	{ .compatible = "qcom,sdm845-cci", .data = &cci_v2_data},
	{ .compatible = "qcom,sm8250-cci", .data = &cci_v2_data},
	{ .compatible = "qcom,sm8450-cci", .data = &cci_v2_data},
	{}
};
MODULE_DEVICE_TABLE(of, cci_dt_match);

static struct platform_driver qcom_cci_driver = {
	.probe = cci_probe,
	.remove = cci_remove,
	.driver = {
		.name = "i2c-qcom-cci",
		.of_match_table = cci_dt_match,
		.pm = &qcom_cci_pm,
	},
};

module_platform_driver(qcom_cci_driver);

MODULE_DESCRIPTION("Qualcomm Camera Control Interface driver");
MODULE_AUTHOR("Todor Tomov <todor.tomov@linaro.org>");
MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
MODULE_LICENSE("GPL v2");