1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * UCSI driver for Cypress CCGx Type-C controller
4 *
5 * Copyright (C) 2017-2018 NVIDIA Corporation. All rights reserved.
6 * Author: Ajay Gupta <ajayg@nvidia.com>
7 *
8 * Some code borrowed from drivers/usb/typec/ucsi/ucsi_acpi.c
9 */
10 #include <linux/acpi.h>
11 #include <linux/delay.h>
12 #include <linux/firmware.h>
13 #include <linux/i2c.h>
14 #include <linux/module.h>
15 #include <linux/pci.h>
16 #include <linux/platform_device.h>
17 #include <linux/pm.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/usb/typec_dp.h>
20
21 #include <linux/unaligned.h>
22 #include "ucsi.h"
23
/* Firmware image/mode the CCGx device is currently running */
enum enum_fw_mode {
	BOOT,		/* bootloader */
	FW1,		/* FW partition-1 (contains secondary fw) */
	FW2,		/* FW partition-2 (contains primary fw) */
	FW_INVALID,
};
30
31 #define CCGX_RAB_DEVICE_MODE 0x0000
32 #define CCGX_RAB_INTR_REG 0x0006
33 #define DEV_INT BIT(0)
34 #define PORT0_INT BIT(1)
35 #define PORT1_INT BIT(2)
36 #define UCSI_READ_INT BIT(7)
37 #define CCGX_RAB_JUMP_TO_BOOT 0x0007
38 #define TO_BOOT 'J'
39 #define TO_ALT_FW 'A'
40 #define CCGX_RAB_RESET_REQ 0x0008
41 #define RESET_SIG 'R'
42 #define CMD_RESET_I2C 0x0
43 #define CMD_RESET_DEV 0x1
44 #define CCGX_RAB_ENTER_FLASHING 0x000A
45 #define FLASH_ENTER_SIG 'P'
46 #define CCGX_RAB_VALIDATE_FW 0x000B
47 #define CCGX_RAB_FLASH_ROW_RW 0x000C
48 #define FLASH_SIG 'F'
49 #define FLASH_RD_CMD 0x0
50 #define FLASH_WR_CMD 0x1
51 #define FLASH_FWCT1_WR_CMD 0x2
52 #define FLASH_FWCT2_WR_CMD 0x3
53 #define FLASH_FWCT_SIG_WR_CMD 0x4
54 #define CCGX_RAB_READ_ALL_VER 0x0010
55 #define CCGX_RAB_READ_FW2_VER 0x0020
56 #define CCGX_RAB_UCSI_CONTROL 0x0039
57 #define CCGX_RAB_UCSI_CONTROL_START BIT(0)
58 #define CCGX_RAB_UCSI_CONTROL_STOP BIT(1)
59 #define CCGX_RAB_UCSI_DATA_BLOCK(offset) (0xf000 | ((offset) & 0xff))
60 #define REG_FLASH_RW_MEM 0x0200
61 #define DEV_REG_IDX CCGX_RAB_DEVICE_MODE
62 #define CCGX_RAB_PDPORT_ENABLE 0x002C
63 #define PDPORT_1 BIT(0)
64 #define PDPORT_2 BIT(1)
65 #define CCGX_RAB_RESPONSE 0x007E
66 #define ASYNC_EVENT BIT(7)
67
68 /* CCGx events & async msg codes */
69 #define RESET_COMPLETE 0x80
70 #define EVENT_INDEX RESET_COMPLETE
71 #define PORT_CONNECT_DET 0x84
72 #define PORT_DISCONNECT_DET 0x85
73 #define ROLE_SWAP_COMPELETE 0x87
74
75 /* ccg firmware */
76 #define CYACD_LINE_SIZE 527
77 #define CCG4_ROW_SIZE 256
78 #define FW1_METADATA_ROW 0x1FF
79 #define FW2_METADATA_ROW 0x1FE
80 #define FW_CFG_TABLE_SIG_SIZE 256
81
/* Oldest acceptable secondary fw build number; older triggers a reflash */
static int secondary_fw_min_ver = 41;
83
/* Which image needs flashing, and which running image performs the update */
enum enum_flash_mode {
	SECONDARY_BL,		/* update secondary using bootloader */
	PRIMARY,		/* update primary using secondary */
	SECONDARY,		/* update secondary using primary */
	FLASH_NOT_NEEDED,	/* update not required */
	FLASH_INVALID,
};
91
/* Firmware file names, indexed by enum enum_flash_mode */
static const char * const ccg_fw_names[] = {
	"ccg_boot.cyacd",
	"ccg_primary.cyacd",
	"ccg_secondary.cyacd"
};
97
/* Device info block read from CCGX_RAB_DEVICE_MODE (wire layout, packed) */
struct ccg_dev_info {
#define CCG_DEVINFO_FWMODE_SHIFT (0)
#define CCG_DEVINFO_FWMODE_MASK (0x3 << CCG_DEVINFO_FWMODE_SHIFT)
#define CCG_DEVINFO_PDPORTS_SHIFT (2)
#define CCG_DEVINFO_PDPORTS_MASK (0x3 << CCG_DEVINFO_PDPORTS_SHIFT)
	u8 mode;	/* FWMODE and PDPORTS bit-fields, see masks above */
	u8 bl_mode;
	__le16 silicon_id;
	__le16 bl_last_row;
} __packed;
108
/* On-wire version encoding: 16-bit build, patch byte, maj/min nibble byte */
struct version_format {
	__le16 build;
	u8 patch;
	u8 ver;		/* major in high nibble, minor in low nibble */
#define CCG_VERSION_PATCH(x) ((x) << 16)
#define CCG_VERSION(x) ((x) << 24)
#define CCG_VERSION_MIN_SHIFT (0)
#define CCG_VERSION_MIN_MASK (0xf << CCG_VERSION_MIN_SHIFT)
#define CCG_VERSION_MAJ_SHIFT (4)
#define CCG_VERSION_MAJ_MASK (0xf << CCG_VERSION_MAJ_SHIFT)
} __packed;
120
121 /*
122 * Firmware version 3.1.10 or earlier, built for NVIDIA has known issue
123 * of missing interrupt when a device is connected for runtime resume
124 */
125 #define CCG_FW_BUILD_NVIDIA (('n' << 8) | 'v')
126 #define CCG_OLD_FW_VERSION (CCG_VERSION(0x31) | CCG_VERSION_PATCH(10))
127
128 /* Firmware for Tegra doesn't support UCSI ALT command, built
129 * for NVIDIA has known issue of reporting wrong capability info
130 */
131 #define CCG_FW_BUILD_NVIDIA_TEGRA (('g' << 8) | 'n')
132
133 /* Altmode offset for NVIDIA Function Test Board (FTB) */
134 #define NVIDIA_FTB_DP_OFFSET (2)
135 #define NVIDIA_FTB_DBG_OFFSET (3)
136
/* Version pair reported per image: base (bootable) and application part */
struct version_info {
	struct version_format base;
	struct version_format app;
};
141
/*
 * FWCT: firmware configuration table appended (with a trailing signature)
 * to signed .cyacd firmware images; identity holds the "FWCT" magic.
 */
struct fw_config_table {
	u32 identity;
	u16 table_size;
	u8 fwct_version;
	u8 is_key_change;
	u8 guid[16];
	struct version_format base;
	struct version_format app;
	u8 primary_fw_digest[32];
	u32 key_exp_length;
	u8 key_modulus[256];
	u8 key_exp[4];
};
155
156 /* CCGx response codes */
/* CCGx response codes, as read back from CCGX_RAB_RESPONSE */
enum ccg_resp_code {
	CMD_NO_RESP = 0x00,
	CMD_SUCCESS = 0x02,
	FLASH_DATA_AVAILABLE = 0x03,
	CMD_INVALID = 0x05,
	FLASH_UPDATE_FAIL = 0x07,
	INVALID_FW = 0x08,
	INVALID_ARG = 0x09,
	CMD_NOT_SUPPORT = 0x0A,
	TRANSACTION_FAIL = 0x0C,
	PD_CMD_FAIL = 0x0D,
	UNDEF_ERROR = 0x0F,
	INVALID_RESP = 0x10,
};
171
172 #define CCG_EVENT_MAX (EVENT_INDEX + 43)
173
/* One HPI command: target register, payload, payload length, settle delay */
struct ccg_cmd {
	u16 reg;
	u32 data;
	int len;
	u32 delay; /* ms delay for cmd timeout */
};
180
/* Response header read from CCGX_RAB_RESPONSE: code plus payload length */
struct ccg_resp {
	u8 code;
	u8 length;
};
185
/* Book-keeping entry for de-duplicating connector DP alt modes */
struct ucsi_ccg_altmode {
	u16 svid;
	u32 mid;
	u8 linked_idx;	/* index into the peer table (orig <-> updated) */
	u8 active_idx;	/* original altmode currently active (multi-DP case) */
#define UCSI_MULTI_DP_INDEX (0xff)
	bool checked;	/* already folded into a merged 'updated' entry */
} __packed;
194
#define CCGX_MESSAGE_IN_MAX 4
/* Cached mirror of the UCSI CCI and MESSAGE_IN registers, filled by the ISR */
struct op_region {
	__le32 cci;
	__le32 message_in[CCGX_MESSAGE_IN_MAX];
};
200
/* Per-device driver state */
struct ucsi_ccg {
	struct device *dev;
	struct ucsi *ucsi;
	struct i2c_client *client;

	struct ccg_dev_info info;
	/* version info for boot, primary and secondary */
	struct version_info version[FW2 + 1];
	u32 fw_version;		/* FW2 app version (CCG_VERSION|PATCH encoding) */
	/* CCG HPI communication flags */
	unsigned long flags;
#define RESET_PENDING 0
#define DEV_CMD_PENDING 1
	struct ccg_resp dev_resp;	/* last response from CCGX_RAB_RESPONSE */
	u8 cmd_resp;			/* code consumed by ccg_send_command() */
	int port_num;
	int irq;
	struct work_struct work;
	struct mutex lock; /* to sync between user and driver thread */

	/* fw build with vendor information */
	u16 fw_build;
	struct work_struct pm_work;	/* runtime-resume missed-IRQ workaround */

	bool has_multiple_dp;	/* connector reported duplicate DP altmodes */
	struct ucsi_ccg_altmode orig[UCSI_MAX_ALTMODES];
	struct ucsi_ccg_altmode updated[UCSI_MAX_ALTMODES];

	/*
	 * This spinlock protects op_data which includes CCI and MESSAGE_IN that
	 * will be updated in ISR
	 */
	spinlock_t op_lock;
	struct op_region op_data;
};
236
/*
 * Read @len bytes from CCGx register address block @rab into @data,
 * splitting into chunks when the adapter limits max_read_len.
 * Returns 0 on success or a negative errno.
 */
static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
{
	struct i2c_client *client = uc->client;
	const struct i2c_adapter_quirks *quirks = client->adapter->quirks;
	unsigned char buf[2];
	struct i2c_msg msgs[] = {
		{
			.addr	= client->addr,
			.flags	= 0x0,
			.len	= sizeof(buf),
			.buf	= buf,
		},
		{
			.addr	= client->addr,
			.flags	= I2C_M_RD,
			.buf	= data,
		},
	};
	u32 rlen, rem_len = len, max_read_len = len;
	int status;

	/* check any max_read_len limitation on i2c adapter */
	if (quirks && quirks->max_read_len)
		max_read_len = quirks->max_read_len;

	pm_runtime_get_sync(uc->dev);
	while (rem_len > 0) {
		msgs[1].buf = &data[len - rem_len];
		rlen = min_t(u16, rem_len, max_read_len);
		msgs[1].len = rlen;
		/* register address is sent little-endian in the write phase */
		put_unaligned_le16(rab, buf);
		status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
		if (status < 0) {
			dev_err(uc->dev, "i2c_transfer failed %d\n", status);
			goto out_put;	/* single exit: always drop the PM ref */
		}
		rab += rlen;
		rem_len -= rlen;
	}

	status = 0;
out_put:
	pm_runtime_put_sync(uc->dev);
	return status;
}
281
/*
 * Write @len bytes of @data to CCGx register address block @rab.
 * The 16-bit register address is prepended little-endian to the payload.
 * Returns 0 on success or a negative errno.
 */
static int ccg_write(struct ucsi_ccg *uc, u16 rab, const u8 *data, u32 len)
{
	struct i2c_client *client = uc->client;
	unsigned char *buf;
	struct i2c_msg msgs[] = {
		{
			.addr	= client->addr,
			.flags	= 0x0,
		}
	};
	int status;

	buf = kzalloc(len + sizeof(rab), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	put_unaligned_le16(rab, buf);
	memcpy(buf + sizeof(rab), data, len);

	msgs[0].len = len + sizeof(rab);
	msgs[0].buf = buf;

	pm_runtime_get_sync(uc->dev);
	status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
	if (status < 0)
		dev_err(uc->dev, "i2c_transfer failed %d\n", status);
	else
		status = 0;	/* i2c_transfer returns msg count on success */
	/* single exit: PM ref and buffer released on every path */
	pm_runtime_put_sync(uc->dev);
	kfree(buf);
	return status;
}
317
/*
 * Refresh the cached op-region with @cci and, when CCI reports a data
 * length, the MESSAGE_IN payload read from the device.  The i2c read is
 * done into a scratch buffer outside the spinlock so the lock is only
 * held for the memcpy.  Returns 0 on success or a negative errno.
 */
static int ccg_op_region_update(struct ucsi_ccg *uc, u32 cci)
{
	u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(UCSI_MESSAGE_IN);
	struct op_region *data = &uc->op_data;
	unsigned char *buf;
	size_t size = sizeof(data->message_in);
	int ret = 0;

	buf = kzalloc(size, GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;

	if (UCSI_CCI_LENGTH(cci)) {
		ret = ccg_read(uc, reg, (void *)buf, size);
		if (ret)
			goto out_free;	/* single free on the error path */
	}

	spin_lock(&uc->op_lock);
	data->cci = cpu_to_le32(cci);
	if (UCSI_CCI_LENGTH(cci))
		memcpy(&data->message_in, buf, size);
	spin_unlock(&uc->op_lock);

out_free:
	kfree(buf);
	return ret;
}
345
ucsi_ccg_init(struct ucsi_ccg * uc)346 static int ucsi_ccg_init(struct ucsi_ccg *uc)
347 {
348 unsigned int count = 10;
349 u8 data;
350 int status;
351
352 spin_lock_init(&uc->op_lock);
353
354 data = CCGX_RAB_UCSI_CONTROL_STOP;
355 status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data));
356 if (status < 0)
357 return status;
358
359 data = CCGX_RAB_UCSI_CONTROL_START;
360 status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data));
361 if (status < 0)
362 return status;
363
364 /*
365 * Flush CCGx RESPONSE queue by acking interrupts. Above ucsi control
366 * register write will push response which must be cleared.
367 */
368 do {
369 status = ccg_read(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
370 if (status < 0)
371 return status;
372
373 if (!(data & DEV_INT))
374 return 0;
375
376 status = ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
377 if (status < 0)
378 return status;
379
380 usleep_range(10000, 11000);
381 } while (--count);
382
383 return -ETIMEDOUT;
384 }
385
/*
 * Translate the CAM index returned by GET_CURRENT_CAM from the device's
 * original altmode table to the merged table handed to the UCSI core.
 *
 * data[0] comes straight from the device, so bound-check it before using
 * it to index the lookup tables (guards against firmware returning an
 * out-of-range index).  On an invalid index, data[0] is left untouched.
 */
static void ucsi_ccg_update_get_current_cam_cmd(struct ucsi_ccg *uc, u8 *data)
{
	u8 cam, new_cam;

	cam = data[0];
	if (cam >= ARRAY_SIZE(uc->orig))
		return;

	new_cam = uc->orig[cam].linked_idx;
	if (new_cam >= ARRAY_SIZE(uc->updated))
		return;

	uc->updated[new_cam].active_idx = cam;
	data[0] = new_cam;
}
395
/*
 * Merge duplicate DP alt modes reported for the connector into single
 * entries before the list reaches the UCSI core.
 *
 * The original device list is cached in uc->orig and the merged list in
 * uc->updated; linked_idx cross-references the two tables (a merged
 * multi-DP entry is marked UCSI_MULTI_DP_INDEX).  Returns true when at
 * least one duplicate DP mode was folded, in which case
 * uc->has_multiple_dp is also set.
 */
static bool ucsi_ccg_update_altmodes(struct ucsi *ucsi,
				     u8 recipient,
				     struct ucsi_altmode *orig,
				     struct ucsi_altmode *updated)
{
	struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
	struct ucsi_ccg_altmode *alt, *new_alt;
	int i, j, k = 0;
	bool found = false;

	/* only the connector's own altmode list is rewritten */
	if (recipient != UCSI_RECIPIENT_CON)
		return false;

	alt = uc->orig;
	new_alt = uc->updated;
	memset(uc->updated, 0, sizeof(uc->updated));

	/*
	 * Copy original connector altmodes to new structure.
	 * We need this before second loop since second loop
	 * checks for duplicate altmodes.
	 */
	for (i = 0; i < UCSI_MAX_ALTMODES; i++) {
		alt[i].svid = orig[i].svid;
		alt[i].mid = orig[i].mid;
		if (!alt[i].svid)
			break;		/* svid == 0 terminates the list */
	}

	for (i = 0; i < UCSI_MAX_ALTMODES; i++) {
		if (!alt[i].svid)
			break;

		/* already checked and considered */
		if (alt[i].checked)
			continue;

		if (!DP_CONF_GET_PIN_ASSIGN(alt[i].mid)) {
			/* Found Non DP altmode */
			new_alt[k].svid = alt[i].svid;
			new_alt[k].mid |= alt[i].mid;
			new_alt[k].linked_idx = i;
			alt[i].linked_idx = k;
			updated[k].svid = new_alt[k].svid;
			updated[k].mid = new_alt[k].mid;
			k++;
			continue;
		}

		/* DP mode: scan the rest of the list for same-SVID DP modes */
		for (j = i + 1; j < UCSI_MAX_ALTMODES; j++) {
			if (alt[i].svid != alt[j].svid ||
			    !DP_CONF_GET_PIN_ASSIGN(alt[j].mid)) {
				continue;
			} else {
				/* Found duplicate DP mode */
				new_alt[k].svid = alt[i].svid;
				new_alt[k].mid |= alt[i].mid | alt[j].mid;
				/* merged entry maps back to several originals */
				new_alt[k].linked_idx = UCSI_MULTI_DP_INDEX;
				alt[i].linked_idx = k;
				alt[j].linked_idx = k;
				alt[j].checked = true;
				found = true;
			}
		}
		if (found) {
			uc->has_multiple_dp = true;
		} else {
			/* Didn't find any duplicate DP altmode */
			new_alt[k].svid = alt[i].svid;
			new_alt[k].mid |= alt[i].mid;
			new_alt[k].linked_idx = i;
			alt[i].linked_idx = k;
		}
		updated[k].svid = new_alt[k].svid;
		updated[k].mid = new_alt[k].mid;
		k++;
	}
	return found;
}
475
/*
 * Rewrite the altmode index inside a SET_NEW_CAM command from the merged
 * (updated) table back to the device's original altmode table.  For a
 * merged multi-DP entry, pick the best original DP mode that the partner
 * also supports, prioritizing pin assignment E -> D -> C.
 */
static void ucsi_ccg_update_set_new_cam_cmd(struct ucsi_ccg *uc,
					    struct ucsi_connector *con,
					    u64 *cmd)
{
	struct ucsi_ccg_altmode *new_port, *port;
	struct typec_altmode *alt = NULL;
	u8 new_cam, cam, pin;
	bool enter_new_mode;
	int i, j, k = 0xff;	/* k == 0xff: no candidate pin found yet */

	port = uc->orig;
	new_cam = UCSI_SET_NEW_CAM_GET_AM(*cmd);
	if (new_cam >= ARRAY_SIZE(uc->updated))
		return;
	new_port = &uc->updated[new_cam];
	cam = new_port->linked_idx;
	enter_new_mode = UCSI_SET_NEW_CAM_ENTER(*cmd);

	/*
	 * If CAM is UCSI_MULTI_DP_INDEX then this is DP altmode
	 * with multiple DP mode. Find out CAM for best pin assignment
	 * among all DP mode. Priorite pin E->D->C after making sure
	 * the partner supports that pin.
	 */
	if (cam == UCSI_MULTI_DP_INDEX) {
		if (enter_new_mode) {
			/* locate the partner altmode with the matching SVID */
			for (i = 0; con->partner_altmode[i]; i++) {
				alt = con->partner_altmode[i];
				if (alt->svid == new_port->svid)
					break;
			}
			/*
			 * alt will always be non NULL since this is
			 * UCSI_SET_NEW_CAM command and so there will be
			 * at least one con->partner_altmode[i] with svid
			 * matching with new_port->svid.
			 */
			for (j = 0; port[j].svid; j++) {
				pin = DP_CONF_GET_PIN_ASSIGN(port[j].mid);
				if (alt && port[j].svid == alt->svid &&
				    (pin & DP_CONF_GET_PIN_ASSIGN(alt->vdo))) {
					/* prioritize pin E->D->C */
					if (k == 0xff || (k != 0xff && pin >
					    DP_CONF_GET_PIN_ASSIGN(port[k].mid))
					   ) {
						k = j;
					}
				}
			}
			cam = k;
			new_port->active_idx = cam;
		} else {
			/* exiting: reuse the mode that was entered earlier */
			cam = new_port->active_idx;
		}
	}
	*cmd &= ~UCSI_SET_NEW_CAM_AM_MASK;
	*cmd |= UCSI_SET_NEW_CAM_SET_AM(cam);
}
534
535 /*
536 * Change the order of vdo values of NVIDIA test device FTB
537 * (Function Test Board) which reports altmode list with vdo=0x3
538 * first and then vdo=0x. Current logic to assign mode value is
539 * based on order in altmode list and it causes a mismatch of CON
540 * and SOP altmodes since NVIDIA GPU connector has order of vdo=0x1
541 * first and then vdo=0x3
542 */
ucsi_ccg_nvidia_altmode(struct ucsi_ccg * uc,struct ucsi_altmode * alt,u64 command)543 static void ucsi_ccg_nvidia_altmode(struct ucsi_ccg *uc,
544 struct ucsi_altmode *alt,
545 u64 command)
546 {
547 switch (UCSI_ALTMODE_OFFSET(command)) {
548 case NVIDIA_FTB_DP_OFFSET:
549 if (alt[0].mid == USB_TYPEC_NVIDIA_VLINK_DBG_VDO)
550 alt[0].mid = USB_TYPEC_NVIDIA_VLINK_DP_VDO |
551 DP_CAP_DP_SIGNALLING(0) | DP_CAP_USB |
552 DP_CONF_SET_PIN_ASSIGN(BIT(DP_PIN_ASSIGN_E));
553 break;
554 case NVIDIA_FTB_DBG_OFFSET:
555 if (alt[0].mid == USB_TYPEC_NVIDIA_VLINK_DP_VDO)
556 alt[0].mid = USB_TYPEC_NVIDIA_VLINK_DBG_VDO;
557 break;
558 default:
559 break;
560 }
561 }
562
/* Read the UCSI VERSION register out of the CCGx UCSI data block. */
static int ucsi_ccg_read_version(struct ucsi *ucsi, u16 *version)
{
	struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);

	return ccg_read(uc, CCGX_RAB_UCSI_DATA_BLOCK(UCSI_VERSION),
			(u8 *)version, sizeof(*version));
}
570
/*
 * Return the CCI value cached by the interrupt handler.
 * NOTE(review): op_data.cci is stored as __le32 and copied to a u32
 * without conversion — fine on little-endian hosts; confirm for BE.
 */
static int ucsi_ccg_read_cci(struct ucsi *ucsi, u32 *cci)
{
	struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);

	/* op_data is written from the ISR; lock for a coherent snapshot */
	spin_lock(&uc->op_lock);
	*cci = uc->op_data.cci;
	spin_unlock(&uc->op_lock);

	return 0;
}
581
/*
 * Copy the MESSAGE_IN payload cached by the interrupt handler into @val.
 * NOTE(review): val_len is not bounded against sizeof(op_data.message_in)
 * here — assumes the UCSI core never asks for more; verify at call sites.
 */
static int ucsi_ccg_read_message_in(struct ucsi *ucsi, void *val, size_t val_len)
{
	struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);

	spin_lock(&uc->op_lock);
	memcpy(val, uc->op_data.message_in, val_len);
	spin_unlock(&uc->op_lock);

	return 0;
}
592
/*
 * Fire a UCSI command without waiting for completion; the new CCI value
 * arrives later via the interrupt handler.
 */
static int ucsi_ccg_async_control(struct ucsi *ucsi, u64 command)
{
	struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
	u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(UCSI_CONTROL);

	/*
	 * UCSI may read CCI instantly after async_control,
	 * clear CCI to avoid caller getting wrong data before we get CCI from ISR
	 */
	spin_lock(&uc->op_lock);
	uc->op_data.cci = 0;
	spin_unlock(&uc->op_lock);

	return ccg_write(uc, reg, (u8 *)&command, sizeof(command));
}
608
/*
 * Issue a UCSI command synchronously, applying CCGx-specific fix-ups:
 * SET_NEW_CAM indices are remapped when duplicate DP modes were merged,
 * GET_CURRENT_CAM results are translated back, the NVIDIA FTB altmode
 * quirk is applied, and Tegra builds get the broken alt-mode capability
 * bit masked out of GET_CAPABILITY.
 */
static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command, u32 *cci,
				 void *data, size_t size)
{
	struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
	struct ucsi_connector *con;
	int con_index;
	int ret;

	mutex_lock(&uc->lock);
	pm_runtime_get_sync(uc->dev);

	if (UCSI_COMMAND(command) == UCSI_SET_NEW_CAM &&
	    uc->has_multiple_dp) {
		con_index = (command >> 16) &
			UCSI_CMD_CONNECTOR_MASK;
		if (con_index == 0) {
			/* connector number 0 is invalid per UCSI */
			ret = -EINVAL;
			goto err_put;
		}
		con = &uc->ucsi->connector[con_index - 1];
		ucsi_ccg_update_set_new_cam_cmd(uc, con, &command);
	}

	ret = ucsi_sync_control_common(ucsi, command, cci, data, size);

	/* post-process responses that need quirk handling */
	switch (UCSI_COMMAND(command)) {
	case UCSI_GET_CURRENT_CAM:
		if (uc->has_multiple_dp)
			ucsi_ccg_update_get_current_cam_cmd(uc, (u8 *)data);
		break;
	case UCSI_GET_ALTERNATE_MODES:
		if (UCSI_ALTMODE_RECIPIENT(command) == UCSI_RECIPIENT_SOP) {
			struct ucsi_altmode *alt = data;

			if (alt[0].svid == USB_TYPEC_NVIDIA_VLINK_SID)
				ucsi_ccg_nvidia_altmode(uc, alt, command);
		}
		break;
	case UCSI_GET_CAPABILITY:
		if (uc->fw_build == CCG_FW_BUILD_NVIDIA_TEGRA) {
			struct ucsi_capability *cap = data;

			/* Tegra fw reports altmode details it can't deliver */
			cap->features &= ~UCSI_CAP_ALT_MODE_DETAILS;
		}
		break;
	default:
		break;
	}

err_put:
	pm_runtime_put_sync(uc->dev);
	mutex_unlock(&uc->lock);

	return ret;
}
664
/* UCSI backend operations for the CCGx I2C transport */
static const struct ucsi_operations ucsi_ccg_ops = {
	.read_version = ucsi_ccg_read_version,
	.read_cci = ucsi_ccg_read_cci,
	.poll_cci = ucsi_ccg_read_cci,	/* polling reuses the cached CCI */
	.read_message_in = ucsi_ccg_read_message_in,
	.sync_control = ucsi_ccg_sync_control,
	.async_control = ucsi_ccg_async_control,
	.update_altmodes = ucsi_ccg_update_altmodes
};
674
ccg_irq_handler(int irq,void * data)675 static irqreturn_t ccg_irq_handler(int irq, void *data)
676 {
677 u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(UCSI_CCI);
678 struct ucsi_ccg *uc = data;
679 u8 intr_reg;
680 u32 cci = 0;
681 int ret = 0;
682
683 ret = ccg_read(uc, CCGX_RAB_INTR_REG, &intr_reg, sizeof(intr_reg));
684 if (ret)
685 return ret;
686
687 if (!intr_reg)
688 return IRQ_HANDLED;
689 else if (!(intr_reg & UCSI_READ_INT))
690 goto err_clear_irq;
691
692 ret = ccg_read(uc, reg, (void *)&cci, sizeof(cci));
693 if (ret)
694 goto err_clear_irq;
695
696 /*
697 * As per CCGx UCSI interface guide, copy CCI and MESSAGE_IN
698 * to the OpRegion before clear the UCSI interrupt
699 */
700 ret = ccg_op_region_update(uc, cci);
701 if (ret)
702 goto err_clear_irq;
703
704 err_clear_irq:
705 ccg_write(uc, CCGX_RAB_INTR_REG, &intr_reg, sizeof(intr_reg));
706
707 if (!ret)
708 ucsi_notify_common(uc->ucsi, cci);
709
710 return IRQ_HANDLED;
711 }
712
ccg_request_irq(struct ucsi_ccg * uc)713 static int ccg_request_irq(struct ucsi_ccg *uc)
714 {
715 unsigned long flags = IRQF_ONESHOT;
716
717 if (!dev_fwnode(uc->dev))
718 flags |= IRQF_TRIGGER_HIGH;
719
720 return request_threaded_irq(uc->irq, NULL, ccg_irq_handler, flags, dev_name(uc->dev), uc);
721 }
722
ccg_pm_workaround_work(struct work_struct * pm_work)723 static void ccg_pm_workaround_work(struct work_struct *pm_work)
724 {
725 ccg_irq_handler(0, container_of(pm_work, struct ucsi_ccg, pm_work));
726 }
727
get_fw_info(struct ucsi_ccg * uc)728 static int get_fw_info(struct ucsi_ccg *uc)
729 {
730 int err;
731
732 err = ccg_read(uc, CCGX_RAB_READ_ALL_VER, (u8 *)(&uc->version),
733 sizeof(uc->version));
734 if (err < 0)
735 return err;
736
737 uc->fw_version = CCG_VERSION(uc->version[FW2].app.ver) |
738 CCG_VERSION_PATCH(uc->version[FW2].app.patch);
739
740 err = ccg_read(uc, CCGX_RAB_DEVICE_MODE, (u8 *)(&uc->info),
741 sizeof(uc->info));
742 if (err < 0)
743 return err;
744
745 return 0;
746 }
747
invalid_async_evt(int code)748 static inline bool invalid_async_evt(int code)
749 {
750 return (code >= CCG_EVENT_MAX) || (code < EVENT_INDEX);
751 }
752
ccg_process_response(struct ucsi_ccg * uc)753 static void ccg_process_response(struct ucsi_ccg *uc)
754 {
755 struct device *dev = uc->dev;
756
757 if (uc->dev_resp.code & ASYNC_EVENT) {
758 if (uc->dev_resp.code == RESET_COMPLETE) {
759 if (test_bit(RESET_PENDING, &uc->flags))
760 uc->cmd_resp = uc->dev_resp.code;
761 get_fw_info(uc);
762 }
763 if (invalid_async_evt(uc->dev_resp.code))
764 dev_err(dev, "invalid async evt %d\n",
765 uc->dev_resp.code);
766 } else {
767 if (test_bit(DEV_CMD_PENDING, &uc->flags)) {
768 uc->cmd_resp = uc->dev_resp.code;
769 clear_bit(DEV_CMD_PENDING, &uc->flags);
770 } else {
771 dev_err(dev, "dev resp 0x%04x but no cmd pending\n",
772 uc->dev_resp.code);
773 }
774 }
775 }
776
ccg_read_response(struct ucsi_ccg * uc)777 static int ccg_read_response(struct ucsi_ccg *uc)
778 {
779 unsigned long target = jiffies + msecs_to_jiffies(1000);
780 struct device *dev = uc->dev;
781 u8 intval;
782 int status;
783
784 /* wait for interrupt status to get updated */
785 do {
786 status = ccg_read(uc, CCGX_RAB_INTR_REG, &intval,
787 sizeof(intval));
788 if (status < 0)
789 return status;
790
791 if (intval & DEV_INT)
792 break;
793 usleep_range(500, 600);
794 } while (time_is_after_jiffies(target));
795
796 if (time_is_before_jiffies(target)) {
797 dev_err(dev, "response timeout error\n");
798 return -ETIME;
799 }
800
801 status = ccg_read(uc, CCGX_RAB_RESPONSE, (u8 *)&uc->dev_resp,
802 sizeof(uc->dev_resp));
803 if (status < 0)
804 return status;
805
806 status = ccg_write(uc, CCGX_RAB_INTR_REG, &intval, sizeof(intval));
807 if (status < 0)
808 return status;
809
810 return 0;
811 }
812
/*
 * Send one HPI command and block for its response.
 *
 * Caller must hold uc->lock.  Returns the device response code (enum
 * ccg_resp_code) on success, or a negative errno on write/response
 * failure.
 */
static int ccg_send_command(struct ucsi_ccg *uc, struct ccg_cmd *cmd)
{
	struct device *dev = uc->dev;
	int ret;

	/* flag a device command pending so the response path routes to us */
	switch (cmd->reg & 0xF000) {
	case DEV_REG_IDX:
		set_bit(DEV_CMD_PENDING, &uc->flags);
		break;
	default:
		dev_err(dev, "invalid cmd register\n");
		break;
	}

	ret = ccg_write(uc, cmd->reg, (u8 *)&cmd->data, cmd->len);
	if (ret < 0)
		return ret;

	/* give the device its per-command settle time before polling */
	msleep(cmd->delay);

	ret = ccg_read_response(uc);
	if (ret < 0) {
		dev_err(dev, "response read error\n");
		/* undo the pending flag set above */
		switch (cmd->reg & 0xF000) {
		case DEV_REG_IDX:
			clear_bit(DEV_CMD_PENDING, &uc->flags);
			break;
		default:
			dev_err(dev, "invalid cmd register\n");
			break;
		}
		return -EIO;
	}
	ccg_process_response(uc);

	return uc->cmd_resp;
}
851
ccg_cmd_enter_flashing(struct ucsi_ccg * uc)852 static int ccg_cmd_enter_flashing(struct ucsi_ccg *uc)
853 {
854 struct ccg_cmd cmd;
855 int ret;
856
857 cmd.reg = CCGX_RAB_ENTER_FLASHING;
858 cmd.data = FLASH_ENTER_SIG;
859 cmd.len = 1;
860 cmd.delay = 50;
861
862 mutex_lock(&uc->lock);
863
864 ret = ccg_send_command(uc, &cmd);
865
866 mutex_unlock(&uc->lock);
867
868 if (ret != CMD_SUCCESS) {
869 dev_err(uc->dev, "enter flashing failed ret=%d\n", ret);
870 return ret;
871 }
872
873 return 0;
874 }
875
ccg_cmd_reset(struct ucsi_ccg * uc)876 static int ccg_cmd_reset(struct ucsi_ccg *uc)
877 {
878 struct ccg_cmd cmd;
879 u8 *p;
880 int ret;
881
882 p = (u8 *)&cmd.data;
883 cmd.reg = CCGX_RAB_RESET_REQ;
884 p[0] = RESET_SIG;
885 p[1] = CMD_RESET_DEV;
886 cmd.len = 2;
887 cmd.delay = 5000;
888
889 mutex_lock(&uc->lock);
890
891 set_bit(RESET_PENDING, &uc->flags);
892
893 ret = ccg_send_command(uc, &cmd);
894 if (ret != RESET_COMPLETE)
895 goto err_clear_flag;
896
897 ret = 0;
898
899 err_clear_flag:
900 clear_bit(RESET_PENDING, &uc->flags);
901
902 mutex_unlock(&uc->lock);
903
904 return ret;
905 }
906
/* Enable or disable the PD port(s), according to uc->port_num. */
static int ccg_cmd_port_control(struct ucsi_ccg *uc, bool enable)
{
	struct ccg_cmd cmd = {
		.reg	= CCGX_RAB_PDPORT_ENABLE,
		.len	= 1,
		.delay	= 10,
	};
	int ret;

	if (!enable)
		cmd.data = 0x0;
	else if (uc->port_num == 1)
		cmd.data = PDPORT_1;
	else
		cmd.data = PDPORT_1 | PDPORT_2;

	mutex_lock(&uc->lock);
	ret = ccg_send_command(uc, &cmd);
	mutex_unlock(&uc->lock);

	if (ret != CMD_SUCCESS) {
		dev_err(uc->dev, "port control failed ret=%d\n", ret);
		return ret;
	}
	return 0;
}
933
ccg_cmd_jump_boot_mode(struct ucsi_ccg * uc,int bl_mode)934 static int ccg_cmd_jump_boot_mode(struct ucsi_ccg *uc, int bl_mode)
935 {
936 struct ccg_cmd cmd;
937 int ret;
938
939 cmd.reg = CCGX_RAB_JUMP_TO_BOOT;
940
941 if (bl_mode)
942 cmd.data = TO_BOOT;
943 else
944 cmd.data = TO_ALT_FW;
945
946 cmd.len = 1;
947 cmd.delay = 100;
948
949 mutex_lock(&uc->lock);
950
951 set_bit(RESET_PENDING, &uc->flags);
952
953 ret = ccg_send_command(uc, &cmd);
954 if (ret != RESET_COMPLETE)
955 goto err_clear_flag;
956
957 ret = 0;
958
959 err_clear_flag:
960 clear_bit(RESET_PENDING, &uc->flags);
961
962 mutex_unlock(&uc->lock);
963
964 return ret;
965 }
966
/*
 * Write one CCG4_ROW_SIZE flash row: stage the data into the device's
 * flash read/write memory over raw i2c, then trigger the row write via
 * the FLASH_ROW_READ_WRITE register.  @fcmd selects the flash command
 * (data row, FWCT part 1/2, or FWCT signature write).
 */
static int
ccg_cmd_write_flash_row(struct ucsi_ccg *uc, u16 row,
			const void *data, u8 fcmd)
{
	struct i2c_client *client = uc->client;
	struct ccg_cmd cmd;
	u8 buf[CCG4_ROW_SIZE + 2];	/* 2-byte register address + row */
	u8 *p;
	int ret;

	/* Copy the data into the flash read/write memory. */
	put_unaligned_le16(REG_FLASH_RW_MEM, buf);

	memcpy(buf + 2, data, CCG4_ROW_SIZE);

	mutex_lock(&uc->lock);

	ret = i2c_master_send(client, buf, CCG4_ROW_SIZE + 2);
	if (ret != CCG4_ROW_SIZE + 2) {
		dev_err(uc->dev, "REG_FLASH_RW_MEM write fail %d\n", ret);
		mutex_unlock(&uc->lock);
		return ret < 0 ? ret : -EIO;
	}

	/* Use the FLASH_ROW_READ_WRITE register to trigger */
	/* writing of data to the desired flash row */
	p = (u8 *)&cmd.data;
	cmd.reg = CCGX_RAB_FLASH_ROW_RW;
	p[0] = FLASH_SIG;
	p[1] = fcmd;
	put_unaligned_le16(row, &p[2]);
	cmd.len = 4;
	cmd.delay = 50;
	/*
	 * Empirical extra settle time: signature writes and row 510
	 * (presumably a metadata row) take longer — TODO confirm against
	 * the CCGx datasheet.
	 */
	if (fcmd == FLASH_FWCT_SIG_WR_CMD)
		cmd.delay += 400;
	if (row == 510)
		cmd.delay += 220;
	ret = ccg_send_command(uc, &cmd);

	mutex_unlock(&uc->lock);

	if (ret != CMD_SUCCESS) {
		dev_err(uc->dev, "write flash row failed ret=%d\n", ret);
		return ret;
	}

	return 0;
}
1015
ccg_cmd_validate_fw(struct ucsi_ccg * uc,unsigned int fwid)1016 static int ccg_cmd_validate_fw(struct ucsi_ccg *uc, unsigned int fwid)
1017 {
1018 struct ccg_cmd cmd;
1019 int ret;
1020
1021 cmd.reg = CCGX_RAB_VALIDATE_FW;
1022 cmd.data = fwid;
1023 cmd.len = 1;
1024 cmd.delay = 500;
1025
1026 mutex_lock(&uc->lock);
1027
1028 ret = ccg_send_command(uc, &cmd);
1029
1030 mutex_unlock(&uc->lock);
1031
1032 if (ret != CMD_SUCCESS)
1033 return ret;
1034
1035 return 0;
1036 }
1037
ccg_check_vendor_version(struct ucsi_ccg * uc,struct version_format * app,struct fw_config_table * fw_cfg)1038 static bool ccg_check_vendor_version(struct ucsi_ccg *uc,
1039 struct version_format *app,
1040 struct fw_config_table *fw_cfg)
1041 {
1042 struct device *dev = uc->dev;
1043
1044 /* Check if the fw build is for supported vendors */
1045 if (le16_to_cpu(app->build) != uc->fw_build) {
1046 dev_info(dev, "current fw is not from supported vendor\n");
1047 return false;
1048 }
1049
1050 /* Check if the new fw build is for supported vendors */
1051 if (le16_to_cpu(fw_cfg->app.build) != uc->fw_build) {
1052 dev_info(dev, "new fw is not from supported vendor\n");
1053 return false;
1054 }
1055 return true;
1056 }
1057
/*
 * Decide whether firmware image @fw_name is newer than the version
 * currently on the device (@app).  Only signed images carrying a FWCT
 * trailer from the supported vendor are considered; anything else
 * returns false.
 */
static bool ccg_check_fw_version(struct ucsi_ccg *uc, const char *fw_name,
				 struct version_format *app)
{
	const struct firmware *fw = NULL;
	struct device *dev = uc->dev;
	struct fw_config_table fw_cfg;
	u32 cur_version, new_version;
	bool is_later = false;

	if (request_firmware(&fw, fw_name, dev) != 0) {
		dev_err(dev, "error: Failed to open cyacd file %s\n", fw_name);
		return false;
	}

	/*
	 * check if signed fw
	 * last part of fw image is fw cfg table and signature
	 */
	if (fw->size < sizeof(fw_cfg) + FW_CFG_TABLE_SIG_SIZE)
		goto out_release_firmware;

	memcpy((uint8_t *)&fw_cfg, fw->data + fw->size -
	       sizeof(fw_cfg) - FW_CFG_TABLE_SIG_SIZE, sizeof(fw_cfg));

	/*
	 * "FWCT" magic.  NOTE(review): compared without le32_to_cpu —
	 * assumes a little-endian host like the rest of this table; confirm
	 * before enabling on big-endian.
	 */
	if (fw_cfg.identity != ('F' | 'W' << 8 | 'C' << 16 | 'T' << 24)) {
		dev_info(dev, "not a signed image\n");
		goto out_release_firmware;
	}

	/* compare input version with FWCT version */
	cur_version = le16_to_cpu(app->build) | CCG_VERSION_PATCH(app->patch) |
		      CCG_VERSION(app->ver);

	new_version = le16_to_cpu(fw_cfg.app.build) |
		      CCG_VERSION_PATCH(fw_cfg.app.patch) |
		      CCG_VERSION(fw_cfg.app.ver);

	if (!ccg_check_vendor_version(uc, app, &fw_cfg))
		goto out_release_firmware;

	if (new_version > cur_version)
		is_later = true;

out_release_firmware:
	release_firmware(fw);
	return is_later;
}
1105
ccg_fw_update_needed(struct ucsi_ccg * uc,enum enum_flash_mode * mode)1106 static int ccg_fw_update_needed(struct ucsi_ccg *uc,
1107 enum enum_flash_mode *mode)
1108 {
1109 struct device *dev = uc->dev;
1110 int err;
1111 struct version_info version[3];
1112
1113 err = ccg_read(uc, CCGX_RAB_DEVICE_MODE, (u8 *)(&uc->info),
1114 sizeof(uc->info));
1115 if (err) {
1116 dev_err(dev, "read device mode failed\n");
1117 return err;
1118 }
1119
1120 err = ccg_read(uc, CCGX_RAB_READ_ALL_VER, (u8 *)version,
1121 sizeof(version));
1122 if (err) {
1123 dev_err(dev, "read device mode failed\n");
1124 return err;
1125 }
1126
1127 if (memcmp(&version[FW1], "\0\0\0\0\0\0\0\0",
1128 sizeof(struct version_info)) == 0) {
1129 dev_info(dev, "secondary fw is not flashed\n");
1130 *mode = SECONDARY_BL;
1131 } else if (le16_to_cpu(version[FW1].base.build) <
1132 secondary_fw_min_ver) {
1133 dev_info(dev, "secondary fw version is too low (< %d)\n",
1134 secondary_fw_min_ver);
1135 *mode = SECONDARY;
1136 } else if (memcmp(&version[FW2], "\0\0\0\0\0\0\0\0",
1137 sizeof(struct version_info)) == 0) {
1138 dev_info(dev, "primary fw is not flashed\n");
1139 *mode = PRIMARY;
1140 } else if (ccg_check_fw_version(uc, ccg_fw_names[PRIMARY],
1141 &version[FW2].app)) {
1142 dev_info(dev, "found primary fw with later version\n");
1143 *mode = PRIMARY;
1144 } else {
1145 dev_info(dev, "secondary and primary fw are the latest\n");
1146 *mode = FLASH_NOT_NEEDED;
1147 }
1148 return 0;
1149 }
1150
do_flash(struct ucsi_ccg * uc,enum enum_flash_mode mode)1151 static int do_flash(struct ucsi_ccg *uc, enum enum_flash_mode mode)
1152 {
1153 struct device *dev = uc->dev;
1154 const struct firmware *fw = NULL;
1155 const char *p, *s;
1156 const char *eof;
1157 int err, row, len, line_sz, line_cnt = 0;
1158 unsigned long start_time = jiffies;
1159 struct fw_config_table fw_cfg;
1160 u8 fw_cfg_sig[FW_CFG_TABLE_SIG_SIZE];
1161 u8 *wr_buf;
1162
1163 err = request_firmware(&fw, ccg_fw_names[mode], dev);
1164 if (err) {
1165 dev_err(dev, "request %s failed err=%d\n",
1166 ccg_fw_names[mode], err);
1167 return err;
1168 }
1169
1170 if (((uc->info.mode & CCG_DEVINFO_FWMODE_MASK) >>
1171 CCG_DEVINFO_FWMODE_SHIFT) == FW2) {
1172 err = ccg_cmd_port_control(uc, false);
1173 if (err < 0)
1174 goto release_fw;
1175 err = ccg_cmd_jump_boot_mode(uc, 0);
1176 if (err < 0)
1177 goto release_fw;
1178 }
1179
1180 eof = fw->data + fw->size;
1181
1182 /*
1183 * check if signed fw
1184 * last part of fw image is fw cfg table and signature
1185 */
1186 if (fw->size < sizeof(fw_cfg) + sizeof(fw_cfg_sig))
1187 goto not_signed_fw;
1188
1189 memcpy((uint8_t *)&fw_cfg, fw->data + fw->size -
1190 sizeof(fw_cfg) - sizeof(fw_cfg_sig), sizeof(fw_cfg));
1191
1192 if (fw_cfg.identity != ('F' | ('W' << 8) | ('C' << 16) | ('T' << 24))) {
1193 dev_info(dev, "not a signed image\n");
1194 goto not_signed_fw;
1195 }
1196 eof = fw->data + fw->size - sizeof(fw_cfg) - sizeof(fw_cfg_sig);
1197
1198 memcpy((uint8_t *)&fw_cfg_sig,
1199 fw->data + fw->size - sizeof(fw_cfg_sig), sizeof(fw_cfg_sig));
1200
1201 /* flash fw config table and signature first */
1202 err = ccg_cmd_write_flash_row(uc, 0, (u8 *)&fw_cfg,
1203 FLASH_FWCT1_WR_CMD);
1204 if (err)
1205 goto release_fw;
1206
1207 err = ccg_cmd_write_flash_row(uc, 0, (u8 *)&fw_cfg + CCG4_ROW_SIZE,
1208 FLASH_FWCT2_WR_CMD);
1209 if (err)
1210 goto release_fw;
1211
1212 err = ccg_cmd_write_flash_row(uc, 0, &fw_cfg_sig,
1213 FLASH_FWCT_SIG_WR_CMD);
1214 if (err)
1215 goto release_fw;
1216
1217 not_signed_fw:
1218 wr_buf = kzalloc(CCG4_ROW_SIZE + 4, GFP_KERNEL);
1219 if (!wr_buf) {
1220 err = -ENOMEM;
1221 goto release_fw;
1222 }
1223
1224 err = ccg_cmd_enter_flashing(uc);
1225 if (err)
1226 goto release_mem;
1227
1228 /*****************************************************************
1229 * CCG firmware image (.cyacd) file line format
1230 *
1231 * :00rrrrllll[dd....]cc/r/n
1232 *
1233 * :00 header
1234 * rrrr is row number to flash (4 char)
1235 * llll is data len to flash (4 char)
1236 * dd is a data field represents one byte of data (512 char)
1237 * cc is checksum (2 char)
1238 * \r\n newline
1239 *
1240 * Total length: 3 + 4 + 4 + 512 + 2 + 2 = 527
1241 *
1242 *****************************************************************/
1243
1244 p = strnchr(fw->data, fw->size, ':');
1245 while (p < eof) {
1246 s = strnchr(p + 1, eof - p - 1, ':');
1247
1248 if (!s)
1249 s = eof;
1250
1251 line_sz = s - p;
1252
1253 if (line_sz != CYACD_LINE_SIZE) {
1254 dev_err(dev, "Bad FW format line_sz=%d\n", line_sz);
1255 err = -EINVAL;
1256 goto release_mem;
1257 }
1258
1259 if (hex2bin(wr_buf, p + 3, CCG4_ROW_SIZE + 4)) {
1260 err = -EINVAL;
1261 goto release_mem;
1262 }
1263
1264 row = get_unaligned_be16(wr_buf);
1265 len = get_unaligned_be16(&wr_buf[2]);
1266
1267 if (len != CCG4_ROW_SIZE) {
1268 err = -EINVAL;
1269 goto release_mem;
1270 }
1271
1272 err = ccg_cmd_write_flash_row(uc, row, wr_buf + 4,
1273 FLASH_WR_CMD);
1274 if (err)
1275 goto release_mem;
1276
1277 line_cnt++;
1278 p = s;
1279 }
1280
1281 dev_info(dev, "total %d row flashed. time: %dms\n",
1282 line_cnt, jiffies_to_msecs(jiffies - start_time));
1283
1284 err = ccg_cmd_validate_fw(uc, (mode == PRIMARY) ? FW2 : FW1);
1285 if (err)
1286 dev_err(dev, "%s validation failed err=%d\n",
1287 (mode == PRIMARY) ? "FW2" : "FW1", err);
1288 else
1289 dev_info(dev, "%s validated\n",
1290 (mode == PRIMARY) ? "FW2" : "FW1");
1291
1292 err = ccg_cmd_port_control(uc, false);
1293 if (err < 0)
1294 goto release_mem;
1295
1296 err = ccg_cmd_reset(uc);
1297 if (err < 0)
1298 goto release_mem;
1299
1300 err = ccg_cmd_port_control(uc, true);
1301 if (err < 0)
1302 goto release_mem;
1303
1304 release_mem:
1305 kfree(wr_buf);
1306
1307 release_fw:
1308 release_firmware(fw);
1309 return err;
1310 }
1311
1312 /*******************************************************************************
1313 * CCG4 has two copies of the firmware in addition to the bootloader.
1314 * If the device is running FW1, FW2 can be updated with the new version.
1315 * Dual firmware mode allows the CCG device to stay in a PD contract and support
1316 * USB PD and Type-C functionality while a firmware update is in progress.
1317 ******************************************************************************/
ccg_fw_update(struct ucsi_ccg * uc,enum enum_flash_mode flash_mode)1318 static int ccg_fw_update(struct ucsi_ccg *uc, enum enum_flash_mode flash_mode)
1319 {
1320 int err = 0;
1321
1322 while (flash_mode != FLASH_NOT_NEEDED) {
1323 err = do_flash(uc, flash_mode);
1324 if (err < 0)
1325 return err;
1326 err = ccg_fw_update_needed(uc, &flash_mode);
1327 if (err < 0)
1328 return err;
1329 }
1330 dev_info(uc->dev, "CCG FW update successful\n");
1331
1332 return err;
1333 }
1334
ccg_restart(struct ucsi_ccg * uc)1335 static int ccg_restart(struct ucsi_ccg *uc)
1336 {
1337 struct device *dev = uc->dev;
1338 int status;
1339
1340 status = ucsi_ccg_init(uc);
1341 if (status < 0) {
1342 dev_err(dev, "ucsi_ccg_start fail, err=%d\n", status);
1343 return status;
1344 }
1345
1346 status = ccg_request_irq(uc);
1347 if (status < 0) {
1348 dev_err(dev, "request_threaded_irq failed - %d\n", status);
1349 return status;
1350 }
1351
1352 status = ucsi_register(uc->ucsi);
1353 if (status) {
1354 dev_err(uc->dev, "failed to register the interface\n");
1355 return status;
1356 }
1357
1358 pm_runtime_enable(uc->dev);
1359 return 0;
1360 }
1361
ccg_update_firmware(struct work_struct * work)1362 static void ccg_update_firmware(struct work_struct *work)
1363 {
1364 struct ucsi_ccg *uc = container_of(work, struct ucsi_ccg, work);
1365 enum enum_flash_mode flash_mode;
1366 int status;
1367
1368 status = ccg_fw_update_needed(uc, &flash_mode);
1369 if (status < 0)
1370 return;
1371
1372 if (flash_mode != FLASH_NOT_NEEDED) {
1373 ucsi_unregister(uc->ucsi);
1374 pm_runtime_disable(uc->dev);
1375 free_irq(uc->irq, uc);
1376
1377 ccg_fw_update(uc, flash_mode);
1378 ccg_restart(uc);
1379 }
1380 }
1381
do_flash_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t n)1382 static ssize_t do_flash_store(struct device *dev,
1383 struct device_attribute *attr,
1384 const char *buf, size_t n)
1385 {
1386 struct ucsi_ccg *uc = i2c_get_clientdata(to_i2c_client(dev));
1387 bool flash;
1388
1389 if (kstrtobool(buf, &flash))
1390 return -EINVAL;
1391
1392 if (!flash)
1393 return n;
1394
1395 schedule_work(&uc->work);
1396 return n;
1397 }
1398
ucsi_ccg_attrs_is_visible(struct kobject * kobj,struct attribute * attr,int idx)1399 static umode_t ucsi_ccg_attrs_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
1400 {
1401 struct device *dev = kobj_to_dev(kobj);
1402 struct ucsi_ccg *uc = i2c_get_clientdata(to_i2c_client(dev));
1403
1404 if (!uc->fw_build)
1405 return 0;
1406
1407 return attr->mode;
1408 }
1409
static DEVICE_ATTR_WO(do_flash);

/* sysfs attributes exposed on the I2C client device. */
static struct attribute *ucsi_ccg_attrs[] = {
	&dev_attr_do_flash.attr,
	NULL,
};
static struct attribute_group ucsi_ccg_attr_group = {
	.attrs = ucsi_ccg_attrs,
	/* Group is hidden entirely when FW build info is unavailable. */
	.is_visible = ucsi_ccg_attrs_is_visible,
};
static const struct attribute_group *ucsi_ccg_groups[] = {
	&ucsi_ccg_attr_group,
	NULL,
};
1424
/*
 * ucsi_ccg_probe() - bind the driver to a CCGx I2C client.
 *
 * Allocates the device context, resolves the FW build from the
 * "firmware-name" property, resets/initializes the controller, reads
 * its firmware info, creates and registers the UCSI interface, and
 * enables runtime PM with autosuspend.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ucsi_ccg_probe(struct i2c_client *client)
{
	struct device *dev = &client->dev;
	struct ucsi_ccg *uc;
	const char *fw_name;
	int status;

	uc = devm_kzalloc(dev, sizeof(*uc), GFP_KERNEL);
	if (!uc)
		return -ENOMEM;

	uc->dev = dev;
	uc->client = client;
	uc->irq = client->irq;
	mutex_init(&uc->lock);
	INIT_WORK(&uc->work, ccg_update_firmware);
	INIT_WORK(&uc->pm_work, ccg_pm_workaround_work);

	/* Only fail FW flashing when FW build information is not provided */
	status = device_property_read_string(dev, "firmware-name", &fw_name);
	if (!status) {
		if (!strcmp(fw_name, "nvidia,jetson-agx-xavier"))
			uc->fw_build = CCG_FW_BUILD_NVIDIA_TEGRA;
		else if (!strcmp(fw_name, "nvidia,gpu"))
			uc->fw_build = CCG_FW_BUILD_NVIDIA;
		/* Unknown name: warn, but keep probing (flashing is disabled). */
		if (!uc->fw_build)
			dev_err(uc->dev, "failed to get FW build information\n");
	}

	/* reset ccg device and initialize ucsi */
	status = ucsi_ccg_init(uc);
	if (status < 0) {
		dev_err(uc->dev, "ucsi_ccg_init failed - %d\n", status);
		return status;
	}

	status = get_fw_info(uc);
	if (status < 0) {
		dev_err(uc->dev, "get_fw_info failed - %d\n", status);
		return status;
	}

	/* At least one PD port; a second one if the mode register says so. */
	uc->port_num = 1;

	if (uc->info.mode & CCG_DEVINFO_PDPORTS_MASK)
		uc->port_num++;

	uc->ucsi = ucsi_create(dev, &ucsi_ccg_ops);
	if (IS_ERR(uc->ucsi))
		return PTR_ERR(uc->ucsi);

	ucsi_set_drvdata(uc->ucsi, uc);

	status = ccg_request_irq(uc);
	if (status < 0) {
		dev_err(uc->dev, "request_threaded_irq failed - %d\n", status);
		goto out_ucsi_destroy;
	}

	status = ucsi_register(uc->ucsi);
	if (status)
		goto out_free_irq;

	i2c_set_clientdata(client, uc);

	device_disable_async_suspend(uc->dev);

	/* Runtime PM with a 5 s autosuspend delay. */
	pm_runtime_set_active(uc->dev);
	pm_runtime_enable(uc->dev);
	pm_runtime_use_autosuspend(uc->dev);
	pm_runtime_set_autosuspend_delay(uc->dev, 5000);
	pm_runtime_idle(uc->dev);

	return 0;

out_free_irq:
	free_irq(uc->irq, uc);
out_ucsi_destroy:
	ucsi_destroy(uc->ucsi);

	return status;
}
1507
/*
 * ucsi_ccg_remove() - unbind teardown, mirroring probe in reverse.
 *
 * Work items are cancelled first so neither can race with the
 * unregister/destroy steps below; the IRQ is freed last.
 */
static void ucsi_ccg_remove(struct i2c_client *client)
{
	struct ucsi_ccg *uc = i2c_get_clientdata(client);

	cancel_work_sync(&uc->pm_work);
	cancel_work_sync(&uc->work);
	pm_runtime_disable(uc->dev);
	ucsi_unregister(uc->ucsi);
	ucsi_destroy(uc->ucsi);
	free_irq(uc->irq, uc);
}
1519
/* Devicetree match: Cypress CYPD4226 Type-C controller. */
static const struct of_device_id ucsi_ccg_of_match_table[] = {
		{ .compatible = "cypress,cypd4226", },
		{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ucsi_ccg_of_match_table);

/* Legacy I2C board-info match. */
static const struct i2c_device_id ucsi_ccg_device_id[] = {
	{ "ccgx-ucsi" },
	{}
};
MODULE_DEVICE_TABLE(i2c, ucsi_ccg_device_id);

/* ACPI match (AMD platforms exposing the CCGx via AMDI0042). */
static const struct acpi_device_id amd_i2c_ucsi_match[] = {
	{"AMDI0042"},
	{}
};
MODULE_DEVICE_TABLE(acpi, amd_i2c_ucsi_match);
1537
ucsi_ccg_resume(struct device * dev)1538 static int ucsi_ccg_resume(struct device *dev)
1539 {
1540 struct i2c_client *client = to_i2c_client(dev);
1541 struct ucsi_ccg *uc = i2c_get_clientdata(client);
1542
1543 return ucsi_resume(uc->ucsi);
1544 }
1545
/* Runtime-suspend hook: nothing to do, but must exist for runtime PM. */
static int ucsi_ccg_runtime_suspend(struct device *dev)
{
	return 0;
}
1550
ucsi_ccg_runtime_resume(struct device * dev)1551 static int ucsi_ccg_runtime_resume(struct device *dev)
1552 {
1553 struct i2c_client *client = to_i2c_client(dev);
1554 struct ucsi_ccg *uc = i2c_get_clientdata(client);
1555
1556 /*
1557 * Firmware version 3.1.10 or earlier, built for NVIDIA has known issue
1558 * of missing interrupt when a device is connected for runtime resume.
1559 * Schedule a work to call ISR as a workaround.
1560 */
1561 if (uc->fw_build == CCG_FW_BUILD_NVIDIA &&
1562 uc->fw_version <= CCG_OLD_FW_VERSION)
1563 schedule_work(&uc->pm_work);
1564
1565 return 0;
1566 }
1567
/* No system .suspend: only resume and runtime PM hooks are needed. */
static const struct dev_pm_ops ucsi_ccg_pm = {
	.resume = ucsi_ccg_resume,
	.runtime_suspend = ucsi_ccg_runtime_suspend,
	.runtime_resume = ucsi_ccg_runtime_resume,
};

static struct i2c_driver ucsi_ccg_driver = {
	.driver = {
		.name = "ucsi_ccg",
		.pm = &ucsi_ccg_pm,
		/* do_flash sysfs group; visibility gated on fw_build. */
		.dev_groups = ucsi_ccg_groups,
		.acpi_match_table = amd_i2c_ucsi_match,
		.of_match_table = ucsi_ccg_of_match_table,
	},
	.probe = ucsi_ccg_probe,
	.remove = ucsi_ccg_remove,
	.id_table = ucsi_ccg_device_id,
};

module_i2c_driver(ucsi_ccg_driver);

MODULE_AUTHOR("Ajay Gupta <ajayg@nvidia.com>");
MODULE_DESCRIPTION("UCSI driver for Cypress CCGx Type-C controller");
MODULE_LICENSE("GPL v2");
1592