// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt/USB4 retimer support.
 *
 * Copyright (C) 2020, Intel Corporation
 * Authors: Kranthi Kuntala <kranthi.kuntala@intel.com>
 *	    Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>

#include "sb_regs.h"
#include "tb.h"

#define TB_MAX_RETIMER_INDEX	6

/**
 * tb_retimer_nvm_read() - Read contents of retimer NVM
 * @rt: Retimer device
 * @address: NVM address (in bytes) to start reading
 * @buf: Data read from NVM is stored here
 * @size: Number of bytes to read
 *
 * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
 * read was successful and negative errno in case of failure.
 */
int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf,
			size_t size)
{
	return usb4_port_retimer_nvm_read(rt->port, rt->index, address, buf, size);
}

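/*
 * NVMem read callback for the active NVM. Takes a runtime PM reference
 * on the retimer for the duration of the sideband access and restarts
 * the syscall if the domain lock cannot be taken without blocking.
 */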
static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	int ret;

	pm_runtime_get_sync(&rt->dev);

	if (!mutex_trylock(&rt->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = tb_retimer_nvm_read(rt, offset, val, bytes);
	mutex_unlock(&rt->tb->lock);

out:
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_put_autosuspend(&rt->dev);

	return ret;
}

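/*
 * NVMem write callback for the non-active NVM. Writes are only buffered
 * here; the image is flushed to the retimer when authentication is
 * triggered through the nvm_authenticate attribute.
 */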
static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	int ret = 0;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
	mutex_unlock(&rt->tb->lock);

	return ret;
}

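/*
 * Registers the active and non-active NVM devices for the retimer. If
 * tb_nvm_alloc() reports -EOPNOTSUPP the retimer is still added, just
 * without NVM upgrade support.
 */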
static int tb_retimer_nvm_add(struct tb_retimer *rt)
{
	struct tb_nvm *nvm;
	int ret;

	nvm = tb_nvm_alloc(&rt->dev);
	if (IS_ERR(nvm)) {
		ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
		goto err_nvm;
	}

	ret = tb_nvm_read_version(nvm);
	if (ret)
		goto err_nvm;

	ret = tb_nvm_add_active(nvm, nvm_read);
	if (ret)
		goto err_nvm;

	ret = tb_nvm_add_non_active(nvm, nvm_write);
	if (ret)
		goto err_nvm;

	rt->nvm = nvm;
	dev_dbg(&rt->dev, "NVM version %x.%x\n", nvm->major, nvm->minor);
	return 0;

err_nvm:
	dev_dbg(&rt->dev, "NVM upgrade disabled\n");
	if (!IS_ERR(nvm))
		tb_nvm_free(nvm);

	return ret;
}

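/*
 * Validates the buffered NVM image and, if it passes, writes it to the
 * non-active NVM of the retimer. The buffer is marked flushed so that a
 * subsequent authenticate-only request does not write it again.
 */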
static int tb_retimer_nvm_validate_and_write(struct tb_retimer *rt)
{
	unsigned int image_size;
	const u8 *buf;
	int ret;

	ret = tb_nvm_validate(rt->nvm);
	if (ret)
		return ret;

	buf = rt->nvm->buf_data_start;
	image_size = rt->nvm->buf_data_size;

	ret = usb4_port_retimer_nvm_write(rt->port, rt->index, 0, buf,
					  image_size);
	if (ret)
		return ret;

	rt->nvm->flushed = true;
	return 0;
}

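/*
 * Starts NVM authentication for the retimer. With @auth_only the NVM
 * write offset is set to 0 first (no new image is written in this
 * case). The retimer typically becomes inaccessible once authentication
 * starts, which is why a failing status read afterwards is not treated
 * as an error.
 */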
static int tb_retimer_nvm_authenticate(struct tb_retimer *rt, bool auth_only)
{
	u32 status;
	int ret;

	if (auth_only) {
		ret = usb4_port_retimer_nvm_set_offset(rt->port, rt->index, 0);
		if (ret)
			return ret;
	}

	ret = usb4_port_retimer_nvm_authenticate(rt->port, rt->index);
	if (ret)
		return ret;

	usleep_range(100, 150);

	/*
	 * Check the status now if we can still access the retimer. It
	 * is expected that the read below fails.
	 */
	ret = usb4_port_retimer_nvm_authenticate_status(rt->port, rt->index,
							&status);
	if (!ret) {
		rt->auth_status = status;
		return status ? -EINVAL : 0;
	}

	return 0;
}

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	return sysfs_emit(buf, "%#x\n", rt->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t nvm_authenticate_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int ret;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	if (!rt->nvm)
		ret = -EAGAIN;
	else if (rt->no_nvm_upgrade)
		ret = -EOPNOTSUPP;
	else
		ret = sysfs_emit(buf, "%#x\n", rt->auth_status);

	mutex_unlock(&rt->tb->lock);

	return ret;
}

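/*
 * Reads the NVM authentication status of every possible retimer index
 * into @status so that the result can be attached to the retimer
 * devices created later in the scan.
 */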
static void tb_retimer_nvm_authenticate_status(struct tb_port *port, u32 *status)
{
	int i;

	tb_port_dbg(port, "reading NVM authentication status of retimers\n");

	/*
	 * Before doing anything else, read the authentication status.
	 * If the retimer has it set, store it for the new retimer
	 * device instance.
	 */
	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
		usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]);
}

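/*
 * Enables inbound sideband transactions for every possible retimer
 * index. Only needed when the USB4 port is offline; an online port has
 * the sideband channel up already.
 */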
static void tb_retimer_set_inbound_sbtx(struct tb_port *port)
{
	int i;

	/*
	 * When the USB4 port is online, sideband communications are
	 * already up.
	 */
	if (!usb4_port_device_is_offline(port->usb4))
		return;

	tb_port_dbg(port, "enabling sideband transactions\n");

	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
		usb4_port_retimer_set_inbound_sbtx(port, i);
}

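/*
 * Counterpart of tb_retimer_set_inbound_sbtx(): disables inbound
 * sideband transactions in reverse index order, but only when the USB4
 * port is online.
 */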
static void tb_retimer_unset_inbound_sbtx(struct tb_port *port)
{
	int i;

	/*
	 * When the USB4 port is offline, we need to keep the sideband
	 * communications up to make it possible to communicate with
	 * the connected retimers.
	 */
	if (usb4_port_device_is_offline(port->usb4))
		return;

	tb_port_dbg(port, "disabling sideband transactions\n");

	for (i = TB_MAX_RETIMER_INDEX; i >= 1; i--)
		usb4_port_retimer_unset_inbound_sbtx(port, i);
}

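/*
 * The value written selects the NVM operation: WRITE_AND_AUTHENTICATE
 * flushes the buffered image and starts authentication, WRITE_ONLY only
 * flushes the image, and AUTHENTICATE_ONLY authenticates an image that
 * has been written previously. Writing 0 just clears the stored
 * authentication status.
 */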
static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int val, ret;

	pm_runtime_get_sync(&rt->dev);

	if (!mutex_trylock(&rt->tb->lock)) {
		ret = restart_syscall();
		goto exit_rpm;
	}

	if (!rt->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear status */
	rt->auth_status = 0;

	if (val) {
		/*
		 * When NVM authentication starts the retimer is not
		 * accessible anymore, so calling
		 * tb_retimer_unset_inbound_sbtx() would fail and we do
		 * not call it. The exceptions are when the validation
		 * fails or when we only write the new NVM image without
		 * authentication.
		 */
		tb_retimer_set_inbound_sbtx(rt->port);
		if (val == AUTHENTICATE_ONLY) {
			ret = tb_retimer_nvm_authenticate(rt, true);
		} else {
			if (!rt->nvm->flushed) {
				if (!rt->nvm->buf) {
					ret = -EINVAL;
					goto exit_unlock;
				}

				ret = tb_retimer_nvm_validate_and_write(rt);
				if (ret || val == WRITE_ONLY)
					goto exit_unlock;
			}
			if (val == WRITE_AND_AUTHENTICATE)
				ret = tb_retimer_nvm_authenticate(rt, false);
		}
	}

exit_unlock:
	if (ret || val == WRITE_ONLY)
		tb_retimer_unset_inbound_sbtx(rt->port);
	mutex_unlock(&rt->tb->lock);
exit_rpm:
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_put_autosuspend(&rt->dev);

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int ret;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	if (!rt->nvm)
		ret = -EAGAIN;
	else
		ret = sysfs_emit(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor);

	mutex_unlock(&rt->tb->lock);
	return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	return sysfs_emit(buf, "%#x\n", rt->vendor);
}
static DEVICE_ATTR_RO(vendor);

static struct attribute *retimer_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_vendor.attr,
	NULL
};

static const struct attribute_group retimer_group = {
	.attrs = retimer_attrs,
};

static const struct attribute_group *retimer_groups[] = {
	&retimer_group,
	NULL
};

static void tb_retimer_release(struct device *dev)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	kfree(rt);
}

struct device_type tb_retimer_type = {
	.name = "thunderbolt_retimer",
	.groups = retimer_groups,
	.release = tb_retimer_release,
};

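/*
 * Creates and registers a retimer device for the retimer at @index
 * behind @port. The retimer must support NVM operations (at least
 * reporting its NVM sector size); otherwise it is not added at all.
 */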
static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
{
	struct tb_retimer *rt;
	u32 vendor, device;
	int ret;

	ret = usb4_port_retimer_read(port, index, USB4_SB_VENDOR_ID, &vendor,
				     sizeof(vendor));
	if (ret) {
		if (ret != -ENODEV)
			tb_port_warn(port, "failed to read retimer VendorId: %d\n", ret);
		return ret;
	}

	ret = usb4_port_retimer_read(port, index, USB4_SB_PRODUCT_ID, &device,
				     sizeof(device));
	if (ret) {
		if (ret != -ENODEV)
			tb_port_warn(port, "failed to read retimer ProductId: %d\n", ret);
		return ret;
	}

	/*
	 * Check that it supports NVM operations. If not then don't add
	 * the device at all.
	 */
	ret = usb4_port_retimer_nvm_sector_size(port, index);
	if (ret < 0)
		return ret;

	rt = kzalloc(sizeof(*rt), GFP_KERNEL);
	if (!rt)
		return -ENOMEM;

	rt->index = index;
	rt->vendor = vendor;
	rt->device = device;
	rt->auth_status = auth_status;
	rt->port = port;
	rt->tb = port->sw->tb;

	rt->dev.parent = &port->usb4->dev;
	rt->dev.bus = &tb_bus_type;
	rt->dev.type = &tb_retimer_type;
	dev_set_name(&rt->dev, "%s:%u.%u", dev_name(&port->sw->dev),
		     port->port, index);

	ret = device_register(&rt->dev);
	if (ret) {
		dev_err(&rt->dev, "failed to register retimer: %d\n", ret);
		put_device(&rt->dev);
		return ret;
	}

	ret = tb_retimer_nvm_add(rt);
	if (ret) {
		dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret);
		device_unregister(&rt->dev);
		return ret;
	}

	dev_info(&rt->dev, "new retimer found, vendor=%#x device=%#x\n",
		 rt->vendor, rt->device);

	pm_runtime_no_callbacks(&rt->dev);
	pm_runtime_set_active(&rt->dev);
	pm_runtime_enable(&rt->dev);
	pm_runtime_set_autosuspend_delay(&rt->dev, TB_AUTOSUSPEND_DELAY);
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_use_autosuspend(&rt->dev);

	return 0;
}

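/*
 * Tears down a single retimer device: releases its NVM devices and
 * unregisters it from the bus.
 */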
static void tb_retimer_remove(struct tb_retimer *rt)
{
	dev_info(&rt->dev, "retimer disconnected\n");
	tb_nvm_free(rt->nvm);
	device_unregister(&rt->dev);
}

struct tb_retimer_lookup {
	const struct tb_port *port;
	u8 index;
};

static int retimer_match(struct device *dev, void *data)
{
	const struct tb_retimer_lookup *lookup = data;
	struct tb_retimer *rt = tb_to_retimer(dev);

	return rt && rt->port == lookup->port && rt->index == lookup->index;
}

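/*
 * Returns the already registered retimer at @index under @port, or NULL
 * if there is none. device_find_child() takes a reference to the
 * returned device, so the caller is expected to drop it with
 * put_device() when done (as tb_retimer_scan() does).
 */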
static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
{
	struct tb_retimer_lookup lookup = { .port = port, .index = index };
	struct device *dev;

	dev = device_find_child(&port->usb4->dev, &lookup, retimer_match);
	if (dev)
		return tb_to_retimer(dev);

	return NULL;
}

/**
 * tb_retimer_scan() - Scan for on-board retimers under port
 * @port: USB4 port to scan
 * @add: If true also registers found retimers
 *
 * Brings the sideband into a state where retimers can be accessed.
 * Then tries to enumerate on-board retimers connected to @port. Found
 * retimers are registered as children of @port if @add is set. Does
 * not scan for cable retimers for now.
 */
int tb_retimer_scan(struct tb_port *port, bool add)
{
	u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
	int ret, i, last_idx = 0;

	/*
	 * Send broadcast RT to make sure retimer indices facing this
	 * port are set.
	 */
	ret = usb4_port_enumerate_retimers(port);
	if (ret)
		return ret;

	/*
	 * Immediately after sending enumerate retimers read the
	 * authentication status of each retimer.
	 */
	tb_retimer_nvm_authenticate_status(port, status);

	/*
	 * Enable the sideband channel for each retimer. We can do this
	 * regardless of whether there is a device connected or not.
	 */
	tb_retimer_set_inbound_sbtx(port);

	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
		/*
		 * Last retimer is true only for the last on-board
		 * retimer (the one connected directly to the Type-C
		 * port).
		 */
		ret = usb4_port_retimer_is_last(port, i);
		if (ret > 0)
			last_idx = i;
		else if (ret < 0)
			break;
	}

	tb_retimer_unset_inbound_sbtx(port);

	if (!last_idx)
		return 0;

	/* Add on-board retimers if they do not exist already */
	ret = 0;
	for (i = 1; i <= last_idx; i++) {
		struct tb_retimer *rt;

		rt = tb_port_find_retimer(port, i);
		if (rt) {
			put_device(&rt->dev);
		} else if (add) {
			ret = tb_retimer_add(port, i, status[i]);
			if (ret && ret != -EOPNOTSUPP)
				break;
		}
	}

	return ret;
}

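/*
 * device_for_each_child_reverse() callback: removes the retimer if it
 * belongs to the port being cleaned up.
 */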
static int remove_retimer(struct device *dev, void *data)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	struct tb_port *port = data;

	if (rt && rt->port == port)
		tb_retimer_remove(rt);
	return 0;
}

/**
 * tb_retimer_remove_all() - Remove all retimers under port
 * @port: USB4 port whose retimers to remove
 *
 * This removes all previously added retimers under @port.
 */
void tb_retimer_remove_all(struct tb_port *port)
{
	struct usb4_port *usb4;

	usb4 = port->usb4;
	if (usb4)
		device_for_each_child_reverse(&usb4->dev, port,
					      remove_retimer);
}