// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt/USB4 retimer support.
 *
 * Copyright (C) 2020, Intel Corporation
 * Authors: Kranthi Kuntala <kranthi.kuntala@intel.com>
 *	    Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>

#include "sb_regs.h"
#include "tb.h"

#if IS_ENABLED(CONFIG_USB4_DEBUGFS_MARGINING)
#define TB_MAX_RETIMER_INDEX	6
#else
#define TB_MAX_RETIMER_INDEX	2
#endif

/**
 * tb_retimer_nvm_read() - Read contents of retimer NVM
 * @rt: Retimer device
 * @address: NVM address (in bytes) to start reading
 * @buf: Data read from NVM is stored here
 * @size: Number of bytes to read
 *
 * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
 * read was successful and negative errno in case of failure.
 */
int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf,
			size_t size)
{
	return usb4_port_retimer_nvm_read(rt->port, rt->index, address, buf, size);
}

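/*
 * NVMem read callback. Takes a runtime PM reference and the domain lock
 * before accessing the retimer NVM; returns restart_syscall() if the
 * lock is contended so the read gets restarted transparently.
 */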
static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	int ret;

	pm_runtime_get_sync(&rt->dev);

	if (!mutex_trylock(&rt->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = tb_retimer_nvm_read(rt, offset, val, bytes);
	mutex_unlock(&rt->tb->lock);

out:
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_put_autosuspend(&rt->dev);

	return ret;
}

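/*
 * NVMem write callback. The data is only buffered here; it is written
 * to the retimer NVM when the user triggers the operation through the
 * nvm_authenticate sysfs attribute.
 */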
static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	int ret = 0;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
	mutex_unlock(&rt->tb->lock);

	return ret;
}

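/*
 * Registers the active (and, when upgrade is supported, the non-active)
 * NVMem devices for the retimer. When the retimer does not support NVM
 * operations (-EOPNOTSUPP) the upgrade is simply disabled; other errors
 * are propagated to the caller.
 */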
static int tb_retimer_nvm_add(struct tb_retimer *rt)
{
	struct tb_nvm *nvm;
	int ret;

	nvm = tb_nvm_alloc(&rt->dev);
	if (IS_ERR(nvm)) {
		ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
		goto err_nvm;
	}

	ret = tb_nvm_read_version(nvm);
	if (ret)
		goto err_nvm;

	ret = tb_nvm_add_active(nvm, nvm_read);
	if (ret)
		goto err_nvm;

	if (!rt->no_nvm_upgrade) {
		ret = tb_nvm_add_non_active(nvm, nvm_write);
		if (ret)
			goto err_nvm;
	}

	rt->nvm = nvm;
	dev_dbg(&rt->dev, "NVM version %x.%x\n", nvm->major, nvm->minor);
	return 0;

err_nvm:
	dev_dbg(&rt->dev, "NVM upgrade disabled\n");
	rt->no_nvm_upgrade = true;
	if (!IS_ERR(nvm))
		tb_nvm_free(nvm);

	return ret;
}

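/*
 * Validates the buffered NVM image and writes it to the non-active
 * retimer NVM. Marks the image as flushed so that a subsequent
 * authenticate-only request does not write it again.
 */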
static int tb_retimer_nvm_validate_and_write(struct tb_retimer *rt)
{
	unsigned int image_size;
	const u8 *buf;
	int ret;

	ret = tb_nvm_validate(rt->nvm);
	if (ret)
		return ret;

	buf = rt->nvm->buf_data_start;
	image_size = rt->nvm->buf_data_size;

	ret = usb4_port_retimer_nvm_write(rt->port, rt->index, 0, buf,
					 image_size);
	if (ret)
		return ret;

	rt->nvm->flushed = true;
	return 0;
}

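/*
 * Triggers authentication of the new NVM image. With @auth_only the
 * image is expected to be in the retimer NVM already, so only the NVM
 * offset is reset before issuing the command. The retimer typically
 * becomes inaccessible while authenticating, so a failure to read the
 * status afterwards is treated as success.
 */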
static int tb_retimer_nvm_authenticate(struct tb_retimer *rt, bool auth_only)
{
	u32 status;
	int ret;

	if (auth_only) {
		ret = usb4_port_retimer_nvm_set_offset(rt->port, rt->index, 0);
		if (ret)
			return ret;
	}

	ret = usb4_port_retimer_nvm_authenticate(rt->port, rt->index);
	if (ret)
		return ret;

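	/* Allow some time for the authentication to start */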
	usleep_range(100, 150);

	/*
	 * Check the status now if we can still access the retimer. It
	 * is expected that the call below fails.
	 */
	ret = usb4_port_retimer_nvm_authenticate_status(rt->port, rt->index,
							&status);
	if (!ret) {
		rt->auth_status = status;
		return status ? -EINVAL : 0;
	}

	return 0;
}

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	return sysfs_emit(buf, "%#x\n", rt->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t nvm_authenticate_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int ret;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	if (!rt->nvm)
		ret = -EAGAIN;
	else
		ret = sysfs_emit(buf, "%#x\n", rt->auth_status);

	mutex_unlock(&rt->tb->lock);

	return ret;
}

static void tb_retimer_nvm_authenticate_status(struct tb_port *port, u32 *status)
{
	int i;

	tb_port_dbg(port, "reading NVM authentication status of retimers\n");

	/*
	 * Before doing anything else, read the authentication status.
	 * If the retimer has it set, store it for the new retimer
	 * device instance.
	 */
	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
		if (usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]))
			break;
	}
}

static void tb_retimer_set_inbound_sbtx(struct tb_port *port)
{
	int i;

	/*
	 * When the USB4 port is online, sideband communications are
	 * already up.
	 */
	if (!usb4_port_device_is_offline(port->usb4))
		return;

	tb_port_dbg(port, "enabling sideband transactions\n");

	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
		usb4_port_retimer_set_inbound_sbtx(port, i);
}

static void tb_retimer_unset_inbound_sbtx(struct tb_port *port)
{
	int i;

	/*
	 * When the USB4 port is offline we need to keep the sideband
	 * communications up to make it possible to communicate with
	 * the connected retimers.
	 */
	if (usb4_port_device_is_offline(port->usb4))
		return;

	tb_port_dbg(port, "disabling sideband transactions\n");

	for (i = TB_MAX_RETIMER_INDEX; i >= 1; i--) {
		if (usb4_port_retimer_unset_inbound_sbtx(port, i))
			break;
	}
}

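/*
 * A non-zero value written here starts an NVM operation: writing the
 * buffered image, authenticating it, or both, depending on the value
 * (WRITE_ONLY, AUTHENTICATE_ONLY or WRITE_AND_AUTHENTICATE). Writing
 * zero only clears the stored authentication status.
 */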
static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int val, ret;

	pm_runtime_get_sync(&rt->dev);

	if (!mutex_trylock(&rt->tb->lock)) {
		ret = restart_syscall();
		goto exit_rpm;
	}

	if (!rt->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear status */
	rt->auth_status = 0;

	if (val) {
		/*
		 * When NVM authentication starts, the retimer is not
		 * accessible, so calling tb_retimer_unset_inbound_sbtx()
		 * will fail and therefore we do not call it. The exception
		 * is when the validation fails or we only write the new
		 * NVM image without authentication.
		 */
		tb_retimer_set_inbound_sbtx(rt->port);
		if (val == AUTHENTICATE_ONLY) {
			ret = tb_retimer_nvm_authenticate(rt, true);
		} else {
			if (!rt->nvm->flushed) {
				if (!rt->nvm->buf) {
					ret = -EINVAL;
					goto exit_unlock;
				}

				ret = tb_retimer_nvm_validate_and_write(rt);
				if (ret || val == WRITE_ONLY)
					goto exit_unlock;
			}
			if (val == WRITE_AND_AUTHENTICATE)
				ret = tb_retimer_nvm_authenticate(rt, false);
		}
	}

exit_unlock:
	if (ret || val == WRITE_ONLY)
		tb_retimer_unset_inbound_sbtx(rt->port);
	mutex_unlock(&rt->tb->lock);
exit_rpm:
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_put_autosuspend(&rt->dev);

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int ret;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	if (!rt->nvm)
		ret = -EAGAIN;
	else
		ret = sysfs_emit(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor);

	mutex_unlock(&rt->tb->lock);
	return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	return sysfs_emit(buf, "%#x\n", rt->vendor);
}
static DEVICE_ATTR_RO(vendor);

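/*
 * Hide the NVM related attributes when the retimer does not support
 * NVM upgrade.
 */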
static umode_t retimer_is_visible(struct kobject *kobj, struct attribute *attr,
				  int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct tb_retimer *rt = tb_to_retimer(dev);

	if (attr == &dev_attr_nvm_authenticate.attr ||
	    attr == &dev_attr_nvm_version.attr)
		return rt->no_nvm_upgrade ? 0 : attr->mode;

	return attr->mode;
}

static struct attribute *retimer_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_vendor.attr,
	NULL
};

static const struct attribute_group retimer_group = {
	.is_visible = retimer_is_visible,
	.attrs = retimer_attrs,
};

static const struct attribute_group *retimer_groups[] = {
	&retimer_group,
	NULL
};

static void tb_retimer_release(struct device *dev)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	kfree(rt);
}

const struct device_type tb_retimer_type = {
	.name = "thunderbolt_retimer",
	.groups = retimer_groups,
	.release = tb_retimer_release,
};

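/*
 * Reads the retimer identification over sideband and, if a retimer is
 * present, registers a new retimer device under the USB4 port along
 * with its NVM devices and runtime PM setup. @auth_status is the NVM
 * authentication status read earlier during the scan and @on_board
 * tells whether this is an on-board (non-cable) retimer.
 */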
static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status,
			  bool on_board)
{
	struct tb_retimer *rt;
	u32 vendor, device;
	int ret;

	ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
				USB4_SB_VENDOR_ID, &vendor, sizeof(vendor));
	if (ret) {
		if (ret != -ENODEV)
			tb_port_warn(port, "failed to read retimer VendorId: %d\n", ret);
		return ret;
	}

	ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
				USB4_SB_PRODUCT_ID, &device, sizeof(device));
	if (ret) {
		if (ret != -ENODEV)
			tb_port_warn(port, "failed to read retimer ProductId: %d\n", ret);
		return ret;
	}

	rt = kzalloc(sizeof(*rt), GFP_KERNEL);
	if (!rt)
		return -ENOMEM;

	rt->index = index;
	rt->vendor = vendor;
	rt->device = device;
	rt->auth_status = auth_status;
	rt->port = port;
	rt->tb = port->sw->tb;

	/*
	 * Only support NVM upgrade for on-board retimers. For the
	 * retimers on the other side of the connection NVM upgrade is
	 * not supported.
	 */
	if (!on_board || usb4_port_retimer_nvm_sector_size(port, index) <= 0)
		rt->no_nvm_upgrade = true;

	rt->dev.parent = &port->usb4->dev;
	rt->dev.bus = &tb_bus_type;
	rt->dev.type = &tb_retimer_type;
	dev_set_name(&rt->dev, "%s:%u.%u", dev_name(&port->sw->dev),
		     port->port, index);

	ret = device_register(&rt->dev);
	if (ret) {
		dev_err(&rt->dev, "failed to register retimer: %d\n", ret);
		put_device(&rt->dev);
		return ret;
	}

	ret = tb_retimer_nvm_add(rt);
	if (ret) {
		dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret);
		device_unregister(&rt->dev);
		return ret;
	}

	dev_info(&rt->dev, "new retimer found, vendor=%#x device=%#x\n",
		 rt->vendor, rt->device);

	pm_runtime_no_callbacks(&rt->dev);
	pm_runtime_set_active(&rt->dev);
	pm_runtime_enable(&rt->dev);
	pm_runtime_set_autosuspend_delay(&rt->dev, TB_AUTOSUSPEND_DELAY);
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_use_autosuspend(&rt->dev);

	tb_retimer_debugfs_init(rt);
	return 0;
}

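/*
 * Removes a previously added retimer: removes its debugfs entries,
 * frees its NVM devices and unregisters the device.
 */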
static void tb_retimer_remove(struct tb_retimer *rt)
{
	dev_info(&rt->dev, "retimer disconnected\n");
	tb_retimer_debugfs_remove(rt);
	tb_nvm_free(rt->nvm);
	device_unregister(&rt->dev);
}

struct tb_retimer_lookup {
	const struct tb_port *port;
	u8 index;
};

static int retimer_match(struct device *dev, const void *data)
{
	const struct tb_retimer_lookup *lookup = data;
	struct tb_retimer *rt = tb_to_retimer(dev);

	return rt && rt->port == lookup->port && rt->index == lookup->index;
}

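/*
 * Returns the retimer with the given index under @port, or %NULL if it
 * has not been added yet. The caller needs to drop the reference with
 * put_device() when done with it.
 */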
static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
{
	struct tb_retimer_lookup lookup = { .port = port, .index = index };
	struct device *dev;

	dev = device_find_child(&port->usb4->dev, &lookup, retimer_match);
	if (dev)
		return tb_to_retimer(dev);

	return NULL;
}

/**
 * tb_retimer_scan() - Scan for on-board retimers under port
 * @port: USB4 port to scan
 * @add: If true also registers found retimers
 *
 * Brings the sideband into a state where retimers can be accessed.
 * Then tries to enumerate on-board retimers connected to @port. Found
 * retimers are registered as children of @port if @add is set. Does
 * not scan for cable retimers for now.
 */
int tb_retimer_scan(struct tb_port *port, bool add)
{
	u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
	int ret, i, max, last_idx = 0;

	/*
	 * Send broadcast RT to make sure retimer indices facing this
	 * port are set.
	 */
	ret = usb4_port_enumerate_retimers(port);
	if (ret)
		return ret;

	/*
	 * Immediately after sending enumerate retimers read the
	 * authentication status of each retimer.
	 */
	tb_retimer_nvm_authenticate_status(port, status);

	/*
	 * Enable the sideband channel for each retimer. We can do this
	 * regardless of whether there is a device connected or not.
	 */
	tb_retimer_set_inbound_sbtx(port);

	for (max = 1, i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
		/*
		 * Last retimer is true only for the last on-board
		 * retimer (the one connected directly to the Type-C
		 * port).
		 */
		ret = usb4_port_retimer_is_last(port, i);
		if (ret > 0)
			last_idx = i;
		else if (ret < 0)
			break;

		max = i;
	}

	ret = 0;
	if (!IS_ENABLED(CONFIG_USB4_DEBUGFS_MARGINING))
		max = min(last_idx, max);

	/* Add retimers if they do not exist already */
	for (i = 1; i <= max; i++) {
		struct tb_retimer *rt;

		/* Skip cable retimers */
		if (usb4_port_retimer_is_cable(port, i))
			continue;

		rt = tb_port_find_retimer(port, i);
		if (rt) {
			put_device(&rt->dev);
		} else if (add) {
			ret = tb_retimer_add(port, i, status[i], i <= last_idx);
			if (ret && ret != -EOPNOTSUPP)
				break;
		}
	}

	tb_retimer_unset_inbound_sbtx(port);
	return ret;
}

static int remove_retimer(struct device *dev, void *data)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	struct tb_port *port = data;

	if (rt && rt->port == port)
		tb_retimer_remove(rt);
	return 0;
}

/**
 * tb_retimer_remove_all() - Remove all retimers under port
 * @port: USB4 port whose retimers to remove
 *
 * This removes all previously added retimers under @port.
 */
void tb_retimer_remove_all(struct tb_port *port)
{
	struct usb4_port *usb4;

	usb4 = port->usb4;
	if (usb4)
		device_for_each_child_reverse(&usb4->dev, port,
					      remove_retimer);
}