// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - control channel and configuration commands
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>

#include "ctl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#define TB_CTL_RX_PKG_COUNT	10
#define TB_CTL_RETRIES		4

/**
 * struct tb_ctl - Thunderbolt control channel
 * @nhi: Pointer to the NHI structure
 * @tx: Transmit ring
 * @rx: Receive ring
 * @frame_pool: DMA pool for control messages
 * @rx_packets: Received control messages
 * @request_queue_lock: Lock protecting @request_queue
 * @request_queue: List of outstanding requests
 * @running: Is the control channel running at the moment
 * @timeout_msec: Default timeout for non-raw control messages
 * @callback: Callback called when hotplug message is received
 * @callback_data: Data passed to @callback
 * @index: Domain number. This will be output with the trace record.
 */
struct tb_ctl {
	struct tb_nhi *nhi;
	struct tb_ring *tx;
	struct tb_ring *rx;

	struct dma_pool *frame_pool;
	struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];
	struct mutex request_queue_lock;
	struct list_head request_queue;
	bool running;

	int timeout_msec;
	event_cb callback;
	void *callback_data;

	int index;
};


#define tb_ctl_WARN(ctl, format, arg...) \
	dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_err(ctl, format, arg...) \
	dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_warn(ctl, format, arg...) \
	dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_info(ctl, format, arg...) \
	dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_dbg(ctl, format, arg...) \
	dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_dbg_once(ctl, format, arg...) \
	dev_dbg_once(&(ctl)->nhi->pdev->dev, format, ## arg)

static DECLARE_WAIT_QUEUE_HEAD(tb_cfg_request_cancel_queue);
/* Serializes access to request kref_get/put */
static DEFINE_MUTEX(tb_cfg_request_lock);
/**
 * tb_cfg_request_alloc() - Allocates a new config request
 *
 * This is a refcounted object so when you are done with it, call
 * tb_cfg_request_put() to release it.
 */
struct tb_cfg_request *tb_cfg_request_alloc(void)
{
	struct tb_cfg_request *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return NULL;

	kref_init(&req->kref);

	return req;
}

/**
 * tb_cfg_request_get() - Increase refcount of a request
 * @req: Request whose refcount is increased
 */
void tb_cfg_request_get(struct tb_cfg_request *req)
{
	mutex_lock(&tb_cfg_request_lock);
	kref_get(&req->kref);
	mutex_unlock(&tb_cfg_request_lock);
}

static void tb_cfg_request_destroy(struct kref *kref)
{
	struct tb_cfg_request *req = container_of(kref, typeof(*req), kref);

	kfree(req);
}

/**
 * tb_cfg_request_put() - Decrease refcount and possibly release the request
 * @req: Request whose refcount is decreased
 *
 * Call this function when you are done with the request. When refcount
 * goes to %0 the object is released.
 */
void tb_cfg_request_put(struct tb_cfg_request *req)
{
	mutex_lock(&tb_cfg_request_lock);
	kref_put(&req->kref, tb_cfg_request_destroy);
	mutex_unlock(&tb_cfg_request_lock);
}

static int tb_cfg_request_enqueue(struct tb_ctl *ctl,
				  struct tb_cfg_request *req)
{
	WARN_ON(test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags));
	WARN_ON(req->ctl);

	mutex_lock(&ctl->request_queue_lock);
	if (!ctl->running) {
		mutex_unlock(&ctl->request_queue_lock);
		return -ENOTCONN;
	}
	req->ctl = ctl;
	list_add_tail(&req->list, &ctl->request_queue);
	set_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	mutex_unlock(&ctl->request_queue_lock);
	return 0;
}

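/*
 * Remove the request from the controller's queue. If the request was
 * canceled, wake up the waiter in tb_cfg_request_cancel().
 */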
static void tb_cfg_request_dequeue(struct tb_cfg_request *req)
{
	struct tb_ctl *ctl = req->ctl;

	mutex_lock(&ctl->request_queue_lock);
	if (!test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags)) {
		mutex_unlock(&ctl->request_queue_lock);
		return;
	}

	list_del(&req->list);
	clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		wake_up(&tb_cfg_request_cancel_queue);
	mutex_unlock(&ctl->request_queue_lock);
}

static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
{
	return test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
}

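/*
 * Look up an active request matching the received packet. Returns the
 * matching request with its refcount increased (the caller must drop it
 * with tb_cfg_request_put()), or NULL if nothing matches.
 */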
static struct tb_cfg_request *
tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
{
	struct tb_cfg_request *req = NULL, *iter;

	mutex_lock(&pkg->ctl->request_queue_lock);
	list_for_each_entry(iter, &pkg->ctl->request_queue, list) {
		tb_cfg_request_get(iter);
		if (iter->match(iter, pkg)) {
			req = iter;
			break;
		}
		tb_cfg_request_put(iter);
	}
	mutex_unlock(&pkg->ctl->request_queue_lock);

	return req;
}

/* utility functions */


static int check_header(const struct ctl_pkg *pkg, u32 len,
			enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;

	/* check frame, TODO: frame flags */
	if (WARN(len != pkg->frame.size,
			"wrong framesize (expected %#x, got %#x)\n",
			len, pkg->frame.size))
		return -EIO;
	if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n",
			type, pkg->frame.eof))
		return -EIO;
	if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n",
			pkg->frame.sof))
		return -EIO;

	/* check header */
	if (WARN(header->unknown != 1 << 9,
			"header->unknown is %#x\n", header->unknown))
		return -EIO;
	if (WARN(route != tb_cfg_get_route(header),
			"wrong route (expected %llx, got %llx)\n",
			route, tb_cfg_get_route(header)))
		return -EIO;
	return 0;
}
static int check_config_address(struct tb_cfg_address addr,
				enum tb_cfg_space space, u32 offset,
				u32 length)
{
	if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero))
		return -EIO;
	if (WARN(space != addr.space, "wrong space (expected %x, got %x)\n",
			space, addr.space))
		return -EIO;
	if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x)\n",
			offset, addr.offset))
		return -EIO;
	if (WARN(length != addr.length, "wrong length (expected %x, got %x)\n",
			length, addr.length))
		return -EIO;
	/*
	 * We cannot check addr->port as it is set to the upstream port of the
	 * sender.
	 */
	return 0;
}

static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
{
	struct cfg_error_pkg *pkg = response->buffer;
	struct tb_cfg_result res = { 0 };

	res.response_route = tb_cfg_get_route(&pkg->header);
	res.response_port = 0;
	res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR,
			       tb_cfg_get_route(&pkg->header));
	if (res.err)
		return res;

	res.err = 1;
	res.tb_error = pkg->error;
	res.response_port = pkg->port;
	return res;
}

static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len,
					 enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;
	struct tb_cfg_result res = { 0 };

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return decode_error(pkg);

	res.response_port = 0; /* will be updated later for cfg_read/write */
	res.response_route = tb_cfg_get_route(header);
	res.err = check_header(pkg, len, type, route);
	return res;
}
static void tb_cfg_print_error(struct tb_ctl *ctl, enum tb_cfg_space space,
			       const struct tb_cfg_result *res)
{
	WARN_ON(res->err != 1);
	switch (res->tb_error) {
	case TB_CFG_ERROR_PORT_NOT_CONNECTED:
		/*
		 * Port is not connected. This can happen during surprise
		 * removal. Do not warn.
		 */
		return;
	case TB_CFG_ERROR_INVALID_CONFIG_SPACE:
		/*
		 * Invalid cfg_space/offset/length combination in
		 * cfg_read/cfg_write.
		 */
		tb_ctl_dbg_once(ctl, "%llx:%x: invalid config space (%u) or offset\n",
				res->response_route, res->response_port, space);
		return;
	case TB_CFG_ERROR_NO_SUCH_PORT:
		/*
		 * - The route contains a non-existent port.
		 * - The route contains a non-PHY port (e.g. PCIe).
		 * - The port in cfg_read/cfg_write does not exist.
		 */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n",
			res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOOP:
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
			res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOCK:
		tb_ctl_warn(ctl, "%llx:%x: downstream port is locked\n",
			    res->response_route, res->response_port);
		return;
	default:
		/* 5,6,7,9 and 11 are also valid error codes */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
			res->response_route, res->response_port);
		return;
	}
}

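/* Compute the CRC32C checksum carried at the end of each control packet */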
static __be32 tb_crc(const void *data, size_t len)
{
	return cpu_to_be32(~crc32c(~0, data, len));
}

static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
{
	if (pkg) {
		dma_pool_free(pkg->ctl->frame_pool,
			      pkg->buffer, pkg->frame.buffer_phy);
		kfree(pkg);
	}
}

static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
{
	struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);

	if (!pkg)
		return NULL;
	pkg->ctl = ctl;
	pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
				     &pkg->frame.buffer_phy);
	if (!pkg->buffer) {
		kfree(pkg);
		return NULL;
	}
	return pkg;
}


/* RX/TX handling */

static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);

	tb_ctl_pkg_free(pkg);
}

/*
 * tb_ctl_tx() - transmit a packet on the control channel
 *
 * len must be a multiple of four.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
		     enum tb_cfg_pkg_type type)
{
	int res;
	struct ctl_pkg *pkg;

	if (len % 4 != 0) { /* required for le->be conversion */
		tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len);
		return -EINVAL;
	}
	if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */
		tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n",
			    len, TB_FRAME_SIZE - 4);
		return -EINVAL;
	}
	pkg = tb_ctl_pkg_alloc(ctl);
	if (!pkg)
		return -ENOMEM;
	pkg->frame.callback = tb_ctl_tx_callback;
	pkg->frame.size = len + 4;
	pkg->frame.sof = type;
	pkg->frame.eof = type;

	trace_tb_tx(ctl->index, type, data, len);

	cpu_to_be32_array(pkg->buffer, data, len / 4);
	*(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);

	res = tb_ring_tx(ctl->tx, &pkg->frame);
	if (res) /* ring is stopped */
		tb_ctl_pkg_free(pkg);
	return res;
}

/*
 * tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback
 */
static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
				struct ctl_pkg *pkg, size_t size)
{
	trace_tb_event(ctl->index, type, pkg->buffer, size);
	return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
}

static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
{
	/*
	 * We ignore failures during stop. All rx packets are referenced
	 * from ctl->rx_packets, so we do not lose them.
	 */
	tb_ring_rx(pkg->ctl->rx, &pkg->frame);
}

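/*
 * Returns true if the packet is a spontaneous error notification that
 * should be forwarded to the event callback instead of being matched
 * against a pending request.
 */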
static int tb_async_error(const struct ctl_pkg *pkg)
{
	const struct cfg_error_pkg *error = pkg->buffer;

	if (pkg->frame.eof != TB_CFG_PKG_ERROR)
		return false;

	switch (error->error) {
	case TB_CFG_ERROR_LINK_ERROR:
	case TB_CFG_ERROR_HEC_ERROR_DETECTED:
	case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
	case TB_CFG_ERROR_DP_BW:
	case TB_CFG_ERROR_ROP_CMPLT:
	case TB_CFG_ERROR_POP_CMPLT:
	case TB_CFG_ERROR_PCIE_WAKE:
	case TB_CFG_ERROR_DP_CON_CHANGE:
	case TB_CFG_ERROR_DPTX_DISCOVERY:
	case TB_CFG_ERROR_LINK_RECOVERY:
	case TB_CFG_ERROR_ASYM_LINK:
		return true;

	default:
		return false;
	}
}

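/*
 * Called for each received frame: verify the checksum, convert the
 * payload to CPU byte order, forward events and async notifications to
 * the callback, complete a matching request and resubmit the buffer.
 */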
static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
	struct tb_cfg_request *req;
	__be32 crc32;

	if (canceled)
		return; /*
			 * ring is stopped, packet is referenced from
			 * ctl->rx_packets.
			 */

	if (frame->size < 4 || frame->size % 4 != 0) {
		tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n",
			   frame->size);
		goto rx;
	}

	frame->size -= 4; /* remove checksum */
	crc32 = tb_crc(pkg->buffer, frame->size);
	be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);

	switch (frame->eof) {
	case TB_CFG_PKG_READ:
	case TB_CFG_PKG_WRITE:
	case TB_CFG_PKG_ERROR:
	case TB_CFG_PKG_OVERRIDE:
	case TB_CFG_PKG_RESET:
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
			tb_ctl_err(pkg->ctl,
				   "RX: checksum mismatch, dropping packet\n");
			goto rx;
		}
		if (tb_async_error(pkg)) {
			tb_ctl_handle_event(pkg->ctl, frame->eof,
					    pkg, frame->size);
			goto rx;
		}
		break;

	case TB_CFG_PKG_EVENT:
	case TB_CFG_PKG_XDOMAIN_RESP:
	case TB_CFG_PKG_XDOMAIN_REQ:
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
			tb_ctl_err(pkg->ctl,
				   "RX: checksum mismatch, dropping packet\n");
			goto rx;
		}
		fallthrough;
	case TB_CFG_PKG_ICM_EVENT:
		if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size))
			goto rx;
		break;

	default:
		break;
	}

	/*
	 * The received packet will be processed only if there is an
	 * active request and the packet is what that request expects.
	 * This prevents packets such as replies coming after the timeout
	 * has triggered from messing with the active requests.
	 */
	req = tb_cfg_request_find(pkg->ctl, pkg);

	trace_tb_rx(pkg->ctl->index, frame->eof, pkg->buffer, frame->size, !req);

	if (req) {
		if (req->copy(req, pkg))
			schedule_work(&req->work);
		tb_cfg_request_put(req);
	}

rx:
	tb_ctl_rx_submit(pkg);
}

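/*
 * Runs in workqueue context: invoke the completion callback (unless the
 * request was canceled), then dequeue the request and drop the reference
 * taken in tb_cfg_request().
 */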
static void tb_cfg_request_work(struct work_struct *work)
{
	struct tb_cfg_request *req = container_of(work, typeof(*req), work);

	if (!test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		req->callback(req->callback_data);

	tb_cfg_request_dequeue(req);
	tb_cfg_request_put(req);
}

/**
 * tb_cfg_request() - Start control request not waiting for it to complete
 * @ctl: Control channel to use
 * @req: Request to start
 * @callback: Callback called when the request is completed
 * @callback_data: Data to be passed to @callback
 *
 * This queues @req on the given control channel without waiting for it
 * to complete. When the request completes @callback is called.
 */
int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
		   void (*callback)(void *), void *callback_data)
{
	int ret;

	req->flags = 0;
	req->callback = callback;
	req->callback_data = callback_data;
	INIT_WORK(&req->work, tb_cfg_request_work);
	INIT_LIST_HEAD(&req->list);

	tb_cfg_request_get(req);
	ret = tb_cfg_request_enqueue(ctl, req);
	if (ret)
		goto err_put;

	ret = tb_ctl_tx(ctl, req->request, req->request_size,
			req->request_type);
	if (ret)
		goto err_dequeue;

	if (!req->response)
		schedule_work(&req->work);

	return 0;

err_dequeue:
	tb_cfg_request_dequeue(req);
err_put:
	tb_cfg_request_put(req);

	return ret;
}

/**
 * tb_cfg_request_cancel() - Cancel a control request
 * @req: Request to cancel
 * @err: Error to assign to the request
 *
 * This function can be used to cancel an ongoing request. It will wait
 * until the request is not active anymore.
 */
void tb_cfg_request_cancel(struct tb_cfg_request *req, int err)
{
	set_bit(TB_CFG_REQUEST_CANCELED, &req->flags);
	schedule_work(&req->work);
	wait_event(tb_cfg_request_cancel_queue, !tb_cfg_request_is_active(req));
	req->result.err = err;
}

static void tb_cfg_request_complete(void *data)
{
	complete(data);
}

/**
 * tb_cfg_request_sync() - Start control request and wait until it completes
 * @ctl: Control channel to use
 * @req: Request to start
 * @timeout_msec: Timeout how long to wait for @req to complete
 *
 * Starts a control request and waits until it completes. If the timeout
 * triggers, the request is canceled before the function returns. Note the
 * caller needs to make sure only one message for a given switch is active
 * at a time.
 */
struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
					 struct tb_cfg_request *req,
					 int timeout_msec)
{
	unsigned long timeout = msecs_to_jiffies(timeout_msec);
	struct tb_cfg_result res = { 0 };
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);
	if (ret) {
		res.err = ret;
		return res;
	}

	if (!wait_for_completion_timeout(&done, timeout))
		tb_cfg_request_cancel(req, -ETIMEDOUT);

	flush_work(&req->work);

	return req->result;
}

/* public interface, alloc/start/stop/free */

/**
 * tb_ctl_alloc() - allocate a control channel
 * @nhi: Pointer to NHI
 * @index: Domain number
 * @timeout_msec: Default timeout used with non-raw control messages
 * @cb: Callback called for plug events
 * @cb_data: Data passed to @cb
 *
 * cb will be invoked once for every hot plug event.
 *
 * Return: Returns a pointer on success or NULL on failure.
 */
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int index, int timeout_msec,
			    event_cb cb, void *cb_data)
{
	int i;
	struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);

	if (!ctl)
		return NULL;

	ctl->nhi = nhi;
	ctl->index = index;
	ctl->timeout_msec = timeout_msec;
	ctl->callback = cb;
	ctl->callback_data = cb_data;

	mutex_init(&ctl->request_queue_lock);
	INIT_LIST_HEAD(&ctl->request_queue);
	ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
					 TB_FRAME_SIZE, 4, 0);
	if (!ctl->frame_pool)
		goto err;

	ctl->tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
	if (!ctl->tx)
		goto err;

	ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0, 0xffff,
				   0xffff, NULL, NULL);
	if (!ctl->rx)
		goto err;

	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) {
		ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl);
		if (!ctl->rx_packets[i])
			goto err;
		ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
	}

	tb_ctl_dbg(ctl, "control channel created\n");
	return ctl;
err:
	tb_ctl_free(ctl);
	return NULL;
}

/**
 * tb_ctl_free() - free a control channel
 * @ctl: Control channel to free
 *
 * Must be called after tb_ctl_stop.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_free(struct tb_ctl *ctl)
{
	int i;

	if (!ctl)
		return;

	if (ctl->rx)
		tb_ring_free(ctl->rx);
	if (ctl->tx)
		tb_ring_free(ctl->tx);

	/* free RX packets */
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_pkg_free(ctl->rx_packets[i]);

	dma_pool_destroy(ctl->frame_pool);
	kfree(ctl);
}

/**
 * tb_ctl_start() - start/resume the control channel
 * @ctl: Control channel to start
 */
void tb_ctl_start(struct tb_ctl *ctl)
{
	int i;

	tb_ctl_dbg(ctl, "control channel starting...\n");
	tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
	tb_ring_start(ctl->rx);
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_rx_submit(ctl->rx_packets[i]);

	ctl->running = true;
}

/**
 * tb_ctl_stop() - pause the control channel
 * @ctl: Control channel to stop
 *
 * All invocations of ctl->callback will have finished after this method
 * returns.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_stop(struct tb_ctl *ctl)
{
	mutex_lock(&ctl->request_queue_lock);
	ctl->running = false;
	mutex_unlock(&ctl->request_queue_lock);

	tb_ring_stop(ctl->rx);
	tb_ring_stop(ctl->tx);

	if (!list_empty(&ctl->request_queue))
		tb_ctl_WARN(ctl, "dangling request in request_queue\n");
	INIT_LIST_HEAD(&ctl->request_queue);
	tb_ctl_dbg(ctl, "control channel stopped\n");
}

/* public interface, commands */

/**
 * tb_cfg_ack_notification() - Ack notification
 * @ctl: Control channel to use
 * @route: Router that originated the event
 * @error: Pointer to the notification package
 *
 * Call this as a response to a non-plug notification to ack it. Returns
 * %0 on success or an error code on failure.
 */
int tb_cfg_ack_notification(struct tb_ctl *ctl, u64 route,
			    const struct cfg_error_pkg *error)
{
	struct cfg_ack_pkg pkg = {
		.header = tb_cfg_make_header(route),
	};
	const char *name;

	switch (error->error) {
	case TB_CFG_ERROR_LINK_ERROR:
		name = "link error";
		break;
	case TB_CFG_ERROR_HEC_ERROR_DETECTED:
		name = "HEC error";
		break;
	case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
		name = "flow control error";
		break;
	case TB_CFG_ERROR_DP_BW:
		name = "DP_BW";
		break;
	case TB_CFG_ERROR_ROP_CMPLT:
		name = "router operation completion";
		break;
	case TB_CFG_ERROR_POP_CMPLT:
		name = "port operation completion";
		break;
	case TB_CFG_ERROR_PCIE_WAKE:
		name = "PCIe wake";
		break;
	case TB_CFG_ERROR_DP_CON_CHANGE:
		name = "DP connector change";
		break;
	case TB_CFG_ERROR_DPTX_DISCOVERY:
		name = "DPTX discovery";
		break;
	case TB_CFG_ERROR_LINK_RECOVERY:
		name = "link recovery";
		break;
	case TB_CFG_ERROR_ASYM_LINK:
		name = "asymmetric link";
		break;
	default:
		name = "unknown";
		break;
	}

	tb_ctl_dbg(ctl, "acking %s (%#x) notification on %llx\n", name,
		   error->error, route);

	return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_NOTIFY_ACK);
}

/**
 * tb_cfg_ack_plug() - Ack hot plug/unplug event
 * @ctl: Control channel to use
 * @route: Router that originated the event
 * @port: Port where the hot plug/unplug happened
 * @unplug: Ack hot plug or unplug
 *
 * Call this as a response to a hot plug/unplug event to ack it.
 * Returns %0 on success or an error code on failure.
 */
int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug)
{
	struct cfg_error_pkg pkg = {
		.header = tb_cfg_make_header(route),
		.port = port,
		.error = TB_CFG_ERROR_ACK_PLUG_EVENT,
		.pg = unplug ? TB_CFG_ERROR_PG_HOT_UNPLUG
			     : TB_CFG_ERROR_PG_HOT_PLUG,
	};

	tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%u\n",
		   unplug ? "un" : "", route, port);
	return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}

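/*
 * Check whether a received packet answers the given request: error
 * packets always match, otherwise the type, route, size and (for
 * read/write) sequence number must all match.
 */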
static bool tb_cfg_match(const struct tb_cfg_request *req,
			 const struct ctl_pkg *pkg)
{
	u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return true;

	if (pkg->frame.eof != req->response_type)
		return false;
	if (route != tb_cfg_get_route(req->request))
		return false;
	if (pkg->frame.size != req->response_size)
		return false;

	if (pkg->frame.eof == TB_CFG_PKG_READ ||
	    pkg->frame.eof == TB_CFG_PKG_WRITE) {
		const struct cfg_read_pkg *req_hdr = req->request;
		const struct cfg_read_pkg *res_hdr = pkg->buffer;

		if (req_hdr->addr.seq != res_hdr->addr.seq)
			return false;
	}

	return true;
}

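/*
 * Copy the response into the request once the header has been validated.
 * Returning true completes the request.
 */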
static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	struct tb_cfg_result res;

	/* Now make sure it is in expected format */
	res = parse_header(pkg, req->response_size, req->response_type,
			   tb_cfg_get_route(req->request));
	if (!res.err)
		memcpy(req->response, pkg->buffer, req->response_size);

	req->result = res;

	/* Always complete when first response is received */
	return true;
}

/**
 * tb_cfg_reset() - send a reset packet and wait for a response
 * @ctl: Control channel pointer
 * @route: Route string of the router to reset
 *
 * If the switch at route is incorrectly configured then we will not receive a
 * reply (even though the switch will reset). The caller should check for
 * -ETIMEDOUT and attempt to reconfigure the switch.
 */
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route)
{
	struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) };
	struct tb_cfg_result res = { 0 };
	struct tb_cfg_header reply;
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req) {
		res.err = -ENOMEM;
		return res;
	}

	req->match = tb_cfg_match;
	req->copy = tb_cfg_copy;
	req->request = &request;
	req->request_size = sizeof(request);
	req->request_type = TB_CFG_PKG_RESET;
	req->response = &reply;
	req->response_size = sizeof(reply);
	req->response_type = TB_CFG_PKG_RESET;

	res = tb_cfg_request_sync(ctl, req, ctl->timeout_msec);

	tb_cfg_request_put(req);

	return res;
}

/**
 * tb_cfg_read_raw() - read from config space into buffer
 * @ctl: Pointer to the control channel
 * @buffer: Buffer where the data is read
 * @route: Route string of the router
 * @port: Port number when reading from %TB_CFG_PORT, %0 otherwise
 * @space: Config space selector
 * @offset: Dword offset of the register to start reading
 * @length: Number of dwords to read
 * @timeout_msec: Timeout in ms how long to wait for the response
 *
 * Reads from router config space without translating the possible error.
 */
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_read_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_write_pkg reply;
	int retries = 0;

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();
		if (!req) {
			res.err = -ENOMEM;
			return res;
		}

		request.addr.seq = retries++;

		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = &request;
		req->request_size = sizeof(request);
		req->request_type = TB_CFG_PKG_READ;
		req->response = &reply;
		req->response_size = 12 + 4 * length;
		req->response_type = TB_CFG_PKG_READ;

		res = tb_cfg_request_sync(ctl, req, timeout_msec);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			break;

		/* Wait a bit (arbitrary time) until we send a retry */
		usleep_range(10, 100);
	}

	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	if (!res.err)
		memcpy(buffer, &reply.data, 4 * length);
	return res;
}

/**
 * tb_cfg_write_raw() - write from buffer into config space
 * @ctl: Pointer to the control channel
 * @buffer: Data to write
 * @route: Route string of the router
 * @port: Port number when writing to %TB_CFG_PORT, %0 otherwise
 * @space: Config space selector
 * @offset: Dword offset of the register to start writing
 * @length: Number of dwords to write
 * @timeout_msec: Timeout in ms how long to wait for the response
 *
 * Writes to router config space without translating the possible error.
 */
struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_write_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_read_pkg reply;
	int retries = 0;

	memcpy(&request.data, buffer, length * 4);

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();
		if (!req) {
			res.err = -ENOMEM;
			return res;
		}

		request.addr.seq = retries++;

		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = &request;
		req->request_size = 12 + 4 * length;
		req->request_type = TB_CFG_PKG_WRITE;
		req->response = &reply;
		req->response_size = sizeof(reply);
		req->response_type = TB_CFG_PKG_WRITE;

		res = tb_cfg_request_sync(ctl, req, timeout_msec);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			break;

		/* Wait a bit (arbitrary time) until we send a retry */
		usleep_range(10, 100);
	}

	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	return res;
}

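/* Translate a Thunderbolt error code in @res into a negative errno */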
static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
			    const struct tb_cfg_result *res)
{
	/*
	 * For unimplemented ports access to port config space may return
	 * TB_CFG_ERROR_INVALID_CONFIG_SPACE (alternatively their type is
	 * set to TB_TYPE_INACTIVE). In the former case return -ENODEV so
	 * that the caller can mark the port as disabled.
	 */
	if (space == TB_CFG_PORT &&
	    res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE)
		return -ENODEV;

	tb_cfg_print_error(ctl, space, res);

	if (res->tb_error == TB_CFG_ERROR_LOCK)
		return -EACCES;
	if (res->tb_error == TB_CFG_ERROR_PORT_NOT_CONNECTED)
		return -ENOTCONN;

	return -EIO;
}

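/*
 * Like tb_cfg_read_raw() / tb_cfg_write_raw() below but translates
 * Thunderbolt errors and timeouts into negative errnos.
 */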
int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
		enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
			space, offset, length, ctl->timeout_msec);

	switch (res.err) {
	case 0:
		/* Success */
		break;

	case 1:
		/* Thunderbolt error, tb_error holds the actual number */
		return tb_cfg_get_error(ctl, space, &res);

	case -ETIMEDOUT:
		tb_ctl_warn(ctl, "%llx: timeout reading config space %u from %#x\n",
			    route, space, offset);
		break;

	default:
		WARN(1, "tb_cfg_read: %d\n", res.err);
		break;
	}
	return res.err;
}

int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
		 enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
			space, offset, length, ctl->timeout_msec);

	switch (res.err) {
	case 0:
		/* Success */
		break;

	case 1:
		/* Thunderbolt error, tb_error holds the actual number */
		return tb_cfg_get_error(ctl, space, &res);

	case -ETIMEDOUT:
		tb_ctl_warn(ctl, "%llx: timeout writing config space %u to %#x\n",
			    route, space, offset);
		break;

	default:
		WARN(1, "tb_cfg_write: %d\n", res.err);
		break;
	}
	return res.err;
}

/**
 * tb_cfg_get_upstream_port() - get upstream port number of switch at route
 * @ctl: Pointer to the control channel
 * @route: Route string of the router
 *
 * Reads the first dword from the switch's TB_CFG_SWITCH config area and
 * returns the port number from which the reply originated.
 *
 * Return: Returns the upstream port number on success or an error code on
 * failure.
 */
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
{
	u32 dummy;
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
						   TB_CFG_SWITCH, 0, 1,
						   ctl->timeout_msec);

	if (res.err == 1)
		return -EIO;
	if (res.err)
		return res.err;
	return res.response_port;
}