xref: /linux/drivers/gpu/drm/display/drm_dp_tunnel.c (revision ab93e0dd72c37d378dd936f031ffb83ff2bd87ce)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2023 Intel Corporation
4  */
5 
6 #include <linux/export.h>
7 #include <linux/ref_tracker.h>
8 #include <linux/types.h>
9 
10 #include <drm/drm_atomic_state_helper.h>
11 
12 #include <drm/drm_atomic.h>
13 #include <drm/drm_print.h>
14 #include <drm/display/drm_dp.h>
15 #include <drm/display/drm_dp_helper.h>
16 #include <drm/display/drm_dp_tunnel.h>
17 
18 #define to_group(__private_obj) \
19 	container_of(__private_obj, struct drm_dp_tunnel_group, base)
20 
21 #define to_group_state(__private_state) \
22 	container_of(__private_state, struct drm_dp_tunnel_group_state, base)
23 
24 #define is_dp_tunnel_private_obj(__obj) \
25 	((__obj)->funcs == &tunnel_group_funcs)
26 
27 #define for_each_new_group_in_state(__state, __new_group_state, __i) \
28 	for ((__i) = 0; \
29 	     (__i) < (__state)->num_private_objs; \
30 	     (__i)++) \
31 		for_each_if ((__state)->private_objs[__i].ptr && \
32 			     is_dp_tunnel_private_obj((__state)->private_objs[__i].ptr) && \
33 			     ((__new_group_state) = \
34 				to_group_state((__state)->private_objs[__i].new_state), 1))
35 
36 #define for_each_old_group_in_state(__state, __old_group_state, __i) \
37 	for ((__i) = 0; \
38 	     (__i) < (__state)->num_private_objs; \
39 	     (__i)++) \
40 		for_each_if ((__state)->private_objs[__i].ptr && \
41 			     is_dp_tunnel_private_obj((__state)->private_objs[__i].ptr) && \
42 			     ((__old_group_state) = \
43 				to_group_state((__state)->private_objs[__i].old_state), 1))
44 
45 #define for_each_tunnel_in_group(__group, __tunnel) \
46 	list_for_each_entry(__tunnel, &(__group)->tunnels, node)
47 
48 #define for_each_tunnel_state(__group_state, __tunnel_state) \
49 	list_for_each_entry(__tunnel_state, &(__group_state)->tunnel_states, node)
50 
51 #define for_each_tunnel_state_safe(__group_state, __tunnel_state, __tunnel_state_tmp) \
52 	list_for_each_entry_safe(__tunnel_state, __tunnel_state_tmp, \
53 				 &(__group_state)->tunnel_states, node)
54 
55 #define kbytes_to_mbits(__kbytes) \
56 	DIV_ROUND_UP((__kbytes) * 8, 1000)
57 
58 #define DPTUN_BW_ARG(__bw) ((__bw) < 0 ? (__bw) : kbytes_to_mbits(__bw))
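/*
 * For example, kbytes_to_mbits(125000) == 1000: a 125000 kB/s BW value is
 * logged as 1000 Mb/s. DPTUN_BW_ARG() passes negative (undefined) BW values
 * through unchanged so they stay recognizable in the logs.
 */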
59 
60 #define __tun_prn(__tunnel, __level, __type, __fmt, ...) \
61 	drm_##__level##__type((__tunnel)->group->mgr->dev, \
62 			      "[DPTUN %s][%s] " __fmt, \
63 			      drm_dp_tunnel_name(__tunnel), \
64 			      (__tunnel)->aux->name, ## \
65 			      __VA_ARGS__)
66 
67 #define tun_dbg(__tunnel, __fmt, ...) \
68 	__tun_prn(__tunnel, dbg, _kms, __fmt, ## __VA_ARGS__)
69 
70 #define tun_dbg_stat(__tunnel, __err, __fmt, ...) do { \
71 	if (__err) \
72 		__tun_prn(__tunnel, dbg, _kms, __fmt " (Failed, err: %pe)\n", \
73 			  ## __VA_ARGS__, ERR_PTR(__err)); \
74 	else \
75 		__tun_prn(__tunnel, dbg, _kms, __fmt " (Ok)\n", \
76 			  ## __VA_ARGS__); \
77 } while (0)
78 
79 #define tun_dbg_atomic(__tunnel, __fmt, ...) \
80 	__tun_prn(__tunnel, dbg, _atomic, __fmt, ## __VA_ARGS__)
81 
82 #define tun_grp_dbg(__group, __fmt, ...) \
83 	drm_dbg_kms((__group)->mgr->dev, \
84 		    "[DPTUN %s] " __fmt, \
85 		    drm_dp_tunnel_group_name(__group), ## \
86 		    __VA_ARGS__)
87 
88 #define DP_TUNNELING_BASE DP_TUNNELING_OUI
89 
90 #define __DPTUN_REG_RANGE(__start, __size) \
91 	GENMASK_ULL((__start) + (__size) - 1, (__start))
92 
93 #define DPTUN_REG_RANGE(__addr, __size) \
94 	__DPTUN_REG_RANGE((__addr) - DP_TUNNELING_BASE, (__size))
95 
96 #define DPTUN_REG(__addr) DPTUN_REG_RANGE(__addr, 1)
97 
98 #define DPTUN_INFO_REG_MASK ( \
99 	DPTUN_REG_RANGE(DP_TUNNELING_OUI, DP_TUNNELING_OUI_BYTES) | \
100 	DPTUN_REG_RANGE(DP_TUNNELING_DEV_ID, DP_TUNNELING_DEV_ID_BYTES) | \
101 	DPTUN_REG(DP_TUNNELING_HW_REV) | \
102 	DPTUN_REG(DP_TUNNELING_SW_REV_MAJOR) | \
103 	DPTUN_REG(DP_TUNNELING_SW_REV_MINOR) | \
104 	DPTUN_REG(DP_TUNNELING_CAPABILITIES) | \
105 	DPTUN_REG(DP_IN_ADAPTER_INFO) | \
106 	DPTUN_REG(DP_USB4_DRIVER_ID) | \
107 	DPTUN_REG(DP_USB4_DRIVER_BW_CAPABILITY) | \
108 	DPTUN_REG(DP_IN_ADAPTER_TUNNEL_INFORMATION) | \
109 	DPTUN_REG(DP_BW_GRANULARITY) | \
110 	DPTUN_REG(DP_ESTIMATED_BW) | \
111 	DPTUN_REG(DP_ALLOCATED_BW) | \
112 	DPTUN_REG(DP_TUNNELING_MAX_LINK_RATE) | \
113 	DPTUN_REG(DP_TUNNELING_MAX_LANE_COUNT) | \
114 	DPTUN_REG(DP_DPTX_BW_ALLOCATION_MODE_CONTROL))
115 
116 static const DECLARE_BITMAP(dptun_info_regs, 64) = {
117 	DPTUN_INFO_REG_MASK & -1UL,
118 #if BITS_PER_LONG == 32
119 	DPTUN_INFO_REG_MASK >> 32,
120 #endif
121 };
122 
123 struct drm_dp_tunnel_regs {
124 	u8 buf[HWEIGHT64(DPTUN_INFO_REG_MASK)];
125 };
126 
127 struct drm_dp_tunnel_group;
128 
129 struct drm_dp_tunnel {
130 	struct drm_dp_tunnel_group *group;
131 
132 	struct list_head node;
133 
134 	struct kref kref;
135 	struct ref_tracker *tracker;
136 	struct drm_dp_aux *aux;
137 	char name[8];
138 
139 	int bw_granularity;
140 	int estimated_bw;
141 	int allocated_bw;
142 
143 	int max_dprx_rate;
144 	u8 max_dprx_lane_count;
145 
146 	u8 adapter_id;
147 
148 	bool bw_alloc_supported:1;
149 	bool bw_alloc_enabled:1;
150 	bool has_io_error:1;
151 	bool destroyed:1;
152 };
153 
154 struct drm_dp_tunnel_group_state;
155 
156 struct drm_dp_tunnel_state {
157 	struct drm_dp_tunnel_group_state *group_state;
158 
159 	struct drm_dp_tunnel_ref tunnel_ref;
160 
161 	struct list_head node;
162 
163 	u32 stream_mask;
164 	int *stream_bw;
165 };
166 
167 struct drm_dp_tunnel_group_state {
168 	struct drm_private_state base;
169 
170 	struct list_head tunnel_states;
171 };
172 
173 struct drm_dp_tunnel_group {
174 	struct drm_private_obj base;
175 	struct drm_dp_tunnel_mgr *mgr;
176 
177 	struct list_head tunnels;
178 
179 	/* available BW including the allocated_bw of all tunnels in the group */
180 	int available_bw;
181 
182 	u8 drv_group_id;
183 	char name[8];
184 
185 	bool active:1;
186 };
187 
188 struct drm_dp_tunnel_mgr {
189 	struct drm_device *dev;
190 
191 	int group_count;
192 	struct drm_dp_tunnel_group *groups;
193 	wait_queue_head_t bw_req_queue;
194 
195 #ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
196 	struct ref_tracker_dir ref_tracker;
197 #endif
198 };
199 
200 /*
201  * The following helpers provide a way to read out the tunneling DPCD
202  * registers with a minimal number of AUX transfers (1 transfer per contiguous
203  * range, as permitted by the 16 byte per transfer AUX limit), not accessing
204  * other registers to avoid any read side-effects.
205  */
206 static int next_reg_area(int *offset)
207 {
208 	*offset = find_next_bit(dptun_info_regs, 64, *offset);
209 
210 	return find_next_zero_bit(dptun_info_regs, 64, *offset + 1) - *offset;
211 }
212 
213 #define tunnel_reg_ptr(__regs, __address) ({ \
214 	WARN_ON(!test_bit((__address) - DP_TUNNELING_BASE, dptun_info_regs)); \
215 	&(__regs)->buf[bitmap_weight(dptun_info_regs, (__address) - DP_TUNNELING_BASE)]; \
216 })
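/*
 * The register cache in struct drm_dp_tunnel_regs stores only the registers
 * selected by dptun_info_regs, packed back to back. The buffer index of a
 * register is therefore the number of cached register bytes at lower
 * addresses, e.g. tunnel_reg_ptr(regs, DP_TUNNELING_OUI) == &regs->buf[0].
 */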
217 
218 static int read_tunnel_regs(struct drm_dp_aux *aux, struct drm_dp_tunnel_regs *regs)
219 {
220 	int offset = 0;
221 	int len;
222 
223 	while ((len = next_reg_area(&offset))) {
224 		int address = DP_TUNNELING_BASE + offset;
225 
226 		if (drm_dp_dpcd_read_data(aux, address, tunnel_reg_ptr(regs, address), len) < 0)
227 			return -EIO;
228 
229 		offset += len;
230 	}
231 
232 	return 0;
233 }
234 
235 static u8 tunnel_reg(const struct drm_dp_tunnel_regs *regs, int address)
236 {
237 	return *tunnel_reg_ptr(regs, address);
238 }
239 
240 static u8 tunnel_reg_drv_group_id(const struct drm_dp_tunnel_regs *regs)
241 {
242 	u8 drv_id = tunnel_reg(regs, DP_USB4_DRIVER_ID) & DP_USB4_DRIVER_ID_MASK;
243 	u8 group_id = tunnel_reg(regs, DP_IN_ADAPTER_TUNNEL_INFORMATION) & DP_GROUP_ID_MASK;
244 
245 	if (!group_id)
246 		return 0;
247 
248 	return (drv_id << DP_GROUP_ID_BITS) | group_id;
249 }
250 
251 /* Return granularity in kB/s units */
252 static int tunnel_reg_bw_granularity(const struct drm_dp_tunnel_regs *regs)
253 {
254 	int gr = tunnel_reg(regs, DP_BW_GRANULARITY) & DP_BW_GRANULARITY_MASK;
255 
256 	if (gr > 2)
257 		return -1;
258 
259 	return (250000 << gr) / 8;
260 }
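/*
 * For example, DP_BW_GRANULARITY values 0, 1 and 2 map to
 * (250000 << gr) / 8 = 31250, 62500 and 125000 kB/s (0.25, 0.5 and 1 Gb/s)
 * respectively; any other value is rejected above as invalid.
 */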
261 
262 static int tunnel_reg_max_dprx_rate(const struct drm_dp_tunnel_regs *regs)
263 {
264 	u8 bw_code = tunnel_reg(regs, DP_TUNNELING_MAX_LINK_RATE);
265 
266 	return drm_dp_bw_code_to_link_rate(bw_code);
267 }
268 
269 static int tunnel_reg_max_dprx_lane_count(const struct drm_dp_tunnel_regs *regs)
270 {
271 	return tunnel_reg(regs, DP_TUNNELING_MAX_LANE_COUNT) &
272 	       DP_TUNNELING_MAX_LANE_COUNT_MASK;
273 }
274 
275 static bool tunnel_reg_bw_alloc_supported(const struct drm_dp_tunnel_regs *regs)
276 {
277 	u8 cap_mask = DP_TUNNELING_SUPPORT | DP_IN_BW_ALLOCATION_MODE_SUPPORT;
278 
279 	if ((tunnel_reg(regs, DP_TUNNELING_CAPABILITIES) & cap_mask) != cap_mask)
280 		return false;
281 
282 	return tunnel_reg(regs, DP_USB4_DRIVER_BW_CAPABILITY) &
283 	       DP_USB4_DRIVER_BW_ALLOCATION_MODE_SUPPORT;
284 }
285 
286 static bool tunnel_reg_bw_alloc_enabled(const struct drm_dp_tunnel_regs *regs)
287 {
288 	return tunnel_reg(regs, DP_DPTX_BW_ALLOCATION_MODE_CONTROL) &
289 	       DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE;
290 }
291 
292 static u8 tunnel_group_drv_id(u8 drv_group_id)
293 {
294 	return drv_group_id >> DP_GROUP_ID_BITS;
295 }
296 
297 static u8 tunnel_group_id(u8 drv_group_id)
298 {
299 	return drv_group_id & DP_GROUP_ID_MASK;
300 }
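/*
 * For example, a USB4 driver ID of 1 and a group ID of 2 are combined by
 * tunnel_reg_drv_group_id() into (1 << DP_GROUP_ID_BITS) | 2, which
 * tunnel_group_drv_id() and tunnel_group_id() split back into 1 and 2.
 */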
301 
302 const char *drm_dp_tunnel_name(const struct drm_dp_tunnel *tunnel)
303 {
304 	return tunnel->name;
305 }
306 EXPORT_SYMBOL(drm_dp_tunnel_name);
307 
308 static const char *drm_dp_tunnel_group_name(const struct drm_dp_tunnel_group *group)
309 {
310 	return group->name;
311 }
312 
313 static struct drm_dp_tunnel_group *
314 lookup_or_alloc_group(struct drm_dp_tunnel_mgr *mgr, u8 drv_group_id)
315 {
316 	struct drm_dp_tunnel_group *group = NULL;
317 	int i;
318 
319 	for (i = 0; i < mgr->group_count; i++) {
320 		/*
321 		 * A tunnel group with 0 group ID shouldn't have more than one
322 		 * tunnels.
323 		 */
324 		if (tunnel_group_id(drv_group_id) &&
325 		    mgr->groups[i].drv_group_id == drv_group_id)
326 			return &mgr->groups[i];
327 
328 		if (!group && !mgr->groups[i].active)
329 			group = &mgr->groups[i];
330 	}
331 
332 	if (!group) {
333 		drm_dbg_kms(mgr->dev,
334 			    "DPTUN: Can't allocate more tunnel groups\n");
335 		return NULL;
336 	}
337 
338 	group->drv_group_id = drv_group_id;
339 	group->active = true;
340 
341 	/*
342 	 * The group name format here and elsewhere: Driver-ID:Group-ID:*
343 	 * (* standing for all DP-Adapters/tunnels in the group).
344 	 */
345 	snprintf(group->name, sizeof(group->name), "%d:%d:*",
346 		 tunnel_group_drv_id(drv_group_id) & ((1 << DP_GROUP_ID_BITS) - 1),
347 		 tunnel_group_id(drv_group_id) & ((1 << DP_USB4_DRIVER_ID_BITS) - 1));
348 
349 	return group;
350 }
351 
352 static void free_group(struct drm_dp_tunnel_group *group)
353 {
354 	struct drm_dp_tunnel_mgr *mgr = group->mgr;
355 
356 	if (drm_WARN_ON(mgr->dev, !list_empty(&group->tunnels)))
357 		return;
358 
359 	group->drv_group_id = 0;
360 	group->available_bw = -1;
361 	group->active = false;
362 }
363 
364 static struct drm_dp_tunnel *
365 tunnel_get(struct drm_dp_tunnel *tunnel)
366 {
367 	kref_get(&tunnel->kref);
368 
369 	return tunnel;
370 }
371 
372 static void free_tunnel(struct kref *kref)
373 {
374 	struct drm_dp_tunnel *tunnel = container_of(kref, typeof(*tunnel), kref);
375 	struct drm_dp_tunnel_group *group = tunnel->group;
376 
377 	list_del(&tunnel->node);
378 	if (list_empty(&group->tunnels))
379 		free_group(group);
380 
381 	kfree(tunnel);
382 }
383 
384 static void tunnel_put(struct drm_dp_tunnel *tunnel)
385 {
386 	kref_put(&tunnel->kref, free_tunnel);
387 }
388 
389 #ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
390 static void track_tunnel_ref(struct drm_dp_tunnel *tunnel,
391 			     struct ref_tracker **tracker)
392 {
393 	ref_tracker_alloc(&tunnel->group->mgr->ref_tracker,
394 			  tracker, GFP_KERNEL);
395 }
396 
397 static void untrack_tunnel_ref(struct drm_dp_tunnel *tunnel,
398 			       struct ref_tracker **tracker)
399 {
400 	ref_tracker_free(&tunnel->group->mgr->ref_tracker,
401 			 tracker);
402 }
403 #else
404 static void track_tunnel_ref(struct drm_dp_tunnel *tunnel,
405 			     struct ref_tracker **tracker)
406 {
407 }
408 
409 static void untrack_tunnel_ref(struct drm_dp_tunnel *tunnel,
410 			       struct ref_tracker **tracker)
411 {
412 }
413 #endif
414 
415 /**
416  * drm_dp_tunnel_get - Get a reference for a DP tunnel
417  * @tunnel: Tunnel object
418  * @tracker: Debug tracker for the reference
419  *
420  * Get a reference for @tunnel, along with a debug tracker to help locate
421  * the source of reference leaks, double puts and similar issues.
422  *
423  * The reference must be dropped after use by calling drm_dp_tunnel_put(),
424  * passing @tunnel and the *@tracker returned from here.
425  *
426  * Returns @tunnel - as a convenience - along with *@tracker.
427  */
428 struct drm_dp_tunnel *
429 drm_dp_tunnel_get(struct drm_dp_tunnel *tunnel,
430 		  struct ref_tracker **tracker)
431 {
432 	track_tunnel_ref(tunnel, tracker);
433 
434 	return tunnel_get(tunnel);
435 }
436 EXPORT_SYMBOL(drm_dp_tunnel_get);
437 
438 /**
439  * drm_dp_tunnel_put - Put a reference for a DP tunnel
440  * @tunnel: Tunnel object
441  * @tracker: Debug tracker for the reference
442  *
443  * Put a reference for @tunnel along with its debug *@tracker, which
444  * was obtained with drm_dp_tunnel_get().
445  */
446 void drm_dp_tunnel_put(struct drm_dp_tunnel *tunnel,
447 		       struct ref_tracker **tracker)
448 {
449 	untrack_tunnel_ref(tunnel, tracker);
450 
451 	tunnel_put(tunnel);
452 }
453 EXPORT_SYMBOL(drm_dp_tunnel_put);
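/*
 * A minimal usage sketch (illustrative only): drivers typically keep a
 * long-term reference via the drm_dp_tunnel_ref convenience wrapper declared
 * in <drm/display/drm_dp_tunnel.h>, which bundles the tunnel pointer with its
 * debug tracker. The embedding struct below is hypothetical:
 *
 *	struct my_output_state {
 *		struct drm_dp_tunnel_ref tunnel_ref;
 *	};
 *
 *	drm_dp_tunnel_ref_get(tunnel, &my_state->tunnel_ref);
 *	...
 *	drm_dp_tunnel_ref_put(&my_state->tunnel_ref);
 */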
454 
455 static bool add_tunnel_to_group(struct drm_dp_tunnel_mgr *mgr,
456 				u8 drv_group_id,
457 				struct drm_dp_tunnel *tunnel)
458 {
459 	struct drm_dp_tunnel_group *group;
460 
461 	group = lookup_or_alloc_group(mgr, drv_group_id);
462 	if (!group)
463 		return false;
464 
465 	tunnel->group = group;
466 	list_add(&tunnel->node, &group->tunnels);
467 
468 	return true;
469 }
470 
471 static struct drm_dp_tunnel *
472 create_tunnel(struct drm_dp_tunnel_mgr *mgr,
473 	      struct drm_dp_aux *aux,
474 	      const struct drm_dp_tunnel_regs *regs)
475 {
476 	u8 drv_group_id = tunnel_reg_drv_group_id(regs);
477 	struct drm_dp_tunnel *tunnel;
478 
479 	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
480 	if (!tunnel)
481 		return NULL;
482 
483 	INIT_LIST_HEAD(&tunnel->node);
484 
485 	kref_init(&tunnel->kref);
486 
487 	tunnel->aux = aux;
488 
489 	tunnel->adapter_id = tunnel_reg(regs, DP_IN_ADAPTER_INFO) & DP_IN_ADAPTER_NUMBER_MASK;
490 
491 	snprintf(tunnel->name, sizeof(tunnel->name), "%d:%d:%d",
492 		 tunnel_group_drv_id(drv_group_id) & ((1 << DP_GROUP_ID_BITS) - 1),
493 		 tunnel_group_id(drv_group_id) & ((1 << DP_USB4_DRIVER_ID_BITS) - 1),
494 		 tunnel->adapter_id & ((1 << DP_IN_ADAPTER_NUMBER_BITS) - 1));
495 
496 	tunnel->bw_granularity = tunnel_reg_bw_granularity(regs);
497 	tunnel->allocated_bw = tunnel_reg(regs, DP_ALLOCATED_BW) *
498 			       tunnel->bw_granularity;
499 	/*
500 	 * An initial allocated BW of 0 indicates an undefined state: the
501 	 * actual allocation is determined by the TBT CM, usually following a
502 	 * legacy allocation policy (based on the max DPRX caps). From the
503 	 * driver's POV the state becomes defined only after the first
504 	 * allocation request.
505 	 */
506 	if (!tunnel->allocated_bw)
507 		tunnel->allocated_bw = -1;
508 
509 	tunnel->bw_alloc_supported = tunnel_reg_bw_alloc_supported(regs);
510 	tunnel->bw_alloc_enabled = tunnel_reg_bw_alloc_enabled(regs);
511 
512 	if (!add_tunnel_to_group(mgr, drv_group_id, tunnel)) {
513 		kfree(tunnel);
514 
515 		return NULL;
516 	}
517 
518 	track_tunnel_ref(tunnel, &tunnel->tracker);
519 
520 	return tunnel;
521 }
522 
523 static void destroy_tunnel(struct drm_dp_tunnel *tunnel)
524 {
525 	untrack_tunnel_ref(tunnel, &tunnel->tracker);
526 	tunnel_put(tunnel);
527 }
528 
529 /**
530  * drm_dp_tunnel_set_io_error - Set the IO error flag for a DP tunnel
531  * @tunnel: Tunnel object
532  *
533  * Set the IO error flag for @tunnel. Drivers can call this function upon
534  * detecting a failure that affects the tunnel functionality, for instance
535  * after a DP AUX transfer failure on the port @tunnel is connected to.
536  *
537  * This disables further management of @tunnel, including any related
538  * AUX accesses for tunneling DPCD registers, returning an error to the
539  * initiators of these. The driver is expected to drop this tunnel and,
540  * optionally, recreate it.
541  */
542 void drm_dp_tunnel_set_io_error(struct drm_dp_tunnel *tunnel)
543 {
544 	tunnel->has_io_error = true;
545 }
546 EXPORT_SYMBOL(drm_dp_tunnel_set_io_error);
547 
548 #define SKIP_DPRX_CAPS_CHECK		BIT(0)
549 #define ALLOW_ALLOCATED_BW_CHANGE	BIT(1)
550 static bool tunnel_regs_are_valid(struct drm_dp_tunnel_mgr *mgr,
551 				  const struct drm_dp_tunnel_regs *regs,
552 				  unsigned int flags)
553 {
554 	u8 drv_group_id = tunnel_reg_drv_group_id(regs);
555 	bool check_dprx = !(flags & SKIP_DPRX_CAPS_CHECK);
556 	bool ret = true;
557 
558 	if (!tunnel_reg_bw_alloc_supported(regs)) {
559 		if (tunnel_group_id(drv_group_id)) {
560 			drm_dbg_kms(mgr->dev,
561 				    "DPTUN: A non-zero group ID is only allowed with BWA support\n");
562 			ret = false;
563 		}
564 
565 		if (tunnel_reg(regs, DP_ALLOCATED_BW)) {
566 			drm_dbg_kms(mgr->dev,
567 				    "DPTUN: BW is allocated without BWA support\n");
568 			ret = false;
569 		}
570 
571 		return ret;
572 	}
573 
574 	if (!tunnel_group_id(drv_group_id)) {
575 		drm_dbg_kms(mgr->dev,
576 			    "DPTUN: BWA support requires a non-zero group ID\n");
577 		ret = false;
578 	}
579 
580 	if (check_dprx && hweight8(tunnel_reg_max_dprx_lane_count(regs)) != 1) {
581 		drm_dbg_kms(mgr->dev,
582 			    "DPTUN: Invalid DPRX lane count: %d\n",
583 			    tunnel_reg_max_dprx_lane_count(regs));
584 
585 		ret = false;
586 	}
587 
588 	if (check_dprx && !tunnel_reg_max_dprx_rate(regs)) {
589 		drm_dbg_kms(mgr->dev,
590 			    "DPTUN: DPRX rate is 0\n");
591 
592 		ret = false;
593 	}
594 
595 	if (tunnel_reg_bw_granularity(regs) < 0) {
596 		drm_dbg_kms(mgr->dev,
597 			    "DPTUN: Invalid BW granularity\n");
598 
599 		ret = false;
600 	}
601 
602 	if (tunnel_reg(regs, DP_ALLOCATED_BW) > tunnel_reg(regs, DP_ESTIMATED_BW)) {
603 		drm_dbg_kms(mgr->dev,
604 			    "DPTUN: Allocated BW %d > estimated BW %d Mb/s\n",
605 			    DPTUN_BW_ARG(tunnel_reg(regs, DP_ALLOCATED_BW) *
606 					 tunnel_reg_bw_granularity(regs)),
607 			    DPTUN_BW_ARG(tunnel_reg(regs, DP_ESTIMATED_BW) *
608 					 tunnel_reg_bw_granularity(regs)));
609 
610 		ret = false;
611 	}
612 
613 	return ret;
614 }
615 
616 static int tunnel_allocated_bw(const struct drm_dp_tunnel *tunnel)
617 {
618 	return max(tunnel->allocated_bw, 0);
619 }
620 
621 static bool tunnel_info_changes_are_valid(struct drm_dp_tunnel *tunnel,
622 					  const struct drm_dp_tunnel_regs *regs,
623 					  unsigned int flags)
624 {
625 	u8 new_drv_group_id = tunnel_reg_drv_group_id(regs);
626 	bool ret = true;
627 
628 	if (tunnel->bw_alloc_supported != tunnel_reg_bw_alloc_supported(regs)) {
629 		tun_dbg(tunnel,
630 			"BW alloc support has changed %s -> %s\n",
631 			str_yes_no(tunnel->bw_alloc_supported),
632 			str_yes_no(tunnel_reg_bw_alloc_supported(regs)));
633 
634 		ret = false;
635 	}
636 
637 	if (tunnel->group->drv_group_id != new_drv_group_id) {
638 		tun_dbg(tunnel,
639 			"Driver/group ID has changed %d:%d:* -> %d:%d:*\n",
640 			tunnel_group_drv_id(tunnel->group->drv_group_id),
641 			tunnel_group_id(tunnel->group->drv_group_id),
642 			tunnel_group_drv_id(new_drv_group_id),
643 			tunnel_group_id(new_drv_group_id));
644 
645 		ret = false;
646 	}
647 
648 	if (!tunnel->bw_alloc_supported)
649 		return ret;
650 
651 	if (tunnel->bw_granularity != tunnel_reg_bw_granularity(regs)) {
652 		tun_dbg(tunnel,
653 			"BW granularity has changed: %d -> %d Mb/s\n",
654 			DPTUN_BW_ARG(tunnel->bw_granularity),
655 			DPTUN_BW_ARG(tunnel_reg_bw_granularity(regs)));
656 
657 		ret = false;
658 	}
659 
660 	/*
661 	 * On some devices at least the BW alloc mode enabled status is always
662 	 * reported as 0, so skip checking that here.
663 	 */
664 
665 	if (!(flags & ALLOW_ALLOCATED_BW_CHANGE) &&
666 	    tunnel_allocated_bw(tunnel) !=
667 	    tunnel_reg(regs, DP_ALLOCATED_BW) * tunnel->bw_granularity) {
668 		tun_dbg(tunnel,
669 			"Allocated BW has changed: %d -> %d Mb/s\n",
670 			DPTUN_BW_ARG(tunnel->allocated_bw),
671 			DPTUN_BW_ARG(tunnel_reg(regs, DP_ALLOCATED_BW) * tunnel->bw_granularity));
672 
673 		ret = false;
674 	}
675 
676 	return ret;
677 }
678 
679 static int
680 read_and_verify_tunnel_regs(struct drm_dp_tunnel *tunnel,
681 			    struct drm_dp_tunnel_regs *regs,
682 			    unsigned int flags)
683 {
684 	int err;
685 
686 	err = read_tunnel_regs(tunnel->aux, regs);
687 	if (err < 0) {
688 		drm_dp_tunnel_set_io_error(tunnel);
689 
690 		return err;
691 	}
692 
693 	if (!tunnel_regs_are_valid(tunnel->group->mgr, regs, flags))
694 		return -EINVAL;
695 
696 	if (!tunnel_info_changes_are_valid(tunnel, regs, flags))
697 		return -EINVAL;
698 
699 	return 0;
700 }
701 
702 static bool update_dprx_caps(struct drm_dp_tunnel *tunnel, const struct drm_dp_tunnel_regs *regs)
703 {
704 	bool changed = false;
705 
706 	if (tunnel_reg_max_dprx_rate(regs) != tunnel->max_dprx_rate) {
707 		tunnel->max_dprx_rate = tunnel_reg_max_dprx_rate(regs);
708 		changed = true;
709 	}
710 
711 	if (tunnel_reg_max_dprx_lane_count(regs) != tunnel->max_dprx_lane_count) {
712 		tunnel->max_dprx_lane_count = tunnel_reg_max_dprx_lane_count(regs);
713 		changed = true;
714 	}
715 
716 	return changed;
717 }
718 
719 static int dev_id_len(const u8 *dev_id, int max_len)
720 {
721 	while (max_len && dev_id[max_len - 1] == '\0')
722 		max_len--;
723 
724 	return max_len;
725 }
726 
727 static int get_max_dprx_bw(const struct drm_dp_tunnel *tunnel)
728 {
729 	int max_dprx_bw = drm_dp_max_dprx_data_rate(tunnel->max_dprx_rate,
730 						    tunnel->max_dprx_lane_count);
731 
732 	/*
733 	 * A BW request of roundup(max_dprx_bw, tunnel->bw_granularity) results in
734 	 * an allocation of max_dprx_bw. A BW request above this rounded-up
735 	 * value will fail.
736 	 */
737 	return min(roundup(max_dprx_bw, tunnel->bw_granularity),
738 		   MAX_DP_REQUEST_BW * tunnel->bw_granularity);
739 }
740 
741 static int get_max_tunnel_bw(const struct drm_dp_tunnel *tunnel)
742 {
743 	return min(get_max_dprx_bw(tunnel), tunnel->group->available_bw);
744 }
745 
746 /**
747  * drm_dp_tunnel_detect - Detect DP tunnel on the link
748  * @mgr: Tunnel manager
749  * @aux: DP AUX on which the tunnel will be detected
750  *
751  * Detect if there is any DP tunnel on the link and add it to the tunnel
752  * group's tunnel list.
753  *
754  * Returns a pointer to a tunnel on success, or an ERR_PTR() error on
755  * failure.
756  */
757 struct drm_dp_tunnel *
758 drm_dp_tunnel_detect(struct drm_dp_tunnel_mgr *mgr,
759 		     struct drm_dp_aux *aux)
760 {
761 	struct drm_dp_tunnel_regs regs;
762 	struct drm_dp_tunnel *tunnel;
763 	int err;
764 
765 	err = read_tunnel_regs(aux, &regs);
766 	if (err)
767 		return ERR_PTR(err);
768 
769 	if (!(tunnel_reg(&regs, DP_TUNNELING_CAPABILITIES) &
770 	      DP_TUNNELING_SUPPORT))
771 		return ERR_PTR(-ENODEV);
772 
773 	/* The DPRX caps are valid only after enabling BW alloc mode. */
774 	if (!tunnel_regs_are_valid(mgr, &regs, SKIP_DPRX_CAPS_CHECK))
775 		return ERR_PTR(-EINVAL);
776 
777 	tunnel = create_tunnel(mgr, aux, &regs);
778 	if (!tunnel)
779 		return ERR_PTR(-ENOMEM);
780 
781 	tun_dbg(tunnel,
782 		"OUI:%*phD DevID:%*pE Rev-HW:%d.%d SW:%d.%d PR-Sup:%s BWA-Sup:%s BWA-En:%s\n",
783 		DP_TUNNELING_OUI_BYTES,
784 			tunnel_reg_ptr(&regs, DP_TUNNELING_OUI),
785 		dev_id_len(tunnel_reg_ptr(&regs, DP_TUNNELING_DEV_ID), DP_TUNNELING_DEV_ID_BYTES),
786 			tunnel_reg_ptr(&regs, DP_TUNNELING_DEV_ID),
787 		(tunnel_reg(&regs, DP_TUNNELING_HW_REV) & DP_TUNNELING_HW_REV_MAJOR_MASK) >>
788 			DP_TUNNELING_HW_REV_MAJOR_SHIFT,
789 		(tunnel_reg(&regs, DP_TUNNELING_HW_REV) & DP_TUNNELING_HW_REV_MINOR_MASK) >>
790 			DP_TUNNELING_HW_REV_MINOR_SHIFT,
791 		tunnel_reg(&regs, DP_TUNNELING_SW_REV_MAJOR),
792 		tunnel_reg(&regs, DP_TUNNELING_SW_REV_MINOR),
793 		str_yes_no(tunnel_reg(&regs, DP_TUNNELING_CAPABILITIES) &
794 			   DP_PANEL_REPLAY_OPTIMIZATION_SUPPORT),
795 		str_yes_no(tunnel->bw_alloc_supported),
796 		str_yes_no(tunnel->bw_alloc_enabled));
797 
798 	return tunnel;
799 }
800 EXPORT_SYMBOL(drm_dp_tunnel_detect);
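/*
 * A minimal detection sketch (illustrative only; @mgr and @aux come from the
 * driver and error handling is trimmed):
 *
 *	struct drm_dp_tunnel *tunnel;
 *
 *	tunnel = drm_dp_tunnel_detect(mgr, aux);
 *	if (IS_ERR(tunnel))
 *		return PTR_ERR(tunnel);	// -ENODEV: no tunnel on this AUX
 *
 *	if (drm_dp_tunnel_enable_bw_alloc(tunnel) < 0)
 *		;	// fall back to non-BWA operation or drop the tunnel
 *
 *	// once the port is disconnected / the tunnel is no longer needed
 *	drm_dp_tunnel_destroy(tunnel);
 */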
801 
802 /**
803  * drm_dp_tunnel_destroy - Destroy tunnel object
804  * @tunnel: Tunnel object
805  *
806  * Remove the tunnel from the tunnel topology and destroy it.
807  *
808  * Returns 0 on success, -ENODEV if the tunnel has been destroyed already.
809  */
810 int drm_dp_tunnel_destroy(struct drm_dp_tunnel *tunnel)
811 {
812 	if (!tunnel)
813 		return 0;
814 
815 	if (drm_WARN_ON(tunnel->group->mgr->dev, tunnel->destroyed))
816 		return -ENODEV;
817 
818 	tun_dbg(tunnel, "destroying\n");
819 
820 	tunnel->destroyed = true;
821 	destroy_tunnel(tunnel);
822 
823 	return 0;
824 }
825 EXPORT_SYMBOL(drm_dp_tunnel_destroy);
826 
827 static int check_tunnel(const struct drm_dp_tunnel *tunnel)
828 {
829 	if (tunnel->destroyed)
830 		return -ENODEV;
831 
832 	if (tunnel->has_io_error)
833 		return -EIO;
834 
835 	return 0;
836 }
837 
838 static int group_allocated_bw(struct drm_dp_tunnel_group *group)
839 {
840 	struct drm_dp_tunnel *tunnel;
841 	int group_allocated_bw = 0;
842 
843 	for_each_tunnel_in_group(group, tunnel) {
844 		if (check_tunnel(tunnel) == 0 &&
845 		    tunnel->bw_alloc_enabled)
846 			group_allocated_bw += tunnel_allocated_bw(tunnel);
847 	}
848 
849 	return group_allocated_bw;
850 }
851 
852 /*
853  * The estimated BW reported by the TBT Connection Manager for each tunnel in
854  * a group includes the BW already allocated for the given tunnel and the
855  * unallocated BW which is free to be used by any tunnel in the group.
856  */
857 static int group_free_bw(const struct drm_dp_tunnel *tunnel)
858 {
859 	return tunnel->estimated_bw - tunnel_allocated_bw(tunnel);
860 }
861 
862 static int calc_group_available_bw(const struct drm_dp_tunnel *tunnel)
863 {
864 	return group_allocated_bw(tunnel->group) +
865 	       group_free_bw(tunnel);
866 }
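/*
 * For example, with tunnels A and B in a group, A allocated 20000 kB/s,
 * B allocated 10000 kB/s and A reporting an estimated BW of 50000 kB/s,
 * the group's available BW computed via A is
 * group_allocated_bw() + group_free_bw(A) =
 * (20000 + 10000) + (50000 - 20000) = 60000 kB/s.
 */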
867 
868 static int update_group_available_bw(struct drm_dp_tunnel *tunnel,
869 				     const struct drm_dp_tunnel_regs *regs)
870 {
871 	struct drm_dp_tunnel *tunnel_iter;
872 	int group_available_bw;
873 	bool changed;
874 
875 	tunnel->estimated_bw = tunnel_reg(regs, DP_ESTIMATED_BW) * tunnel->bw_granularity;
876 
877 	if (calc_group_available_bw(tunnel) == tunnel->group->available_bw)
878 		return 0;
879 
880 	for_each_tunnel_in_group(tunnel->group, tunnel_iter) {
881 		int err;
882 
883 		if (tunnel_iter == tunnel)
884 			continue;
885 
886 		if (check_tunnel(tunnel_iter) != 0 ||
887 		    !tunnel_iter->bw_alloc_enabled)
888 			continue;
889 
890 		err = drm_dp_dpcd_probe(tunnel_iter->aux, DP_DPCD_REV);
891 		if (err) {
892 			tun_dbg(tunnel_iter,
893 				"Probe failed, assume disconnected (err %pe)\n",
894 				ERR_PTR(err));
895 			drm_dp_tunnel_set_io_error(tunnel_iter);
896 		}
897 	}
898 
899 	group_available_bw = calc_group_available_bw(tunnel);
900 
901 	tun_dbg(tunnel, "Updated group available BW: %d->%d\n",
902 		DPTUN_BW_ARG(tunnel->group->available_bw),
903 		DPTUN_BW_ARG(group_available_bw));
904 
905 	changed = tunnel->group->available_bw != group_available_bw;
906 
907 	tunnel->group->available_bw = group_available_bw;
908 
909 	return changed ? 1 : 0;
910 }
911 
912 static int set_bw_alloc_mode(struct drm_dp_tunnel *tunnel, bool enable)
913 {
914 	u8 mask = DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE | DP_UNMASK_BW_ALLOCATION_IRQ;
915 	u8 val;
916 
917 	if (drm_dp_dpcd_read_byte(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, &val) < 0)
918 		goto out_err;
919 
920 	if (enable)
921 		val |= mask;
922 	else
923 		val &= ~mask;
924 
925 	if (drm_dp_dpcd_write_byte(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, val) < 0)
926 		goto out_err;
927 
928 	tunnel->bw_alloc_enabled = enable;
929 
930 	return 0;
931 
932 out_err:
933 	drm_dp_tunnel_set_io_error(tunnel);
934 
935 	return -EIO;
936 }
937 
938 /**
939  * drm_dp_tunnel_enable_bw_alloc - Enable DP tunnel BW allocation mode
940  * @tunnel: Tunnel object
941  *
942  * Enable the DP tunnel BW allocation mode on @tunnel if it supports it.
943  *
944  * Returns 0 in case of success, negative error code otherwise.
945  */
946 int drm_dp_tunnel_enable_bw_alloc(struct drm_dp_tunnel *tunnel)
947 {
948 	struct drm_dp_tunnel_regs regs;
949 	int err;
950 
951 	err = check_tunnel(tunnel);
952 	if (err)
953 		return err;
954 
955 	if (!tunnel->bw_alloc_supported)
956 		return -EOPNOTSUPP;
957 
958 	if (!tunnel_group_id(tunnel->group->drv_group_id))
959 		return -EINVAL;
960 
961 	err = set_bw_alloc_mode(tunnel, true);
962 	if (err)
963 		goto out;
964 
965 	/*
966 	 * After a BWA disable/re-enable sequence the allocated BW can either
967 	 * stay at its last requested value or, for instance after system
968 	 * suspend/resume, TBT CM can reset back the allocation to the amount
969 	 * allocated in the legacy/non-BWA mode. Accordingly allow for the
970 	 * allocation to change wrt. the last SW state.
971 	 */
972 	err = read_and_verify_tunnel_regs(tunnel, &regs,
973 					  ALLOW_ALLOCATED_BW_CHANGE);
974 	if (err) {
975 		set_bw_alloc_mode(tunnel, false);
976 
977 		goto out;
978 	}
979 
980 	if (!tunnel->max_dprx_rate)
981 		update_dprx_caps(tunnel, &regs);
982 
983 	if (tunnel->group->available_bw == -1) {
984 		err = update_group_available_bw(tunnel, &regs);
985 		if (err > 0)
986 			err = 0;
987 	}
988 out:
989 	tun_dbg_stat(tunnel, err,
990 		     "Enabling BW alloc mode: DPRX:%dx%d Group alloc:%d/%d Mb/s",
991 		     tunnel->max_dprx_rate / 100, tunnel->max_dprx_lane_count,
992 		     DPTUN_BW_ARG(group_allocated_bw(tunnel->group)),
993 		     DPTUN_BW_ARG(tunnel->group->available_bw));
994 
995 	return err;
996 }
997 EXPORT_SYMBOL(drm_dp_tunnel_enable_bw_alloc);
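/*
 * Sketch (illustrative only) of enabling BWA and reading back the limits a
 * driver can use for link training and mode validation:
 *
 *	err = drm_dp_tunnel_enable_bw_alloc(tunnel);
 *	if (!err) {
 *		int max_rate = drm_dp_tunnel_max_dprx_rate(tunnel);
 *		int max_lanes = drm_dp_tunnel_max_dprx_lane_count(tunnel);
 *		int group_bw = drm_dp_tunnel_available_bw(tunnel); // kB/s, -1 if unknown
 *		...
 *	}
 */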
998 
999 /**
1000  * drm_dp_tunnel_disable_bw_alloc - Disable DP tunnel BW allocation mode
1001  * @tunnel: Tunnel object
1002  *
1003  * Disable the DP tunnel BW allocation mode on @tunnel.
1004  *
1005  * Returns 0 in case of success, negative error code otherwise.
1006  */
1007 int drm_dp_tunnel_disable_bw_alloc(struct drm_dp_tunnel *tunnel)
1008 {
1009 	int err;
1010 
1011 	err = check_tunnel(tunnel);
1012 	if (err)
1013 		return err;
1014 
1015 	tunnel->allocated_bw = -1;
1016 
1017 	err = set_bw_alloc_mode(tunnel, false);
1018 
1019 	tun_dbg_stat(tunnel, err, "Disabling BW alloc mode");
1020 
1021 	return err;
1022 }
1023 EXPORT_SYMBOL(drm_dp_tunnel_disable_bw_alloc);
1024 
1025 /**
1026  * drm_dp_tunnel_bw_alloc_is_enabled - Query the BW allocation mode enabled state
1027  * @tunnel: Tunnel object
1028  *
1029  * Query if the BW allocation mode is enabled for @tunnel.
1030  *
1031  * Returns %true if the BW allocation mode is enabled for @tunnel.
1032  */
1033 bool drm_dp_tunnel_bw_alloc_is_enabled(const struct drm_dp_tunnel *tunnel)
1034 {
1035 	return tunnel && tunnel->bw_alloc_enabled;
1036 }
1037 EXPORT_SYMBOL(drm_dp_tunnel_bw_alloc_is_enabled);
1038 
1039 static int clear_bw_req_state(struct drm_dp_aux *aux)
1040 {
1041 	u8 bw_req_mask = DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED;
1042 
1043 	if (drm_dp_dpcd_write_byte(aux, DP_TUNNELING_STATUS, bw_req_mask) < 0)
1044 		return -EIO;
1045 
1046 	return 0;
1047 }
1048 
1049 static int bw_req_complete(struct drm_dp_aux *aux, bool *status_changed)
1050 {
1051 	u8 bw_req_mask = DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED;
1052 	u8 status_change_mask = DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED;
1053 	u8 val;
1054 	int err;
1055 
1056 	if (drm_dp_dpcd_read_byte(aux, DP_TUNNELING_STATUS, &val) < 0)
1057 		return -EIO;
1058 
1059 	*status_changed = val & status_change_mask;
1060 
1061 	val &= bw_req_mask;
1062 
1063 	if (!val)
1064 		return -EAGAIN;
1065 
1066 	err = clear_bw_req_state(aux);
1067 	if (err < 0)
1068 		return err;
1069 
1070 	return val == DP_BW_REQUEST_SUCCEEDED ? 0 : -ENOSPC;
1071 }
1072 
1073 static int allocate_tunnel_bw(struct drm_dp_tunnel *tunnel, int bw)
1074 {
1075 	struct drm_dp_tunnel_mgr *mgr = tunnel->group->mgr;
1076 	int request_bw = DIV_ROUND_UP(bw, tunnel->bw_granularity);
1077 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
1078 	long timeout;
1079 	int err;
1080 
1081 	if (bw < 0) {
1082 		err = -EINVAL;
1083 		goto out;
1084 	}
1085 
1086 	if (request_bw * tunnel->bw_granularity == tunnel->allocated_bw)
1087 		return 0;
1088 
1089 	/* Atomic check should prevent the following. */
1090 	if (drm_WARN_ON(mgr->dev, request_bw > MAX_DP_REQUEST_BW)) {
1091 		err = -EINVAL;
1092 		goto out;
1093 	}
1094 
1095 	err = clear_bw_req_state(tunnel->aux);
1096 	if (err)
1097 		goto out;
1098 
1099 	if (drm_dp_dpcd_write_byte(tunnel->aux, DP_REQUEST_BW, request_bw) < 0) {
1100 		err = -EIO;
1101 		goto out;
1102 	}
1103 
1104 	timeout = msecs_to_jiffies(3000);
1105 	add_wait_queue(&mgr->bw_req_queue, &wait);
1106 
1107 	for (;;) {
1108 		bool status_changed;
1109 
1110 		err = bw_req_complete(tunnel->aux, &status_changed);
1111 		if (err != -EAGAIN)
1112 			break;
1113 
1114 		if (status_changed) {
1115 			struct drm_dp_tunnel_regs regs;
1116 
1117 			err = read_and_verify_tunnel_regs(tunnel, &regs,
1118 							  ALLOW_ALLOCATED_BW_CHANGE);
1119 			if (err)
1120 				break;
1121 		}
1122 
1123 		if (!timeout) {
1124 			err = -ETIMEDOUT;
1125 			break;
1126 		}
1127 
1128 		timeout = wait_woken(&wait, TASK_UNINTERRUPTIBLE, timeout);
1129 	};
1130 
1131 	remove_wait_queue(&mgr->bw_req_queue, &wait);
1132 
1133 	if (err)
1134 		goto out;
1135 
1136 	tunnel->allocated_bw = request_bw * tunnel->bw_granularity;
1137 
1138 out:
1139 	tun_dbg_stat(tunnel, err, "Allocating %d/%d Mb/s for tunnel: Group alloc:%d/%d Mb/s",
1140 		     DPTUN_BW_ARG(request_bw * tunnel->bw_granularity),
1141 		     DPTUN_BW_ARG(get_max_tunnel_bw(tunnel)),
1142 		     DPTUN_BW_ARG(group_allocated_bw(tunnel->group)),
1143 		     DPTUN_BW_ARG(tunnel->group->available_bw));
1144 
1145 	if (err == -EIO)
1146 		drm_dp_tunnel_set_io_error(tunnel);
1147 
1148 	return err;
1149 }
1150 
1151 /**
1152  * drm_dp_tunnel_alloc_bw - Allocate BW for a DP tunnel
1153  * @tunnel: Tunnel object
1154  * @bw: BW in kB/s units
1155  *
1156  * Allocate @bw kB/s for @tunnel. The allocated BW must be freed after use by
1157  * calling this function for the same tunnel setting @bw to 0.
1158  *
1159  * Returns 0 in case of success, a negative error code otherwise.
1160  */
1161 int drm_dp_tunnel_alloc_bw(struct drm_dp_tunnel *tunnel, int bw)
1162 {
1163 	int err;
1164 
1165 	err = check_tunnel(tunnel);
1166 	if (err)
1167 		return err;
1168 
1169 	return allocate_tunnel_bw(tunnel, bw);
1170 }
1171 EXPORT_SYMBOL(drm_dp_tunnel_alloc_bw);
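/*
 * Sketch (illustrative only): allocating BW for an enabled output and
 * releasing it again when the output is turned off. @required_kbytes is a
 * hypothetical, driver-computed value in kB/s units:
 *
 *	err = drm_dp_tunnel_alloc_bw(tunnel, required_kbytes);
 *	if (err)
 *		...
 *	// free the allocation after use
 *	drm_dp_tunnel_alloc_bw(tunnel, 0);
 */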
1172 
1173 /**
1174  * drm_dp_tunnel_get_allocated_bw - Get the BW allocated for a DP tunnel
1175  * @tunnel: Tunnel object
1176  *
1177  * Get the current BW allocated for @tunnel. After the tunnel is created or
1178  * resumed and the BW allocation mode is enabled for it, the allocation
1179  * becomes determined only after the driver's first allocation request via
1180  * drm_dp_tunnel_alloc_bw().
1181  *
1182  * Return the BW allocated for the tunnel, or -1 if the allocation is
1183  * undetermined.
1184  */
1185 int drm_dp_tunnel_get_allocated_bw(struct drm_dp_tunnel *tunnel)
1186 {
1187 	return tunnel->allocated_bw;
1188 }
1189 EXPORT_SYMBOL(drm_dp_tunnel_get_allocated_bw);
1190 
1191 /*
1192  * Return 0 if the status hasn't changed, 1 if the status has changed, a
1193  * negative error code in case of an I/O failure.
1194  */
1195 static int check_and_clear_status_change(struct drm_dp_tunnel *tunnel)
1196 {
1197 	u8 mask = DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED;
1198 	u8 val;
1199 
1200 	if (drm_dp_dpcd_read_byte(tunnel->aux, DP_TUNNELING_STATUS, &val) < 0)
1201 		goto out_err;
1202 
1203 	val &= mask;
1204 
1205 	if (val) {
1206 		if (drm_dp_dpcd_write_byte(tunnel->aux, DP_TUNNELING_STATUS, val) < 0)
1207 			goto out_err;
1208 
1209 		return 1;
1210 	}
1211 
1212 	if (!drm_dp_tunnel_bw_alloc_is_enabled(tunnel))
1213 		return 0;
1214 
1215 	/*
1216 	 * Check for estimated BW changes explicitly to account for lost
1217 	 * BW change notifications.
1218 	 */
1219 	if (drm_dp_dpcd_read_byte(tunnel->aux, DP_ESTIMATED_BW, &val) < 0)
1220 		goto out_err;
1221 
1222 	if (val * tunnel->bw_granularity != tunnel->estimated_bw)
1223 		return 1;
1224 
1225 	return 0;
1226 
1227 out_err:
1228 	drm_dp_tunnel_set_io_error(tunnel);
1229 
1230 	return -EIO;
1231 }
1232 
1233 /**
1234  * drm_dp_tunnel_update_state - Update DP tunnel SW state with the HW state
1235  * @tunnel: Tunnel object
1236  *
1237  * Update the SW state of @tunnel with the HW state.
1238  *
1239  * Returns 0 if the state has not changed, 1 if it has changed and got updated
1240  * successfully and a negative error code otherwise.
1241  */
1242 int drm_dp_tunnel_update_state(struct drm_dp_tunnel *tunnel)
1243 {
1244 	struct drm_dp_tunnel_regs regs;
1245 	bool changed = false;
1246 	int ret;
1247 
1248 	ret = check_tunnel(tunnel);
1249 	if (ret < 0)
1250 		return ret;
1251 
1252 	ret = check_and_clear_status_change(tunnel);
1253 	if (ret < 0)
1254 		goto out;
1255 
1256 	if (!ret)
1257 		return 0;
1258 
1259 	ret = read_and_verify_tunnel_regs(tunnel, &regs, 0);
1260 	if (ret)
1261 		goto out;
1262 
1263 	if (update_dprx_caps(tunnel, &regs))
1264 		changed = true;
1265 
1266 	ret = update_group_available_bw(tunnel, &regs);
1267 	if (ret == 1)
1268 		changed = true;
1269 
1270 out:
1271 	tun_dbg_stat(tunnel, ret < 0 ? ret : 0,
1272 		     "State update: Changed:%s DPRX:%dx%d Tunnel alloc:%d/%d Group alloc:%d/%d Mb/s",
1273 		     str_yes_no(changed),
1274 		     tunnel->max_dprx_rate / 100, tunnel->max_dprx_lane_count,
1275 		     DPTUN_BW_ARG(tunnel->allocated_bw),
1276 		     DPTUN_BW_ARG(get_max_tunnel_bw(tunnel)),
1277 		     DPTUN_BW_ARG(group_allocated_bw(tunnel->group)),
1278 		     DPTUN_BW_ARG(tunnel->group->available_bw));
1279 
1280 	if (ret < 0)
1281 		return ret;
1282 
1283 	if (changed)
1284 		return 1;
1285 
1286 	return 0;
1287 }
1288 EXPORT_SYMBOL(drm_dp_tunnel_update_state);
1289 
1290 /*
1291  * drm_dp_tunnel_handle_irq - Handle DP tunnel IRQs
1292  *
1293  * Handle any pending DP tunnel IRQs, waking up waiters for a completion
1294  * event.
1295  *
1296  * Returns 1 if the state of the tunnel has changed which requires calling
1297  * drm_dp_tunnel_update_state(), a negative error code in case of a failure,
1298  * 0 otherwise.
1299  */
1300 int drm_dp_tunnel_handle_irq(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_aux *aux)
1301 {
1302 	u8 val;
1303 
1304 	if (drm_dp_dpcd_read_byte(aux, DP_TUNNELING_STATUS, &val) < 0)
1305 		return -EIO;
1306 
1307 	if (val & (DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED))
1308 		wake_up_all(&mgr->bw_req_queue);
1309 
1310 	if (val & (DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED))
1311 		return 1;
1312 
1313 	return 0;
1314 }
1315 EXPORT_SYMBOL(drm_dp_tunnel_handle_irq);
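/*
 * Sketch (illustrative only) of the IRQ flow from a driver's HPD/short-pulse
 * handler, after it has detected a DP tunneling IRQ on @aux:
 *
 *	ret = drm_dp_tunnel_handle_irq(mgr, aux);
 *	if (ret == 1)
 *		ret = drm_dp_tunnel_update_state(tunnel);
 *	if (ret == 1)
 *		;	// BW/DPRX caps changed: re-check the current config
 *	else if (ret < 0)
 *		;	// I/O error: drop and optionally re-detect the tunnel
 */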
1316 
1317 /**
1318  * drm_dp_tunnel_max_dprx_rate - Query the maximum rate of the tunnel's DPRX
1319  * @tunnel: Tunnel object
1320  *
1321  * The function is used to query the maximum link rate of the DPRX connected
1322  * to @tunnel. Note that this rate will not be limited by the BW limit of the
1323  * tunnel, as opposed to the standard and extended DP_MAX_LINK_RATE DPCD
1324  * registers.
1325  *
1326  * Returns the maximum link rate in 10 kbit/s units.
1327  */
1328 int drm_dp_tunnel_max_dprx_rate(const struct drm_dp_tunnel *tunnel)
1329 {
1330 	return tunnel->max_dprx_rate;
1331 }
1332 EXPORT_SYMBOL(drm_dp_tunnel_max_dprx_rate);
1333 
1334 /**
1335  * drm_dp_tunnel_max_dprx_lane_count - Query the maximum lane count of the tunnel's DPRX
1336  * @tunnel: Tunnel object
1337  *
1338  * The function is used to query the maximum lane count of the DPRX connected
1339  * to @tunnel. Note that this lane count will not be limited by the BW limit of
1340  * the tunnel, as opposed to the standard and extended DP_MAX_LANE_COUNT DPCD
1341  * registers.
1342  *
1343  * Returns the maximum lane count.
1344  */
1345 int drm_dp_tunnel_max_dprx_lane_count(const struct drm_dp_tunnel *tunnel)
1346 {
1347 	return tunnel->max_dprx_lane_count;
1348 }
1349 EXPORT_SYMBOL(drm_dp_tunnel_max_dprx_lane_count);
1350 
1351 /**
1352  * drm_dp_tunnel_available_bw - Query the estimated total available BW of the tunnel
1353  * @tunnel: Tunnel object
1354  *
1355  * This function is used to query the estimated total available BW of the
1356  * tunnel. This includes the currently allocated and free BW for all the
1357  * tunnels in @tunnel's group. The available BW is valid only after the BW
1358  * allocation mode has been enabled for the tunnel and its state has been
1359  * updated by calling drm_dp_tunnel_update_state().
1360  *
1361  * Returns the @tunnel group's estimated total available bandwidth in kB/s
1362  * units, or -1 if the available BW isn't valid (the BW allocation mode is
1363  * not enabled or the tunnel's state hasn't been updated).
1364  */
1365 int drm_dp_tunnel_available_bw(const struct drm_dp_tunnel *tunnel)
1366 {
1367 	return tunnel->group->available_bw;
1368 }
1369 EXPORT_SYMBOL(drm_dp_tunnel_available_bw);
1370 
1371 static struct drm_dp_tunnel_group_state *
1372 drm_dp_tunnel_atomic_get_group_state(struct drm_atomic_state *state,
1373 				     const struct drm_dp_tunnel *tunnel)
1374 {
1375 	return (struct drm_dp_tunnel_group_state *)
1376 		drm_atomic_get_private_obj_state(state,
1377 						 &tunnel->group->base);
1378 }
1379 
1380 static struct drm_dp_tunnel_state *
1381 add_tunnel_state(struct drm_dp_tunnel_group_state *group_state,
1382 		 struct drm_dp_tunnel *tunnel)
1383 {
1384 	struct drm_dp_tunnel_state *tunnel_state;
1385 
1386 	tun_dbg_atomic(tunnel,
1387 		       "Adding state for tunnel %p to group state %p\n",
1388 		       tunnel, group_state);
1389 
1390 	tunnel_state = kzalloc(sizeof(*tunnel_state), GFP_KERNEL);
1391 	if (!tunnel_state)
1392 		return NULL;
1393 
1394 	tunnel_state->group_state = group_state;
1395 
1396 	drm_dp_tunnel_ref_get(tunnel, &tunnel_state->tunnel_ref);
1397 
1398 	INIT_LIST_HEAD(&tunnel_state->node);
1399 	list_add(&tunnel_state->node, &group_state->tunnel_states);
1400 
1401 	return tunnel_state;
1402 }
1403 
1404 static void free_tunnel_state(struct drm_dp_tunnel_state *tunnel_state)
1405 {
1406 	tun_dbg_atomic(tunnel_state->tunnel_ref.tunnel,
1407 		       "Freeing state for tunnel %p\n",
1408 		       tunnel_state->tunnel_ref.tunnel);
1409 
1410 	list_del(&tunnel_state->node);
1411 
1412 	kfree(tunnel_state->stream_bw);
1413 	drm_dp_tunnel_ref_put(&tunnel_state->tunnel_ref);
1414 
1415 	kfree(tunnel_state);
1416 }
1417 
1418 static void free_group_state(struct drm_dp_tunnel_group_state *group_state)
1419 {
1420 	struct drm_dp_tunnel_state *tunnel_state;
1421 	struct drm_dp_tunnel_state *tunnel_state_tmp;
1422 
1423 	for_each_tunnel_state_safe(group_state, tunnel_state, tunnel_state_tmp)
1424 		free_tunnel_state(tunnel_state);
1425 
1426 	kfree(group_state);
1427 }
1428 
1429 static struct drm_dp_tunnel_state *
1430 get_tunnel_state(struct drm_dp_tunnel_group_state *group_state,
1431 		 const struct drm_dp_tunnel *tunnel)
1432 {
1433 	struct drm_dp_tunnel_state *tunnel_state;
1434 
1435 	for_each_tunnel_state(group_state, tunnel_state)
1436 		if (tunnel_state->tunnel_ref.tunnel == tunnel)
1437 			return tunnel_state;
1438 
1439 	return NULL;
1440 }
1441 
1442 static struct drm_dp_tunnel_state *
1443 get_or_add_tunnel_state(struct drm_dp_tunnel_group_state *group_state,
1444 			struct drm_dp_tunnel *tunnel)
1445 {
1446 	struct drm_dp_tunnel_state *tunnel_state;
1447 
1448 	tunnel_state = get_tunnel_state(group_state, tunnel);
1449 	if (tunnel_state)
1450 		return tunnel_state;
1451 
1452 	return add_tunnel_state(group_state, tunnel);
1453 }
1454 
1455 static struct drm_private_state *
1456 tunnel_group_duplicate_state(struct drm_private_obj *obj)
1457 {
1458 	struct drm_dp_tunnel_group_state *group_state;
1459 	struct drm_dp_tunnel_state *tunnel_state;
1460 
1461 	group_state = kzalloc(sizeof(*group_state), GFP_KERNEL);
1462 	if (!group_state)
1463 		return NULL;
1464 
1465 	INIT_LIST_HEAD(&group_state->tunnel_states);
1466 
1467 	__drm_atomic_helper_private_obj_duplicate_state(obj, &group_state->base);
1468 
1469 	for_each_tunnel_state(to_group_state(obj->state), tunnel_state) {
1470 		struct drm_dp_tunnel_state *new_tunnel_state;
1471 
1472 		new_tunnel_state = get_or_add_tunnel_state(group_state,
1473 							   tunnel_state->tunnel_ref.tunnel);
1474 		if (!new_tunnel_state)
1475 			goto out_free_state;
1476 
1477 		new_tunnel_state->stream_mask = tunnel_state->stream_mask;
1478 		new_tunnel_state->stream_bw = kmemdup(tunnel_state->stream_bw,
1479 						      sizeof(*tunnel_state->stream_bw) *
1480 							hweight32(tunnel_state->stream_mask),
1481 						      GFP_KERNEL);
1482 
1483 		if (!new_tunnel_state->stream_bw)
1484 			goto out_free_state;
1485 	}
1486 
1487 	return &group_state->base;
1488 
1489 out_free_state:
1490 	free_group_state(group_state);
1491 
1492 	return NULL;
1493 }
1494 
1495 static void tunnel_group_destroy_state(struct drm_private_obj *obj, struct drm_private_state *state)
1496 {
1497 	free_group_state(to_group_state(state));
1498 }
1499 
1500 static const struct drm_private_state_funcs tunnel_group_funcs = {
1501 	.atomic_duplicate_state = tunnel_group_duplicate_state,
1502 	.atomic_destroy_state = tunnel_group_destroy_state,
1503 };
1504 
1505 /**
1506  * drm_dp_tunnel_atomic_get_state - get/allocate the new atomic state for a tunnel
1507  * @state: Atomic state
1508  * @tunnel: Tunnel to get the state for
1509  *
1510  * Get the new atomic state for @tunnel, duplicating it from the old tunnel
1511  * state if not yet allocated.
1512  *
1513  * Return the state or an ERR_PTR() error on failure.
1514  */
1515 struct drm_dp_tunnel_state *
1516 drm_dp_tunnel_atomic_get_state(struct drm_atomic_state *state,
1517 			       struct drm_dp_tunnel *tunnel)
1518 {
1519 	struct drm_dp_tunnel_group_state *group_state;
1520 	struct drm_dp_tunnel_state *tunnel_state;
1521 
1522 	group_state = drm_dp_tunnel_atomic_get_group_state(state, tunnel);
1523 	if (IS_ERR(group_state))
1524 		return ERR_CAST(group_state);
1525 
1526 	tunnel_state = get_or_add_tunnel_state(group_state, tunnel);
1527 	if (!tunnel_state)
1528 		return ERR_PTR(-ENOMEM);
1529 
1530 	return tunnel_state;
1531 }
1532 EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_state);
1533 
1534 /**
1535  * drm_dp_tunnel_atomic_get_old_state - get the old atomic state for a tunnel
1536  * @state: Atomic state
1537  * @tunnel: Tunnel to get the state for
1538  *
1539  * Get the old atomic state for @tunnel.
1540  *
1541  * Return the old state or NULL if the tunnel's atomic state is not in @state.
1542  */
1543 struct drm_dp_tunnel_state *
1544 drm_dp_tunnel_atomic_get_old_state(struct drm_atomic_state *state,
1545 				   const struct drm_dp_tunnel *tunnel)
1546 {
1547 	struct drm_dp_tunnel_group_state *old_group_state;
1548 	int i;
1549 
1550 	for_each_old_group_in_state(state, old_group_state, i)
1551 		if (to_group(old_group_state->base.obj) == tunnel->group)
1552 			return get_tunnel_state(old_group_state, tunnel);
1553 
1554 	return NULL;
1555 }
1556 EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_old_state);
1557 
1558 /**
1559  * drm_dp_tunnel_atomic_get_new_state - get the new atomic state for a tunnel
1560  * @state: Atomic state
1561  * @tunnel: Tunnel to get the state for
1562  *
1563  * Get the new atomic state for @tunnel.
1564  *
1565  * Return the new state or NULL if the tunnel's atomic state is not in @state.
1566  */
1567 struct drm_dp_tunnel_state *
1568 drm_dp_tunnel_atomic_get_new_state(struct drm_atomic_state *state,
1569 				   const struct drm_dp_tunnel *tunnel)
1570 {
1571 	struct drm_dp_tunnel_group_state *new_group_state;
1572 	int i;
1573 
1574 	for_each_new_group_in_state(state, new_group_state, i)
1575 		if (to_group(new_group_state->base.obj) == tunnel->group)
1576 			return get_tunnel_state(new_group_state, tunnel);
1577 
1578 	return NULL;
1579 }
1580 EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_new_state);
1581 
1582 static bool init_group(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_tunnel_group *group)
1583 {
1584 	struct drm_dp_tunnel_group_state *group_state;
1585 
1586 	group_state = kzalloc(sizeof(*group_state), GFP_KERNEL);
1587 	if (!group_state)
1588 		return false;
1589 
1590 	INIT_LIST_HEAD(&group_state->tunnel_states);
1591 
1592 	group->mgr = mgr;
1593 	group->available_bw = -1;
1594 	INIT_LIST_HEAD(&group->tunnels);
1595 
1596 	drm_atomic_private_obj_init(mgr->dev, &group->base, &group_state->base,
1597 				    &tunnel_group_funcs);
1598 
1599 	return true;
1600 }
1601 
1602 static void cleanup_group(struct drm_dp_tunnel_group *group)
1603 {
1604 	drm_atomic_private_obj_fini(&group->base);
1605 }
1606 
1607 #ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
1608 static void check_unique_stream_ids(const struct drm_dp_tunnel_group_state *group_state)
1609 {
1610 	const struct drm_dp_tunnel_state *tunnel_state;
1611 	u32 stream_mask = 0;
1612 
1613 	for_each_tunnel_state(group_state, tunnel_state) {
1614 		drm_WARN(to_group(group_state->base.obj)->mgr->dev,
1615 			 tunnel_state->stream_mask & stream_mask,
1616 			 "[DPTUN %s]: conflicting stream IDs %x (IDs in other tunnels %x)\n",
1617 			 tunnel_state->tunnel_ref.tunnel->name,
1618 			 tunnel_state->stream_mask,
1619 			 stream_mask);
1620 
1621 		stream_mask |= tunnel_state->stream_mask;
1622 	}
1623 }
1624 #else
1625 static void check_unique_stream_ids(const struct drm_dp_tunnel_group_state *group_state)
1626 {
1627 }
1628 #endif
1629 
1630 static int stream_id_to_idx(u32 stream_mask, u8 stream_id)
1631 {
1632 	return hweight32(stream_mask & (BIT(stream_id) - 1));
1633 }
1634 
1635 static int resize_bw_array(struct drm_dp_tunnel_state *tunnel_state,
1636 			   unsigned long old_mask, unsigned long new_mask)
1637 {
1638 	unsigned long move_mask = old_mask & new_mask;
1639 	int *new_bws = NULL;
1640 	int id;
1641 
1642 	WARN_ON(!new_mask);
1643 
1644 	if (old_mask == new_mask)
1645 		return 0;
1646 
1647 	new_bws = kcalloc(hweight32(new_mask), sizeof(*new_bws), GFP_KERNEL);
1648 	if (!new_bws)
1649 		return -ENOMEM;
1650 
1651 	for_each_set_bit(id, &move_mask, BITS_PER_TYPE(move_mask))
1652 		new_bws[stream_id_to_idx(new_mask, id)] =
1653 			tunnel_state->stream_bw[stream_id_to_idx(old_mask, id)];
1654 
1655 	kfree(tunnel_state->stream_bw);
1656 	tunnel_state->stream_bw = new_bws;
1657 	tunnel_state->stream_mask = new_mask;
1658 
1659 	return 0;
1660 }
1661 
1662 static int set_stream_bw(struct drm_dp_tunnel_state *tunnel_state,
1663 			 u8 stream_id, int bw)
1664 {
1665 	int err;
1666 
1667 	err = resize_bw_array(tunnel_state,
1668 			      tunnel_state->stream_mask,
1669 			      tunnel_state->stream_mask | BIT(stream_id));
1670 	if (err)
1671 		return err;
1672 
1673 	tunnel_state->stream_bw[stream_id_to_idx(tunnel_state->stream_mask, stream_id)] = bw;
1674 
1675 	return 0;
1676 }
1677 
1678 static int clear_stream_bw(struct drm_dp_tunnel_state *tunnel_state,
1679 			   u8 stream_id)
1680 {
1681 	if (!(tunnel_state->stream_mask & ~BIT(stream_id))) {
1682 		free_tunnel_state(tunnel_state);
1683 		return 0;
1684 	}
1685 
1686 	return resize_bw_array(tunnel_state,
1687 			       tunnel_state->stream_mask,
1688 			       tunnel_state->stream_mask & ~BIT(stream_id));
1689 }
1690 
1691 /**
1692  * drm_dp_tunnel_atomic_set_stream_bw - Set the BW for a DP tunnel stream
1693  * @state: Atomic state
1694  * @tunnel: DP tunnel containing the stream
1695  * @stream_id: Stream ID
1696  * @bw: BW of the stream
1697  *
1698  * Set a DP tunnel stream's required BW in the atomic state.
1699  *
1700  * Returns 0 in case of success, a negative error code otherwise.
1701  */
1702 int drm_dp_tunnel_atomic_set_stream_bw(struct drm_atomic_state *state,
1703 				       struct drm_dp_tunnel *tunnel,
1704 				       u8 stream_id, int bw)
1705 {
1706 	struct drm_dp_tunnel_group_state *new_group_state;
1707 	struct drm_dp_tunnel_state *tunnel_state;
1708 	int err;
1709 
1710 	if (drm_WARN_ON(tunnel->group->mgr->dev,
1711 			stream_id > BITS_PER_TYPE(tunnel_state->stream_mask)))
1712 		return -EINVAL;
1713 
1714 	tun_dbg(tunnel,
1715 		"Setting %d Mb/s for stream %d\n",
1716 		DPTUN_BW_ARG(bw), stream_id);
1717 
1718 	new_group_state = drm_dp_tunnel_atomic_get_group_state(state, tunnel);
1719 	if (IS_ERR(new_group_state))
1720 		return PTR_ERR(new_group_state);
1721 
1722 	if (bw == 0) {
1723 		tunnel_state = get_tunnel_state(new_group_state, tunnel);
1724 		if (!tunnel_state)
1725 			return 0;
1726 
1727 		return clear_stream_bw(tunnel_state, stream_id);
1728 	}
1729 
1730 	tunnel_state = get_or_add_tunnel_state(new_group_state, tunnel);
1731 	if (drm_WARN_ON(state->dev, !tunnel_state))
1732 		return -EINVAL;
1733 
1734 	err = set_stream_bw(tunnel_state, stream_id, bw);
1735 	if (err)
1736 		return err;
1737 
1738 	check_unique_stream_ids(new_group_state);
1739 
1740 	return 0;
1741 }
1742 EXPORT_SYMBOL(drm_dp_tunnel_atomic_set_stream_bw);
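
/*
 * Usage sketch for drm_dp_tunnel_atomic_set_stream_bw() (illustrative only:
 * the stream_required_kBps() helper and the use of the CRTC index as the
 * stream ID are assumptions, not requirements of the API). A driver typically
 * calls this from its atomic check for each stream routed through the tunnel,
 * passing bw == 0 when the stream is disabled so its BW is dropped from the
 * state:
 *
 *	static int example_set_tunnel_stream_bw(struct drm_atomic_state *state,
 *						struct drm_dp_tunnel *tunnel,
 *						const struct drm_crtc_state *crtc_state)
 *	{
 *		u8 stream_id = drm_crtc_index(crtc_state->crtc);
 *		int bw = crtc_state->active ?
 *			 stream_required_kBps(crtc_state) : 0;
 *
 *		return drm_dp_tunnel_atomic_set_stream_bw(state, tunnel,
 *							  stream_id, bw);
 *	}
 */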
1743 
1744 /**
1745  * drm_dp_tunnel_atomic_get_required_bw - Get the BW required by a DP tunnel
1746  * @tunnel_state: Atomic state of the queried tunnel
1747  *
1748  * Calculate the BW required by a tunnel by adding up the required BW of all
1749  * the streams in the tunnel.
1750  *
1751  * Return the total BW required by the tunnel.
1752  */
1753 int drm_dp_tunnel_atomic_get_required_bw(const struct drm_dp_tunnel_state *tunnel_state)
1754 {
1755 	int tunnel_bw = 0;
1756 	int i;
1757 
1758 	if (!tunnel_state || !tunnel_state->stream_mask)
1759 		return 0;
1760 
1761 	for (i = 0; i < hweight32(tunnel_state->stream_mask); i++)
1762 		tunnel_bw += tunnel_state->stream_bw[i];
1763 
1764 	return tunnel_bw;
1765 }
1766 EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_required_bw);
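
/*
 * Usage sketch (illustrative; assumes the new tunnel state was looked up with
 * drm_dp_tunnel_atomic_get_new_state()): the summed value can be used to size
 * the subsequent BW allocation for the tunnel, e.g. via
 * drm_dp_tunnel_alloc_bw():
 *
 *	int required_bw =
 *		drm_dp_tunnel_atomic_get_required_bw(new_tunnel_state);
 *
 *	err = drm_dp_tunnel_alloc_bw(tunnel, required_bw);
 */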
1767 
1768 /**
1769  * drm_dp_tunnel_atomic_get_group_streams_in_state - Get mask of stream IDs in a group
1770  * @state: Atomic state
1771  * @tunnel: Tunnel object
1772  * @stream_mask: Mask of streams in @tunnel's group
1773  *
1774  * Get the mask of all the stream IDs in the tunnel group of @tunnel.
1775  *
1776  * Return 0 in case of success - with the stream IDs in @stream_mask - or a
1777  * negative error code in case of failure.
1778  */
1779 int drm_dp_tunnel_atomic_get_group_streams_in_state(struct drm_atomic_state *state,
1780 						    const struct drm_dp_tunnel *tunnel,
1781 						    u32 *stream_mask)
1782 {
1783 	struct drm_dp_tunnel_group_state *group_state;
1784 	struct drm_dp_tunnel_state *tunnel_state;
1785 
1786 	group_state = drm_dp_tunnel_atomic_get_group_state(state, tunnel);
1787 	if (IS_ERR(group_state))
1788 		return PTR_ERR(group_state);
1789 
1790 	*stream_mask = 0;
1791 	for_each_tunnel_state(group_state, tunnel_state)
1792 		*stream_mask |= tunnel_state->stream_mask;
1793 
1794 	return 0;
1795 }
1796 EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_group_streams_in_state);
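
/*
 * Usage sketch (illustrative; how stream IDs map to CRTCs/pipes is a driver
 * convention, not something this helper defines):
 *
 *	u32 stream_mask;
 *	int err;
 *
 *	err = drm_dp_tunnel_atomic_get_group_streams_in_state(state, tunnel,
 *							       &stream_mask);
 *	if (err)
 *		return err;
 *
 * A driver that uses CRTC indices as stream IDs can now pull every CRTC in
 * stream_mask into the atomic state, for instance to recompute their BW after
 * the tunnel group's available BW has changed.
 */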
1797 
1798 static int
1799 drm_dp_tunnel_atomic_check_group_bw(struct drm_dp_tunnel_group_state *new_group_state,
1800 				    u32 *failed_stream_mask)
1801 {
1802 	struct drm_dp_tunnel_group *group = to_group(new_group_state->base.obj);
1803 	struct drm_dp_tunnel_state *new_tunnel_state;
1804 	u32 group_stream_mask = 0;
1805 	int group_bw = 0;
1806 
1807 	for_each_tunnel_state(new_group_state, new_tunnel_state) {
1808 		struct drm_dp_tunnel *tunnel = new_tunnel_state->tunnel_ref.tunnel;
1809 		int max_dprx_bw = get_max_dprx_bw(tunnel);
1810 		int tunnel_bw = drm_dp_tunnel_atomic_get_required_bw(new_tunnel_state);
1811 
1812 		tun_dbg(tunnel,
1813 			"%sRequired %d/%d Mb/s total for tunnel.\n",
1814 			tunnel_bw > max_dprx_bw ? "Not enough BW: " : "",
1815 			DPTUN_BW_ARG(tunnel_bw),
1816 			DPTUN_BW_ARG(max_dprx_bw));
1817 
1818 		if (tunnel_bw > max_dprx_bw) {
1819 			*failed_stream_mask = new_tunnel_state->stream_mask;
1820 			return -ENOSPC;
1821 		}
1822 
1823 		group_bw += min(roundup(tunnel_bw, tunnel->bw_granularity),
1824 				max_dprx_bw);
1825 		group_stream_mask |= new_tunnel_state->stream_mask;
1826 	}
1827 
1828 	tun_grp_dbg(group,
1829 		    "%sRequired %d/%d Mb/s total for tunnel group.\n",
1830 		    group_bw > group->available_bw ? "Not enough BW: " : "",
1831 		    DPTUN_BW_ARG(group_bw),
1832 		    DPTUN_BW_ARG(group->available_bw));
1833 
1834 	if (group_bw > group->available_bw) {
1835 		*failed_stream_mask = group_stream_mask;
1836 		return -ENOSPC;
1837 	}
1838 
1839 	return 0;
1840 }
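
/*
 * Worked example for the group BW accounting above (hypothetical numbers; BW
 * values are in the kB/s units this file uses, cf. kbytes_to_mbits() in the
 * debug output): with a bw_granularity of 31250 kB/s (0.25 Gb/s) and two
 * tunnels in the group requiring 300000 and 600000 kB/s, each below its DPRX
 * limit, the group total is
 *
 *	group_bw = roundup(300000, 31250) + roundup(600000, 31250)
 *		 = 312500 + 625000 = 937500 kB/s
 *
 * which is then compared against group->available_bw. If a rounded-up value
 * exceeded a tunnel's max_dprx_bw, the min() above would clamp it to that
 * limit instead.
 */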
1841 
1842 /**
1843  * drm_dp_tunnel_atomic_check_stream_bws - Check BW limit for all streams in state
1844  * @state: Atomic state
1845  * @failed_stream_mask: Mask of stream IDs with a BW limit failure
1846  *
1847  * Check the required BW of each DP tunnel in @state against both the DPRX BW
1848  * limit of the tunnel and the BW limit of the tunnel group. Return a mask of
1849  * stream IDs in @failed_stream_mask once a check fails. The mask will contain
1850  * either all the streams in a tunnel (in case a DPRX BW limit check failed) or
1851  * all the streams in a tunnel group (in case a group BW limit check failed).
1852  *
1853  * Return 0 if all the BW limit checks passed, -ENOSPC in case a BW limit
1854  * check failed - with @failed_stream_mask containing the streams failing the
1855  * check - or a negative error code otherwise.
1856  */
1857 int drm_dp_tunnel_atomic_check_stream_bws(struct drm_atomic_state *state,
1858 					  u32 *failed_stream_mask)
1859 {
1860 	struct drm_dp_tunnel_group_state *new_group_state;
1861 	int i;
1862 
1863 	for_each_new_group_in_state(state, new_group_state, i) {
1864 		int ret;
1865 
1866 		ret = drm_dp_tunnel_atomic_check_group_bw(new_group_state,
1867 							  failed_stream_mask);
1868 		if (ret)
1869 			return ret;
1870 	}
1871 
1872 	return 0;
1873 }
1874 EXPORT_SYMBOL(drm_dp_tunnel_atomic_check_stream_bws);
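
/*
 * Usage sketch (illustrative; the recovery policy on -ENOSPC is entirely
 * driver specific): a driver typically calls this late in its atomic check,
 * once the per-stream BWs have been set with
 * drm_dp_tunnel_atomic_set_stream_bw():
 *
 *	u32 failed_stream_mask;
 *	int err;
 *
 *	err = drm_dp_tunnel_atomic_check_stream_bws(state, &failed_stream_mask);
 *	if (err == -ENOSPC) {
 *		... reduce the link or stream parameters for the streams in
 *		... failed_stream_mask and re-run the check
 *	} else if (err) {
 *		return err;
 *	}
 */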
1875 
1876 static void destroy_mgr(struct drm_dp_tunnel_mgr *mgr)
1877 {
1878 	int i;
1879 
1880 	for (i = 0; i < mgr->group_count; i++) {
1881 		cleanup_group(&mgr->groups[i]);
1882 		drm_WARN_ON(mgr->dev, !list_empty(&mgr->groups[i].tunnels));
1883 	}
1884 
1885 #ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
1886 	ref_tracker_dir_exit(&mgr->ref_tracker);
1887 #endif
1888 
1889 	kfree(mgr->groups);
1890 	kfree(mgr);
1891 }
1892 
1893 /**
1894  * drm_dp_tunnel_mgr_create - Create a DP tunnel manager
1895  * @dev: DRM device object
1896  * @max_group_count: Maximum number of tunnel groups
1897  *
1898  * Creates a DP tunnel manager for @dev.
1899  *
1900  * Returns a pointer to the tunnel manager if created successfully or an error
1901  * pointer in case of failure.
1902  */
1903 struct drm_dp_tunnel_mgr *
1904 drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
1905 {
1906 	struct drm_dp_tunnel_mgr *mgr;
1907 	int i;
1908 
1909 	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
1910 	if (!mgr)
1911 		return ERR_PTR(-ENOMEM);
1912 
1913 	mgr->dev = dev;
1914 	init_waitqueue_head(&mgr->bw_req_queue);
1915 
1916 	mgr->groups = kcalloc(max_group_count, sizeof(*mgr->groups), GFP_KERNEL);
1917 	if (!mgr->groups) {
1918 		kfree(mgr);
1919 
1920 		return ERR_PTR(-ENOMEM);
1921 	}
1922 
1923 #ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
1924 	ref_tracker_dir_init(&mgr->ref_tracker, 16, "drm_dptun");
1925 #endif
1926 
1927 	for (i = 0; i < max_group_count; i++) {
1928 		if (!init_group(mgr, &mgr->groups[i])) {
1929 			destroy_mgr(mgr);
1930 
1931 			return ERR_PTR(-ENOMEM);
1932 		}
1933 
1934 		mgr->group_count++;
1935 	}
1936 
1937 	return mgr;
1938 }
1939 EXPORT_SYMBOL(drm_dp_tunnel_mgr_create);
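
/*
 * Usage sketch (illustrative; max_dp_port_count and where the manager pointer
 * is kept are driver-specific assumptions): one manager is created at driver
 * init time, sized by the maximum number of tunnel groups - for instance the
 * number of DP ports - and torn down with drm_dp_tunnel_mgr_destroy():
 *
 *	mgr = drm_dp_tunnel_mgr_create(dev, max_dp_port_count);
 *	if (IS_ERR(mgr))
 *		return PTR_ERR(mgr);
 *
 *	...
 *
 *	drm_dp_tunnel_mgr_destroy(mgr);
 */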
1940 
1941 /**
1942  * drm_dp_tunnel_mgr_destroy - Destroy DP tunnel manager
1943  * @mgr: Tunnel manager object
1944  *
1945  * Destroy the tunnel manager.
1946  */
1947 void drm_dp_tunnel_mgr_destroy(struct drm_dp_tunnel_mgr *mgr)
1948 {
1949 	destroy_mgr(mgr);
1950 }
1951 EXPORT_SYMBOL(drm_dp_tunnel_mgr_destroy);
1952