1
2 /*
3 * Copyright 2022 Advanced Micro Devices, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: AMD
24 *
25 */
26 /*********************************************************************/
27 // USB4 DPIA BANDWIDTH ALLOCATION LOGIC
28 /*********************************************************************/
29 #include "link_dp_dpia_bw.h"
30 #include "link_dpcd.h"
31 #include "dc_dmub_srv.h"
32
33 #define DC_LOGGER \
34 link->ctx->logger
35
36 #define Kbps_TO_Gbps (1000 * 1000)
37
38 #define MST_TIME_SLOT_COUNT 64
39
40 // ------------------------------------------------------------------
41 // PRIVATE FUNCTIONS
42 // ------------------------------------------------------------------
43 /*
44 * Always Check the following:
45 * - Is it USB4 link?
46 * - Is HPD HIGH?
47 * - Is BW Allocation Support Mode enabled on DP-Tx?
48 */
link_dp_is_bw_alloc_available(struct dc_link * link)49 static bool link_dp_is_bw_alloc_available(struct dc_link *link)
50 {
51 return (link && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling
52 && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc
53 && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support);
54 }
55
reset_bw_alloc_struct(struct dc_link * link)56 static void reset_bw_alloc_struct(struct dc_link *link)
57 {
58 link->dpia_bw_alloc_config.bw_alloc_enabled = false;
59 link->dpia_bw_alloc_config.link_verified_bw = 0;
60 link->dpia_bw_alloc_config.link_max_bw = 0;
61 link->dpia_bw_alloc_config.allocated_bw = 0;
62 link->dpia_bw_alloc_config.estimated_bw = 0;
63 link->dpia_bw_alloc_config.bw_granularity = 0;
64 link->dpia_bw_alloc_config.dp_overhead = 0;
65 link->dpia_bw_alloc_config.nrd_max_lane_count = 0;
66 link->dpia_bw_alloc_config.nrd_max_link_rate = 0;
67 for (int i = 0; i < MAX_SINKS_PER_LINK; i++)
68 link->dpia_bw_alloc_config.remote_sink_req_bw[i] = 0;
69 DC_LOG_DEBUG("reset usb4 bw alloc of link(%d)\n", link->link_index);
70 }
71
72 #define BW_GRANULARITY_0 4 // 0.25 Gbps
73 #define BW_GRANULARITY_1 2 // 0.5 Gbps
74 #define BW_GRANULARITY_2 1 // 1 Gbps
75
get_bw_granularity(struct dc_link * link)76 static uint8_t get_bw_granularity(struct dc_link *link)
77 {
78 uint8_t bw_granularity = 0;
79
80 core_link_read_dpcd(
81 link,
82 DP_BW_GRANULALITY,
83 &bw_granularity,
84 sizeof(uint8_t));
85
86 switch (bw_granularity & 0x3) {
87 case 0:
88 bw_granularity = BW_GRANULARITY_0;
89 break;
90 case 1:
91 bw_granularity = BW_GRANULARITY_1;
92 break;
93 case 2:
94 default:
95 bw_granularity = BW_GRANULARITY_2;
96 break;
97 }
98
99 return bw_granularity;
100 }
101
get_estimated_bw(struct dc_link * link)102 static int get_estimated_bw(struct dc_link *link)
103 {
104 uint8_t bw_estimated_bw = 0;
105
106 core_link_read_dpcd(
107 link,
108 ESTIMATED_BW,
109 &bw_estimated_bw,
110 sizeof(uint8_t));
111
112 return bw_estimated_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
113 }
114
get_non_reduced_max_link_rate(struct dc_link * link)115 static int get_non_reduced_max_link_rate(struct dc_link *link)
116 {
117 uint8_t nrd_max_link_rate = 0;
118
119 core_link_read_dpcd(
120 link,
121 DP_TUNNELING_MAX_LINK_RATE,
122 &nrd_max_link_rate,
123 sizeof(uint8_t));
124
125 return nrd_max_link_rate;
126 }
127
get_non_reduced_max_lane_count(struct dc_link * link)128 static int get_non_reduced_max_lane_count(struct dc_link *link)
129 {
130 uint8_t nrd_max_lane_count = 0;
131
132 core_link_read_dpcd(
133 link,
134 DP_TUNNELING_MAX_LANE_COUNT,
135 &nrd_max_lane_count,
136 sizeof(uint8_t));
137
138 return nrd_max_lane_count;
139 }
140
141 /*
142 * Read all New BW alloc configuration ex: estimated_bw, allocated_bw,
143 * granuality, Driver_ID, CM_Group, & populate the BW allocation structs
144 * for host router and dpia
145 */
static void retrieve_usb4_dp_bw_allocation_info(struct dc_link *link)
{
	/* Start from a clean slate so stale values never survive a re-read. */
	reset_bw_alloc_struct(link);

	/* init the known values */
	/* NOTE: granularity must be read before the estimate, because
	 * get_estimated_bw() divides by bw_granularity.
	 */
	link->dpia_bw_alloc_config.bw_granularity = get_bw_granularity(link);
	link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
	link->dpia_bw_alloc_config.nrd_max_link_rate = get_non_reduced_max_link_rate(link);
	link->dpia_bw_alloc_config.nrd_max_lane_count = get_non_reduced_max_lane_count(link);

	DC_LOG_DEBUG("%s: bw_granularity(%d), estimated_bw(%d)\n",
		__func__, link->dpia_bw_alloc_config.bw_granularity,
		link->dpia_bw_alloc_config.estimated_bw);
	DC_LOG_DEBUG("%s: nrd_max_link_rate(%d), nrd_max_lane_count(%d)\n",
		__func__, link->dpia_bw_alloc_config.nrd_max_link_rate,
		link->dpia_bw_alloc_config.nrd_max_lane_count);
}
163
164 /*
165 * Cleanup function for when the dpia is unplugged to reset struct
166 * and perform any required clean up
167 *
168 * @link: pointer to the dc_link struct instance
169 *
170 * return: none
171 */
dpia_bw_alloc_unplug(struct dc_link * link)172 static void dpia_bw_alloc_unplug(struct dc_link *link)
173 {
174 if (link) {
175 DC_LOG_DEBUG("%s: resetting BW alloc config for link(%d)\n",
176 __func__, link->link_index);
177 reset_bw_alloc_struct(link);
178 }
179 }
180
link_dpia_send_bw_alloc_request(struct dc_link * link,int req_bw)181 static void link_dpia_send_bw_alloc_request(struct dc_link *link, int req_bw)
182 {
183 uint8_t request_reg_val;
184 uint32_t temp, request_bw;
185
186 if (link->dpia_bw_alloc_config.bw_granularity == 0) {
187 DC_LOG_ERROR("%s: Link[%d]: bw_granularity is zero!", __func__, link->link_index);
188 return;
189 }
190
191 temp = req_bw * link->dpia_bw_alloc_config.bw_granularity;
192 request_reg_val = temp / Kbps_TO_Gbps;
193
194 /* Always make sure to add more to account for floating points */
195 if (temp % Kbps_TO_Gbps)
196 ++request_reg_val;
197
198 request_bw = request_reg_val * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
199
200 if (request_bw > link->dpia_bw_alloc_config.estimated_bw) {
201 DC_LOG_ERROR("%s: Link[%d]: Request BW (%d --> %d) > Estimated BW (%d)... Set to Estimated BW!",
202 __func__, link->link_index,
203 req_bw, request_bw, link->dpia_bw_alloc_config.estimated_bw);
204 req_bw = link->dpia_bw_alloc_config.estimated_bw;
205
206 temp = req_bw * link->dpia_bw_alloc_config.bw_granularity;
207 request_reg_val = temp / Kbps_TO_Gbps;
208 if (temp % Kbps_TO_Gbps)
209 ++request_reg_val;
210 }
211
212 link->dpia_bw_alloc_config.allocated_bw = request_bw;
213 DC_LOG_DC("%s: Link[%d]: Request BW: %d", __func__, link->link_index, request_bw);
214
215 core_link_write_dpcd(link, REQUESTED_BW,
216 &request_reg_val,
217 sizeof(uint8_t));
218 }
219
220 // ------------------------------------------------------------------
221 // PUBLIC FUNCTIONS
222 // ------------------------------------------------------------------
/*
 * Enable the DP-Tx side of USB4 BW allocation mode (with IRQ unmasked),
 * then populate the link's BW allocation config from DPCD.
 *
 * @link: pointer to the dc_link struct instance
 *
 * return: true if the DPCD enable write succeeded, false otherwise
 */
bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link)
{
	bool ret = false;
	uint8_t val;

	val = DPTX_BW_ALLOC_MODE_ENABLE | DPTX_BW_ALLOC_UNMASK_IRQ;

	if (core_link_write_dpcd(link, DPTX_BW_ALLOCATION_MODE_CONTROL, &val, sizeof(uint8_t)) == DC_OK) {
		DC_LOG_DEBUG("%s: link[%d] DPTX BW allocation mode enabled", __func__, link->link_index);

		/* Refresh granularity / estimated BW / NRD caps now that the mode is on. */
		retrieve_usb4_dp_bw_allocation_info(link);

		/* Prefer the non-reduced tunneling caps when both are reported. */
		if (
			link->dpia_bw_alloc_config.nrd_max_link_rate
			&& link->dpia_bw_alloc_config.nrd_max_lane_count) {
			link->reported_link_cap.link_rate = link->dpia_bw_alloc_config.nrd_max_link_rate;
			link->reported_link_cap.lane_count = link->dpia_bw_alloc_config.nrd_max_lane_count;
		}

		link->dpia_bw_alloc_config.bw_alloc_enabled = true;
		ret = true;

		if (link->dc->debug.dpia_debug.bits.enable_usb4_bw_zero_alloc_patch) {
			/*
			 * During DP tunnel creation, the CM preallocates BW
			 * and reduces the estimated BW of other DPIAs.
			 * The CM releases the preallocation only when the allocation is complete.
			 * Perform a zero allocation to make the CM release the preallocation
			 * and correctly update the estimated BW for all DPIAs per host router.
			 */
			link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0);
		}
	} else
		DC_LOG_DEBUG("%s: link[%d] failed to enable DPTX BW allocation mode", __func__, link->link_index);

	return ret;
}
260
261 /*
262 * Handle DP BW allocation status register
263 *
264 * @link: pointer to the dc_link struct instance
265 * @status: content of DP tunneling status DPCD register
266 *
267 * return: none
268 */
void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status)
{
	/* Status bits are independent; each is handled in turn and then the
	 * whole status value is written back to acknowledge all of them.
	 */
	if (status & DP_TUNNELING_BW_REQUEST_SUCCEEDED) {
		DC_LOG_DEBUG("%s: BW Allocation request succeeded on link(%d)",
				__func__, link->link_index);
	}

	if (status & DP_TUNNELING_BW_REQUEST_FAILED) {
		DC_LOG_DEBUG("%s: BW Allocation request failed on link(%d) allocated/estimated BW=%d",
				__func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw);

		/* Fall back to requesting the CM's current estimated BW. */
		link_dpia_send_bw_alloc_request(link, link->dpia_bw_alloc_config.estimated_bw);
	}

	if (status & DP_TUNNELING_BW_ALLOC_CAP_CHANGED) {
		/* Granularity changed: re-read it before any further conversions. */
		link->dpia_bw_alloc_config.bw_granularity = get_bw_granularity(link);

		DC_LOG_DEBUG("%s: Granularity changed on link(%d) new granularity=%d",
				__func__, link->link_index, link->dpia_bw_alloc_config.bw_granularity);
	}

	if (status & DP_TUNNELING_ESTIMATED_BW_CHANGED) {
		/* Cache the CM's updated estimate (depends on current granularity). */
		link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);

		DC_LOG_DEBUG("%s: Estimated BW changed on link(%d) new estimated BW=%d",
				__func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw);
	}

	/* Acknowledge the handled status bits back to the DPCD status register. */
	core_link_write_dpcd(
		link, DP_TUNNELING_STATUS,
		&status, sizeof(status));
}
301
302 /*
303 * Handle the DP Bandwidth allocation for DPIA
304 *
305 */
dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link * link,int peak_bw)306 void dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw)
307 {
308 if (link && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling
309 && link->dpia_bw_alloc_config.bw_alloc_enabled) {
310 if (peak_bw > 0) {
311 // If DP over USB4 then we need to check BW allocation
312 link->dpia_bw_alloc_config.link_max_bw = peak_bw;
313
314 link_dpia_send_bw_alloc_request(link, peak_bw);
315 } else
316 dpia_bw_alloc_unplug(link);
317 }
318 }
319
link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link * link,int req_bw)320 void link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw)
321 {
322 link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
323
324 DC_LOG_DEBUG("%s: ENTER: link[%d] hpd(%d) Allocated_BW: %d Estimated_BW: %d Req_BW: %d",
325 __func__, link->link_index, link->hpd_status,
326 link->dpia_bw_alloc_config.allocated_bw,
327 link->dpia_bw_alloc_config.estimated_bw,
328 req_bw);
329
330 if (link_dp_is_bw_alloc_available(link))
331 link_dpia_send_bw_alloc_request(link, req_bw);
332 else
333 DC_LOG_DEBUG("%s: BW Allocation mode not available", __func__);
334 }
335
link_dpia_get_dp_overhead(const struct dc_link * link)336 uint32_t link_dpia_get_dp_overhead(const struct dc_link *link)
337 {
338 uint32_t link_dp_overhead = 0;
339
340 if ((link->type == dc_connection_mst_branch) &&
341 !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
342 /* For 8b/10b encoding: MTP is 64 time slots long, slot 0 is used for MTPH
343 * MST overhead is 1/64 of link bandwidth (excluding any overhead)
344 */
345 const struct dc_link_settings *link_cap = dc_link_get_link_cap(link);
346
347 if (link_cap) {
348 uint32_t link_bw_in_kbps = (uint32_t)link_cap->link_rate *
349 (uint32_t)link_cap->lane_count *
350 LINK_RATE_REF_FREQ_IN_KHZ * 8;
351 link_dp_overhead = (link_bw_in_kbps / MST_TIME_SLOT_COUNT)
352 + ((link_bw_in_kbps % MST_TIME_SLOT_COUNT) ? 1 : 0);
353 }
354 }
355
356 return link_dp_overhead;
357 }
358
359 /*
360 * Aggregates the DPIA bandwidth usage for the respective USB4 Router.
361 * And then validate if the required bandwidth is within the router's capacity.
362 *
363 * @dc_validation_dpia_set: pointer to the dc_validation_dpia_set
364 * @count: number of DPIA validation sets
365 *
366 * return: true if validation is succeeded
367 */
bool link_dpia_validate_dp_tunnel_bandwidth(const struct dc_validation_dpia_set *dpia_link_sets, uint8_t count)
{
	uint32_t granularity_Gbps;
	const struct dc_link *link;
	uint32_t link_bw_granularity;
	uint32_t link_required_bw;
	/* One slot per host router, keyed by CM ID; filled sequentially. */
	struct usb4_router_validation_set router_sets[MAX_HOST_ROUTERS_NUM] = { 0 };
	uint8_t i;
	bool is_success = true;
	uint8_t router_count = 0;

	/* Nothing to validate -> vacuously successful. */
	if ((dpia_link_sets == NULL) || (count == 0))
		return is_success;

	// Iterate through each DP tunneling link (DPIA).
	// Aggregate its bandwidth requirements onto the respective USB4 router.
	for (i = 0; i < count; i++) {
		link = dpia_link_sets[i].link;
		link_required_bw = dpia_link_sets[i].required_bw;
		const struct dc_tunnel_settings *dp_tunnel_settings = dpia_link_sets[i].tunnel_settings;

		/* Malformed entry: stop aggregating (remaining entries are skipped). */
		if ((link == NULL) || (dp_tunnel_settings == NULL) || dp_tunnel_settings->bw_granularity == 0)
			break;

		/* MST links carry extra MTPH overhead on top of the stream BW. */
		if (link->type == dc_connection_mst_branch)
			link_required_bw += link_dpia_get_dp_overhead(link);

		/* Round the required BW up to a whole number of granularity units. */
		granularity_Gbps = (Kbps_TO_Gbps / dp_tunnel_settings->bw_granularity);
		link_bw_granularity = (link_required_bw / granularity_Gbps) * granularity_Gbps +
				((link_required_bw % granularity_Gbps) ? granularity_Gbps : 0);

		// Find or add the USB4 router associated with the current DPIA link
		for (uint8_t j = 0; j < MAX_HOST_ROUTERS_NUM; j++) {
			/* First empty slot: claim it for this CM ID (then the
			 * match below aggregates into it immediately).
			 */
			if (router_sets[j].is_valid == false) {
				router_sets[j].is_valid = true;
				router_sets[j].cm_id = dp_tunnel_settings->cm_id;
				router_count++;
			}

			if (router_sets[j].cm_id == dp_tunnel_settings->cm_id) {
				uint32_t remaining_bw =
					dp_tunnel_settings->estimated_bw - dp_tunnel_settings->allocated_bw;

				router_sets[j].allocated_bw += dp_tunnel_settings->allocated_bw;

				/* Track the largest per-DPIA headroom seen for this router. */
				if (remaining_bw > router_sets[j].remaining_bw)
					router_sets[j].remaining_bw = remaining_bw;

				// Get the max estimated BW within the same CM_ID
				if (dp_tunnel_settings->estimated_bw > router_sets[j].estimated_bw)
					router_sets[j].estimated_bw = dp_tunnel_settings->estimated_bw;

				router_sets[j].required_bw += link_bw_granularity;
				router_sets[j].dpia_count++;
				break;
			}
		}
	}

	// Validate bandwidth for each unique router found.
	for (i = 0; i < router_count; i++) {
		uint32_t total_bw = 0;

		/* Slots are filled sequentially, so the first invalid one ends the scan. */
		if (router_sets[i].is_valid == false)
			break;

		// Determine the total available bandwidth for the current router based on aggregated data
		if ((router_sets[i].dpia_count == 1) || (router_sets[i].allocated_bw == 0))
			total_bw = router_sets[i].estimated_bw;
		else
			total_bw = router_sets[i].allocated_bw + router_sets[i].remaining_bw;

		if (router_sets[i].required_bw > total_bw) {
			is_success = false;
			break;
		}
	}

	return is_success;
}
448
449