1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2020-2022 Realtek Corporation
3 */
4
5 #include "chan.h"
6 #include "coex.h"
7 #include "debug.h"
8 #include "fw.h"
9 #include "mac.h"
10 #include "ps.h"
11 #include "util.h"
12
13 static enum rtw89_subband rtw89_get_subband_type(enum rtw89_band band,
14 u8 center_chan)
15 {
16 switch (band) {
17 default:
18 case RTW89_BAND_2G:
19 switch (center_chan) {
20 default:
21 case 1 ... 14:
22 return RTW89_CH_2G;
23 }
24 case RTW89_BAND_5G:
25 switch (center_chan) {
26 default:
27 case 36 ... 64:
28 return RTW89_CH_5G_BAND_1;
29 case 100 ... 144:
30 return RTW89_CH_5G_BAND_3;
31 case 149 ... 177:
32 return RTW89_CH_5G_BAND_4;
33 }
34 case RTW89_BAND_6G:
35 switch (center_chan) {
36 default:
37 case 1 ... 29:
38 return RTW89_CH_6G_BAND_IDX0;
39 case 33 ... 61:
40 return RTW89_CH_6G_BAND_IDX1;
41 case 65 ... 93:
42 return RTW89_CH_6G_BAND_IDX2;
43 case 97 ... 125:
44 return RTW89_CH_6G_BAND_IDX3;
45 case 129 ... 157:
46 return RTW89_CH_6G_BAND_IDX4;
47 case 161 ... 189:
48 return RTW89_CH_6G_BAND_IDX5;
49 case 193 ... 221:
50 return RTW89_CH_6G_BAND_IDX6;
51 case 225 ... 253:
52 return RTW89_CH_6G_BAND_IDX7;
53 }
54 }
55 }
56
57 static enum rtw89_sc_offset rtw89_get_primary_chan_idx(enum rtw89_bandwidth bw,
58 u32 center_freq,
59 u32 primary_freq)
60 {
61 u8 primary_chan_idx;
62 u32 offset;
63
64 switch (bw) {
65 default:
66 case RTW89_CHANNEL_WIDTH_20:
67 primary_chan_idx = RTW89_SC_DONT_CARE;
68 break;
69 case RTW89_CHANNEL_WIDTH_40:
70 if (primary_freq > center_freq)
71 primary_chan_idx = RTW89_SC_20_UPPER;
72 else
73 primary_chan_idx = RTW89_SC_20_LOWER;
74 break;
75 case RTW89_CHANNEL_WIDTH_80:
76 case RTW89_CHANNEL_WIDTH_160:
77 if (primary_freq > center_freq) {
78 offset = (primary_freq - center_freq - 10) / 20;
79 primary_chan_idx = RTW89_SC_20_UPPER + offset * 2;
80 } else {
81 offset = (center_freq - primary_freq - 10) / 20;
82 primary_chan_idx = RTW89_SC_20_LOWER + offset * 2;
83 }
84 break;
85 }
86
87 return primary_chan_idx;
88 }
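/* Illustrative example (not part of the original source): for an 80 MHz
 * channel centered on channel 42 (5210 MHz) with primary channel 36
 * (5180 MHz), primary_freq < center_freq, so
 * offset = (5210 - 5180 - 10) / 20 = 1 and the function returns
 * RTW89_SC_20_LOWER + 2.
 */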
89
90 static u8 rtw89_get_primary_sb_idx(u8 central_ch, u8 pri_ch,
91 enum rtw89_bandwidth bw)
92 {
93 static const u8 prisb_cal_ofst[RTW89_CHANNEL_WIDTH_ORDINARY_NUM] = {
94 0, 2, 6, 14, 30
95 };
96
97 if (bw >= RTW89_CHANNEL_WIDTH_ORDINARY_NUM)
98 return 0;
99
100 return (prisb_cal_ofst[bw] + pri_ch - central_ch) / 4;
101 }
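/* Illustrative example (not part of the original source), assuming
 * RTW89_CHANNEL_WIDTH_80 selects the third table entry (offset 6): with
 * central_ch 42, the four possible primary channels 36/40/44/48 map to
 * (6 + 36 - 42) / 4 = 0, (6 + 40 - 42) / 4 = 1, (6 + 44 - 42) / 4 = 2 and
 * (6 + 48 - 42) / 4 = 3, i.e. the primary sub-band index counted from the
 * lowest 20 MHz sub-channel.
 */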
102
103 void rtw89_chan_create(struct rtw89_chan *chan, u8 center_chan, u8 primary_chan,
104 enum rtw89_band band, enum rtw89_bandwidth bandwidth)
105 {
106 enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
107 u32 center_freq, primary_freq;
108
109 memset(chan, 0, sizeof(*chan));
110 chan->channel = center_chan;
111 chan->primary_channel = primary_chan;
112 chan->band_type = band;
113 chan->band_width = bandwidth;
114
115 center_freq = ieee80211_channel_to_frequency(center_chan, nl_band);
116 primary_freq = ieee80211_channel_to_frequency(primary_chan, nl_band);
117
118 chan->freq = center_freq;
119 chan->subband_type = rtw89_get_subband_type(band, center_chan);
120 chan->pri_ch_idx = rtw89_get_primary_chan_idx(bandwidth, center_freq,
121 primary_freq);
122 chan->pri_sb_idx = rtw89_get_primary_sb_idx(center_chan, primary_chan,
123 bandwidth);
124 }
125
126 bool rtw89_assign_entity_chan(struct rtw89_dev *rtwdev,
127 enum rtw89_sub_entity_idx idx,
128 const struct rtw89_chan *new)
129 {
130 struct rtw89_hal *hal = &rtwdev->hal;
131 struct rtw89_chan *chan = &hal->sub[idx].chan;
132 struct rtw89_chan_rcd *rcd = &hal->sub[idx].rcd;
133 bool band_changed;
134
135 rcd->prev_primary_channel = chan->primary_channel;
136 rcd->prev_band_type = chan->band_type;
137 band_changed = new->band_type != chan->band_type;
138 rcd->band_changed = band_changed;
139
140 *chan = *new;
141 return band_changed;
142 }
143
144 static void __rtw89_config_entity_chandef(struct rtw89_dev *rtwdev,
145 enum rtw89_sub_entity_idx idx,
146 const struct cfg80211_chan_def *chandef,
147 bool from_stack)
148 {
149 struct rtw89_hal *hal = &rtwdev->hal;
150
151 hal->sub[idx].chandef = *chandef;
152
153 if (from_stack)
154 set_bit(idx, hal->entity_map);
155 }
156
157 void rtw89_config_entity_chandef(struct rtw89_dev *rtwdev,
158 enum rtw89_sub_entity_idx idx,
159 const struct cfg80211_chan_def *chandef)
160 {
161 __rtw89_config_entity_chandef(rtwdev, idx, chandef, true);
162 }
163
164 void rtw89_config_roc_chandef(struct rtw89_dev *rtwdev,
165 enum rtw89_sub_entity_idx idx,
166 const struct cfg80211_chan_def *chandef)
167 {
168 struct rtw89_hal *hal = &rtwdev->hal;
169 enum rtw89_sub_entity_idx cur;
170
171 if (chandef) {
172 cur = atomic_cmpxchg(&hal->roc_entity_idx,
173 RTW89_SUB_ENTITY_IDLE, idx);
174 if (cur != RTW89_SUB_ENTITY_IDLE) {
175 rtw89_debug(rtwdev, RTW89_DBG_TXRX,
176 "ROC still processing on entity %d\n", idx);
177 return;
178 }
179
180 hal->roc_chandef = *chandef;
181 } else {
182 cur = atomic_cmpxchg(&hal->roc_entity_idx, idx,
183 RTW89_SUB_ENTITY_IDLE);
184 if (cur == idx)
185 return;
186
187 if (cur == RTW89_SUB_ENTITY_IDLE)
188 rtw89_debug(rtwdev, RTW89_DBG_TXRX,
189 "ROC already finished on entity %d\n", idx);
190 else
191 rtw89_debug(rtwdev, RTW89_DBG_TXRX,
192 "ROC is processing on entity %d\n", cur);
193 }
194 }
195
196 static void rtw89_config_default_chandef(struct rtw89_dev *rtwdev)
197 {
198 struct cfg80211_chan_def chandef = {0};
199
200 rtw89_get_default_chandef(&chandef);
201 __rtw89_config_entity_chandef(rtwdev, RTW89_SUB_ENTITY_0, &chandef, false);
202 }
203
204 void rtw89_entity_init(struct rtw89_dev *rtwdev)
205 {
206 struct rtw89_hal *hal = &rtwdev->hal;
207
208 hal->entity_pause = false;
209 bitmap_zero(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
210 bitmap_zero(hal->changes, NUM_OF_RTW89_CHANCTX_CHANGES);
211 atomic_set(&hal->roc_entity_idx, RTW89_SUB_ENTITY_IDLE);
212 rtw89_config_default_chandef(rtwdev);
213 }
214
215 enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev)
216 {
217 struct rtw89_hal *hal = &rtwdev->hal;
218 const struct cfg80211_chan_def *chandef;
219 enum rtw89_entity_mode mode;
220 struct rtw89_chan chan;
221 u8 weight;
222 u8 last;
223 u8 idx;
224
225 lockdep_assert_held(&rtwdev->mutex);
226
227 weight = bitmap_weight(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
228 switch (weight) {
229 default:
230 rtw89_warn(rtwdev, "unknown ent chan weight: %d\n", weight);
231 bitmap_zero(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
232 fallthrough;
233 case 0:
234 rtw89_config_default_chandef(rtwdev);
235 fallthrough;
236 case 1:
237 last = RTW89_SUB_ENTITY_0;
238 mode = RTW89_ENTITY_MODE_SCC;
239 break;
240 case 2:
241 last = RTW89_SUB_ENTITY_1;
242 mode = rtw89_get_entity_mode(rtwdev);
243 if (mode == RTW89_ENTITY_MODE_MCC)
244 break;
245
246 mode = RTW89_ENTITY_MODE_MCC_PREPARE;
247 break;
248 }
249
250 for (idx = 0; idx <= last; idx++) {
251 chandef = rtw89_chandef_get(rtwdev, idx);
252 rtw89_get_channel_params(chandef, &chan);
253 if (chan.channel == 0) {
254 WARN(1, "Invalid channel on chanctx %d\n", idx);
255 return RTW89_ENTITY_MODE_INVALID;
256 }
257
258 rtw89_assign_entity_chan(rtwdev, idx, &chan);
259 }
260
261 if (hal->entity_pause)
262 return rtw89_get_entity_mode(rtwdev);
263
264 rtw89_set_entity_mode(rtwdev, mode);
265 return mode;
266 }
267
268 static void rtw89_chanctx_notify(struct rtw89_dev *rtwdev,
269 enum rtw89_chanctx_state state)
270 {
271 const struct rtw89_chip_info *chip = rtwdev->chip;
272 const struct rtw89_chanctx_listener *listener = chip->chanctx_listener;
273 int i;
274
275 if (!listener)
276 return;
277
278 for (i = 0; i < NUM_OF_RTW89_CHANCTX_CALLBACKS; i++) {
279 if (!listener->callbacks[i])
280 continue;
281
282 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
283 "chanctx notify listener: cb %d, state %d\n",
284 i, state);
285
286 listener->callbacks[i](rtwdev, state);
287 }
288 }
289
290 /* This function centrally manages how MCC roles are sorted and iterated.
291  * It also guarantees that ordered_idx is less than NUM_OF_RTW89_MCC_ROLES,
292  * so if data needs to carry an array indexed by ordered_idx, the array can
293  * be declared with NUM_OF_RTW89_MCC_ROLES entries. Besides, the entire
294  * iteration stops immediately as soon as the iterator returns a non-zero value.
295  */
296 static
297 int rtw89_iterate_mcc_roles(struct rtw89_dev *rtwdev,
298 int (*iterator)(struct rtw89_dev *rtwdev,
299 struct rtw89_mcc_role *mcc_role,
300 unsigned int ordered_idx,
301 void *data),
302 void *data)
303 {
304 struct rtw89_mcc_info *mcc = &rtwdev->mcc;
305 struct rtw89_mcc_role * const roles[] = {
306 &mcc->role_ref,
307 &mcc->role_aux,
308 };
309 unsigned int idx;
310 int ret;
311
312 BUILD_BUG_ON(ARRAY_SIZE(roles) != NUM_OF_RTW89_MCC_ROLES);
313
314 for (idx = 0; idx < NUM_OF_RTW89_MCC_ROLES; idx++) {
315 ret = iterator(rtwdev, roles[idx], idx, data);
316 if (ret)
317 return ret;
318 }
319
320 return 0;
321 }
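/* Usage sketch (hypothetical, not part of the original source): because
 * ordered_idx is guaranteed to stay below NUM_OF_RTW89_MCC_ROLES, an
 * iterator may safely index a fixed-size array passed through data, e.g.
 *
 *	struct example_dur_data {
 *		u16 dur[NUM_OF_RTW89_MCC_ROLES];
 *	};
 *
 *	static int example_collect_dur(struct rtw89_dev *rtwdev,
 *				       struct rtw89_mcc_role *mcc_role,
 *				       unsigned int ordered_idx, void *data)
 *	{
 *		struct example_dur_data *p = data;
 *
 *		p->dur[ordered_idx] = mcc_role->duration;
 *		return 0;
 *	}
 *
 * and then call rtw89_iterate_mcc_roles(rtwdev, example_collect_dur, &data).
 */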
322
323 /* For now, IEEE80211_HW_TIMING_BEACON_ONLY keeps things simple and ensures
324  * the correctness of the MCC calculation logic below. We have noticed that
325  * once the driver declares WIPHY_FLAG_SUPPORTS_MLO, the use of
326  * IEEE80211_HW_TIMING_BEACON_ONLY will be restricted. We will provide an
327  * alternative in the driver when it is ready for MLO.
328  */
329 static u32 rtw89_mcc_get_tbtt_ofst(struct rtw89_dev *rtwdev,
330 struct rtw89_mcc_role *role, u64 tsf)
331 {
332 struct rtw89_vif *rtwvif = role->rtwvif;
333 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
334 u32 bcn_intvl_us = ieee80211_tu_to_usec(role->beacon_interval);
335 u64 sync_tsf = vif->bss_conf.sync_tsf;
336 u32 remainder;
337
338 if (tsf < sync_tsf) {
339 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
340 "MCC get tbtt ofst: tsf might not update yet\n");
341 sync_tsf = 0;
342 }
343
344 div_u64_rem(tsf - sync_tsf, bcn_intvl_us, &remainder);
345
346 return remainder;
347 }
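/* Illustrative example (not part of the original source): with a 100 TU
 * beacon interval (102400 us), sync_tsf = 1000000 and tsf = 1250000, the
 * result is (1250000 - 1000000) % 102400 = 45200 us, i.e. the time elapsed
 * since the most recent TBTT.
 */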
348
349 static u16 rtw89_mcc_get_bcn_ofst(struct rtw89_dev *rtwdev)
350 {
351 struct rtw89_mcc_info *mcc = &rtwdev->mcc;
352 struct rtw89_mcc_role *ref = &mcc->role_ref;
353 struct rtw89_mcc_role *aux = &mcc->role_aux;
354 struct rtw89_mac_mcc_tsf_rpt rpt = {};
355 struct rtw89_fw_mcc_tsf_req req = {};
356 u32 bcn_intvl_ref_us = ieee80211_tu_to_usec(ref->beacon_interval);
357 u32 tbtt_ofst_ref, tbtt_ofst_aux;
358 u64 tsf_ref, tsf_aux;
359 int ret;
360
361 req.group = mcc->group;
362 req.macid_x = ref->rtwvif->mac_id;
363 req.macid_y = aux->rtwvif->mac_id;
364 ret = rtw89_fw_h2c_mcc_req_tsf(rtwdev, &req, &rpt);
365 if (ret) {
366 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
367 "MCC h2c failed to request tsf: %d\n", ret);
368 return RTW89_MCC_DFLT_BCN_OFST_TIME;
369 }
370
371 tsf_ref = (u64)rpt.tsf_x_high << 32 | rpt.tsf_x_low;
372 tsf_aux = (u64)rpt.tsf_y_high << 32 | rpt.tsf_y_low;
373 tbtt_ofst_ref = rtw89_mcc_get_tbtt_ofst(rtwdev, ref, tsf_ref);
374 tbtt_ofst_aux = rtw89_mcc_get_tbtt_ofst(rtwdev, aux, tsf_aux);
375
376 while (tbtt_ofst_ref < tbtt_ofst_aux)
377 tbtt_ofst_ref += bcn_intvl_ref_us;
378
379 return (tbtt_ofst_ref - tbtt_ofst_aux) / 1024;
380 }
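/* Illustrative example (not part of the original source): with a 100 TU
 * reference beacon interval (102400 us), tbtt_ofst_ref = 20000 us and
 * tbtt_ofst_aux = 60000 us, the loop lifts tbtt_ofst_ref to 122400 us and
 * the returned beacon offset is (122400 - 60000) / 1024 = 60 TU.
 */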
381
382 static
383 void rtw89_mcc_role_fw_macid_bitmap_set_bit(struct rtw89_mcc_role *mcc_role,
384 unsigned int bit)
385 {
386 unsigned int idx = bit / 8;
387 unsigned int pos = bit % 8;
388
389 if (idx >= ARRAY_SIZE(mcc_role->macid_bitmap))
390 return;
391
392 mcc_role->macid_bitmap[idx] |= BIT(pos);
393 }
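/* Illustrative example (not part of the original source): bit 11 maps to
 * idx = 11 / 8 = 1 and pos = 11 % 8 = 3, so macid_bitmap[1] gets BIT(3)
 * (0x08) set.
 */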
394
395 static void rtw89_mcc_role_macid_sta_iter(void *data, struct ieee80211_sta *sta)
396 {
397 struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
398 struct rtw89_vif *rtwvif = rtwsta->rtwvif;
399 struct rtw89_mcc_role *mcc_role = data;
400 struct rtw89_vif *target = mcc_role->rtwvif;
401
402 if (rtwvif != target)
403 return;
404
405 rtw89_mcc_role_fw_macid_bitmap_set_bit(mcc_role, rtwsta->mac_id);
406 }
407
408 static void rtw89_mcc_fill_role_macid_bitmap(struct rtw89_dev *rtwdev,
409 struct rtw89_mcc_role *mcc_role)
410 {
411 struct rtw89_vif *rtwvif = mcc_role->rtwvif;
412
413 rtw89_mcc_role_fw_macid_bitmap_set_bit(mcc_role, rtwvif->mac_id);
414 ieee80211_iterate_stations_atomic(rtwdev->hw,
415 rtw89_mcc_role_macid_sta_iter,
416 mcc_role);
417 }
418
419 static void rtw89_mcc_fill_role_policy(struct rtw89_dev *rtwdev,
420 struct rtw89_mcc_role *mcc_role)
421 {
422 struct rtw89_mcc_policy *policy = &mcc_role->policy;
423
424 policy->c2h_rpt = RTW89_FW_MCC_C2H_RPT_ALL;
425 policy->tx_null_early = RTW89_MCC_DFLT_TX_NULL_EARLY;
426 policy->in_curr_ch = false;
427 policy->dis_sw_retry = true;
428 policy->sw_retry_count = false;
429
430 if (mcc_role->is_go)
431 policy->dis_tx_null = true;
432 else
433 policy->dis_tx_null = false;
434 }
435
436 static void rtw89_mcc_fill_role_limit(struct rtw89_dev *rtwdev,
437 struct rtw89_mcc_role *mcc_role)
438 {
439 struct ieee80211_vif *vif = rtwvif_to_vif(mcc_role->rtwvif);
440 struct ieee80211_p2p_noa_desc *noa_desc;
441 u32 bcn_intvl_us = ieee80211_tu_to_usec(mcc_role->beacon_interval);
442 u32 max_toa_us, max_tob_us, max_dur_us;
443 u32 start_time, interval, duration;
444 u64 tsf, tsf_lmt;
445 int ret;
446 int i;
447
448 if (!mcc_role->is_go && !mcc_role->is_gc)
449 return;
450
451 /* find the first periodic NoA */
452 for (i = 0; i < RTW89_P2P_MAX_NOA_NUM; i++) {
453 noa_desc = &vif->bss_conf.p2p_noa_attr.desc[i];
454 if (noa_desc->count == 255)
455 goto fill;
456 }
457
458 return;
459
460 fill:
461 start_time = le32_to_cpu(noa_desc->start_time);
462 interval = le32_to_cpu(noa_desc->interval);
463 duration = le32_to_cpu(noa_desc->duration);
464
465 if (interval != bcn_intvl_us) {
466 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
467 "MCC role limit: mismatch interval: %d vs. %d\n",
468 interval, bcn_intvl_us);
469 return;
470 }
471
472 ret = rtw89_mac_port_get_tsf(rtwdev, mcc_role->rtwvif, &tsf);
473 if (ret) {
474 rtw89_warn(rtwdev, "MCC failed to get port tsf: %d\n", ret);
475 return;
476 }
477
478 tsf_lmt = (tsf & GENMASK_ULL(63, 32)) | start_time;
479 max_toa_us = rtw89_mcc_get_tbtt_ofst(rtwdev, mcc_role, tsf_lmt);
480 max_dur_us = interval - duration;
481 max_tob_us = max_dur_us - max_toa_us;
482
483 if (!max_toa_us || !max_tob_us) {
484 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
485 "MCC role limit: hit boundary\n");
486 return;
487 }
488
489 if (max_dur_us < max_toa_us) {
490 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
491 "MCC role limit: insufficient duration\n");
492 return;
493 }
494
495 mcc_role->limit.max_toa = max_toa_us / 1024;
496 mcc_role->limit.max_tob = max_tob_us / 1024;
497 mcc_role->limit.max_dur = max_dur_us / 1024;
498 mcc_role->limit.enable = true;
499
500 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
501 "MCC role limit: max_toa %d, max_tob %d, max_dur %d\n",
502 mcc_role->limit.max_toa, mcc_role->limit.max_tob,
503 mcc_role->limit.max_dur);
504 }
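/* Illustrative example with made-up numbers (not part of the original
 * source): given a 100 TU beacon interval (interval == 102400 us), a NoA
 * duration of 30000 us and a NoA start time whose TBTT offset works out to
 * 40000 us, the limits become max_toa = 40000 us,
 * max_dur = 102400 - 30000 = 72400 us and max_tob = 72400 - 40000 = 32400 us,
 * i.e. roughly 39, 70 and 31 TU after the division by 1024.
 */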
505
506 static int rtw89_mcc_fill_role(struct rtw89_dev *rtwdev,
507 struct rtw89_vif *rtwvif,
508 struct rtw89_mcc_role *role)
509 {
510 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
511 const struct rtw89_chan *chan;
512
513 memset(role, 0, sizeof(*role));
514 role->rtwvif = rtwvif;
515 role->beacon_interval = vif->bss_conf.beacon_int;
516
517 if (!role->beacon_interval) {
518 rtw89_warn(rtwdev,
519 "cannot handle MCC role without beacon interval\n");
520 return -EINVAL;
521 }
522
523 role->duration = role->beacon_interval / 2;
524
525 chan = rtw89_chan_get(rtwdev, rtwvif->sub_entity_idx);
526 role->is_2ghz = chan->band_type == RTW89_BAND_2G;
527 role->is_go = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_GO;
528 role->is_gc = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT;
529
530 rtw89_mcc_fill_role_macid_bitmap(rtwdev, role);
531 rtw89_mcc_fill_role_policy(rtwdev, role);
532 rtw89_mcc_fill_role_limit(rtwdev, role);
533
534 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
535 "MCC role: bcn_intvl %d, is_2ghz %d, is_go %d, is_gc %d\n",
536 role->beacon_interval, role->is_2ghz, role->is_go, role->is_gc);
537 return 0;
538 }
539
540 static void rtw89_mcc_fill_bt_role(struct rtw89_dev *rtwdev)
541 {
542 struct rtw89_mcc_info *mcc = &rtwdev->mcc;
543 struct rtw89_mcc_bt_role *bt_role = &mcc->bt_role;
544
545 memset(bt_role, 0, sizeof(*bt_role));
546 bt_role->duration = rtw89_coex_query_bt_req_len(rtwdev, RTW89_PHY_0);
547
548 rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC bt role: dur %d\n",
549 bt_role->duration);
550 }
551
552 struct rtw89_mcc_fill_role_selector {
553 struct rtw89_vif *bind_vif[NUM_OF_RTW89_SUB_ENTITY];
554 };
555
556 static_assert((u8)NUM_OF_RTW89_SUB_ENTITY >= NUM_OF_RTW89_MCC_ROLES);
557
558 static int rtw89_mcc_fill_role_iterator(struct rtw89_dev *rtwdev,
559 struct rtw89_mcc_role *mcc_role,
560 unsigned int ordered_idx,
561 void *data)
562 {
563 struct rtw89_mcc_fill_role_selector *sel = data;
564 struct rtw89_vif *role_vif = sel->bind_vif[ordered_idx];
565 int ret;
566
567 if (!role_vif) {
568 rtw89_warn(rtwdev, "cannot handle MCC without role[%d]\n",
569 ordered_idx);
570 return -EINVAL;
571 }
572
573 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
574 "MCC fill role[%d] with vif <macid %d>\n",
575 ordered_idx, role_vif->mac_id);
576
577 ret = rtw89_mcc_fill_role(rtwdev, role_vif, mcc_role);
578 if (ret)
579 return ret;
580
581 return 0;
582 }
583
584 static int rtw89_mcc_fill_all_roles(struct rtw89_dev *rtwdev)
585 {
586 struct rtw89_mcc_fill_role_selector sel = {};
587 struct rtw89_vif *rtwvif;
588 int ret;
589
590 rtw89_for_each_rtwvif(rtwdev, rtwvif) {
591 if (sel.bind_vif[rtwvif->sub_entity_idx]) {
592 rtw89_warn(rtwdev,
593 "MCC skip extra vif <macid %d> on chanctx[%d]\n",
594 rtwvif->mac_id, rtwvif->sub_entity_idx);
595 continue;
596 }
597
598 sel.bind_vif[rtwvif->sub_entity_idx] = rtwvif;
599 }
600
601 ret = rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_fill_role_iterator, &sel);
602 if (ret)
603 return ret;
604
605 rtw89_mcc_fill_bt_role(rtwdev);
606 return 0;
607 }
608
609 static void rtw89_mcc_assign_pattern(struct rtw89_dev *rtwdev,
610 const struct rtw89_mcc_pattern *new)
611 {
612 struct rtw89_mcc_info *mcc = &rtwdev->mcc;
613 struct rtw89_mcc_role *ref = &mcc->role_ref;
614 struct rtw89_mcc_role *aux = &mcc->role_aux;
615 struct rtw89_mcc_config *config = &mcc->config;
616 struct rtw89_mcc_pattern *pattern = &config->pattern;
617
618 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
619 "MCC assign pattern: ref {%d | %d}, aux {%d | %d}\n",
620 new->tob_ref, new->toa_ref, new->tob_aux, new->toa_aux);
621
622 *pattern = *new;
623 memset(&pattern->courtesy, 0, sizeof(pattern->courtesy));
624
625 if (pattern->tob_aux <= 0 || pattern->toa_aux <= 0) {
626 pattern->courtesy.macid_tgt = aux->rtwvif->mac_id;
627 pattern->courtesy.macid_src = ref->rtwvif->mac_id;
628 pattern->courtesy.slot_num = RTW89_MCC_DFLT_COURTESY_SLOT;
629 pattern->courtesy.enable = true;
630 } else if (pattern->tob_ref <= 0 || pattern->toa_ref <= 0) {
631 pattern->courtesy.macid_tgt = ref->rtwvif->mac_id;
632 pattern->courtesy.macid_src = aux->rtwvif->mac_id;
633 pattern->courtesy.slot_num = RTW89_MCC_DFLT_COURTESY_SLOT;
634 pattern->courtesy.enable = true;
635 }
636
637 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
638 "MCC pattern flags: plan %d, courtesy_en %d\n",
639 pattern->plan, pattern->courtesy.enable);
640
641 if (!pattern->courtesy.enable)
642 return;
643
644 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
645 "MCC pattern courtesy: tgt %d, src %d, slot %d\n",
646 pattern->courtesy.macid_tgt, pattern->courtesy.macid_src,
647 pattern->courtesy.slot_num);
648 }
649
650 /* The following diagram roughly shows the relationship between the
651  * parameters used for pattern calculation.
652 *
653  * |<    duration ref     >| (if mid bt) |<    duration aux     >|
654  * |< tob ref >|< toa ref >|     ...     |< tob aux >|< toa aux >|
655  *             V                                     V
656  *         tbtt ref                              tbtt aux
657  *             |<            beacon offset             >|
658 *
659  * In the loose pattern calculation, we only ensure that tob_ref and
660  * toa_ref come out positive. If tob_aux or toa_aux unfortunately turns
661  * out negative, FW will be notified to handle it with the courtesy
662  * mechanism.
663  */
664 static void __rtw89_mcc_calc_pattern_loose(struct rtw89_dev *rtwdev,
665 struct rtw89_mcc_pattern *ptrn,
666 bool hdl_bt)
667 {
668 struct rtw89_mcc_info *mcc = &rtwdev->mcc;
669 struct rtw89_mcc_role *ref = &mcc->role_ref;
670 struct rtw89_mcc_role *aux = &mcc->role_aux;
671 struct rtw89_mcc_config *config = &mcc->config;
672 u16 bcn_ofst = config->beacon_offset;
673 u16 bt_dur_in_mid = 0;
674 u16 max_bcn_ofst;
675 s16 upper, lower;
676 u16 res;
677
678 *ptrn = (typeof(*ptrn)){
679 .plan = hdl_bt ? RTW89_MCC_PLAN_TAIL_BT : RTW89_MCC_PLAN_NO_BT,
680 };
681
682 if (!hdl_bt)
683 goto calc;
684
685 max_bcn_ofst = ref->duration + aux->duration;
686 if (ref->limit.enable)
687 max_bcn_ofst = min_t(u16, max_bcn_ofst,
688 ref->limit.max_toa + aux->duration);
689 else if (aux->limit.enable)
690 max_bcn_ofst = min_t(u16, max_bcn_ofst,
691 ref->duration + aux->limit.max_tob);
692
693 if (bcn_ofst > max_bcn_ofst && bcn_ofst >= mcc->bt_role.duration) {
694 bt_dur_in_mid = mcc->bt_role.duration;
695 ptrn->plan = RTW89_MCC_PLAN_MID_BT;
696 }
697
698 calc:
699 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
700 "MCC calc ptrn_ls: plan %d, bcn_ofst %d\n",
701 ptrn->plan, bcn_ofst);
702
703 res = bcn_ofst - bt_dur_in_mid;
704 upper = min_t(s16, ref->duration, res);
705 lower = 0;
706
707 if (ref->limit.enable) {
708 upper = min_t(s16, upper, ref->limit.max_toa);
709 lower = max_t(s16, lower, ref->duration - ref->limit.max_tob);
710 } else if (aux->limit.enable) {
711 upper = min_t(s16, upper,
712 res - (aux->duration - aux->limit.max_toa));
713 lower = max_t(s16, lower, res - aux->limit.max_tob);
714 }
715
716 if (lower < upper)
717 ptrn->toa_ref = (upper + lower) / 2;
718 else
719 ptrn->toa_ref = lower;
720
721 ptrn->tob_ref = ref->duration - ptrn->toa_ref;
722 ptrn->tob_aux = res - ptrn->toa_ref;
723 ptrn->toa_aux = aux->duration - ptrn->tob_aux;
724 }
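/* Illustrative example (not part of the original source): with
 * ref->duration = aux->duration = 50 TU, bcn_ofst = 40 TU, no BT slot and
 * no role limits, res = 40, upper = min(50, 40) = 40 and lower = 0, so
 * toa_ref = 20, tob_ref = 30, tob_aux = 20 and toa_aux = 30; everything is
 * positive, so no courtesy handling is needed.
 */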
725
726 /* In the strict pattern calculation, we also take into account the timing
727  * that HW operations might need, i.e. min_tob and min_toa.
728  */
729 static int __rtw89_mcc_calc_pattern_strict(struct rtw89_dev *rtwdev,
730 struct rtw89_mcc_pattern *ptrn)
731 {
732 struct rtw89_mcc_info *mcc = &rtwdev->mcc;
733 struct rtw89_mcc_role *ref = &mcc->role_ref;
734 struct rtw89_mcc_role *aux = &mcc->role_aux;
735 struct rtw89_mcc_config *config = &mcc->config;
736 u16 min_tob = RTW89_MCC_EARLY_RX_BCN_TIME;
737 u16 min_toa = RTW89_MCC_MIN_RX_BCN_TIME;
738 u16 bcn_ofst = config->beacon_offset;
739 s16 upper_toa_ref, lower_toa_ref;
740 s16 upper_tob_aux, lower_tob_aux;
741 u16 bt_dur_in_mid;
742 s16 res;
743
744 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
745 "MCC calc ptrn_st: plan %d, bcn_ofst %d\n",
746 ptrn->plan, bcn_ofst);
747
748 if (ptrn->plan == RTW89_MCC_PLAN_MID_BT)
749 bt_dur_in_mid = mcc->bt_role.duration;
750 else
751 bt_dur_in_mid = 0;
752
753 if (ref->duration < min_tob + min_toa) {
754 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
755 "MCC calc ptrn_st: not meet ref dur cond\n");
756 return -EINVAL;
757 }
758
759 if (aux->duration < min_tob + min_toa) {
760 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
761 "MCC calc ptrn_st: not meet aux dur cond\n");
762 return -EINVAL;
763 }
764
765 res = bcn_ofst - min_toa - min_tob - bt_dur_in_mid;
766 if (res < 0) {
767 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
768 "MCC calc ptrn_st: not meet bcn_ofst cond\n");
769 return -EINVAL;
770 }
771
772 upper_toa_ref = min_t(s16, min_toa + res, ref->duration - min_tob);
773 lower_toa_ref = min_toa;
774 upper_tob_aux = min_t(s16, min_tob + res, aux->duration - min_toa);
775 lower_tob_aux = min_tob;
776
777 if (ref->limit.enable) {
778 if (min_tob > ref->limit.max_tob || min_toa > ref->limit.max_toa) {
779 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
780 "MCC calc ptrn_st: conflict ref limit\n");
781 return -EINVAL;
782 }
783
784 upper_toa_ref = min_t(s16, upper_toa_ref, ref->limit.max_toa);
785 lower_toa_ref = max_t(s16, lower_toa_ref,
786 ref->duration - ref->limit.max_tob);
787 } else if (aux->limit.enable) {
788 if (min_tob > aux->limit.max_tob || min_toa > aux->limit.max_toa) {
789 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
790 "MCC calc ptrn_st: conflict aux limit\n");
791 return -EINVAL;
792 }
793
794 upper_tob_aux = min_t(s16, upper_tob_aux, aux->limit.max_tob);
795 lower_tob_aux = max_t(s16, lower_tob_aux,
796 aux->duration - aux->limit.max_toa);
797 }
798
799 upper_toa_ref = min_t(s16, upper_toa_ref,
800 bcn_ofst - bt_dur_in_mid - lower_tob_aux);
801 lower_toa_ref = max_t(s16, lower_toa_ref,
802 bcn_ofst - bt_dur_in_mid - upper_tob_aux);
803 if (lower_toa_ref > upper_toa_ref) {
804 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
805 "MCC calc ptrn_st: conflict boundary\n");
806 return -EINVAL;
807 }
808
809 ptrn->toa_ref = (upper_toa_ref + lower_toa_ref) / 2;
810 ptrn->tob_ref = ref->duration - ptrn->toa_ref;
811 ptrn->tob_aux = bcn_ofst - ptrn->toa_ref - bt_dur_in_mid;
812 ptrn->toa_aux = aux->duration - ptrn->tob_aux;
813 return 0;
814 }
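/* Illustrative example with assumed constants (not part of the original
 * source): taking min_tob = 5 TU and min_toa = 10 TU purely for
 * illustration, and ref->duration = aux->duration = 50 TU, bcn_ofst = 40 TU,
 * no BT slot and no role limits: res = 40 - 10 - 5 = 25,
 * upper_toa_ref = min(10 + 25, 50 - 5) = 35, lower_toa_ref = 10,
 * upper_tob_aux = min(5 + 25, 50 - 10) = 30, lower_tob_aux = 5; after the
 * boundary clamp, toa_ref = (35 + 10) / 2 = 22, tob_ref = 28, tob_aux = 18
 * and toa_aux = 32, all of which respect the minimums.
 */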
815
816 static int rtw89_mcc_calc_pattern(struct rtw89_dev *rtwdev, bool hdl_bt)
817 {
818 struct rtw89_mcc_info *mcc = &rtwdev->mcc;
819 struct rtw89_mcc_role *ref = &mcc->role_ref;
820 struct rtw89_mcc_role *aux = &mcc->role_aux;
821 bool sel_plan[NUM_OF_RTW89_MCC_PLAN] = {};
822 struct rtw89_mcc_pattern ptrn;
823 int ret;
824 int i;
825
826 if (ref->limit.enable && aux->limit.enable) {
827 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
828 "MCC calc ptrn: not support dual limited roles\n");
829 return -EINVAL;
830 }
831
832 if (ref->limit.enable &&
833 ref->duration > ref->limit.max_tob + ref->limit.max_toa) {
834 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
835 "MCC calc ptrn: not fit ref limit\n");
836 return -EINVAL;
837 }
838
839 if (aux->limit.enable &&
840 aux->duration > aux->limit.max_tob + aux->limit.max_toa) {
841 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
842 "MCC calc ptrn: not fit aux limit\n");
843 return -EINVAL;
844 }
845
846 if (hdl_bt) {
847 sel_plan[RTW89_MCC_PLAN_TAIL_BT] = true;
848 sel_plan[RTW89_MCC_PLAN_MID_BT] = true;
849 } else {
850 sel_plan[RTW89_MCC_PLAN_NO_BT] = true;
851 }
852
853 for (i = 0; i < NUM_OF_RTW89_MCC_PLAN; i++) {
854 if (!sel_plan[i])
855 continue;
856
857 ptrn = (typeof(ptrn)){
858 .plan = i,
859 };
860
861 ret = __rtw89_mcc_calc_pattern_strict(rtwdev, &ptrn);
862 if (ret)
863 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
864 "MCC calc ptrn_st with plan %d: fail\n", i);
865 else
866 goto done;
867 }
868
869 __rtw89_mcc_calc_pattern_loose(rtwdev, &ptrn, hdl_bt);
870
871 done:
872 rtw89_mcc_assign_pattern(rtwdev, &ptrn);
873 return 0;
874 }
875
876 static void rtw89_mcc_set_default_pattern(struct rtw89_dev *rtwdev)
877 {
878 struct rtw89_mcc_info *mcc = &rtwdev->mcc;
879 struct rtw89_mcc_role *ref = &mcc->role_ref;
880 struct rtw89_mcc_role *aux = &mcc->role_aux;
881 struct rtw89_mcc_pattern tmp = {};
882
883 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
884 "MCC use default pattern unexpectedly\n");
885
886 tmp.plan = RTW89_MCC_PLAN_NO_BT;
887 tmp.tob_ref = ref->duration / 2;
888 tmp.toa_ref = ref->duration - tmp.tob_ref;
889 tmp.tob_aux = aux->duration / 2;
890 tmp.toa_aux = aux->duration - tmp.tob_aux;
891
892 rtw89_mcc_assign_pattern(rtwdev, &tmp);
893 }
894
895 static void rtw89_mcc_set_duration_go_sta(struct rtw89_dev *rtwdev,
896 struct rtw89_mcc_role *role_go,
897 struct rtw89_mcc_role *role_sta)
898 {
899 struct rtw89_mcc_info *mcc = &rtwdev->mcc;
900 struct rtw89_mcc_config *config = &mcc->config;
901 u16 mcc_intvl = config->mcc_interval;
902 u16 dur_go, dur_sta;
903
904 dur_go = clamp_t(u16, role_go->duration, RTW89_MCC_MIN_GO_DURATION,
905 mcc_intvl - RTW89_MCC_MIN_STA_DURATION);
906 if (role_go->limit.enable)
907 dur_go = min(dur_go, role_go->limit.max_dur);
908 dur_sta = mcc_intvl - dur_go;
909
910 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
911 "MCC set dur: (go, sta) {%d, %d} -> {%d, %d}\n",
912 role_go->duration, role_sta->duration, dur_go, dur_sta);
913
914 role_go->duration = dur_go;
915 role_sta->duration = dur_sta;
916 }
917
918 static void rtw89_mcc_set_duration_gc_sta(struct rtw89_dev *rtwdev)
919 {
920 struct rtw89_mcc_info *mcc = &rtwdev->mcc;
921 struct rtw89_mcc_role *ref = &mcc->role_ref;
922 struct rtw89_mcc_role *aux = &mcc->role_aux;
923 struct rtw89_mcc_config *config = &mcc->config;
924 u16 mcc_intvl = config->mcc_interval;
925 u16 dur_ref, dur_aux;
926
927 if (ref->duration < RTW89_MCC_MIN_STA_DURATION) {
928 dur_ref = RTW89_MCC_MIN_STA_DURATION;
929 dur_aux = mcc_intvl - dur_ref;
930 } else if (aux->duration < RTW89_MCC_MIN_STA_DURATION) {
931 dur_aux = RTW89_MCC_MIN_STA_DURATION;
932 dur_ref = mcc_intvl - dur_aux;
933 } else {
934 dur_ref = ref->duration;
935 dur_aux = mcc_intvl - dur_ref;
936 }
937
938 if (ref->limit.enable) {
939 dur_ref = min(dur_ref, ref->limit.max_dur);
940 dur_aux = mcc_intvl - dur_ref;
941 } else if (aux->limit.enable) {
942 dur_aux = min(dur_aux, aux->limit.max_dur);
943 dur_ref = mcc_intvl - dur_aux;
944 }
945
946 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
947 "MCC set dur: (ref, aux) {%d ~ %d} -> {%d ~ %d}\n",
948 ref->duration, aux->duration, dur_ref, dur_aux);
949
950 ref->duration = dur_ref;
951 aux->duration = dur_aux;
952 }
953
954 struct rtw89_mcc_mod_dur_data {
955 u16 available;
956 struct {
957 u16 dur;
958 u16 room;
959 } parm[NUM_OF_RTW89_MCC_ROLES];
960 };
961
962 static int rtw89_mcc_mod_dur_get_iterator(struct rtw89_dev *rtwdev,
963 struct rtw89_mcc_role *mcc_role,
964 unsigned int ordered_idx,
965 void *data)
966 {
967 struct rtw89_mcc_mod_dur_data *p = data;
968 u16 min;
969
970 p->parm[ordered_idx].dur = mcc_role->duration;
971
972 if (mcc_role->is_go)
973 min = RTW89_MCC_MIN_GO_DURATION;
974 else
975 min = RTW89_MCC_MIN_STA_DURATION;
976
977 p->parm[ordered_idx].room = max_t(s32, p->parm[ordered_idx].dur - min, 0);
978
979 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
980 "MCC mod dur: chk role[%u]: dur %u, min %u, room %u\n",
981 ordered_idx, p->parm[ordered_idx].dur, min,
982 p->parm[ordered_idx].room);
983
984 p->available += p->parm[ordered_idx].room;
985 return 0;
986 }
987
988 static int rtw89_mcc_mod_dur_put_iterator(struct rtw89_dev *rtwdev,
989 struct rtw89_mcc_role *mcc_role,
990 unsigned int ordered_idx,
991 void *data)
992 {
993 struct rtw89_mcc_mod_dur_data *p = data;
994
995 mcc_role->duration = p->parm[ordered_idx].dur;
996
997 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
998 "MCC mod dur: set role[%u]: dur %u\n",
999 ordered_idx, p->parm[ordered_idx].dur);
1000 return 0;
1001 }
1002
1003 static void rtw89_mcc_mod_duration_dual_2ghz_with_bt(struct rtw89_dev *rtwdev)
1004 {
1005 struct rtw89_mcc_info *mcc = &rtwdev->mcc;
1006 struct rtw89_mcc_config *config = &mcc->config;
1007 struct rtw89_mcc_mod_dur_data data = {};
1008 u16 mcc_intvl = config->mcc_interval;
1009 u16 bt_dur = mcc->bt_role.duration;
1010 u16 wifi_dur;
1011
1012 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
1013 "MCC mod dur (dual 2ghz): mcc_intvl %u, raw bt_dur %u\n",
1014 mcc_intvl, bt_dur);
1015
1016 rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_mod_dur_get_iterator, &data);
1017
1018 bt_dur = clamp_t(u16, bt_dur, 1, data.available / 3);
1019 wifi_dur = mcc_intvl - bt_dur;
1020
1021 if (data.parm[0].room <= data.parm[1].room) {
1022 data.parm[0].dur -= min_t(u16, bt_dur / 2, data.parm[0].room);
1023 data.parm[1].dur = wifi_dur - data.parm[0].dur;
1024 } else {
1025 data.parm[1].dur -= min_t(u16, bt_dur / 2, data.parm[1].room);
1026 data.parm[0].dur = wifi_dur - data.parm[1].dur;
1027 }
1028
1029 rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_mod_dur_put_iterator, &data);
1030
1031 rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC mod dur: set bt: dur %u\n", bt_dur);
1032 mcc->bt_role.duration = bt_dur;
1033 }
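/* Illustrative example with assumed room values (not part of the original
 * source): suppose mcc_intvl = 100 TU, both roles currently get 50 TU, and
 * the get-iterator reports rooms of 40 and 35 TU. available = 75, so a raw
 * BT request of 30 TU is clamped to 75 / 3 = 25 and wifi_dur = 75. role[1]
 * has the smaller room, so it gives up min(25 / 2, 35) = 12 TU and ends at
 * 38 TU, while role[0] takes the remaining 75 - 38 = 37 TU.
 */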
1034
1035 static
1036 void rtw89_mcc_mod_duration_diff_band_with_bt(struct rtw89_dev *rtwdev,
1037 struct rtw89_mcc_role *role_2ghz,
1038 struct rtw89_mcc_role *role_non_2ghz)
1039 {
1040 struct rtw89_mcc_info *mcc = &rtwdev->mcc;
1041 struct rtw89_mcc_config *config = &mcc->config;
1042 u16 dur_2ghz, dur_non_2ghz;
1043 u16 bt_dur, mcc_intvl;
1044
1045 dur_2ghz = role_2ghz->duration;
1046 dur_non_2ghz = role_non_2ghz->duration;
1047 mcc_intvl = config->mcc_interval;
1048 bt_dur = mcc->bt_role.duration;
1049
1050 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
1051 "MCC mod dur (diff band): mcc_intvl %u, bt_dur %u\n",
1052 mcc_intvl, bt_dur);
1053
1054 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
1055 "MCC mod dur: check dur_2ghz %u, dur_non_2ghz %u\n",
1056 dur_2ghz, dur_non_2ghz);
1057
1058 if (dur_non_2ghz >= bt_dur) {
1059 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
1060 "MCC mod dur: dur_non_2ghz is enough for bt\n");
1061 return;
1062 }
1063
1064 dur_non_2ghz = bt_dur;
1065 dur_2ghz = mcc_intvl - dur_non_2ghz;
1066
1067 if (role_non_2ghz->limit.enable) {
1068 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
1069 "MCC mod dur: dur_non_2ghz is limited with max %u\n",
1070 role_non_2ghz->limit.max_dur);
1071
1072 dur_non_2ghz = min(dur_non_2ghz, role_non_2ghz->limit.max_dur);
1073 dur_2ghz = mcc_intvl - dur_non_2ghz;
1074 }
1075
1076 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
1077 "MCC mod dur: set dur_2ghz %u, dur_non_2ghz %u\n",
1078 dur_2ghz, dur_non_2ghz);
1079
1080 role_2ghz->duration = dur_2ghz;
1081 role_non_2ghz->duration = dur_non_2ghz;
1082 }
1083
1084 static bool rtw89_mcc_duration_decision_on_bt(struct rtw89_dev *rtwdev)
1085 {
1086 struct rtw89_mcc_info *mcc = &rtwdev->mcc;
1087 struct rtw89_mcc_role *ref = &mcc->role_ref;
1088 struct rtw89_mcc_role *aux = &mcc->role_aux;
1089 struct rtw89_mcc_bt_role *bt_role = &mcc->bt_role;
1090
1091 if (!bt_role->duration)
1092 return false;
1093
1094 if (ref->is_2ghz && aux->is_2ghz) {
1095 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
1096 "MCC dual roles are on 2GHz; consider BT duration\n");
1097
1098 rtw89_mcc_mod_duration_dual_2ghz_with_bt(rtwdev);
1099 return true;
1100 }
1101
1102 if (!ref->is_2ghz && !aux->is_2ghz) {
1103 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
1104 "MCC dual roles are not on 2GHz; ignore BT duration\n");
1105 return false;
1106 }
1107
1108 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
1109 "MCC one role is on 2GHz; modify another for BT duration\n");
1110
1111 if (ref->is_2ghz)
1112 rtw89_mcc_mod_duration_diff_band_with_bt(rtwdev, ref, aux);
1113 else
1114 rtw89_mcc_mod_duration_diff_band_with_bt(rtwdev, aux, ref);
1115
1116 return false;
1117 }
1118
1119 static void rtw89_mcc_sync_tbtt(struct rtw89_dev *rtwdev,
1120 struct rtw89_mcc_role *tgt,
1121 struct rtw89_mcc_role *src,
1122 bool ref_is_src)
1123 {
1124 struct rtw89_mcc_info *mcc = &rtwdev->mcc;
1125 struct rtw89_mcc_config *config = &mcc->config;
1126 u16 beacon_offset_us = ieee80211_tu_to_usec(config->beacon_offset);
1127 u32 bcn_intvl_src_us = ieee80211_tu_to_usec(src->beacon_interval);
1128 u32 cur_tbtt_ofst_src;
1129 u32 tsf_ofst_tgt;
1130 u32 remainder;
1131 u64 tbtt_tgt;
1132 u64 tsf_src;
1133 int ret;
1134
1135 ret = rtw89_mac_port_get_tsf(rtwdev, src->rtwvif, &tsf_src);
1136 if (ret) {
1137 rtw89_warn(rtwdev, "MCC failed to get port tsf: %d\n", ret);
1138 return;
1139 }
1140
1141 cur_tbtt_ofst_src = rtw89_mcc_get_tbtt_ofst(rtwdev, src, tsf_src);
1142
1143 if (ref_is_src)
1144 tbtt_tgt = tsf_src - cur_tbtt_ofst_src + beacon_offset_us;
1145 else
1146 tbtt_tgt = tsf_src - cur_tbtt_ofst_src +
1147 (bcn_intvl_src_us - beacon_offset_us);
1148
1149 div_u64_rem(tbtt_tgt, bcn_intvl_src_us, &remainder);
1150 tsf_ofst_tgt = bcn_intvl_src_us - remainder;
1151
1152 config->sync.macid_tgt = tgt->rtwvif->mac_id;
1153 config->sync.macid_src = src->rtwvif->mac_id;
1154 config->sync.offset = tsf_ofst_tgt / 1024;
1155 config->sync.enable = true;
1156
1157 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
1158 "MCC sync tbtt: tgt %d, src %d, offset %d\n",
1159 config->sync.macid_tgt, config->sync.macid_src,
1160 config->sync.offset);
1161
1162 rtw89_mac_port_tsf_sync(rtwdev, tgt->rtwvif, src->rtwvif,
1163 config->sync.offset);
1164 }
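/* Illustrative example (not part of the original source), with ref as src:
 * given bcn_intvl_src_us = 102400 (100 TU), beacon_offset_us = 40960 (40 TU),
 * tsf_src = 1024000 and cur_tbtt_ofst_src = 51200, then
 * tbtt_tgt = 1024000 - 51200 + 40960 = 1013760, the remainder modulo 102400
 * is 92160, so tsf_ofst_tgt = 102400 - 92160 = 10240 us and the programmed
 * sync offset is 10240 / 1024 = 10 TU.
 */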
1165
1166 static int rtw89_mcc_fill_start_tsf(struct rtw89_dev *rtwdev)
1167 {
1168 struct rtw89_mcc_info *mcc = &rtwdev->mcc;
1169 struct rtw89_mcc_role *ref = &mcc->role_ref;
1170 struct rtw89_mcc_config *config = &mcc->config;
1171 u32 bcn_intvl_ref_us = ieee80211_tu_to_usec(ref->beacon_interval);
1172 u32 tob_ref_us = ieee80211_tu_to_usec(config->pattern.tob_ref);
1173 struct rtw89_vif *rtwvif = ref->rtwvif;
1174 u64 tsf, start_tsf;
1175 u32 cur_tbtt_ofst;
1176 u64 min_time;
1177 int ret;
1178
1179 ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif, &tsf);
1180 if (ret) {
1181 rtw89_warn(rtwdev, "MCC failed to get port tsf: %d\n", ret);
1182 return ret;
1183 }
1184
1185 min_time = tsf;
1186 if (ref->is_go)
1187 min_time += ieee80211_tu_to_usec(RTW89_MCC_SHORT_TRIGGER_TIME);
1188 else
1189 min_time += ieee80211_tu_to_usec(RTW89_MCC_LONG_TRIGGER_TIME);
1190
1191 cur_tbtt_ofst = rtw89_mcc_get_tbtt_ofst(rtwdev, ref, tsf);
1192 start_tsf = tsf - cur_tbtt_ofst + bcn_intvl_ref_us - tob_ref_us;
1193 while (start_tsf < min_time)
1194 start_tsf += bcn_intvl_ref_us;
1195
1196 config->start_tsf = start_tsf;
1197 return 0;
1198 }
1199
1200 static int rtw89_mcc_fill_config(struct rtw89_dev *rtwdev)
1201 {
1202 struct rtw89_mcc_info *mcc = &rtwdev->mcc;
1203 struct rtw89_mcc_role *ref = &mcc->role_ref;
1204 struct rtw89_mcc_role *aux = &mcc->role_aux;
1205 struct rtw89_mcc_config *config = &mcc->config;
1206 bool hdl_bt;
1207 int ret;
1208
1209 memset(config, 0, sizeof(*config));
1210
1211 switch (mcc->mode) {
1212 case RTW89_MCC_MODE_GO_STA:
1213 config->beacon_offset = RTW89_MCC_DFLT_BCN_OFST_TIME;
1214 if (ref->is_go) {
1215 rtw89_mcc_sync_tbtt(rtwdev, ref, aux, false);
1216 config->mcc_interval = ref->beacon_interval;
1217 rtw89_mcc_set_duration_go_sta(rtwdev, ref, aux);
1218 } else {
1219 rtw89_mcc_sync_tbtt(rtwdev, aux, ref, true);
1220 config->mcc_interval = aux->beacon_interval;
1221 rtw89_mcc_set_duration_go_sta(rtwdev, aux, ref);
1222 }
1223 break;
1224 case RTW89_MCC_MODE_GC_STA:
1225 config->beacon_offset = rtw89_mcc_get_bcn_ofst(rtwdev);
1226 config->mcc_interval = ref->beacon_interval;
1227 rtw89_mcc_set_duration_gc_sta(rtwdev);
1228 break;
1229 default:
1230 rtw89_warn(rtwdev, "MCC unknown mode: %d\n", mcc->mode);
1231 return -EFAULT;
1232 }
1233
1234 hdl_bt = rtw89_mcc_duration_decision_on_bt(rtwdev);
1235 rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC handle bt: %d\n", hdl_bt);
1236
1237 ret = rtw89_mcc_calc_pattern(rtwdev, hdl_bt);
1238 if (!ret)
1239 goto bottom;
1240
1241 rtw89_mcc_set_default_pattern(rtwdev);
1242
1243 bottom:
1244 return rtw89_mcc_fill_start_tsf(rtwdev);
1245 }
1246
1247 static int __mcc_fw_add_role(struct rtw89_dev *rtwdev, struct rtw89_mcc_role *role)
1248 {
1249 struct rtw89_mcc_info *mcc = &rtwdev->mcc;
1250 struct rtw89_mcc_config *config = &mcc->config;
1251 struct rtw89_mcc_pattern *pattern = &config->pattern;
1252 struct rtw89_mcc_courtesy *courtesy = &pattern->courtesy;
1253 struct rtw89_mcc_policy *policy = &role->policy;
1254 struct rtw89_fw_mcc_add_req req = {};
1255 const struct rtw89_chan *chan;
1256 int ret;
1257
1258 chan = rtw89_chan_get(rtwdev, role->rtwvif->sub_entity_idx);
1259 req.central_ch_seg0 = chan->channel;
1260 req.primary_ch = chan->primary_channel;
1261 req.bandwidth = chan->band_width;
1262 req.ch_band_type = chan->band_type;
1263
1264 req.macid = role->rtwvif->mac_id;
1265 req.group = mcc->group;
1266 req.c2h_rpt = policy->c2h_rpt;
1267 req.tx_null_early = policy->tx_null_early;
1268 req.dis_tx_null = policy->dis_tx_null;
1269 req.in_curr_ch = policy->in_curr_ch;
1270 req.sw_retry_count = policy->sw_retry_count;
1271 req.dis_sw_retry = policy->dis_sw_retry;
1272 req.duration = role->duration;
1273 req.btc_in_2g = false;
1274
1275 if (courtesy->enable && courtesy->macid_src == req.macid) {
1276 req.courtesy_target = courtesy->macid_tgt;
1277 req.courtesy_num = courtesy->slot_num;
1278 req.courtesy_en = true;
1279 }
1280
1281 ret = rtw89_fw_h2c_add_mcc(rtwdev, &req);
1282 if (ret) {
1283 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
1284 "MCC h2c failed to add wifi role: %d\n", ret);
1285 return ret;
1286 }
1287
1288 ret = rtw89_fw_h2c_mcc_macid_bitmap(rtwdev, mcc->group,
1289 role->rtwvif->mac_id,
1290 role->macid_bitmap);
1291 if (ret) {
1292 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
1293 "MCC h2c failed to set macid bitmap: %d\n", ret);
1294 return ret;
1295 }
1296
1297 return 0;
1298 }
1299
1300 static int __mcc_fw_add_bt_role(struct rtw89_dev *rtwdev)
1301 {
1302 struct rtw89_mcc_info *mcc = &rtwdev->mcc;
1303 struct rtw89_mcc_bt_role *bt_role = &mcc->bt_role;
1304 struct rtw89_fw_mcc_add_req req = {};
1305 int ret;
1306
1307 req.group = mcc->group;
1308 req.duration = bt_role->duration;
1309 req.btc_in_2g = true;
1310
1311 ret = rtw89_fw_h2c_add_mcc(rtwdev, &req);
1312 if (ret) {
1313 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
1314 "MCC h2c failed to add bt role: %d\n", ret);
1315 return ret;
1316 }
1317
1318 return 0;
1319 }
1320
1321 static int __mcc_fw_start(struct rtw89_dev *rtwdev, bool replace)
1322 {
1323 struct rtw89_mcc_info *mcc = &rtwdev->mcc;
1324 struct rtw89_mcc_role *ref = &mcc->role_ref;
1325 struct rtw89_mcc_role *aux = &mcc->role_aux;
1326 struct rtw89_mcc_config *config = &mcc->config;
1327 struct rtw89_mcc_pattern *pattern = &config->pattern;
1328 struct rtw89_mcc_sync *sync = &config->sync;
1329 struct rtw89_fw_mcc_start_req req = {};
1330 int ret;
1331
1332 if (replace) {
1333 req.old_group = mcc->group;
1334 req.old_group_action = RTW89_FW_MCC_OLD_GROUP_ACT_REPLACE;
1335 mcc->group = RTW89_MCC_NEXT_GROUP(mcc->group);
1336 }
1337
1338 req.group = mcc->group;
1339
1340 switch (pattern->plan) {
1341 case RTW89_MCC_PLAN_TAIL_BT:
1342 ret = __mcc_fw_add_role(rtwdev, ref);
1343 if (ret)
1344 return ret;
1345 ret = __mcc_fw_add_role(rtwdev, aux);
1346 if (ret)
1347 return ret;
1348 ret = __mcc_fw_add_bt_role(rtwdev);
1349 if (ret)
1350 return ret;
1351
1352 req.btc_in_group = true;
1353 break;
1354 case RTW89_MCC_PLAN_MID_BT:
1355 ret = __mcc_fw_add_role(rtwdev, ref);
1356 if (ret)
1357 return ret;
1358 ret = __mcc_fw_add_bt_role(rtwdev);
1359 if (ret)
1360 return ret;
1361 ret = __mcc_fw_add_role(rtwdev, aux);
1362 if (ret)
1363 return ret;
1364
1365 req.btc_in_group = true;
1366 break;
1367 case RTW89_MCC_PLAN_NO_BT:
1368 ret = __mcc_fw_add_role(rtwdev, ref);
1369 if (ret)
1370 return ret;
1371 ret = __mcc_fw_add_role(rtwdev, aux);
1372 if (ret)
1373 return ret;
1374
1375 req.btc_in_group = false;
1376 break;
1377 default:
1378 rtw89_warn(rtwdev, "MCC unknown plan: %d\n", pattern->plan);
1379 return -EFAULT;
1380 }
1381
1382 if (sync->enable) {
1383 ret = rtw89_fw_h2c_mcc_sync(rtwdev, req.group, sync->macid_src,
1384 sync->macid_tgt, sync->offset);
1385 if (ret) {
1386 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
1387 "MCC h2c failed to trigger sync: %d\n", ret);
1388 return ret;
1389 }
1390 }
1391
1392 req.macid = ref->rtwvif->mac_id;
1393 req.tsf_high = config->start_tsf >> 32;
1394 req.tsf_low = config->start_tsf;
1395
1396 ret = rtw89_fw_h2c_start_mcc(rtwdev, &req);
1397 if (ret) {
1398 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
1399 "MCC h2c failed to trigger start: %d\n", ret);
1400 return ret;
1401 }
1402
1403 return 0;
1404 }
1405
1406 static int __mcc_fw_set_duration_no_bt(struct rtw89_dev *rtwdev, bool sync_changed)
1407 {
1408 struct rtw89_mcc_info *mcc = &rtwdev->mcc;
1409 struct rtw89_mcc_config *config = &mcc->config;
1410 struct rtw89_mcc_sync *sync = &config->sync;
1411 struct rtw89_mcc_role *ref = &mcc->role_ref;
1412 struct rtw89_mcc_role *aux = &mcc->role_aux;
1413 struct rtw89_fw_mcc_duration req = {
1414 .group = mcc->group,
1415 .btc_in_group = false,
1416 .start_macid = ref->rtwvif->mac_id,
1417 .macid_x = ref->rtwvif->mac_id,
1418 .macid_y = aux->rtwvif->mac_id,
1419 .duration_x = ref->duration,
1420 .duration_y = aux->duration,
1421 .start_tsf_high = config->start_tsf >> 32,
1422 .start_tsf_low = config->start_tsf,
1423 };
1424 int ret;
1425
1426 ret = rtw89_fw_h2c_mcc_set_duration(rtwdev, &req);
1427 if (ret) {
1428 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
1429 "MCC h2c failed to set duration: %d\n", ret);
1430 return ret;
1431 }
1432
1433 if (!sync->enable || !sync_changed)
1434 return 0;
1435
1436 ret = rtw89_fw_h2c_mcc_sync(rtwdev, mcc->group, sync->macid_src,
1437 sync->macid_tgt, sync->offset);
1438 if (ret) {
1439 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
1440 "MCC h2c failed to trigger sync: %d\n", ret);
1441 return ret;
1442 }
1443
1444 return 0;
1445 }
1446
1447 static void rtw89_mcc_handle_beacon_noa(struct rtw89_dev *rtwdev, bool enable)
1448 {
1449 struct rtw89_mcc_info *mcc = &rtwdev->mcc;
1450 struct rtw89_mcc_role *ref = &mcc->role_ref;
1451 struct rtw89_mcc_role *aux = &mcc->role_aux;
1452 struct rtw89_mcc_config *config = &mcc->config;
1453 struct rtw89_mcc_pattern *pattern = &config->pattern;
1454 struct rtw89_mcc_sync *sync = &config->sync;
1455 struct ieee80211_p2p_noa_desc noa_desc = {};
1456 u64 start_time = config->start_tsf;
1457 u32 interval = config->mcc_interval;
1458 struct rtw89_vif *rtwvif_go;
1459 u32 duration;
1460
1461 if (mcc->mode != RTW89_MCC_MODE_GO_STA)
1462 return;
1463
1464 if (ref->is_go) {
1465 rtwvif_go = ref->rtwvif;
1466 start_time += ieee80211_tu_to_usec(ref->duration);
1467 duration = config->mcc_interval - ref->duration;
1468 } else if (aux->is_go) {
1469 rtwvif_go = aux->rtwvif;
1470 start_time += ieee80211_tu_to_usec(pattern->tob_ref) +
1471 ieee80211_tu_to_usec(config->beacon_offset) +
1472 ieee80211_tu_to_usec(pattern->toa_aux);
1473 duration = config->mcc_interval - aux->duration;
1474
1475 /* convert the time domain from STA (ref) to GO (aux) */
1476 start_time += ieee80211_tu_to_usec(sync->offset);
1477 } else {
1478 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
1479 "MCC find no GO: skip updating beacon NoA\n");
1480 return;
1481 }
1482
1483 rtw89_p2p_noa_renew(rtwvif_go);
1484
1485 if (enable) {
1486 noa_desc.start_time = cpu_to_le32(start_time);
1487 noa_desc.interval = cpu_to_le32(ieee80211_tu_to_usec(interval));
1488 noa_desc.duration = cpu_to_le32(ieee80211_tu_to_usec(duration));
1489 noa_desc.count = 255;
1490 rtw89_p2p_noa_append(rtwvif_go, &noa_desc);
1491 }
1492
1493 /* without a chanctx, we cannot get a beacon from the mac80211 stack */
1494 if (!rtwvif_go->chanctx_assigned)
1495 return;
1496
1497 rtw89_fw_h2c_update_beacon(rtwdev, rtwvif_go);
1498 }
1499
1500 static void rtw89_mcc_start_beacon_noa(struct rtw89_dev *rtwdev)
1501 {
1502 struct rtw89_mcc_info *mcc = &rtwdev->mcc;
1503 struct rtw89_mcc_role *ref = &mcc->role_ref;
1504 struct rtw89_mcc_role *aux = &mcc->role_aux;
1505
1506 if (mcc->mode != RTW89_MCC_MODE_GO_STA)
1507 return;
1508
1509 if (ref->is_go)
1510 rtw89_fw_h2c_tsf32_toggle(rtwdev, ref->rtwvif, true);
1511 else if (aux->is_go)
1512 rtw89_fw_h2c_tsf32_toggle(rtwdev, aux->rtwvif, true);
1513
1514 rtw89_mcc_handle_beacon_noa(rtwdev, true);
1515 }
1516
1517 static void rtw89_mcc_stop_beacon_noa(struct rtw89_dev *rtwdev)
1518 {
1519 struct rtw89_mcc_info *mcc = &rtwdev->mcc;
1520 struct rtw89_mcc_role *ref = &mcc->role_ref;
1521 struct rtw89_mcc_role *aux = &mcc->role_aux;
1522
1523 if (mcc->mode != RTW89_MCC_MODE_GO_STA)
1524 return;
1525
1526 if (ref->is_go)
1527 rtw89_fw_h2c_tsf32_toggle(rtwdev, ref->rtwvif, false);
1528 else if (aux->is_go)
1529 rtw89_fw_h2c_tsf32_toggle(rtwdev, aux->rtwvif, false);
1530
1531 rtw89_mcc_handle_beacon_noa(rtwdev, false);
1532 }
1533
1534 static int rtw89_mcc_start(struct rtw89_dev *rtwdev)
1535 {
1536 struct rtw89_mcc_info *mcc = &rtwdev->mcc;
1537 struct rtw89_mcc_role *ref = &mcc->role_ref;
1538 struct rtw89_mcc_role *aux = &mcc->role_aux;
1539 int ret;
1540
1541 if (rtwdev->scanning)
1542 rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif);
1543
1544 rtw89_leave_lps(rtwdev);
1545
1546 rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC start\n");
1547
1548 ret = rtw89_mcc_fill_all_roles(rtwdev);
1549 if (ret)
1550 return ret;
1551
1552 if (ref->is_go || aux->is_go)
1553 mcc->mode = RTW89_MCC_MODE_GO_STA;
1554 else
1555 mcc->mode = RTW89_MCC_MODE_GC_STA;
1556
1557 rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC sel mode: %d\n", mcc->mode);
1558
1559 mcc->group = RTW89_MCC_DFLT_GROUP;
1560
1561 ret = rtw89_mcc_fill_config(rtwdev);
1562 if (ret)
1563 return ret;
1564
1565 ret = __mcc_fw_start(rtwdev, false);
1566 if (ret)
1567 return ret;
1568
1569 rtw89_chanctx_notify(rtwdev, RTW89_CHANCTX_STATE_MCC_START);
1570
1571 rtw89_mcc_start_beacon_noa(rtwdev);
1572 return 0;
1573 }
1574
1575 static void rtw89_mcc_stop(struct rtw89_dev *rtwdev)
1576 {
1577 struct rtw89_mcc_info *mcc = &rtwdev->mcc;
1578 struct rtw89_mcc_role *ref = &mcc->role_ref;
1579 int ret;
1580
1581 rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC stop\n");
1582
1583 ret = rtw89_fw_h2c_stop_mcc(rtwdev, mcc->group,
1584 ref->rtwvif->mac_id, true);
1585 if (ret)
1586 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
1587 "MCC h2c failed to trigger stop: %d\n", ret);
1588
1589 ret = rtw89_fw_h2c_del_mcc_group(rtwdev, mcc->group, true);
1590 if (ret)
1591 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
1592 "MCC h2c failed to delete group: %d\n", ret);
1593
1594 rtw89_chanctx_notify(rtwdev, RTW89_CHANCTX_STATE_MCC_STOP);
1595
1596 rtw89_mcc_stop_beacon_noa(rtwdev);
1597 }
1598
1599 static int rtw89_mcc_update(struct rtw89_dev *rtwdev)
1600 {
1601 struct rtw89_mcc_info *mcc = &rtwdev->mcc;
1602 struct rtw89_mcc_config *config = &mcc->config;
1603 struct rtw89_mcc_config old_cfg = *config;
1604 bool sync_changed;
1605 int ret;
1606
1607 if (rtwdev->scanning)
1608 rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif);
1609
1610 rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC update\n");
1611
1612 ret = rtw89_mcc_fill_config(rtwdev);
1613 if (ret)
1614 return ret;
1615
1616 if (old_cfg.pattern.plan != RTW89_MCC_PLAN_NO_BT ||
1617 config->pattern.plan != RTW89_MCC_PLAN_NO_BT) {
1618 ret = __mcc_fw_start(rtwdev, true);
1619 if (ret)
1620 return ret;
1621 } else {
1622 if (memcmp(&old_cfg.sync, &config->sync, sizeof(old_cfg.sync)) == 0)
1623 sync_changed = false;
1624 else
1625 sync_changed = true;
1626
1627 ret = __mcc_fw_set_duration_no_bt(rtwdev, sync_changed);
1628 if (ret)
1629 return ret;
1630 }
1631
1632 rtw89_mcc_handle_beacon_noa(rtwdev, true);
1633 return 0;
1634 }
1635
1636 static void rtw89_mcc_track(struct rtw89_dev *rtwdev)
1637 {
1638 struct rtw89_mcc_info *mcc = &rtwdev->mcc;
1639 struct rtw89_mcc_config *config = &mcc->config;
1640 struct rtw89_mcc_pattern *pattern = &config->pattern;
1641 s16 tolerance;
1642 u16 bcn_ofst;
1643 u16 diff;
1644
1645 if (mcc->mode != RTW89_MCC_MODE_GC_STA)
1646 return;
1647
1648 bcn_ofst = rtw89_mcc_get_bcn_ofst(rtwdev);
1649 if (bcn_ofst > config->beacon_offset) {
1650 diff = bcn_ofst - config->beacon_offset;
1651 if (pattern->tob_aux < 0)
1652 tolerance = -pattern->tob_aux;
1653 else
1654 tolerance = pattern->toa_aux;
1655 } else {
1656 diff = config->beacon_offset - bcn_ofst;
1657 if (pattern->toa_aux < 0)
1658 tolerance = -pattern->toa_aux;
1659 else
1660 tolerance = pattern->tob_aux;
1661 }
1662
1663 if (diff <= tolerance)
1664 return;
1665
1666 rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_BCN_OFFSET_CHANGE);
1667 }
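/* Illustrative example (not part of the original source): if the configured
 * beacon offset is 60 TU, the measured one is 70 TU and toa_aux = 8 TU
 * (with tob_aux positive), then diff = 10 exceeds the 8 TU tolerance and a
 * RTW89_CHANCTX_BCN_OFFSET_CHANGE update is queued.
 */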
1668
1669 static int rtw89_mcc_upd_map_iterator(struct rtw89_dev *rtwdev,
1670 struct rtw89_mcc_role *mcc_role,
1671 unsigned int ordered_idx,
1672 void *data)
1673 {
1674 struct rtw89_mcc_info *mcc = &rtwdev->mcc;
1675 struct rtw89_mcc_role upd = {
1676 .rtwvif = mcc_role->rtwvif,
1677 };
1678 int ret;
1679
1680 if (!mcc_role->is_go)
1681 return 0;
1682
1683 rtw89_mcc_fill_role_macid_bitmap(rtwdev, &upd);
1684 if (memcmp(mcc_role->macid_bitmap, upd.macid_bitmap,
1685 sizeof(mcc_role->macid_bitmap)) == 0)
1686 return 0;
1687
1688 ret = rtw89_fw_h2c_mcc_macid_bitmap(rtwdev, mcc->group,
1689 upd.rtwvif->mac_id,
1690 upd.macid_bitmap);
1691 if (ret) {
1692 rtw89_debug(rtwdev, RTW89_DBG_CHAN,
1693 "MCC h2c failed to update macid bitmap: %d\n", ret);
1694 return ret;
1695 }
1696
1697 memcpy(mcc_role->macid_bitmap, upd.macid_bitmap,
1698 sizeof(mcc_role->macid_bitmap));
1699 return 0;
1700 }
1701
1702 static void rtw89_mcc_update_macid_bitmap(struct rtw89_dev *rtwdev)
1703 {
1704 struct rtw89_mcc_info *mcc = &rtwdev->mcc;
1705
1706 if (mcc->mode != RTW89_MCC_MODE_GO_STA)
1707 return;
1708
1709 rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_upd_map_iterator, NULL);
1710 }
1711
1712 static int rtw89_mcc_upd_lmt_iterator(struct rtw89_dev *rtwdev,
1713 struct rtw89_mcc_role *mcc_role,
1714 unsigned int ordered_idx,
1715 void *data)
1716 {
1717 memset(&mcc_role->limit, 0, sizeof(mcc_role->limit));
1718 rtw89_mcc_fill_role_limit(rtwdev, mcc_role);
1719 return 0;
1720 }
1721
1722 static void rtw89_mcc_update_limit(struct rtw89_dev *rtwdev)
1723 {
1724 struct rtw89_mcc_info *mcc = &rtwdev->mcc;
1725
1726 if (mcc->mode != RTW89_MCC_MODE_GC_STA)
1727 return;
1728
1729 rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_upd_lmt_iterator, NULL);
1730 }
1731
1732 void rtw89_chanctx_work(struct work_struct *work)
1733 {
1734 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
1735 chanctx_work.work);
1736 struct rtw89_hal *hal = &rtwdev->hal;
1737 bool update_mcc_pattern = false;
1738 enum rtw89_entity_mode mode;
1739 u32 changed = 0;
1740 int ret;
1741 int i;
1742
1743 mutex_lock(&rtwdev->mutex);
1744
1745 if (hal->entity_pause) {
1746 mutex_unlock(&rtwdev->mutex);
1747 return;
1748 }
1749
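        /* Latch and clear every change bit queued since the last run. */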
        for (i = 0; i < NUM_OF_RTW89_CHANCTX_CHANGES; i++) {
                if (test_and_clear_bit(i, hal->changes))
                        changed |= BIT(i);
        }

        mode = rtw89_get_entity_mode(rtwdev);
        switch (mode) {
        case RTW89_ENTITY_MODE_MCC_PREPARE:
                rtw89_set_entity_mode(rtwdev, RTW89_ENTITY_MODE_MCC);
                rtw89_set_channel(rtwdev);

                ret = rtw89_mcc_start(rtwdev);
                if (ret)
                        rtw89_warn(rtwdev, "failed to start MCC: %d\n", ret);
                break;
        case RTW89_ENTITY_MODE_MCC:
                if (changed & BIT(RTW89_CHANCTX_BCN_OFFSET_CHANGE) ||
                    changed & BIT(RTW89_CHANCTX_P2P_PS_CHANGE) ||
                    changed & BIT(RTW89_CHANCTX_BT_SLOT_CHANGE) ||
                    changed & BIT(RTW89_CHANCTX_TSF32_TOGGLE_CHANGE))
                        update_mcc_pattern = true;
                if (changed & BIT(RTW89_CHANCTX_REMOTE_STA_CHANGE))
                        rtw89_mcc_update_macid_bitmap(rtwdev);
                if (changed & BIT(RTW89_CHANCTX_P2P_PS_CHANGE))
                        rtw89_mcc_update_limit(rtwdev);
                if (changed & BIT(RTW89_CHANCTX_BT_SLOT_CHANGE))
                        rtw89_mcc_fill_bt_role(rtwdev);
                if (update_mcc_pattern) {
                        ret = rtw89_mcc_update(rtwdev);
                        if (ret)
                                rtw89_warn(rtwdev, "failed to update MCC: %d\n",
                                           ret);
                }
                break;
        default:
                break;
        }

        mutex_unlock(&rtwdev->mutex);
}

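/* Record a pending chanctx change (unless it is the default "no specific
 * change") and (re)arm the delayed chanctx work; the delay depends on the
 * current entity mode, and nothing is queued in other modes.
 */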
void rtw89_queue_chanctx_change(struct rtw89_dev *rtwdev,
                                enum rtw89_chanctx_changes change)
{
        struct rtw89_hal *hal = &rtwdev->hal;
        enum rtw89_entity_mode mode;
        u32 delay;

        mode = rtw89_get_entity_mode(rtwdev);
        switch (mode) {
        default:
                return;
        case RTW89_ENTITY_MODE_MCC_PREPARE:
                delay = ieee80211_tu_to_usec(RTW89_CHANCTX_TIME_MCC_PREPARE);
                break;
        case RTW89_ENTITY_MODE_MCC:
                delay = ieee80211_tu_to_usec(RTW89_CHANCTX_TIME_MCC);
                break;
        }

        if (change != RTW89_CHANCTX_CHANGE_DFLT) {
                rtw89_debug(rtwdev, RTW89_DBG_CHAN, "set chanctx change %d\n",
                            change);
                set_bit(change, hal->changes);
        }

        rtw89_debug(rtwdev, RTW89_DBG_CHAN,
                    "queue chanctx work for mode %d with delay %d us\n",
                    mode, delay);
        ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->chanctx_work,
                                     usecs_to_jiffies(delay));
}

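/* Kick the chanctx work without recording a specific change. */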
void rtw89_queue_chanctx_work(struct rtw89_dev *rtwdev)
{
        rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_CHANGE_DFLT);
}

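/* Track hook, called with rtwdev->mutex held; only MCC currently needs
 * runtime tracking.
 */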
void rtw89_chanctx_track(struct rtw89_dev *rtwdev)
{
        struct rtw89_hal *hal = &rtwdev->hal;
        enum rtw89_entity_mode mode;

        lockdep_assert_held(&rtwdev->mutex);

        if (hal->entity_pause)
                return;

        mode = rtw89_get_entity_mode(rtwdev);
        switch (mode) {
        case RTW89_ENTITY_MODE_MCC:
                rtw89_mcc_track(rtwdev);
                break;
        default:
                break;
        }
}

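/* Temporarily suspend chanctx handling (e.g. for ROC or HW scan): stop a
 * running MCC and mark the entity paused so track/work become no-ops.
 */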
void rtw89_chanctx_pause(struct rtw89_dev *rtwdev,
                         enum rtw89_chanctx_pause_reasons rsn)
{
        struct rtw89_hal *hal = &rtwdev->hal;
        enum rtw89_entity_mode mode;

        lockdep_assert_held(&rtwdev->mutex);

        if (hal->entity_pause)
                return;

        rtw89_debug(rtwdev, RTW89_DBG_CHAN, "chanctx pause (rsn: %d)\n", rsn);

        mode = rtw89_get_entity_mode(rtwdev);
        switch (mode) {
        case RTW89_ENTITY_MODE_MCC:
                rtw89_mcc_stop(rtwdev);
                break;
        default:
                break;
        }

        hal->entity_pause = true;
}

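/* Resume chanctx handling after a pause: reprogram the channel, restart
 * MCC if that is the active mode, and re-queue the chanctx work.
 */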
void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev)
{
        struct rtw89_hal *hal = &rtwdev->hal;
        enum rtw89_entity_mode mode;
        int ret;

        lockdep_assert_held(&rtwdev->mutex);

        if (!hal->entity_pause)
                return;

        rtw89_debug(rtwdev, RTW89_DBG_CHAN, "chanctx proceed\n");

        hal->entity_pause = false;
        rtw89_set_channel(rtwdev);

        mode = rtw89_get_entity_mode(rtwdev);
        switch (mode) {
        case RTW89_ENTITY_MODE_MCC:
                ret = rtw89_mcc_start(rtwdev);
                if (ret)
                        rtw89_warn(rtwdev, "failed to start MCC: %d\n", ret);
                break;
        default:
                break;
        }

        rtw89_queue_chanctx_work(rtwdev);
}

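/* mac80211 add_chanctx op: claim the first free sub-entity slot (bounded by
 * the chip's supported chanctx count), bind the context to it and apply the
 * new channel definition.
 */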
int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev,
                          struct ieee80211_chanctx_conf *ctx)
{
        struct rtw89_hal *hal = &rtwdev->hal;
        struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
        const struct rtw89_chip_info *chip = rtwdev->chip;
        u8 idx;

        idx = find_first_zero_bit(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
        if (idx >= chip->support_chanctx_num)
                return -ENOENT;

        rtw89_config_entity_chandef(rtwdev, idx, &ctx->def);
        rtw89_set_channel(rtwdev);
        cfg->idx = idx;
        hal->sub[idx].cfg = cfg;
        return 0;
}

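/* mac80211 remove_chanctx op: release the context's sub-entity slot; if
 * RTW89_SUB_ENTITY_0 is being freed while another entity exists, roll that
 * entity down so slot 0 stays occupied.
 */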
void rtw89_chanctx_ops_remove(struct rtw89_dev *rtwdev,
                              struct ieee80211_chanctx_conf *ctx)
{
        struct rtw89_hal *hal = &rtwdev->hal;
        struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
        enum rtw89_entity_mode mode;
        struct rtw89_vif *rtwvif;
        u8 drop, roll;

        drop = cfg->idx;
        if (drop != RTW89_SUB_ENTITY_0)
                goto out;

        roll = find_next_bit(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY, drop + 1);

        /* No other entity remains; rtw89_entity_recalc() will fall back to
         * rtw89_config_default_chandef().
         */
        if (roll == NUM_OF_RTW89_SUB_ENTITY)
                goto out;

        /* RTW89_SUB_ENTITY_0 is about to be released while another entity
         * still exists. Roll that one down to RTW89_SUB_ENTITY_0 to take the
         * released slot, then drop its old index instead.
         */
        hal->sub[roll].cfg->idx = RTW89_SUB_ENTITY_0;
        hal->sub[RTW89_SUB_ENTITY_0] = hal->sub[roll];

        rtw89_for_each_rtwvif(rtwdev, rtwvif) {
                if (rtwvif->sub_entity_idx == roll)
                        rtwvif->sub_entity_idx = RTW89_SUB_ENTITY_0;
        }

        atomic_cmpxchg(&hal->roc_entity_idx, roll, RTW89_SUB_ENTITY_0);

        drop = roll;

out:
        mode = rtw89_get_entity_mode(rtwdev);
        switch (mode) {
        case RTW89_ENTITY_MODE_MCC:
                rtw89_mcc_stop(rtwdev);
                break;
        default:
                break;
        }

        clear_bit(drop, hal->entity_map);
        rtw89_set_channel(rtwdev);
}

void rtw89_chanctx_ops_change(struct rtw89_dev *rtwdev,
                              struct ieee80211_chanctx_conf *ctx,
                              u32 changed)
{
        struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
        u8 idx = cfg->idx;

        if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) {
                rtw89_config_entity_chandef(rtwdev, idx, &ctx->def);
                rtw89_set_channel(rtwdev);
        }
}

int rtw89_chanctx_ops_assign_vif(struct rtw89_dev *rtwdev,
                                 struct rtw89_vif *rtwvif,
                                 struct ieee80211_chanctx_conf *ctx)
{
        struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;

        rtwvif->sub_entity_idx = cfg->idx;
        rtwvif->chanctx_assigned = true;
        return 0;
}

void rtw89_chanctx_ops_unassign_vif(struct rtw89_dev *rtwdev,
                                    struct rtw89_vif *rtwvif,
                                    struct ieee80211_chanctx_conf *ctx)
{
        rtwvif->sub_entity_idx = RTW89_SUB_ENTITY_0;
        rtwvif->chanctx_assigned = false;
}
