1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3 * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
4 * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
5 * Copyright (C) 2015-2017 Intel Deutschland GmbH
6 */
7 #if defined(__FreeBSD__)
8 #include <linux/math64.h>
9 #endif
10 #include <net/mac80211.h>
11
12 #include "iwl-debug.h"
13 #include "iwl-io.h"
14 #include "iwl-prph.h"
15 #include "iwl-csr.h"
16 #include "mvm.h"
17 #include "fw/api/rs.h"
18 #include "fw/img.h"
19
20 /*
21 * Will return 0 even if the cmd failed when RFKILL is asserted unless
22 * CMD_WANT_SKB is set in cmd->flags.
23 */
int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
25 {
26 int ret;
27
/*
 * Synchronous commands from this op-mode must hold
 * the mutex; this ensures we don't try to send two
 * (or more) synchronous commands at a time.
 */
33 if (!(cmd->flags & CMD_ASYNC))
34 lockdep_assert_held(&mvm->mutex);
35
36 ret = iwl_trans_send_cmd(mvm->trans, cmd);
37
38 /*
39 * If the caller wants the SKB, then don't hide any problems, the
40 * caller might access the response buffer which will be NULL if
41 * the command failed.
42 */
43 if (cmd->flags & CMD_WANT_SKB)
44 return ret;
45
/*
 * Silently ignore failures if RFKILL is asserted or
 * we are in the suspend/resume process.
 */
50 if (!ret || ret == -ERFKILL || ret == -EHOSTDOWN)
51 return 0;
52 return ret;
53 }
54
int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
56 u32 flags, u16 len, const void *data)
57 {
58 struct iwl_host_cmd cmd = {
59 .id = id,
60 .len = { len, },
61 .data = { data, },
62 .flags = flags,
63 };
64
65 return iwl_mvm_send_cmd(mvm, &cmd);
66 }
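/*
 * Illustrative usage sketch (hypothetical command ID and payload struct,
 * not taken from this file): synchronous sends must run under mvm->mutex,
 * while CMD_ASYNC sends need not hold it.
 *
 *	struct iwl_some_cmd cmd = {};	// hypothetical payload
 *	int ret;
 *
 *	lockdep_assert_held(&mvm->mutex);
 *	ret = iwl_mvm_send_cmd_pdu(mvm, SOME_CMD_ID, 0, sizeof(cmd), &cmd);
 *	if (ret)
 *		IWL_ERR(mvm, "Failed to send SOME_CMD_ID: %d\n", ret);
 */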
67
/*
 * We assume that the caller has set the status to the success value.
 */
int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
72 u32 *status)
73 {
74 struct iwl_rx_packet *pkt;
75 struct iwl_cmd_response *resp;
76 int ret, resp_len;
77
78 lockdep_assert_held(&mvm->mutex);
79
/*
 * Only synchronous commands can wait for status;
 * we set CMD_WANT_SKB internally, so the caller must not.
 */
84 if (WARN_ONCE(cmd->flags & (CMD_ASYNC | CMD_WANT_SKB),
85 "cmd flags %x", cmd->flags))
86 return -EINVAL;
87
88 cmd->flags |= CMD_WANT_SKB;
89
90 ret = iwl_trans_send_cmd(mvm->trans, cmd);
91 if (ret == -ERFKILL) {
/*
 * The command failed because of RFKILL; don't update
 * the status, leave it as success, and return 0.
 */
96 return 0;
97 } else if (ret) {
98 return ret;
99 }
100
101 pkt = cmd->resp_pkt;
102
103 resp_len = iwl_rx_packet_payload_len(pkt);
104 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
105 ret = -EIO;
106 goto out_free_resp;
107 }
108
109 resp = (void *)pkt->data;
110 *status = le32_to_cpu(resp->status);
111 out_free_resp:
112 iwl_free_resp(cmd);
113 return ret;
114 }
115
/*
 * We assume that the caller has set the status to the success value.
 */
int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len,
120 const void *data, u32 *status)
121 {
122 struct iwl_host_cmd cmd = {
123 .id = id,
124 .len = { len, },
125 .data = { data, },
126 };
127
128 return iwl_mvm_send_cmd_status(mvm, &cmd, status);
129 }
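/*
 * Illustrative sketch (hypothetical command ID, not from this file):
 * callers preset *status to the command's success value, so an
 * RFKILL-suppressed send still reads back as success.
 *
 *	u32 status = 0;		// assuming 0 is this command's success value
 *	int ret;
 *
 *	ret = iwl_mvm_send_cmd_pdu_status(mvm, SOME_CMD_ID,
 *					  sizeof(cmd), &cmd, &status);
 *	if (ret || status != 0)
 *		goto err;	// handle failure
 */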
130
int iwl_mvm_legacy_hw_idx_to_mac80211_idx(u32 rate_n_flags,
132 enum nl80211_band band)
133 {
134 int format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
135 int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
136 bool is_LB = band == NL80211_BAND_2GHZ;
137
138 if (format == RATE_MCS_MOD_TYPE_LEGACY_OFDM)
139 return is_LB ? rate + IWL_FIRST_OFDM_RATE :
140 rate;
141
142 /* CCK is not allowed in HB */
143 return is_LB ? rate : -1;
144 }
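/*
 * Worked example for the mapping above: in the 2.4 GHz band, mac80211's
 * legacy rate table starts with the CCK rates, so an OFDM hardware index
 * is shifted up by IWL_FIRST_OFDM_RATE; in 5 GHz the table starts at the
 * OFDM rates, so the hardware index is used as-is, and a CCK rate there
 * yields -1.
 */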
145
int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
147 enum nl80211_band band)
148 {
149 int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
150 int idx;
151 int band_offset = 0;
152
153 /* Legacy rate format, search for match in table */
154 if (band != NL80211_BAND_2GHZ)
155 band_offset = IWL_FIRST_OFDM_RATE;
156 for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
157 if (iwl_fw_rate_idx_to_plcp(idx) == rate)
158 return idx - band_offset;
159
160 return -1;
161 }
162
u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx)
164 {
165 if (iwl_fw_lookup_cmd_ver(fw, TX_CMD, 0) > 8)
/* In the new rate format, legacy rates are indexed:
 * 0 - 3 for CCK and 0 - 7 for OFDM.
 */
169 return (rate_idx >= IWL_FIRST_OFDM_RATE ?
170 rate_idx - IWL_FIRST_OFDM_RATE :
171 rate_idx);
172
173 return iwl_fw_rate_idx_to_plcp(rate_idx);
174 }
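/*
 * Example of the two encodings handled above: with TX_CMD version 9 or
 * later the firmware expects band-relative indices (the first OFDM rate
 * becomes 0, CCK rates keep indices 0 - 3), while older versions expect
 * the PLCP value looked up via iwl_fw_rate_idx_to_plcp().
 */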
175
u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac)
177 {
178 static const u8 mac80211_ac_to_ucode_ac[] = {
179 AC_VO,
180 AC_VI,
181 AC_BE,
182 AC_BK
183 };
184
185 return mac80211_ac_to_ucode_ac[ac];
186 }
187
void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
189 {
190 struct iwl_rx_packet *pkt = rxb_addr(rxb);
191 struct iwl_error_resp *err_resp = (void *)pkt->data;
192
193 IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n",
194 le32_to_cpu(err_resp->error_type), err_resp->cmd_id);
195 IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n",
196 le16_to_cpu(err_resp->bad_cmd_seq_num),
197 le32_to_cpu(err_resp->error_service));
198 IWL_ERR(mvm, "FW Error notification: timestamp 0x%016llX\n",
199 le64_to_cpu(err_resp->timestamp));
200 }
201
202 /*
203 * Returns the first antenna as ANT_[ABC], as defined in iwl-config.h.
204 * The parameter should also be a combination of ANT_[ABC].
205 */
u8 first_antenna(u8 mask)
207 {
208 BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */
209 if (WARN_ON_ONCE(!mask)) /* ffs will return 0 if mask is zeroed */
210 return BIT(0);
211 return BIT(ffs(mask) - 1);
212 }
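/*
 * Example: first_antenna(ANT_B | ANT_C) returns ANT_B, i.e. the lowest
 * bit set in the mask; an empty mask warns and falls back to ANT_A.
 */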
213
214 #define MAX_ANT_NUM 2
/*
 * Toggles between TX antennas to send the probe request on.
 * Receives the bitmask of valid TX antennas and the *index* used
 * for the last TX, and returns the next valid *index* to use.
 * To set it in the tx_cmd, use BIT(idx).
 */
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
222 {
223 u8 ind = last_idx;
224 int i;
225
226 for (i = 0; i < MAX_ANT_NUM; i++) {
227 ind = (ind + 1) % MAX_ANT_NUM;
228 if (valid & BIT(ind))
229 return ind;
230 }
231
232 WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);
233 return last_idx;
234 }
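/*
 * Example of the toggling above: with valid = ANT_A | ANT_B, last_idx 0
 * returns 1 and last_idx 1 returns 0; with only ANT_B valid, every call
 * returns index 1, which the caller turns into BIT(1) for the TX command.
 */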
235
/**
 * iwl_mvm_send_lq_cmd() - Send link quality command
 * @mvm: Driver data.
 * @lq: Link quality command to send.
 *
 * The link quality command is sent as the last step of station creation.
 * In the special case in which init is set, we call a callback to clear
 * the state indicating that station creation is in progress.
 *
 * Returns: an error code indicating success or failure
 */
int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq)
249 {
250 struct iwl_host_cmd cmd = {
251 .id = LQ_CMD,
252 .len = { sizeof(struct iwl_lq_cmd), },
253 .flags = CMD_ASYNC,
254 .data = { lq, },
255 };
256
257 if (WARN_ON(lq->sta_id == IWL_INVALID_STA ||
258 iwl_mvm_has_tlc_offload(mvm)))
259 return -EINVAL;
260
261 return iwl_mvm_send_cmd(mvm, &cmd);
262 }
263
/**
 * iwl_mvm_update_smps - Get a request to change the SMPS mode
 * @mvm: Driver data.
 * @vif: Pointer to the ieee80211_vif structure.
 * @req_type: The part of the driver that requested the change.
 * @smps_request: The requested SMPS mode.
 * @link_id: for MLO, the link_id; otherwise 0 (deflink)
 *
 * Get a request to change the SMPS mode,
 * and change it according to all other requests in the driver.
 */
void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
276 enum iwl_mvm_smps_type_request req_type,
277 enum ieee80211_smps_mode smps_request,
278 unsigned int link_id)
279 {
280 struct iwl_mvm_vif *mvmvif;
281 enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
282 int i;
283
284 lockdep_assert_held(&mvm->mutex);
285
/* SMPS is irrelevant for NICs that don't have at least 2 RX antennas */
287 if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
288 return;
289
290 if (vif->type != NL80211_IFTYPE_STATION)
291 return;
292
293 /* SMPS is handled by firmware */
294 if (iwl_mvm_has_rlc_offload(mvm))
295 return;
296
297 mvmvif = iwl_mvm_vif_from_mac80211(vif);
298
299 if (WARN_ON_ONCE(!mvmvif->link[link_id]))
300 return;
301
302 mvmvif->link[link_id]->smps_requests[req_type] = smps_request;
303 for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
304 if (mvmvif->link[link_id]->smps_requests[i] ==
305 IEEE80211_SMPS_STATIC) {
306 smps_mode = IEEE80211_SMPS_STATIC;
307 break;
308 }
309 if (mvmvif->link[link_id]->smps_requests[i] ==
310 IEEE80211_SMPS_DYNAMIC)
311 smps_mode = IEEE80211_SMPS_DYNAMIC;
312 }
313
314 /* SMPS is disabled in eSR */
315 if (mvmvif->esr_active)
316 smps_mode = IEEE80211_SMPS_OFF;
317
318 ieee80211_request_smps(vif, link_id, smps_mode);
319 }
320
void iwl_mvm_update_smps_on_active_links(struct iwl_mvm *mvm,
322 struct ieee80211_vif *vif,
323 enum iwl_mvm_smps_type_request req_type,
324 enum ieee80211_smps_mode smps_request)
325 {
326 struct ieee80211_bss_conf *link_conf;
327 unsigned int link_id;
328
329 rcu_read_lock();
330 for_each_vif_active_link(vif, link_conf, link_id)
331 iwl_mvm_update_smps(mvm, vif, req_type, smps_request,
332 link_id);
333 rcu_read_unlock();
334 }
335
static bool iwl_wait_stats_complete(struct iwl_notif_wait_data *notif_wait,
337 struct iwl_rx_packet *pkt, void *data)
338 {
339 WARN_ON(pkt->hdr.cmd != STATISTICS_NOTIFICATION);
340
341 return true;
342 }
343
344 #define PERIODIC_STAT_RATE 5
345
int iwl_mvm_request_periodic_system_statistics(struct iwl_mvm *mvm, bool enable)
347 {
348 u32 flags = enable ? 0 : IWL_STATS_CFG_FLG_DISABLE_NTFY_MSK;
349 u32 type = enable ? (IWL_STATS_NTFY_TYPE_ID_OPER |
350 IWL_STATS_NTFY_TYPE_ID_OPER_PART1) : 0;
351 struct iwl_system_statistics_cmd system_cmd = {
352 .cfg_mask = cpu_to_le32(flags),
353 .config_time_sec = cpu_to_le32(enable ?
354 PERIODIC_STAT_RATE : 0),
355 .type_id_mask = cpu_to_le32(type),
356 };
357
358 return iwl_mvm_send_cmd_pdu(mvm,
359 WIDE_ID(SYSTEM_GROUP,
360 SYSTEM_STATISTICS_CMD),
361 0, sizeof(system_cmd), &system_cmd);
362 }
363
static int iwl_mvm_request_system_statistics(struct iwl_mvm *mvm, bool clear,
365 u8 cmd_ver)
366 {
367 struct iwl_system_statistics_cmd system_cmd = {
368 .cfg_mask = clear ?
369 cpu_to_le32(IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK) :
370 cpu_to_le32(IWL_STATS_CFG_FLG_RESET_MSK |
371 IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK),
372 .type_id_mask = cpu_to_le32(IWL_STATS_NTFY_TYPE_ID_OPER |
373 IWL_STATS_NTFY_TYPE_ID_OPER_PART1),
374 };
375 struct iwl_host_cmd cmd = {
376 .id = WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_CMD),
377 .len[0] = sizeof(system_cmd),
378 .data[0] = &system_cmd,
379 };
380 struct iwl_notification_wait stats_wait;
381 static const u16 stats_complete[] = {
382 WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_END_NOTIF),
383 };
384 int ret;
385
386 if (cmd_ver != 1) {
387 IWL_FW_CHECK_FAILED(mvm,
388 "Invalid system statistics command version:%d\n",
389 cmd_ver);
390 return -EOPNOTSUPP;
391 }
392
393 iwl_init_notification_wait(&mvm->notif_wait, &stats_wait,
394 stats_complete, ARRAY_SIZE(stats_complete),
395 NULL, NULL);
396
397 mvm->statistics_clear = clear;
398 ret = iwl_mvm_send_cmd(mvm, &cmd);
399 if (ret) {
400 iwl_remove_notification(&mvm->notif_wait, &stats_wait);
401 return ret;
402 }
403
404 /* 500ms for OPERATIONAL, PART1 and END notification should be enough
405 * for FW to collect data from all LMACs and send
406 * STATISTICS_NOTIFICATION to host
407 */
408 ret = iwl_wait_notification(&mvm->notif_wait, &stats_wait, HZ / 2);
409 if (ret)
410 return ret;
411
412 if (clear)
413 iwl_mvm_accu_radio_stats(mvm);
414
415 return ret;
416 }
417
int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear)
419 {
420 struct iwl_statistics_cmd scmd = {
421 .flags = clear ? cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0,
422 };
423
424 struct iwl_host_cmd cmd = {
425 .id = STATISTICS_CMD,
426 .len[0] = sizeof(scmd),
427 .data[0] = &scmd,
428 };
429 u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
430 WIDE_ID(SYSTEM_GROUP,
431 SYSTEM_STATISTICS_CMD),
432 IWL_FW_CMD_VER_UNKNOWN);
433 int ret;
434
/*
 * Don't request statistics during restart; they won't have any useful
 * information right after restart, nor is clearing needed.
 */
439 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
440 return 0;
441
442 if (cmd_ver != IWL_FW_CMD_VER_UNKNOWN)
443 return iwl_mvm_request_system_statistics(mvm, clear, cmd_ver);
444
/* From version 15 of STATISTICS_NOTIFICATION, the reply for
 * STATISTICS_CMD is empty, and the response arrives in a
 * STATISTICS_NOTIFICATION notification.
 */
449 if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
450 STATISTICS_NOTIFICATION, 0) < 15) {
451 cmd.flags = CMD_WANT_SKB;
452
453 ret = iwl_mvm_send_cmd(mvm, &cmd);
454 if (ret)
455 return ret;
456
457 iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt);
458 iwl_free_resp(&cmd);
459 } else {
460 struct iwl_notification_wait stats_wait;
461 static const u16 stats_complete[] = {
462 STATISTICS_NOTIFICATION,
463 };
464
465 iwl_init_notification_wait(&mvm->notif_wait, &stats_wait,
466 stats_complete, ARRAY_SIZE(stats_complete),
467 iwl_wait_stats_complete, NULL);
468
469 ret = iwl_mvm_send_cmd(mvm, &cmd);
470 if (ret) {
471 iwl_remove_notification(&mvm->notif_wait, &stats_wait);
472 return ret;
473 }
474
475 /* 200ms should be enough for FW to collect data from all
476 * LMACs and send STATISTICS_NOTIFICATION to host
477 */
478 ret = iwl_wait_notification(&mvm->notif_wait, &stats_wait, HZ / 5);
479 if (ret)
480 return ret;
481 }
482
483 if (clear)
484 iwl_mvm_accu_radio_stats(mvm);
485
486 return 0;
487 }
488
void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm)
490 {
491 mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time;
492 mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time;
493 mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf;
494 mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
495 }
496
497 struct iwl_mvm_diversity_iter_data {
498 struct iwl_mvm_phy_ctxt *ctxt;
499 bool result;
500 };
501
static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
503 struct ieee80211_vif *vif)
504 {
505 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
506 struct iwl_mvm_diversity_iter_data *data = _data;
507 int i, link_id;
508
509 for_each_mvm_vif_valid_link(mvmvif, link_id) {
510 struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id];
511
512 if (link_info->phy_ctxt != data->ctxt)
513 continue;
514
515 for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
516 if (link_info->smps_requests[i] == IEEE80211_SMPS_STATIC ||
517 link_info->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) {
518 data->result = false;
519 break;
520 }
521 }
522 }
523 }
524
bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm,
526 struct iwl_mvm_phy_ctxt *ctxt)
527 {
528 struct iwl_mvm_diversity_iter_data data = {
529 .ctxt = ctxt,
530 .result = true,
531 };
532
533 lockdep_assert_held(&mvm->mutex);
534
535 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
536 return false;
537
538 if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
539 return false;
540
541 if (mvm->cfg->rx_with_siso_diversity)
542 return false;
543
544 ieee80211_iterate_active_interfaces_atomic(
545 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
546 iwl_mvm_diversity_iter, &data);
547
548 return data.result;
549 }
550
void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm,
552 bool low_latency, u16 mac_id)
553 {
554 struct iwl_mac_low_latency_cmd cmd = {
555 .mac_id = cpu_to_le32(mac_id)
556 };
557
558 if (!fw_has_capa(&mvm->fw->ucode_capa,
559 IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA))
560 return;
561
562 if (low_latency) {
563 /* currently we don't care about the direction */
564 cmd.low_latency_rx = 1;
565 cmd.low_latency_tx = 1;
566 }
567
568 if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, LOW_LATENCY_CMD),
569 0, sizeof(cmd), &cmd))
570 IWL_ERR(mvm, "Failed to send low latency command\n");
571 }
572
int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
574 bool low_latency,
575 enum iwl_mvm_low_latency_cause cause)
576 {
577 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
578 int res;
579 bool prev;
580
581 lockdep_assert_held(&mvm->mutex);
582
583 prev = iwl_mvm_vif_low_latency(mvmvif);
584 iwl_mvm_vif_set_low_latency(mvmvif, low_latency, cause);
585
586 low_latency = iwl_mvm_vif_low_latency(mvmvif);
587
588 if (low_latency == prev)
589 return 0;
590
591 iwl_mvm_send_low_latency_cmd(mvm, low_latency, mvmvif->id);
592
593 res = iwl_mvm_update_quotas(mvm, false, NULL);
594 if (res)
595 return res;
596
597 iwl_mvm_bt_coex_vif_change(mvm);
598
599 return iwl_mvm_power_update_mac(mvm);
600 }
601
602 struct iwl_mvm_low_latency_iter {
603 bool result;
604 bool result_per_band[NUM_NL80211_BANDS];
605 };
606
static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
608 {
609 struct iwl_mvm_low_latency_iter *result = _data;
610 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
611 enum nl80211_band band;
612
613 if (iwl_mvm_vif_low_latency(mvmvif)) {
614 result->result = true;
615
616 if (!mvmvif->deflink.phy_ctxt)
617 return;
618
619 band = mvmvif->deflink.phy_ctxt->channel->band;
620 result->result_per_band[band] = true;
621 }
622 }
623
bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
625 {
626 struct iwl_mvm_low_latency_iter data = {};
627
628 ieee80211_iterate_active_interfaces_atomic(
629 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
630 iwl_mvm_ll_iter, &data);
631
632 return data.result;
633 }
634
bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band)
636 {
637 struct iwl_mvm_low_latency_iter data = {};
638
639 ieee80211_iterate_active_interfaces_atomic(
640 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
641 iwl_mvm_ll_iter, &data);
642
643 return data.result_per_band[band];
644 }
645
646 struct iwl_bss_iter_data {
647 struct ieee80211_vif *vif;
648 bool error;
649 };
650
static void iwl_mvm_bss_iface_iterator(void *_data, u8 *mac,
652 struct ieee80211_vif *vif)
653 {
654 struct iwl_bss_iter_data *data = _data;
655
656 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
657 return;
658
659 if (data->vif) {
660 data->error = true;
661 return;
662 }
663
664 data->vif = vif;
665 }
666
struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm)
668 {
669 struct iwl_bss_iter_data bss_iter_data = {};
670
671 ieee80211_iterate_active_interfaces_atomic(
672 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
673 iwl_mvm_bss_iface_iterator, &bss_iter_data);
674
675 if (bss_iter_data.error)
676 return ERR_PTR(-EINVAL);
677
678 return bss_iter_data.vif;
679 }
680
681 struct iwl_bss_find_iter_data {
682 struct ieee80211_vif *vif;
683 u32 macid;
684 };
685
static void iwl_mvm_bss_find_iface_iterator(void *_data, u8 *mac,
687 struct ieee80211_vif *vif)
688 {
689 struct iwl_bss_find_iter_data *data = _data;
690 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
691
692 if (mvmvif->id == data->macid)
693 data->vif = vif;
694 }
695
struct ieee80211_vif *iwl_mvm_get_vif_by_macid(struct iwl_mvm *mvm, u32 macid)
697 {
698 struct iwl_bss_find_iter_data data = {
699 .macid = macid,
700 };
701
702 lockdep_assert_held(&mvm->mutex);
703
704 ieee80211_iterate_active_interfaces_atomic(
705 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
706 iwl_mvm_bss_find_iface_iterator, &data);
707
708 return data.vif;
709 }
710
711 struct iwl_sta_iter_data {
712 bool assoc;
713 };
714
static void iwl_mvm_sta_iface_iterator(void *_data, u8 *mac,
716 struct ieee80211_vif *vif)
717 {
718 struct iwl_sta_iter_data *data = _data;
719
720 if (vif->type != NL80211_IFTYPE_STATION)
721 return;
722
723 if (vif->cfg.assoc)
724 data->assoc = true;
725 }
726
bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm)
728 {
729 struct iwl_sta_iter_data data = {
730 .assoc = false,
731 };
732
733 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
734 IEEE80211_IFACE_ITER_NORMAL,
735 iwl_mvm_sta_iface_iterator,
736 &data);
737 return data.assoc;
738 }
739
unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
741 struct ieee80211_vif *vif)
742 {
743 unsigned int default_timeout =
744 mvm->trans->mac_cfg->base->wd_timeout;
745
746 /*
747 * We can't know when the station is asleep or awake, so we
748 * must disable the queue hang detection.
749 */
750 if (fw_has_capa(&mvm->fw->ucode_capa,
751 IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) &&
752 vif->type == NL80211_IFTYPE_AP)
753 return IWL_WATCHDOG_DISABLED;
754 return default_timeout;
755 }
756
void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
758 const char *errmsg)
759 {
760 struct iwl_fw_dbg_trigger_tlv *trig;
761 struct iwl_fw_dbg_trigger_mlme *trig_mlme;
762
763 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
764 FW_DBG_TRIGGER_MLME);
765 if (!trig)
766 goto out;
767
768 trig_mlme = (void *)trig->data;
769
770 if (trig_mlme->stop_connection_loss &&
771 --trig_mlme->stop_connection_loss)
772 goto out;
773
774 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "%s", errmsg);
775
776 out:
777 ieee80211_connection_loss(vif);
778 }
779
void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
781 struct ieee80211_vif *vif,
782 const struct ieee80211_sta *sta,
783 u16 tid)
784 {
785 struct iwl_fw_dbg_trigger_tlv *trig;
786 struct iwl_fw_dbg_trigger_ba *ba_trig;
787
788 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
789 FW_DBG_TRIGGER_BA);
790 if (!trig)
791 return;
792
793 ba_trig = (void *)trig->data;
794
795 if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(tid)))
796 return;
797
798 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
799 "Frame from %pM timed out, tid %d",
800 sta->addr, tid);
801 }
802
u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed)
804 {
805 if (!elapsed)
806 return 0;
807
808 return (100 * airtime / elapsed) / USEC_PER_MSEC;
809 }
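/*
 * Worked example for the formula above, with airtime in usec and the
 * elapsed window in msec: airtime = 250000 usec over elapsed = 1000 msec
 * gives (100 * 250000 / 1000) / 1000 = 25, i.e. a 25% load, which the
 * thresholds below classify as low, medium or high traffic.
 */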
810
811 static enum iwl_mvm_traffic_load
iwl_mvm_tcm_load(struct iwl_mvm *mvm, u32 airtime, unsigned long elapsed)
813 {
814 u8 load = iwl_mvm_tcm_load_percentage(airtime, elapsed);
815
816 if (load > IWL_MVM_TCM_LOAD_HIGH_THRESH)
817 return IWL_MVM_TRAFFIC_HIGH;
818 if (load > IWL_MVM_TCM_LOAD_MEDIUM_THRESH)
819 return IWL_MVM_TRAFFIC_MEDIUM;
820
821 return IWL_MVM_TRAFFIC_LOW;
822 }
823
static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
825 {
826 struct iwl_mvm *mvm = _data;
827 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
828 bool low_latency, prev = mvmvif->low_latency & LOW_LATENCY_TRAFFIC;
829
830 if (mvmvif->id >= NUM_MAC_INDEX_DRIVER)
831 return;
832
833 low_latency = mvm->tcm.result.low_latency[mvmvif->id];
834
835 if (!mvm->tcm.result.change[mvmvif->id] &&
836 prev == low_latency) {
837 iwl_mvm_update_quotas(mvm, false, NULL);
838 return;
839 }
840
841 if (prev != low_latency) {
842 /* this sends traffic load and updates quota as well */
843 iwl_mvm_update_low_latency(mvm, vif, low_latency,
844 LOW_LATENCY_TRAFFIC);
845 } else {
846 iwl_mvm_update_quotas(mvm, false, NULL);
847 }
848 }
849
static void iwl_mvm_tcm_results(struct iwl_mvm *mvm)
851 {
852 guard(mvm)(mvm);
853
854 ieee80211_iterate_active_interfaces(
855 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
856 iwl_mvm_tcm_iter, mvm);
857
858 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
859 iwl_mvm_config_scan(mvm);
860 }
861
static void iwl_mvm_tcm_uapsd_nonagg_detected_wk(struct work_struct *wk)
863 {
864 struct iwl_mvm *mvm;
865 struct iwl_mvm_vif *mvmvif;
866 struct ieee80211_vif *vif;
867
868 mvmvif = container_of(wk, struct iwl_mvm_vif,
869 uapsd_nonagg_detected_wk.work);
870 vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
871 mvm = mvmvif->mvm;
872
873 if (mvm->tcm.data[mvmvif->id].opened_rx_ba_sessions)
874 return;
875
876 /* remember that this AP is broken */
877 memcpy(mvm->uapsd_noagg_bssids[mvm->uapsd_noagg_bssid_write_idx].addr,
878 vif->bss_conf.bssid, ETH_ALEN);
879 mvm->uapsd_noagg_bssid_write_idx++;
880 if (mvm->uapsd_noagg_bssid_write_idx >= IWL_MVM_UAPSD_NOAGG_LIST_LEN)
881 mvm->uapsd_noagg_bssid_write_idx = 0;
882
883 iwl_mvm_connection_loss(mvm, vif,
884 "AP isn't using AMPDU with uAPSD enabled");
885 }
886
static void iwl_mvm_uapsd_agg_disconnect(struct iwl_mvm *mvm,
888 struct ieee80211_vif *vif)
889 {
890 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
891
892 if (vif->type != NL80211_IFTYPE_STATION)
893 return;
894
895 if (!vif->cfg.assoc)
896 return;
897
898 if (!mvmvif->deflink.queue_params[IEEE80211_AC_VO].uapsd &&
899 !mvmvif->deflink.queue_params[IEEE80211_AC_VI].uapsd &&
900 !mvmvif->deflink.queue_params[IEEE80211_AC_BE].uapsd &&
901 !mvmvif->deflink.queue_params[IEEE80211_AC_BK].uapsd)
902 return;
903
904 if (mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected)
905 return;
906
907 mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected = true;
908 IWL_INFO(mvm,
909 "detected AP should do aggregation but isn't, likely due to U-APSD\n");
910 schedule_delayed_work(&mvmvif->uapsd_nonagg_detected_wk,
911 15 * HZ);
912 }
913
static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm,
915 unsigned int elapsed,
916 int mac)
917 {
918 u64 bytes = mvm->tcm.data[mac].uapsd_nonagg_detect.rx_bytes;
919 u64 tpt;
920 unsigned long rate;
921 struct ieee80211_vif *vif;
922
923 rate = ewma_rate_read(&mvm->tcm.data[mac].uapsd_nonagg_detect.rate);
924
925 if (!rate || mvm->tcm.data[mac].opened_rx_ba_sessions ||
926 mvm->tcm.data[mac].uapsd_nonagg_detect.detected)
927 return;
928
929 if (iwl_mvm_has_new_rx_api(mvm)) {
930 tpt = 8 * bytes; /* kbps */
931 do_div(tpt, elapsed);
932 rate *= 1000; /* kbps */
933 if (tpt < 22 * rate / 100)
934 return;
935 } else {
936 /*
937 * the rate here is actually the threshold, in 100Kbps units,
938 * so do the needed conversion from bytes to 100Kbps:
939 * 100kb = bits / (100 * 1000),
940 * 100kbps = 100kb / (msecs / 1000) ==
941 * (bits / (100 * 1000)) / (msecs / 1000) ==
942 * bits / (100 * msecs)
943 */
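/*
 * Worked example (illustrative numbers): bytes = 1250000 over
 * elapsed = 1000 msec gives tpt = 8 * 1250000 / (1000 * 100) = 100,
 * i.e. 100 units of 100 kbps (10 Mbps), which is then compared
 * against the threshold stored in "rate".
 */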
944 tpt = (8 * bytes);
945 do_div(tpt, elapsed * 100);
946 if (tpt < rate)
947 return;
948 }
949
950 rcu_read_lock();
951 vif = rcu_dereference(mvm->vif_id_to_mac[mac]);
952 if (vif)
953 iwl_mvm_uapsd_agg_disconnect(mvm, vif);
954 rcu_read_unlock();
955 }
956
static void iwl_mvm_tcm_iterator(void *_data, u8 *mac,
958 struct ieee80211_vif *vif)
959 {
960 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
961 u32 *band = _data;
962
963 if (!mvmvif->deflink.phy_ctxt)
964 return;
965
966 band[mvmvif->id] = mvmvif->deflink.phy_ctxt->channel->band;
967 }
968
static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm,
970 unsigned long ts,
971 bool handle_uapsd)
972 {
973 unsigned int elapsed = jiffies_to_msecs(ts - mvm->tcm.ts);
974 unsigned int uapsd_elapsed =
975 jiffies_to_msecs(ts - mvm->tcm.uapsd_nonagg_ts);
976 u32 total_airtime = 0;
977 u32 band_airtime[NUM_NL80211_BANDS] = {0};
978 u32 band[NUM_MAC_INDEX_DRIVER] = {0};
979 int ac, mac, i;
980 bool low_latency = false;
981 enum iwl_mvm_traffic_load load, band_load;
982 bool handle_ll = time_after(ts, mvm->tcm.ll_ts + MVM_LL_PERIOD);
983
984 if (handle_ll)
985 mvm->tcm.ll_ts = ts;
986 if (handle_uapsd)
987 mvm->tcm.uapsd_nonagg_ts = ts;
988
989 mvm->tcm.result.elapsed = elapsed;
990
991 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
992 IEEE80211_IFACE_ITER_NORMAL,
993 iwl_mvm_tcm_iterator,
994 &band);
995
996 for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
997 struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
998 u32 vo_vi_pkts = 0;
999 u32 airtime = mdata->rx.airtime + mdata->tx.airtime;
1000
1001 total_airtime += airtime;
1002 band_airtime[band[mac]] += airtime;
1003
1004 load = iwl_mvm_tcm_load(mvm, airtime, elapsed);
1005 mvm->tcm.result.change[mac] = load != mvm->tcm.result.load[mac];
1006 mvm->tcm.result.load[mac] = load;
1007 mvm->tcm.result.airtime[mac] = airtime;
1008
1009 for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_VI; ac++)
1010 vo_vi_pkts += mdata->rx.pkts[ac] +
1011 mdata->tx.pkts[ac];
1012
1013 /* enable immediately with enough packets but defer disabling */
1014 if (vo_vi_pkts > IWL_MVM_TCM_LOWLAT_ENABLE_THRESH)
1015 mvm->tcm.result.low_latency[mac] = true;
1016 else if (handle_ll)
1017 mvm->tcm.result.low_latency[mac] = false;
1018
1019 if (handle_ll) {
1020 /* clear old data */
1021 memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
1022 memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
1023 }
1024 low_latency |= mvm->tcm.result.low_latency[mac];
1025
1026 if (!mvm->tcm.result.low_latency[mac] && handle_uapsd)
1027 iwl_mvm_check_uapsd_agg_expected_tpt(mvm, uapsd_elapsed,
1028 mac);
1029 /* clear old data */
1030 if (handle_uapsd)
1031 mdata->uapsd_nonagg_detect.rx_bytes = 0;
1032 memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
1033 memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));
1034 }
1035
1036 load = iwl_mvm_tcm_load(mvm, total_airtime, elapsed);
1037 mvm->tcm.result.global_load = load;
1038
1039 for (i = 0; i < NUM_NL80211_BANDS; i++) {
1040 band_load = iwl_mvm_tcm_load(mvm, band_airtime[i], elapsed);
1041 mvm->tcm.result.band_load[i] = band_load;
1042 }
1043
1044 /*
1045 * If the current load isn't low we need to force re-evaluation
1046 * in the TCM period, so that we can return to low load if there
1047 * was no traffic at all (and thus iwl_mvm_recalc_tcm didn't get
1048 * triggered by traffic).
1049 */
1050 if (load != IWL_MVM_TRAFFIC_LOW)
1051 return MVM_TCM_PERIOD;
1052 /*
1053 * If low-latency is active we need to force re-evaluation after
1054 * (the longer) MVM_LL_PERIOD, so that we can disable low-latency
1055 * when there's no traffic at all.
1056 */
1057 if (low_latency)
1058 return MVM_LL_PERIOD;
1059 /*
1060 * Otherwise, we don't need to run the work struct because we're
1061 * in the default "idle" state - traffic indication is low (which
1062 * also covers the "no traffic" case) and low-latency is disabled
1063 * so there's no state that may need to be disabled when there's
1064 * no traffic at all.
1065 *
1066 * Note that this has no impact on the regular scheduling of the
1067 * updates triggered by traffic - those happen whenever one of the
1068 * two timeouts expire (if there's traffic at all.)
1069 */
1070 return 0;
1071 }
1072
void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm)
1074 {
1075 unsigned long ts = jiffies;
1076 bool handle_uapsd =
1077 time_after(ts, mvm->tcm.uapsd_nonagg_ts +
1078 msecs_to_jiffies(IWL_MVM_UAPSD_NONAGG_PERIOD));
1079
1080 spin_lock(&mvm->tcm.lock);
1081 if (mvm->tcm.paused || !time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
1082 spin_unlock(&mvm->tcm.lock);
1083 return;
1084 }
1085 spin_unlock(&mvm->tcm.lock);
1086
1087 if (handle_uapsd && iwl_mvm_has_new_rx_api(mvm)) {
1088 guard(mvm)(mvm);
1089 if (iwl_mvm_request_statistics(mvm, true))
1090 handle_uapsd = false;
1091 }
1092
1093 spin_lock(&mvm->tcm.lock);
1094 /* re-check if somebody else won the recheck race */
1095 if (!mvm->tcm.paused && time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
1096 /* calculate statistics */
1097 unsigned long work_delay = iwl_mvm_calc_tcm_stats(mvm, ts,
1098 handle_uapsd);
1099
1100 /* the memset needs to be visible before the timestamp */
1101 smp_mb();
1102 mvm->tcm.ts = ts;
1103 if (work_delay)
1104 schedule_delayed_work(&mvm->tcm.work, work_delay);
1105 }
1106 spin_unlock(&mvm->tcm.lock);
1107
1108 iwl_mvm_tcm_results(mvm);
1109 }
1110
void iwl_mvm_tcm_work(struct work_struct *work)
1112 {
1113 struct delayed_work *delayed_work = to_delayed_work(work);
1114 struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm,
1115 tcm.work);
1116
1117 iwl_mvm_recalc_tcm(mvm);
1118 }
1119
void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel)
1121 {
1122 spin_lock_bh(&mvm->tcm.lock);
1123 mvm->tcm.paused = true;
1124 spin_unlock_bh(&mvm->tcm.lock);
1125 if (with_cancel)
1126 cancel_delayed_work_sync(&mvm->tcm.work);
1127 }
1128
void iwl_mvm_resume_tcm(struct iwl_mvm *mvm)
1130 {
1131 int mac;
1132 bool low_latency = false;
1133
1134 spin_lock_bh(&mvm->tcm.lock);
1135 mvm->tcm.ts = jiffies;
1136 mvm->tcm.ll_ts = jiffies;
1137 for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
1138 struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
1139
1140 memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
1141 memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
1142 memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
1143 memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));
1144
1145 if (mvm->tcm.result.low_latency[mac])
1146 low_latency = true;
1147 }
1148 /* The TCM data needs to be reset before "paused" flag changes */
1149 smp_mb();
1150 mvm->tcm.paused = false;
1151
1152 /*
1153 * if the current load is not low or low latency is active, force
1154 * re-evaluation to cover the case of no traffic.
1155 */
1156 if (mvm->tcm.result.global_load > IWL_MVM_TRAFFIC_LOW)
1157 schedule_delayed_work(&mvm->tcm.work, MVM_TCM_PERIOD);
1158 else if (low_latency)
1159 schedule_delayed_work(&mvm->tcm.work, MVM_LL_PERIOD);
1160
1161 spin_unlock_bh(&mvm->tcm.lock);
1162 }
1163
void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1165 {
1166 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1167
1168 INIT_DELAYED_WORK(&mvmvif->uapsd_nonagg_detected_wk,
1169 iwl_mvm_tcm_uapsd_nonagg_detected_wk);
1170 }
1171
void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1173 {
1174 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1175
1176 cancel_delayed_work_sync(&mvmvif->uapsd_nonagg_detected_wk);
1177 }
1178
u32 iwl_mvm_get_systime(struct iwl_mvm *mvm)
1180 {
1181 u32 reg_addr = DEVICE_SYSTEM_TIME_REG;
1182
1183 if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000 &&
1184 mvm->trans->mac_cfg->base->gp2_reg_addr)
1185 reg_addr = mvm->trans->mac_cfg->base->gp2_reg_addr;
1186
1187 return iwl_read_prph(mvm->trans, reg_addr);
1188 }
1189
void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type,
1191 u32 *gp2, u64 *boottime, ktime_t *realtime)
1192 {
1193 bool ps_disabled;
1194
1195 lockdep_assert_held(&mvm->mutex);
1196
1197 /* Disable power save when reading GP2 */
1198 ps_disabled = mvm->ps_disabled;
1199 if (!ps_disabled) {
1200 mvm->ps_disabled = true;
1201 iwl_mvm_power_update_device(mvm);
1202 }
1203
1204 *gp2 = iwl_mvm_get_systime(mvm);
1205
1206 if (clock_type == CLOCK_BOOTTIME && boottime)
1207 *boottime = ktime_get_boottime_ns();
1208 else if (clock_type == CLOCK_REALTIME && realtime)
1209 *realtime = ktime_get_real();
1210
1211 if (!ps_disabled) {
1212 mvm->ps_disabled = ps_disabled;
1213 iwl_mvm_power_update_device(mvm);
1214 }
1215 }
1216
/* Find if at least two links from different vifs use the same channel.
 * FIXME: consider having a refcount array in struct iwl_mvm_vif for
 * used phy_ctxt ids.
 */
bool iwl_mvm_have_links_same_channel(struct iwl_mvm_vif *vif1,
1222 struct iwl_mvm_vif *vif2)
1223 {
1224 unsigned int i, j;
1225
1226 for_each_mvm_vif_valid_link(vif1, i) {
1227 for_each_mvm_vif_valid_link(vif2, j) {
1228 if (vif1->link[i]->phy_ctxt == vif2->link[j]->phy_ctxt)
1229 return true;
1230 }
1231 }
1232
1233 return false;
1234 }
1235
static u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags)
1237 {
1238 int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
1239 int idx;
1240 bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1);
1241 int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0;
1242 int last = ofdm ? IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE;
1243
1244 for (idx = offset; idx < last; idx++)
1245 if (iwl_fw_rate_idx_to_plcp(idx) == rate)
1246 return idx - offset;
1247 return IWL_RATE_INVALID;
1248 }
1249
u32 iwl_mvm_v3_rate_from_fw(__le32 rate, u8 rate_ver)
1251 {
1252 u32 rate_v3 = 0, rate_v1;
1253 u32 dup = 0;
1254
1255 if (rate_ver > 1)
1256 return iwl_v3_rate_from_v2_v3(rate, rate_ver >= 3);
1257
1258 rate_v1 = le32_to_cpu(rate);
1259 if (rate_v1 == 0)
1260 return rate_v1;
1261 /* convert rate */
1262 if (rate_v1 & RATE_MCS_HT_MSK_V1) {
1263 u32 nss;
1264
1265 rate_v3 |= RATE_MCS_MOD_TYPE_HT;
1266 rate_v3 |=
1267 rate_v1 & RATE_HT_MCS_RATE_CODE_MSK_V1;
1268 nss = u32_get_bits(rate_v1, RATE_HT_MCS_MIMO2_MSK);
1269 rate_v3 |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);
1270 } else if (rate_v1 & RATE_MCS_VHT_MSK_V1 ||
1271 rate_v1 & RATE_MCS_HE_MSK_V1) {
1272 u32 nss = u32_get_bits(rate_v1, RATE_VHT_MCS_NSS_MSK);
1273
1274 rate_v3 |= rate_v1 & RATE_VHT_MCS_RATE_CODE_MSK;
1275
1276 rate_v3 |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);
1277
1278 if (rate_v1 & RATE_MCS_HE_MSK_V1) {
1279 u32 he_type_bits = rate_v1 & RATE_MCS_HE_TYPE_MSK_V1;
1280 u32 he_type = he_type_bits >> RATE_MCS_HE_TYPE_POS_V1;
1281 u32 he_106t = (rate_v1 & RATE_MCS_HE_106T_MSK_V1) >>
1282 RATE_MCS_HE_106T_POS_V1;
1283 u32 he_gi_ltf = (rate_v1 & RATE_MCS_HE_GI_LTF_MSK_V1) >>
1284 RATE_MCS_HE_GI_LTF_POS;
1285
1286 if ((he_type_bits == RATE_MCS_HE_TYPE_SU ||
1287 he_type_bits == RATE_MCS_HE_TYPE_EXT_SU) &&
1288 he_gi_ltf == RATE_MCS_HE_SU_4_LTF)
/* the new rate format has an additional bit to
 * represent the value 4, rather than using the
 * SGI bit for this purpose as was done in the
 * old format
 */
1294 he_gi_ltf += (rate_v1 & RATE_MCS_SGI_MSK_V1) >>
1295 RATE_MCS_SGI_POS_V1;
1296
1297 rate_v3 |= he_gi_ltf << RATE_MCS_HE_GI_LTF_POS;
1298 rate_v3 |= he_type << RATE_MCS_HE_TYPE_POS;
1299 rate_v3 |= he_106t << RATE_MCS_HE_106T_POS;
1300 rate_v3 |= rate_v1 & RATE_HE_DUAL_CARRIER_MODE_MSK;
1301 rate_v3 |= RATE_MCS_MOD_TYPE_HE;
1302 } else {
1303 rate_v3 |= RATE_MCS_MOD_TYPE_VHT;
1304 }
1305 /* if legacy format */
1306 } else {
1307 u32 legacy_rate = iwl_legacy_rate_to_fw_idx(rate_v1);
1308
1309 if (WARN_ON_ONCE(legacy_rate == IWL_RATE_INVALID))
1310 legacy_rate = (rate_v1 & RATE_MCS_CCK_MSK_V1) ?
1311 IWL_FIRST_CCK_RATE : IWL_FIRST_OFDM_RATE;
1312
1313 rate_v3 |= legacy_rate;
1314 if (!(rate_v1 & RATE_MCS_CCK_MSK_V1))
1315 rate_v3 |= RATE_MCS_MOD_TYPE_LEGACY_OFDM;
1316 }
1317
1318 /* convert flags */
1319 if (rate_v1 & RATE_MCS_LDPC_MSK_V1)
1320 rate_v3 |= RATE_MCS_LDPC_MSK;
1321 rate_v3 |= (rate_v1 & RATE_MCS_CHAN_WIDTH_MSK_V1) |
1322 (rate_v1 & RATE_MCS_ANT_AB_MSK) |
1323 (rate_v1 & RATE_MCS_STBC_MSK) |
1324 (rate_v1 & RATE_MCS_BF_MSK);
1325
1326 dup = (rate_v1 & RATE_MCS_DUP_MSK_V1) >> RATE_MCS_DUP_POS_V1;
1327 if (dup) {
1328 rate_v3 |= RATE_MCS_DUP_MSK;
1329 rate_v3 |= dup << RATE_MCS_CHAN_WIDTH_POS;
1330 }
1331
1332 if ((!(rate_v1 & RATE_MCS_HE_MSK_V1)) &&
1333 (rate_v1 & RATE_MCS_SGI_MSK_V1))
1334 rate_v3 |= RATE_MCS_SGI_MSK;
1335
1336 return rate_v3;
1337 }
1338
__le32 iwl_mvm_v3_rate_to_fw(u32 rate, u8 rate_ver)
1340 {
1341 u32 result = 0;
1342 int rate_idx;
1343
1344 if (rate_ver > 1)
1345 return iwl_v3_rate_to_v2_v3(rate, rate_ver > 2);
1346
1347 switch (rate & RATE_MCS_MOD_TYPE_MSK) {
1348 case RATE_MCS_MOD_TYPE_CCK:
1349 result = RATE_MCS_CCK_MSK_V1;
1350 fallthrough;
1351 case RATE_MCS_MOD_TYPE_LEGACY_OFDM:
1352 rate_idx = u32_get_bits(rate, RATE_LEGACY_RATE_MSK);
1353 if (!(result & RATE_MCS_CCK_MSK_V1))
1354 rate_idx += IWL_FIRST_OFDM_RATE;
1355 result |= u32_encode_bits(iwl_fw_rate_idx_to_plcp(rate_idx),
1356 RATE_LEGACY_RATE_MSK_V1);
1357 break;
1358 case RATE_MCS_MOD_TYPE_HT:
1359 result = RATE_MCS_HT_MSK_V1;
1360 result |= u32_encode_bits(u32_get_bits(rate,
1361 RATE_HT_MCS_CODE_MSK),
1362 RATE_HT_MCS_RATE_CODE_MSK_V1);
1363 result |= u32_encode_bits(u32_get_bits(rate,
1364 RATE_MCS_NSS_MSK),
1365 RATE_HT_MCS_MIMO2_MSK);
1366 break;
1367 case RATE_MCS_MOD_TYPE_VHT:
1368 result = RATE_MCS_VHT_MSK_V1;
1369 result |= u32_encode_bits(u32_get_bits(rate,
1370 RATE_VHT_MCS_NSS_MSK),
1371 RATE_MCS_CODE_MSK);
1372 result |= u32_encode_bits(u32_get_bits(rate, RATE_MCS_NSS_MSK),
1373 RATE_VHT_MCS_NSS_MSK);
1374 break;
1375 case RATE_MCS_MOD_TYPE_HE: /* not generated */
1376 default:
1377 WARN_ONCE(1, "bad modulation type %d\n",
1378 u32_get_bits(rate, RATE_MCS_MOD_TYPE_MSK));
1379 return 0;
1380 }
1381
1382 if (rate & RATE_MCS_LDPC_MSK)
1383 result |= RATE_MCS_LDPC_MSK_V1;
1384 WARN_ON_ONCE(u32_get_bits(rate, RATE_MCS_CHAN_WIDTH_MSK) >
1385 RATE_MCS_CHAN_WIDTH_160_VAL);
1386 result |= (rate & RATE_MCS_CHAN_WIDTH_MSK_V1) |
1387 (rate & RATE_MCS_ANT_AB_MSK) |
1388 (rate & RATE_MCS_STBC_MSK) |
1389 (rate & RATE_MCS_BF_MSK);
1390
1391 /* not handling DUP since we don't use it */
1392 WARN_ON_ONCE(rate & RATE_MCS_DUP_MSK);
1393
1394 if (rate & RATE_MCS_SGI_MSK)
1395 result |= RATE_MCS_SGI_MSK_V1;
1396
1397 return cpu_to_le32(result);
1398 }
1399
bool iwl_mvm_vif_is_active(struct iwl_mvm_vif *mvmvif)
1401 {
1402 unsigned int i;
1403
1404 /* FIXME: can it fail when phy_ctxt is assigned? */
1405 for_each_mvm_vif_valid_link(mvmvif, i) {
1406 if (mvmvif->link[i]->phy_ctxt &&
1407 mvmvif->link[i]->phy_ctxt->id < NUM_PHY_CTX)
1408 return true;
1409 }
1410
1411 return false;
1412 }
1413