1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3 * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
4 * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
5 * Copyright (C) 2015-2017 Intel Deutschland GmbH
6 */
7 #include <net/mac80211.h>
8
9 #include "iwl-debug.h"
10 #include "iwl-io.h"
11 #include "iwl-prph.h"
12 #include "iwl-csr.h"
13 #include "mvm.h"
14 #include "fw/api/rs.h"
15 #include "fw/img.h"
16
17 /*
18 * Will return 0 even if the cmd failed when RFKILL is asserted unless
19 * CMD_WANT_SKB is set in cmd->flags.
20 */
21 int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
22 {
23 int ret;
24
25 /*
26 * Synchronous commands from this op-mode must hold
27 * the mutex; this ensures we don't try to send two
28 * (or more) synchronous commands at a time.
29 */
30 if (!(cmd->flags & CMD_ASYNC))
31 lockdep_assert_held(&mvm->mutex);
32
33 ret = iwl_trans_send_cmd(mvm->trans, cmd);
34
35 /*
36 * If the caller wants the SKB, then don't hide any problems, the
37 * caller might access the response buffer which will be NULL if
38 * the command failed.
39 */
40 if (cmd->flags & CMD_WANT_SKB)
41 return ret;
42
43 /*
44 * Silently ignore failures if RFKILL is asserted or
45 * we are in the suspend/resume process
46 */
47 if (!ret || ret == -ERFKILL || ret == -EHOSTDOWN)
48 return 0;
49 return ret;
50 }
51
52 int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
53 u32 flags, u16 len, const void *data)
54 {
55 struct iwl_host_cmd cmd = {
56 .id = id,
57 .len = { len, },
58 .data = { data, },
59 .flags = flags,
60 };
61
62 return iwl_mvm_send_cmd(mvm, &cmd);
63 }
64
65 /*
66 * We assume that the caller set the status to the success value
67 */
68 int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
69 u32 *status)
70 {
71 struct iwl_rx_packet *pkt;
72 struct iwl_cmd_response *resp;
73 int ret, resp_len;
74
75 lockdep_assert_held(&mvm->mutex);
76
77 /*
78 * Only synchronous commands can wait for status; we set CMD_WANT_SKB
79 * internally, so the caller must not.
80 */
81 if (WARN_ONCE(cmd->flags & (CMD_ASYNC | CMD_WANT_SKB),
82 "cmd flags %x", cmd->flags))
83 return -EINVAL;
84
85 cmd->flags |= CMD_WANT_SKB;
86
87 ret = iwl_trans_send_cmd(mvm->trans, cmd);
88 if (ret == -ERFKILL) {
89 /*
90 * The command failed because of RFKILL, don't update
91 * the status, leave it as success and return 0.
92 */
93 return 0;
94 } else if (ret) {
95 return ret;
96 }
97
98 pkt = cmd->resp_pkt;
99
100 resp_len = iwl_rx_packet_payload_len(pkt);
101 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
102 ret = -EIO;
103 goto out_free_resp;
104 }
105
106 resp = (void *)pkt->data;
107 *status = le32_to_cpu(resp->status);
108 out_free_resp:
109 iwl_free_resp(cmd);
110 return ret;
111 }
112
113 /*
114 * We assume that the caller set the status to the success value
115 */
116 int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len,
117 const void *data, u32 *status)
118 {
119 struct iwl_host_cmd cmd = {
120 .id = id,
121 .len = { len, },
122 .data = { data, },
123 };
124
125 return iwl_mvm_send_cmd_status(mvm, &cmd, status);
126 }
127
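/*
 * Illustrative example (hypothetical values, assuming the usual iwlwifi
 * rate table where the four CCK rates occupy indices 0 - 3 and
 * IWL_FIRST_OFDM_RATE is 4): an OFDM hardware rate value of 0 (6 Mbps)
 * maps to mac80211 index 4 on 2.4 GHz but to index 0 on 5 GHz, while CCK
 * values are only valid on 2.4 GHz and yield -1 on 5 GHz.
 */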
128 int iwl_mvm_legacy_hw_idx_to_mac80211_idx(u32 rate_n_flags,
129 enum nl80211_band band)
130 {
131 int format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
132 int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
133 bool is_LB = band == NL80211_BAND_2GHZ;
134
135 if (format == RATE_MCS_MOD_TYPE_LEGACY_OFDM)
136 return is_LB ? rate + IWL_FIRST_OFDM_RATE :
137 rate;
138
139 /* CCK is not allowed in HB */
140 return is_LB ? rate : -1;
141 }
142
143 int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
144 enum nl80211_band band)
145 {
146 int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
147 int idx;
148 int band_offset = 0;
149
150 /* Legacy rate format, search for match in table */
151 if (band != NL80211_BAND_2GHZ)
152 band_offset = IWL_FIRST_OFDM_RATE;
153 for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
154 if (iwl_fw_rate_idx_to_plcp(idx) == rate)
155 return idx - band_offset;
156
157 return -1;
158 }
159
160 u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx)
161 {
162 if (iwl_fw_lookup_cmd_ver(fw, TX_CMD, 0) > 8)
163 /* In the new rate format, legacy rates are indexed:
164 * 0 - 3 for CCK and 0 - 7 for OFDM.
165 */
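/* Illustrative example (hypothetical values, assuming
 * IWL_FIRST_OFDM_RATE is 4): mac80211 index 5 (9 Mbps OFDM) becomes 1
 * here, while CCK indices 0 - 3 are passed through unchanged.
 */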
166 return (rate_idx >= IWL_FIRST_OFDM_RATE ?
167 rate_idx - IWL_FIRST_OFDM_RATE :
168 rate_idx);
169
170 return iwl_fw_rate_idx_to_plcp(rate_idx);
171 }
172
173 u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac)
174 {
175 static const u8 mac80211_ac_to_ucode_ac[] = {
176 AC_VO,
177 AC_VI,
178 AC_BE,
179 AC_BK
180 };
181
182 return mac80211_ac_to_ucode_ac[ac];
183 }
184
185 void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
186 {
187 struct iwl_rx_packet *pkt = rxb_addr(rxb);
188 struct iwl_error_resp *err_resp = (void *)pkt->data;
189
190 IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n",
191 le32_to_cpu(err_resp->error_type), err_resp->cmd_id);
192 IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n",
193 le16_to_cpu(err_resp->bad_cmd_seq_num),
194 le32_to_cpu(err_resp->error_service));
195 IWL_ERR(mvm, "FW Error notification: timestamp 0x%016llX\n",
196 le64_to_cpu(err_resp->timestamp));
197 }
198
199 /*
200 * Returns the first antenna as ANT_[ABC], as defined in iwl-config.h.
201 * The parameter should also be a combination of ANT_[ABC].
202 */
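/*
 * Illustrative example (hypothetical values): first_antenna(ANT_B | ANT_C)
 * returns ANT_B, the lowest bit set in the mask.
 */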
203 u8 first_antenna(u8 mask)
204 {
205 BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */
206 if (WARN_ON_ONCE(!mask)) /* ffs will return 0 if mask is zeroed */
207 return BIT(0);
208 return BIT(ffs(mask) - 1);
209 }
210
211 #define MAX_ANT_NUM 2
212 /*
213 * Toggles between TX antennas to send the probe request on.
214 * Receives the bitmask of valid TX antennas and the *index* used
215 * for the last TX, and returns the next valid *index* to use.
216 * In order to set it in the tx_cmd, must do BIT(idx).
217 */
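/*
 * Illustrative example (hypothetical values, not from the original
 * source): with valid = ANT_A | ANT_B and last_idx = 0, the function
 * returns 1, so the TX command would use BIT(1), i.e. the second antenna.
 */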
218 u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
219 {
220 u8 ind = last_idx;
221 int i;
222
223 for (i = 0; i < MAX_ANT_NUM; i++) {
224 ind = (ind + 1) % MAX_ANT_NUM;
225 if (valid & BIT(ind))
226 return ind;
227 }
228
229 WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);
230 return last_idx;
231 }
232
233 /**
234 * iwl_mvm_send_lq_cmd() - Send link quality command
235 * @mvm: Driver data.
236 * @lq: Link quality command to send.
237 *
238 * The link quality command is sent as the last step of station creation.
239 * In the special case in which init is set, we call a callback to
240 * clear the state indicating that station creation is in
241 * progress.
242 *
243 * Returns: 0 on success, a negative error code on failure
244 */
245 int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq)
246 {
247 struct iwl_host_cmd cmd = {
248 .id = LQ_CMD,
249 .len = { sizeof(struct iwl_lq_cmd), },
250 .flags = CMD_ASYNC,
251 .data = { lq, },
252 };
253
254 if (WARN_ON(lq->sta_id == IWL_INVALID_STA ||
255 iwl_mvm_has_tlc_offload(mvm)))
256 return -EINVAL;
257
258 return iwl_mvm_send_cmd(mvm, &cmd);
259 }
260
261 /**
262 * iwl_mvm_update_smps - Get a request to change the SMPS mode
263 * @mvm: Driver data.
264 * @vif: Pointer to the ieee80211_vif structure
265 * @req_type: The part of the driver that requested the change.
266 * @smps_request: The request to change the SMPS mode.
267 * @link_id: for MLO link_id, otherwise 0 (deflink)
268 *
269 * Get a request to change the SMPS mode,
270 * and change it according to all other requests in the driver.
271 */
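/*
 * Illustrative example (hypothetical values): if one requester asks for
 * IEEE80211_SMPS_DYNAMIC and another for IEEE80211_SMPS_STATIC, the
 * combined mode below resolves to STATIC; DYNAMIC wins only when no
 * requester asked for STATIC, and AUTOMATIC is used when nobody asked
 * for either.
 */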
272 void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
273 enum iwl_mvm_smps_type_request req_type,
274 enum ieee80211_smps_mode smps_request,
275 unsigned int link_id)
276 {
277 struct iwl_mvm_vif *mvmvif;
278 enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
279 int i;
280
281 lockdep_assert_held(&mvm->mutex);
282
283 /* SMPS is irrelevant for NICs that don't have at least 2 RX antennas */
284 if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
285 return;
286
287 if (vif->type != NL80211_IFTYPE_STATION)
288 return;
289
290 /* SMPS is handled by firmware */
291 if (iwl_mvm_has_rlc_offload(mvm))
292 return;
293
294 mvmvif = iwl_mvm_vif_from_mac80211(vif);
295
296 if (WARN_ON_ONCE(!mvmvif->link[link_id]))
297 return;
298
299 mvmvif->link[link_id]->smps_requests[req_type] = smps_request;
300 for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
301 if (mvmvif->link[link_id]->smps_requests[i] ==
302 IEEE80211_SMPS_STATIC) {
303 smps_mode = IEEE80211_SMPS_STATIC;
304 break;
305 }
306 if (mvmvif->link[link_id]->smps_requests[i] ==
307 IEEE80211_SMPS_DYNAMIC)
308 smps_mode = IEEE80211_SMPS_DYNAMIC;
309 }
310
311 ieee80211_request_smps(vif, link_id, smps_mode);
312 }
313
314 void iwl_mvm_update_smps_on_active_links(struct iwl_mvm *mvm,
315 struct ieee80211_vif *vif,
316 enum iwl_mvm_smps_type_request req_type,
317 enum ieee80211_smps_mode smps_request)
318 {
319 struct ieee80211_bss_conf *link_conf;
320 unsigned int link_id;
321
322 rcu_read_lock();
323 for_each_vif_active_link(vif, link_conf, link_id)
324 iwl_mvm_update_smps(mvm, vif, req_type, smps_request,
325 link_id);
326 rcu_read_unlock();
327 }
328
329 static bool iwl_wait_stats_complete(struct iwl_notif_wait_data *notif_wait,
330 struct iwl_rx_packet *pkt, void *data)
331 {
332 WARN_ON(pkt->hdr.cmd != STATISTICS_NOTIFICATION);
333
334 return true;
335 }
336
337 #define PERIODIC_STAT_RATE 5
338
339 int iwl_mvm_request_periodic_system_statistics(struct iwl_mvm *mvm, bool enable)
340 {
341 u32 flags = enable ? 0 : IWL_STATS_CFG_FLG_DISABLE_NTFY_MSK;
342 u32 type = enable ? (IWL_STATS_NTFY_TYPE_ID_OPER |
343 IWL_STATS_NTFY_TYPE_ID_OPER_PART1) : 0;
344 struct iwl_system_statistics_cmd system_cmd = {
345 .cfg_mask = cpu_to_le32(flags),
346 .config_time_sec = cpu_to_le32(enable ?
347 PERIODIC_STAT_RATE : 0),
348 .type_id_mask = cpu_to_le32(type),
349 };
350
351 return iwl_mvm_send_cmd_pdu(mvm,
352 WIDE_ID(SYSTEM_GROUP,
353 SYSTEM_STATISTICS_CMD),
354 0, sizeof(system_cmd), &system_cmd);
355 }
356
357 static int iwl_mvm_request_system_statistics(struct iwl_mvm *mvm, bool clear,
358 u8 cmd_ver)
359 {
360 struct iwl_system_statistics_cmd system_cmd = {
361 .cfg_mask = clear ?
362 cpu_to_le32(IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK) :
363 cpu_to_le32(IWL_STATS_CFG_FLG_RESET_MSK |
364 IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK),
365 .type_id_mask = cpu_to_le32(IWL_STATS_NTFY_TYPE_ID_OPER |
366 IWL_STATS_NTFY_TYPE_ID_OPER_PART1),
367 };
368 struct iwl_host_cmd cmd = {
369 .id = WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_CMD),
370 .len[0] = sizeof(system_cmd),
371 .data[0] = &system_cmd,
372 };
373 struct iwl_notification_wait stats_wait;
374 static const u16 stats_complete[] = {
375 WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_END_NOTIF),
376 };
377 int ret;
378
379 if (cmd_ver != 1) {
380 IWL_FW_CHECK_FAILED(mvm,
381 "Invalid system statistics command version:%d\n",
382 cmd_ver);
383 return -EOPNOTSUPP;
384 }
385
386 iwl_init_notification_wait(&mvm->notif_wait, &stats_wait,
387 stats_complete, ARRAY_SIZE(stats_complete),
388 NULL, NULL);
389
390 mvm->statistics_clear = clear;
391 ret = iwl_mvm_send_cmd(mvm, &cmd);
392 if (ret) {
393 iwl_remove_notification(&mvm->notif_wait, &stats_wait);
394 return ret;
395 }
396
397 /* 500ms for the OPERATIONAL, PART1 and END notifications should be enough
398 * for FW to collect data from all LMACs and send
399 * STATISTICS_NOTIFICATION to host
400 */
401 ret = iwl_wait_notification(&mvm->notif_wait, &stats_wait, HZ / 2);
402 if (ret)
403 return ret;
404
405 if (clear)
406 iwl_mvm_accu_radio_stats(mvm);
407
408 return ret;
409 }
410
411 int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear)
412 {
413 struct iwl_statistics_cmd scmd = {
414 .flags = clear ? cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0,
415 };
416
417 struct iwl_host_cmd cmd = {
418 .id = STATISTICS_CMD,
419 .len[0] = sizeof(scmd),
420 .data[0] = &scmd,
421 };
422 u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
423 WIDE_ID(SYSTEM_GROUP,
424 SYSTEM_STATISTICS_CMD),
425 IWL_FW_CMD_VER_UNKNOWN);
426 int ret;
427
428 /*
429 * Don't request statistics during restart, they'll not have any useful
430 * information right after restart, nor is clearing needed
431 */
432 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
433 return 0;
434
435 if (cmd_ver != IWL_FW_CMD_VER_UNKNOWN)
436 return iwl_mvm_request_system_statistics(mvm, clear, cmd_ver);
437
438 /* From version 15 of STATISTICS_NOTIFICATION, the reply to
439 * STATISTICS_CMD is empty, and the data arrives in a separate
440 * STATISTICS_NOTIFICATION instead
441 */
442 if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
443 STATISTICS_NOTIFICATION, 0) < 15) {
444 cmd.flags = CMD_WANT_SKB;
445
446 ret = iwl_mvm_send_cmd(mvm, &cmd);
447 if (ret)
448 return ret;
449
450 iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt);
451 iwl_free_resp(&cmd);
452 } else {
453 struct iwl_notification_wait stats_wait;
454 static const u16 stats_complete[] = {
455 STATISTICS_NOTIFICATION,
456 };
457
458 iwl_init_notification_wait(&mvm->notif_wait, &stats_wait,
459 stats_complete, ARRAY_SIZE(stats_complete),
460 iwl_wait_stats_complete, NULL);
461
462 ret = iwl_mvm_send_cmd(mvm, &cmd);
463 if (ret) {
464 iwl_remove_notification(&mvm->notif_wait, &stats_wait);
465 return ret;
466 }
467
468 /* 200ms should be enough for FW to collect data from all
469 * LMACs and send STATISTICS_NOTIFICATION to host
470 */
471 ret = iwl_wait_notification(&mvm->notif_wait, &stats_wait, HZ / 5);
472 if (ret)
473 return ret;
474 }
475
476 if (clear)
477 iwl_mvm_accu_radio_stats(mvm);
478
479 return 0;
480 }
481
482 void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm)
483 {
484 mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time;
485 mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time;
486 mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf;
487 mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
488 }
489
490 struct iwl_mvm_diversity_iter_data {
491 struct iwl_mvm_phy_ctxt *ctxt;
492 bool result;
493 };
494
495 static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
496 struct ieee80211_vif *vif)
497 {
498 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
499 struct iwl_mvm_diversity_iter_data *data = _data;
500 int i, link_id;
501
502 for_each_mvm_vif_valid_link(mvmvif, link_id) {
503 struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id];
504
505 if (link_info->phy_ctxt != data->ctxt)
506 continue;
507
508 for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
509 if (link_info->smps_requests[i] == IEEE80211_SMPS_STATIC ||
510 link_info->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) {
511 data->result = false;
512 break;
513 }
514 }
515 }
516 }
517
518 bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm,
519 struct iwl_mvm_phy_ctxt *ctxt)
520 {
521 struct iwl_mvm_diversity_iter_data data = {
522 .ctxt = ctxt,
523 .result = true,
524 };
525
526 lockdep_assert_held(&mvm->mutex);
527
528 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
529 return false;
530
531 if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
532 return false;
533
534 if (mvm->cfg->rx_with_siso_diversity)
535 return false;
536
537 ieee80211_iterate_active_interfaces_atomic(
538 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
539 iwl_mvm_diversity_iter, &data);
540
541 return data.result;
542 }
543
544 void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm,
545 bool low_latency, u16 mac_id)
546 {
547 struct iwl_mac_low_latency_cmd cmd = {
548 .mac_id = cpu_to_le32(mac_id)
549 };
550
551 if (!fw_has_capa(&mvm->fw->ucode_capa,
552 IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA))
553 return;
554
555 if (low_latency) {
556 /* currently we don't care about the direction */
557 cmd.low_latency_rx = 1;
558 cmd.low_latency_tx = 1;
559 }
560
561 if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, LOW_LATENCY_CMD),
562 0, sizeof(cmd), &cmd))
563 IWL_ERR(mvm, "Failed to send low latency command\n");
564 }
565
566 int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
567 bool low_latency,
568 enum iwl_mvm_low_latency_cause cause)
569 {
570 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
571 int res;
572 bool prev;
573
574 lockdep_assert_held(&mvm->mutex);
575
576 prev = iwl_mvm_vif_low_latency(mvmvif);
577 iwl_mvm_vif_set_low_latency(mvmvif, low_latency, cause);
578
579 low_latency = iwl_mvm_vif_low_latency(mvmvif);
580
581 if (low_latency == prev)
582 return 0;
583
584 iwl_mvm_send_low_latency_cmd(mvm, low_latency, mvmvif->id);
585
586 res = iwl_mvm_update_quotas(mvm, false, NULL);
587 if (res)
588 return res;
589
590 iwl_mvm_bt_coex_vif_change(mvm);
591
592 return iwl_mvm_power_update_mac(mvm);
593 }
594
595 struct iwl_mvm_low_latency_iter {
596 bool result;
597 bool result_per_band[NUM_NL80211_BANDS];
598 };
599
600 static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
601 {
602 struct iwl_mvm_low_latency_iter *result = _data;
603 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
604 enum nl80211_band band;
605
606 if (iwl_mvm_vif_low_latency(mvmvif)) {
607 result->result = true;
608
609 if (!mvmvif->deflink.phy_ctxt)
610 return;
611
612 band = mvmvif->deflink.phy_ctxt->channel->band;
613 result->result_per_band[band] = true;
614 }
615 }
616
617 bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
618 {
619 struct iwl_mvm_low_latency_iter data = {};
620
621 ieee80211_iterate_active_interfaces_atomic(
622 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
623 iwl_mvm_ll_iter, &data);
624
625 return data.result;
626 }
627
628 bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band)
629 {
630 struct iwl_mvm_low_latency_iter data = {};
631
632 ieee80211_iterate_active_interfaces_atomic(
633 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
634 iwl_mvm_ll_iter, &data);
635
636 return data.result_per_band[band];
637 }
638
639 struct iwl_bss_iter_data {
640 struct ieee80211_vif *vif;
641 bool error;
642 };
643
644 static void iwl_mvm_bss_iface_iterator(void *_data, u8 *mac,
645 struct ieee80211_vif *vif)
646 {
647 struct iwl_bss_iter_data *data = _data;
648
649 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
650 return;
651
652 if (data->vif) {
653 data->error = true;
654 return;
655 }
656
657 data->vif = vif;
658 }
659
660 struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm)
661 {
662 struct iwl_bss_iter_data bss_iter_data = {};
663
664 ieee80211_iterate_active_interfaces_atomic(
665 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
666 iwl_mvm_bss_iface_iterator, &bss_iter_data);
667
668 if (bss_iter_data.error)
669 return ERR_PTR(-EINVAL);
670
671 return bss_iter_data.vif;
672 }
673
674 struct iwl_bss_find_iter_data {
675 struct ieee80211_vif *vif;
676 u32 macid;
677 };
678
679 static void iwl_mvm_bss_find_iface_iterator(void *_data, u8 *mac,
680 struct ieee80211_vif *vif)
681 {
682 struct iwl_bss_find_iter_data *data = _data;
683 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
684
685 if (mvmvif->id == data->macid)
686 data->vif = vif;
687 }
688
689 struct ieee80211_vif *iwl_mvm_get_vif_by_macid(struct iwl_mvm *mvm, u32 macid)
690 {
691 struct iwl_bss_find_iter_data data = {
692 .macid = macid,
693 };
694
695 lockdep_assert_held(&mvm->mutex);
696
697 ieee80211_iterate_active_interfaces_atomic(
698 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
699 iwl_mvm_bss_find_iface_iterator, &data);
700
701 return data.vif;
702 }
703
704 struct iwl_sta_iter_data {
705 bool assoc;
706 };
707
708 static void iwl_mvm_sta_iface_iterator(void *_data, u8 *mac,
709 struct ieee80211_vif *vif)
710 {
711 struct iwl_sta_iter_data *data = _data;
712
713 if (vif->type != NL80211_IFTYPE_STATION)
714 return;
715
716 if (vif->cfg.assoc)
717 data->assoc = true;
718 }
719
720 bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm)
721 {
722 struct iwl_sta_iter_data data = {
723 .assoc = false,
724 };
725
726 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
727 IEEE80211_IFACE_ITER_NORMAL,
728 iwl_mvm_sta_iface_iterator,
729 &data);
730 return data.assoc;
731 }
732
733 unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
734 struct ieee80211_vif *vif)
735 {
736 unsigned int default_timeout =
737 mvm->trans->mac_cfg->base->wd_timeout;
738
739 /*
740 * We can't know when the station is asleep or awake, so we
741 * must disable the queue hang detection.
742 */
743 if (fw_has_capa(&mvm->fw->ucode_capa,
744 IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) &&
745 vif->type == NL80211_IFTYPE_AP)
746 return IWL_WATCHDOG_DISABLED;
747 return default_timeout;
748 }
749
750 void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
751 const char *errmsg)
752 {
753 struct iwl_fw_dbg_trigger_tlv *trig;
754 struct iwl_fw_dbg_trigger_mlme *trig_mlme;
755
756 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
757 FW_DBG_TRIGGER_MLME);
758 if (!trig)
759 goto out;
760
761 trig_mlme = (void *)trig->data;
762
763 if (trig_mlme->stop_connection_loss &&
764 --trig_mlme->stop_connection_loss)
765 goto out;
766
767 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "%s", errmsg);
768
769 out:
770 ieee80211_connection_loss(vif);
771 }
772
773 void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
774 struct ieee80211_vif *vif,
775 const struct ieee80211_sta *sta,
776 u16 tid)
777 {
778 struct iwl_fw_dbg_trigger_tlv *trig;
779 struct iwl_fw_dbg_trigger_ba *ba_trig;
780
781 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
782 FW_DBG_TRIGGER_BA);
783 if (!trig)
784 return;
785
786 ba_trig = (void *)trig->data;
787
788 if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(tid)))
789 return;
790
791 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
792 "Frame from %pM timed out, tid %d",
793 sta->addr, tid);
794 }
795
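/*
 * Illustrative example (hypothetical values): airtime is accumulated in
 * usec while elapsed is in msec, so airtime = 50000 usec over
 * elapsed = 1000 msec yields (100 * 50000 / 1000) / USEC_PER_MSEC = 5,
 * i.e. a 5% load.
 */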
796 u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed)
797 {
798 if (!elapsed)
799 return 0;
800
801 return (100 * airtime / elapsed) / USEC_PER_MSEC;
802 }
803
804 static enum iwl_mvm_traffic_load
805 iwl_mvm_tcm_load(struct iwl_mvm *mvm, u32 airtime, unsigned long elapsed)
806 {
807 u8 load = iwl_mvm_tcm_load_percentage(airtime, elapsed);
808
809 if (load > IWL_MVM_TCM_LOAD_HIGH_THRESH)
810 return IWL_MVM_TRAFFIC_HIGH;
811 if (load > IWL_MVM_TCM_LOAD_MEDIUM_THRESH)
812 return IWL_MVM_TRAFFIC_MEDIUM;
813
814 return IWL_MVM_TRAFFIC_LOW;
815 }
816
817 static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
818 {
819 struct iwl_mvm *mvm = _data;
820 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
821 bool low_latency, prev = mvmvif->low_latency & LOW_LATENCY_TRAFFIC;
822
823 if (mvmvif->id >= NUM_MAC_INDEX_DRIVER)
824 return;
825
826 low_latency = mvm->tcm.result.low_latency[mvmvif->id];
827
828 if (!mvm->tcm.result.change[mvmvif->id] &&
829 prev == low_latency) {
830 iwl_mvm_update_quotas(mvm, false, NULL);
831 return;
832 }
833
834 if (prev != low_latency) {
835 /* this sends traffic load and updates quota as well */
836 iwl_mvm_update_low_latency(mvm, vif, low_latency,
837 LOW_LATENCY_TRAFFIC);
838 } else {
839 iwl_mvm_update_quotas(mvm, false, NULL);
840 }
841 }
842
843 static void iwl_mvm_tcm_results(struct iwl_mvm *mvm)
844 {
845 guard(mvm)(mvm);
846
847 ieee80211_iterate_active_interfaces(
848 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
849 iwl_mvm_tcm_iter, mvm);
850
851 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
852 iwl_mvm_config_scan(mvm);
853 }
854
855 static void iwl_mvm_tcm_uapsd_nonagg_detected_wk(struct work_struct *wk)
856 {
857 struct iwl_mvm *mvm;
858 struct iwl_mvm_vif *mvmvif;
859 struct ieee80211_vif *vif;
860
861 mvmvif = container_of(wk, struct iwl_mvm_vif,
862 uapsd_nonagg_detected_wk.work);
863 vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
864 mvm = mvmvif->mvm;
865
866 if (mvm->tcm.data[mvmvif->id].opened_rx_ba_sessions)
867 return;
868
869 /* remember that this AP is broken */
870 memcpy(mvm->uapsd_noagg_bssids[mvm->uapsd_noagg_bssid_write_idx].addr,
871 vif->bss_conf.bssid, ETH_ALEN);
872 mvm->uapsd_noagg_bssid_write_idx++;
873 if (mvm->uapsd_noagg_bssid_write_idx >= IWL_MVM_UAPSD_NOAGG_LIST_LEN)
874 mvm->uapsd_noagg_bssid_write_idx = 0;
875
876 iwl_mvm_connection_loss(mvm, vif,
877 "AP isn't using AMPDU with uAPSD enabled");
878 }
879
880 static void iwl_mvm_uapsd_agg_disconnect(struct iwl_mvm *mvm,
881 struct ieee80211_vif *vif)
882 {
883 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
884
885 if (vif->type != NL80211_IFTYPE_STATION)
886 return;
887
888 if (!vif->cfg.assoc)
889 return;
890
891 if (!mvmvif->deflink.queue_params[IEEE80211_AC_VO].uapsd &&
892 !mvmvif->deflink.queue_params[IEEE80211_AC_VI].uapsd &&
893 !mvmvif->deflink.queue_params[IEEE80211_AC_BE].uapsd &&
894 !mvmvif->deflink.queue_params[IEEE80211_AC_BK].uapsd)
895 return;
896
897 if (mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected)
898 return;
899
900 mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected = true;
901 IWL_INFO(mvm,
902 "detected AP should do aggregation but isn't, likely due to U-APSD\n");
903 schedule_delayed_work(&mvmvif->uapsd_nonagg_detected_wk,
904 15 * HZ);
905 }
906
907 static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm,
908 unsigned int elapsed,
909 int mac)
910 {
911 u64 bytes = mvm->tcm.data[mac].uapsd_nonagg_detect.rx_bytes;
912 u64 tpt;
913 unsigned long rate;
914 struct ieee80211_vif *vif;
915
916 rate = ewma_rate_read(&mvm->tcm.data[mac].uapsd_nonagg_detect.rate);
917
918 if (!rate || mvm->tcm.data[mac].opened_rx_ba_sessions ||
919 mvm->tcm.data[mac].uapsd_nonagg_detect.detected)
920 return;
921
922 if (iwl_mvm_has_new_rx_api(mvm)) {
923 tpt = 8 * bytes; /* kbps */
924 do_div(tpt, elapsed);
925 rate *= 1000; /* kbps */
926 if (tpt < 22 * rate / 100)
927 return;
928 } else {
929 /*
930 * the rate here is actually the threshold, in 100Kbps units,
931 * so do the needed conversion from bytes to 100Kbps:
932 * 100kb = bits / (100 * 1000),
933 * 100kbps = 100kb / (msecs / 1000) ==
934 * (bits / (100 * 1000)) / (msecs / 1000) ==
935 * bits / (100 * msecs)
936 */
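/*
 * Worked example (hypothetical values): bytes = 1250000 over
 * elapsed = 1000 msec gives tpt = 8 * 1250000 / (1000 * 100) = 100,
 * i.e. 10 Mbps expressed in 100Kbps units, which is then compared
 * against the threshold in 'rate'.
 */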
937 tpt = (8 * bytes);
938 do_div(tpt, elapsed * 100);
939 if (tpt < rate)
940 return;
941 }
942
943 rcu_read_lock();
944 vif = rcu_dereference(mvm->vif_id_to_mac[mac]);
945 if (vif)
946 iwl_mvm_uapsd_agg_disconnect(mvm, vif);
947 rcu_read_unlock();
948 }
949
950 static void iwl_mvm_tcm_iterator(void *_data, u8 *mac,
951 struct ieee80211_vif *vif)
952 {
953 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
954 u32 *band = _data;
955
956 if (!mvmvif->deflink.phy_ctxt)
957 return;
958
959 band[mvmvif->id] = mvmvif->deflink.phy_ctxt->channel->band;
960 }
961
962 static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm,
963 unsigned long ts,
964 bool handle_uapsd)
965 {
966 unsigned int elapsed = jiffies_to_msecs(ts - mvm->tcm.ts);
967 unsigned int uapsd_elapsed =
968 jiffies_to_msecs(ts - mvm->tcm.uapsd_nonagg_ts);
969 u32 total_airtime = 0;
970 u32 band_airtime[NUM_NL80211_BANDS] = {0};
971 u32 band[NUM_MAC_INDEX_DRIVER] = {0};
972 int ac, mac, i;
973 bool low_latency = false;
974 enum iwl_mvm_traffic_load load, band_load;
975 bool handle_ll = time_after(ts, mvm->tcm.ll_ts + MVM_LL_PERIOD);
976
977 if (handle_ll)
978 mvm->tcm.ll_ts = ts;
979 if (handle_uapsd)
980 mvm->tcm.uapsd_nonagg_ts = ts;
981
982 mvm->tcm.result.elapsed = elapsed;
983
984 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
985 IEEE80211_IFACE_ITER_NORMAL,
986 iwl_mvm_tcm_iterator,
987 &band);
988
989 for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
990 struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
991 u32 vo_vi_pkts = 0;
992 u32 airtime = mdata->rx.airtime + mdata->tx.airtime;
993
994 total_airtime += airtime;
995 band_airtime[band[mac]] += airtime;
996
997 load = iwl_mvm_tcm_load(mvm, airtime, elapsed);
998 mvm->tcm.result.change[mac] = load != mvm->tcm.result.load[mac];
999 mvm->tcm.result.load[mac] = load;
1000 mvm->tcm.result.airtime[mac] = airtime;
1001
1002 for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_VI; ac++)
1003 vo_vi_pkts += mdata->rx.pkts[ac] +
1004 mdata->tx.pkts[ac];
1005
1006 /* enable immediately with enough packets but defer disabling */
1007 if (vo_vi_pkts > IWL_MVM_TCM_LOWLAT_ENABLE_THRESH)
1008 mvm->tcm.result.low_latency[mac] = true;
1009 else if (handle_ll)
1010 mvm->tcm.result.low_latency[mac] = false;
1011
1012 if (handle_ll) {
1013 /* clear old data */
1014 memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
1015 memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
1016 }
1017 low_latency |= mvm->tcm.result.low_latency[mac];
1018
1019 if (!mvm->tcm.result.low_latency[mac] && handle_uapsd)
1020 iwl_mvm_check_uapsd_agg_expected_tpt(mvm, uapsd_elapsed,
1021 mac);
1022 /* clear old data */
1023 if (handle_uapsd)
1024 mdata->uapsd_nonagg_detect.rx_bytes = 0;
1025 memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
1026 memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));
1027 }
1028
1029 load = iwl_mvm_tcm_load(mvm, total_airtime, elapsed);
1030 mvm->tcm.result.global_load = load;
1031
1032 for (i = 0; i < NUM_NL80211_BANDS; i++) {
1033 band_load = iwl_mvm_tcm_load(mvm, band_airtime[i], elapsed);
1034 mvm->tcm.result.band_load[i] = band_load;
1035 }
1036
1037 /*
1038 * If the current load isn't low we need to force re-evaluation
1039 * in the TCM period, so that we can return to low load if there
1040 * was no traffic at all (and thus iwl_mvm_recalc_tcm didn't get
1041 * triggered by traffic).
1042 */
1043 if (load != IWL_MVM_TRAFFIC_LOW)
1044 return MVM_TCM_PERIOD;
1045 /*
1046 * If low-latency is active we need to force re-evaluation after
1047 * (the longer) MVM_LL_PERIOD, so that we can disable low-latency
1048 * when there's no traffic at all.
1049 */
1050 if (low_latency)
1051 return MVM_LL_PERIOD;
1052 /*
1053 * Otherwise, we don't need to run the work struct because we're
1054 * in the default "idle" state - traffic indication is low (which
1055 * also covers the "no traffic" case) and low-latency is disabled
1056 * so there's no state that may need to be disabled when there's
1057 * no traffic at all.
1058 *
1059 * Note that this has no impact on the regular scheduling of the
1060 * updates triggered by traffic - those happen whenever one of the
1061 * two timeouts expire (if there's traffic at all.)
1062 */
1063 return 0;
1064 }
1065
1066 void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm)
1067 {
1068 unsigned long ts = jiffies;
1069 bool handle_uapsd =
1070 time_after(ts, mvm->tcm.uapsd_nonagg_ts +
1071 msecs_to_jiffies(IWL_MVM_UAPSD_NONAGG_PERIOD));
1072
1073 spin_lock(&mvm->tcm.lock);
1074 if (mvm->tcm.paused || !time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
1075 spin_unlock(&mvm->tcm.lock);
1076 return;
1077 }
1078 spin_unlock(&mvm->tcm.lock);
1079
1080 if (handle_uapsd && iwl_mvm_has_new_rx_api(mvm)) {
1081 guard(mvm)(mvm);
1082 if (iwl_mvm_request_statistics(mvm, true))
1083 handle_uapsd = false;
1084 }
1085
1086 spin_lock(&mvm->tcm.lock);
1087 /* re-check if somebody else won the recheck race */
1088 if (!mvm->tcm.paused && time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
1089 /* calculate statistics */
1090 unsigned long work_delay = iwl_mvm_calc_tcm_stats(mvm, ts,
1091 handle_uapsd);
1092
1093 /* the memset needs to be visible before the timestamp */
1094 smp_mb();
1095 mvm->tcm.ts = ts;
1096 if (work_delay)
1097 schedule_delayed_work(&mvm->tcm.work, work_delay);
1098 }
1099 spin_unlock(&mvm->tcm.lock);
1100
1101 iwl_mvm_tcm_results(mvm);
1102 }
1103
1104 void iwl_mvm_tcm_work(struct work_struct *work)
1105 {
1106 struct delayed_work *delayed_work = to_delayed_work(work);
1107 struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm,
1108 tcm.work);
1109
1110 iwl_mvm_recalc_tcm(mvm);
1111 }
1112
1113 void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel)
1114 {
1115 spin_lock_bh(&mvm->tcm.lock);
1116 mvm->tcm.paused = true;
1117 spin_unlock_bh(&mvm->tcm.lock);
1118 if (with_cancel)
1119 cancel_delayed_work_sync(&mvm->tcm.work);
1120 }
1121
1122 void iwl_mvm_resume_tcm(struct iwl_mvm *mvm)
1123 {
1124 int mac;
1125 bool low_latency = false;
1126
1127 spin_lock_bh(&mvm->tcm.lock);
1128 mvm->tcm.ts = jiffies;
1129 mvm->tcm.ll_ts = jiffies;
1130 for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
1131 struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
1132
1133 memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
1134 memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
1135 memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
1136 memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));
1137
1138 if (mvm->tcm.result.low_latency[mac])
1139 low_latency = true;
1140 }
1141 /* The TCM data needs to be reset before "paused" flag changes */
1142 smp_mb();
1143 mvm->tcm.paused = false;
1144
1145 /*
1146 * if the current load is not low or low latency is active, force
1147 * re-evaluation to cover the case of no traffic.
1148 */
1149 if (mvm->tcm.result.global_load > IWL_MVM_TRAFFIC_LOW)
1150 schedule_delayed_work(&mvm->tcm.work, MVM_TCM_PERIOD);
1151 else if (low_latency)
1152 schedule_delayed_work(&mvm->tcm.work, MVM_LL_PERIOD);
1153
1154 spin_unlock_bh(&mvm->tcm.lock);
1155 }
1156
1157 void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1158 {
1159 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1160
1161 INIT_DELAYED_WORK(&mvmvif->uapsd_nonagg_detected_wk,
1162 iwl_mvm_tcm_uapsd_nonagg_detected_wk);
1163 }
1164
1165 void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1166 {
1167 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1168
1169 cancel_delayed_work_sync(&mvmvif->uapsd_nonagg_detected_wk);
1170 }
1171
1172 u32 iwl_mvm_get_systime(struct iwl_mvm *mvm)
1173 {
1174 u32 reg_addr = DEVICE_SYSTEM_TIME_REG;
1175
1176 if (mvm->trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_22000 &&
1177 mvm->trans->mac_cfg->base->gp2_reg_addr)
1178 reg_addr = mvm->trans->mac_cfg->base->gp2_reg_addr;
1179
1180 return iwl_read_prph(mvm->trans, reg_addr);
1181 }
1182
1183 void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type,
1184 u32 *gp2, u64 *boottime, ktime_t *realtime)
1185 {
1186 bool ps_disabled;
1187
1188 lockdep_assert_held(&mvm->mutex);
1189
1190 /* Disable power save when reading GP2 */
1191 ps_disabled = mvm->ps_disabled;
1192 if (!ps_disabled) {
1193 mvm->ps_disabled = true;
1194 iwl_mvm_power_update_device(mvm);
1195 }
1196
1197 *gp2 = iwl_mvm_get_systime(mvm);
1198
1199 if (clock_type == CLOCK_BOOTTIME && boottime)
1200 *boottime = ktime_get_boottime_ns();
1201 else if (clock_type == CLOCK_REALTIME && realtime)
1202 *realtime = ktime_get_real();
1203
1204 if (!ps_disabled) {
1205 mvm->ps_disabled = ps_disabled;
1206 iwl_mvm_power_update_device(mvm);
1207 }
1208 }
1209
1210 /* Find if at least two links from different vifs use the same channel
1211 * FIXME: consider having a refcount array in struct iwl_mvm_vif for
1212 * used phy_ctxt ids.
1213 */
1214 bool iwl_mvm_have_links_same_channel(struct iwl_mvm_vif *vif1,
1215 struct iwl_mvm_vif *vif2)
1216 {
1217 unsigned int i, j;
1218
1219 for_each_mvm_vif_valid_link(vif1, i) {
1220 for_each_mvm_vif_valid_link(vif2, j) {
1221 if (vif1->link[i]->phy_ctxt == vif2->link[j]->phy_ctxt)
1222 return true;
1223 }
1224 }
1225
1226 return false;
1227 }
1228
1229 bool iwl_mvm_vif_is_active(struct iwl_mvm_vif *mvmvif)
1230 {
1231 unsigned int i;
1232
1233 /* FIXME: can it fail when phy_ctxt is assigned? */
1234 for_each_mvm_vif_valid_link(mvmvif, i) {
1235 if (mvmvif->link[i]->phy_ctxt &&
1236 mvmvif->link[i]->phy_ctxt->id < NUM_PHY_CTX)
1237 return true;
1238 }
1239
1240 return false;
1241 }
1242
1243 static u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags)
1244 {
1245 int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
1246 int idx;
1247 bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1);
1248 int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0;
1249 int last = ofdm ? IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE;
1250
1251 for (idx = offset; idx < last; idx++)
1252 if (iwl_fw_rate_idx_to_plcp(idx) == rate)
1253 return idx - offset;
1254 return IWL_RATE_INVALID;
1255 }
1256
1257 u32 iwl_mvm_v3_rate_from_fw(__le32 rate, u8 rate_ver)
1258 {
1259 u32 rate_v3 = 0, rate_v1;
1260 u32 dup = 0;
1261
1262 if (rate_ver > 1)
1263 return iwl_v3_rate_from_v2_v3(rate, rate_ver >= 3);
1264
1265 rate_v1 = le32_to_cpu(rate);
1266 if (rate_v1 == 0)
1267 return rate_v1;
1268 /* convert rate */
1269 if (rate_v1 & RATE_MCS_HT_MSK_V1) {
1270 u32 nss;
1271
1272 rate_v3 |= RATE_MCS_MOD_TYPE_HT;
1273 rate_v3 |=
1274 rate_v1 & RATE_HT_MCS_RATE_CODE_MSK_V1;
1275 nss = u32_get_bits(rate_v1, RATE_HT_MCS_MIMO2_MSK);
1276 rate_v3 |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);
1277 } else if (rate_v1 & RATE_MCS_VHT_MSK_V1 ||
1278 rate_v1 & RATE_MCS_HE_MSK_V1) {
1279 u32 nss = u32_get_bits(rate_v1, RATE_VHT_MCS_NSS_MSK);
1280
1281 rate_v3 |= rate_v1 & RATE_VHT_MCS_RATE_CODE_MSK;
1282
1283 rate_v3 |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);
1284
1285 if (rate_v1 & RATE_MCS_HE_MSK_V1) {
1286 u32 he_type_bits = rate_v1 & RATE_MCS_HE_TYPE_MSK_V1;
1287 u32 he_type = he_type_bits >> RATE_MCS_HE_TYPE_POS_V1;
1288 u32 he_106t = (rate_v1 & RATE_MCS_HE_106T_MSK_V1) >>
1289 RATE_MCS_HE_106T_POS_V1;
1290 u32 he_gi_ltf = (rate_v1 & RATE_MCS_HE_GI_LTF_MSK_V1) >>
1291 RATE_MCS_HE_GI_LTF_POS;
1292
1293 if ((he_type_bits == RATE_MCS_HE_TYPE_SU ||
1294 he_type_bits == RATE_MCS_HE_TYPE_EXT_SU) &&
1295 he_gi_ltf == RATE_MCS_HE_SU_4_LTF)
1296 /* the new rate format has an additional bit to
1297 * represent the value 4, rather than using the SGI
1298 * bit for this purpose as was done in the
1299 * old format
1300 */
1301 he_gi_ltf += (rate_v1 & RATE_MCS_SGI_MSK_V1) >>
1302 RATE_MCS_SGI_POS_V1;
1303
1304 rate_v3 |= he_gi_ltf << RATE_MCS_HE_GI_LTF_POS;
1305 rate_v3 |= he_type << RATE_MCS_HE_TYPE_POS;
1306 rate_v3 |= he_106t << RATE_MCS_HE_106T_POS;
1307 rate_v3 |= rate_v1 & RATE_HE_DUAL_CARRIER_MODE_MSK;
1308 rate_v3 |= RATE_MCS_MOD_TYPE_HE;
1309 } else {
1310 rate_v3 |= RATE_MCS_MOD_TYPE_VHT;
1311 }
1312 /* if legacy format */
1313 } else {
1314 u32 legacy_rate = iwl_legacy_rate_to_fw_idx(rate_v1);
1315
1316 if (WARN_ON_ONCE(legacy_rate == IWL_RATE_INVALID))
1317 legacy_rate = (rate_v1 & RATE_MCS_CCK_MSK_V1) ?
1318 IWL_FIRST_CCK_RATE : IWL_FIRST_OFDM_RATE;
1319
1320 rate_v3 |= legacy_rate;
1321 if (!(rate_v1 & RATE_MCS_CCK_MSK_V1))
1322 rate_v3 |= RATE_MCS_MOD_TYPE_LEGACY_OFDM;
1323 }
1324
1325 /* convert flags */
1326 if (rate_v1 & RATE_MCS_LDPC_MSK_V1)
1327 rate_v3 |= RATE_MCS_LDPC_MSK;
1328 rate_v3 |= (rate_v1 & RATE_MCS_CHAN_WIDTH_MSK_V1) |
1329 (rate_v1 & RATE_MCS_ANT_AB_MSK) |
1330 (rate_v1 & RATE_MCS_STBC_MSK) |
1331 (rate_v1 & RATE_MCS_BF_MSK);
1332
1333 dup = (rate_v1 & RATE_MCS_DUP_MSK_V1) >> RATE_MCS_DUP_POS_V1;
1334 if (dup) {
1335 rate_v3 |= RATE_MCS_DUP_MSK;
1336 rate_v3 |= dup << RATE_MCS_CHAN_WIDTH_POS;
1337 }
1338
1339 if ((!(rate_v1 & RATE_MCS_HE_MSK_V1)) &&
1340 (rate_v1 & RATE_MCS_SGI_MSK_V1))
1341 rate_v3 |= RATE_MCS_SGI_MSK;
1342
1343 return rate_v3;
1344 }
1345
1346 __le32 iwl_mvm_v3_rate_to_fw(u32 rate, u8 rate_ver)
1347 {
1348 u32 result = 0;
1349 int rate_idx;
1350
1351 if (rate_ver > 1)
1352 return iwl_v3_rate_to_v2_v3(rate, rate_ver > 2);
1353
1354 switch (rate & RATE_MCS_MOD_TYPE_MSK) {
1355 case RATE_MCS_MOD_TYPE_CCK:
1356 result = RATE_MCS_CCK_MSK_V1;
1357 fallthrough;
1358 case RATE_MCS_MOD_TYPE_LEGACY_OFDM:
1359 rate_idx = u32_get_bits(rate, RATE_LEGACY_RATE_MSK);
1360 if (!(result & RATE_MCS_CCK_MSK_V1))
1361 rate_idx += IWL_FIRST_OFDM_RATE;
1362 result |= u32_encode_bits(iwl_fw_rate_idx_to_plcp(rate_idx),
1363 RATE_LEGACY_RATE_MSK_V1);
1364 break;
1365 case RATE_MCS_MOD_TYPE_HT:
1366 result = RATE_MCS_HT_MSK_V1;
1367 result |= u32_encode_bits(u32_get_bits(rate,
1368 RATE_HT_MCS_CODE_MSK),
1369 RATE_HT_MCS_RATE_CODE_MSK_V1);
1370 result |= u32_encode_bits(u32_get_bits(rate,
1371 RATE_MCS_NSS_MSK),
1372 RATE_HT_MCS_MIMO2_MSK);
1373 break;
1374 case RATE_MCS_MOD_TYPE_VHT:
1375 result = RATE_MCS_VHT_MSK_V1;
1376 result |= u32_encode_bits(u32_get_bits(rate,
1377 RATE_VHT_MCS_NSS_MSK),
1378 RATE_MCS_CODE_MSK);
1379 result |= u32_encode_bits(u32_get_bits(rate, RATE_MCS_NSS_MSK),
1380 RATE_VHT_MCS_NSS_MSK);
1381 break;
1382 case RATE_MCS_MOD_TYPE_HE: /* not generated */
1383 default:
1384 WARN_ONCE(1, "bad modulation type %d\n",
1385 u32_get_bits(rate, RATE_MCS_MOD_TYPE_MSK));
1386 return 0;
1387 }
1388
1389 if (rate & RATE_MCS_LDPC_MSK)
1390 result |= RATE_MCS_LDPC_MSK_V1;
1391 WARN_ON_ONCE(u32_get_bits(rate, RATE_MCS_CHAN_WIDTH_MSK) >
1392 RATE_MCS_CHAN_WIDTH_160_VAL);
1393 result |= (rate & RATE_MCS_CHAN_WIDTH_MSK_V1) |
1394 (rate & RATE_MCS_ANT_AB_MSK) |
1395 (rate & RATE_MCS_STBC_MSK) |
1396 (rate & RATE_MCS_BF_MSK);
1397
1398 /* not handling DUP since we don't use it */
1399 WARN_ON_ONCE(rate & RATE_MCS_DUP_MSK);
1400
1401 if (rate & RATE_MCS_SGI_MSK)
1402 result |= RATE_MCS_SGI_MSK_V1;
1403
1404 return cpu_to_le32(result);
1405 }
1406