Lines matching +full:queue +full:-pkt +full:-tx in drivers/net/wireless/intel/iwlwifi/mvm/tx.c
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2016-2017 Intel Deutschland GmbH
14 #include "iwl-trans.h"
15 #include "iwl-nvm-utils.h"
16 #include "iwl-utils.h"
19 #include "time-sync.h"
28 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_BA); in iwl_mvm_bar_check_trigger()
32 ba_trig = (void *)trig->data; in iwl_mvm_bar_check_trigger()
34 if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid))) in iwl_mvm_bar_check_trigger()
37 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, in iwl_mvm_bar_check_trigger()
49 struct ieee80211_hdr *hdr = (void *)skb->data; in iwl_mvm_tx_csum()
50 u16 mh_len = ieee80211_hdrlen(hdr->frame_control); in iwl_mvm_tx_csum()
56 if (skb->ip_summed != CHECKSUM_PARTIAL) in iwl_mvm_tx_csum()
60 if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) || in iwl_mvm_tx_csum()
61 (skb->protocol != htons(ETH_P_IP) && in iwl_mvm_tx_csum()
62 skb->protocol != htons(ETH_P_IPV6)), in iwl_mvm_tx_csum()
68 if (skb->protocol == htons(ETH_P_IP)) { in iwl_mvm_tx_csum()
69 protocol = ip_hdr(skb)->protocol; in iwl_mvm_tx_csum()
76 protocol = ipv6h->nexthdr; in iwl_mvm_tx_csum()
89 protocol = hp->nexthdr; in iwl_mvm_tx_csum()
92 /* if we get here, the protocol should be TCP or UDP */ in iwl_mvm_tx_csum()
113 if (skb->protocol == htons(ETH_P_IP) && amsdu) { in iwl_mvm_tx_csum()
114 ip_hdr(skb)->check = 0; in iwl_mvm_tx_csum()
120 tcp_hdr(skb)->check = 0; in iwl_mvm_tx_csum()
122 udp_hdr(skb)->check = 0; in iwl_mvm_tx_csum()
129 * In the new Tx API, the IV is always added by the firmware. in iwl_mvm_tx_csum()
131 if (!iwl_mvm_has_new_tx_api(mvm) && info->control.hw_key && in iwl_mvm_tx_csum()
132 info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP40 && in iwl_mvm_tx_csum()
133 info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP104) in iwl_mvm_tx_csum()
134 mh_len += info->control.hw_key->iv_len; in iwl_mvm_tx_csum()
140 else if (ieee80211_hdrlen(hdr->frame_control) % 4) in iwl_mvm_tx_csum()
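For context, a sketch of how the offload_assist word is assembled at the end of iwl_mvm_tx_csum(); the bit names follow iwlwifi's fw/api/tx.h, but the exact statements are an assumption since the listing elides them:

	/* MAC header length is passed to the HW in 16-bit words; when the
	 * header is not a multiple of 4 bytes, a pad bit asks the transport
	 * to insert 2 bytes of padding after it.
	 */
	mh_len /= 2;
	offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;

	if (amsdu)
		offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
	else if (ieee80211_hdrlen(hdr->frame_control) % 4)
		offload_assist |= BIT(TX_CMD_OFFLD_PAD);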
148 * Sets most of the Tx cmd's fields
154 struct ieee80211_hdr *hdr = (void *)skb->data; in iwl_mvm_set_tx_cmd()
155 __le16 fc = hdr->frame_control; in iwl_mvm_set_tx_cmd()
156 u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags); in iwl_mvm_set_tx_cmd()
157 u32 len = skb->len + FCS_LEN; in iwl_mvm_set_tx_cmd()
161 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) || in iwl_mvm_set_tx_cmd()
163 !is_multicast_ether_addr(hdr->addr1))) in iwl_mvm_set_tx_cmd()
176 tx_cmd->tid_tspec = qc[0] & 0xf; in iwl_mvm_set_tx_cmd()
180 struct ieee80211_bar *bar = (void *)skb->data; in iwl_mvm_set_tx_cmd()
181 u16 control = le16_to_cpu(bar->control); in iwl_mvm_set_tx_cmd()
182 u16 ssn = le16_to_cpu(bar->start_seq_num); in iwl_mvm_set_tx_cmd()
185 tx_cmd->tid_tspec = (control & in iwl_mvm_set_tx_cmd()
188 WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT); in iwl_mvm_set_tx_cmd()
189 iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec, in iwl_mvm_set_tx_cmd()
193 tx_cmd->tid_tspec = IWL_TID_NON_QOS; in iwl_mvm_set_tx_cmd()
195 tx_cmd->tid_tspec = IWL_MAX_TID_COUNT; in iwl_mvm_set_tx_cmd()
197 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) in iwl_mvm_set_tx_cmd()
204 if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT) in iwl_mvm_set_tx_cmd()
205 ac = tid_to_mac80211_ac[tx_cmd->tid_tspec]; in iwl_mvm_set_tx_cmd()
214 tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC); in iwl_mvm_set_tx_cmd()
216 tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE); in iwl_mvm_set_tx_cmd()
218 tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT); in iwl_mvm_set_tx_cmd()
220 /* The spec allows Action frames in A-MPDU, we don't support in iwl_mvm_set_tx_cmd()
223 WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU); in iwl_mvm_set_tx_cmd()
224 } else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) { in iwl_mvm_set_tx_cmd()
225 tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT); in iwl_mvm_set_tx_cmd()
227 tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE); in iwl_mvm_set_tx_cmd()
230 if (ieee80211_is_data(fc) && len > mvm->rts_threshold && in iwl_mvm_set_tx_cmd()
231 !is_multicast_ether_addr(hdr->addr1)) in iwl_mvm_set_tx_cmd()
234 if (fw_has_capa(&mvm->fw->ucode_capa, in iwl_mvm_set_tx_cmd()
239 tx_cmd->tx_flags = cpu_to_le32(tx_flags); in iwl_mvm_set_tx_cmd()
240 /* Total # bytes to be transmitted - PCIe code will adjust for A-MSDU */ in iwl_mvm_set_tx_cmd()
241 tx_cmd->len = cpu_to_le16((u16)skb->len); in iwl_mvm_set_tx_cmd()
242 tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); in iwl_mvm_set_tx_cmd()
243 tx_cmd->sta_id = sta_id; in iwl_mvm_set_tx_cmd()
245 tx_cmd->offload_assist = in iwl_mvm_set_tx_cmd()
253 if (info->band == NL80211_BAND_2GHZ && in iwl_mvm_get_tx_ant()
255 return mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS; in iwl_mvm_get_tx_ant()
260 return BIT(mvmsta->tx_ant) << RATE_MCS_ANT_POS; in iwl_mvm_get_tx_ant()
263 return BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; in iwl_mvm_get_tx_ant()
278 info->control.vif); in iwl_mvm_convert_rate_idx()
280 /* Get PLCP rate for tx_cmd->rate_n_flags */ in iwl_mvm_convert_rate_idx()
281 rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(mvm->fw, rate_idx); in iwl_mvm_convert_rate_idx()
286 if (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) > 8) { in iwl_mvm_convert_rate_idx()
303 struct ieee80211_tx_rate *rate = &info->control.rates[0]; in iwl_mvm_get_inject_tx_rate()
311 if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { in iwl_mvm_get_inject_tx_rate()
318 if (rate->flags & IEEE80211_TX_RC_SHORT_GI) in iwl_mvm_get_inject_tx_rate()
320 if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) in iwl_mvm_get_inject_tx_rate()
322 else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH) in iwl_mvm_get_inject_tx_rate()
324 else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH) in iwl_mvm_get_inject_tx_rate()
327 if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 6) in iwl_mvm_get_inject_tx_rate()
329 } else if (rate->flags & IEEE80211_TX_RC_MCS) { in iwl_mvm_get_inject_tx_rate()
331 result |= u32_encode_bits(rate->idx, in iwl_mvm_get_inject_tx_rate()
334 if (rate->flags & IEEE80211_TX_RC_SHORT_GI) in iwl_mvm_get_inject_tx_rate()
336 if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) in iwl_mvm_get_inject_tx_rate()
338 if (info->flags & IEEE80211_TX_CTL_LDPC) in iwl_mvm_get_inject_tx_rate()
340 if (u32_get_bits(info->flags, IEEE80211_TX_CTL_STBC)) in iwl_mvm_get_inject_tx_rate()
343 if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 6) in iwl_mvm_get_inject_tx_rate()
346 int rate_idx = info->control.rates[0].idx; in iwl_mvm_get_inject_tx_rate()
351 if (info->control.antennas) in iwl_mvm_get_inject_tx_rate()
352 result |= u32_encode_bits(info->control.antennas, in iwl_mvm_get_inject_tx_rate()
364 int rate_idx = -1; in iwl_mvm_get_tx_rate()
366 if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) { in iwl_mvm_get_tx_rate()
367 /* info->control is only relevant for non-HW rate control */ in iwl_mvm_get_tx_rate()
370 WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS && in iwl_mvm_get_tx_rate()
373 info->control.rates[0].flags, in iwl_mvm_get_tx_rate()
374 info->control.rates[0].idx, in iwl_mvm_get_tx_rate()
376 sta ? iwl_mvm_sta_from_mac80211(sta)->sta_state : -1); in iwl_mvm_get_tx_rate()
378 rate_idx = info->control.rates[0].idx; in iwl_mvm_get_tx_rate()
383 if (info->band != NL80211_BAND_2GHZ || in iwl_mvm_get_tx_rate()
384 (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)) in iwl_mvm_get_tx_rate()
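The statement elided right after this condition remaps mac80211 rate indices into driver indices; a hedged sketch (IWL_FIRST_OFDM_RATE is a real iwlwifi define, the exact form is an assumption):

	/* Outside 2.4 GHz, or with CCK forbidden, mac80211's index 0 is
	 * the first OFDM rate, so skip past the driver's CCK entries:
	 */
	rate_idx += IWL_FIRST_OFDM_RATE;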
398 if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) in iwl_mvm_get_tx_rate_n_flags()
406 * Sets the fields in the Tx cmd that are rate related
413 tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT; in iwl_mvm_set_tx_cmd_rate()
417 tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT; in iwl_mvm_set_tx_cmd_rate()
418 tx_cmd->rts_retry_limit = in iwl_mvm_set_tx_cmd_rate()
419 min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit); in iwl_mvm_set_tx_cmd_rate()
421 tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT; in iwl_mvm_set_tx_cmd_rate()
423 tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY; in iwl_mvm_set_tx_cmd_rate()
432 !(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))) { in iwl_mvm_set_tx_cmd_rate()
435 if (mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED) { in iwl_mvm_set_tx_cmd_rate()
436 tx_cmd->initial_rate_index = 0; in iwl_mvm_set_tx_cmd_rate()
437 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); in iwl_mvm_set_tx_cmd_rate()
441 tx_cmd->tx_flags |= in iwl_mvm_set_tx_cmd_rate()
445 /* Set the rate in the TX cmd */ in iwl_mvm_set_tx_cmd_rate()
446 tx_cmd->rate_n_flags = in iwl_mvm_set_tx_cmd_rate()
453 struct ieee80211_key_conf *keyconf = info->control.hw_key; in iwl_mvm_set_tx_cmd_pn()
456 pn = atomic64_inc_return(&keyconf->tx_pn); in iwl_mvm_set_tx_cmd_pn()
459 crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6); in iwl_mvm_set_tx_cmd_pn()
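The surrounding elided lines write the rest of the CCMP/GCMP PN header; a sketch of the full byte layout (per the IEEE 802.11 CCMP header format; the exact statements are an assumption):

	u64 pn = atomic64_inc_return(&keyconf->tx_pn);

	crypto_hdr[0] = pn;		/* PN0 */
	crypto_hdr[1] = pn >> 8;	/* PN1 */
	crypto_hdr[2] = 0;		/* reserved */
	crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);	/* Ext IV + key id */
	crypto_hdr[4] = pn >> 16;	/* PN2 */
	crypto_hdr[5] = pn >> 24;	/* PN3 */
	crypto_hdr[6] = pn >> 32;	/* PN4 */
	crypto_hdr[7] = pn >> 40;	/* PN5 */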
468 * Sets the fields in the Tx cmd that are crypto related
476 struct ieee80211_key_conf *keyconf = info->control.hw_key; in iwl_mvm_set_tx_cmd_crypto()
477 u8 *crypto_hdr = skb_frag->data + hdrlen; in iwl_mvm_set_tx_cmd_crypto()
481 switch (keyconf->cipher) { in iwl_mvm_set_tx_cmd_crypto()
488 tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; in iwl_mvm_set_tx_cmd_crypto()
489 pn = atomic64_inc_return(&keyconf->tx_pn); in iwl_mvm_set_tx_cmd_crypto()
491 ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key); in iwl_mvm_set_tx_cmd_crypto()
495 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; in iwl_mvm_set_tx_cmd_crypto()
498 tx_cmd->sec_ctl |= TX_CMD_SEC_WEP | in iwl_mvm_set_tx_cmd_crypto()
499 ((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) & in iwl_mvm_set_tx_cmd_crypto()
502 memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen); in iwl_mvm_set_tx_cmd_crypto()
515 tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE; in iwl_mvm_set_tx_cmd_crypto()
516 tx_cmd->key[0] = keyconf->hw_key_idx; in iwl_mvm_set_tx_cmd_crypto()
520 tx_cmd->sec_ctl |= TX_CMD_SEC_EXT; in iwl_mvm_set_tx_cmd_crypto()
532 if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) in iwl_mvm_use_host_rate()
535 if (likely(ieee80211_is_data(hdr->frame_control) && in iwl_mvm_use_host_rate()
536 mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED)) in iwl_mvm_use_host_rate()
546 return mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ; in iwl_mvm_use_host_rate()
556 memcpy(out_hdr->addr3, addr3_override, ETH_ALEN); in iwl_mvm_copy_hdr()
560 * Allocates the Tx cmd and sets the driver data pointers in the skb
568 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; in iwl_mvm_set_tx_params()
572 dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans); in iwl_mvm_set_tx_params()
577 dev_cmd->hdr.cmd = TX_CMD; in iwl_mvm_set_tx_params()
586 if (ieee80211_is_data_qos(hdr->frame_control)) { in iwl_mvm_set_tx_params()
592 if (!info->control.hw_key) in iwl_mvm_set_tx_params()
605 hdr->frame_control); in iwl_mvm_set_tx_params()
606 } else if (!ieee80211_is_data(hdr->frame_control) || in iwl_mvm_set_tx_params()
607 mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) { in iwl_mvm_set_tx_params()
612 if (mvm->trans->trans_cfg->device_family >= in iwl_mvm_set_tx_params()
614 struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload; in iwl_mvm_set_tx_params()
618 cmd->offload_assist = cpu_to_le32(offload_assist); in iwl_mvm_set_tx_params()
621 cmd->len = cpu_to_le16((u16)skb->len); in iwl_mvm_set_tx_params()
624 iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override); in iwl_mvm_set_tx_params()
626 cmd->flags = cpu_to_le16(flags); in iwl_mvm_set_tx_params()
627 cmd->rate_n_flags = cpu_to_le32(rate_n_flags); in iwl_mvm_set_tx_params()
629 struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload; in iwl_mvm_set_tx_params()
633 cmd->offload_assist = cpu_to_le16(offload_assist); in iwl_mvm_set_tx_params()
636 cmd->len = cpu_to_le16((u16)skb->len); in iwl_mvm_set_tx_params()
639 iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override); in iwl_mvm_set_tx_params()
641 cmd->flags = cpu_to_le32(flags); in iwl_mvm_set_tx_params()
642 cmd->rate_n_flags = cpu_to_le32(rate_n_flags); in iwl_mvm_set_tx_params()
647 tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; in iwl_mvm_set_tx_params()
649 if (info->control.hw_key) in iwl_mvm_set_tx_params()
654 iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control); in iwl_mvm_set_tx_params()
657 iwl_mvm_copy_hdr(tx_cmd->hdr, hdr, hdrlen, addr3_override); in iwl_mvm_set_tx_params()
668 memset(&skb_info->status, 0, sizeof(skb_info->status)); in iwl_mvm_skb_prepare_status()
669 memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data)); in iwl_mvm_skb_prepare_status()
671 skb_info->driver_data[1] = cmd; in iwl_mvm_skb_prepare_status()
679 struct ieee80211_hdr *hdr = (void *)skb->data; in iwl_mvm_get_ctrl_vif_queue()
680 __le16 fc = hdr->frame_control; in iwl_mvm_get_ctrl_vif_queue()
682 switch (info->control.vif->type) { in iwl_mvm_get_ctrl_vif_queue()
686 * Non-bufferable frames use the broadcast station, thus they in iwl_mvm_get_ctrl_vif_queue()
687 * use the probe queue. in iwl_mvm_get_ctrl_vif_queue()
690 * response (with non-success status) for a station we can't in iwl_mvm_get_ctrl_vif_queue()
698 return link->mgmt_queue; in iwl_mvm_get_ctrl_vif_queue()
701 is_multicast_ether_addr(hdr->addr1)) in iwl_mvm_get_ctrl_vif_queue()
702 return link->cab_queue; in iwl_mvm_get_ctrl_vif_queue()
704 WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC, in iwl_mvm_get_ctrl_vif_queue()
706 return link->mgmt_queue; in iwl_mvm_get_ctrl_vif_queue()
709 return mvm->p2p_dev_queue; in iwl_mvm_get_ctrl_vif_queue()
712 return mvm->p2p_dev_queue; in iwl_mvm_get_ctrl_vif_queue()
714 WARN_ONCE(1, "Not a ctrl vif, no available queue\n"); in iwl_mvm_get_ctrl_vif_queue()
715 return -1; in iwl_mvm_get_ctrl_vif_queue()
724 iwl_mvm_vif_from_mac80211(info->control.vif); in iwl_mvm_probe_resp_set_noa()
725 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; in iwl_mvm_probe_resp_set_noa()
726 int base_len = (u8 *)mgmt->u.probe_resp.variable - (u8 *)mgmt; in iwl_mvm_probe_resp_set_noa()
739 resp_data = rcu_dereference(mvmvif->deflink.probe_resp_data); in iwl_mvm_probe_resp_set_noa()
743 if (!resp_data->notif.noa_active) in iwl_mvm_probe_resp_set_noa()
747 mgmt->u.probe_resp.variable, in iwl_mvm_probe_resp_set_noa()
748 skb->len - base_len, in iwl_mvm_probe_resp_set_noa()
755 if (skb_tailroom(skb) < resp_data->noa_len) { in iwl_mvm_probe_resp_set_noa()
756 if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) { in iwl_mvm_probe_resp_set_noa()
763 pos = skb_put(skb, resp_data->noa_len); in iwl_mvm_probe_resp_set_noa()
767 *pos++ = resp_data->noa_len - 2; in iwl_mvm_probe_resp_set_noa()
773 memcpy(pos, &resp_data->notif.noa_attr, in iwl_mvm_probe_resp_set_noa()
774 resp_data->noa_len - sizeof(struct ieee80211_vendor_ie)); in iwl_mvm_probe_resp_set_noa()
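Between the skb_put() and the memcpy() above, the elided lines write the vendor IE header in front of the NoA attribute; a hedged reconstruction (WLAN_OUI_WFA and WLAN_OUI_TYPE_WFA_P2P are standard linux/ieee80211.h defines):

	*pos++ = WLAN_EID_VENDOR_SPECIFIC;
	*pos++ = resp_data->noa_len - 2;	/* IE body length */
	*pos++ = (WLAN_OUI_WFA >> 16) & 0xff;	/* WFA OUI 50-6F-9A */
	*pos++ = (WLAN_OUI_WFA >> 8) & 0xff;
	*pos++ = WLAN_OUI_WFA & 0xff;
	*pos++ = WLAN_OUI_TYPE_WFA_P2P;		/* NoA attribute follows */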
782 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; in iwl_mvm_tx_skb_non_sta()
786 int hdrlen = ieee80211_hdrlen(hdr->frame_control); in iwl_mvm_tx_skb_non_sta()
787 __le16 fc = hdr->frame_control; in iwl_mvm_tx_skb_non_sta()
788 bool offchannel = IEEE80211_SKB_CB(skb)->flags & in iwl_mvm_tx_skb_non_sta()
790 int queue = -1; in iwl_mvm_tx_skb_non_sta() local
793 return -1; in iwl_mvm_tx_skb_non_sta()
795 memcpy(&info, skb->cb, sizeof(info)); in iwl_mvm_tx_skb_non_sta()
797 if (WARN_ON_ONCE(skb->len > IEEE80211_MAX_DATA_LEN + hdrlen)) in iwl_mvm_tx_skb_non_sta()
798 return -1; in iwl_mvm_tx_skb_non_sta()
801 return -1; in iwl_mvm_tx_skb_non_sta()
808 if ((info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE && in iwl_mvm_tx_skb_non_sta()
810 (info.control.vif->type == NL80211_IFTYPE_STATION && in iwl_mvm_tx_skb_non_sta()
816 * P2P Device uses the offchannel queue. in iwl_mvm_tx_skb_non_sta()
818 * and hence needs to be sent on the aux queue. in iwl_mvm_tx_skb_non_sta()
820 * also P2P Device uses the aux queue. in iwl_mvm_tx_skb_non_sta()
822 sta_id = mvm->aux_sta.sta_id; in iwl_mvm_tx_skb_non_sta()
823 queue = mvm->aux_queue; in iwl_mvm_tx_skb_non_sta()
824 if (WARN_ON(queue == IWL_MVM_INVALID_QUEUE)) in iwl_mvm_tx_skb_non_sta()
825 return -1; in iwl_mvm_tx_skb_non_sta()
826 } else if (info.control.vif->type == in iwl_mvm_tx_skb_non_sta()
828 info.control.vif->type == NL80211_IFTYPE_AP || in iwl_mvm_tx_skb_non_sta()
829 info.control.vif->type == NL80211_IFTYPE_ADHOC) { in iwl_mvm_tx_skb_non_sta()
835 if (info.control.vif->active_links) in iwl_mvm_tx_skb_non_sta()
836 link_id = ffs(info.control.vif->active_links) - 1; in iwl_mvm_tx_skb_non_sta()
841 link = mvmvif->link[link_id]; in iwl_mvm_tx_skb_non_sta()
843 return -1; in iwl_mvm_tx_skb_non_sta()
845 if (!ieee80211_is_data(hdr->frame_control)) in iwl_mvm_tx_skb_non_sta()
846 sta_id = link->bcast_sta.sta_id; in iwl_mvm_tx_skb_non_sta()
848 sta_id = link->mcast_sta.sta_id; in iwl_mvm_tx_skb_non_sta()
850 queue = iwl_mvm_get_ctrl_vif_queue(mvm, link, &info, in iwl_mvm_tx_skb_non_sta()
852 } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) { in iwl_mvm_tx_skb_non_sta()
853 queue = mvm->snif_queue; in iwl_mvm_tx_skb_non_sta()
854 sta_id = mvm->snif_sta.sta_id; in iwl_mvm_tx_skb_non_sta()
858 if (queue < 0) { in iwl_mvm_tx_skb_non_sta()
859 IWL_ERR(mvm, "No queue was found. Dropping TX\n"); in iwl_mvm_tx_skb_non_sta()
860 return -1; in iwl_mvm_tx_skb_non_sta()
866 IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue); in iwl_mvm_tx_skb_non_sta()
871 return -1; in iwl_mvm_tx_skb_non_sta()
873 /* From now on, we cannot access info->control */ in iwl_mvm_tx_skb_non_sta()
876 if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) { in iwl_mvm_tx_skb_non_sta()
877 iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); in iwl_mvm_tx_skb_non_sta()
878 return -1; in iwl_mvm_tx_skb_non_sta()
895 if (sta->deflink.he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm))) in iwl_mvm_max_amsdu_size()
902 * Add a security margin of 256 for the TX command + headers. in iwl_mvm_max_amsdu_size()
906 val = mvmsta->max_amsdu_len; in iwl_mvm_max_amsdu_size()
908 if (hweight16(sta->valid_links) <= 1) { in iwl_mvm_max_amsdu_size()
909 if (sta->valid_links) { in iwl_mvm_max_amsdu_size()
911 unsigned int link = ffs(sta->valid_links) - 1; in iwl_mvm_max_amsdu_size()
914 link_conf = rcu_dereference(mvmsta->vif->link_conf[link]); in iwl_mvm_max_amsdu_size()
918 band = link_conf->chanreq.oper.chan->band; in iwl_mvm_max_amsdu_size()
921 band = mvmsta->vif->bss_conf.chanreq.oper.chan->band; in iwl_mvm_max_amsdu_size()
925 } else if (fw_has_capa(&mvm->fw->ucode_capa, in iwl_mvm_max_amsdu_size()
930 mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256); in iwl_mvm_max_amsdu_size()
937 mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256); in iwl_mvm_max_amsdu_size()
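A worked example of the clamp above, with illustrative numbers only: a 5120-byte TXFIFO for the target AC leaves 5120 - 256 = 4864 bytes of A-MSDU after the 256-byte TX command/header margin, and the station's own limit can only lower that:

	val = min_t(unsigned int, mvmsta->max_amsdu_len,
		    mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);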
948 struct ieee80211_hdr *hdr = (void *)skb->data; in iwl_mvm_tx_tso()
949 unsigned int mss = skb_shinfo(skb)->gso_size; in iwl_mvm_tx_tso()
957 if (!mvmsta->max_amsdu_len || in iwl_mvm_tx_tso()
958 !ieee80211_is_data_qos(hdr->frame_control) || in iwl_mvm_tx_tso()
959 !mvmsta->amsdu_enabled) in iwl_mvm_tx_tso()
966 if (skb->protocol == htons(ETH_P_IPV6) && in iwl_mvm_tx_tso()
967 ((struct ipv6hdr *)skb_network_header(skb))->nexthdr != in iwl_mvm_tx_tso()
975 return -EINVAL; in iwl_mvm_tx_tso()
981 if ((info->flags & IEEE80211_TX_CTL_AMPDU && in iwl_mvm_tx_tso()
982 !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) || in iwl_mvm_tx_tso()
983 !(mvmsta->amsdu_enabled & BIT(tid))) in iwl_mvm_tx_tso()
990 min_t(unsigned int, sta->cur->max_amsdu_len, in iwl_mvm_tx_tso()
994 * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not in iwl_mvm_tx_tso()
995 * supported. This is a spec requirement (IEEE 802.11-2015 in iwl_mvm_tx_tso()
998 if (info->flags & IEEE80211_TX_CTL_AMPDU && in iwl_mvm_tx_tso()
999 !sta->deflink.vht_cap.vht_supported) in iwl_mvm_tx_tso()
1004 pad = (4 - subf_len) & 0x3; in iwl_mvm_tx_tso()
1007 * If we have N subframes in the A-MSDU, then the A-MSDU's size is in iwl_mvm_tx_tso()
1008 * N * subf_len + (N - 1) * pad. in iwl_mvm_tx_tso()
1012 if (sta->max_amsdu_subframes && in iwl_mvm_tx_tso()
1013 num_subframes > sta->max_amsdu_subframes) in iwl_mvm_tx_tso()
1014 num_subframes = sta->max_amsdu_subframes; in iwl_mvm_tx_tso()
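The subframe count follows from the size relation in the comment above; a consolidated sketch (the division itself sits on an elided line, so treat the exact form as an assumption):

	subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss;
	pad = (4 - subf_len) & 0x3;	/* subframes align to 4 bytes */
	/* solve N * subf_len + (N - 1) * pad <= max_amsdu_len for N: */
	num_subframes = (max_amsdu_len + pad) / (subf_len + pad);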
1016 tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) - in iwl_mvm_tx_tso()
1017 tcp_hdrlen(skb) + skb->data_len; in iwl_mvm_tx_tso()
1020 * Make sure we have enough TBs for the A-MSDU: in iwl_mvm_tx_tso()
1025 if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) > in iwl_mvm_tx_tso()
1026 mvm->trans->max_skb_frags) in iwl_mvm_tx_tso()
1032 /* This skb fits in one single A-MSDU */ in iwl_mvm_tx_tso()
1040 * create SKBs that can fit into one A-MSDU. in iwl_mvm_tx_tso()
1053 return -1; in iwl_mvm_tx_tso()
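On the segmentation "trick" mentioned above: the splitting lives in a shared helper (iwl_tx_tso_segment() in trees that carry iwl-utils.h); the core idea, as a hedged sketch:

	/* Inflate gso_size so the generic GSO code cuts the flow into
	 * chunks of num_subframes * mss payload bytes, i.e. SKBs that
	 * each fill exactly one A-MSDU.
	 */
	skb_shinfo(skb)->gso_size = num_subframes * mss;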
1057 /* Check if there are any timed-out TIDs on a given shared TXQ */
1060 unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap; in iwl_mvm_txq_should_update()
1068 if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] + in iwl_mvm_txq_should_update()
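A hedged completion of the truncated timeout test (IWL_MVM_DQA_QUEUE_TIMEOUT is the bound used elsewhere in mvm; the exact form is an assumption):

	if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] +
			IWL_MVM_DQA_QUEUE_TIMEOUT, jiffies))
		return true;	/* this TID timed out on the shared queue */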
1080 int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK; in iwl_mvm_tx_airtime()
1086 mdata = &mvm->tcm.data[mac]; in iwl_mvm_tx_airtime()
1088 if (mvm->tcm.paused) in iwl_mvm_tx_airtime()
1091 if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD)) in iwl_mvm_tx_airtime()
1092 schedule_delayed_work(&mvm->tcm.work, 0); in iwl_mvm_tx_airtime()
1094 mdata->tx.airtime += airtime; in iwl_mvm_tx_airtime()
1101 int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK; in iwl_mvm_tx_pkt_queued()
1105 return -EINVAL; in iwl_mvm_tx_pkt_queued()
1107 mdata = &mvm->tcm.data[mac]; in iwl_mvm_tx_pkt_queued()
1109 mdata->tx.pkts[ac]++; in iwl_mvm_tx_pkt_queued()
1115 * Builds the Tx cmd for an MPDU and hands it to the transport.
1124 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; in iwl_mvm_tx_mpdu()
1135 return -1; in iwl_mvm_tx_mpdu()
1138 fc = hdr->frame_control; in iwl_mvm_tx_mpdu()
1142 return -1; in iwl_mvm_tx_mpdu()
1144 if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_INVALID_STA)) in iwl_mvm_tx_mpdu()
1145 return -1; in iwl_mvm_tx_mpdu()
1147 if (unlikely(ieee80211_is_any_nullfunc(fc)) && sta->deflink.he_cap.has_he) in iwl_mvm_tx_mpdu()
1148 return -1; in iwl_mvm_tx_mpdu()
1154 sta, mvmsta->deflink.sta_id, in iwl_mvm_tx_mpdu()
1160 * we handle that entirely ourselves -- for uAPSD the firmware in iwl_mvm_tx_mpdu()
1161 * will always send a notification, and for PS-Poll responses in iwl_mvm_tx_mpdu()
1164 info->flags &= ~IEEE80211_TX_STATUS_EOSP; in iwl_mvm_tx_mpdu()
1166 spin_lock(&mvmsta->lock); in iwl_mvm_tx_mpdu()
1168 /* nullfunc frames should go to the MGMT queue regardless of QOS, in iwl_mvm_tx_mpdu()
1177 is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU; in iwl_mvm_tx_mpdu()
1179 mvmsta->tid_data[tid].state != IWL_AGG_ON, in iwl_mvm_tx_mpdu()
1181 mvmsta->tid_data[tid].state, tid)) in iwl_mvm_tx_mpdu()
1184 seq_number = mvmsta->tid_data[tid].seq_number; in iwl_mvm_tx_mpdu()
1188 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; in iwl_mvm_tx_mpdu()
1190 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); in iwl_mvm_tx_mpdu()
1191 hdr->seq_ctrl |= cpu_to_le16(seq_number); in iwl_mvm_tx_mpdu()
1193 tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl; in iwl_mvm_tx_mpdu()
1200 txq_id = mvmsta->tid_data[tid].txq_id; in iwl_mvm_tx_mpdu()
1202 WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); in iwl_mvm_tx_mpdu()
1205 iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); in iwl_mvm_tx_mpdu()
1206 spin_unlock(&mvmsta->lock); in iwl_mvm_tx_mpdu()
1207 return -1; in iwl_mvm_tx_mpdu()
1212 mvm->queue_info[txq_id].last_frame_time[tid] = jiffies; in iwl_mvm_tx_mpdu()
1215 * If we have timed-out TIDs - schedule the worker that will in iwl_mvm_tx_mpdu()
1219 * the TX flow. This isn't dangerous because scheduling in iwl_mvm_tx_mpdu()
1220 * mvm->add_stream_wk can't ruin the state, and if we DON'T in iwl_mvm_tx_mpdu()
1221 * schedule it due to some race condition then next TX we get in iwl_mvm_tx_mpdu()
1224 if (unlikely(mvm->queue_info[txq_id].status == in iwl_mvm_tx_mpdu()
1227 schedule_work(&mvm->add_stream_wk); in iwl_mvm_tx_mpdu()
1230 IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x len %d\n", in iwl_mvm_tx_mpdu()
1231 mvmsta->deflink.sta_id, tid, txq_id, in iwl_mvm_tx_mpdu()
1232 IEEE80211_SEQ_TO_SN(seq_number), skb->len); in iwl_mvm_tx_mpdu()
1234 /* From now on, we cannot access info->control */ in iwl_mvm_tx_mpdu()
1238 * The IV is introduced by the HW for new tx api, and it is not present in iwl_mvm_tx_mpdu()
1244 info->control.hw_key && in iwl_mvm_tx_mpdu()
1246 info->control.hw_key->iv_len : 0); in iwl_mvm_tx_mpdu()
1248 if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id)) in iwl_mvm_tx_mpdu()
1252 mvmsta->tid_data[tid].seq_number = seq_number + 0x10; in iwl_mvm_tx_mpdu()
1254 spin_unlock(&mvmsta->lock); in iwl_mvm_tx_mpdu()
1263 iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); in iwl_mvm_tx_mpdu()
1264 spin_unlock(&mvmsta->lock); in iwl_mvm_tx_mpdu()
1266 IWL_DEBUG_TX(mvm, "TX to [%d|%d] dropped\n", mvmsta->deflink.sta_id, in iwl_mvm_tx_mpdu()
1268 return -1; in iwl_mvm_tx_mpdu()
1284 return -1; in iwl_mvm_tx_skb_sta()
1288 if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_INVALID_STA)) in iwl_mvm_tx_skb_sta()
1289 return -1; in iwl_mvm_tx_skb_sta()
1291 memcpy(&info, skb->cb, sizeof(info)); in iwl_mvm_tx_skb_sta()
1296 payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) - in iwl_mvm_tx_skb_sta()
1297 tcp_hdrlen(skb) + skb->data_len; in iwl_mvm_tx_skb_sta()
1299 if (payload_len <= skb_shinfo(skb)->gso_size) in iwl_mvm_tx_skb_sta()
1306 return -1; in iwl_mvm_tx_skb_sta()
1315 * As described in IEEE Std 802.11-2020, table 9-30 (Address in iwl_mvm_tx_skb_sta()
1316 * field contents), A-MSDU address 3 should contain the BSSID in iwl_mvm_tx_skb_sta()
1321 * A-MSDU subframe headers from it. in iwl_mvm_tx_skb_sta()
1323 switch (vif->type) { in iwl_mvm_tx_skb_sta()
1325 addr3 = vif->cfg.ap_addr; in iwl_mvm_tx_skb_sta()
1328 addr3 = vif->addr; in iwl_mvm_tx_skb_sta()
1340 hdr = (void *)skb->data; in iwl_mvm_tx_skb_sta()
1341 amsdu = ieee80211_is_data_qos(hdr->frame_control) && in iwl_mvm_tx_skb_sta()
1354 ieee80211_free_txskb(mvm->hw, skb); in iwl_mvm_tx_skb_sta()
1369 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; in iwl_mvm_check_ratid_empty()
1370 struct ieee80211_vif *vif = mvmsta->vif; in iwl_mvm_check_ratid_empty()
1373 lockdep_assert_held(&mvmsta->lock); in iwl_mvm_check_ratid_empty()
1375 if ((tid_data->state == IWL_AGG_ON || in iwl_mvm_check_ratid_empty()
1376 tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) && in iwl_mvm_check_ratid_empty()
1379 * Now that this aggregation or DQA queue is empty tell in iwl_mvm_check_ratid_empty()
1390 normalized_ssn = tid_data->ssn; in iwl_mvm_check_ratid_empty()
1391 if (mvm->trans->trans_cfg->gen2) in iwl_mvm_check_ratid_empty()
1394 if (normalized_ssn != tid_data->next_reclaimed) in iwl_mvm_check_ratid_empty()
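The elided body of the gen2 branch above truncates the SSN, since next_reclaimed is only 8 bits wide on 22000-family hardware; a sketch (assumption on the exact form):

	if (mvm->trans->trans_cfg->gen2)
		normalized_ssn &= 0xff;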
1397 switch (tid_data->state) { in iwl_mvm_check_ratid_empty()
1401 tid_data->next_reclaimed); in iwl_mvm_check_ratid_empty()
1402 tid_data->state = IWL_AGG_STARTING; in iwl_mvm_check_ratid_empty()
1403 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); in iwl_mvm_check_ratid_empty()
1409 tid_data->next_reclaimed); in iwl_mvm_check_ratid_empty()
1410 tid_data->state = IWL_AGG_OFF; in iwl_mvm_check_ratid_empty()
1411 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); in iwl_mvm_check_ratid_empty()
1484 r->flags |= in iwl_mvm_hwrate_to_tx_rate()
1489 r->flags |= IEEE80211_TX_RC_SHORT_GI; in iwl_mvm_hwrate_to_tx_rate()
1491 r->flags |= IEEE80211_TX_RC_MCS; in iwl_mvm_hwrate_to_tx_rate()
1492 r->idx = rate; in iwl_mvm_hwrate_to_tx_rate()
1497 r->flags |= IEEE80211_TX_RC_VHT_MCS; in iwl_mvm_hwrate_to_tx_rate()
1501 r->idx = 0; in iwl_mvm_hwrate_to_tx_rate()
1503 r->idx = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags, in iwl_mvm_hwrate_to_tx_rate()
1513 r->flags |= IEEE80211_TX_RC_GREEN_FIELD; in iwl_mvm_hwrate_to_tx_rate_v1()
1515 r->flags |= in iwl_mvm_hwrate_to_tx_rate_v1()
1520 r->flags |= IEEE80211_TX_RC_SHORT_GI; in iwl_mvm_hwrate_to_tx_rate_v1()
1522 r->flags |= IEEE80211_TX_RC_MCS; in iwl_mvm_hwrate_to_tx_rate_v1()
1523 r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK_V1; in iwl_mvm_hwrate_to_tx_rate_v1()
1528 r->flags |= IEEE80211_TX_RC_VHT_MCS; in iwl_mvm_hwrate_to_tx_rate_v1()
1530 r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, in iwl_mvm_hwrate_to_tx_rate_v1()
1536 * translate ucode response to mac80211 tx status control values
1542 struct ieee80211_tx_rate *r = &info->status.rates[0]; in iwl_mvm_hwrate_to_tx_status()
1548 info->status.antenna = in iwl_mvm_hwrate_to_tx_status()
1551 info->band, r); in iwl_mvm_hwrate_to_tx_status()
1568 iwl_dbg_tlv_time_point(&mvm->fwrt, in iwl_mvm_tx_status_check_trigger()
1573 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, in iwl_mvm_tx_status_check_trigger()
1578 status_trig = (void *)trig->data; in iwl_mvm_tx_status_check_trigger()
1580 for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) { in iwl_mvm_tx_status_check_trigger()
1582 if (!status_trig->statuses[i].status) in iwl_mvm_tx_status_check_trigger()
1585 if (status_trig->statuses[i].status != (status & TX_STATUS_MSK)) in iwl_mvm_tx_status_check_trigger()
1588 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, in iwl_mvm_tx_status_check_trigger()
1589 "Tx status %d was received", in iwl_mvm_tx_status_check_trigger()
1596 * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
1597 * @tx_resp: the Tx response from the fw (agg or non-agg)
1602 * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
1608 * For 22000-series and lower, this is just 12 bits. For later, 16 bits.
1614 tx_resp->frame_count); in iwl_mvm_get_scd_ssn()
1616 if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) in iwl_mvm_get_scd_ssn()
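Per the kernel-doc above, the SSN width depends on the device family; a hedged sketch of this helper's tail (the exact masks are an assumption):

	if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		return val & 0xFFFF;	/* 16-bit SSN */
	return val & 0xFFF;		/* 12-bit SSN, 22000-series and older */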
1622 struct iwl_rx_packet *pkt) in iwl_mvm_rx_tx_cmd_single() argument
1625 u16 sequence = le16_to_cpu(pkt->hdr.sequence); in iwl_mvm_rx_tx_cmd_single()
1628 struct iwl_tx_resp *tx_resp = (void *)pkt->data; in iwl_mvm_rx_tx_cmd_single()
1629 int sta_id = IWL_TX_RES_GET_RA(tx_resp->ra_tid); in iwl_mvm_rx_tx_cmd_single()
1630 int tid = IWL_TX_RES_GET_TID(tx_resp->ra_tid); in iwl_mvm_rx_tx_cmd_single()
1633 u32 status = le16_to_cpu(agg_status->status); in iwl_mvm_rx_tx_cmd_single()
1644 txq_id = le16_to_cpu(tx_resp->tx_queue); in iwl_mvm_rx_tx_cmd_single()
1646 seq_ctl = le16_to_cpu(tx_resp->seq_ctl); in iwl_mvm_rx_tx_cmd_single()
1649 iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs, false); in iwl_mvm_rx_tx_cmd_single()
1654 struct ieee80211_hdr *hdr = (void *)skb->data; in iwl_mvm_rx_tx_cmd_single()
1659 iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); in iwl_mvm_rx_tx_cmd_single()
1661 memset(&info->status, 0, sizeof(info->status)); in iwl_mvm_rx_tx_cmd_single()
1662 info->flags &= ~(IEEE80211_TX_STAT_ACK | IEEE80211_TX_STAT_TX_FILTERED); in iwl_mvm_rx_tx_cmd_single()
1668 info->flags |= IEEE80211_TX_STAT_ACK; in iwl_mvm_rx_tx_cmd_single()
1675 /* the FW should have stopped the queue and not in iwl_mvm_rx_tx_cmd_single()
1679 "FW reported TX filtered, status=0x%x, FC=0x%x\n", in iwl_mvm_rx_tx_cmd_single()
1680 status, le16_to_cpu(hdr->frame_control)); in iwl_mvm_rx_tx_cmd_single()
1681 info->flags |= IEEE80211_TX_STAT_TX_FILTERED; in iwl_mvm_rx_tx_cmd_single()
1688 ieee80211_is_mgmt(hdr->frame_control)) in iwl_mvm_rx_tx_cmd_single()
1689 iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx); in iwl_mvm_rx_tx_cmd_single()
1697 info->flags |= IEEE80211_TX_STAT_ACK; in iwl_mvm_rx_tx_cmd_single()
1699 iwl_mvm_tx_status_check_trigger(mvm, status, hdr->frame_control); in iwl_mvm_rx_tx_cmd_single()
1701 info->status.rates[0].count = tx_resp->failure_frame + 1; in iwl_mvm_rx_tx_cmd_single()
1703 iwl_mvm_hwrate_to_tx_status(mvm->fw, in iwl_mvm_rx_tx_cmd_single()
1704 le32_to_cpu(tx_resp->initial_rate), in iwl_mvm_rx_tx_cmd_single()
1710 info->status.status_driver_data[1] = in iwl_mvm_rx_tx_cmd_single()
1711 (void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate); in iwl_mvm_rx_tx_cmd_single()
1713 /* Single frame failure in an AMPDU queue => send BAR */ in iwl_mvm_rx_tx_cmd_single()
1714 if (info->flags & IEEE80211_TX_CTL_AMPDU && in iwl_mvm_rx_tx_cmd_single()
1715 !(info->flags & IEEE80211_TX_STAT_ACK) && in iwl_mvm_rx_tx_cmd_single()
1716 !(info->flags & IEEE80211_TX_STAT_TX_FILTERED) && !flushed) in iwl_mvm_rx_tx_cmd_single()
1717 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; in iwl_mvm_rx_tx_cmd_single()
1718 info->flags &= ~IEEE80211_TX_CTL_AMPDU; in iwl_mvm_rx_tx_cmd_single()
1721 if (ieee80211_is_back_req(hdr->frame_control)) in iwl_mvm_rx_tx_cmd_single()
1724 seq_ctl = le16_to_cpu(hdr->seq_ctrl); in iwl_mvm_rx_tx_cmd_single()
1730 * reason, NDPs are never sent to A-MPDU'able queues in iwl_mvm_rx_tx_cmd_single()
1732 * for a single Tx response (see WARN_ON below). in iwl_mvm_rx_tx_cmd_single()
1734 if (ieee80211_is_qos_nullfunc(hdr->frame_control)) in iwl_mvm_rx_tx_cmd_single()
1742 info->status.tx_time = in iwl_mvm_rx_tx_cmd_single()
1743 le16_to_cpu(tx_resp->wireless_media_time); in iwl_mvm_rx_tx_cmd_single()
1744 BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1); in iwl_mvm_rx_tx_cmd_single()
1745 lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info); in iwl_mvm_rx_tx_cmd_single()
1746 info->status.status_driver_data[0] = in iwl_mvm_rx_tx_cmd_single()
1747 RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc); in iwl_mvm_rx_tx_cmd_single()
1749 if (likely(!iwl_mvm_time_sync_frame(mvm, skb, hdr->addr1))) in iwl_mvm_rx_tx_cmd_single()
1750 ieee80211_tx_status_skb(mvm->hw, skb); in iwl_mvm_rx_tx_cmd_single()
1753 /* This is an aggregation queue or might become one, so we use in iwl_mvm_rx_tx_cmd_single()
1756 * this Tx response relates. But if there is a hole in the in iwl_mvm_rx_tx_cmd_single()
1757 * bitmap of the BA we received, this Tx response may allow to in iwl_mvm_rx_tx_cmd_single()
1775 le32_to_cpu(tx_resp->initial_rate), in iwl_mvm_rx_tx_cmd_single()
1776 tx_resp->failure_frame, SEQ_TO_INDEX(sequence), in iwl_mvm_rx_tx_cmd_single()
1781 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); in iwl_mvm_rx_tx_cmd_single()
1784 * the firmware while we still have packets for it in the Tx queues. in iwl_mvm_rx_tx_cmd_single()
1793 le16_to_cpu(tx_resp->wireless_media_time)); in iwl_mvm_rx_tx_cmd_single()
1796 mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) in iwl_mvm_rx_tx_cmd_single()
1797 iwl_mvm_toggle_tx_ant(mvm, &mvmsta->tx_ant); in iwl_mvm_rx_tx_cmd_single()
1799 if (sta->wme && tid != IWL_MGMT_TID) { in iwl_mvm_rx_tx_cmd_single()
1801 &mvmsta->tid_data[tid]; in iwl_mvm_rx_tx_cmd_single()
1804 spin_lock_bh(&mvmsta->lock); in iwl_mvm_rx_tx_cmd_single()
1807 tid_data->next_reclaimed = next_reclaimed; in iwl_mvm_rx_tx_cmd_single()
1816 "NDP - don't update next_reclaimed\n"); in iwl_mvm_rx_tx_cmd_single()
1821 if (mvmsta->sleep_tx_count) { in iwl_mvm_rx_tx_cmd_single()
1822 mvmsta->sleep_tx_count--; in iwl_mvm_rx_tx_cmd_single()
1823 if (mvmsta->sleep_tx_count && in iwl_mvm_rx_tx_cmd_single()
1826 * The number of frames in the queue in iwl_mvm_rx_tx_cmd_single()
1829 * Tx queue. in iwl_mvm_rx_tx_cmd_single()
1842 spin_unlock_bh(&mvmsta->lock); in iwl_mvm_rx_tx_cmd_single()
1847 mvmsta->sleep_tx_count = 0; in iwl_mvm_rx_tx_cmd_single()
1852 if (mvmsta->next_status_eosp) { in iwl_mvm_rx_tx_cmd_single()
1853 mvmsta->next_status_eosp = false; in iwl_mvm_rx_tx_cmd_single()
1885 struct iwl_rx_packet *pkt) in iwl_mvm_rx_tx_cmd_agg_dbg() argument
1887 struct iwl_tx_resp *tx_resp = (void *)pkt->data; in iwl_mvm_rx_tx_cmd_agg_dbg()
1893 for (i = 0; i < tx_resp->frame_count; i++) { in iwl_mvm_rx_tx_cmd_agg_dbg()
1899 "status %s (0x%04x), try-count (%d) seq (0x%x)\n", in iwl_mvm_rx_tx_cmd_agg_dbg()
1908 iwl_dbg_tlv_time_point(&mvm->fwrt, in iwl_mvm_rx_tx_cmd_agg_dbg()
1914 struct iwl_rx_packet *pkt) in iwl_mvm_rx_tx_cmd_agg_dbg() argument
1919 struct iwl_rx_packet *pkt) in iwl_mvm_rx_tx_cmd_agg() argument
1921 struct iwl_tx_resp *tx_resp = (void *)pkt->data; in iwl_mvm_rx_tx_cmd_agg()
1922 int sta_id = IWL_TX_RES_GET_RA(tx_resp->ra_tid); in iwl_mvm_rx_tx_cmd_agg()
1923 int tid = IWL_TX_RES_GET_TID(tx_resp->ra_tid); in iwl_mvm_rx_tx_cmd_agg()
1924 u16 sequence = le16_to_cpu(pkt->hdr.sequence); in iwl_mvm_rx_tx_cmd_agg()
1926 int queue = SEQ_TO_QUEUE(sequence); in iwl_mvm_rx_tx_cmd_agg() local
1929 if (WARN_ON_ONCE(queue < IWL_MVM_DQA_MIN_DATA_QUEUE && in iwl_mvm_rx_tx_cmd_agg()
1930 (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE))) in iwl_mvm_rx_tx_cmd_agg()
1933 iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt); in iwl_mvm_rx_tx_cmd_agg()
1939 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); in iwl_mvm_rx_tx_cmd_agg()
1940 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta) || !sta->wme)) { in iwl_mvm_rx_tx_cmd_agg()
1946 mvmsta->tid_data[tid].rate_n_flags = in iwl_mvm_rx_tx_cmd_agg()
1947 le32_to_cpu(tx_resp->initial_rate); in iwl_mvm_rx_tx_cmd_agg()
1948 mvmsta->tid_data[tid].tx_time = in iwl_mvm_rx_tx_cmd_agg()
1949 le16_to_cpu(tx_resp->wireless_media_time); in iwl_mvm_rx_tx_cmd_agg()
1950 mvmsta->tid_data[tid].lq_color = in iwl_mvm_rx_tx_cmd_agg()
1951 TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info); in iwl_mvm_rx_tx_cmd_agg()
1953 le16_to_cpu(tx_resp->wireless_media_time)); in iwl_mvm_rx_tx_cmd_agg()
1961 struct iwl_rx_packet *pkt = rxb_addr(rxb); in iwl_mvm_rx_tx_cmd() local
1962 struct iwl_tx_resp *tx_resp = (void *)pkt->data; in iwl_mvm_rx_tx_cmd()
1964 if (tx_resp->frame_count == 1) in iwl_mvm_rx_tx_cmd()
1965 iwl_mvm_rx_tx_cmd_single(mvm, pkt); in iwl_mvm_rx_tx_cmd()
1967 iwl_mvm_rx_tx_cmd_agg(mvm, pkt); in iwl_mvm_rx_tx_cmd()
1982 if (WARN_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations || in iwl_mvm_tx_reclaim()
1989 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); in iwl_mvm_tx_reclaim()
2001 * block-ack window (we assume that they've been successfully in iwl_mvm_tx_reclaim()
2004 iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs, is_flush); in iwl_mvm_tx_reclaim()
2009 iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); in iwl_mvm_tx_reclaim()
2011 memset(&info->status, 0, sizeof(info->status)); in iwl_mvm_tx_reclaim()
2017 info->flags |= IEEE80211_TX_STAT_ACK; in iwl_mvm_tx_reclaim()
2019 info->flags &= ~IEEE80211_TX_STAT_ACK; in iwl_mvm_tx_reclaim()
2024 * invalidated in order to prevent new Tx from being sent, but there may in iwl_mvm_tx_reclaim()
2025 * be some frames already in-flight). in iwl_mvm_tx_reclaim()
2027 * sta-dependent stuff since it's in the middle of being removed in iwl_mvm_tx_reclaim()
2034 tid_data = &mvmsta->tid_data[tid]; in iwl_mvm_tx_reclaim()
2036 if (tid_data->txq_id != txq) { in iwl_mvm_tx_reclaim()
2039 tid_data->txq_id, tid); in iwl_mvm_tx_reclaim()
2044 spin_lock_bh(&mvmsta->lock); in iwl_mvm_tx_reclaim()
2046 tid_data->next_reclaimed = index; in iwl_mvm_tx_reclaim()
2053 tx_info->status.status_driver_data[0] = in iwl_mvm_tx_reclaim()
2054 RS_DRV_DATA_PACK(tid_data->lq_color, in iwl_mvm_tx_reclaim()
2055 tx_info->status.status_driver_data[0]); in iwl_mvm_tx_reclaim()
2056 tx_info->status.status_driver_data[1] = (void *)(uintptr_t)rate; in iwl_mvm_tx_reclaim()
2059 struct ieee80211_hdr *hdr = (void *)skb->data; in iwl_mvm_tx_reclaim()
2063 if (ieee80211_is_data_qos(hdr->frame_control)) in iwl_mvm_tx_reclaim()
2072 info->flags |= IEEE80211_TX_STAT_AMPDU; in iwl_mvm_tx_reclaim()
2073 memcpy(&info->status, &tx_info->status, in iwl_mvm_tx_reclaim()
2074 sizeof(tx_info->status)); in iwl_mvm_tx_reclaim()
2075 iwl_mvm_hwrate_to_tx_status(mvm->fw, rate, info); in iwl_mvm_tx_reclaim()
2079 spin_unlock_bh(&mvmsta->lock); in iwl_mvm_tx_reclaim()
2089 /* no TLC offload, so non-MLD mode */ in iwl_mvm_tx_reclaim()
2090 if (mvmsta->vif) in iwl_mvm_tx_reclaim()
2092 rcu_dereference(mvmsta->vif->bss_conf.chanctx_conf); in iwl_mvm_tx_reclaim()
2097 tx_info->band = chanctx_conf->def.chan->band; in iwl_mvm_tx_reclaim()
2098 iwl_mvm_hwrate_to_tx_status(mvm->fw, rate, tx_info); in iwl_mvm_tx_reclaim()
2109 ieee80211_tx_status_skb(mvm->hw, skb); in iwl_mvm_tx_reclaim()
2115 struct iwl_rx_packet *pkt = rxb_addr(rxb); in iwl_mvm_rx_ba_notif() local
2116 unsigned int pkt_len = iwl_rx_packet_payload_len(pkt); in iwl_mvm_rx_ba_notif()
2127 (void *)pkt->data; in iwl_mvm_rx_ba_notif()
2128 u8 lq_color = TX_RES_RATE_TABLE_COL_GET(ba_res->tlc_rate_info); in iwl_mvm_rx_ba_notif()
2136 sta_id = ba_res->sta_id; in iwl_mvm_rx_ba_notif()
2137 ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done); in iwl_mvm_rx_ba_notif()
2138 ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed); in iwl_mvm_rx_ba_notif()
2140 (u16)le32_to_cpu(ba_res->wireless_time); in iwl_mvm_rx_ba_notif()
2142 (void *)(uintptr_t)ba_res->reduced_txp; in iwl_mvm_rx_ba_notif()
2144 tfd_cnt = le16_to_cpu(ba_res->tfd_cnt); in iwl_mvm_rx_ba_notif()
2156 sta_id, le32_to_cpu(ba_res->flags), in iwl_mvm_rx_ba_notif()
2157 le16_to_cpu(ba_res->txed), in iwl_mvm_rx_ba_notif()
2158 le16_to_cpu(ba_res->done)); in iwl_mvm_rx_ba_notif()
2165 * (rcu is invalidated in order to prevent new Tx from being in iwl_mvm_rx_ba_notif()
2166 * sent, but there may be some frames already in-flight). in iwl_mvm_rx_ba_notif()
2168 * sta-dependent stuff since it's in the middle of being removed in iwl_mvm_rx_ba_notif()
2174 struct iwl_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i]; in iwl_mvm_rx_ba_notif()
2176 tid = ba_tfd->tid; in iwl_mvm_rx_ba_notif()
2181 mvmsta->tid_data[i].lq_color = lq_color; in iwl_mvm_rx_ba_notif()
2184 (int)(le16_to_cpu(ba_tfd->q_num)), in iwl_mvm_rx_ba_notif()
2185 le16_to_cpu(ba_tfd->tfd_index), in iwl_mvm_rx_ba_notif()
2187 le32_to_cpu(ba_res->tx_rate), false); in iwl_mvm_rx_ba_notif()
2192 le32_to_cpu(ba_res->wireless_time)); in iwl_mvm_rx_ba_notif()
2195 le16_to_cpu(ba_res->txed), true, 0); in iwl_mvm_rx_ba_notif()
2201 ba_notif = (void *)pkt->data; in iwl_mvm_rx_ba_notif()
2202 sta_id = ba_notif->sta_id; in iwl_mvm_rx_ba_notif()
2203 tid = ba_notif->tid; in iwl_mvm_rx_ba_notif()
2204 /* "flow" corresponds to Tx queue */ in iwl_mvm_rx_ba_notif()
2205 txq = le16_to_cpu(ba_notif->scd_flow); in iwl_mvm_rx_ba_notif()
2206 /* "ssn" is start of block-ack Tx window, corresponds to index in iwl_mvm_rx_ba_notif()
2207 * (in Tx queue's circular buffer) of first TFD/frame in window */ in iwl_mvm_rx_ba_notif()
2208 index = le16_to_cpu(ba_notif->scd_ssn); in iwl_mvm_rx_ba_notif()
2219 tid_data = &mvmsta->tid_data[tid]; in iwl_mvm_rx_ba_notif()
2221 ba_info.status.ampdu_ack_len = ba_notif->txed_2_done; in iwl_mvm_rx_ba_notif()
2222 ba_info.status.ampdu_len = ba_notif->txed; in iwl_mvm_rx_ba_notif()
2223 ba_info.status.tx_time = tid_data->tx_time; in iwl_mvm_rx_ba_notif()
2225 (void *)(uintptr_t)ba_notif->reduced_txp; in iwl_mvm_rx_ba_notif()
2231 ba_notif->sta_addr, ba_notif->sta_id); in iwl_mvm_rx_ba_notif()
2235 ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl), in iwl_mvm_rx_ba_notif()
2236 le64_to_cpu(ba_notif->bitmap), txq, index, in iwl_mvm_rx_ba_notif()
2237 ba_notif->txed, ba_notif->txed_2_done); in iwl_mvm_rx_ba_notif()
2240 ba_notif->reduced_txp); in iwl_mvm_rx_ba_notif()
2243 tid_data->rate_n_flags, false); in iwl_mvm_rx_ba_notif()
2249 * queue might not be empty. The race-free way to handle this is to:
2251 * 2) flush the Tx path
2287 if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TXPATH_FLUSH, 0) > 0) in iwl_mvm_flush_sta_tids()
2305 ret = -EIO; in iwl_mvm_flush_sta_tids()
2309 rsp = (void *)cmd.resp_pkt->data; in iwl_mvm_flush_sta_tids()
2311 if (WARN_ONCE(le16_to_cpu(rsp->sta_id) != sta_id, in iwl_mvm_flush_sta_tids()
2313 sta_id, le16_to_cpu(rsp->sta_id))) { in iwl_mvm_flush_sta_tids()
2314 ret = -EIO; in iwl_mvm_flush_sta_tids()
2318 num_flushed_queues = le16_to_cpu(rsp->num_flushed_queues); in iwl_mvm_flush_sta_tids()
2321 ret = -EIO; in iwl_mvm_flush_sta_tids()
2327 struct iwl_flush_queue_info *queue_info = &rsp->queues[i]; in iwl_mvm_flush_sta_tids()
2328 int tid = le16_to_cpu(queue_info->tid); in iwl_mvm_flush_sta_tids()
2329 int read_before = le16_to_cpu(queue_info->read_before_flush); in iwl_mvm_flush_sta_tids()
2330 int read_after = le16_to_cpu(queue_info->read_after_flush); in iwl_mvm_flush_sta_tids()
2331 int queue_num = le16_to_cpu(queue_info->queue_num); in iwl_mvm_flush_sta_tids()
2337 "tid %d queue_id %d read-before %d read-after %d\n", in iwl_mvm_flush_sta_tids()