Lines Matching "queue", "pkt", "rx"

1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2016-2017 Intel Deutschland GmbH
12 #include "fw/notif-wait.h"
13 #include "iwl-trans.h"
14 #include "iwl-op-mode.h"
16 #include "iwl-debug.h"
17 #include "iwl-drv.h"
18 #include "iwl-modparams.h"
20 #include "iwl-phy-db.h"
21 #include "iwl-eeprom-parse.h"
22 #include "iwl-csr.h"
23 #include "iwl-io.h"
24 #include "iwl-prph.h"
28 #include "time-event.h"
29 #include "fw-api.h"
32 #include "time-sync.h"
52 "power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
96 IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type, in iwl_mvm_nic_config()
99 if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) in iwl_mvm_nic_config()
103 reg_val = CSR_HW_REV_STEP_DASH(mvm->trans->hw_rev); in iwl_mvm_nic_config()
114 * TODO: Bits 7-8 of CSR in 8000 HW family and higher set the ADC in iwl_mvm_nic_config()
115 * sampling, and shouldn't be set to any non-zero value. in iwl_mvm_nic_config()
121 if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) in iwl_mvm_nic_config()
124 if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt)) in iwl_mvm_nic_config()
127 iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG, in iwl_mvm_nic_config()
142 if (!mvm->trans->cfg->apmg_not_supported) in iwl_mvm_nic_config()
143 iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG, in iwl_mvm_nic_config()
151 struct iwl_rx_packet *pkt = rxb_addr(rxb); in iwl_mvm_rx_monitor_notif() local
152 struct iwl_datapath_monitor_notif *notif = (void *)pkt->data; in iwl_mvm_rx_monitor_notif()
157 if (notif->type != cpu_to_le32(IWL_DP_MON_NOTIF_TYPE_EXT_CCA)) in iwl_mvm_rx_monitor_notif()
160 vif = iwl_mvm_get_vif_by_macid(mvm, notif->mac_id); in iwl_mvm_rx_monitor_notif()
161 if (!vif || vif->type != NL80211_IFTYPE_STATION) in iwl_mvm_rx_monitor_notif()
164 if (!vif->bss_conf.chandef.chan || in iwl_mvm_rx_monitor_notif()
165 vif->bss_conf.chandef.chan->band != NL80211_BAND_2GHZ || in iwl_mvm_rx_monitor_notif()
166 vif->bss_conf.chandef.width < NL80211_CHAN_WIDTH_40) in iwl_mvm_rx_monitor_notif()
169 if (!vif->cfg.assoc) in iwl_mvm_rx_monitor_notif()
173 if (mvm->cca_40mhz_workaround) in iwl_mvm_rx_monitor_notif()
177 * We'll decrement this on disconnect - so set to 2 since we'll in iwl_mvm_rx_monitor_notif()
180 mvm->cca_40mhz_workaround = 2; in iwl_mvm_rx_monitor_notif()
184 * easiest choice - otherwise we'd have to do some major changes in iwl_mvm_rx_monitor_notif()
189 sband = mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]; in iwl_mvm_rx_monitor_notif()
191 WARN_ON(!sband->ht_cap.ht_supported); in iwl_mvm_rx_monitor_notif()
192 WARN_ON(!(sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)); in iwl_mvm_rx_monitor_notif()
193 sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; in iwl_mvm_rx_monitor_notif()
201 WARN_ON(!he->has_he); in iwl_mvm_rx_monitor_notif()
202 WARN_ON(!(he->he_cap_elem.phy_cap_info[0] & in iwl_mvm_rx_monitor_notif()
204 he->he_cap_elem.phy_cap_info[0] &= in iwl_mvm_rx_monitor_notif()
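(The counter set to 2 above is unwound on the station's disconnect path, which this match set does not include. A hedged sketch of what that decrement looks like -- names mirror the fragments above; the real hook lives in the mac80211 callbacks and may differ:)

	/* Illustrative only: each disconnect drops one reference; once it
	 * reaches zero, restore the 40 MHz capability stripped above.
	 */
	if (mvm->cca_40mhz_workaround) {
		mvm->cca_40mhz_workaround--;
		if (!mvm->cca_40mhz_workaround) {
			struct ieee80211_supported_band *sband =
				mvm->hw->wiphy->bands[NL80211_BAND_2GHZ];

			sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
			/* the matching HE PHY capability bits would be
			 * restored here as well; omitted for brevity
			 */
		}
	}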
215 struct iwl_mvm *mvm = mvmvif->mvm; in iwl_mvm_update_link_smps()
221 if (mvm->fw_static_smps_request && in iwl_mvm_update_link_smps()
222 link_conf->chandef.width == NL80211_CHAN_WIDTH_160 && in iwl_mvm_update_link_smps()
223 link_conf->he_support) in iwl_mvm_update_link_smps()
227 link_conf->link_id); in iwl_mvm_update_link_smps()
247 struct iwl_rx_packet *pkt = rxb_addr(rxb); in iwl_mvm_rx_thermal_dual_chain_req() local
248 struct iwl_thermal_dual_chain_request *req = (void *)pkt->data; in iwl_mvm_rx_thermal_dual_chain_req()
254 mvm->fw_static_smps_request = in iwl_mvm_rx_thermal_dual_chain_req()
255 req->event == cpu_to_le32(THERMAL_DUAL_CHAIN_REQ_DISABLE); in iwl_mvm_rx_thermal_dual_chain_req()
256 ieee80211_iterate_interfaces(mvm->hw, in iwl_mvm_rx_thermal_dual_chain_req()
262 * enum iwl_rx_handler_context context for Rx handler
263 * @RX_HANDLER_SYNC : this means that it will be called in the Rx path
264 * which can't acquire mvm->mutex.
265 * @RX_HANDLER_ASYNC_LOCKED : If the handler needs to hold mvm->mutex
267 * it will be called from a worker with mvm->mutex held.
269 * mutex itself, it will be called from a worker without mvm->mutex held.
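(Concretely, each notification is bound to one of these contexts in the iwl_mvm_rx_handlers[] table. A hedged sketch of two entries, assuming the RX_HANDLER()/RX_HANDLER_GRP() macro shapes this file uses -- exact entries vary by kernel version:)

	static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
		/* RX_HANDLER_SYNC: runs directly in the Rx path, without
		 * taking mvm->mutex
		 */
		RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC,
			   struct iwl_mvm_tx_resp),
		/* RX_HANDLER_ASYNC_LOCKED: deferred to async_handlers_wk,
		 * which takes mvm->mutex before calling the handler
		 */
		RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF,
			       iwl_mvm_rx_monitor_notif,
			       RX_HANDLER_ASYNC_LOCKED,
			       struct iwl_datapath_monitor_notif),
	};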
679 const struct iwl_pwr_tx_backoff *backoff = mvm->cfg->pwr_tx_backoffs; in iwl_mvm_min_backoff()
685 dflt_pwr_limit = iwl_acpi_get_pwr_limit(mvm->dev); in iwl_mvm_min_backoff()
687 while (backoff->pwr) { in iwl_mvm_min_backoff()
688 if (dflt_pwr_limit >= backoff->pwr) in iwl_mvm_min_backoff()
689 return backoff->backoff; in iwl_mvm_min_backoff()
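(The table walked above comes from the device config, mvm->cfg->pwr_tx_backoffs: it is ordered from the highest power threshold down and terminated by a zero entry, so the first threshold the ACPI default power limit meets wins. A hypothetical table of the shape this loop expects -- values illustrative, modeled on the iwl_pwr_tx_backoff arrays in the per-device cfg files:)

	static const struct iwl_pwr_tx_backoff example_pwr_tx_backoffs[] = {
		/* if the ACPI power limit is >= .pwr, apply .backoff */
		{ .pwr = 1600, .backoff = 0 },
		{ .pwr = 1300, .backoff = 467 },
		{ .pwr = 900,  .backoff = 1900 },
		{ /* zero terminator: backoff->pwr == 0 ends the walk */ },
	};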
704 mutex_lock(&mvm->mutex); in iwl_mvm_tx_unblock_dwork()
707 rcu_dereference_protected(mvm->csa_tx_blocked_vif, in iwl_mvm_tx_unblock_dwork()
708 lockdep_is_held(&mvm->mutex)); in iwl_mvm_tx_unblock_dwork()
715 RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); in iwl_mvm_tx_unblock_dwork()
717 mutex_unlock(&mvm->mutex); in iwl_mvm_tx_unblock_dwork()
724 mutex_lock(&mvm->mutex); in iwl_mvm_fwrt_dump_start()
731 mutex_unlock(&mvm->mutex); in iwl_mvm_fwrt_dump_end()
744 mutex_lock(&mvm->mutex); in iwl_mvm_fwrt_send_hcmd()
746 mutex_unlock(&mvm->mutex); in iwl_mvm_fwrt_send_hcmd()
766 struct iwl_trans *trans = mvm->trans; in iwl_mvm_start_get_nvm()
769 if (trans->csme_own) { in iwl_mvm_start_get_nvm()
770 if (WARN(!mvm->mei_registered, in iwl_mvm_start_get_nvm()
774 mvm->mei_nvm_data = iwl_mei_get_nvm(); in iwl_mvm_start_get_nvm()
775 if (mvm->mei_nvm_data) { in iwl_mvm_start_get_nvm()
777 * mvm->mei_nvm_data is set and because of that, in iwl_mvm_start_get_nvm()
781 mvm->nvm_data = in iwl_mvm_start_get_nvm()
782 iwl_parse_mei_nvm_data(trans, trans->cfg, in iwl_mvm_start_get_nvm()
783 mvm->mei_nvm_data, in iwl_mvm_start_get_nvm()
784 mvm->fw, in iwl_mvm_start_get_nvm()
785 mvm->set_tx_ant, in iwl_mvm_start_get_nvm()
786 mvm->set_rx_ant); in iwl_mvm_start_get_nvm()
796 wiphy_lock(mvm->hw->wiphy); in iwl_mvm_start_get_nvm()
797 mutex_lock(&mvm->mutex); in iwl_mvm_start_get_nvm()
799 ret = iwl_trans_start_hw(mvm->trans); in iwl_mvm_start_get_nvm()
801 mutex_unlock(&mvm->mutex); in iwl_mvm_start_get_nvm()
802 wiphy_unlock(mvm->hw->wiphy); in iwl_mvm_start_get_nvm()
808 if (ret && ret != -ERFKILL) in iwl_mvm_start_get_nvm()
809 iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER); in iwl_mvm_start_get_nvm()
811 mvm->hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED; in iwl_mvm_start_get_nvm()
818 mutex_unlock(&mvm->mutex); in iwl_mvm_start_get_nvm()
819 wiphy_unlock(mvm->hw->wiphy); in iwl_mvm_start_get_nvm()
826 mvm->pldr_sync = false; in iwl_mvm_start_get_nvm()
836 iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx); in iwl_mvm_start_post_nvm()
842 mvm->hw_registered = true; in iwl_mvm_start_post_nvm()
846 wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy, in iwl_mvm_start_post_nvm()
847 mvm->mei_rfkill_blocked, in iwl_mvm_start_post_nvm()
871 switch (key->cipher) { in iwl_mvm_frob_txf_key_iter()
873 keydata = key->key; in iwl_mvm_frob_txf_key_iter()
874 keylen = key->keylen; in iwl_mvm_frob_txf_key_iter()
885 memset(txf->buf, 0xBB, txf->buflen); in iwl_mvm_frob_txf_key_iter()
893 for (i = 0; i < txf->buflen; i++) { in iwl_mvm_frob_txf_key_iter()
894 if (txf->buf[i] != keydata[match]) { in iwl_mvm_frob_txf_key_iter()
900 memset(txf->buf + i - keylen, 0xAA, keylen); in iwl_mvm_frob_txf_key_iter()
907 for (i = 0; match && i < keylen - match; i++) { in iwl_mvm_frob_txf_key_iter()
908 if (txf->buf[i] != keydata[match]) in iwl_mvm_frob_txf_key_iter()
912 memset(txf->buf, 0xAA, i + 1); in iwl_mvm_frob_txf_key_iter()
913 memset(txf->buf + txf->buflen - matchend, 0xAA, in iwl_mvm_frob_txf_key_iter()
933 ieee80211_iter_keys_rcu(mvm->hw, NULL, iwl_mvm_frob_txf_key_iter, &txf); in iwl_mvm_frob_txf()
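(Since only fragments of the matcher survive the search filter, here is a self-contained sketch of the scrubbing idea: stream the TXF snapshot against the key bytes, overwrite every full match with the 0xAA filler, and also blank any partial match left hanging at the end of the capture. This is an illustration that mirrors the fragments above, not the driver function itself:)

	#include <stddef.h>
	#include <string.h>

	/* Blank key material out of a captured FIFO buffer (simplified:
	 * like the driver's matcher, it does not re-try a byte after a
	 * failed partial match, so overlapping occurrences can be missed).
	 */
	static void scrub_key_material(unsigned char *buf, size_t buflen,
				       const unsigned char *key,
				       size_t keylen)
	{
		size_t i, match = 0;

		for (i = 0; i < buflen; i++) {
			if (buf[i] != key[match]) {
				match = 0;
				continue;
			}
			if (++match == keylen) {
				/* full key ends at i: blank it */
				memset(buf + i + 1 - keylen, 0xAA, keylen);
				match = 0;
			}
		}
		if (match)	/* partial hit running into the end */
			memset(buf + buflen - match, 0xAA, match);
	}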
947 if (hdr->group_id != LONG_GROUP) in iwl_mvm_frob_hcmd()
950 switch (hdr->cmd) { in iwl_mvm_frob_hcmd()
978 memset((u8 *)hcmd + frob_start, 0xAA, frob_end - frob_start); in iwl_mvm_frob_hcmd()
987 switch (mvm->fwrt.cur_fw_img) { in iwl_mvm_frob_mem()
994 excl = mvm->fw->dump_excl; in iwl_mvm_frob_mem()
997 excl = mvm->fw->dump_excl_wowlan; in iwl_mvm_frob_mem()
1001 BUILD_BUG_ON(sizeof(mvm->fw->dump_excl) != in iwl_mvm_frob_mem()
1002 sizeof(mvm->fw->dump_excl_wowlan)); in iwl_mvm_frob_mem()
1004 for (i = 0; i < ARRAY_SIZE(mvm->fw->dump_excl); i++) { in iwl_mvm_frob_mem()
1022 memset((u8 *)mem + start - mem_addr, 0xAA, end - start); in iwl_mvm_frob_mem()
1041 prev_conn_info = rcu_dereference_protected(mvm->csme_conn_info, true); in iwl_mvm_me_conn_status()
1047 curr_conn_info->conn_info = *conn_info; in iwl_mvm_me_conn_status()
1049 rcu_assign_pointer(mvm->csme_conn_info, curr_conn_info); in iwl_mvm_me_conn_status()
1063 mvm->mei_rfkill_blocked = blocked; in iwl_mvm_mei_rfkill()
1064 if (!mvm->hw_registered) in iwl_mvm_mei_rfkill()
1067 wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy, in iwl_mvm_mei_rfkill()
1068 mvm->mei_rfkill_blocked, in iwl_mvm_mei_rfkill()
1076 if (!mvm->hw_registered || !mvm->csme_vif) in iwl_mvm_mei_roaming_forbidden()
1079 iwl_mvm_send_roaming_forbidden_event(mvm, mvm->csme_vif, forbidden); in iwl_mvm_mei_roaming_forbidden()
1102 iwl_fw_flush_dumps(&mvm->fwrt); in iwl_mvm_sap_connected_wk()
1104 iwl_fw_runtime_free(&mvm->fwrt); in iwl_mvm_sap_connected_wk()
1105 iwl_phy_db_free(mvm->phy_db); in iwl_mvm_sap_connected_wk()
1106 kfree(mvm->scan_cmd); in iwl_mvm_sap_connected_wk()
1107 iwl_trans_op_mode_leave(mvm->trans); in iwl_mvm_sap_connected_wk()
1108 kfree(mvm->nvm_data); in iwl_mvm_sap_connected_wk()
1109 kfree(mvm->mei_nvm_data); in iwl_mvm_sap_connected_wk()
1111 ieee80211_free_hw(mvm->hw); in iwl_mvm_sap_connected_wk()
1118 if (!mvm->hw_registered) in iwl_mvm_mei_sap_connected()
1119 schedule_work(&mvm->sap_connected_wk); in iwl_mvm_mei_sap_connected()
1127 cfg80211_shutdown_all_interfaces(mvm->hw->wiphy); in iwl_mvm_mei_nic_stolen()
1157 * index all over the driver - check that its value corresponds to the in iwl_op_mode_mvm_start()
1160 BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != in iwl_op_mode_mvm_start()
1173 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) in iwl_op_mode_mvm_start()
1178 hw->max_rx_aggregation_subframes = max_agg; in iwl_op_mode_mvm_start()
1180 if (cfg->max_tx_agg_size) in iwl_op_mode_mvm_start()
1181 hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size; in iwl_op_mode_mvm_start()
1183 hw->max_tx_aggregation_subframes = max_agg; in iwl_op_mode_mvm_start()
1185 op_mode = hw->priv; in iwl_op_mode_mvm_start()
1188 mvm->dev = trans->dev; in iwl_op_mode_mvm_start()
1189 mvm->trans = trans; in iwl_op_mode_mvm_start()
1190 mvm->cfg = cfg; in iwl_op_mode_mvm_start()
1191 mvm->fw = fw; in iwl_op_mode_mvm_start()
1192 mvm->hw = hw; in iwl_op_mode_mvm_start()
1194 iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm, in iwl_op_mode_mvm_start()
1198 iwl_uefi_get_sgom_table(trans, &mvm->fwrt); in iwl_op_mode_mvm_start()
1201 mvm->init_status = 0; in iwl_op_mode_mvm_start()
1204 op_mode->ops = &iwl_mvm_ops_mq; in iwl_op_mode_mvm_start()
1205 trans->rx_mpdu_cmd_hdr_size = in iwl_op_mode_mvm_start()
1206 (trans->trans_cfg->device_family >= in iwl_op_mode_mvm_start()
1211 op_mode->ops = &iwl_mvm_ops; in iwl_op_mode_mvm_start()
1212 trans->rx_mpdu_cmd_hdr_size = in iwl_op_mode_mvm_start()
1215 if (WARN_ON(trans->num_rx_queues > 1)) in iwl_op_mode_mvm_start()
1219 mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0; in iwl_op_mode_mvm_start()
1223 * If we have the new TX/queue allocation API initialize them in iwl_op_mode_mvm_start()
1226 * time (e.g. P2P Device is optional), and if a dynamic queue in iwl_op_mode_mvm_start()
1231 mvm->aux_queue = IWL_MVM_INVALID_QUEUE; in iwl_op_mode_mvm_start()
1232 mvm->snif_queue = IWL_MVM_INVALID_QUEUE; in iwl_op_mode_mvm_start()
1233 mvm->probe_queue = IWL_MVM_INVALID_QUEUE; in iwl_op_mode_mvm_start()
1234 mvm->p2p_dev_queue = IWL_MVM_INVALID_QUEUE; in iwl_op_mode_mvm_start()
1236 mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE; in iwl_op_mode_mvm_start()
1237 mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE; in iwl_op_mode_mvm_start()
1238 mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; in iwl_op_mode_mvm_start()
1239 mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; in iwl_op_mode_mvm_start()
1242 mvm->sf_state = SF_UNINIT; in iwl_op_mode_mvm_start()
1244 iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_REGULAR); in iwl_op_mode_mvm_start()
1246 iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_INIT); in iwl_op_mode_mvm_start()
1247 mvm->drop_bcn_ap_mode = true; in iwl_op_mode_mvm_start()
1249 mutex_init(&mvm->mutex); in iwl_op_mode_mvm_start()
1250 spin_lock_init(&mvm->async_handlers_lock); in iwl_op_mode_mvm_start()
1251 INIT_LIST_HEAD(&mvm->time_event_list); in iwl_op_mode_mvm_start()
1252 INIT_LIST_HEAD(&mvm->aux_roc_te_list); in iwl_op_mode_mvm_start()
1253 INIT_LIST_HEAD(&mvm->async_handlers_list); in iwl_op_mode_mvm_start()
1254 spin_lock_init(&mvm->time_event_lock); in iwl_op_mode_mvm_start()
1255 INIT_LIST_HEAD(&mvm->ftm_initiator.loc_list); in iwl_op_mode_mvm_start()
1256 INIT_LIST_HEAD(&mvm->ftm_initiator.pasn_list); in iwl_op_mode_mvm_start()
1257 INIT_LIST_HEAD(&mvm->resp_pasn_list); in iwl_op_mode_mvm_start()
1259 INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk); in iwl_op_mode_mvm_start()
1260 INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk); in iwl_op_mode_mvm_start()
1261 INIT_WORK(&mvm->sap_connected_wk, iwl_mvm_sap_connected_wk); in iwl_op_mode_mvm_start()
1262 INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work); in iwl_op_mode_mvm_start()
1263 INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk); in iwl_op_mode_mvm_start()
1264 INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk); in iwl_op_mode_mvm_start()
1265 INIT_LIST_HEAD(&mvm->add_stream_txqs); in iwl_op_mode_mvm_start()
1266 spin_lock_init(&mvm->add_stream_lock); in iwl_op_mode_mvm_start()
1268 init_waitqueue_head(&mvm->rx_sync_waitq); in iwl_op_mode_mvm_start()
1270 mvm->queue_sync_state = 0; in iwl_op_mode_mvm_start()
1272 SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev); in iwl_op_mode_mvm_start()
1274 spin_lock_init(&mvm->tcm.lock); in iwl_op_mode_mvm_start()
1275 INIT_DELAYED_WORK(&mvm->tcm.work, iwl_mvm_tcm_work); in iwl_op_mode_mvm_start()
1276 mvm->tcm.ts = jiffies; in iwl_op_mode_mvm_start()
1277 mvm->tcm.ll_ts = jiffies; in iwl_op_mode_mvm_start()
1278 mvm->tcm.uapsd_nonagg_ts = jiffies; in iwl_op_mode_mvm_start()
1280 INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork); in iwl_op_mode_mvm_start()
1282 mvm->cmd_ver.range_resp = in iwl_op_mode_mvm_start()
1283 iwl_fw_lookup_notif_ver(mvm->fw, LOCATION_GROUP, in iwl_op_mode_mvm_start()
1286 if (WARN_ON_ONCE(mvm->cmd_ver.range_resp > 9)) in iwl_op_mode_mvm_start()
1316 trans->wide_cmd_header = true; in iwl_op_mode_mvm_start()
1318 mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210; in iwl_op_mode_mvm_start()
1330 /* Set a short watchdog for the command queue */ in iwl_op_mode_mvm_start()
1334 snprintf(mvm->hw->wiphy->fw_version, in iwl_op_mode_mvm_start()
1335 sizeof(mvm->hw->wiphy->fw_version), in iwl_op_mode_mvm_start()
1336 "%.31s", fw->fw_version); in iwl_op_mode_mvm_start()
1338 trans_cfg.fw_reset_handshake = fw_has_capa(&mvm->fw->ucode_capa, in iwl_op_mode_mvm_start()
1342 iwl_fw_lookup_cmd_ver(mvm->fw, in iwl_op_mode_mvm_start()
1346 mvm->sta_remove_requires_queue_remove = in iwl_op_mode_mvm_start()
1349 mvm->mld_api_is_used = iwl_mvm_has_mld_api(mvm->fw); in iwl_op_mode_mvm_start()
1352 iwl_trans_configure(mvm->trans, &trans_cfg); in iwl_op_mode_mvm_start()
1354 trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD; in iwl_op_mode_mvm_start()
1355 trans->dbg.dest_tlv = mvm->fw->dbg.dest_tlv; in iwl_op_mode_mvm_start()
1356 trans->dbg.n_dest_reg = mvm->fw->dbg.n_dest_reg; in iwl_op_mode_mvm_start()
1357 memcpy(trans->dbg.conf_tlv, mvm->fw->dbg.conf_tlv, in iwl_op_mode_mvm_start()
1358 sizeof(trans->dbg.conf_tlv)); in iwl_op_mode_mvm_start()
1359 trans->dbg.trigger_tlv = mvm->fw->dbg.trigger_tlv; in iwl_op_mode_mvm_start()
1361 trans->iml = mvm->fw->iml; in iwl_op_mode_mvm_start()
1362 trans->iml_len = mvm->fw->iml_len; in iwl_op_mode_mvm_start()
1365 iwl_notification_wait_init(&mvm->notif_wait); in iwl_op_mode_mvm_start()
1368 mvm->phy_db = iwl_phy_db_init(trans); in iwl_op_mode_mvm_start()
1369 if (!mvm->phy_db) { in iwl_op_mode_mvm_start()
1375 mvm->trans->name, mvm->trans->hw_rev); in iwl_op_mode_mvm_start()
1378 mvm->nvm_file_name = iwlwifi_mod_params.nvm_file; in iwl_op_mode_mvm_start()
1380 IWL_DEBUG_EEPROM(mvm->trans->dev, in iwl_op_mode_mvm_start()
1385 mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL); in iwl_op_mode_mvm_start()
1386 if (!mvm->scan_cmd) in iwl_op_mode_mvm_start()
1388 mvm->scan_cmd_size = scan_size; in iwl_op_mode_mvm_start()
1391 mvm->aux_sta.sta_id = IWL_MVM_INVALID_STA; in iwl_op_mode_mvm_start()
1392 mvm->snif_sta.sta_id = IWL_MVM_INVALID_STA; in iwl_op_mode_mvm_start()
1395 mvm->last_ebs_successful = true; in iwl_op_mode_mvm_start()
1401 memset(&mvm->rx_stats_v3, 0, in iwl_op_mode_mvm_start()
1404 memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx)); in iwl_op_mode_mvm_start()
1408 iwl_mvm_init_time_sync(&mvm->time_sync); in iwl_op_mode_mvm_start()
1410 mvm->debugfs_dir = dbgfs_dir; in iwl_op_mode_mvm_start()
1412 mvm->mei_registered = !iwl_mei_register(mvm, &mei_ops); in iwl_op_mode_mvm_start()
1414 iwl_mvm_mei_scan_filter_init(&mvm->mei_scan_filter); in iwl_op_mode_mvm_start()
1422 if (trans->csme_own && mvm->mei_registered) in iwl_op_mode_mvm_start()
1436 if (mvm->mei_registered) { in iwl_op_mode_mvm_start()
1441 iwl_fw_flush_dumps(&mvm->fwrt); in iwl_op_mode_mvm_start()
1442 iwl_fw_runtime_free(&mvm->fwrt); in iwl_op_mode_mvm_start()
1446 iwl_phy_db_free(mvm->phy_db); in iwl_op_mode_mvm_start()
1447 kfree(mvm->scan_cmd); in iwl_op_mode_mvm_start()
1450 ieee80211_free_hw(mvm->hw); in iwl_op_mode_mvm_start()
1456 lockdep_assert_held(&mvm->mutex); in iwl_mvm_stop_device()
1458 iwl_fw_cancel_timestamp(&mvm->fwrt); in iwl_mvm_stop_device()
1460 clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); in iwl_mvm_stop_device()
1462 iwl_fw_dbg_stop_sync(&mvm->fwrt); in iwl_mvm_stop_device()
1463 iwl_trans_stop_device(mvm->trans); in iwl_mvm_stop_device()
1464 iwl_free_fw_paging(&mvm->fwrt); in iwl_mvm_stop_device()
1465 iwl_fw_dump_conf_clear(&mvm->fwrt); in iwl_mvm_stop_device()
1474 if (mvm->mei_registered) { in iwl_op_mode_mvm_stop()
1485 cancel_work_sync(&mvm->sap_connected_wk); in iwl_op_mode_mvm_stop()
1499 if (mvm->hw_registered) in iwl_op_mode_mvm_stop()
1500 ieee80211_unregister_hw(mvm->hw); in iwl_op_mode_mvm_stop()
1502 kfree(mvm->scan_cmd); in iwl_op_mode_mvm_stop()
1503 kfree(mvm->mcast_filter_cmd); in iwl_op_mode_mvm_stop()
1504 mvm->mcast_filter_cmd = NULL; in iwl_op_mode_mvm_stop()
1506 kfree(mvm->error_recovery_buf); in iwl_op_mode_mvm_stop()
1507 mvm->error_recovery_buf = NULL; in iwl_op_mode_mvm_stop()
1511 iwl_trans_op_mode_leave(mvm->trans); in iwl_op_mode_mvm_stop()
1513 iwl_phy_db_free(mvm->phy_db); in iwl_op_mode_mvm_stop()
1514 mvm->phy_db = NULL; in iwl_op_mode_mvm_stop()
1516 kfree(mvm->nvm_data); in iwl_op_mode_mvm_stop()
1517 kfree(mvm->mei_nvm_data); in iwl_op_mode_mvm_stop()
1518 kfree(rcu_access_pointer(mvm->csme_conn_info)); in iwl_op_mode_mvm_stop()
1519 kfree(mvm->temp_nvm_data); in iwl_op_mode_mvm_stop()
1521 kfree(mvm->nvm_sections[i].data); in iwl_op_mode_mvm_stop()
1523 cancel_delayed_work_sync(&mvm->tcm.work); in iwl_op_mode_mvm_stop()
1525 iwl_fw_runtime_free(&mvm->fwrt); in iwl_op_mode_mvm_stop()
1526 mutex_destroy(&mvm->mutex); in iwl_op_mode_mvm_stop()
1528 if (mvm->mei_registered) in iwl_op_mode_mvm_stop()
1531 ieee80211_free_hw(mvm->hw); in iwl_op_mode_mvm_stop()
1545 spin_lock_bh(&mvm->async_handlers_lock); in iwl_mvm_async_handlers_purge()
1546 list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) { in iwl_mvm_async_handlers_purge()
1547 iwl_free_rxb(&entry->rxb); in iwl_mvm_async_handlers_purge()
1548 list_del(&entry->list); in iwl_mvm_async_handlers_purge()
1551 spin_unlock_bh(&mvm->async_handlers_lock); in iwl_mvm_async_handlers_purge()
1564 * Sync with Rx path with a lock. Remove all the entries from this list, in iwl_mvm_async_handlers_wk()
1567 spin_lock_bh(&mvm->async_handlers_lock); in iwl_mvm_async_handlers_wk()
1568 list_splice_init(&mvm->async_handlers_list, &local_list); in iwl_mvm_async_handlers_wk()
1569 spin_unlock_bh(&mvm->async_handlers_lock); in iwl_mvm_async_handlers_wk()
1572 if (entry->context == RX_HANDLER_ASYNC_LOCKED) in iwl_mvm_async_handlers_wk()
1573 mutex_lock(&mvm->mutex); in iwl_mvm_async_handlers_wk()
1574 entry->fn(mvm, &entry->rxb); in iwl_mvm_async_handlers_wk()
1575 iwl_free_rxb(&entry->rxb); in iwl_mvm_async_handlers_wk()
1576 list_del(&entry->list); in iwl_mvm_async_handlers_wk()
1577 if (entry->context == RX_HANDLER_ASYNC_LOCKED) in iwl_mvm_async_handlers_wk()
1578 mutex_unlock(&mvm->mutex); in iwl_mvm_async_handlers_wk()
1584 struct iwl_rx_packet *pkt) in iwl_mvm_rx_check_trigger() argument
1590 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, in iwl_mvm_rx_check_trigger()
1595 cmds_trig = (void *)trig->data; in iwl_mvm_rx_check_trigger()
1597 for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) { in iwl_mvm_rx_check_trigger()
1599 if (!cmds_trig->cmds[i].cmd_id) in iwl_mvm_rx_check_trigger()
1602 if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd || in iwl_mvm_rx_check_trigger()
1603 cmds_trig->cmds[i].group_id != pkt->hdr.group_id) in iwl_mvm_rx_check_trigger()
1606 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, in iwl_mvm_rx_check_trigger()
1608 pkt->hdr.group_id, pkt->hdr.cmd); in iwl_mvm_rx_check_trigger()
1615 struct iwl_rx_packet *pkt) in iwl_mvm_rx_common() argument
1617 unsigned int pkt_len = iwl_rx_packet_payload_len(pkt); in iwl_mvm_rx_common()
1619 union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt }; in iwl_mvm_rx_common()
1621 iwl_dbg_tlv_time_point(&mvm->fwrt, in iwl_mvm_rx_common()
1623 iwl_mvm_rx_check_trigger(mvm, pkt); in iwl_mvm_rx_common()
1626 * Do the notification wait before RX handlers so in iwl_mvm_rx_common()
1627 * even if the RX handler consumes the RXB we have in iwl_mvm_rx_common()
1630 iwl_notification_wait_notify(&mvm->notif_wait, pkt); in iwl_mvm_rx_common()
1636 if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) in iwl_mvm_rx_common()
1639 if (IWL_FW_CHECK(mvm, pkt_len < rx_h->min_size, in iwl_mvm_rx_common()
1641 rx_h->cmd_id, pkt_len, rx_h->min_size)) in iwl_mvm_rx_common()
1644 if (rx_h->context == RX_HANDLER_SYNC) { in iwl_mvm_rx_common()
1645 rx_h->fn(mvm, rxb); in iwl_mvm_rx_common()
1654 entry->rxb._page = rxb_steal_page(rxb); in iwl_mvm_rx_common()
1655 entry->rxb._offset = rxb->_offset; in iwl_mvm_rx_common()
1656 entry->rxb._rx_page_order = rxb->_rx_page_order; in iwl_mvm_rx_common()
1657 entry->fn = rx_h->fn; in iwl_mvm_rx_common()
1658 entry->context = rx_h->context; in iwl_mvm_rx_common()
1659 spin_lock(&mvm->async_handlers_lock); in iwl_mvm_rx_common()
1660 list_add_tail(&entry->list, &mvm->async_handlers_list); in iwl_mvm_rx_common()
1661 spin_unlock(&mvm->async_handlers_lock); in iwl_mvm_rx_common()
1662 schedule_work(&mvm->async_handlers_wk); in iwl_mvm_rx_common()
1671 struct iwl_rx_packet *pkt = rxb_addr(rxb); in iwl_mvm_rx() local
1673 u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); in iwl_mvm_rx()
1680 iwl_mvm_rx_common(mvm, rxb, pkt); in iwl_mvm_rx()
1687 struct iwl_rx_packet *pkt = rxb_addr(rxb); in iwl_mvm_rx_mq() local
1689 u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); in iwl_mvm_rx_mq()
1703 iwl_mvm_rx_common(mvm, rxb, pkt); in iwl_mvm_rx_mq()
1706 static int iwl_mvm_is_static_queue(struct iwl_mvm *mvm, int queue) in iwl_mvm_is_static_queue() argument
1708 return queue == mvm->aux_queue || queue == mvm->probe_queue || in iwl_mvm_is_static_queue()
1709 queue == mvm->p2p_dev_queue || queue == mvm->snif_queue; in iwl_mvm_is_static_queue()
1725 mvm->tvqm_info[hw_queue].sta_id : in iwl_mvm_queue_state_change()
1726 mvm->queue_info[hw_queue].ra_sta_id; in iwl_mvm_queue_state_change()
1728 if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations)) in iwl_mvm_queue_state_change()
1733 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); in iwl_mvm_queue_state_change()
1740 ieee80211_stop_queues(mvm->hw); in iwl_mvm_queue_state_change()
1741 else if (mvmsta->sta_state != IEEE80211_STA_NOTEXIST) in iwl_mvm_queue_state_change()
1742 ieee80211_wake_queues(mvm->hw); in iwl_mvm_queue_state_change()
1748 int tid = mvm->tvqm_info[hw_queue].txq_tid; in iwl_mvm_queue_state_change()
1752 tid_bitmap = mvm->queue_info[hw_queue].tid_bitmap; in iwl_mvm_queue_state_change()
1761 txq = sta->txq[tid]; in iwl_mvm_queue_state_change()
1764 clear_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state); in iwl_mvm_queue_state_change()
1766 set_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state); in iwl_mvm_queue_state_change()
1768 if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST) { in iwl_mvm_queue_state_change()
1770 iwl_mvm_mac_itxq_xmit(mvm->hw, txq); in iwl_mvm_queue_state_change()
1794 wake_up(&mvm->rx_sync_waitq); in iwl_mvm_set_rfkill_state()
1796 wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state); in iwl_mvm_set_rfkill_state()
1802 set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); in iwl_mvm_set_hw_ctkill_state()
1804 clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); in iwl_mvm_set_hw_ctkill_state()
1811 return rcu_dereference_protected(mvm->csme_conn_info, in iwl_mvm_get_csme_conn_info()
1812 lockdep_is_held(&mvm->mutex)); in iwl_mvm_get_csme_conn_info()
1818 bool rfkill_safe_init_done = READ_ONCE(mvm->rfkill_safe_init_done); in iwl_mvm_set_hw_rfkill_state()
1822 set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); in iwl_mvm_set_hw_rfkill_state()
1824 clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); in iwl_mvm_set_hw_rfkill_state()
1830 iwl_abort_notification_waits(&mvm->notif_wait); in iwl_mvm_set_hw_rfkill_state()
1852 iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); in iwl_mvm_free_skb()
1853 ieee80211_free_txskb(mvm->hw, skb); in iwl_mvm_free_skb()
1866 if (device_reprobe(reprobe->dev)) in iwl_mvm_reprobe_wk()
1867 dev_err(reprobe->dev, "reprobe failed!\n"); in iwl_mvm_reprobe_wk()
1868 put_device(reprobe->dev); in iwl_mvm_reprobe_wk()
1875 iwl_abort_notification_waits(&mvm->notif_wait); in iwl_mvm_nic_restart()
1876 iwl_dbg_tlv_del_timers(mvm->trans); in iwl_mvm_nic_restart()
1896 if (!mvm->fw_restart && fw_error) { in iwl_mvm_nic_restart()
1897 iwl_fw_error_collect(&mvm->fwrt, false); in iwl_mvm_nic_restart()
1899 &mvm->status)) { in iwl_mvm_nic_restart()
1901 } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { in iwl_mvm_nic_restart()
1905 "Firmware error during reconfiguration - reprobe!\n"); in iwl_mvm_nic_restart()
1913 IWL_ERR(mvm, "Module is being unloaded - abort\n"); in iwl_mvm_nic_restart()
1922 reprobe->dev = get_device(mvm->trans->dev); in iwl_mvm_nic_restart()
1923 INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk); in iwl_mvm_nic_restart()
1924 schedule_work(&reprobe->work); in iwl_mvm_nic_restart()
1926 &mvm->status)) { in iwl_mvm_nic_restart()
1928 } else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR && in iwl_mvm_nic_restart()
1929 mvm->hw_registered && in iwl_mvm_nic_restart()
1930 !test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) { in iwl_mvm_nic_restart()
1935 set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); in iwl_mvm_nic_restart()
1937 if (mvm->fw->ucode_capa.error_log_size) { in iwl_mvm_nic_restart()
1938 u32 src_size = mvm->fw->ucode_capa.error_log_size; in iwl_mvm_nic_restart()
1939 u32 src_addr = mvm->fw->ucode_capa.error_log_addr; in iwl_mvm_nic_restart()
1943 mvm->error_recovery_buf = recover_buf; in iwl_mvm_nic_restart()
1944 iwl_trans_read_mem_bytes(mvm->trans, in iwl_mvm_nic_restart()
1951 iwl_fw_error_collect(&mvm->fwrt, false); in iwl_mvm_nic_restart()
1953 if (fw_error && mvm->fw_restart > 0) { in iwl_mvm_nic_restart()
1954 mvm->fw_restart--; in iwl_mvm_nic_restart()
1955 ieee80211_restart_hw(mvm->hw); in iwl_mvm_nic_restart()
1956 } else if (mvm->fwrt.trans->dbg.restart_required) { in iwl_mvm_nic_restart()
1958 mvm->fwrt.trans->dbg.restart_required = false; in iwl_mvm_nic_restart()
1959 ieee80211_restart_hw(mvm->hw); in iwl_mvm_nic_restart()
1960 } else if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000) { in iwl_mvm_nic_restart()
1961 ieee80211_restart_hw(mvm->hw); in iwl_mvm_nic_restart()
1970 if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status) && in iwl_mvm_nic_error()
1972 &mvm->status)) in iwl_mvm_nic_error()
1976 iwl_fw_error_collect(&mvm->fwrt, true); in iwl_mvm_nic_error()
1990 if (!test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status)) in iwl_mvm_nic_error()
2010 iwl_dbg_tlv_time_point(&mvm->fwrt, tp_id, tp_data); in iwl_op_mode_mvm_time_point()
2029 .rx = iwl_mvm_rx,
2035 unsigned int queue) in iwl_mvm_rx_mq_rss() argument
2038 struct iwl_rx_packet *pkt = rxb_addr(rxb); in iwl_mvm_rx_mq_rss() local
2039 u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); in iwl_mvm_rx_mq_rss()
2041 if (unlikely(queue >= mvm->trans->num_rx_queues)) in iwl_mvm_rx_mq_rss()
2045 iwl_mvm_rx_frame_release(mvm, napi, rxb, queue); in iwl_mvm_rx_mq_rss()
2048 iwl_mvm_rx_queue_notif(mvm, napi, rxb, queue); in iwl_mvm_rx_mq_rss()
2050 iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue); in iwl_mvm_rx_mq_rss()
2055 .rx = iwl_mvm_rx_mq,
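(For reference, the two .rx hooks above belong to the single-queue and multi-queue op-mode ops respectively. A hedged sketch of how they are wired, assuming the IWL_MVM_COMMON_OPS macro this file defines for the shared callbacks; the multi-queue variant additionally routes per-queue RSS traffic through iwl_mvm_rx_mq_rss():)

	static const struct iwl_op_mode_ops iwl_mvm_ops = {
		IWL_MVM_COMMON_OPS,
		.rx = iwl_mvm_rx,		/* single Rx queue devices */
	};

	static const struct iwl_op_mode_ops iwl_mvm_ops_mq = {
		IWL_MVM_COMMON_OPS,
		.rx = iwl_mvm_rx_mq,		/* default queue */
		.rx_rss = iwl_mvm_rx_mq_rss,	/* RSS queues (queue > 0) */
	};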