// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2024-2025 Intel Corporation
 */

#include "mld.h"
#include "notif.h"
#include "scan.h"
#include "iface.h"
#include "mlo.h"
#include "iwl-trans.h"
#include "fw/file.h"
#include "fw/dbg.h"
#include "fw/api/cmdhdr.h"
#include "fw/api/mac-cfg.h"
#include "session-protect.h"
#include "fw/api/time-event.h"
#include "fw/api/tx.h"
#include "fw/api/rs.h"
#include "fw/api/offload.h"
#include "fw/api/stats.h"
#include "fw/api/rfi.h"
#include "fw/api/coex.h"

#include "mcc.h"
#include "link.h"
#include "tx.h"
#include "rx.h"
#include "tlc.h"
#include "agg.h"
#include "mac80211.h"
#include "thermal.h"
#include "roc.h"
#include "stats.h"
#include "coex.h"
#include "time_sync.h"
#include "ftm-initiator.h"

/* Please use this in an increasing order of the versions */
#define CMD_VER_ENTRY(_ver, _struct) \
	{ .size = sizeof(struct _struct), .ver = _ver },

/* Declares iwl_notif_struct_sizes_<name>[]: for each supported version of
 * notification <name>, the minimum payload size the firmware must send.
 * Consumed by the RX_HANDLER_* macros below and by the size validation in
 * iwl_mld_notif_is_valid().
 */
#define CMD_VERSIONS(name, ...) \
	static const struct iwl_notif_struct_size \
	iwl_notif_struct_sizes_##name[] = { __VA_ARGS__ };

/* Handler entry with per-version size validation, not tied to any driver
 * object (so it is never canceled).
 */
#define RX_HANDLER_NO_OBJECT(_grp, _cmd, _name, _context)		\
	{.cmd_id = WIDE_ID(_grp, _cmd),					\
	 .context = _context,						\
	 .fn = iwl_mld_handle_##_name,					\
	 .sizes = iwl_notif_struct_sizes_##_name,			\
	 .n_sizes = ARRAY_SIZE(iwl_notif_struct_sizes_##_name),		\
	},

/* Use this for Rx handlers that do not need notification validation */
#define RX_HANDLER_NO_VAL(_grp, _cmd, _name, _context)			\
	{.cmd_id = WIDE_ID(_grp, _cmd),					\
	 .context = _context,						\
	 .fn = iwl_mld_handle_##_name,					\
	},

/* Handler entry whose validation is done by a custom function rather than
 * a size table.
 */
#define RX_HANDLER_VAL_FN(_grp, _cmd, _name, _context)			\
	{ .cmd_id = WIDE_ID(_grp, _cmd),				\
	  .context = _context,						\
	  .fn = iwl_mld_handle_##_name,					\
	  .val_fn = iwl_mld_validate_##_name,				\
	},

/* Generates iwl_mld_cancel_<name>_notif(): returns true when the object id
 * embedded in the notification (field id_member of notif_struct) matches
 * obj_id. _Generic selects the proper endianness conversion for the field's
 * declared type.
 */
#define DEFINE_SIMPLE_CANCELLATION(name, notif_struct, id_member)	\
static bool iwl_mld_cancel_##name##_notif(struct iwl_mld *mld,		\
					  struct iwl_rx_packet *pkt,	\
					  u32 obj_id)			\
{									\
	const struct notif_struct *notif = (const void *)pkt->data;	\
									\
	return obj_id == _Generic((notif)->id_member,			\
				  __le32: le32_to_cpu((notif)->id_member), \
				  __le16: le16_to_cpu((notif)->id_member), \
				  u8: (notif)->id_member);		\
}

/* Cancellation predicate for notifications that must always be dropped when
 * their owning object goes away, regardless of payload contents.
 */
static bool iwl_mld_always_cancel(struct iwl_mld *mld,
				  struct iwl_rx_packet *pkt,
				  u32 obj_id)
{
	return true;
}

/* Currently only defined for the RX_HANDLER_SIZES options. Use this for
 * notifications that belong to a specific object, and that should be
 * canceled when the object is removed
 */
#define RX_HANDLER_OF_OBJ(_grp, _cmd, _name, _obj_type)			\
	{.cmd_id = WIDE_ID(_grp, _cmd),					\
	/* Only async handlers can be canceled */			\
	 .context = RX_HANDLER_ASYNC,					\
	 .fn = iwl_mld_handle_##_name,					\
	 .sizes = iwl_notif_struct_sizes_##_name,			\
	 .n_sizes = ARRAY_SIZE(iwl_notif_struct_sizes_##_name),		\
	 .obj_type = IWL_MLD_OBJECT_TYPE_##_obj_type,			\
	 .cancel = iwl_mld_cancel_##_name,				\
	},

/* Convenience wrappers binding a handler to a specific object type */
#define RX_HANDLER_OF_LINK(_grp, _cmd, _name)				\
	RX_HANDLER_OF_OBJ(_grp, _cmd, _name, LINK)			\

#define RX_HANDLER_OF_VIF(_grp, _cmd, _name)				\
	RX_HANDLER_OF_OBJ(_grp, _cmd, _name, VIF)			\

#define RX_HANDLER_OF_STA(_grp, _cmd, _name)				\
	RX_HANDLER_OF_OBJ(_grp, _cmd, _name, STA)			\

#define RX_HANDLER_OF_ROC(_grp, _cmd, _name)				\
	RX_HANDLER_OF_OBJ(_grp, _cmd, _name, ROC)

#define RX_HANDLER_OF_SCAN(_grp, _cmd, _name)				\
	RX_HANDLER_OF_OBJ(_grp, _cmd, _name, SCAN)

#define RX_HANDLER_OF_FTM_REQ(_grp, _cmd, _name)			\
	RX_HANDLER_OF_OBJ(_grp, _cmd, _name, FTM_REQ)

/* Log the MFUART (microcode boot loader) version/status info the firmware
 * reported; purely informational.
 */
static void iwl_mld_handle_mfuart_notif(struct iwl_mld *mld,
					struct iwl_rx_packet *pkt)
{
	struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;

	IWL_DEBUG_INFO(mld,
		       "MFUART: installed ver: 0x%08x, external ver: 0x%08x\n",
		       le32_to_cpu(mfuart_notif->installed_ver),
		       le32_to_cpu(mfuart_notif->external_ver));
	IWL_DEBUG_INFO(mld,
		       "MFUART: status: 0x%08x, duration: 0x%08x image size: 0x%08x\n",
		       le32_to_cpu(mfuart_notif->status),
		       le32_to_cpu(mfuart_notif->duration),
		       le32_to_cpu(mfuart_notif->image_size));
}

/* Per-interface iterator: if this vif (or its single active link) owns the
 * MU-MIMO group, push the group membership/user-position update from the
 * firmware notification (_data) into mac80211.
 */
static void iwl_mld_mu_mimo_iface_iterator(void *_data, u8 *mac,
					   struct ieee80211_vif *vif)
{
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
	unsigned int link_id = 0;

	/* The notification carries no link id, so it cannot be applied
	 * unambiguously when more than one link is active (EMLSR).
	 */
	if (WARN(hweight16(vif->active_links) > 1,
		 "no support for this notif while in EMLSR 0x%x\n",
		 vif->active_links))
		return;

	if (ieee80211_vif_is_mld(vif)) {
		/* safe: exactly one bit set per the check above */
		link_id = __ffs(vif->active_links);
		bss_conf = link_conf_dereference_check(vif, link_id);
	}

	if (!WARN_ON(!bss_conf) && bss_conf->mu_mimo_owner) {
		const struct iwl_mu_group_mgmt_notif *notif = _data;

		BUILD_BUG_ON(sizeof(notif->membership_status) !=
			     WLAN_MEMBERSHIP_LEN);
		BUILD_BUG_ON(sizeof(notif->user_position) !=
			     WLAN_USER_POSITION_LEN);

		/* MU-MIMO Group Id action frame is little endian. We treat
		 * the data received from firmware as if it came from the
		 * action frame, so no conversion is needed.
		 */
		ieee80211_update_mu_groups(vif, link_id,
					   (u8 *)&notif->membership_status,
					   (u8 *)&notif->user_position);
	}
}

/* This handler is called in SYNC mode because it needs to be serialized with
 * Rx as specified in ieee80211_update_mu_groups()'s documentation.
 */
static void iwl_mld_handle_mu_mimo_grp_notif(struct iwl_mld *mld,
					     struct iwl_rx_packet *pkt)
{
	struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data;

	ieee80211_iterate_active_interfaces_atomic(mld->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   iwl_mld_mu_mimo_iface_iterator,
						   notif);
}

/* A beacon the firmware stored for us (e.g. while we were not listening):
 * rebuild an skb with rx_status metadata from the notification and feed it
 * to mac80211 as if it were regular Rx.
 */
static void
iwl_mld_handle_stored_beacon_notif(struct iwl_mld *mld,
				   struct iwl_rx_packet *pkt)
{
	unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
	struct iwl_stored_beacon_notif *sb = (void *)pkt->data;
	struct ieee80211_rx_status rx_status = {};
	struct sk_buff *skb;
	u32 size = le32_to_cpu(sb->common.byte_count);

	if (size == 0)
		return;

	/* don't trust the firmware-provided length beyond the real payload */
	if (pkt_len < struct_size(sb, data, size))
		return;

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb) {
		IWL_ERR(mld, "alloc_skb failed\n");
		return;
	}

	/* update rx_status according to the notification's metadata */
	rx_status.mactime = le64_to_cpu(sb->common.tsf);
	/* TSF as indicated by the firmware is at INA time */
	rx_status.flag |= RX_FLAG_MACTIME_PLCP_START;
	rx_status.device_timestamp = le32_to_cpu(sb->common.system_time);
	rx_status.band =
		iwl_mld_phy_band_to_nl80211(le16_to_cpu(sb->common.band));
	rx_status.freq =
		ieee80211_channel_to_frequency(le16_to_cpu(sb->common.channel),
					       rx_status.band);

	/* copy the data */
	skb_put_data(skb, sb->data, size);
	memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));

	/* pass it as regular rx to mac80211 */
	ieee80211_rx_napi(mld->hw, NULL, skb, NULL);
}

/* Firmware signals a channel switch has started on a link. For AP we only
 * complete an already-advertised CSA; for STA we either complete it or ask
 * the firmware to cancel when mac80211 never started one.
 */
static void
iwl_mld_handle_channel_switch_start_notif(struct iwl_mld *mld,
					  struct iwl_rx_packet *pkt)
{
	struct iwl_channel_switch_start_notif *notif = (void *)pkt->data;
	u32 link_id = le32_to_cpu(notif->link_id);
	struct ieee80211_bss_conf *link_conf =
		iwl_mld_fw_id_to_link_conf(mld, link_id);
	struct ieee80211_vif *vif;

	if (WARN_ON(!link_conf))
		return;

	vif = link_conf->vif;

	IWL_DEBUG_INFO(mld,
		       "CSA Start Notification with vif type: %d, link_id: %d\n",
		       vif->type,
		       link_conf->link_id);

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
		/* We don't support canceling a CSA as it was advertised
		 * by the AP itself
		 */
		if (!link_conf->csa_active)
			return;

		ieee80211_csa_finish(vif, link_conf->link_id);
		break;
	case NL80211_IFTYPE_STATION:
		if (!link_conf->csa_active) {
			/* Either unexpected cs notif or mac80211 chose to
			 * ignore, for example in channel switch to same channel
			 */
			struct iwl_cancel_channel_switch_cmd cmd = {
				.id = cpu_to_le32(link_id),
			};

			if (iwl_mld_send_cmd_pdu(mld,
						 WIDE_ID(MAC_CONF_GROUP,
							 CANCEL_CHANNEL_SWITCH_CMD),
						 &cmd))
				IWL_ERR(mld,
					"Failed to cancel the channel switch\n");
			return;
		}

		ieee80211_chswitch_done(vif, true, link_conf->link_id);
		break;

	default:
		WARN(1, "CSA on invalid vif type: %d", vif->type);
	}
}

/* Firmware reports a CSA error; for errors that cannot be recovered from,
 * tell mac80211 to disconnect.
 */
static void
iwl_mld_handle_channel_switch_error_notif(struct iwl_mld *mld,
					  struct iwl_rx_packet *pkt)
{
	struct iwl_channel_switch_error_notif *notif = (void *)pkt->data;
	struct ieee80211_bss_conf *link_conf;
	struct ieee80211_vif *vif;
	u32 link_id = le32_to_cpu(notif->link_id);
	u32 csa_err_mask = le32_to_cpu(notif->csa_err_mask);

	link_conf = iwl_mld_fw_id_to_link_conf(mld, link_id);
	if (WARN_ON(!link_conf))
		return;

	vif = link_conf->vif;

	IWL_DEBUG_INFO(mld, "FW reports CSA error: id=%u, csa_err_mask=%u\n",
		       link_id, csa_err_mask);

	if (csa_err_mask & (CS_ERR_COUNT_ERROR |
			    CS_ERR_LONG_DELAY_AFTER_CS |
			    CS_ERR_TX_BLOCK_TIMER_EXPIRED))
		ieee80211_channel_switch_disconnect(vif);
}

/* Track whether we are currently the IBSS manager (beacon transmitter) */
static void iwl_mld_handle_beacon_notification(struct iwl_mld *mld,
					       struct iwl_rx_packet *pkt)
{
	struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;

	mld->ibss_manager = !!beacon->ibss_mgr_status;
}

/**
 * DOC: Notification versioning
 *
 * The firmware's notifications change from time to time. In order to
 * differentiate between different versions of the same notification, the
 * firmware advertises the version of each notification.
 * Here are listed all the notifications that are supported. Several versions
 * of the same notification can be allowed at the same time:
 *
 * CMD_VERSIONS(my_multi_version_notif,
 *	    CMD_VER_ENTRY(1, iwl_my_multi_version_notif_ver1)
 *	    CMD_VER_ENTRY(2, iwl_my_multi_version_notif_ver2)
 *
 * etc...
 *
 * The driver will enforce that the notification coming from the firmware
 * has its version listed here and it'll also enforce that the firmware sent
 * at least enough bytes to cover the structure listed in the CMD_VER_ENTRY.
 */

CMD_VERSIONS(scan_complete_notif,
	     CMD_VER_ENTRY(1, iwl_umac_scan_complete))
CMD_VERSIONS(scan_iter_complete_notif,
	     CMD_VER_ENTRY(2, iwl_umac_scan_iter_complete_notif))
CMD_VERSIONS(mfuart_notif,
	     CMD_VER_ENTRY(2, iwl_mfuart_load_notif))
CMD_VERSIONS(update_mcc,
	     CMD_VER_ENTRY(1, iwl_mcc_chub_notif))
CMD_VERSIONS(session_prot_notif,
	     CMD_VER_ENTRY(3, iwl_session_prot_notif))
CMD_VERSIONS(missed_beacon_notif,
	     CMD_VER_ENTRY(5, iwl_missed_beacons_notif))
CMD_VERSIONS(tx_resp_notif,
	     CMD_VER_ENTRY(8, iwl_tx_resp))
CMD_VERSIONS(compressed_ba_notif,
	     CMD_VER_ENTRY(5, iwl_compressed_ba_notif)
	     CMD_VER_ENTRY(6, iwl_compressed_ba_notif))
CMD_VERSIONS(tlc_notif,
	     CMD_VER_ENTRY(3, iwl_tlc_update_notif))
CMD_VERSIONS(mu_mimo_grp_notif,
	     CMD_VER_ENTRY(1, iwl_mu_group_mgmt_notif))
CMD_VERSIONS(channel_switch_start_notif,
	     CMD_VER_ENTRY(3, iwl_channel_switch_start_notif))
CMD_VERSIONS(channel_switch_error_notif,
	     CMD_VER_ENTRY(2, iwl_channel_switch_error_notif))
CMD_VERSIONS(ct_kill_notif,
	     CMD_VER_ENTRY(2, ct_kill_notif))
CMD_VERSIONS(temp_notif,
	     CMD_VER_ENTRY(2, iwl_dts_measurement_notif))
CMD_VERSIONS(stored_beacon_notif,
	     CMD_VER_ENTRY(4, iwl_stored_beacon_notif))
CMD_VERSIONS(roc_notif,
	     CMD_VER_ENTRY(1, iwl_roc_notif))
CMD_VERSIONS(probe_resp_data_notif,
	     CMD_VER_ENTRY(1, iwl_probe_resp_data_notif))
CMD_VERSIONS(datapath_monitor_notif,
	     CMD_VER_ENTRY(1, iwl_datapath_monitor_notif))
CMD_VERSIONS(stats_oper_notif,
	     CMD_VER_ENTRY(3, iwl_system_statistics_notif_oper))
CMD_VERSIONS(stats_oper_part1_notif,
	     CMD_VER_ENTRY(4, iwl_system_statistics_part1_notif_oper))
CMD_VERSIONS(bt_coex_notif,
	     CMD_VER_ENTRY(1, iwl_bt_coex_profile_notif))
CMD_VERSIONS(beacon_notification,
	     CMD_VER_ENTRY(6, iwl_extended_beacon_notif))
CMD_VERSIONS(emlsr_mode_notif,
	     CMD_VER_ENTRY(1, iwl_esr_mode_notif))
CMD_VERSIONS(emlsr_trans_fail_notif, 383 CMD_VER_ENTRY(1, iwl_esr_trans_fail_notif)) 384 CMD_VERSIONS(uapsd_misbehaving_ap_notif, 385 CMD_VER_ENTRY(1, iwl_uapsd_misbehaving_ap_notif)) 386 CMD_VERSIONS(time_msmt_notif, 387 CMD_VER_ENTRY(1, iwl_time_msmt_notify)) 388 CMD_VERSIONS(time_sync_confirm_notif, 389 CMD_VER_ENTRY(1, iwl_time_msmt_cfm_notify)) 390 CMD_VERSIONS(omi_status_notif, 391 CMD_VER_ENTRY(1, iwl_omi_send_status_notif)) 392 CMD_VERSIONS(ftm_resp_notif, CMD_VER_ENTRY(9, iwl_tof_range_rsp_ntfy)) 393 394 DEFINE_SIMPLE_CANCELLATION(session_prot, iwl_session_prot_notif, mac_link_id) 395 DEFINE_SIMPLE_CANCELLATION(tlc, iwl_tlc_update_notif, sta_id) 396 DEFINE_SIMPLE_CANCELLATION(channel_switch_start, 397 iwl_channel_switch_start_notif, link_id) 398 DEFINE_SIMPLE_CANCELLATION(channel_switch_error, 399 iwl_channel_switch_error_notif, link_id) 400 DEFINE_SIMPLE_CANCELLATION(datapath_monitor, iwl_datapath_monitor_notif, 401 link_id) 402 DEFINE_SIMPLE_CANCELLATION(roc, iwl_roc_notif, activity) 403 DEFINE_SIMPLE_CANCELLATION(scan_complete, iwl_umac_scan_complete, uid) 404 DEFINE_SIMPLE_CANCELLATION(probe_resp_data, iwl_probe_resp_data_notif, 405 mac_id) 406 DEFINE_SIMPLE_CANCELLATION(uapsd_misbehaving_ap, iwl_uapsd_misbehaving_ap_notif, 407 mac_id) 408 #define iwl_mld_cancel_omi_status_notif iwl_mld_always_cancel 409 DEFINE_SIMPLE_CANCELLATION(ftm_resp, iwl_tof_range_rsp_ntfy, request_id) 410 411 /** 412 * DOC: Handlers for fw notifications 413 * 414 * Here are listed the notifications IDs (including the group ID), the handler 415 * of the notification and how it should be called: 416 * 417 * - RX_HANDLER_SYNC: will be called as part of the Rx path 418 * - RX_HANDLER_ASYNC: will be handled in a working with the wiphy_lock held 419 * 420 * This means that if the firmware sends two notifications A and B in that 421 * order and notification A is RX_HANDLER_ASYNC and notification is 422 * RX_HANDLER_SYNC, the handler of B will likely be called before the handler 423 * 
of A. 424 * 425 * This list should be in order of frequency for performance purposes. 426 * The handler can be one from two contexts, see &iwl_rx_handler_context 427 * 428 * A handler can declare that it relies on a specific object in which case it 429 * can be cancelled in case the object is deleted. In order to use this 430 * mechanism, a cancellation function is needed. The cancellation function must 431 * receive an object id (the index of that object in the firmware) and a 432 * notification payload. It'll return true if that specific notification should 433 * be cancelled upon the obliteration of the specific instance of the object. 434 * 435 * DEFINE_SIMPLE_CANCELLATION allows to easily create a cancellation function 436 * that wills simply return true if a given object id matches the object id in 437 * the firmware notification. 438 */ 439 440 VISIBLE_IF_IWLWIFI_KUNIT 441 const struct iwl_rx_handler iwl_mld_rx_handlers[] = { 442 RX_HANDLER_NO_OBJECT(LEGACY_GROUP, TX_CMD, tx_resp_notif, 443 RX_HANDLER_SYNC) 444 RX_HANDLER_NO_OBJECT(LEGACY_GROUP, BA_NOTIF, compressed_ba_notif, 445 RX_HANDLER_SYNC) 446 RX_HANDLER_OF_SCAN(LEGACY_GROUP, SCAN_COMPLETE_UMAC, 447 scan_complete_notif) 448 RX_HANDLER_NO_OBJECT(LEGACY_GROUP, SCAN_ITERATION_COMPLETE_UMAC, 449 scan_iter_complete_notif, 450 RX_HANDLER_SYNC) 451 RX_HANDLER_NO_VAL(LEGACY_GROUP, MATCH_FOUND_NOTIFICATION, 452 match_found_notif, RX_HANDLER_SYNC) 453 454 RX_HANDLER_NO_OBJECT(STATISTICS_GROUP, STATISTICS_OPER_NOTIF, 455 stats_oper_notif, RX_HANDLER_ASYNC) 456 RX_HANDLER_NO_OBJECT(STATISTICS_GROUP, STATISTICS_OPER_PART1_NOTIF, 457 stats_oper_part1_notif, RX_HANDLER_ASYNC) 458 459 RX_HANDLER_NO_OBJECT(LEGACY_GROUP, MFUART_LOAD_NOTIFICATION, 460 mfuart_notif, RX_HANDLER_SYNC) 461 462 RX_HANDLER_NO_OBJECT(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE, 463 temp_notif, RX_HANDLER_ASYNC) 464 RX_HANDLER_OF_LINK(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF, 465 session_prot_notif) 466 RX_HANDLER_OF_LINK(MAC_CONF_GROUP, 
MISSED_BEACONS_NOTIF, 467 missed_beacon_notif) 468 RX_HANDLER_OF_STA(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF, tlc_notif) 469 RX_HANDLER_OF_LINK(MAC_CONF_GROUP, CHANNEL_SWITCH_START_NOTIF, 470 channel_switch_start_notif) 471 RX_HANDLER_OF_LINK(MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF, 472 channel_switch_error_notif) 473 RX_HANDLER_OF_ROC(MAC_CONF_GROUP, ROC_NOTIF, roc_notif) 474 RX_HANDLER_NO_OBJECT(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF, 475 mu_mimo_grp_notif, RX_HANDLER_SYNC) 476 RX_HANDLER_NO_OBJECT(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF, 477 stored_beacon_notif, RX_HANDLER_SYNC) 478 RX_HANDLER_OF_VIF(MAC_CONF_GROUP, PROBE_RESPONSE_DATA_NOTIF, 479 probe_resp_data_notif) 480 RX_HANDLER_NO_OBJECT(PHY_OPS_GROUP, CT_KILL_NOTIFICATION, 481 ct_kill_notif, RX_HANDLER_ASYNC) 482 RX_HANDLER_OF_LINK(DATA_PATH_GROUP, MONITOR_NOTIF, 483 datapath_monitor_notif) 484 RX_HANDLER_NO_OBJECT(LEGACY_GROUP, MCC_CHUB_UPDATE_CMD, update_mcc, 485 RX_HANDLER_ASYNC) 486 RX_HANDLER_NO_OBJECT(BT_COEX_GROUP, PROFILE_NOTIF, 487 bt_coex_notif, RX_HANDLER_ASYNC) 488 RX_HANDLER_NO_OBJECT(LEGACY_GROUP, BEACON_NOTIFICATION, 489 beacon_notification, RX_HANDLER_ASYNC) 490 RX_HANDLER_NO_OBJECT(DATA_PATH_GROUP, ESR_MODE_NOTIF, 491 emlsr_mode_notif, RX_HANDLER_ASYNC) 492 RX_HANDLER_NO_OBJECT(MAC_CONF_GROUP, EMLSR_TRANS_FAIL_NOTIF, 493 emlsr_trans_fail_notif, RX_HANDLER_ASYNC) 494 RX_HANDLER_OF_VIF(LEGACY_GROUP, PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION, 495 uapsd_misbehaving_ap_notif) 496 RX_HANDLER_NO_OBJECT(LEGACY_GROUP, 497 WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION, 498 time_msmt_notif, RX_HANDLER_SYNC) 499 RX_HANDLER_NO_OBJECT(LEGACY_GROUP, 500 WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION, 501 time_sync_confirm_notif, RX_HANDLER_ASYNC) 502 RX_HANDLER_OF_LINK(DATA_PATH_GROUP, OMI_SEND_STATUS_NOTIF, 503 omi_status_notif) 504 RX_HANDLER_OF_FTM_REQ(LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF, 505 ftm_resp_notif) 506 }; 507 EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_rx_handlers); 508 509 #if 
IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS) 510 const unsigned int iwl_mld_rx_handlers_num = ARRAY_SIZE(iwl_mld_rx_handlers); 511 EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_rx_handlers_num); 512 #endif 513 514 static bool 515 iwl_mld_notif_is_valid(struct iwl_mld *mld, struct iwl_rx_packet *pkt, 516 const struct iwl_rx_handler *handler) 517 { 518 unsigned int size = iwl_rx_packet_payload_len(pkt); 519 size_t notif_ver; 520 521 /* If n_sizes == 0, it indicates that a validation function may be used 522 * or that no validation is required. 523 */ 524 if (!handler->n_sizes) { 525 if (handler->val_fn) 526 return handler->val_fn(mld, pkt); 527 return true; 528 } 529 530 notif_ver = iwl_fw_lookup_notif_ver(mld->fw, 531 iwl_cmd_groupid(handler->cmd_id), 532 iwl_cmd_opcode(handler->cmd_id), 533 IWL_FW_CMD_VER_UNKNOWN); 534 535 for (int i = 0; i < handler->n_sizes; i++) { 536 if (handler->sizes[i].ver != notif_ver) 537 continue; 538 539 if (IWL_FW_CHECK(mld, size < handler->sizes[i].size, 540 "unexpected notification 0x%04x size %d, need %d\n", 541 handler->cmd_id, size, handler->sizes[i].size)) 542 return false; 543 return true; 544 } 545 546 IWL_FW_CHECK_FAILED(mld, 547 "notif 0x%04x ver %zu missing expected size, use version %u size\n", 548 handler->cmd_id, notif_ver, 549 handler->sizes[handler->n_sizes - 1].ver); 550 551 return size < handler->sizes[handler->n_sizes - 1].size; 552 } 553 554 struct iwl_async_handler_entry { 555 struct list_head list; 556 struct iwl_rx_cmd_buffer rxb; 557 const struct iwl_rx_handler *rx_h; 558 }; 559 560 static void 561 iwl_mld_log_async_handler_op(struct iwl_mld *mld, const char *op, 562 struct iwl_rx_cmd_buffer *rxb) 563 { 564 struct iwl_rx_packet *pkt = rxb_addr(rxb); 565 566 IWL_DEBUG_HC(mld, 567 "%s async handler for notif %s (%.2x.%2x, seq 0x%x)\n", 568 op, iwl_get_cmd_string(mld->trans, 569 WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)), 570 pkt->hdr.group_id, pkt->hdr.cmd, 571 le16_to_cpu(pkt->hdr.sequence)); 572 } 573 574 static void 
iwl_mld_rx_notif(struct iwl_mld *mld, 575 struct iwl_rx_cmd_buffer *rxb, 576 struct iwl_rx_packet *pkt) 577 { 578 for (int i = 0; i < ARRAY_SIZE(iwl_mld_rx_handlers); i++) { 579 const struct iwl_rx_handler *rx_h = &iwl_mld_rx_handlers[i]; 580 struct iwl_async_handler_entry *entry; 581 582 if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) 583 continue; 584 585 if (!iwl_mld_notif_is_valid(mld, pkt, rx_h)) 586 return; 587 588 if (rx_h->context == RX_HANDLER_SYNC) { 589 rx_h->fn(mld, pkt); 590 break; 591 } 592 593 entry = kzalloc(sizeof(*entry), GFP_ATOMIC); 594 /* we can't do much... */ 595 if (!entry) 596 return; 597 598 /* Set the async handler entry */ 599 entry->rxb._page = rxb_steal_page(rxb); 600 entry->rxb._offset = rxb->_offset; 601 entry->rxb._rx_page_order = rxb->_rx_page_order; 602 603 entry->rx_h = rx_h; 604 605 /* Add it to the list and queue the work */ 606 spin_lock(&mld->async_handlers_lock); 607 list_add_tail(&entry->list, &mld->async_handlers_list); 608 spin_unlock(&mld->async_handlers_lock); 609 610 wiphy_work_queue(mld->hw->wiphy, 611 &mld->async_handlers_wk); 612 613 iwl_mld_log_async_handler_op(mld, "Queued", rxb); 614 break; 615 } 616 617 iwl_notification_wait_notify(&mld->notif_wait, pkt); 618 } 619 620 void iwl_mld_rx(struct iwl_op_mode *op_mode, struct napi_struct *napi, 621 struct iwl_rx_cmd_buffer *rxb) 622 { 623 struct iwl_rx_packet *pkt = rxb_addr(rxb); 624 struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode); 625 u16 cmd_id = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); 626 627 if (likely(cmd_id == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))) 628 iwl_mld_rx_mpdu(mld, napi, rxb, 0); 629 else if (cmd_id == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)) 630 iwl_mld_handle_frame_release_notif(mld, napi, pkt, 0); 631 else if (cmd_id == WIDE_ID(LEGACY_GROUP, BAR_FRAME_RELEASE)) 632 iwl_mld_handle_bar_frame_release_notif(mld, napi, pkt, 0); 633 else if (unlikely(cmd_id == WIDE_ID(DATA_PATH_GROUP, 634 RX_QUEUES_NOTIFICATION))) 635 
iwl_mld_handle_rx_queues_sync_notif(mld, napi, pkt, 0); 636 else if (cmd_id == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF)) 637 iwl_mld_rx_monitor_no_data(mld, napi, pkt, 0); 638 else 639 iwl_mld_rx_notif(mld, rxb, pkt); 640 } 641 642 void iwl_mld_rx_rss(struct iwl_op_mode *op_mode, struct napi_struct *napi, 643 struct iwl_rx_cmd_buffer *rxb, unsigned int queue) 644 { 645 struct iwl_rx_packet *pkt = rxb_addr(rxb); 646 struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode); 647 u16 cmd_id = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); 648 649 if (unlikely(queue >= mld->trans->num_rx_queues)) 650 return; 651 652 if (likely(cmd_id == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))) 653 iwl_mld_rx_mpdu(mld, napi, rxb, queue); 654 else if (unlikely(cmd_id == WIDE_ID(DATA_PATH_GROUP, 655 RX_QUEUES_NOTIFICATION))) 656 iwl_mld_handle_rx_queues_sync_notif(mld, napi, pkt, queue); 657 else if (unlikely(cmd_id == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))) 658 iwl_mld_handle_frame_release_notif(mld, napi, pkt, queue); 659 } 660 661 void iwl_mld_delete_handlers(struct iwl_mld *mld, const u16 *cmds, int n_cmds) 662 { 663 struct iwl_async_handler_entry *entry, *tmp; 664 665 spin_lock_bh(&mld->async_handlers_lock); 666 list_for_each_entry_safe(entry, tmp, &mld->async_handlers_list, list) { 667 bool match = false; 668 669 for (int i = 0; i < n_cmds; i++) { 670 if (entry->rx_h->cmd_id == cmds[i]) { 671 match = true; 672 break; 673 } 674 } 675 676 if (!match) 677 continue; 678 679 iwl_mld_log_async_handler_op(mld, "Delete", &entry->rxb); 680 iwl_free_rxb(&entry->rxb); 681 list_del(&entry->list); 682 kfree(entry); 683 } 684 spin_unlock_bh(&mld->async_handlers_lock); 685 } 686 687 void iwl_mld_async_handlers_wk(struct wiphy *wiphy, struct wiphy_work *wk) 688 { 689 struct iwl_mld *mld = 690 container_of(wk, struct iwl_mld, async_handlers_wk); 691 struct iwl_async_handler_entry *entry, *tmp; 692 LIST_HEAD(local_list); 693 694 /* Sync with Rx path with a lock. 
Remove all the entries from this 695 * list, add them to a local one (lock free), and then handle them. 696 */ 697 spin_lock_bh(&mld->async_handlers_lock); 698 list_splice_init(&mld->async_handlers_list, &local_list); 699 spin_unlock_bh(&mld->async_handlers_lock); 700 701 list_for_each_entry_safe(entry, tmp, &local_list, list) { 702 iwl_mld_log_async_handler_op(mld, "Handle", &entry->rxb); 703 entry->rx_h->fn(mld, rxb_addr(&entry->rxb)); 704 iwl_free_rxb(&entry->rxb); 705 list_del(&entry->list); 706 kfree(entry); 707 } 708 } 709 710 void iwl_mld_purge_async_handlers_list(struct iwl_mld *mld) 711 { 712 struct iwl_async_handler_entry *entry, *tmp; 713 714 spin_lock_bh(&mld->async_handlers_lock); 715 list_for_each_entry_safe(entry, tmp, &mld->async_handlers_list, list) { 716 iwl_mld_log_async_handler_op(mld, "Purged", &entry->rxb); 717 iwl_free_rxb(&entry->rxb); 718 list_del(&entry->list); 719 kfree(entry); 720 } 721 spin_unlock_bh(&mld->async_handlers_lock); 722 } 723 724 void iwl_mld_cancel_notifications_of_object(struct iwl_mld *mld, 725 enum iwl_mld_object_type obj_type, 726 u32 obj_id) 727 { 728 struct iwl_async_handler_entry *entry, *tmp; 729 LIST_HEAD(cancel_list); 730 731 lockdep_assert_wiphy(mld->wiphy); 732 733 if (WARN_ON(obj_type == IWL_MLD_OBJECT_TYPE_NONE)) 734 return; 735 736 /* Sync with RX path and remove matching entries from the async list */ 737 spin_lock_bh(&mld->async_handlers_lock); 738 list_for_each_entry_safe(entry, tmp, &mld->async_handlers_list, list) { 739 const struct iwl_rx_handler *rx_h = entry->rx_h; 740 741 if (rx_h->obj_type != obj_type || WARN_ON(!rx_h->cancel)) 742 continue; 743 744 if (rx_h->cancel(mld, rxb_addr(&entry->rxb), obj_id)) { 745 iwl_mld_log_async_handler_op(mld, "Cancel", &entry->rxb); 746 list_del(&entry->list); 747 list_add_tail(&entry->list, &cancel_list); 748 } 749 } 750 751 spin_unlock_bh(&mld->async_handlers_lock); 752 753 /* Free the matching entries outside of the spinlock */ 754 list_for_each_entry_safe(entry, 
tmp, &cancel_list, list) { 755 iwl_free_rxb(&entry->rxb); 756 list_del(&entry->list); 757 kfree(entry); 758 } 759 } 760