// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/mac80211.h>
#include <net/cfg80211.h>
#include <linux/completion.h>
#include <linux/if_ether.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/time.h>
#include <linux/of.h>
#include <linux/cleanup.h>
#include "core.h"
#include "debugfs.h"
#include "debug.h"
#include "mac.h"
#include "hw.h"
#include "peer.h"
#include "p2p.h"
#include "testmode.h"

struct ath12k_wmi_svc_ready_parse {
	bool wmi_svc_bitmap_done;
};

struct wmi_tlv_fw_stats_parse {
	const struct wmi_stats_event *ev;
	struct ath12k_fw_stats *stats;
	const struct wmi_per_chain_rssi_stat_params *rssi;
	int rssi_num;
	bool chain_rssi_done;
};

struct ath12k_wmi_dma_ring_caps_parse {
	struct ath12k_wmi_dma_ring_caps_params *dma_ring_caps;
	u32 n_dma_ring_caps;
};

struct ath12k_wmi_service_ext_arg {
	u32 default_conc_scan_config_bits;
	u32 default_fw_config_bits;
	struct ath12k_wmi_ppe_threshold_arg ppet;
	u32 he_cap_info;
	u32 mpdu_density;
	u32 max_bssid_rx_filters;
	u32 num_hw_modes;
	u32 num_phy;
};

struct ath12k_wmi_svc_rdy_ext_parse {
	struct ath12k_wmi_service_ext_arg arg;
	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps;
	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
	u32 n_hw_mode_caps;
	u32 tot_phy_id;
	struct ath12k_wmi_hw_mode_cap_params pref_hw_mode_caps;
	struct ath12k_wmi_mac_phy_caps_params *mac_phy_caps;
	u32 n_mac_phy_caps;
	const struct ath12k_wmi_soc_hal_reg_caps_params *soc_hal_reg_caps;
	const struct ath12k_wmi_hal_reg_caps_ext_params *ext_hal_reg_caps;
	u32 n_ext_hal_reg_caps;
	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
	bool hw_mode_done;
	bool mac_phy_done;
	bool ext_hal_reg_done;
	bool mac_phy_chainmask_combo_done;
	bool mac_phy_chainmask_cap_done;
	bool oem_dma_ring_cap_done;
	bool dma_ring_cap_done;
};

struct ath12k_wmi_svc_rdy_ext2_arg {
	u32 reg_db_version;
	u32 hw_min_max_tx_power_2ghz;
	u32 hw_min_max_tx_power_5ghz;
	u32 chwidth_num_peer_caps;
	u32 preamble_puncture_bw;
	u32 max_user_per_ppdu_ofdma;
	u32 max_user_per_ppdu_mumimo;
	u32 target_cap_flags;
	u32 eht_cap_mac_info[WMI_MAX_EHTCAP_MAC_SIZE];
	u32 max_num_linkview_peers;
	u32 max_num_msduq_supported_per_tid;
	u32 default_num_msduq_supported_per_tid;
};

struct ath12k_wmi_svc_rdy_ext2_parse {
	struct ath12k_wmi_svc_rdy_ext2_arg arg;
	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
	bool dma_ring_cap_done;
	bool spectral_bin_scaling_done;
	bool mac_phy_caps_ext_done;
	bool hal_reg_caps_ext2_done;
	bool scan_radio_caps_ext2_done;
	bool twt_caps_done;
	bool htt_msdu_idx_to_qtype_map_done;
	bool dbs_or_sbs_cap_ext_done;
};

struct ath12k_wmi_rdy_parse {
	u32 num_extra_mac_addr;
};

struct ath12k_wmi_dma_buf_release_arg {
	struct ath12k_wmi_dma_buf_release_fixed_params fixed;
	const struct ath12k_wmi_dma_buf_release_entry_params *buf_entry;
	const struct ath12k_wmi_dma_buf_release_meta_data_params *meta_data;
	u32 num_buf_entry;
	u32 num_meta;
	bool buf_entry_done;
	bool meta_data_done;
};

struct ath12k_wmi_tlv_policy {
	size_t min_len;
};

struct wmi_tlv_mgmt_rx_parse {
	const struct ath12k_wmi_mgmt_rx_params *fixed;
	const u8 *frame_buf;
	bool frame_buf_done;
};

struct wmi_pdev_set_obss_bitmap_arg {
	u32 tlv_tag;
	u32 pdev_id;
	u32 cmd_id;
	const u32 *bitmap;
	const char *label;
};

static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
	[WMI_TAG_ARRAY_BYTE] = { .min_len = 0 },
	[WMI_TAG_ARRAY_UINT32] = { .min_len = 0 },
	[WMI_TAG_SERVICE_READY_EVENT] = {
		.min_len = sizeof(struct wmi_service_ready_event) },
	[WMI_TAG_SERVICE_READY_EXT_EVENT] = {
		.min_len = sizeof(struct wmi_service_ready_ext_event) },
	[WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS] = {
		.min_len = sizeof(struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params) },
	[WMI_TAG_SOC_HAL_REG_CAPABILITIES] = {
		.min_len = sizeof(struct ath12k_wmi_soc_hal_reg_caps_params) },
	[WMI_TAG_VDEV_START_RESPONSE_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_start_resp_event) },
	[WMI_TAG_PEER_DELETE_RESP_EVENT] = {
		.min_len = sizeof(struct wmi_peer_delete_resp_event) },
	[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT] = {
		.min_len = sizeof(struct wmi_bcn_tx_status_event) },
	[WMI_TAG_VDEV_STOPPED_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_stopped_event) },
	[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT] = {
		.min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) },
	[WMI_TAG_MGMT_RX_HDR] = {
		.min_len = sizeof(struct ath12k_wmi_mgmt_rx_params) },
	[WMI_TAG_MGMT_TX_COMPL_EVENT] = {
		.min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
	[WMI_TAG_SCAN_EVENT] = {
		.min_len = sizeof(struct wmi_scan_event) },
	[WMI_TAG_PEER_STA_KICKOUT_EVENT] = {
		.min_len = sizeof(struct wmi_peer_sta_kickout_event) },
	[WMI_TAG_ROAM_EVENT] = {
		.min_len = sizeof(struct wmi_roam_event) },
	[WMI_TAG_CHAN_INFO_EVENT] = {
		.min_len = sizeof(struct wmi_chan_info_event) },
	[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT] = {
		.min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
	[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
	[WMI_TAG_READY_EVENT] = {
		.min_len = sizeof(struct ath12k_wmi_ready_event_min_params) },
	[WMI_TAG_SERVICE_AVAILABLE_EVENT] = {
		.min_len = sizeof(struct wmi_service_available_event) },
	[WMI_TAG_PEER_ASSOC_CONF_EVENT] = {
		.min_len = sizeof(struct wmi_peer_assoc_conf_event) },
	[WMI_TAG_RFKILL_EVENT] = {
		.min_len = sizeof(struct wmi_rfkill_state_change_event) },
	[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] = {
		.min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
	[WMI_TAG_HOST_SWFDA_EVENT] = {
		.min_len = sizeof(struct wmi_fils_discovery_event) },
	[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
		.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
	[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
	[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_twt_enable_event) },
	[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_twt_disable_event) },
	[WMI_TAG_P2P_NOA_INFO] = {
		.min_len = sizeof(struct ath12k_wmi_p2p_noa_info) },
	[WMI_TAG_P2P_NOA_EVENT] = {
		.min_len = sizeof(struct wmi_p2p_noa_event) },
	[WMI_TAG_11D_NEW_COUNTRY_EVENT] = {
		.min_len = sizeof(struct wmi_11d_new_cc_event) },
	[WMI_TAG_PER_CHAIN_RSSI_STATS] = {
		.min_len = sizeof(struct wmi_per_chain_rssi_stat_params) },
	[WMI_TAG_OBSS_COLOR_COLLISION_EVT] = {
		.min_len = sizeof(struct wmi_obss_color_collision_event) },
};

__le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
{
	return le32_encode_bits(cmd, WMI_TLV_TAG) |
		le32_encode_bits(len, WMI_TLV_LEN);
}

static __le32 ath12k_wmi_tlv_cmd_hdr(u32 cmd, u32 len)
{
	return ath12k_wmi_tlv_hdr(cmd, len - TLV_HDR_SIZE);
}

#define PRIMAP(_hw_mode_) \
	[_hw_mode_] = _hw_mode_##_PRI

static const int ath12k_hw_mode_pri_map[] = {
	PRIMAP(WMI_HOST_HW_MODE_SINGLE),
	PRIMAP(WMI_HOST_HW_MODE_DBS),
	PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
	PRIMAP(WMI_HOST_HW_MODE_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
	/* keep last */
	PRIMAP(WMI_HOST_HW_MODE_MAX),
};

static int
ath12k_wmi_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
		    int (*iter)(struct ath12k_base *ab, u16 tag, u16 len,
				const void *ptr, void *data),
		    void *data)
{
	const void *begin = ptr;
	const struct wmi_tlv *tlv;
	u16 tlv_tag, tlv_len;
	int ret;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath12k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}

		tlv = ptr;
		tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
		tlv_len = le32_get_bits(tlv->header, WMI_TLV_LEN);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}

		if (tlv_tag < ARRAY_SIZE(ath12k_wmi_tlv_policies) &&
		    ath12k_wmi_tlv_policies[tlv_tag].min_len &&
		    ath12k_wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
				   tlv_tag, ptr - begin, tlv_len,
				   ath12k_wmi_tlv_policies[tlv_tag].min_len);
			return -EINVAL;
		}

		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}

	return 0;
}

static int ath12k_wmi_tlv_iter_parse(struct ath12k_base *ab, u16 tag, u16 len,
				     const void *ptr, void *data)
{
	const void **tb = data;

	if (tag < WMI_TAG_MAX)
		tb[tag] = ptr;

	return 0;
}

static int ath12k_wmi_tlv_parse(struct ath12k_base *ar, const void **tb,
				const void *ptr, size_t len)
{
	return ath12k_wmi_tlv_iter(ar, ptr, len, ath12k_wmi_tlv_iter_parse,
				   (void *)tb);
}

static const void **
ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab,
			   struct sk_buff *skb, gfp_t gfp)
{
	const void **tb;
	int ret;

	tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp);
	if (!tb)
		return ERR_PTR(-ENOMEM);

	ret = ath12k_wmi_tlv_parse(ab, tb, skb->data, skb->len);
	if (ret) {
		kfree(tb);
		return ERR_PTR(ret);
	}

	return tb;
}

static int ath12k_wmi_cmd_send_nowait(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
				      u32 cmd_id)
{
	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_cmd_hdr *cmd_hdr;
	int ret;

	if (!skb_push(skb, sizeof(struct wmi_cmd_hdr)))
		return -ENOMEM;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	cmd_hdr->cmd_id = le32_encode_bits(cmd_id, WMI_CMD_HDR_CMD_ID);

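	/* Clear the driver's per-skb control block before the command is handed to HTC */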
memset(skb_cb, 0, sizeof(*skb_cb)); 334 ret = ath12k_htc_send(&ab->htc, wmi->eid, skb); 335 336 if (ret) 337 goto err_pull; 338 339 return 0; 340 341 err_pull: 342 skb_pull(skb, sizeof(struct wmi_cmd_hdr)); 343 return ret; 344 } 345 346 int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb, 347 u32 cmd_id) 348 { 349 struct ath12k_wmi_base *wmi_ab = wmi->wmi_ab; 350 int ret = -EOPNOTSUPP; 351 352 might_sleep(); 353 354 wait_event_timeout(wmi_ab->tx_credits_wq, ({ 355 ret = ath12k_wmi_cmd_send_nowait(wmi, skb, cmd_id); 356 357 if (ret && test_bit(ATH12K_FLAG_CRASH_FLUSH, &wmi_ab->ab->dev_flags)) 358 ret = -ESHUTDOWN; 359 360 (ret != -EAGAIN); 361 }), WMI_SEND_TIMEOUT_HZ); 362 363 if (ret == -EAGAIN) 364 ath12k_warn(wmi_ab->ab, "wmi command %d timeout\n", cmd_id); 365 366 return ret; 367 } 368 369 static int ath12k_pull_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle, 370 const void *ptr, 371 struct ath12k_wmi_service_ext_arg *arg) 372 { 373 const struct wmi_service_ready_ext_event *ev = ptr; 374 int i; 375 376 if (!ev) 377 return -EINVAL; 378 379 /* Move this to host based bitmap */ 380 arg->default_conc_scan_config_bits = 381 le32_to_cpu(ev->default_conc_scan_config_bits); 382 arg->default_fw_config_bits = le32_to_cpu(ev->default_fw_config_bits); 383 arg->he_cap_info = le32_to_cpu(ev->he_cap_info); 384 arg->mpdu_density = le32_to_cpu(ev->mpdu_density); 385 arg->max_bssid_rx_filters = le32_to_cpu(ev->max_bssid_rx_filters); 386 arg->ppet.numss_m1 = le32_to_cpu(ev->ppet.numss_m1); 387 arg->ppet.ru_bit_mask = le32_to_cpu(ev->ppet.ru_info); 388 389 for (i = 0; i < WMI_MAX_NUM_SS; i++) 390 arg->ppet.ppet16_ppet8_ru3_ru0[i] = 391 le32_to_cpu(ev->ppet.ppet16_ppet8_ru3_ru0[i]); 392 393 return 0; 394 } 395 396 static int 397 ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle, 398 struct ath12k_wmi_svc_rdy_ext_parse *svc, 399 u8 hw_mode_id, u8 phy_id, 400 struct ath12k_pdev *pdev) 401 { 402 const struct ath12k_wmi_mac_phy_caps_params *mac_caps; 403 const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps = svc->hw_caps; 404 const struct ath12k_wmi_hw_mode_cap_params *wmi_hw_mode_caps = svc->hw_mode_caps; 405 const struct ath12k_wmi_mac_phy_caps_params *wmi_mac_phy_caps = svc->mac_phy_caps; 406 struct ath12k_base *ab = wmi_handle->wmi_ab->ab; 407 struct ath12k_band_cap *cap_band; 408 struct ath12k_pdev_cap *pdev_cap = &pdev->cap; 409 struct ath12k_fw_pdev *fw_pdev; 410 u32 supported_bands; 411 u32 phy_map; 412 u32 hw_idx, phy_idx = 0; 413 int i; 414 415 if (!hw_caps || !wmi_hw_mode_caps || !svc->soc_hal_reg_caps) 416 return -EINVAL; 417 418 for (hw_idx = 0; hw_idx < le32_to_cpu(hw_caps->num_hw_modes); hw_idx++) { 419 if (hw_mode_id == le32_to_cpu(wmi_hw_mode_caps[hw_idx].hw_mode_id)) 420 break; 421 422 phy_map = le32_to_cpu(wmi_hw_mode_caps[hw_idx].phy_id_map); 423 phy_idx = fls(phy_map); 424 } 425 426 if (hw_idx == le32_to_cpu(hw_caps->num_hw_modes)) 427 return -EINVAL; 428 429 phy_idx += phy_id; 430 if (phy_id >= le32_to_cpu(svc->soc_hal_reg_caps->num_phy)) 431 return -EINVAL; 432 433 mac_caps = wmi_mac_phy_caps + phy_idx; 434 supported_bands = le32_to_cpu(mac_caps->supported_bands); 435 436 if (!(supported_bands & WMI_HOST_WLAN_2GHZ_CAP) && 437 !(supported_bands & WMI_HOST_WLAN_5GHZ_CAP)) 438 return -EINVAL; 439 440 pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps); 441 pdev->hw_link_id = ath12k_wmi_mac_phy_get_hw_link_id(mac_caps); 442 pdev_cap->supported_bands |= supported_bands; 443 pdev_cap->ampdu_density = 
le32_to_cpu(mac_caps->ampdu_density); 444 445 fw_pdev = &ab->fw_pdev[ab->fw_pdev_count]; 446 fw_pdev->supported_bands = supported_bands; 447 fw_pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps); 448 fw_pdev->phy_id = le32_to_cpu(mac_caps->phy_id); 449 ab->fw_pdev_count++; 450 451 /* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from 452 * band to band for a single radio, need to see how this should be 453 * handled. 454 */ 455 if (supported_bands & WMI_HOST_WLAN_2GHZ_CAP) { 456 pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_2g); 457 pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_2g); 458 } 459 460 if (supported_bands & WMI_HOST_WLAN_5GHZ_CAP) { 461 pdev_cap->vht_cap = le32_to_cpu(mac_caps->vht_cap_info_5g); 462 pdev_cap->vht_mcs = le32_to_cpu(mac_caps->vht_supp_mcs_5g); 463 pdev_cap->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g); 464 pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_5g); 465 pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_5g); 466 pdev_cap->nss_ratio_enabled = 467 WMI_NSS_RATIO_EN_DIS_GET(mac_caps->nss_ratio); 468 pdev_cap->nss_ratio_info = 469 WMI_NSS_RATIO_INFO_GET(mac_caps->nss_ratio); 470 } 471 472 /* tx/rx chainmask reported from fw depends on the actual hw chains used, 473 * For example, for 4x4 capable macphys, first 4 chains can be used for first 474 * mac and the remaining 4 chains can be used for the second mac or vice-versa. 475 * In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0 476 * will be advertised for second mac or vice-versa. Compute the shift value 477 * for tx/rx chainmask which will be used to advertise supported ht/vht rates to 478 * mac80211. 479 */ 480 pdev_cap->tx_chain_mask_shift = 481 find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32); 482 pdev_cap->rx_chain_mask_shift = 483 find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32); 484 485 if (supported_bands & WMI_HOST_WLAN_2GHZ_CAP) { 486 cap_band = &pdev_cap->band[NL80211_BAND_2GHZ]; 487 cap_band->phy_id = le32_to_cpu(mac_caps->phy_id); 488 cap_band->max_bw_supported = le32_to_cpu(mac_caps->max_bw_supported_2g); 489 cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_2g); 490 cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_2g); 491 cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_2g_ext); 492 cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_2g); 493 for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++) 494 cap_band->he_cap_phy_info[i] = 495 le32_to_cpu(mac_caps->he_cap_phy_info_2g[i]); 496 497 cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet2g.numss_m1); 498 cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet2g.ru_info); 499 500 for (i = 0; i < WMI_MAX_NUM_SS; i++) 501 cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] = 502 le32_to_cpu(mac_caps->he_ppet2g.ppet16_ppet8_ru3_ru0[i]); 503 } 504 505 if (supported_bands & WMI_HOST_WLAN_5GHZ_CAP) { 506 cap_band = &pdev_cap->band[NL80211_BAND_5GHZ]; 507 cap_band->phy_id = le32_to_cpu(mac_caps->phy_id); 508 cap_band->max_bw_supported = 509 le32_to_cpu(mac_caps->max_bw_supported_5g); 510 cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g); 511 cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g); 512 cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext); 513 cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g); 514 for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++) 515 cap_band->he_cap_phy_info[i] = 516 le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]); 517 518 
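		/* HE PPE (packet extension) thresholds reported by firmware for the 5 GHz band */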
cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1); 519 cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info); 520 521 for (i = 0; i < WMI_MAX_NUM_SS; i++) 522 cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] = 523 le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]); 524 525 cap_band = &pdev_cap->band[NL80211_BAND_6GHZ]; 526 cap_band->max_bw_supported = 527 le32_to_cpu(mac_caps->max_bw_supported_5g); 528 cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g); 529 cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g); 530 cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext); 531 cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g); 532 for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++) 533 cap_band->he_cap_phy_info[i] = 534 le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]); 535 536 cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1); 537 cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info); 538 539 for (i = 0; i < WMI_MAX_NUM_SS; i++) 540 cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] = 541 le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]); 542 } 543 544 return 0; 545 } 546 547 static int 548 ath12k_pull_reg_cap_svc_rdy_ext(struct ath12k_wmi_pdev *wmi_handle, 549 const struct ath12k_wmi_soc_hal_reg_caps_params *reg_caps, 550 const struct ath12k_wmi_hal_reg_caps_ext_params *ext_caps, 551 u8 phy_idx, 552 struct ath12k_wmi_hal_reg_capabilities_ext_arg *param) 553 { 554 const struct ath12k_wmi_hal_reg_caps_ext_params *ext_reg_cap; 555 556 if (!reg_caps || !ext_caps) 557 return -EINVAL; 558 559 if (phy_idx >= le32_to_cpu(reg_caps->num_phy)) 560 return -EINVAL; 561 562 ext_reg_cap = &ext_caps[phy_idx]; 563 564 param->phy_id = le32_to_cpu(ext_reg_cap->phy_id); 565 param->eeprom_reg_domain = le32_to_cpu(ext_reg_cap->eeprom_reg_domain); 566 param->eeprom_reg_domain_ext = 567 le32_to_cpu(ext_reg_cap->eeprom_reg_domain_ext); 568 param->regcap1 = le32_to_cpu(ext_reg_cap->regcap1); 569 param->regcap2 = le32_to_cpu(ext_reg_cap->regcap2); 570 /* check if param->wireless_mode is needed */ 571 param->low_2ghz_chan = le32_to_cpu(ext_reg_cap->low_2ghz_chan); 572 param->high_2ghz_chan = le32_to_cpu(ext_reg_cap->high_2ghz_chan); 573 param->low_5ghz_chan = le32_to_cpu(ext_reg_cap->low_5ghz_chan); 574 param->high_5ghz_chan = le32_to_cpu(ext_reg_cap->high_5ghz_chan); 575 576 return 0; 577 } 578 579 static int ath12k_pull_service_ready_tlv(struct ath12k_base *ab, 580 const void *evt_buf, 581 struct ath12k_wmi_target_cap_arg *cap) 582 { 583 const struct wmi_service_ready_event *ev = evt_buf; 584 585 if (!ev) { 586 ath12k_err(ab, "%s: failed by NULL param\n", 587 __func__); 588 return -EINVAL; 589 } 590 591 cap->phy_capability = le32_to_cpu(ev->phy_capability); 592 cap->max_frag_entry = le32_to_cpu(ev->max_frag_entry); 593 cap->num_rf_chains = le32_to_cpu(ev->num_rf_chains); 594 cap->ht_cap_info = le32_to_cpu(ev->ht_cap_info); 595 cap->vht_cap_info = le32_to_cpu(ev->vht_cap_info); 596 cap->vht_supp_mcs = le32_to_cpu(ev->vht_supp_mcs); 597 cap->hw_min_tx_power = le32_to_cpu(ev->hw_min_tx_power); 598 cap->hw_max_tx_power = le32_to_cpu(ev->hw_max_tx_power); 599 cap->sys_cap_info = le32_to_cpu(ev->sys_cap_info); 600 cap->min_pkt_size_enable = le32_to_cpu(ev->min_pkt_size_enable); 601 cap->max_bcn_ie_size = le32_to_cpu(ev->max_bcn_ie_size); 602 cap->max_num_scan_channels = le32_to_cpu(ev->max_num_scan_channels); 603 cap->max_supported_macs = le32_to_cpu(ev->max_supported_macs); 604 cap->wmi_fw_sub_feat_caps = 
le32_to_cpu(ev->wmi_fw_sub_feat_caps); 605 cap->txrx_chainmask = le32_to_cpu(ev->txrx_chainmask); 606 cap->default_dbs_hw_mode_index = le32_to_cpu(ev->default_dbs_hw_mode_index); 607 cap->num_msdu_desc = le32_to_cpu(ev->num_msdu_desc); 608 609 return 0; 610 } 611 612 /* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in 613 * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each 614 * 4-byte word. 615 */ 616 static void ath12k_wmi_service_bitmap_copy(struct ath12k_wmi_pdev *wmi, 617 const u32 *wmi_svc_bm) 618 { 619 int i, j; 620 621 for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) { 622 do { 623 if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32)) 624 set_bit(j, wmi->wmi_ab->svc_map); 625 } while (++j % WMI_SERVICE_BITS_IN_SIZE32); 626 } 627 } 628 629 static int ath12k_wmi_svc_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len, 630 const void *ptr, void *data) 631 { 632 struct ath12k_wmi_svc_ready_parse *svc_ready = data; 633 struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0]; 634 u16 expect_len; 635 636 switch (tag) { 637 case WMI_TAG_SERVICE_READY_EVENT: 638 if (ath12k_pull_service_ready_tlv(ab, ptr, &ab->target_caps)) 639 return -EINVAL; 640 break; 641 642 case WMI_TAG_ARRAY_UINT32: 643 if (!svc_ready->wmi_svc_bitmap_done) { 644 expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32); 645 if (len < expect_len) { 646 ath12k_warn(ab, "invalid len %d for the tag 0x%x\n", 647 len, tag); 648 return -EINVAL; 649 } 650 651 ath12k_wmi_service_bitmap_copy(wmi_handle, ptr); 652 653 svc_ready->wmi_svc_bitmap_done = true; 654 } 655 break; 656 default: 657 break; 658 } 659 660 return 0; 661 } 662 663 static int ath12k_service_ready_event(struct ath12k_base *ab, struct sk_buff *skb) 664 { 665 struct ath12k_wmi_svc_ready_parse svc_ready = { }; 666 int ret; 667 668 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, 669 ath12k_wmi_svc_rdy_parse, 670 &svc_ready); 671 if (ret) { 672 ath12k_warn(ab, "failed to parse tlv %d\n", ret); 673 return ret; 674 } 675 676 return 0; 677 } 678 679 static u32 ath12k_wmi_mgmt_get_freq(struct ath12k *ar, 680 struct ieee80211_tx_info *info) 681 { 682 struct ath12k_base *ab = ar->ab; 683 u32 freq = 0; 684 685 if (ab->hw_params->single_pdev_only && 686 ar->scan.is_roc && 687 (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)) 688 freq = ar->scan.roc_freq; 689 690 return freq; 691 } 692 693 struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_ab, u32 len) 694 { 695 struct sk_buff *skb; 696 struct ath12k_base *ab = wmi_ab->ab; 697 u32 round_len = roundup(len, 4); 698 699 skb = ath12k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len); 700 if (!skb) 701 return NULL; 702 703 skb_reserve(skb, WMI_SKB_HEADROOM); 704 if (!IS_ALIGNED((unsigned long)skb->data, 4)) 705 ath12k_warn(ab, "unaligned WMI skb data\n"); 706 707 skb_put(skb, round_len); 708 memset(skb->data, 0, round_len); 709 710 return skb; 711 } 712 713 int ath12k_wmi_mgmt_send(struct ath12k_link_vif *arvif, u32 buf_id, 714 struct sk_buff *frame) 715 { 716 struct ath12k *ar = arvif->ar; 717 struct ath12k_wmi_pdev *wmi = ar->wmi; 718 struct wmi_mgmt_send_cmd *cmd; 719 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame); 720 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)frame->data; 721 struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); 722 int cmd_len = sizeof(struct ath12k_wmi_mgmt_send_tx_params); 723 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)hdr; 724 struct ath12k_wmi_mlo_mgmt_send_params *ml_params; 725 struct ath12k_base *ab 
= ar->ab; 726 struct wmi_tlv *frame_tlv, *tlv; 727 struct ath12k_skb_cb *skb_cb; 728 u32 buf_len, buf_len_aligned; 729 u32 vdev_id = arvif->vdev_id; 730 bool link_agnostic = false; 731 struct sk_buff *skb; 732 int ret, len; 733 void *ptr; 734 735 buf_len = min_t(int, frame->len, WMI_MGMT_SEND_DOWNLD_LEN); 736 737 buf_len_aligned = roundup(buf_len, sizeof(u32)); 738 739 len = sizeof(*cmd) + sizeof(*frame_tlv) + buf_len_aligned; 740 741 if (ieee80211_vif_is_mld(vif)) { 742 skb_cb = ATH12K_SKB_CB(frame); 743 if ((skb_cb->flags & ATH12K_SKB_MLO_STA) && 744 ab->hw_params->hw_ops->is_frame_link_agnostic && 745 ab->hw_params->hw_ops->is_frame_link_agnostic(arvif, mgmt)) { 746 len += cmd_len + TLV_HDR_SIZE + sizeof(*ml_params); 747 ath12k_generic_dbg(ATH12K_DBG_MGMT, 748 "Sending Mgmt Frame fc 0x%0x as link agnostic", 749 mgmt->frame_control); 750 link_agnostic = true; 751 } 752 } 753 754 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 755 if (!skb) 756 return -ENOMEM; 757 758 cmd = (struct wmi_mgmt_send_cmd *)skb->data; 759 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MGMT_TX_SEND_CMD, 760 sizeof(*cmd)); 761 cmd->vdev_id = cpu_to_le32(vdev_id); 762 cmd->desc_id = cpu_to_le32(buf_id); 763 cmd->chanfreq = cpu_to_le32(ath12k_wmi_mgmt_get_freq(ar, info)); 764 cmd->paddr_lo = cpu_to_le32(lower_32_bits(ATH12K_SKB_CB(frame)->paddr)); 765 cmd->paddr_hi = cpu_to_le32(upper_32_bits(ATH12K_SKB_CB(frame)->paddr)); 766 cmd->frame_len = cpu_to_le32(frame->len); 767 cmd->buf_len = cpu_to_le32(buf_len); 768 cmd->tx_params_valid = 0; 769 770 frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd)); 771 frame_tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, buf_len_aligned); 772 773 memcpy(frame_tlv->value, frame->data, buf_len); 774 775 if (!link_agnostic) 776 goto send; 777 778 ptr = skb->data + sizeof(*cmd) + sizeof(*frame_tlv) + buf_len_aligned; 779 780 tlv = ptr; 781 782 /* Tx params not used currently */ 783 tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TX_SEND_PARAMS, cmd_len); 784 ptr += cmd_len; 785 786 tlv = ptr; 787 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, sizeof(*ml_params)); 788 ptr += TLV_HDR_SIZE; 789 790 ml_params = ptr; 791 ml_params->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_TX_SEND_PARAMS, 792 sizeof(*ml_params)); 793 794 ml_params->hw_link_id = cpu_to_le32(WMI_MGMT_LINK_AGNOSTIC_ID); 795 796 send: 797 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID); 798 if (ret) { 799 ath12k_warn(ar->ab, 800 "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n"); 801 dev_kfree_skb(skb); 802 } 803 804 return ret; 805 } 806 807 int ath12k_wmi_send_stats_request_cmd(struct ath12k *ar, u32 stats_id, 808 u32 vdev_id, u32 pdev_id) 809 { 810 struct ath12k_wmi_pdev *wmi = ar->wmi; 811 struct wmi_request_stats_cmd *cmd; 812 struct sk_buff *skb; 813 int ret; 814 815 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 816 if (!skb) 817 return -ENOMEM; 818 819 cmd = (struct wmi_request_stats_cmd *)skb->data; 820 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REQUEST_STATS_CMD, 821 sizeof(*cmd)); 822 823 cmd->stats_id = cpu_to_le32(stats_id); 824 cmd->vdev_id = cpu_to_le32(vdev_id); 825 cmd->pdev_id = cpu_to_le32(pdev_id); 826 827 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_REQUEST_STATS_CMDID); 828 if (ret) { 829 ath12k_warn(ar->ab, "failed to send WMI_REQUEST_STATS cmd\n"); 830 dev_kfree_skb(skb); 831 } 832 833 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 834 "WMI request stats 0x%x vdev id %d pdev id %d\n", 835 stats_id, vdev_id, pdev_id); 836 837 return ret; 838 } 839 840 int 
ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
		       struct ath12k_wmi_vdev_create_arg *args)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_create_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_vdev_txrx_streams_params *txrx_streams;
	bool is_ml_vdev = is_valid_ether_addr(args->mld_addr);
	struct wmi_vdev_create_mlo_params *ml_params;
	struct wmi_tlv *tlv;
	int ret, len;
	void *ptr;

	/* It can be optimized by sending tx/rx chain configuration
	 * only for supported bands instead of always sending it for
	 * both the bands.
	 */
	len = sizeof(*cmd) + TLV_HDR_SIZE +
	      (WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams)) +
	      (is_ml_vdev ? TLV_HDR_SIZE + sizeof(*ml_params) : 0);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_create_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CREATE_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(args->if_id);
	cmd->vdev_type = cpu_to_le32(args->type);
	cmd->vdev_subtype = cpu_to_le32(args->subtype);
	cmd->num_cfg_txrx_streams = cpu_to_le32(WMI_NUM_SUPPORTED_BAND_MAX);
	cmd->pdev_id = cpu_to_le32(args->pdev_id);
	cmd->mbssid_flags = cpu_to_le32(args->mbssid_flags);
	cmd->mbssid_tx_vdev_id = cpu_to_le32(args->mbssid_tx_vdev_id);
	cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
	ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);

	if (args->if_stats_id != ATH12K_INVAL_VDEV_STATS_ID)
		cmd->vdev_stats_id_valid = cpu_to_le32(BIT(0));

	ptr = skb->data + sizeof(*cmd);
	len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);

	ptr += TLV_HDR_SIZE;
	txrx_streams = ptr;
	len = sizeof(*txrx_streams);
	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
							  len);
	txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_2G);
	txrx_streams->supported_tx_streams =
		cpu_to_le32(args->chains[NL80211_BAND_2GHZ].tx);
	txrx_streams->supported_rx_streams =
		cpu_to_le32(args->chains[NL80211_BAND_2GHZ].rx);

	txrx_streams++;
	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
							  len);
	txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_5G);
	txrx_streams->supported_tx_streams =
		cpu_to_le32(args->chains[NL80211_BAND_5GHZ].tx);
	txrx_streams->supported_rx_streams =
		cpu_to_le32(args->chains[NL80211_BAND_5GHZ].rx);

	ptr += WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);

	if (is_ml_vdev) {
		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
						 sizeof(*ml_params));
		ptr += TLV_HDR_SIZE;
		ml_params = ptr;

		ml_params->tlv_header =
			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_CREATE_PARAMS,
					       sizeof(*ml_params));
		ether_addr_copy(ml_params->mld_macaddr.addr, args->mld_addr);
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
		   args->if_id, args->type, args->subtype,
		   macaddr, args->pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to submit WMI_VDEV_CREATE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_vdev_delete(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_vdev_delete_cmd *cmd; 942 struct sk_buff *skb; 943 int ret; 944 945 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 946 if (!skb) 947 return -ENOMEM; 948 949 cmd = (struct wmi_vdev_delete_cmd *)skb->data; 950 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DELETE_CMD, 951 sizeof(*cmd)); 952 cmd->vdev_id = cpu_to_le32(vdev_id); 953 954 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id); 955 956 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID); 957 if (ret) { 958 ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n"); 959 dev_kfree_skb(skb); 960 } 961 962 return ret; 963 } 964 965 int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id) 966 { 967 struct ath12k_wmi_pdev *wmi = ar->wmi; 968 struct wmi_vdev_stop_cmd *cmd; 969 struct sk_buff *skb; 970 int ret; 971 972 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 973 if (!skb) 974 return -ENOMEM; 975 976 cmd = (struct wmi_vdev_stop_cmd *)skb->data; 977 978 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_STOP_CMD, 979 sizeof(*cmd)); 980 cmd->vdev_id = cpu_to_le32(vdev_id); 981 982 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev stop id 0x%x\n", vdev_id); 983 984 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID); 985 if (ret) { 986 ath12k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n"); 987 dev_kfree_skb(skb); 988 } 989 990 return ret; 991 } 992 993 int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id) 994 { 995 struct ath12k_wmi_pdev *wmi = ar->wmi; 996 struct wmi_vdev_down_cmd *cmd; 997 struct sk_buff *skb; 998 int ret; 999 1000 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1001 if (!skb) 1002 return -ENOMEM; 1003 1004 cmd = (struct wmi_vdev_down_cmd *)skb->data; 1005 1006 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DOWN_CMD, 1007 sizeof(*cmd)); 1008 cmd->vdev_id = cpu_to_le32(vdev_id); 1009 1010 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev down id 0x%x\n", vdev_id); 1011 1012 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID); 1013 if (ret) { 1014 ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n"); 1015 dev_kfree_skb(skb); 1016 } 1017 1018 return ret; 1019 } 1020 1021 static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan, 1022 struct wmi_vdev_start_req_arg *arg) 1023 { 1024 u32 center_freq1 = arg->band_center_freq1; 1025 1026 memset(chan, 0, sizeof(*chan)); 1027 1028 chan->mhz = cpu_to_le32(arg->freq); 1029 chan->band_center_freq1 = cpu_to_le32(center_freq1); 1030 if (arg->mode == MODE_11BE_EHT320) { 1031 if (arg->freq > center_freq1) 1032 chan->band_center_freq1 = cpu_to_le32(center_freq1 + 80); 1033 else 1034 chan->band_center_freq1 = cpu_to_le32(center_freq1 - 80); 1035 1036 chan->band_center_freq2 = cpu_to_le32(center_freq1); 1037 1038 } else if (arg->mode == MODE_11BE_EHT160 || 1039 arg->mode == MODE_11AX_HE160) { 1040 if (arg->freq > center_freq1) 1041 chan->band_center_freq1 = cpu_to_le32(center_freq1 + 40); 1042 else 1043 chan->band_center_freq1 = cpu_to_le32(center_freq1 - 40); 1044 1045 chan->band_center_freq2 = cpu_to_le32(center_freq1); 1046 } else { 1047 chan->band_center_freq2 = 0; 1048 } 1049 1050 chan->info |= le32_encode_bits(arg->mode, WMI_CHAN_INFO_MODE); 1051 if (arg->passive) 1052 chan->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE); 1053 if (arg->allow_ibss) 1054 chan->info |= cpu_to_le32(WMI_CHAN_INFO_ADHOC_ALLOWED); 1055 if (arg->allow_ht) 1056 chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT); 1057 if (arg->allow_vht) 1058 chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT); 1059 
if (arg->allow_he) 1060 chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE); 1061 if (arg->ht40plus) 1062 chan->info |= cpu_to_le32(WMI_CHAN_INFO_HT40_PLUS); 1063 if (arg->chan_radar) 1064 chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS); 1065 if (arg->freq2_radar) 1066 chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS_FREQ2); 1067 1068 chan->reg_info_1 = le32_encode_bits(arg->max_power, 1069 WMI_CHAN_REG_INFO1_MAX_PWR) | 1070 le32_encode_bits(arg->max_reg_power, 1071 WMI_CHAN_REG_INFO1_MAX_REG_PWR); 1072 1073 chan->reg_info_2 = le32_encode_bits(arg->max_antenna_gain, 1074 WMI_CHAN_REG_INFO2_ANT_MAX) | 1075 le32_encode_bits(arg->max_power, WMI_CHAN_REG_INFO2_MAX_TX_PWR); 1076 } 1077 1078 int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg, 1079 bool restart) 1080 { 1081 struct wmi_vdev_start_mlo_params *ml_params; 1082 struct wmi_partner_link_info *partner_info; 1083 struct ath12k_wmi_pdev *wmi = ar->wmi; 1084 struct wmi_vdev_start_request_cmd *cmd; 1085 struct sk_buff *skb; 1086 struct ath12k_wmi_channel_params *chan; 1087 struct wmi_tlv *tlv; 1088 void *ptr; 1089 int ret, len, i, ml_arg_size = 0; 1090 1091 if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid))) 1092 return -EINVAL; 1093 1094 len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE; 1095 1096 if (!restart && arg->ml.enabled) { 1097 ml_arg_size = TLV_HDR_SIZE + sizeof(*ml_params) + 1098 TLV_HDR_SIZE + (arg->ml.num_partner_links * 1099 sizeof(*partner_info)); 1100 len += ml_arg_size; 1101 } 1102 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 1103 if (!skb) 1104 return -ENOMEM; 1105 1106 cmd = (struct wmi_vdev_start_request_cmd *)skb->data; 1107 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_START_REQUEST_CMD, 1108 sizeof(*cmd)); 1109 cmd->vdev_id = cpu_to_le32(arg->vdev_id); 1110 cmd->beacon_interval = cpu_to_le32(arg->bcn_intval); 1111 cmd->bcn_tx_rate = cpu_to_le32(arg->bcn_tx_rate); 1112 cmd->dtim_period = cpu_to_le32(arg->dtim_period); 1113 cmd->num_noa_descriptors = cpu_to_le32(arg->num_noa_descriptors); 1114 cmd->preferred_rx_streams = cpu_to_le32(arg->pref_rx_streams); 1115 cmd->preferred_tx_streams = cpu_to_le32(arg->pref_tx_streams); 1116 cmd->cac_duration_ms = cpu_to_le32(arg->cac_duration_ms); 1117 cmd->regdomain = cpu_to_le32(arg->regdomain); 1118 cmd->he_ops = cpu_to_le32(arg->he_ops); 1119 cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap); 1120 cmd->mbssid_flags = cpu_to_le32(arg->mbssid_flags); 1121 cmd->mbssid_tx_vdev_id = cpu_to_le32(arg->mbssid_tx_vdev_id); 1122 1123 if (!restart) { 1124 if (arg->ssid) { 1125 cmd->ssid.ssid_len = cpu_to_le32(arg->ssid_len); 1126 memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len); 1127 } 1128 if (arg->hidden_ssid) 1129 cmd->flags |= cpu_to_le32(WMI_VDEV_START_HIDDEN_SSID); 1130 if (arg->pmf_enabled) 1131 cmd->flags |= cpu_to_le32(WMI_VDEV_START_PMF_ENABLED); 1132 } 1133 1134 cmd->flags |= cpu_to_le32(WMI_VDEV_START_LDPC_RX_ENABLED); 1135 1136 ptr = skb->data + sizeof(*cmd); 1137 chan = ptr; 1138 1139 ath12k_wmi_put_wmi_channel(chan, arg); 1140 1141 chan->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL, 1142 sizeof(*chan)); 1143 ptr += sizeof(*chan); 1144 1145 tlv = ptr; 1146 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0); 1147 1148 /* Note: This is a nested TLV containing: 1149 * [wmi_tlv][ath12k_wmi_p2p_noa_descriptor][wmi_tlv].. 
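 * The array header above is written with length 0, so no NoA descriptors are
 * populated by the host here.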
1150 */ 1151 1152 ptr += sizeof(*tlv); 1153 1154 if (ml_arg_size) { 1155 tlv = ptr; 1156 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 1157 sizeof(*ml_params)); 1158 ptr += TLV_HDR_SIZE; 1159 1160 ml_params = ptr; 1161 1162 ml_params->tlv_header = 1163 ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_START_PARAMS, 1164 sizeof(*ml_params)); 1165 1166 ml_params->flags = le32_encode_bits(arg->ml.enabled, 1167 ATH12K_WMI_FLAG_MLO_ENABLED) | 1168 le32_encode_bits(arg->ml.assoc_link, 1169 ATH12K_WMI_FLAG_MLO_ASSOC_LINK) | 1170 le32_encode_bits(arg->ml.mcast_link, 1171 ATH12K_WMI_FLAG_MLO_MCAST_VDEV) | 1172 le32_encode_bits(arg->ml.link_add, 1173 ATH12K_WMI_FLAG_MLO_LINK_ADD); 1174 1175 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %d start ml flags 0x%x\n", 1176 arg->vdev_id, ml_params->flags); 1177 1178 ptr += sizeof(*ml_params); 1179 1180 tlv = ptr; 1181 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 1182 arg->ml.num_partner_links * 1183 sizeof(*partner_info)); 1184 ptr += TLV_HDR_SIZE; 1185 1186 partner_info = ptr; 1187 1188 for (i = 0; i < arg->ml.num_partner_links; i++) { 1189 partner_info->tlv_header = 1190 ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PARTNER_LINK_PARAMS, 1191 sizeof(*partner_info)); 1192 partner_info->vdev_id = 1193 cpu_to_le32(arg->ml.partner_info[i].vdev_id); 1194 partner_info->hw_link_id = 1195 cpu_to_le32(arg->ml.partner_info[i].hw_link_id); 1196 ether_addr_copy(partner_info->vdev_addr.addr, 1197 arg->ml.partner_info[i].addr); 1198 1199 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "partner vdev %d hw_link_id %d macaddr%pM\n", 1200 partner_info->vdev_id, partner_info->hw_link_id, 1201 partner_info->vdev_addr.addr); 1202 1203 partner_info++; 1204 } 1205 1206 ptr = partner_info; 1207 } 1208 1209 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n", 1210 restart ? "restart" : "start", arg->vdev_id, 1211 arg->freq, arg->mode); 1212 1213 if (restart) 1214 ret = ath12k_wmi_cmd_send(wmi, skb, 1215 WMI_VDEV_RESTART_REQUEST_CMDID); 1216 else 1217 ret = ath12k_wmi_cmd_send(wmi, skb, 1218 WMI_VDEV_START_REQUEST_CMDID); 1219 if (ret) { 1220 ath12k_warn(ar->ab, "failed to submit vdev_%s cmd\n", 1221 restart ? 
"restart" : "start"); 1222 dev_kfree_skb(skb); 1223 } 1224 1225 return ret; 1226 } 1227 1228 int ath12k_wmi_vdev_up(struct ath12k *ar, struct ath12k_wmi_vdev_up_params *params) 1229 { 1230 struct ath12k_wmi_pdev *wmi = ar->wmi; 1231 struct wmi_vdev_up_cmd *cmd; 1232 struct sk_buff *skb; 1233 int ret; 1234 1235 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1236 if (!skb) 1237 return -ENOMEM; 1238 1239 cmd = (struct wmi_vdev_up_cmd *)skb->data; 1240 1241 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_UP_CMD, 1242 sizeof(*cmd)); 1243 cmd->vdev_id = cpu_to_le32(params->vdev_id); 1244 cmd->vdev_assoc_id = cpu_to_le32(params->aid); 1245 1246 ether_addr_copy(cmd->vdev_bssid.addr, params->bssid); 1247 1248 if (params->tx_bssid) { 1249 ether_addr_copy(cmd->tx_vdev_bssid.addr, params->tx_bssid); 1250 cmd->nontx_profile_idx = cpu_to_le32(params->nontx_profile_idx); 1251 cmd->nontx_profile_cnt = cpu_to_le32(params->nontx_profile_cnt); 1252 } 1253 1254 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1255 "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n", 1256 params->vdev_id, params->aid, params->bssid); 1257 1258 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID); 1259 if (ret) { 1260 ath12k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n"); 1261 dev_kfree_skb(skb); 1262 } 1263 1264 return ret; 1265 } 1266 1267 int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar, 1268 struct ath12k_wmi_peer_create_arg *arg) 1269 { 1270 struct ath12k_wmi_pdev *wmi = ar->wmi; 1271 struct wmi_peer_create_cmd *cmd; 1272 struct sk_buff *skb; 1273 int ret, len; 1274 struct wmi_peer_create_mlo_params *ml_param; 1275 void *ptr; 1276 struct wmi_tlv *tlv; 1277 1278 len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*ml_param); 1279 1280 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 1281 if (!skb) 1282 return -ENOMEM; 1283 1284 cmd = (struct wmi_peer_create_cmd *)skb->data; 1285 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_CREATE_CMD, 1286 sizeof(*cmd)); 1287 1288 ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_addr); 1289 cmd->peer_type = cpu_to_le32(arg->peer_type); 1290 cmd->vdev_id = cpu_to_le32(arg->vdev_id); 1291 1292 ptr = skb->data + sizeof(*cmd); 1293 tlv = ptr; 1294 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 1295 sizeof(*ml_param)); 1296 ptr += TLV_HDR_SIZE; 1297 ml_param = ptr; 1298 ml_param->tlv_header = 1299 ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_CREATE_PARAMS, 1300 sizeof(*ml_param)); 1301 if (arg->ml_enabled) 1302 ml_param->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED); 1303 1304 ptr += sizeof(*ml_param); 1305 1306 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1307 "WMI peer create vdev_id %d peer_addr %pM ml_flags 0x%x\n", 1308 arg->vdev_id, arg->peer_addr, ml_param->flags); 1309 1310 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID); 1311 if (ret) { 1312 ath12k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n"); 1313 dev_kfree_skb(skb); 1314 } 1315 1316 return ret; 1317 } 1318 1319 int ath12k_wmi_send_peer_delete_cmd(struct ath12k *ar, 1320 const u8 *peer_addr, u8 vdev_id) 1321 { 1322 struct ath12k_wmi_pdev *wmi = ar->wmi; 1323 struct wmi_peer_delete_cmd *cmd; 1324 struct sk_buff *skb; 1325 int ret; 1326 1327 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1328 if (!skb) 1329 return -ENOMEM; 1330 1331 cmd = (struct wmi_peer_delete_cmd *)skb->data; 1332 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_DELETE_CMD, 1333 sizeof(*cmd)); 1334 1335 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); 1336 cmd->vdev_id = cpu_to_le32(vdev_id); 1337 1338 ath12k_dbg(ar->ab, 
ATH12K_DBG_WMI, 1339 "WMI peer delete vdev_id %d peer_addr %pM\n", 1340 vdev_id, peer_addr); 1341 1342 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID); 1343 if (ret) { 1344 ath12k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n"); 1345 dev_kfree_skb(skb); 1346 } 1347 1348 return ret; 1349 } 1350 1351 int ath12k_wmi_send_pdev_set_regdomain(struct ath12k *ar, 1352 struct ath12k_wmi_pdev_set_regdomain_arg *arg) 1353 { 1354 struct ath12k_wmi_pdev *wmi = ar->wmi; 1355 struct wmi_pdev_set_regdomain_cmd *cmd; 1356 struct sk_buff *skb; 1357 int ret; 1358 1359 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1360 if (!skb) 1361 return -ENOMEM; 1362 1363 cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data; 1364 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_REGDOMAIN_CMD, 1365 sizeof(*cmd)); 1366 1367 cmd->reg_domain = cpu_to_le32(arg->current_rd_in_use); 1368 cmd->reg_domain_2g = cpu_to_le32(arg->current_rd_2g); 1369 cmd->reg_domain_5g = cpu_to_le32(arg->current_rd_5g); 1370 cmd->conformance_test_limit_2g = cpu_to_le32(arg->ctl_2g); 1371 cmd->conformance_test_limit_5g = cpu_to_le32(arg->ctl_5g); 1372 cmd->dfs_domain = cpu_to_le32(arg->dfs_domain); 1373 cmd->pdev_id = cpu_to_le32(arg->pdev_id); 1374 1375 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1376 "WMI pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n", 1377 arg->current_rd_in_use, arg->current_rd_2g, 1378 arg->current_rd_5g, arg->dfs_domain, arg->pdev_id); 1379 1380 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID); 1381 if (ret) { 1382 ath12k_warn(ar->ab, 1383 "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n"); 1384 dev_kfree_skb(skb); 1385 } 1386 1387 return ret; 1388 } 1389 1390 int ath12k_wmi_set_peer_param(struct ath12k *ar, const u8 *peer_addr, 1391 u32 vdev_id, u32 param_id, u32 param_val) 1392 { 1393 struct ath12k_wmi_pdev *wmi = ar->wmi; 1394 struct wmi_peer_set_param_cmd *cmd; 1395 struct sk_buff *skb; 1396 int ret; 1397 1398 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1399 if (!skb) 1400 return -ENOMEM; 1401 1402 cmd = (struct wmi_peer_set_param_cmd *)skb->data; 1403 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_SET_PARAM_CMD, 1404 sizeof(*cmd)); 1405 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); 1406 cmd->vdev_id = cpu_to_le32(vdev_id); 1407 cmd->param_id = cpu_to_le32(param_id); 1408 cmd->param_value = cpu_to_le32(param_val); 1409 1410 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1411 "WMI vdev %d peer 0x%pM set param %d value %d\n", 1412 vdev_id, peer_addr, param_id, param_val); 1413 1414 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID); 1415 if (ret) { 1416 ath12k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n"); 1417 dev_kfree_skb(skb); 1418 } 1419 1420 return ret; 1421 } 1422 1423 int ath12k_wmi_send_peer_flush_tids_cmd(struct ath12k *ar, 1424 u8 peer_addr[ETH_ALEN], 1425 u32 peer_tid_bitmap, 1426 u8 vdev_id) 1427 { 1428 struct ath12k_wmi_pdev *wmi = ar->wmi; 1429 struct wmi_peer_flush_tids_cmd *cmd; 1430 struct sk_buff *skb; 1431 int ret; 1432 1433 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1434 if (!skb) 1435 return -ENOMEM; 1436 1437 cmd = (struct wmi_peer_flush_tids_cmd *)skb->data; 1438 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_FLUSH_TIDS_CMD, 1439 sizeof(*cmd)); 1440 1441 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); 1442 cmd->peer_tid_bitmap = cpu_to_le32(peer_tid_bitmap); 1443 cmd->vdev_id = cpu_to_le32(vdev_id); 1444 1445 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1446 "WMI peer flush vdev_id %d peer_addr %pM tids %08x\n", 
1447 vdev_id, peer_addr, peer_tid_bitmap); 1448 1449 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID); 1450 if (ret) { 1451 ath12k_warn(ar->ab, 1452 "failed to send WMI_PEER_FLUSH_TIDS cmd\n"); 1453 dev_kfree_skb(skb); 1454 } 1455 1456 return ret; 1457 } 1458 1459 int ath12k_wmi_peer_rx_reorder_queue_setup(struct ath12k *ar, 1460 int vdev_id, const u8 *addr, 1461 dma_addr_t paddr, u8 tid, 1462 u8 ba_window_size_valid, 1463 u32 ba_window_size) 1464 { 1465 struct wmi_peer_reorder_queue_setup_cmd *cmd; 1466 struct sk_buff *skb; 1467 int ret; 1468 1469 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd)); 1470 if (!skb) 1471 return -ENOMEM; 1472 1473 cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data; 1474 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_SETUP_CMD, 1475 sizeof(*cmd)); 1476 1477 ether_addr_copy(cmd->peer_macaddr.addr, addr); 1478 cmd->vdev_id = cpu_to_le32(vdev_id); 1479 cmd->tid = cpu_to_le32(tid); 1480 cmd->queue_ptr_lo = cpu_to_le32(lower_32_bits(paddr)); 1481 cmd->queue_ptr_hi = cpu_to_le32(upper_32_bits(paddr)); 1482 cmd->queue_no = cpu_to_le32(tid); 1483 cmd->ba_window_size_valid = cpu_to_le32(ba_window_size_valid); 1484 cmd->ba_window_size = cpu_to_le32(ba_window_size); 1485 1486 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1487 "wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n", 1488 addr, vdev_id, tid); 1489 1490 ret = ath12k_wmi_cmd_send(ar->wmi, skb, 1491 WMI_PEER_REORDER_QUEUE_SETUP_CMDID); 1492 if (ret) { 1493 ath12k_warn(ar->ab, 1494 "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n"); 1495 dev_kfree_skb(skb); 1496 } 1497 1498 return ret; 1499 } 1500 1501 int 1502 ath12k_wmi_rx_reord_queue_remove(struct ath12k *ar, 1503 struct ath12k_wmi_rx_reorder_queue_remove_arg *arg) 1504 { 1505 struct ath12k_wmi_pdev *wmi = ar->wmi; 1506 struct wmi_peer_reorder_queue_remove_cmd *cmd; 1507 struct sk_buff *skb; 1508 int ret; 1509 1510 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1511 if (!skb) 1512 return -ENOMEM; 1513 1514 cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data; 1515 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_REMOVE_CMD, 1516 sizeof(*cmd)); 1517 1518 ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr); 1519 cmd->vdev_id = cpu_to_le32(arg->vdev_id); 1520 cmd->tid_mask = cpu_to_le32(arg->peer_tid_bitmap); 1521 1522 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1523 "%s: peer_macaddr %pM vdev_id %d, tid_map %d", __func__, 1524 arg->peer_macaddr, arg->vdev_id, arg->peer_tid_bitmap); 1525 1526 ret = ath12k_wmi_cmd_send(wmi, skb, 1527 WMI_PEER_REORDER_QUEUE_REMOVE_CMDID); 1528 if (ret) { 1529 ath12k_warn(ar->ab, 1530 "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID"); 1531 dev_kfree_skb(skb); 1532 } 1533 1534 return ret; 1535 } 1536 1537 int ath12k_wmi_pdev_set_param(struct ath12k *ar, u32 param_id, 1538 u32 param_value, u8 pdev_id) 1539 { 1540 struct ath12k_wmi_pdev *wmi = ar->wmi; 1541 struct wmi_pdev_set_param_cmd *cmd; 1542 struct sk_buff *skb; 1543 int ret; 1544 1545 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1546 if (!skb) 1547 return -ENOMEM; 1548 1549 cmd = (struct wmi_pdev_set_param_cmd *)skb->data; 1550 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_PARAM_CMD, 1551 sizeof(*cmd)); 1552 cmd->pdev_id = cpu_to_le32(pdev_id); 1553 cmd->param_id = cpu_to_le32(param_id); 1554 cmd->param_value = cpu_to_le32(param_value); 1555 1556 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1557 "WMI pdev set param %d pdev id %d value %d\n", 1558 param_id, pdev_id, param_value); 1559 1560 ret = 
ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_pdev_set_ps_mode(struct ath12k *ar, int vdev_id, u32 enable)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_set_ps_mode_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_MODE_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->sta_ps_mode = cpu_to_le32(enable);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev set psmode %d vdev id %d\n",
		   enable, vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_MODE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_pdev_suspend(struct ath12k *ar, u32 suspend_opt,
			    u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_suspend_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_suspend_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SUSPEND_CMD,
						 sizeof(*cmd));

	cmd->suspend_opt = cpu_to_le32(suspend_opt);
	cmd->pdev_id = cpu_to_le32(pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev suspend pdev_id %d\n", pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath12k_wmi_pdev_resume(struct ath12k *ar, u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_resume_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_resume_cmd *)skb->data;

	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_RESUME_CMD,
						 sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev resume pdev id %d\n", pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

/* TODO FW Support for the cmd is not available yet.
1661 * Can be tested once the command and corresponding 1662 * event is implemented in FW 1663 */ 1664 int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar, 1665 enum wmi_bss_chan_info_req_type type) 1666 { 1667 struct ath12k_wmi_pdev *wmi = ar->wmi; 1668 struct wmi_pdev_bss_chan_info_req_cmd *cmd; 1669 struct sk_buff *skb; 1670 int ret; 1671 1672 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1673 if (!skb) 1674 return -ENOMEM; 1675 1676 cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data; 1677 1678 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST, 1679 sizeof(*cmd)); 1680 cmd->req_type = cpu_to_le32(type); 1681 cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id); 1682 1683 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1684 "WMI bss chan info req type %d\n", type); 1685 1686 ret = ath12k_wmi_cmd_send(wmi, skb, 1687 WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID); 1688 if (ret) { 1689 ath12k_warn(ar->ab, 1690 "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n"); 1691 dev_kfree_skb(skb); 1692 } 1693 1694 return ret; 1695 } 1696 1697 int ath12k_wmi_send_set_ap_ps_param_cmd(struct ath12k *ar, u8 *peer_addr, 1698 struct ath12k_wmi_ap_ps_arg *arg) 1699 { 1700 struct ath12k_wmi_pdev *wmi = ar->wmi; 1701 struct wmi_ap_ps_peer_cmd *cmd; 1702 struct sk_buff *skb; 1703 int ret; 1704 1705 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1706 if (!skb) 1707 return -ENOMEM; 1708 1709 cmd = (struct wmi_ap_ps_peer_cmd *)skb->data; 1710 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_AP_PS_PEER_CMD, 1711 sizeof(*cmd)); 1712 1713 cmd->vdev_id = cpu_to_le32(arg->vdev_id); 1714 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); 1715 cmd->param = cpu_to_le32(arg->param); 1716 cmd->value = cpu_to_le32(arg->value); 1717 1718 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1719 "WMI set ap ps vdev id %d peer %pM param %d value %d\n", 1720 arg->vdev_id, peer_addr, arg->param, arg->value); 1721 1722 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID); 1723 if (ret) { 1724 ath12k_warn(ar->ab, 1725 "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n"); 1726 dev_kfree_skb(skb); 1727 } 1728 1729 return ret; 1730 } 1731 1732 int ath12k_wmi_set_sta_ps_param(struct ath12k *ar, u32 vdev_id, 1733 u32 param, u32 param_value) 1734 { 1735 struct ath12k_wmi_pdev *wmi = ar->wmi; 1736 struct wmi_sta_powersave_param_cmd *cmd; 1737 struct sk_buff *skb; 1738 int ret; 1739 1740 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1741 if (!skb) 1742 return -ENOMEM; 1743 1744 cmd = (struct wmi_sta_powersave_param_cmd *)skb->data; 1745 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_PARAM_CMD, 1746 sizeof(*cmd)); 1747 1748 cmd->vdev_id = cpu_to_le32(vdev_id); 1749 cmd->param = cpu_to_le32(param); 1750 cmd->value = cpu_to_le32(param_value); 1751 1752 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1753 "WMI set sta ps vdev_id %d param %d value %d\n", 1754 vdev_id, param, param_value); 1755 1756 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID); 1757 if (ret) { 1758 ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID"); 1759 dev_kfree_skb(skb); 1760 } 1761 1762 return ret; 1763 } 1764 1765 int ath12k_wmi_force_fw_hang_cmd(struct ath12k *ar, u32 type, u32 delay_time_ms) 1766 { 1767 struct ath12k_wmi_pdev *wmi = ar->wmi; 1768 struct wmi_force_fw_hang_cmd *cmd; 1769 struct sk_buff *skb; 1770 int ret, len; 1771 1772 len = sizeof(*cmd); 1773 1774 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 1775 if (!skb) 1776 return -ENOMEM; 1777 1778 cmd = (struct wmi_force_fw_hang_cmd *)skb->data; 
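	/* type selects the firmware hang/assert method; delay_time_ms defers the trigger */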
1779 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FORCE_FW_HANG_CMD, 1780 len); 1781 1782 cmd->type = cpu_to_le32(type); 1783 cmd->delay_time_ms = cpu_to_le32(delay_time_ms); 1784 1785 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID); 1786 1787 if (ret) { 1788 ath12k_warn(ar->ab, "Failed to send WMI_FORCE_FW_HANG_CMDID"); 1789 dev_kfree_skb(skb); 1790 } 1791 return ret; 1792 } 1793 1794 int ath12k_wmi_vdev_set_param_cmd(struct ath12k *ar, u32 vdev_id, 1795 u32 param_id, u32 param_value) 1796 { 1797 struct ath12k_wmi_pdev *wmi = ar->wmi; 1798 struct wmi_vdev_set_param_cmd *cmd; 1799 struct sk_buff *skb; 1800 int ret; 1801 1802 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1803 if (!skb) 1804 return -ENOMEM; 1805 1806 cmd = (struct wmi_vdev_set_param_cmd *)skb->data; 1807 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_PARAM_CMD, 1808 sizeof(*cmd)); 1809 1810 cmd->vdev_id = cpu_to_le32(vdev_id); 1811 cmd->param_id = cpu_to_le32(param_id); 1812 cmd->param_value = cpu_to_le32(param_value); 1813 1814 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1815 "WMI vdev id 0x%x set param %d value %d\n", 1816 vdev_id, param_id, param_value); 1817 1818 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID); 1819 if (ret) { 1820 ath12k_warn(ar->ab, 1821 "failed to send WMI_VDEV_SET_PARAM_CMDID\n"); 1822 dev_kfree_skb(skb); 1823 } 1824 1825 return ret; 1826 } 1827 1828 int ath12k_wmi_send_pdev_temperature_cmd(struct ath12k *ar) 1829 { 1830 struct ath12k_wmi_pdev *wmi = ar->wmi; 1831 struct wmi_get_pdev_temperature_cmd *cmd; 1832 struct sk_buff *skb; 1833 int ret; 1834 1835 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1836 if (!skb) 1837 return -ENOMEM; 1838 1839 cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data; 1840 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_GET_TEMPERATURE_CMD, 1841 sizeof(*cmd)); 1842 cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id); 1843 1844 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1845 "WMI pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id); 1846 1847 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID); 1848 if (ret) { 1849 ath12k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n"); 1850 dev_kfree_skb(skb); 1851 } 1852 1853 return ret; 1854 } 1855 1856 int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar, 1857 u32 vdev_id, u32 bcn_ctrl_op) 1858 { 1859 struct ath12k_wmi_pdev *wmi = ar->wmi; 1860 struct wmi_bcn_offload_ctrl_cmd *cmd; 1861 struct sk_buff *skb; 1862 int ret; 1863 1864 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 1865 if (!skb) 1866 return -ENOMEM; 1867 1868 cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data; 1869 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_OFFLOAD_CTRL_CMD, 1870 sizeof(*cmd)); 1871 1872 cmd->vdev_id = cpu_to_le32(vdev_id); 1873 cmd->bcn_ctrl_op = cpu_to_le32(bcn_ctrl_op); 1874 1875 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1876 "WMI bcn ctrl offload vdev id %d ctrl_op %d\n", 1877 vdev_id, bcn_ctrl_op); 1878 1879 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID); 1880 if (ret) { 1881 ath12k_warn(ar->ab, 1882 "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n"); 1883 dev_kfree_skb(skb); 1884 } 1885 1886 return ret; 1887 } 1888 1889 int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id, 1890 const u8 *p2p_ie) 1891 { 1892 struct ath12k_wmi_pdev *wmi = ar->wmi; 1893 struct wmi_p2p_go_set_beacon_ie_cmd *cmd; 1894 size_t p2p_ie_len, aligned_len; 1895 struct wmi_tlv *tlv; 1896 struct sk_buff *skb; 1897 void *ptr; 1898 int ret, len; 1899 1900 
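	/* p2p_ie is a raw P2P information element: byte 1 holds the element
	 * length, so the total IE size is that length plus 2 octets for the
	 * element ID and length fields. The byte-array TLV below is padded
	 * to a 4-byte boundary while ie_buf_len reports the unpadded length.
	 */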
p2p_ie_len = p2p_ie[1] + 2; 1901 aligned_len = roundup(p2p_ie_len, sizeof(u32)); 1902 1903 len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len; 1904 1905 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 1906 if (!skb) 1907 return -ENOMEM; 1908 1909 ptr = skb->data; 1910 cmd = ptr; 1911 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_P2P_GO_SET_BEACON_IE, 1912 sizeof(*cmd)); 1913 cmd->vdev_id = cpu_to_le32(vdev_id); 1914 cmd->ie_buf_len = cpu_to_le32(p2p_ie_len); 1915 1916 ptr += sizeof(*cmd); 1917 tlv = ptr; 1918 tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE, 1919 aligned_len); 1920 memcpy(tlv->value, p2p_ie, p2p_ie_len); 1921 1922 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_P2P_GO_SET_BEACON_IE); 1923 if (ret) { 1924 ath12k_warn(ar->ab, "failed to send WMI_P2P_GO_SET_BEACON_IE\n"); 1925 dev_kfree_skb(skb); 1926 } 1927 1928 return ret; 1929 } 1930 1931 int ath12k_wmi_bcn_tmpl(struct ath12k_link_vif *arvif, 1932 struct ieee80211_mutable_offsets *offs, 1933 struct sk_buff *bcn, 1934 struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args) 1935 { 1936 struct ath12k *ar = arvif->ar; 1937 struct ath12k_wmi_pdev *wmi = ar->wmi; 1938 struct ath12k_base *ab = ar->ab; 1939 struct wmi_bcn_tmpl_cmd *cmd; 1940 struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info; 1941 struct ath12k_vif *ahvif = arvif->ahvif; 1942 struct ieee80211_bss_conf *conf; 1943 u32 vdev_id = arvif->vdev_id; 1944 struct wmi_tlv *tlv; 1945 struct sk_buff *skb; 1946 u32 ema_params = 0; 1947 void *ptr; 1948 int ret, len; 1949 size_t aligned_len = roundup(bcn->len, 4); 1950 1951 conf = ath12k_mac_get_link_bss_conf(arvif); 1952 if (!conf) { 1953 ath12k_warn(ab, 1954 "unable to access bss link conf in beacon template command for vif %pM link %u\n", 1955 ahvif->vif->addr, arvif->link_id); 1956 return -EINVAL; 1957 } 1958 1959 len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len; 1960 1961 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 1962 if (!skb) 1963 return -ENOMEM; 1964 1965 cmd = (struct wmi_bcn_tmpl_cmd *)skb->data; 1966 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_TMPL_CMD, 1967 sizeof(*cmd)); 1968 cmd->vdev_id = cpu_to_le32(vdev_id); 1969 cmd->tim_ie_offset = cpu_to_le32(offs->tim_offset); 1970 1971 if (conf->csa_active) { 1972 cmd->csa_switch_count_offset = 1973 cpu_to_le32(offs->cntdwn_counter_offs[0]); 1974 cmd->ext_csa_switch_count_offset = 1975 cpu_to_le32(offs->cntdwn_counter_offs[1]); 1976 cmd->csa_event_bitmap = cpu_to_le32(0xFFFFFFFF); 1977 arvif->current_cntdown_counter = bcn->data[offs->cntdwn_counter_offs[0]]; 1978 } 1979 1980 cmd->buf_len = cpu_to_le32(bcn->len); 1981 cmd->mbssid_ie_offset = cpu_to_le32(offs->mbssid_off); 1982 if (ema_args) { 1983 u32p_replace_bits(&ema_params, ema_args->bcn_cnt, WMI_EMA_BEACON_CNT); 1984 u32p_replace_bits(&ema_params, ema_args->bcn_index, WMI_EMA_BEACON_IDX); 1985 if (ema_args->bcn_index == 0) 1986 u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_FIRST); 1987 if (ema_args->bcn_index + 1 == ema_args->bcn_cnt) 1988 u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_LAST); 1989 cmd->ema_params = cpu_to_le32(ema_params); 1990 } 1991 cmd->feature_enable_bitmap = 1992 cpu_to_le32(u32_encode_bits(arvif->beacon_prot, 1993 WMI_BEACON_PROTECTION_EN_BIT)); 1994 1995 ptr = skb->data + sizeof(*cmd); 1996 1997 bcn_prb_info = ptr; 1998 len = sizeof(*bcn_prb_info); 1999 bcn_prb_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO, 2000 len); 2001 bcn_prb_info->caps = 0; 2002 bcn_prb_info->erp = 0; 2003 2004 ptr += sizeof(*bcn_prb_info); 2005 2006 tlv = ptr; 2007 tlv->header 
= ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len); 2008 memcpy(tlv->value, bcn->data, bcn->len); 2009 2010 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID); 2011 if (ret) { 2012 ath12k_warn(ab, "failed to send WMI_BCN_TMPL_CMDID\n"); 2013 dev_kfree_skb(skb); 2014 } 2015 2016 return ret; 2017 } 2018 2019 int ath12k_wmi_vdev_install_key(struct ath12k *ar, 2020 struct wmi_vdev_install_key_arg *arg) 2021 { 2022 struct ath12k_wmi_pdev *wmi = ar->wmi; 2023 struct wmi_vdev_install_key_cmd *cmd; 2024 struct wmi_tlv *tlv; 2025 struct sk_buff *skb; 2026 int ret, len, key_len_aligned; 2027 2028 /* WMI_TAG_ARRAY_BYTE needs to be aligned with 4, the actual key 2029 * length is specified in cmd->key_len. 2030 */ 2031 key_len_aligned = roundup(arg->key_len, 4); 2032 2033 len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned; 2034 2035 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 2036 if (!skb) 2037 return -ENOMEM; 2038 2039 cmd = (struct wmi_vdev_install_key_cmd *)skb->data; 2040 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_INSTALL_KEY_CMD, 2041 sizeof(*cmd)); 2042 cmd->vdev_id = cpu_to_le32(arg->vdev_id); 2043 ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr); 2044 cmd->key_idx = cpu_to_le32(arg->key_idx); 2045 cmd->key_flags = cpu_to_le32(arg->key_flags); 2046 cmd->key_cipher = cpu_to_le32(arg->key_cipher); 2047 cmd->key_len = cpu_to_le32(arg->key_len); 2048 cmd->key_txmic_len = cpu_to_le32(arg->key_txmic_len); 2049 cmd->key_rxmic_len = cpu_to_le32(arg->key_rxmic_len); 2050 2051 if (arg->key_rsc_counter) 2052 cmd->key_rsc_counter = cpu_to_le64(arg->key_rsc_counter); 2053 2054 tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd)); 2055 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, key_len_aligned); 2056 memcpy(tlv->value, arg->key_data, arg->key_len); 2057 2058 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 2059 "WMI vdev install key idx %d cipher %d len %d\n", 2060 arg->key_idx, arg->key_cipher, arg->key_len); 2061 2062 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID); 2063 if (ret) { 2064 ath12k_warn(ar->ab, 2065 "failed to send WMI_VDEV_INSTALL_KEY cmd\n"); 2066 dev_kfree_skb(skb); 2067 } 2068 2069 return ret; 2070 } 2071 2072 static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd, 2073 struct ath12k_wmi_peer_assoc_arg *arg, 2074 bool hw_crypto_disabled) 2075 { 2076 cmd->peer_flags = 0; 2077 cmd->peer_flags_ext = 0; 2078 2079 if (arg->is_wme_set) { 2080 if (arg->qos_flag) 2081 cmd->peer_flags |= cpu_to_le32(WMI_PEER_QOS); 2082 if (arg->apsd_flag) 2083 cmd->peer_flags |= cpu_to_le32(WMI_PEER_APSD); 2084 if (arg->ht_flag) 2085 cmd->peer_flags |= cpu_to_le32(WMI_PEER_HT); 2086 if (arg->bw_40) 2087 cmd->peer_flags |= cpu_to_le32(WMI_PEER_40MHZ); 2088 if (arg->bw_80) 2089 cmd->peer_flags |= cpu_to_le32(WMI_PEER_80MHZ); 2090 if (arg->bw_160) 2091 cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ); 2092 if (arg->bw_320) 2093 cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_320MHZ); 2094 2095 /* Typically if STBC is enabled for VHT it should be enabled 2096 * for HT as well 2097 **/ 2098 if (arg->stbc_flag) 2099 cmd->peer_flags |= cpu_to_le32(WMI_PEER_STBC); 2100 2101 /* Typically if LDPC is enabled for VHT it should be enabled 2102 * for HT as well 2103 **/ 2104 if (arg->ldpc_flag) 2105 cmd->peer_flags |= cpu_to_le32(WMI_PEER_LDPC); 2106 2107 if (arg->static_mimops_flag) 2108 cmd->peer_flags |= cpu_to_le32(WMI_PEER_STATIC_MIMOPS); 2109 if (arg->dynamic_mimops_flag) 2110 cmd->peer_flags |= cpu_to_le32(WMI_PEER_DYN_MIMOPS); 2111 if (arg->spatial_mux_flag) 2112 
cmd->peer_flags |= cpu_to_le32(WMI_PEER_SPATIAL_MUX); 2113 if (arg->vht_flag) 2114 cmd->peer_flags |= cpu_to_le32(WMI_PEER_VHT); 2115 if (arg->he_flag) 2116 cmd->peer_flags |= cpu_to_le32(WMI_PEER_HE); 2117 if (arg->twt_requester) 2118 cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_REQ); 2119 if (arg->twt_responder) 2120 cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_RESP); 2121 if (arg->eht_flag) 2122 cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_EHT); 2123 } 2124 2125 /* Suppress authorization for all AUTH modes that need 4-way handshake 2126 * (during re-association). 2127 * Authorization will be done for these modes on key installation. 2128 */ 2129 if (arg->auth_flag) 2130 cmd->peer_flags |= cpu_to_le32(WMI_PEER_AUTH); 2131 if (arg->need_ptk_4_way) { 2132 cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_PTK_4_WAY); 2133 if (!hw_crypto_disabled && arg->is_assoc) 2134 cmd->peer_flags &= cpu_to_le32(~WMI_PEER_AUTH); 2135 } 2136 if (arg->need_gtk_2_way) 2137 cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_GTK_2_WAY); 2138 /* safe mode bypass the 4-way handshake */ 2139 if (arg->safe_mode_enabled) 2140 cmd->peer_flags &= cpu_to_le32(~(WMI_PEER_NEED_PTK_4_WAY | 2141 WMI_PEER_NEED_GTK_2_WAY)); 2142 2143 if (arg->is_pmf_enabled) 2144 cmd->peer_flags |= cpu_to_le32(WMI_PEER_PMF); 2145 2146 /* Disable AMSDU for station transmit, if user configures it */ 2147 /* Disable AMSDU for AP transmit to 11n Stations, if user configures 2148 * it 2149 * if (arg->amsdu_disable) Add after FW support 2150 **/ 2151 2152 /* Target asserts if node is marked HT and all MCS is set to 0. 2153 * Mark the node as non-HT if all the mcs rates are disabled through 2154 * iwpriv 2155 **/ 2156 if (arg->peer_ht_rates.num_rates == 0) 2157 cmd->peer_flags &= cpu_to_le32(~WMI_PEER_HT); 2158 } 2159 2160 int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar, 2161 struct ath12k_wmi_peer_assoc_arg *arg) 2162 { 2163 struct ath12k_wmi_pdev *wmi = ar->wmi; 2164 struct wmi_peer_assoc_complete_cmd *cmd; 2165 struct ath12k_wmi_vht_rate_set_params *mcs; 2166 struct ath12k_wmi_he_rate_set_params *he_mcs; 2167 struct ath12k_wmi_eht_rate_set_params *eht_mcs; 2168 struct wmi_peer_assoc_mlo_params *ml_params; 2169 struct wmi_peer_assoc_mlo_partner_info_params *partner_info; 2170 struct sk_buff *skb; 2171 struct wmi_tlv *tlv; 2172 void *ptr; 2173 u32 peer_legacy_rates_align, eml_pad_delay, eml_trans_delay; 2174 u32 peer_ht_rates_align, eml_trans_timeout; 2175 int i, ret, len; 2176 u16 eml_cap; 2177 __le32 v; 2178 2179 peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates, 2180 sizeof(u32)); 2181 peer_ht_rates_align = roundup(arg->peer_ht_rates.num_rates, 2182 sizeof(u32)); 2183 2184 len = sizeof(*cmd) + 2185 TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) + 2186 TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) + 2187 sizeof(*mcs) + TLV_HDR_SIZE + 2188 (sizeof(*he_mcs) * arg->peer_he_mcs_count) + 2189 TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count); 2190 2191 if (arg->ml.enabled) 2192 len += TLV_HDR_SIZE + sizeof(*ml_params) + 2193 TLV_HDR_SIZE + (arg->ml.num_partner_links * sizeof(*partner_info)); 2194 else 2195 len += (2 * TLV_HDR_SIZE); 2196 2197 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 2198 if (!skb) 2199 return -ENOMEM; 2200 2201 ptr = skb->data; 2202 2203 cmd = ptr; 2204 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_ASSOC_COMPLETE_CMD, 2205 sizeof(*cmd)); 2206 2207 cmd->vdev_id = cpu_to_le32(arg->vdev_id); 2208 2209 cmd->peer_new_assoc = cpu_to_le32(arg->peer_new_assoc); 2210 cmd->peer_associd = 
cpu_to_le32(arg->peer_associd); 2211 cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap); 2212 2213 ath12k_wmi_copy_peer_flags(cmd, arg, 2214 test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, 2215 &ar->ab->dev_flags)); 2216 2217 ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_mac); 2218 2219 cmd->peer_rate_caps = cpu_to_le32(arg->peer_rate_caps); 2220 cmd->peer_caps = cpu_to_le32(arg->peer_caps); 2221 cmd->peer_listen_intval = cpu_to_le32(arg->peer_listen_intval); 2222 cmd->peer_ht_caps = cpu_to_le32(arg->peer_ht_caps); 2223 cmd->peer_max_mpdu = cpu_to_le32(arg->peer_max_mpdu); 2224 cmd->peer_mpdu_density = cpu_to_le32(arg->peer_mpdu_density); 2225 cmd->peer_vht_caps = cpu_to_le32(arg->peer_vht_caps); 2226 cmd->peer_phymode = cpu_to_le32(arg->peer_phymode); 2227 2228 /* Update 11ax capabilities */ 2229 cmd->peer_he_cap_info = cpu_to_le32(arg->peer_he_cap_macinfo[0]); 2230 cmd->peer_he_cap_info_ext = cpu_to_le32(arg->peer_he_cap_macinfo[1]); 2231 cmd->peer_he_cap_info_internal = cpu_to_le32(arg->peer_he_cap_macinfo_internal); 2232 cmd->peer_he_caps_6ghz = cpu_to_le32(arg->peer_he_caps_6ghz); 2233 cmd->peer_he_ops = cpu_to_le32(arg->peer_he_ops); 2234 for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++) 2235 cmd->peer_he_cap_phy[i] = 2236 cpu_to_le32(arg->peer_he_cap_phyinfo[i]); 2237 cmd->peer_ppet.numss_m1 = cpu_to_le32(arg->peer_ppet.numss_m1); 2238 cmd->peer_ppet.ru_info = cpu_to_le32(arg->peer_ppet.ru_bit_mask); 2239 for (i = 0; i < WMI_MAX_NUM_SS; i++) 2240 cmd->peer_ppet.ppet16_ppet8_ru3_ru0[i] = 2241 cpu_to_le32(arg->peer_ppet.ppet16_ppet8_ru3_ru0[i]); 2242 2243 /* Update 11be capabilities */ 2244 memcpy_and_pad(cmd->peer_eht_cap_mac, sizeof(cmd->peer_eht_cap_mac), 2245 arg->peer_eht_cap_mac, sizeof(arg->peer_eht_cap_mac), 2246 0); 2247 memcpy_and_pad(cmd->peer_eht_cap_phy, sizeof(cmd->peer_eht_cap_phy), 2248 arg->peer_eht_cap_phy, sizeof(arg->peer_eht_cap_phy), 2249 0); 2250 memcpy_and_pad(&cmd->peer_eht_ppet, sizeof(cmd->peer_eht_ppet), 2251 &arg->peer_eht_ppet, sizeof(arg->peer_eht_ppet), 0); 2252 2253 /* Update peer legacy rate information */ 2254 ptr += sizeof(*cmd); 2255 2256 tlv = ptr; 2257 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_legacy_rates_align); 2258 2259 ptr += TLV_HDR_SIZE; 2260 2261 cmd->num_peer_legacy_rates = cpu_to_le32(arg->peer_legacy_rates.num_rates); 2262 memcpy(ptr, arg->peer_legacy_rates.rates, 2263 arg->peer_legacy_rates.num_rates); 2264 2265 /* Update peer HT rate information */ 2266 ptr += peer_legacy_rates_align; 2267 2268 tlv = ptr; 2269 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_ht_rates_align); 2270 ptr += TLV_HDR_SIZE; 2271 cmd->num_peer_ht_rates = cpu_to_le32(arg->peer_ht_rates.num_rates); 2272 memcpy(ptr, arg->peer_ht_rates.rates, 2273 arg->peer_ht_rates.num_rates); 2274 2275 /* VHT Rates */ 2276 ptr += peer_ht_rates_align; 2277 2278 mcs = ptr; 2279 2280 mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VHT_RATE_SET, 2281 sizeof(*mcs)); 2282 2283 cmd->peer_nss = cpu_to_le32(arg->peer_nss); 2284 2285 /* Update bandwidth-NSS mapping */ 2286 cmd->peer_bw_rxnss_override = 0; 2287 cmd->peer_bw_rxnss_override |= cpu_to_le32(arg->peer_bw_rxnss_override); 2288 2289 if (arg->vht_capable) { 2290 /* Firmware interprets mcs->tx_mcs_set field as peer's 2291 * RX capability 2292 */ 2293 mcs->rx_max_rate = cpu_to_le32(arg->tx_max_rate); 2294 mcs->rx_mcs_set = cpu_to_le32(arg->tx_mcs_set); 2295 mcs->tx_max_rate = cpu_to_le32(arg->rx_max_rate); 2296 mcs->tx_mcs_set = cpu_to_le32(arg->rx_mcs_set); 2297 } 2298 2299 /* HE Rates */ 2300 cmd->peer_he_mcs = 
cpu_to_le32(arg->peer_he_mcs_count); 2301 cmd->min_data_rate = cpu_to_le32(arg->min_data_rate); 2302 2303 ptr += sizeof(*mcs); 2304 2305 len = arg->peer_he_mcs_count * sizeof(*he_mcs); 2306 2307 tlv = ptr; 2308 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len); 2309 ptr += TLV_HDR_SIZE; 2310 2311 /* Loop through the HE rate set */ 2312 for (i = 0; i < arg->peer_he_mcs_count; i++) { 2313 he_mcs = ptr; 2314 he_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET, 2315 sizeof(*he_mcs)); 2316 2317 he_mcs->rx_mcs_set = cpu_to_le32(arg->peer_he_rx_mcs_set[i]); 2318 he_mcs->tx_mcs_set = cpu_to_le32(arg->peer_he_tx_mcs_set[i]); 2319 ptr += sizeof(*he_mcs); 2320 } 2321 2322 tlv = ptr; 2323 len = arg->ml.enabled ? sizeof(*ml_params) : 0; 2324 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len); 2325 ptr += TLV_HDR_SIZE; 2326 if (!len) 2327 goto skip_ml_params; 2328 2329 ml_params = ptr; 2330 ml_params->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_ASSOC_PARAMS, 2331 len); 2332 ml_params->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED); 2333 2334 if (arg->ml.assoc_link) 2335 ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK); 2336 2337 if (arg->ml.primary_umac) 2338 ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC); 2339 2340 if (arg->ml.logical_link_idx_valid) 2341 ml_params->flags |= 2342 cpu_to_le32(ATH12K_WMI_FLAG_MLO_LOGICAL_LINK_IDX_VALID); 2343 2344 if (arg->ml.peer_id_valid) 2345 ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PEER_ID_VALID); 2346 2347 ether_addr_copy(ml_params->mld_addr.addr, arg->ml.mld_addr); 2348 ml_params->logical_link_idx = cpu_to_le32(arg->ml.logical_link_idx); 2349 ml_params->ml_peer_id = cpu_to_le32(arg->ml.ml_peer_id); 2350 ml_params->ieee_link_id = cpu_to_le32(arg->ml.ieee_link_id); 2351 2352 eml_cap = arg->ml.eml_cap; 2353 if (u16_get_bits(eml_cap, IEEE80211_EML_CAP_EMLSR_SUPP)) { 2354 ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_EMLSR_SUPPORT); 2355 /* Padding delay */ 2356 eml_pad_delay = ieee80211_emlsr_pad_delay_in_us(eml_cap); 2357 ml_params->emlsr_padding_delay_us = cpu_to_le32(eml_pad_delay); 2358 /* Transition delay */ 2359 eml_trans_delay = ieee80211_emlsr_trans_delay_in_us(eml_cap); 2360 ml_params->emlsr_trans_delay_us = cpu_to_le32(eml_trans_delay); 2361 /* Transition timeout */ 2362 eml_trans_timeout = ieee80211_eml_trans_timeout_in_us(eml_cap); 2363 ml_params->emlsr_trans_timeout_us = 2364 cpu_to_le32(eml_trans_timeout); 2365 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi peer %pM emlsr padding delay %u, trans delay %u trans timeout %u", 2366 arg->peer_mac, eml_pad_delay, eml_trans_delay, 2367 eml_trans_timeout); 2368 } 2369 2370 ptr += sizeof(*ml_params); 2371 2372 skip_ml_params: 2373 /* Loop through the EHT rate set */ 2374 len = arg->peer_eht_mcs_count * sizeof(*eht_mcs); 2375 tlv = ptr; 2376 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len); 2377 ptr += TLV_HDR_SIZE; 2378 2379 for (i = 0; i < arg->peer_eht_mcs_count; i++) { 2380 eht_mcs = ptr; 2381 eht_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_EHT_RATE_SET, 2382 sizeof(*eht_mcs)); 2383 2384 eht_mcs->rx_mcs_set = cpu_to_le32(arg->peer_eht_rx_mcs_set[i]); 2385 eht_mcs->tx_mcs_set = cpu_to_le32(arg->peer_eht_tx_mcs_set[i]); 2386 ptr += sizeof(*eht_mcs); 2387 } 2388 2389 /* Update MCS15 capability */ 2390 if (arg->eht_disable_mcs15) 2391 cmd->peer_eht_ops = cpu_to_le32(IEEE80211_EHT_OPER_MCS15_DISABLE); 2392 2393 tlv = ptr; 2394 len = arg->ml.enabled ? 
arg->ml.num_partner_links * sizeof(*partner_info) : 0; 2395 /* fill ML Partner links */ 2396 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len); 2397 ptr += TLV_HDR_SIZE; 2398 2399 if (len == 0) 2400 goto send; 2401 2402 for (i = 0; i < arg->ml.num_partner_links; i++) { 2403 u32 cmd = WMI_TAG_MLO_PARTNER_LINK_PARAMS_PEER_ASSOC; 2404 2405 partner_info = ptr; 2406 partner_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(cmd, 2407 sizeof(*partner_info)); 2408 partner_info->vdev_id = cpu_to_le32(arg->ml.partner_info[i].vdev_id); 2409 partner_info->hw_link_id = 2410 cpu_to_le32(arg->ml.partner_info[i].hw_link_id); 2411 partner_info->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED); 2412 2413 if (arg->ml.partner_info[i].assoc_link) 2414 partner_info->flags |= 2415 cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK); 2416 2417 if (arg->ml.partner_info[i].primary_umac) 2418 partner_info->flags |= 2419 cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC); 2420 2421 if (arg->ml.partner_info[i].logical_link_idx_valid) { 2422 v = cpu_to_le32(ATH12K_WMI_FLAG_MLO_LINK_ID_VALID); 2423 partner_info->flags |= v; 2424 } 2425 2426 partner_info->logical_link_idx = 2427 cpu_to_le32(arg->ml.partner_info[i].logical_link_idx); 2428 ptr += sizeof(*partner_info); 2429 } 2430 2431 send: 2432 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 2433 "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x peer_eht_ops %x\n", 2434 cmd->vdev_id, cmd->peer_associd, arg->peer_mac, 2435 cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps, 2436 cmd->peer_listen_intval, cmd->peer_ht_caps, 2437 cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode, 2438 cmd->peer_mpdu_density, 2439 cmd->peer_vht_caps, cmd->peer_he_cap_info, 2440 cmd->peer_he_ops, cmd->peer_he_cap_info_ext, 2441 cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1], 2442 cmd->peer_he_cap_phy[2], 2443 cmd->peer_bw_rxnss_override, cmd->peer_flags_ext, 2444 cmd->peer_eht_cap_mac[0], cmd->peer_eht_cap_mac[1], 2445 cmd->peer_eht_cap_phy[0], cmd->peer_eht_cap_phy[1], 2446 cmd->peer_eht_cap_phy[2], cmd->peer_eht_ops); 2447 2448 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID); 2449 if (ret) { 2450 ath12k_warn(ar->ab, 2451 "failed to send WMI_PEER_ASSOC_CMDID\n"); 2452 dev_kfree_skb(skb); 2453 } 2454 2455 return ret; 2456 } 2457 2458 void ath12k_wmi_start_scan_init(struct ath12k *ar, 2459 struct ath12k_wmi_scan_req_arg *arg) 2460 { 2461 /* setup commonly used values */ 2462 arg->scan_req_id = 1; 2463 arg->scan_priority = WMI_SCAN_PRIORITY_LOW; 2464 arg->dwell_time_active = 50; 2465 arg->dwell_time_active_2g = 0; 2466 arg->dwell_time_passive = 150; 2467 arg->dwell_time_active_6g = 70; 2468 arg->dwell_time_passive_6g = 70; 2469 arg->min_rest_time = 50; 2470 arg->max_rest_time = 500; 2471 arg->repeat_probe_time = 0; 2472 arg->probe_spacing_time = 0; 2473 arg->idle_time = 0; 2474 arg->max_scan_time = 20000; 2475 arg->probe_delay = 5; 2476 arg->notify_scan_events = WMI_SCAN_EVENT_STARTED | 2477 WMI_SCAN_EVENT_COMPLETED | 2478 WMI_SCAN_EVENT_BSS_CHANNEL | 2479 WMI_SCAN_EVENT_FOREIGN_CHAN | 2480 WMI_SCAN_EVENT_DEQUEUED; 2481 arg->scan_f_chan_stat_evnt = 1; 2482 arg->num_bssid = 1; 2483 2484 /* fill bssid_list[0] with 0xff, otherwise bssid and RA will be 2485 * ZEROs in probe request 2486 */ 2487 
eth_broadcast_addr(arg->bssid_list[0].addr); 2488 } 2489 2490 static void ath12k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd, 2491 struct ath12k_wmi_scan_req_arg *arg) 2492 { 2493 /* Scan events subscription */ 2494 if (arg->scan_ev_started) 2495 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_STARTED); 2496 if (arg->scan_ev_completed) 2497 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_COMPLETED); 2498 if (arg->scan_ev_bss_chan) 2499 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_BSS_CHANNEL); 2500 if (arg->scan_ev_foreign_chan) 2501 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN); 2502 if (arg->scan_ev_dequeued) 2503 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_DEQUEUED); 2504 if (arg->scan_ev_preempted) 2505 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_PREEMPTED); 2506 if (arg->scan_ev_start_failed) 2507 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_START_FAILED); 2508 if (arg->scan_ev_restarted) 2509 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESTARTED); 2510 if (arg->scan_ev_foreign_chn_exit) 2511 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT); 2512 if (arg->scan_ev_suspended) 2513 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_SUSPENDED); 2514 if (arg->scan_ev_resumed) 2515 cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESUMED); 2516 2517 /** Set scan control flags */ 2518 cmd->scan_ctrl_flags = 0; 2519 if (arg->scan_f_passive) 2520 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_PASSIVE); 2521 if (arg->scan_f_strict_passive_pch) 2522 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN); 2523 if (arg->scan_f_promisc_mode) 2524 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROMISCUOS); 2525 if (arg->scan_f_capture_phy_err) 2526 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CAPTURE_PHY_ERROR); 2527 if (arg->scan_f_half_rate) 2528 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_HALF_RATE_SUPPORT); 2529 if (arg->scan_f_quarter_rate) 2530 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT); 2531 if (arg->scan_f_cck_rates) 2532 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_CCK_RATES); 2533 if (arg->scan_f_ofdm_rates) 2534 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_OFDM_RATES); 2535 if (arg->scan_f_chan_stat_evnt) 2536 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CHAN_STAT_EVENT); 2537 if (arg->scan_f_filter_prb_req) 2538 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ); 2539 if (arg->scan_f_bcast_probe) 2540 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_BCAST_PROBE_REQ); 2541 if (arg->scan_f_offchan_mgmt_tx) 2542 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_MGMT_TX); 2543 if (arg->scan_f_offchan_data_tx) 2544 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_DATA_TX); 2545 if (arg->scan_f_force_active_dfs_chn) 2546 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS); 2547 if (arg->scan_f_add_tpc_ie_in_probe) 2548 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ); 2549 if (arg->scan_f_add_ds_ie_in_probe) 2550 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ); 2551 if (arg->scan_f_add_spoofed_mac_in_probe) 2552 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ); 2553 if (arg->scan_f_add_rand_seq_in_probe) 2554 cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ); 2555 if (arg->scan_f_en_ie_whitelist_in_probe) 2556 cmd->scan_ctrl_flags |= 2557 
cpu_to_le32(WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ); 2558 2559 cmd->scan_ctrl_flags |= le32_encode_bits(arg->adaptive_dwell_time_mode, 2560 WMI_SCAN_DWELL_MODE_MASK); 2561 } 2562 2563 int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar, 2564 struct ath12k_wmi_scan_req_arg *arg) 2565 { 2566 struct ath12k_wmi_pdev *wmi = ar->wmi; 2567 struct wmi_start_scan_cmd *cmd; 2568 struct ath12k_wmi_ssid_params *ssid = NULL; 2569 struct ath12k_wmi_mac_addr_params *bssid; 2570 struct sk_buff *skb; 2571 struct wmi_tlv *tlv; 2572 void *ptr; 2573 int i, ret, len; 2574 u32 *tmp_ptr, extraie_len_with_pad = 0; 2575 struct ath12k_wmi_hint_short_ssid_arg *s_ssid = NULL; 2576 struct ath12k_wmi_hint_bssid_arg *hint_bssid = NULL; 2577 2578 len = sizeof(*cmd); 2579 2580 len += TLV_HDR_SIZE; 2581 if (arg->num_chan) 2582 len += arg->num_chan * sizeof(u32); 2583 2584 len += TLV_HDR_SIZE; 2585 if (arg->num_ssids) 2586 len += arg->num_ssids * sizeof(*ssid); 2587 2588 len += TLV_HDR_SIZE; 2589 if (arg->num_bssid) 2590 len += sizeof(*bssid) * arg->num_bssid; 2591 2592 if (arg->num_hint_bssid) 2593 len += TLV_HDR_SIZE + 2594 arg->num_hint_bssid * sizeof(*hint_bssid); 2595 2596 if (arg->num_hint_s_ssid) 2597 len += TLV_HDR_SIZE + 2598 arg->num_hint_s_ssid * sizeof(*s_ssid); 2599 2600 len += TLV_HDR_SIZE; 2601 if (arg->extraie.len) 2602 extraie_len_with_pad = 2603 roundup(arg->extraie.len, sizeof(u32)); 2604 if (extraie_len_with_pad <= (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len)) { 2605 len += extraie_len_with_pad; 2606 } else { 2607 ath12k_warn(ar->ab, "discard large size %d bytes extraie for scan start\n", 2608 arg->extraie.len); 2609 extraie_len_with_pad = 0; 2610 } 2611 2612 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 2613 if (!skb) 2614 return -ENOMEM; 2615 2616 ptr = skb->data; 2617 2618 cmd = ptr; 2619 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_START_SCAN_CMD, 2620 sizeof(*cmd)); 2621 2622 cmd->scan_id = cpu_to_le32(arg->scan_id); 2623 cmd->scan_req_id = cpu_to_le32(arg->scan_req_id); 2624 cmd->vdev_id = cpu_to_le32(arg->vdev_id); 2625 if (ar->state_11d == ATH12K_11D_PREPARING) 2626 arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM; 2627 else 2628 arg->scan_priority = WMI_SCAN_PRIORITY_LOW; 2629 cmd->notify_scan_events = cpu_to_le32(arg->notify_scan_events); 2630 2631 ath12k_wmi_copy_scan_event_cntrl_flags(cmd, arg); 2632 2633 cmd->dwell_time_active = cpu_to_le32(arg->dwell_time_active); 2634 cmd->dwell_time_active_2g = cpu_to_le32(arg->dwell_time_active_2g); 2635 cmd->dwell_time_passive = cpu_to_le32(arg->dwell_time_passive); 2636 cmd->dwell_time_active_6g = cpu_to_le32(arg->dwell_time_active_6g); 2637 cmd->dwell_time_passive_6g = cpu_to_le32(arg->dwell_time_passive_6g); 2638 cmd->min_rest_time = cpu_to_le32(arg->min_rest_time); 2639 cmd->max_rest_time = cpu_to_le32(arg->max_rest_time); 2640 cmd->repeat_probe_time = cpu_to_le32(arg->repeat_probe_time); 2641 cmd->probe_spacing_time = cpu_to_le32(arg->probe_spacing_time); 2642 cmd->idle_time = cpu_to_le32(arg->idle_time); 2643 cmd->max_scan_time = cpu_to_le32(arg->max_scan_time); 2644 cmd->probe_delay = cpu_to_le32(arg->probe_delay); 2645 cmd->burst_duration = cpu_to_le32(arg->burst_duration); 2646 cmd->num_chan = cpu_to_le32(arg->num_chan); 2647 cmd->num_bssid = cpu_to_le32(arg->num_bssid); 2648 cmd->num_ssids = cpu_to_le32(arg->num_ssids); 2649 cmd->ie_len = cpu_to_le32(arg->extraie.len); 2650 cmd->n_probes = cpu_to_le32(arg->n_probes); 2651 2652 ptr += sizeof(*cmd); 2653 2654 len = arg->num_chan * sizeof(u32); 2655 2656 tlv = ptr; 2657 tlv->header = 
ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, len); 2658 ptr += TLV_HDR_SIZE; 2659 tmp_ptr = (u32 *)ptr; 2660 2661 memcpy(tmp_ptr, arg->chan_list, arg->num_chan * 4); 2662 2663 ptr += len; 2664 2665 len = arg->num_ssids * sizeof(*ssid); 2666 tlv = ptr; 2667 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len); 2668 2669 ptr += TLV_HDR_SIZE; 2670 2671 if (arg->num_ssids) { 2672 ssid = ptr; 2673 for (i = 0; i < arg->num_ssids; ++i) { 2674 ssid->ssid_len = cpu_to_le32(arg->ssid[i].ssid_len); 2675 memcpy(ssid->ssid, arg->ssid[i].ssid, 2676 arg->ssid[i].ssid_len); 2677 ssid++; 2678 } 2679 } 2680 2681 ptr += (arg->num_ssids * sizeof(*ssid)); 2682 len = arg->num_bssid * sizeof(*bssid); 2683 tlv = ptr; 2684 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len); 2685 2686 ptr += TLV_HDR_SIZE; 2687 bssid = ptr; 2688 2689 if (arg->num_bssid) { 2690 for (i = 0; i < arg->num_bssid; ++i) { 2691 ether_addr_copy(bssid->addr, 2692 arg->bssid_list[i].addr); 2693 bssid++; 2694 } 2695 } 2696 2697 ptr += arg->num_bssid * sizeof(*bssid); 2698 2699 len = extraie_len_with_pad; 2700 tlv = ptr; 2701 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len); 2702 ptr += TLV_HDR_SIZE; 2703 2704 if (extraie_len_with_pad) 2705 memcpy(ptr, arg->extraie.ptr, 2706 arg->extraie.len); 2707 2708 ptr += extraie_len_with_pad; 2709 2710 if (arg->num_hint_s_ssid) { 2711 len = arg->num_hint_s_ssid * sizeof(*s_ssid); 2712 tlv = ptr; 2713 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len); 2714 ptr += TLV_HDR_SIZE; 2715 s_ssid = ptr; 2716 for (i = 0; i < arg->num_hint_s_ssid; ++i) { 2717 s_ssid->freq_flags = arg->hint_s_ssid[i].freq_flags; 2718 s_ssid->short_ssid = arg->hint_s_ssid[i].short_ssid; 2719 s_ssid++; 2720 } 2721 ptr += len; 2722 } 2723 2724 if (arg->num_hint_bssid) { 2725 len = arg->num_hint_bssid * sizeof(struct ath12k_wmi_hint_bssid_arg); 2726 tlv = ptr; 2727 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len); 2728 ptr += TLV_HDR_SIZE; 2729 hint_bssid = ptr; 2730 for (i = 0; i < arg->num_hint_bssid; ++i) { 2731 hint_bssid->freq_flags = 2732 arg->hint_bssid[i].freq_flags; 2733 ether_addr_copy(&arg->hint_bssid[i].bssid.addr[0], 2734 &hint_bssid->bssid.addr[0]); 2735 hint_bssid++; 2736 } 2737 } 2738 2739 ret = ath12k_wmi_cmd_send(wmi, skb, 2740 WMI_START_SCAN_CMDID); 2741 if (ret) { 2742 ath12k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n"); 2743 dev_kfree_skb(skb); 2744 } 2745 2746 return ret; 2747 } 2748 2749 int ath12k_wmi_send_scan_stop_cmd(struct ath12k *ar, 2750 struct ath12k_wmi_scan_cancel_arg *arg) 2751 { 2752 struct ath12k_wmi_pdev *wmi = ar->wmi; 2753 struct wmi_stop_scan_cmd *cmd; 2754 struct sk_buff *skb; 2755 int ret; 2756 2757 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 2758 if (!skb) 2759 return -ENOMEM; 2760 2761 cmd = (struct wmi_stop_scan_cmd *)skb->data; 2762 2763 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STOP_SCAN_CMD, 2764 sizeof(*cmd)); 2765 2766 cmd->vdev_id = cpu_to_le32(arg->vdev_id); 2767 cmd->requestor = cpu_to_le32(arg->requester); 2768 cmd->scan_id = cpu_to_le32(arg->scan_id); 2769 cmd->pdev_id = cpu_to_le32(arg->pdev_id); 2770 /* stop the scan with the corresponding scan_id */ 2771 if (arg->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) { 2772 /* Cancelling all scans */ 2773 cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ALL); 2774 } else if (arg->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) { 2775 /* Cancelling VAP scans */ 2776 cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_VAP_ALL); 2777 } else if (arg->req_type == 
WLAN_SCAN_CANCEL_SINGLE) { 2778 /* Cancelling specific scan */ 2779 cmd->req_type = WMI_SCAN_STOP_ONE; 2780 } else { 2781 ath12k_warn(ar->ab, "invalid scan cancel req_type %d", 2782 arg->req_type); 2783 dev_kfree_skb(skb); 2784 return -EINVAL; 2785 } 2786 2787 ret = ath12k_wmi_cmd_send(wmi, skb, 2788 WMI_STOP_SCAN_CMDID); 2789 if (ret) { 2790 ath12k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n"); 2791 dev_kfree_skb(skb); 2792 } 2793 2794 return ret; 2795 } 2796 2797 int ath12k_wmi_send_scan_chan_list_cmd(struct ath12k *ar, 2798 struct ath12k_wmi_scan_chan_list_arg *arg) 2799 { 2800 struct ath12k_wmi_pdev *wmi = ar->wmi; 2801 struct wmi_scan_chan_list_cmd *cmd; 2802 struct sk_buff *skb; 2803 struct ath12k_wmi_channel_params *chan_info; 2804 struct ath12k_wmi_channel_arg *channel_arg; 2805 struct wmi_tlv *tlv; 2806 void *ptr; 2807 int i, ret, len; 2808 u16 num_send_chans, num_sends = 0, max_chan_limit = 0; 2809 __le32 *reg1, *reg2; 2810 2811 channel_arg = &arg->channel[0]; 2812 while (arg->nallchans) { 2813 len = sizeof(*cmd) + TLV_HDR_SIZE; 2814 max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) / 2815 sizeof(*chan_info); 2816 2817 num_send_chans = min3(arg->nallchans, max_chan_limit, 2818 ATH12K_WMI_MAX_NUM_CHAN_PER_CMD); 2819 2820 arg->nallchans -= num_send_chans; 2821 len += sizeof(*chan_info) * num_send_chans; 2822 2823 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 2824 if (!skb) 2825 return -ENOMEM; 2826 2827 cmd = (struct wmi_scan_chan_list_cmd *)skb->data; 2828 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SCAN_CHAN_LIST_CMD, 2829 sizeof(*cmd)); 2830 cmd->pdev_id = cpu_to_le32(arg->pdev_id); 2831 cmd->num_scan_chans = cpu_to_le32(num_send_chans); 2832 if (num_sends) 2833 cmd->flags |= cpu_to_le32(WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG); 2834 2835 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 2836 "WMI no.of chan = %d len = %d pdev_id = %d num_sends = %d\n", 2837 num_send_chans, len, cmd->pdev_id, num_sends); 2838 2839 ptr = skb->data + sizeof(*cmd); 2840 2841 len = sizeof(*chan_info) * num_send_chans; 2842 tlv = ptr; 2843 tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_STRUCT, 2844 len); 2845 ptr += TLV_HDR_SIZE; 2846 2847 for (i = 0; i < num_send_chans; ++i) { 2848 chan_info = ptr; 2849 memset(chan_info, 0, sizeof(*chan_info)); 2850 len = sizeof(*chan_info); 2851 chan_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL, 2852 len); 2853 2854 reg1 = &chan_info->reg_info_1; 2855 reg2 = &chan_info->reg_info_2; 2856 chan_info->mhz = cpu_to_le32(channel_arg->mhz); 2857 chan_info->band_center_freq1 = cpu_to_le32(channel_arg->cfreq1); 2858 chan_info->band_center_freq2 = cpu_to_le32(channel_arg->cfreq2); 2859 2860 if (channel_arg->is_chan_passive) 2861 chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE); 2862 if (channel_arg->allow_he) 2863 chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE); 2864 else if (channel_arg->allow_vht) 2865 chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT); 2866 else if (channel_arg->allow_ht) 2867 chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT); 2868 if (channel_arg->half_rate) 2869 chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_HALF_RATE); 2870 if (channel_arg->quarter_rate) 2871 chan_info->info |= 2872 cpu_to_le32(WMI_CHAN_INFO_QUARTER_RATE); 2873 2874 if (channel_arg->psc_channel) 2875 chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PSC); 2876 2877 if (channel_arg->dfs_set) 2878 chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_DFS); 2879 2880 chan_info->info |= le32_encode_bits(channel_arg->phy_mode, 2881 WMI_CHAN_INFO_MODE); 2882 
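			/* Pack the per-channel regulatory limits: reg_info_1
			 * carries the min/max/regulatory power limits and the
			 * regulatory class, reg_info_2 the antenna max and
			 * max TX power.
			 */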
*reg1 |= le32_encode_bits(channel_arg->minpower, 2883 WMI_CHAN_REG_INFO1_MIN_PWR); 2884 *reg1 |= le32_encode_bits(channel_arg->maxpower, 2885 WMI_CHAN_REG_INFO1_MAX_PWR); 2886 *reg1 |= le32_encode_bits(channel_arg->maxregpower, 2887 WMI_CHAN_REG_INFO1_MAX_REG_PWR); 2888 *reg1 |= le32_encode_bits(channel_arg->reg_class_id, 2889 WMI_CHAN_REG_INFO1_REG_CLS); 2890 *reg2 |= le32_encode_bits(channel_arg->antennamax, 2891 WMI_CHAN_REG_INFO2_ANT_MAX); 2892 *reg2 |= le32_encode_bits(channel_arg->maxregpower, 2893 WMI_CHAN_REG_INFO2_MAX_TX_PWR); 2894 2895 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 2896 "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n", 2897 i, chan_info->mhz, chan_info->info); 2898 2899 ptr += sizeof(*chan_info); 2900 2901 channel_arg++; 2902 } 2903 2904 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID); 2905 if (ret) { 2906 ath12k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n"); 2907 dev_kfree_skb(skb); 2908 return ret; 2909 } 2910 2911 num_sends++; 2912 } 2913 2914 return 0; 2915 } 2916 2917 int ath12k_wmi_send_wmm_update_cmd(struct ath12k *ar, u32 vdev_id, 2918 struct wmi_wmm_params_all_arg *param) 2919 { 2920 struct ath12k_wmi_pdev *wmi = ar->wmi; 2921 struct wmi_vdev_set_wmm_params_cmd *cmd; 2922 struct wmi_wmm_params *wmm_param; 2923 struct wmi_wmm_params_arg *wmi_wmm_arg; 2924 struct sk_buff *skb; 2925 int ret, ac; 2926 2927 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 2928 if (!skb) 2929 return -ENOMEM; 2930 2931 cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data; 2932 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD, 2933 sizeof(*cmd)); 2934 2935 cmd->vdev_id = cpu_to_le32(vdev_id); 2936 cmd->wmm_param_type = 0; 2937 2938 for (ac = 0; ac < WME_NUM_AC; ac++) { 2939 switch (ac) { 2940 case WME_AC_BE: 2941 wmi_wmm_arg = ¶m->ac_be; 2942 break; 2943 case WME_AC_BK: 2944 wmi_wmm_arg = ¶m->ac_bk; 2945 break; 2946 case WME_AC_VI: 2947 wmi_wmm_arg = ¶m->ac_vi; 2948 break; 2949 case WME_AC_VO: 2950 wmi_wmm_arg = ¶m->ac_vo; 2951 break; 2952 } 2953 2954 wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac]; 2955 wmm_param->tlv_header = 2956 ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD, 2957 sizeof(*wmm_param)); 2958 2959 wmm_param->aifs = cpu_to_le32(wmi_wmm_arg->aifs); 2960 wmm_param->cwmin = cpu_to_le32(wmi_wmm_arg->cwmin); 2961 wmm_param->cwmax = cpu_to_le32(wmi_wmm_arg->cwmax); 2962 wmm_param->txoplimit = cpu_to_le32(wmi_wmm_arg->txop); 2963 wmm_param->acm = cpu_to_le32(wmi_wmm_arg->acm); 2964 wmm_param->no_ack = cpu_to_le32(wmi_wmm_arg->no_ack); 2965 2966 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 2967 "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n", 2968 ac, wmm_param->aifs, wmm_param->cwmin, 2969 wmm_param->cwmax, wmm_param->txoplimit, 2970 wmm_param->acm, wmm_param->no_ack); 2971 } 2972 ret = ath12k_wmi_cmd_send(wmi, skb, 2973 WMI_VDEV_SET_WMM_PARAMS_CMDID); 2974 if (ret) { 2975 ath12k_warn(ar->ab, 2976 "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID"); 2977 dev_kfree_skb(skb); 2978 } 2979 2980 return ret; 2981 } 2982 2983 int ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath12k *ar, 2984 u32 pdev_id) 2985 { 2986 struct ath12k_wmi_pdev *wmi = ar->wmi; 2987 struct wmi_dfs_phyerr_offload_cmd *cmd; 2988 struct sk_buff *skb; 2989 int ret; 2990 2991 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 2992 if (!skb) 2993 return -ENOMEM; 2994 2995 cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data; 2996 cmd->tlv_header = 2997 
ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD, 2998 sizeof(*cmd)); 2999 3000 cmd->pdev_id = cpu_to_le32(pdev_id); 3001 3002 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 3003 "WMI dfs phy err offload enable pdev id %d\n", pdev_id); 3004 3005 ret = ath12k_wmi_cmd_send(wmi, skb, 3006 WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID); 3007 if (ret) { 3008 ath12k_warn(ar->ab, 3009 "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n"); 3010 dev_kfree_skb(skb); 3011 } 3012 3013 return ret; 3014 } 3015 3016 int ath12k_wmi_set_bios_cmd(struct ath12k_base *ab, u32 param_id, 3017 const u8 *buf, size_t buf_len) 3018 { 3019 struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab; 3020 struct wmi_pdev_set_bios_interface_cmd *cmd; 3021 struct wmi_tlv *tlv; 3022 struct sk_buff *skb; 3023 u8 *ptr; 3024 u32 len, len_aligned; 3025 int ret; 3026 3027 len_aligned = roundup(buf_len, sizeof(u32)); 3028 len = sizeof(*cmd) + TLV_HDR_SIZE + len_aligned; 3029 3030 skb = ath12k_wmi_alloc_skb(wmi_ab, len); 3031 if (!skb) 3032 return -ENOMEM; 3033 3034 cmd = (struct wmi_pdev_set_bios_interface_cmd *)skb->data; 3035 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD, 3036 sizeof(*cmd)); 3037 cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC); 3038 cmd->param_type_id = cpu_to_le32(param_id); 3039 cmd->length = cpu_to_le32(buf_len); 3040 3041 ptr = skb->data + sizeof(*cmd); 3042 tlv = (struct wmi_tlv *)ptr; 3043 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len_aligned); 3044 ptr += TLV_HDR_SIZE; 3045 memcpy(ptr, buf, buf_len); 3046 3047 ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], 3048 skb, 3049 WMI_PDEV_SET_BIOS_INTERFACE_CMDID); 3050 if (ret) { 3051 ath12k_warn(ab, 3052 "failed to send WMI_PDEV_SET_BIOS_INTERFACE_CMDID parameter id %d: %d\n", 3053 param_id, ret); 3054 dev_kfree_skb(skb); 3055 } 3056 3057 return 0; 3058 } 3059 3060 int ath12k_wmi_set_bios_sar_cmd(struct ath12k_base *ab, const u8 *psar_table) 3061 { 3062 struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab; 3063 struct wmi_pdev_set_bios_sar_table_cmd *cmd; 3064 struct wmi_tlv *tlv; 3065 struct sk_buff *skb; 3066 int ret; 3067 u8 *buf_ptr; 3068 u32 len, sar_table_len_aligned, sar_dbs_backoff_len_aligned; 3069 const u8 *psar_value = psar_table + ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET; 3070 const u8 *pdbs_value = psar_table + ATH12K_ACPI_DBS_BACKOFF_DATA_OFFSET; 3071 3072 sar_table_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_TABLE_LEN, sizeof(u32)); 3073 sar_dbs_backoff_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN, 3074 sizeof(u32)); 3075 len = sizeof(*cmd) + TLV_HDR_SIZE + sar_table_len_aligned + 3076 TLV_HDR_SIZE + sar_dbs_backoff_len_aligned; 3077 3078 skb = ath12k_wmi_alloc_skb(wmi_ab, len); 3079 if (!skb) 3080 return -ENOMEM; 3081 3082 cmd = (struct wmi_pdev_set_bios_sar_table_cmd *)skb->data; 3083 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD, 3084 sizeof(*cmd)); 3085 cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC); 3086 cmd->sar_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_TABLE_LEN); 3087 cmd->dbs_backoff_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN); 3088 3089 buf_ptr = skb->data + sizeof(*cmd); 3090 tlv = (struct wmi_tlv *)buf_ptr; 3091 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, 3092 sar_table_len_aligned); 3093 buf_ptr += TLV_HDR_SIZE; 3094 memcpy(buf_ptr, psar_value, ATH12K_ACPI_BIOS_SAR_TABLE_LEN); 3095 3096 buf_ptr += sar_table_len_aligned; 3097 tlv = (struct wmi_tlv *)buf_ptr; 3098 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, 3099 sar_dbs_backoff_len_aligned); 3100 buf_ptr 
+= TLV_HDR_SIZE; 3101 memcpy(buf_ptr, pdbs_value, ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN); 3102 3103 ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], 3104 skb, 3105 WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID); 3106 if (ret) { 3107 ath12k_warn(ab, 3108 "failed to send WMI_PDEV_SET_BIOS_INTERFACE_CMDID %d\n", 3109 ret); 3110 dev_kfree_skb(skb); 3111 } 3112 3113 return ret; 3114 } 3115 3116 int ath12k_wmi_set_bios_geo_cmd(struct ath12k_base *ab, const u8 *pgeo_table) 3117 { 3118 struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab; 3119 struct wmi_pdev_set_bios_geo_table_cmd *cmd; 3120 struct wmi_tlv *tlv; 3121 struct sk_buff *skb; 3122 int ret; 3123 u8 *buf_ptr; 3124 u32 len, sar_geo_len_aligned; 3125 const u8 *pgeo_value = pgeo_table + ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET; 3126 3127 sar_geo_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN, sizeof(u32)); 3128 len = sizeof(*cmd) + TLV_HDR_SIZE + sar_geo_len_aligned; 3129 3130 skb = ath12k_wmi_alloc_skb(wmi_ab, len); 3131 if (!skb) 3132 return -ENOMEM; 3133 3134 cmd = (struct wmi_pdev_set_bios_geo_table_cmd *)skb->data; 3135 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD, 3136 sizeof(*cmd)); 3137 cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC); 3138 cmd->geo_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN); 3139 3140 buf_ptr = skb->data + sizeof(*cmd); 3141 tlv = (struct wmi_tlv *)buf_ptr; 3142 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, sar_geo_len_aligned); 3143 buf_ptr += TLV_HDR_SIZE; 3144 memcpy(buf_ptr, pgeo_value, ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN); 3145 3146 ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], 3147 skb, 3148 WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID); 3149 if (ret) { 3150 ath12k_warn(ab, 3151 "failed to send WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID %d\n", 3152 ret); 3153 dev_kfree_skb(skb); 3154 } 3155 3156 return ret; 3157 } 3158 3159 int ath12k_wmi_delba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac, 3160 u32 tid, u32 initiator, u32 reason) 3161 { 3162 struct ath12k_wmi_pdev *wmi = ar->wmi; 3163 struct wmi_delba_send_cmd *cmd; 3164 struct sk_buff *skb; 3165 int ret; 3166 3167 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 3168 if (!skb) 3169 return -ENOMEM; 3170 3171 cmd = (struct wmi_delba_send_cmd *)skb->data; 3172 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DELBA_SEND_CMD, 3173 sizeof(*cmd)); 3174 cmd->vdev_id = cpu_to_le32(vdev_id); 3175 ether_addr_copy(cmd->peer_macaddr.addr, mac); 3176 cmd->tid = cpu_to_le32(tid); 3177 cmd->initiator = cpu_to_le32(initiator); 3178 cmd->reasoncode = cpu_to_le32(reason); 3179 3180 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 3181 "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n", 3182 vdev_id, mac, tid, initiator, reason); 3183 3184 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID); 3185 3186 if (ret) { 3187 ath12k_warn(ar->ab, 3188 "failed to send WMI_DELBA_SEND_CMDID cmd\n"); 3189 dev_kfree_skb(skb); 3190 } 3191 3192 return ret; 3193 } 3194 3195 int ath12k_wmi_addba_set_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac, 3196 u32 tid, u32 status) 3197 { 3198 struct ath12k_wmi_pdev *wmi = ar->wmi; 3199 struct wmi_addba_setresponse_cmd *cmd; 3200 struct sk_buff *skb; 3201 int ret; 3202 3203 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 3204 if (!skb) 3205 return -ENOMEM; 3206 3207 cmd = (struct wmi_addba_setresponse_cmd *)skb->data; 3208 cmd->tlv_header = 3209 ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SETRESPONSE_CMD, 3210 sizeof(*cmd)); 3211 cmd->vdev_id = cpu_to_le32(vdev_id); 3212 ether_addr_copy(cmd->peer_macaddr.addr, mac); 3213 
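	/* tid identifies the block ack session; statuscode carries the
	 * ADDBA response status.
	 */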
cmd->tid = cpu_to_le32(tid); 3214 cmd->statuscode = cpu_to_le32(status); 3215 3216 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 3217 "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n", 3218 vdev_id, mac, tid, status); 3219 3220 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID); 3221 3222 if (ret) { 3223 ath12k_warn(ar->ab, 3224 "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n"); 3225 dev_kfree_skb(skb); 3226 } 3227 3228 return ret; 3229 } 3230 3231 int ath12k_wmi_addba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac, 3232 u32 tid, u32 buf_size) 3233 { 3234 struct ath12k_wmi_pdev *wmi = ar->wmi; 3235 struct wmi_addba_send_cmd *cmd; 3236 struct sk_buff *skb; 3237 int ret; 3238 3239 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 3240 if (!skb) 3241 return -ENOMEM; 3242 3243 cmd = (struct wmi_addba_send_cmd *)skb->data; 3244 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SEND_CMD, 3245 sizeof(*cmd)); 3246 cmd->vdev_id = cpu_to_le32(vdev_id); 3247 ether_addr_copy(cmd->peer_macaddr.addr, mac); 3248 cmd->tid = cpu_to_le32(tid); 3249 cmd->buffersize = cpu_to_le32(buf_size); 3250 3251 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 3252 "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n", 3253 vdev_id, mac, tid, buf_size); 3254 3255 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID); 3256 3257 if (ret) { 3258 ath12k_warn(ar->ab, 3259 "failed to send WMI_ADDBA_SEND_CMDID cmd\n"); 3260 dev_kfree_skb(skb); 3261 } 3262 3263 return ret; 3264 } 3265 3266 int ath12k_wmi_addba_clear_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac) 3267 { 3268 struct ath12k_wmi_pdev *wmi = ar->wmi; 3269 struct wmi_addba_clear_resp_cmd *cmd; 3270 struct sk_buff *skb; 3271 int ret; 3272 3273 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 3274 if (!skb) 3275 return -ENOMEM; 3276 3277 cmd = (struct wmi_addba_clear_resp_cmd *)skb->data; 3278 cmd->tlv_header = 3279 ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_CLEAR_RESP_CMD, 3280 sizeof(*cmd)); 3281 cmd->vdev_id = cpu_to_le32(vdev_id); 3282 ether_addr_copy(cmd->peer_macaddr.addr, mac); 3283 3284 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 3285 "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n", 3286 vdev_id, mac); 3287 3288 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID); 3289 3290 if (ret) { 3291 ath12k_warn(ar->ab, 3292 "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n"); 3293 dev_kfree_skb(skb); 3294 } 3295 3296 return ret; 3297 } 3298 3299 int ath12k_wmi_send_init_country_cmd(struct ath12k *ar, 3300 struct ath12k_wmi_init_country_arg *arg) 3301 { 3302 struct ath12k_wmi_pdev *wmi = ar->wmi; 3303 struct wmi_init_country_cmd *cmd; 3304 struct sk_buff *skb; 3305 int ret; 3306 3307 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 3308 if (!skb) 3309 return -ENOMEM; 3310 3311 cmd = (struct wmi_init_country_cmd *)skb->data; 3312 cmd->tlv_header = 3313 ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_INIT_COUNTRY_CMD, 3314 sizeof(*cmd)); 3315 3316 cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id); 3317 3318 switch (arg->flags) { 3319 case ALPHA_IS_SET: 3320 cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_ALPHA; 3321 memcpy(&cmd->cc_info.alpha2, arg->cc_info.alpha2, 3); 3322 break; 3323 case CC_IS_SET: 3324 cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE); 3325 cmd->cc_info.country_code = 3326 cpu_to_le32(arg->cc_info.country_code); 3327 break; 3328 case REGDMN_IS_SET: 3329 cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_REGDOMAIN); 3330 cmd->cc_info.regdom_id = cpu_to_le32(arg->cc_info.regdom_id); 3331 break; 3332 default: 3333 
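		/* Unrecognized country info type in arg->flags */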
ret = -EINVAL; 3334 goto out; 3335 } 3336 3337 ret = ath12k_wmi_cmd_send(wmi, skb, 3338 WMI_SET_INIT_COUNTRY_CMDID); 3339 3340 out: 3341 if (ret) { 3342 ath12k_warn(ar->ab, 3343 "failed to send WMI_SET_INIT_COUNTRY CMD :%d\n", 3344 ret); 3345 dev_kfree_skb(skb); 3346 } 3347 3348 return ret; 3349 } 3350 3351 int ath12k_wmi_send_set_current_country_cmd(struct ath12k *ar, 3352 struct wmi_set_current_country_arg *arg) 3353 { 3354 struct ath12k_wmi_pdev *wmi = ar->wmi; 3355 struct wmi_set_current_country_cmd *cmd; 3356 struct sk_buff *skb; 3357 int ret; 3358 3359 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 3360 if (!skb) 3361 return -ENOMEM; 3362 3363 cmd = (struct wmi_set_current_country_cmd *)skb->data; 3364 cmd->tlv_header = 3365 ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_CURRENT_COUNTRY_CMD, 3366 sizeof(*cmd)); 3367 3368 cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id); 3369 memcpy(&cmd->new_alpha2, &arg->alpha2, sizeof(arg->alpha2)); 3370 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SET_CURRENT_COUNTRY_CMDID); 3371 3372 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 3373 "set current country pdev id %d alpha2 %c%c\n", 3374 ar->pdev->pdev_id, 3375 arg->alpha2[0], 3376 arg->alpha2[1]); 3377 3378 if (ret) { 3379 ath12k_warn(ar->ab, 3380 "failed to send WMI_SET_CURRENT_COUNTRY_CMDID: %d\n", ret); 3381 dev_kfree_skb(skb); 3382 } 3383 3384 return ret; 3385 } 3386 3387 int ath12k_wmi_send_11d_scan_start_cmd(struct ath12k *ar, 3388 struct wmi_11d_scan_start_arg *arg) 3389 { 3390 struct ath12k_wmi_pdev *wmi = ar->wmi; 3391 struct wmi_11d_scan_start_cmd *cmd; 3392 struct sk_buff *skb; 3393 int ret; 3394 3395 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 3396 if (!skb) 3397 return -ENOMEM; 3398 3399 cmd = (struct wmi_11d_scan_start_cmd *)skb->data; 3400 cmd->tlv_header = 3401 ath12k_wmi_tlv_cmd_hdr(WMI_TAG_11D_SCAN_START_CMD, 3402 sizeof(*cmd)); 3403 3404 cmd->vdev_id = cpu_to_le32(arg->vdev_id); 3405 cmd->scan_period_msec = cpu_to_le32(arg->scan_period_msec); 3406 cmd->start_interval_msec = cpu_to_le32(arg->start_interval_msec); 3407 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_START_CMDID); 3408 3409 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 3410 "send 11d scan start vdev id %d period %d ms internal %d ms\n", 3411 arg->vdev_id, arg->scan_period_msec, 3412 arg->start_interval_msec); 3413 3414 if (ret) { 3415 ath12k_warn(ar->ab, 3416 "failed to send WMI_11D_SCAN_START_CMDID: %d\n", ret); 3417 dev_kfree_skb(skb); 3418 } 3419 3420 return ret; 3421 } 3422 3423 int ath12k_wmi_send_11d_scan_stop_cmd(struct ath12k *ar, u32 vdev_id) 3424 { 3425 struct ath12k_wmi_pdev *wmi = ar->wmi; 3426 struct wmi_11d_scan_stop_cmd *cmd; 3427 struct sk_buff *skb; 3428 int ret; 3429 3430 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); 3431 if (!skb) 3432 return -ENOMEM; 3433 3434 cmd = (struct wmi_11d_scan_stop_cmd *)skb->data; 3435 cmd->tlv_header = 3436 ath12k_wmi_tlv_cmd_hdr(WMI_TAG_11D_SCAN_STOP_CMD, 3437 sizeof(*cmd)); 3438 3439 cmd->vdev_id = cpu_to_le32(vdev_id); 3440 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_STOP_CMDID); 3441 3442 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 3443 "send 11d scan stop vdev id %d\n", 3444 cmd->vdev_id); 3445 3446 if (ret) { 3447 ath12k_warn(ar->ab, 3448 "failed to send WMI_11D_SCAN_STOP_CMDID: %d\n", ret); 3449 dev_kfree_skb(skb); 3450 } 3451 3452 return ret; 3453 } 3454 3455 int 3456 ath12k_wmi_send_twt_enable_cmd(struct ath12k *ar, u32 pdev_id) 3457 { 3458 struct ath12k_wmi_pdev *wmi = ar->wmi; 3459 struct ath12k_base *ab = wmi->wmi_ab->ab; 3460 struct wmi_twt_enable_params_cmd *cmd; 3461 
struct sk_buff *skb; 3462 int ret, len; 3463 3464 len = sizeof(*cmd); 3465 3466 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 3467 if (!skb) 3468 return -ENOMEM; 3469 3470 cmd = (struct wmi_twt_enable_params_cmd *)skb->data; 3471 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_ENABLE_CMD, 3472 len); 3473 cmd->pdev_id = cpu_to_le32(pdev_id); 3474 cmd->sta_cong_timer_ms = cpu_to_le32(ATH12K_TWT_DEF_STA_CONG_TIMER_MS); 3475 cmd->default_slot_size = cpu_to_le32(ATH12K_TWT_DEF_DEFAULT_SLOT_SIZE); 3476 cmd->congestion_thresh_setup = 3477 cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_SETUP); 3478 cmd->congestion_thresh_teardown = 3479 cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_TEARDOWN); 3480 cmd->congestion_thresh_critical = 3481 cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_CRITICAL); 3482 cmd->interference_thresh_teardown = 3483 cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN); 3484 cmd->interference_thresh_setup = 3485 cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_SETUP); 3486 cmd->min_no_sta_setup = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_SETUP); 3487 cmd->min_no_sta_teardown = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_TEARDOWN); 3488 cmd->no_of_bcast_mcast_slots = 3489 cpu_to_le32(ATH12K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS); 3490 cmd->min_no_twt_slots = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_TWT_SLOTS); 3491 cmd->max_no_sta_twt = cpu_to_le32(ATH12K_TWT_DEF_MAX_NO_STA_TWT); 3492 cmd->mode_check_interval = cpu_to_le32(ATH12K_TWT_DEF_MODE_CHECK_INTERVAL); 3493 cmd->add_sta_slot_interval = cpu_to_le32(ATH12K_TWT_DEF_ADD_STA_SLOT_INTERVAL); 3494 cmd->remove_sta_slot_interval = 3495 cpu_to_le32(ATH12K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL); 3496 /* TODO add MBSSID support */ 3497 cmd->mbss_support = 0; 3498 3499 ret = ath12k_wmi_cmd_send(wmi, skb, 3500 WMI_TWT_ENABLE_CMDID); 3501 if (ret) { 3502 ath12k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID"); 3503 dev_kfree_skb(skb); 3504 } 3505 return ret; 3506 } 3507 3508 int 3509 ath12k_wmi_send_twt_disable_cmd(struct ath12k *ar, u32 pdev_id) 3510 { 3511 struct ath12k_wmi_pdev *wmi = ar->wmi; 3512 struct ath12k_base *ab = wmi->wmi_ab->ab; 3513 struct wmi_twt_disable_params_cmd *cmd; 3514 struct sk_buff *skb; 3515 int ret, len; 3516 3517 len = sizeof(*cmd); 3518 3519 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 3520 if (!skb) 3521 return -ENOMEM; 3522 3523 cmd = (struct wmi_twt_disable_params_cmd *)skb->data; 3524 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_DISABLE_CMD, 3525 len); 3526 cmd->pdev_id = cpu_to_le32(pdev_id); 3527 3528 ret = ath12k_wmi_cmd_send(wmi, skb, 3529 WMI_TWT_DISABLE_CMDID); 3530 if (ret) { 3531 ath12k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID"); 3532 dev_kfree_skb(skb); 3533 } 3534 return ret; 3535 } 3536 3537 int 3538 ath12k_wmi_send_obss_spr_cmd(struct ath12k *ar, u32 vdev_id, 3539 struct ieee80211_he_obss_pd *he_obss_pd) 3540 { 3541 struct ath12k_wmi_pdev *wmi = ar->wmi; 3542 struct ath12k_base *ab = wmi->wmi_ab->ab; 3543 struct wmi_obss_spatial_reuse_params_cmd *cmd; 3544 struct sk_buff *skb; 3545 int ret, len; 3546 3547 len = sizeof(*cmd); 3548 3549 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 3550 if (!skb) 3551 return -ENOMEM; 3552 3553 cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data; 3554 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD, 3555 len); 3556 cmd->vdev_id = cpu_to_le32(vdev_id); 3557 cmd->enable = cpu_to_le32(he_obss_pd->enable); 3558 cmd->obss_min = a_cpu_to_sle32(he_obss_pd->min_offset); 3559 cmd->obss_max = a_cpu_to_sle32(he_obss_pd->max_offset); 3560 3561 ret = 
ath12k_wmi_cmd_send(wmi, skb, 3562 WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID); 3563 if (ret) { 3564 ath12k_warn(ab, 3565 "Failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID"); 3566 dev_kfree_skb(skb); 3567 } 3568 return ret; 3569 } 3570 3571 u32 ath12k_wmi_build_obss_pd(const struct ath12k_wmi_obss_pd_arg *arg) 3572 { 3573 u32 param_val = 0; 3574 3575 param_val |= u32_encode_bits((u8)arg->srg_th, GENMASK(15, 8)); 3576 param_val |= u32_encode_bits((u8)arg->non_srg_th, GENMASK(7, 0)); 3577 3578 if (arg->srp_support) 3579 param_val |= ATH12K_OBSS_PD_THRESHOLD_IN_DBM; 3580 3581 if (arg->srg_enabled && arg->srp_support) 3582 param_val |= ATH12K_OBSS_PD_SRG_EN; 3583 3584 if (arg->non_srg_enabled) 3585 param_val |= ATH12K_OBSS_PD_NON_SRG_EN; 3586 3587 return param_val; 3588 } 3589 3590 static int ath12k_wmi_pdev_set_obss_bitmap(struct ath12k *ar, 3591 const struct wmi_pdev_set_obss_bitmap_arg *arg) 3592 { 3593 struct wmi_pdev_obss_pd_bitmap_cmd *cmd; 3594 struct ath12k_wmi_pdev *wmi = ar->wmi; 3595 const int len = sizeof(*cmd); 3596 struct sk_buff *skb; 3597 int ret; 3598 3599 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 3600 if (!skb) 3601 return -ENOMEM; 3602 3603 cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data; 3604 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(arg->tlv_tag, len); 3605 cmd->pdev_id = cpu_to_le32(arg->pdev_id); 3606 memcpy(cmd->bitmap, arg->bitmap, sizeof(cmd->bitmap)); 3607 3608 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 3609 "wmi set pdev %u %s %08x %08x\n", 3610 arg->pdev_id, arg->label, arg->bitmap[0], arg->bitmap[1]); 3611 3612 ret = ath12k_wmi_cmd_send(wmi, skb, arg->cmd_id); 3613 if (ret) { 3614 ath12k_warn(ar->ab, "failed to send %s: %d\n", arg->label, ret); 3615 dev_kfree_skb(skb); 3616 } 3617 3618 return ret; 3619 } 3620 3621 int ath12k_wmi_pdev_set_srg_bss_color_bitmap(struct ath12k *ar, 3622 u32 pdev_id, const u32 *bitmap) 3623 { 3624 struct wmi_pdev_set_obss_bitmap_arg arg = { 3625 .tlv_tag = WMI_TAG_PDEV_SRG_BSS_COLOR_BITMAP_CMD, 3626 .pdev_id = pdev_id, 3627 .cmd_id = WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID, 3628 .bitmap = bitmap, 3629 .label = "SRG bss color bitmap", 3630 }; 3631 3632 return ath12k_wmi_pdev_set_obss_bitmap(ar, &arg); 3633 } 3634 3635 int ath12k_wmi_pdev_set_srg_partial_bssid_bitmap(struct ath12k *ar, 3636 u32 pdev_id, const u32 *bitmap) 3637 { 3638 struct wmi_pdev_set_obss_bitmap_arg arg = { 3639 .tlv_tag = WMI_TAG_PDEV_SRG_PARTIAL_BSSID_BITMAP_CMD, 3640 .pdev_id = pdev_id, 3641 .cmd_id = WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID, 3642 .bitmap = bitmap, 3643 .label = "SRG partial bssid bitmap", 3644 }; 3645 3646 return ath12k_wmi_pdev_set_obss_bitmap(ar, &arg); 3647 } 3648 3649 int ath12k_wmi_pdev_srg_obss_color_enable_bitmap(struct ath12k *ar, 3650 u32 pdev_id, const u32 *bitmap) 3651 { 3652 struct wmi_pdev_set_obss_bitmap_arg arg = { 3653 .tlv_tag = WMI_TAG_PDEV_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD, 3654 .pdev_id = pdev_id, 3655 .cmd_id = WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID, 3656 .bitmap = bitmap, 3657 .label = "SRG obss color enable bitmap", 3658 }; 3659 3660 return ath12k_wmi_pdev_set_obss_bitmap(ar, &arg); 3661 } 3662 3663 int ath12k_wmi_pdev_srg_obss_bssid_enable_bitmap(struct ath12k *ar, 3664 u32 pdev_id, const u32 *bitmap) 3665 { 3666 struct wmi_pdev_set_obss_bitmap_arg arg = { 3667 .tlv_tag = WMI_TAG_PDEV_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD, 3668 .pdev_id = pdev_id, 3669 .cmd_id = WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID, 3670 .bitmap = bitmap, 3671 .label = "SRG obss bssid enable bitmap", 3672 }; 3673 3674 return 
ath12k_wmi_pdev_set_obss_bitmap(ar, &arg); 3675 } 3676 3677 int ath12k_wmi_pdev_non_srg_obss_color_enable_bitmap(struct ath12k *ar, 3678 u32 pdev_id, const u32 *bitmap) 3679 { 3680 struct wmi_pdev_set_obss_bitmap_arg arg = { 3681 .tlv_tag = WMI_TAG_PDEV_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD, 3682 .pdev_id = pdev_id, 3683 .cmd_id = WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID, 3684 .bitmap = bitmap, 3685 .label = "non SRG obss color enable bitmap", 3686 }; 3687 3688 return ath12k_wmi_pdev_set_obss_bitmap(ar, &arg); 3689 } 3690 3691 int ath12k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(struct ath12k *ar, 3692 u32 pdev_id, const u32 *bitmap) 3693 { 3694 struct wmi_pdev_set_obss_bitmap_arg arg = { 3695 .tlv_tag = WMI_TAG_PDEV_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD, 3696 .pdev_id = pdev_id, 3697 .cmd_id = WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID, 3698 .bitmap = bitmap, 3699 .label = "non SRG obss bssid enable bitmap", 3700 }; 3701 3702 return ath12k_wmi_pdev_set_obss_bitmap(ar, &arg); 3703 } 3704 3705 int ath12k_wmi_obss_color_cfg_cmd(struct ath12k *ar, u32 vdev_id, 3706 u8 bss_color, u32 period, 3707 bool enable) 3708 { 3709 struct ath12k_wmi_pdev *wmi = ar->wmi; 3710 struct ath12k_base *ab = wmi->wmi_ab->ab; 3711 struct wmi_obss_color_collision_cfg_params_cmd *cmd; 3712 struct sk_buff *skb; 3713 int ret, len; 3714 3715 len = sizeof(*cmd); 3716 3717 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 3718 if (!skb) 3719 return -ENOMEM; 3720 3721 cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data; 3722 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG, 3723 len); 3724 cmd->vdev_id = cpu_to_le32(vdev_id); 3725 cmd->evt_type = enable ? cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION) : 3726 cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION_DISABLE); 3727 cmd->current_bss_color = cpu_to_le32(bss_color); 3728 cmd->detection_period_ms = cpu_to_le32(period); 3729 cmd->scan_period_ms = cpu_to_le32(ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS); 3730 cmd->free_slot_expiry_time_ms = 0; 3731 cmd->flags = 0; 3732 3733 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 3734 "wmi_send_obss_color_collision_cfg id %d type %d bss_color %d detect_period %d scan_period %d\n", 3735 cmd->vdev_id, cmd->evt_type, cmd->current_bss_color, 3736 cmd->detection_period_ms, cmd->scan_period_ms); 3737 3738 ret = ath12k_wmi_cmd_send(wmi, skb, 3739 WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID); 3740 if (ret) { 3741 ath12k_warn(ab, "Failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID"); 3742 dev_kfree_skb(skb); 3743 } 3744 return ret; 3745 } 3746 3747 int ath12k_wmi_send_bss_color_change_enable_cmd(struct ath12k *ar, u32 vdev_id, 3748 bool enable) 3749 { 3750 struct ath12k_wmi_pdev *wmi = ar->wmi; 3751 struct ath12k_base *ab = wmi->wmi_ab->ab; 3752 struct wmi_bss_color_change_enable_params_cmd *cmd; 3753 struct sk_buff *skb; 3754 int ret, len; 3755 3756 len = sizeof(*cmd); 3757 3758 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 3759 if (!skb) 3760 return -ENOMEM; 3761 3762 cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data; 3763 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BSS_COLOR_CHANGE_ENABLE, 3764 len); 3765 cmd->vdev_id = cpu_to_le32(vdev_id); 3766 cmd->enable = enable ? 
cpu_to_le32(1) : 0; 3767 3768 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 3769 "wmi_send_bss_color_change_enable id %d enable %d\n", 3770 cmd->vdev_id, cmd->enable); 3771 3772 ret = ath12k_wmi_cmd_send(wmi, skb, 3773 WMI_BSS_COLOR_CHANGE_ENABLE_CMDID); 3774 if (ret) { 3775 ath12k_warn(ab, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID"); 3776 dev_kfree_skb(skb); 3777 } 3778 return ret; 3779 } 3780 3781 int ath12k_wmi_fils_discovery_tmpl(struct ath12k *ar, u32 vdev_id, 3782 struct sk_buff *tmpl) 3783 { 3784 struct wmi_tlv *tlv; 3785 struct sk_buff *skb; 3786 void *ptr; 3787 int ret, len; 3788 size_t aligned_len; 3789 struct wmi_fils_discovery_tmpl_cmd *cmd; 3790 3791 aligned_len = roundup(tmpl->len, 4); 3792 len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len; 3793 3794 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 3795 "WMI vdev %i set FILS discovery template\n", vdev_id); 3796 3797 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); 3798 if (!skb) 3799 return -ENOMEM; 3800 3801 cmd = (struct wmi_fils_discovery_tmpl_cmd *)skb->data; 3802 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FILS_DISCOVERY_TMPL_CMD, 3803 sizeof(*cmd)); 3804 cmd->vdev_id = cpu_to_le32(vdev_id); 3805 cmd->buf_len = cpu_to_le32(tmpl->len); 3806 ptr = skb->data + sizeof(*cmd); 3807 3808 tlv = ptr; 3809 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len); 3810 memcpy(tlv->value, tmpl->data, tmpl->len); 3811 3812 ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_FILS_DISCOVERY_TMPL_CMDID); 3813 if (ret) { 3814 ath12k_warn(ar->ab, 3815 "WMI vdev %i failed to send FILS discovery template command\n", 3816 vdev_id); 3817 dev_kfree_skb(skb); 3818 } 3819 return ret; 3820 } 3821 3822 int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id, 3823 struct sk_buff *tmpl) 3824 { 3825 struct wmi_probe_tmpl_cmd *cmd; 3826 struct ath12k_wmi_bcn_prb_info_params *probe_info; 3827 struct wmi_tlv *tlv; 3828 struct sk_buff *skb; 3829 void *ptr; 3830 int ret, len; 3831 size_t aligned_len = roundup(tmpl->len, 4); 3832 3833 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 3834 "WMI vdev %i set probe response template\n", vdev_id); 3835 3836 len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len; 3837 3838 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); 3839 if (!skb) 3840 return -ENOMEM; 3841 3842 cmd = (struct wmi_probe_tmpl_cmd *)skb->data; 3843 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PRB_TMPL_CMD, 3844 sizeof(*cmd)); 3845 cmd->vdev_id = cpu_to_le32(vdev_id); 3846 cmd->buf_len = cpu_to_le32(tmpl->len); 3847 3848 ptr = skb->data + sizeof(*cmd); 3849 3850 probe_info = ptr; 3851 len = sizeof(*probe_info); 3852 probe_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO, 3853 len); 3854 probe_info->caps = 0; 3855 probe_info->erp = 0; 3856 3857 ptr += sizeof(*probe_info); 3858 3859 tlv = ptr; 3860 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len); 3861 memcpy(tlv->value, tmpl->data, tmpl->len); 3862 3863 ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID); 3864 if (ret) { 3865 ath12k_warn(ar->ab, 3866 "WMI vdev %i failed to send probe response template command\n", 3867 vdev_id); 3868 dev_kfree_skb(skb); 3869 } 3870 return ret; 3871 } 3872 3873 int ath12k_wmi_fils_discovery(struct ath12k *ar, u32 vdev_id, u32 interval, 3874 bool unsol_bcast_probe_resp_enabled) 3875 { 3876 struct sk_buff *skb; 3877 int ret, len; 3878 struct wmi_fils_discovery_cmd *cmd; 3879 3880 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 3881 "WMI vdev %i set %s interval to %u TU\n", 3882 vdev_id, unsol_bcast_probe_resp_enabled ? 
3883 "unsolicited broadcast probe response" : "FILS discovery", 3884 interval); 3885 3886 len = sizeof(*cmd); 3887 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); 3888 if (!skb) 3889 return -ENOMEM; 3890 3891 cmd = (struct wmi_fils_discovery_cmd *)skb->data; 3892 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ENABLE_FILS_CMD, 3893 len); 3894 cmd->vdev_id = cpu_to_le32(vdev_id); 3895 cmd->interval = cpu_to_le32(interval); 3896 cmd->config = cpu_to_le32(unsol_bcast_probe_resp_enabled); 3897 3898 ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID); 3899 if (ret) { 3900 ath12k_warn(ar->ab, 3901 "WMI vdev %i failed to send FILS discovery enable/disable command\n", 3902 vdev_id); 3903 dev_kfree_skb(skb); 3904 } 3905 return ret; 3906 } 3907 3908 static void 3909 ath12k_wmi_obss_color_collision_event(struct ath12k_base *ab, struct sk_buff *skb) 3910 { 3911 const struct wmi_obss_color_collision_event *ev; 3912 struct ath12k_link_vif *arvif; 3913 u32 vdev_id, evt_type; 3914 u64 bitmap; 3915 3916 const void **tb __free(kfree) = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 3917 if (IS_ERR(tb)) { 3918 ath12k_warn(ab, "failed to parse OBSS color collision tlv %ld\n", 3919 PTR_ERR(tb)); 3920 return; 3921 } 3922 3923 ev = tb[WMI_TAG_OBSS_COLOR_COLLISION_EVT]; 3924 if (!ev) { 3925 ath12k_warn(ab, "failed to fetch OBSS color collision event\n"); 3926 return; 3927 } 3928 3929 vdev_id = le32_to_cpu(ev->vdev_id); 3930 evt_type = le32_to_cpu(ev->evt_type); 3931 bitmap = le64_to_cpu(ev->obss_color_bitmap); 3932 3933 guard(rcu)(); 3934 3935 arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_id); 3936 if (!arvif) { 3937 ath12k_warn(ab, "no arvif found for vdev %u in OBSS color collision event\n", 3938 vdev_id); 3939 return; 3940 } 3941 3942 switch (evt_type) { 3943 case WMI_BSS_COLOR_COLLISION_DETECTION: 3944 ieee80211_obss_color_collision_notify(arvif->ahvif->vif, 3945 bitmap, 3946 arvif->link_id); 3947 ath12k_dbg(ab, ATH12K_DBG_WMI, 3948 "obss color collision detected vdev %u event %d bitmap %016llx\n", 3949 vdev_id, evt_type, bitmap); 3950 break; 3951 case WMI_BSS_COLOR_COLLISION_DISABLE: 3952 case WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY: 3953 case WMI_BSS_COLOR_FREE_SLOT_AVAILABLE: 3954 break; 3955 default: 3956 ath12k_warn(ab, "unknown OBSS color collision event type %d\n", evt_type); 3957 } 3958 } 3959 3960 static void 3961 ath12k_fill_band_to_mac_param(struct ath12k_base *soc, 3962 struct ath12k_wmi_pdev_band_arg *arg) 3963 { 3964 u8 i; 3965 struct ath12k_wmi_hal_reg_capabilities_ext_arg *hal_reg_cap; 3966 struct ath12k_pdev *pdev; 3967 3968 for (i = 0; i < soc->num_radios; i++) { 3969 pdev = &soc->pdevs[i]; 3970 hal_reg_cap = &soc->hal_reg_cap[i]; 3971 arg[i].pdev_id = pdev->pdev_id; 3972 3973 switch (pdev->cap.supported_bands) { 3974 case WMI_HOST_WLAN_2GHZ_5GHZ_CAP: 3975 arg[i].start_freq = hal_reg_cap->low_2ghz_chan; 3976 arg[i].end_freq = hal_reg_cap->high_5ghz_chan; 3977 break; 3978 case WMI_HOST_WLAN_2GHZ_CAP: 3979 arg[i].start_freq = hal_reg_cap->low_2ghz_chan; 3980 arg[i].end_freq = hal_reg_cap->high_2ghz_chan; 3981 break; 3982 case WMI_HOST_WLAN_5GHZ_CAP: 3983 arg[i].start_freq = hal_reg_cap->low_5ghz_chan; 3984 arg[i].end_freq = hal_reg_cap->high_5ghz_chan; 3985 break; 3986 default: 3987 break; 3988 } 3989 } 3990 } 3991 3992 static void 3993 ath12k_wmi_copy_resource_config(struct ath12k_base *ab, 3994 struct ath12k_wmi_resource_config_params *wmi_cfg, 3995 struct ath12k_wmi_resource_config_arg *tg_cfg) 3996 { 3997 wmi_cfg->num_vdevs = cpu_to_le32(tg_cfg->num_vdevs); 3998 
wmi_cfg->num_peers = cpu_to_le32(tg_cfg->num_peers); 3999 wmi_cfg->num_offload_peers = cpu_to_le32(tg_cfg->num_offload_peers); 4000 wmi_cfg->num_offload_reorder_buffs = 4001 cpu_to_le32(tg_cfg->num_offload_reorder_buffs); 4002 wmi_cfg->num_peer_keys = cpu_to_le32(tg_cfg->num_peer_keys); 4003 wmi_cfg->num_tids = cpu_to_le32(tg_cfg->num_tids); 4004 wmi_cfg->ast_skid_limit = cpu_to_le32(tg_cfg->ast_skid_limit); 4005 wmi_cfg->tx_chain_mask = cpu_to_le32(tg_cfg->tx_chain_mask); 4006 wmi_cfg->rx_chain_mask = cpu_to_le32(tg_cfg->rx_chain_mask); 4007 wmi_cfg->rx_timeout_pri[0] = cpu_to_le32(tg_cfg->rx_timeout_pri[0]); 4008 wmi_cfg->rx_timeout_pri[1] = cpu_to_le32(tg_cfg->rx_timeout_pri[1]); 4009 wmi_cfg->rx_timeout_pri[2] = cpu_to_le32(tg_cfg->rx_timeout_pri[2]); 4010 wmi_cfg->rx_timeout_pri[3] = cpu_to_le32(tg_cfg->rx_timeout_pri[3]); 4011 wmi_cfg->rx_decap_mode = cpu_to_le32(tg_cfg->rx_decap_mode); 4012 wmi_cfg->scan_max_pending_req = cpu_to_le32(tg_cfg->scan_max_pending_req); 4013 wmi_cfg->bmiss_offload_max_vdev = cpu_to_le32(tg_cfg->bmiss_offload_max_vdev); 4014 wmi_cfg->roam_offload_max_vdev = cpu_to_le32(tg_cfg->roam_offload_max_vdev); 4015 wmi_cfg->roam_offload_max_ap_profiles = 4016 cpu_to_le32(tg_cfg->roam_offload_max_ap_profiles); 4017 wmi_cfg->num_mcast_groups = cpu_to_le32(tg_cfg->num_mcast_groups); 4018 wmi_cfg->num_mcast_table_elems = cpu_to_le32(tg_cfg->num_mcast_table_elems); 4019 wmi_cfg->mcast2ucast_mode = cpu_to_le32(tg_cfg->mcast2ucast_mode); 4020 wmi_cfg->tx_dbg_log_size = cpu_to_le32(tg_cfg->tx_dbg_log_size); 4021 wmi_cfg->num_wds_entries = cpu_to_le32(tg_cfg->num_wds_entries); 4022 wmi_cfg->dma_burst_size = cpu_to_le32(tg_cfg->dma_burst_size); 4023 wmi_cfg->mac_aggr_delim = cpu_to_le32(tg_cfg->mac_aggr_delim); 4024 wmi_cfg->rx_skip_defrag_timeout_dup_detection_check = 4025 cpu_to_le32(tg_cfg->rx_skip_defrag_timeout_dup_detection_check); 4026 wmi_cfg->vow_config = cpu_to_le32(tg_cfg->vow_config); 4027 wmi_cfg->gtk_offload_max_vdev = cpu_to_le32(tg_cfg->gtk_offload_max_vdev); 4028 wmi_cfg->num_msdu_desc = cpu_to_le32(tg_cfg->num_msdu_desc); 4029 wmi_cfg->max_frag_entries = cpu_to_le32(tg_cfg->max_frag_entries); 4030 wmi_cfg->num_tdls_vdevs = cpu_to_le32(tg_cfg->num_tdls_vdevs); 4031 wmi_cfg->num_tdls_conn_table_entries = 4032 cpu_to_le32(tg_cfg->num_tdls_conn_table_entries); 4033 wmi_cfg->beacon_tx_offload_max_vdev = 4034 cpu_to_le32(tg_cfg->beacon_tx_offload_max_vdev); 4035 wmi_cfg->num_multicast_filter_entries = 4036 cpu_to_le32(tg_cfg->num_multicast_filter_entries); 4037 wmi_cfg->num_wow_filters = cpu_to_le32(tg_cfg->num_wow_filters); 4038 wmi_cfg->num_keep_alive_pattern = cpu_to_le32(tg_cfg->num_keep_alive_pattern); 4039 wmi_cfg->keep_alive_pattern_size = cpu_to_le32(tg_cfg->keep_alive_pattern_size); 4040 wmi_cfg->max_tdls_concurrent_sleep_sta = 4041 cpu_to_le32(tg_cfg->max_tdls_concurrent_sleep_sta); 4042 wmi_cfg->max_tdls_concurrent_buffer_sta = 4043 cpu_to_le32(tg_cfg->max_tdls_concurrent_buffer_sta); 4044 wmi_cfg->wmi_send_separate = cpu_to_le32(tg_cfg->wmi_send_separate); 4045 wmi_cfg->num_ocb_vdevs = cpu_to_le32(tg_cfg->num_ocb_vdevs); 4046 wmi_cfg->num_ocb_channels = cpu_to_le32(tg_cfg->num_ocb_channels); 4047 wmi_cfg->num_ocb_schedules = cpu_to_le32(tg_cfg->num_ocb_schedules); 4048 wmi_cfg->bpf_instruction_size = cpu_to_le32(tg_cfg->bpf_instruction_size); 4049 wmi_cfg->max_bssid_rx_filters = cpu_to_le32(tg_cfg->max_bssid_rx_filters); 4050 wmi_cfg->use_pdev_id = cpu_to_le32(tg_cfg->use_pdev_id); 4051 wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config | 4052 
WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64 | 4053 WMI_RSRC_CFG_FLAG1_ACK_RSSI); 4054 wmi_cfg->peer_map_unmap_version = cpu_to_le32(tg_cfg->peer_map_unmap_version); 4055 wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params); 4056 wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count); 4057 wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count); 4058 wmi_cfg->flags2 = le32_encode_bits(tg_cfg->peer_metadata_ver, 4059 WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION); 4060 wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported << 4061 WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT); 4062 if (ab->hw_params->reoq_lut_support) 4063 wmi_cfg->host_service_flags |= 4064 cpu_to_le32(1 << WMI_RSRC_CFG_HOST_SVC_FLAG_REO_QREF_SUPPORT_BIT); 4065 wmi_cfg->ema_max_vap_cnt = cpu_to_le32(tg_cfg->ema_max_vap_cnt); 4066 wmi_cfg->ema_max_profile_period = cpu_to_le32(tg_cfg->ema_max_profile_period); 4067 wmi_cfg->flags2 |= cpu_to_le32(WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET); 4068 } 4069 4070 static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi, 4071 struct ath12k_wmi_init_cmd_arg *arg) 4072 { 4073 struct ath12k_base *ab = wmi->wmi_ab->ab; 4074 struct sk_buff *skb; 4075 struct wmi_init_cmd *cmd; 4076 struct ath12k_wmi_resource_config_params *cfg; 4077 struct ath12k_wmi_pdev_set_hw_mode_cmd *hw_mode; 4078 struct ath12k_wmi_pdev_band_to_mac_params *band_to_mac; 4079 struct ath12k_wmi_host_mem_chunk_params *host_mem_chunks; 4080 struct wmi_tlv *tlv; 4081 size_t ret, len; 4082 void *ptr; 4083 u32 hw_mode_len = 0; 4084 u16 idx; 4085 4086 if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX) 4087 hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE + 4088 (arg->num_band_to_mac * sizeof(*band_to_mac)); 4089 4090 len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len + 4091 (arg->num_mem_chunks ? 
(sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0); 4092 4093 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 4094 if (!skb) 4095 return -ENOMEM; 4096 4097 cmd = (struct wmi_init_cmd *)skb->data; 4098 4099 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_INIT_CMD, 4100 sizeof(*cmd)); 4101 4102 ptr = skb->data + sizeof(*cmd); 4103 cfg = ptr; 4104 4105 ath12k_wmi_copy_resource_config(ab, cfg, &arg->res_cfg); 4106 4107 cfg->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_RESOURCE_CONFIG, 4108 sizeof(*cfg)); 4109 4110 ptr += sizeof(*cfg); 4111 host_mem_chunks = ptr + TLV_HDR_SIZE; 4112 len = sizeof(struct ath12k_wmi_host_mem_chunk_params); 4113 4114 for (idx = 0; idx < arg->num_mem_chunks; ++idx) { 4115 host_mem_chunks[idx].tlv_header = 4116 ath12k_wmi_tlv_hdr(WMI_TAG_WLAN_HOST_MEMORY_CHUNK, 4117 len); 4118 4119 host_mem_chunks[idx].ptr = cpu_to_le32(arg->mem_chunks[idx].paddr); 4120 host_mem_chunks[idx].size = cpu_to_le32(arg->mem_chunks[idx].len); 4121 host_mem_chunks[idx].req_id = cpu_to_le32(arg->mem_chunks[idx].req_id); 4122 4123 ath12k_dbg(ab, ATH12K_DBG_WMI, 4124 "WMI host mem chunk req_id %d paddr 0x%llx len %d\n", 4125 arg->mem_chunks[idx].req_id, 4126 (u64)arg->mem_chunks[idx].paddr, 4127 arg->mem_chunks[idx].len); 4128 } 4129 cmd->num_host_mem_chunks = cpu_to_le32(arg->num_mem_chunks); 4130 len = sizeof(struct ath12k_wmi_host_mem_chunk_params) * arg->num_mem_chunks; 4131 4132 /* num_mem_chunks is zero */ 4133 tlv = ptr; 4134 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len); 4135 ptr += TLV_HDR_SIZE + len; 4136 4137 if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX) { 4138 hw_mode = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)ptr; 4139 hw_mode->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD, 4140 sizeof(*hw_mode)); 4141 4142 hw_mode->hw_mode_index = cpu_to_le32(arg->hw_mode_id); 4143 hw_mode->num_band_to_mac = cpu_to_le32(arg->num_band_to_mac); 4144 4145 ptr += sizeof(*hw_mode); 4146 4147 len = arg->num_band_to_mac * sizeof(*band_to_mac); 4148 tlv = ptr; 4149 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len); 4150 4151 ptr += TLV_HDR_SIZE; 4152 len = sizeof(*band_to_mac); 4153 4154 for (idx = 0; idx < arg->num_band_to_mac; idx++) { 4155 band_to_mac = (void *)ptr; 4156 4157 band_to_mac->tlv_header = 4158 ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BAND_TO_MAC, 4159 len); 4160 band_to_mac->pdev_id = cpu_to_le32(arg->band_to_mac[idx].pdev_id); 4161 band_to_mac->start_freq = 4162 cpu_to_le32(arg->band_to_mac[idx].start_freq); 4163 band_to_mac->end_freq = 4164 cpu_to_le32(arg->band_to_mac[idx].end_freq); 4165 ptr += sizeof(*band_to_mac); 4166 } 4167 } 4168 4169 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID); 4170 if (ret) { 4171 ath12k_warn(ab, "failed to send WMI_INIT_CMDID\n"); 4172 dev_kfree_skb(skb); 4173 } 4174 4175 return ret; 4176 } 4177 4178 int ath12k_wmi_pdev_lro_cfg(struct ath12k *ar, 4179 int pdev_id) 4180 { 4181 struct ath12k_wmi_pdev_lro_config_cmd *cmd; 4182 struct sk_buff *skb; 4183 int ret; 4184 4185 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd)); 4186 if (!skb) 4187 return -ENOMEM; 4188 4189 cmd = (struct ath12k_wmi_pdev_lro_config_cmd *)skb->data; 4190 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_LRO_INFO_CMD, 4191 sizeof(*cmd)); 4192 4193 get_random_bytes(cmd->th_4, sizeof(cmd->th_4)); 4194 get_random_bytes(cmd->th_6, sizeof(cmd->th_6)); 4195 4196 cmd->pdev_id = cpu_to_le32(pdev_id); 4197 4198 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 4199 "WMI lro cfg cmd pdev_id 0x%x\n", pdev_id); 4200 4201 ret = ath12k_wmi_cmd_send(ar->wmi, skb, 
WMI_LRO_CONFIG_CMDID); 4202 if (ret) { 4203 ath12k_warn(ar->ab, 4204 "failed to send lro cfg req wmi cmd\n"); 4205 goto err; 4206 } 4207 4208 return 0; 4209 err: 4210 dev_kfree_skb(skb); 4211 return ret; 4212 } 4213 4214 int ath12k_wmi_wait_for_service_ready(struct ath12k_base *ab) 4215 { 4216 unsigned long time_left; 4217 4218 time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready, 4219 WMI_SERVICE_READY_TIMEOUT_HZ); 4220 if (!time_left) 4221 return -ETIMEDOUT; 4222 4223 return 0; 4224 } 4225 4226 int ath12k_wmi_wait_for_unified_ready(struct ath12k_base *ab) 4227 { 4228 unsigned long time_left; 4229 4230 time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready, 4231 WMI_SERVICE_READY_TIMEOUT_HZ); 4232 if (!time_left) 4233 return -ETIMEDOUT; 4234 4235 return 0; 4236 } 4237 4238 int ath12k_wmi_set_hw_mode(struct ath12k_base *ab, 4239 enum wmi_host_hw_mode_config_type mode) 4240 { 4241 struct ath12k_wmi_pdev_set_hw_mode_cmd *cmd; 4242 struct sk_buff *skb; 4243 struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab; 4244 int len; 4245 int ret; 4246 4247 len = sizeof(*cmd); 4248 4249 skb = ath12k_wmi_alloc_skb(wmi_ab, len); 4250 if (!skb) 4251 return -ENOMEM; 4252 4253 cmd = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)skb->data; 4254 4255 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD, 4256 sizeof(*cmd)); 4257 4258 cmd->pdev_id = WMI_PDEV_ID_SOC; 4259 cmd->hw_mode_index = cpu_to_le32(mode); 4260 4261 ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_PDEV_SET_HW_MODE_CMDID); 4262 if (ret) { 4263 ath12k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n"); 4264 dev_kfree_skb(skb); 4265 } 4266 4267 return ret; 4268 } 4269 4270 int ath12k_wmi_cmd_init(struct ath12k_base *ab) 4271 { 4272 struct ath12k_dp *dp = ath12k_ab_to_dp(ab); 4273 struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab; 4274 struct ath12k_wmi_init_cmd_arg arg = {}; 4275 4276 if (test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT, 4277 ab->wmi_ab.svc_map)) 4278 arg.res_cfg.is_reg_cc_ext_event_supported = true; 4279 4280 ab->hw_params->wmi_init(ab, &arg.res_cfg); 4281 ab->wow.wmi_conf_rx_decap_mode = arg.res_cfg.rx_decap_mode; 4282 4283 arg.num_mem_chunks = wmi_ab->num_mem_chunks; 4284 arg.hw_mode_id = wmi_ab->preferred_hw_mode; 4285 arg.mem_chunks = wmi_ab->mem_chunks; 4286 4287 if (ab->hw_params->single_pdev_only) 4288 arg.hw_mode_id = WMI_HOST_HW_MODE_MAX; 4289 4290 arg.num_band_to_mac = ab->num_radios; 4291 ath12k_fill_band_to_mac_param(ab, arg.band_to_mac); 4292 4293 dp->peer_metadata_ver = arg.res_cfg.peer_metadata_ver; 4294 4295 return ath12k_init_cmd_send(&wmi_ab->wmi[0], &arg); 4296 } 4297 4298 int ath12k_wmi_vdev_spectral_conf(struct ath12k *ar, 4299 struct ath12k_wmi_vdev_spectral_conf_arg *arg) 4300 { 4301 struct ath12k_wmi_vdev_spectral_conf_cmd *cmd; 4302 struct sk_buff *skb; 4303 int ret; 4304 4305 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd)); 4306 if (!skb) 4307 return -ENOMEM; 4308 4309 cmd = (struct ath12k_wmi_vdev_spectral_conf_cmd *)skb->data; 4310 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD, 4311 sizeof(*cmd)); 4312 cmd->vdev_id = cpu_to_le32(arg->vdev_id); 4313 cmd->scan_count = cpu_to_le32(arg->scan_count); 4314 cmd->scan_period = cpu_to_le32(arg->scan_period); 4315 cmd->scan_priority = cpu_to_le32(arg->scan_priority); 4316 cmd->scan_fft_size = cpu_to_le32(arg->scan_fft_size); 4317 cmd->scan_gc_ena = cpu_to_le32(arg->scan_gc_ena); 4318 cmd->scan_restart_ena = cpu_to_le32(arg->scan_restart_ena); 4319 cmd->scan_noise_floor_ref = 
cpu_to_le32(arg->scan_noise_floor_ref); 4320 cmd->scan_init_delay = cpu_to_le32(arg->scan_init_delay); 4321 cmd->scan_nb_tone_thr = cpu_to_le32(arg->scan_nb_tone_thr); 4322 cmd->scan_str_bin_thr = cpu_to_le32(arg->scan_str_bin_thr); 4323 cmd->scan_wb_rpt_mode = cpu_to_le32(arg->scan_wb_rpt_mode); 4324 cmd->scan_rssi_rpt_mode = cpu_to_le32(arg->scan_rssi_rpt_mode); 4325 cmd->scan_rssi_thr = cpu_to_le32(arg->scan_rssi_thr); 4326 cmd->scan_pwr_format = cpu_to_le32(arg->scan_pwr_format); 4327 cmd->scan_rpt_mode = cpu_to_le32(arg->scan_rpt_mode); 4328 cmd->scan_bin_scale = cpu_to_le32(arg->scan_bin_scale); 4329 cmd->scan_dbm_adj = cpu_to_le32(arg->scan_dbm_adj); 4330 cmd->scan_chn_mask = cpu_to_le32(arg->scan_chn_mask); 4331 4332 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 4333 "WMI spectral scan config cmd vdev_id 0x%x\n", 4334 arg->vdev_id); 4335 4336 ret = ath12k_wmi_cmd_send(ar->wmi, skb, 4337 WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID); 4338 if (ret) { 4339 ath12k_warn(ar->ab, 4340 "failed to send spectral scan config wmi cmd\n"); 4341 goto err; 4342 } 4343 4344 return 0; 4345 err: 4346 dev_kfree_skb(skb); 4347 return ret; 4348 } 4349 4350 int ath12k_wmi_vdev_spectral_enable(struct ath12k *ar, u32 vdev_id, 4351 u32 trigger, u32 enable) 4352 { 4353 struct ath12k_wmi_vdev_spectral_enable_cmd *cmd; 4354 struct sk_buff *skb; 4355 int ret; 4356 4357 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd)); 4358 if (!skb) 4359 return -ENOMEM; 4360 4361 cmd = (struct ath12k_wmi_vdev_spectral_enable_cmd *)skb->data; 4362 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD, 4363 sizeof(*cmd)); 4364 4365 cmd->vdev_id = cpu_to_le32(vdev_id); 4366 cmd->trigger_cmd = cpu_to_le32(trigger); 4367 cmd->enable_cmd = cpu_to_le32(enable); 4368 4369 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 4370 "WMI spectral enable cmd vdev id 0x%x\n", 4371 vdev_id); 4372 4373 ret = ath12k_wmi_cmd_send(ar->wmi, skb, 4374 WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID); 4375 if (ret) { 4376 ath12k_warn(ar->ab, 4377 "failed to send spectral enable wmi cmd\n"); 4378 goto err; 4379 } 4380 4381 return 0; 4382 err: 4383 dev_kfree_skb(skb); 4384 return ret; 4385 } 4386 4387 int ath12k_wmi_pdev_dma_ring_cfg(struct ath12k *ar, 4388 struct ath12k_wmi_pdev_dma_ring_cfg_arg *arg) 4389 { 4390 struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *cmd; 4391 struct sk_buff *skb; 4392 int ret; 4393 4394 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd)); 4395 if (!skb) 4396 return -ENOMEM; 4397 4398 cmd = (struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data; 4399 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DMA_RING_CFG_REQ, 4400 sizeof(*cmd)); 4401 4402 cmd->pdev_id = cpu_to_le32(arg->pdev_id); 4403 cmd->module_id = cpu_to_le32(arg->module_id); 4404 cmd->base_paddr_lo = cpu_to_le32(arg->base_paddr_lo); 4405 cmd->base_paddr_hi = cpu_to_le32(arg->base_paddr_hi); 4406 cmd->head_idx_paddr_lo = cpu_to_le32(arg->head_idx_paddr_lo); 4407 cmd->head_idx_paddr_hi = cpu_to_le32(arg->head_idx_paddr_hi); 4408 cmd->tail_idx_paddr_lo = cpu_to_le32(arg->tail_idx_paddr_lo); 4409 cmd->tail_idx_paddr_hi = cpu_to_le32(arg->tail_idx_paddr_hi); 4410 cmd->num_elems = cpu_to_le32(arg->num_elems); 4411 cmd->buf_size = cpu_to_le32(arg->buf_size); 4412 cmd->num_resp_per_event = cpu_to_le32(arg->num_resp_per_event); 4413 cmd->event_timeout_ms = cpu_to_le32(arg->event_timeout_ms); 4414 4415 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 4416 "WMI DMA ring cfg req cmd pdev_id 0x%x\n", 4417 arg->pdev_id); 4418 4419 ret = ath12k_wmi_cmd_send(ar->wmi, skb, 4420 
WMI_PDEV_DMA_RING_CFG_REQ_CMDID);
4421 if (ret) {
4422 ath12k_warn(ar->ab,
4423 "failed to send dma ring cfg req wmi cmd\n");
4424 goto err;
4425 }
4426
4427 return 0;
4428 err:
4429 dev_kfree_skb(skb);
4430 return ret;
4431 }
4432
4433 static int ath12k_wmi_dma_buf_entry_parse(struct ath12k_base *soc,
4434 u16 tag, u16 len,
4435 const void *ptr, void *data)
4436 {
4437 struct ath12k_wmi_dma_buf_release_arg *arg = data;
4438
4439 if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY)
4440 return -EPROTO;
4441
4442 if (arg->num_buf_entry >= le32_to_cpu(arg->fixed.num_buf_release_entry))
4443 return -ENOBUFS;
4444
4445 arg->num_buf_entry++;
4446 return 0;
4447 }
4448
4449 static int ath12k_wmi_dma_buf_meta_parse(struct ath12k_base *soc,
4450 u16 tag, u16 len,
4451 const void *ptr, void *data)
4452 {
4453 struct ath12k_wmi_dma_buf_release_arg *arg = data;
4454
4455 if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA)
4456 return -EPROTO;
4457
4458 if (arg->num_meta >= le32_to_cpu(arg->fixed.num_meta_data_entry))
4459 return -ENOBUFS;
4460
4461 arg->num_meta++;
4462
4463 return 0;
4464 }
4465
4466 static int ath12k_wmi_dma_buf_parse(struct ath12k_base *ab,
4467 u16 tag, u16 len,
4468 const void *ptr, void *data)
4469 {
4470 struct ath12k_wmi_dma_buf_release_arg *arg = data;
4471 const struct ath12k_wmi_dma_buf_release_fixed_params *fixed;
4472 u32 pdev_id;
4473 int ret;
4474
4475 switch (tag) {
4476 case WMI_TAG_DMA_BUF_RELEASE:
4477 fixed = ptr;
4478 arg->fixed = *fixed;
4479 pdev_id = DP_HW2SW_MACID(le32_to_cpu(fixed->pdev_id));
4480 arg->fixed.pdev_id = cpu_to_le32(pdev_id);
4481 break;
4482 case WMI_TAG_ARRAY_STRUCT:
4483 if (!arg->buf_entry_done) {
4484 arg->num_buf_entry = 0;
4485 arg->buf_entry = ptr;
4486
4487 ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4488 ath12k_wmi_dma_buf_entry_parse,
4489 arg);
4490 if (ret) {
4491 ath12k_warn(ab, "failed to parse dma buf entry tlv %d\n",
4492 ret);
4493 return ret;
4494 }
4495
4496 arg->buf_entry_done = true;
4497 } else if (!arg->meta_data_done) {
4498 arg->num_meta = 0;
4499 arg->meta_data = ptr;
4500
4501 ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4502 ath12k_wmi_dma_buf_meta_parse,
4503 arg);
4504 if (ret) {
4505 ath12k_warn(ab, "failed to parse dma buf meta tlv %d\n",
4506 ret);
4507 return ret;
4508 }
4509
4510 arg->meta_data_done = true;
4511 }
4512 break;
4513 default:
4514 break;
4515 }
4516 return 0;
4517 }
4518
4519 static void ath12k_wmi_pdev_dma_ring_buf_release_event(struct ath12k_base *ab,
4520 struct sk_buff *skb)
4521 {
4522 struct ath12k_wmi_dma_buf_release_arg arg = {};
4523 struct ath12k_dbring_buf_release_event param;
4524 int ret;
4525
4526 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
4527 ath12k_wmi_dma_buf_parse,
4528 &arg);
4529 if (ret) {
4530 ath12k_warn(ab, "failed to parse dma buf release tlv %d\n", ret);
4531 return;
4532 }
4533
4534 param.fixed = arg.fixed;
4535 param.buf_entry = arg.buf_entry;
4536 param.num_buf_entry = arg.num_buf_entry;
4537 param.meta_data = arg.meta_data;
4538 param.num_meta = arg.num_meta;
4539
4540 ret = ath12k_dbring_buffer_release_event(ab, &param);
4541 if (ret) {
4542 ath12k_warn(ab, "failed to handle dma buf release event %d\n", ret);
4543 return;
4544 }
4545 }
4546
4547 static int ath12k_wmi_hw_mode_caps_parse(struct ath12k_base *soc,
4548 u16 tag, u16 len,
4549 const void *ptr, void *data)
4550 {
4551 struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4552 struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap;
4553 u32 phy_map = 0;
4554
4555 if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
4556 return
-EPROTO; 4557 4558 if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->arg.num_hw_modes) 4559 return -ENOBUFS; 4560 4561 hw_mode_cap = container_of(ptr, struct ath12k_wmi_hw_mode_cap_params, 4562 hw_mode_id); 4563 svc_rdy_ext->n_hw_mode_caps++; 4564 4565 phy_map = le32_to_cpu(hw_mode_cap->phy_id_map); 4566 svc_rdy_ext->tot_phy_id += fls(phy_map); 4567 4568 return 0; 4569 } 4570 4571 static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc, 4572 u16 len, const void *ptr, void *data) 4573 { 4574 struct ath12k_svc_ext_info *svc_ext_info = &soc->wmi_ab.svc_ext_info; 4575 struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data; 4576 const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps; 4577 enum wmi_host_hw_mode_config_type mode, pref; 4578 u32 i; 4579 int ret; 4580 4581 svc_rdy_ext->n_hw_mode_caps = 0; 4582 svc_rdy_ext->hw_mode_caps = ptr; 4583 4584 ret = ath12k_wmi_tlv_iter(soc, ptr, len, 4585 ath12k_wmi_hw_mode_caps_parse, 4586 svc_rdy_ext); 4587 if (ret) { 4588 ath12k_warn(soc, "failed to parse tlv %d\n", ret); 4589 return ret; 4590 } 4591 4592 for (i = 0 ; i < svc_rdy_ext->n_hw_mode_caps; i++) { 4593 hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i]; 4594 mode = le32_to_cpu(hw_mode_caps->hw_mode_id); 4595 4596 if (mode >= WMI_HOST_HW_MODE_MAX) 4597 continue; 4598 4599 pref = soc->wmi_ab.preferred_hw_mode; 4600 4601 if (ath12k_hw_mode_pri_map[mode] <= ath12k_hw_mode_pri_map[pref]) { 4602 svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps; 4603 soc->wmi_ab.preferred_hw_mode = mode; 4604 } 4605 } 4606 4607 svc_ext_info->num_hw_modes = svc_rdy_ext->n_hw_mode_caps; 4608 4609 ath12k_dbg(soc, ATH12K_DBG_WMI, "num hw modes %u preferred_hw_mode %d\n", 4610 svc_ext_info->num_hw_modes, soc->wmi_ab.preferred_hw_mode); 4611 4612 if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX) 4613 return -EINVAL; 4614 4615 return 0; 4616 } 4617 4618 static int ath12k_wmi_mac_phy_caps_parse(struct ath12k_base *soc, 4619 u16 tag, u16 len, 4620 const void *ptr, void *data) 4621 { 4622 struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data; 4623 4624 if (tag != WMI_TAG_MAC_PHY_CAPABILITIES) 4625 return -EPROTO; 4626 4627 if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id) 4628 return -ENOBUFS; 4629 4630 len = min_t(u16, len, sizeof(struct ath12k_wmi_mac_phy_caps_params)); 4631 if (!svc_rdy_ext->n_mac_phy_caps) { 4632 svc_rdy_ext->mac_phy_caps = kzalloc((svc_rdy_ext->tot_phy_id) * len, 4633 GFP_ATOMIC); 4634 if (!svc_rdy_ext->mac_phy_caps) 4635 return -ENOMEM; 4636 } 4637 4638 memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len); 4639 svc_rdy_ext->n_mac_phy_caps++; 4640 return 0; 4641 } 4642 4643 static int ath12k_wmi_ext_hal_reg_caps_parse(struct ath12k_base *soc, 4644 u16 tag, u16 len, 4645 const void *ptr, void *data) 4646 { 4647 struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data; 4648 4649 if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT) 4650 return -EPROTO; 4651 4652 if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->arg.num_phy) 4653 return -ENOBUFS; 4654 4655 svc_rdy_ext->n_ext_hal_reg_caps++; 4656 return 0; 4657 } 4658 4659 static int ath12k_wmi_ext_hal_reg_caps(struct ath12k_base *soc, 4660 u16 len, const void *ptr, void *data) 4661 { 4662 struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0]; 4663 struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data; 4664 struct ath12k_wmi_hal_reg_capabilities_ext_arg reg_cap; 4665 int ret; 4666 u32 i; 4667 4668 svc_rdy_ext->n_ext_hal_reg_caps = 0; 4669 svc_rdy_ext->ext_hal_reg_caps = ptr; 4670 ret = ath12k_wmi_tlv_iter(soc, ptr, len, 4671 
ath12k_wmi_ext_hal_reg_caps_parse,
4672 svc_rdy_ext);
4673 if (ret) {
4674 ath12k_warn(soc, "failed to parse tlv %d\n", ret);
4675 return ret;
4676 }
4677
4678 for (i = 0; i < svc_rdy_ext->arg.num_phy; i++) {
4679 ret = ath12k_pull_reg_cap_svc_rdy_ext(wmi_handle,
4680 svc_rdy_ext->soc_hal_reg_caps,
4681 svc_rdy_ext->ext_hal_reg_caps, i,
4682 &reg_cap);
4683 if (ret) {
4684 ath12k_warn(soc, "failed to extract reg cap %d\n", i);
4685 return ret;
4686 }
4687
4688 if (reg_cap.phy_id >= MAX_RADIOS) {
4689 ath12k_warn(soc, "unexpected phy id %u\n", reg_cap.phy_id);
4690 return -EINVAL;
4691 }
4692
4693 soc->hal_reg_cap[reg_cap.phy_id] = reg_cap;
4694 }
4695 return 0;
4696 }
4697
4698 static int ath12k_wmi_ext_soc_hal_reg_caps_parse(struct ath12k_base *soc,
4699 u16 len, const void *ptr,
4700 void *data)
4701 {
4702 struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
4703 struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
4704 u8 hw_mode_id = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.hw_mode_id);
4705 u32 phy_id_map;
4706 int pdev_index = 0;
4707 int ret;
4708
4709 svc_rdy_ext->soc_hal_reg_caps = ptr;
4710 svc_rdy_ext->arg.num_phy = le32_to_cpu(svc_rdy_ext->soc_hal_reg_caps->num_phy);
4711
4712 soc->num_radios = 0;
4713 phy_id_map = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.phy_id_map);
4714 soc->fw_pdev_count = 0;
4715
4716 while (phy_id_map && soc->num_radios < MAX_RADIOS) {
4717 ret = ath12k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
4718 svc_rdy_ext,
4719 hw_mode_id, soc->num_radios,
4720 &soc->pdevs[pdev_index]);
4721 if (ret) {
4722 ath12k_warn(soc, "failed to extract mac caps, idx :%d\n",
4723 soc->num_radios);
4724 return ret;
4725 }
4726
4727 soc->num_radios++;
4728
4729 /* For single_pdev_only targets,
4730 * save mac_phy capability in the same pdev
4731 */
4732 if (soc->hw_params->single_pdev_only)
4733 pdev_index = 0;
4734 else
4735 pdev_index = soc->num_radios;
4736
4737 /* TODO: mac_phy_cap prints */
4738 phy_id_map >>= 1;
4739 }
4740
4741 if (soc->hw_params->single_pdev_only) {
4742 soc->num_radios = 1;
4743 soc->pdevs[0].pdev_id = 0;
4744 }
4745
4746 return 0;
4747 }
4748
4749 static int ath12k_wmi_dma_ring_caps_parse(struct ath12k_base *soc,
4750 u16 tag, u16 len,
4751 const void *ptr, void *data)
4752 {
4753 struct ath12k_wmi_dma_ring_caps_parse *parse = data;
4754
4755 if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
4756 return -EPROTO;
4757
4758 parse->n_dma_ring_caps++;
4759 return 0;
4760 }
4761
4762 static int ath12k_wmi_alloc_dbring_caps(struct ath12k_base *ab,
4763 u32 num_cap)
4764 {
4765 size_t sz;
4766 void *ptr;
4767
4768 sz = num_cap * sizeof(struct ath12k_dbring_cap);
4769 ptr = kzalloc(sz, GFP_ATOMIC);
4770 if (!ptr)
4771 return -ENOMEM;
4772
4773 ab->db_caps = ptr;
4774 ab->num_db_cap = num_cap;
4775
4776 return 0;
4777 }
4778
4779 static void ath12k_wmi_free_dbring_caps(struct ath12k_base *ab)
4780 {
4781 kfree(ab->db_caps);
4782 ab->db_caps = NULL;
4783 ab->num_db_cap = 0;
4784 }
4785
4786 static int ath12k_wmi_dma_ring_caps(struct ath12k_base *ab,
4787 u16 len, const void *ptr, void *data)
4788 {
4789 struct ath12k_wmi_dma_ring_caps_parse *dma_caps_parse = data;
4790 struct ath12k_wmi_dma_ring_caps_params *dma_caps;
4791 struct ath12k_dbring_cap *dir_buff_caps;
4792 int ret;
4793 u32 i;
4794
4795 dma_caps_parse->n_dma_ring_caps = 0;
4796 dma_caps = (struct ath12k_wmi_dma_ring_caps_params *)ptr;
4797 ret = ath12k_wmi_tlv_iter(ab, ptr, len,
4798 ath12k_wmi_dma_ring_caps_parse,
4799 dma_caps_parse);
4800 if (ret) {
4801 ath12k_warn(ab, "failed to parse dma
ring caps tlv %d\n", ret); 4802 return ret; 4803 } 4804 4805 if (!dma_caps_parse->n_dma_ring_caps) 4806 return 0; 4807 4808 if (ab->num_db_cap) { 4809 ath12k_warn(ab, "Already processed, so ignoring dma ring caps\n"); 4810 return 0; 4811 } 4812 4813 ret = ath12k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps); 4814 if (ret) 4815 return ret; 4816 4817 dir_buff_caps = ab->db_caps; 4818 for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) { 4819 if (le32_to_cpu(dma_caps[i].module_id) >= WMI_DIRECT_BUF_MAX) { 4820 ath12k_warn(ab, "Invalid module id %d\n", 4821 le32_to_cpu(dma_caps[i].module_id)); 4822 ret = -EINVAL; 4823 goto free_dir_buff; 4824 } 4825 4826 dir_buff_caps[i].id = le32_to_cpu(dma_caps[i].module_id); 4827 dir_buff_caps[i].pdev_id = 4828 DP_HW2SW_MACID(le32_to_cpu(dma_caps[i].pdev_id)); 4829 dir_buff_caps[i].min_elem = le32_to_cpu(dma_caps[i].min_elem); 4830 dir_buff_caps[i].min_buf_sz = le32_to_cpu(dma_caps[i].min_buf_sz); 4831 dir_buff_caps[i].min_buf_align = le32_to_cpu(dma_caps[i].min_buf_align); 4832 } 4833 4834 return 0; 4835 4836 free_dir_buff: 4837 ath12k_wmi_free_dbring_caps(ab); 4838 return ret; 4839 } 4840 4841 static void 4842 ath12k_wmi_save_mac_phy_info(struct ath12k_base *ab, 4843 const struct ath12k_wmi_mac_phy_caps_params *mac_phy_cap, 4844 struct ath12k_svc_ext_mac_phy_info *mac_phy_info) 4845 { 4846 mac_phy_info->phy_id = __le32_to_cpu(mac_phy_cap->phy_id); 4847 mac_phy_info->supported_bands = __le32_to_cpu(mac_phy_cap->supported_bands); 4848 mac_phy_info->hw_freq_range.low_2ghz_freq = 4849 __le32_to_cpu(mac_phy_cap->low_2ghz_chan_freq); 4850 mac_phy_info->hw_freq_range.high_2ghz_freq = 4851 __le32_to_cpu(mac_phy_cap->high_2ghz_chan_freq); 4852 mac_phy_info->hw_freq_range.low_5ghz_freq = 4853 __le32_to_cpu(mac_phy_cap->low_5ghz_chan_freq); 4854 mac_phy_info->hw_freq_range.high_5ghz_freq = 4855 __le32_to_cpu(mac_phy_cap->high_5ghz_chan_freq); 4856 } 4857 4858 static void 4859 ath12k_wmi_save_all_mac_phy_info(struct ath12k_base *ab, 4860 struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext) 4861 { 4862 struct ath12k_svc_ext_info *svc_ext_info = &ab->wmi_ab.svc_ext_info; 4863 const struct ath12k_wmi_mac_phy_caps_params *mac_phy_cap; 4864 const struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap; 4865 struct ath12k_svc_ext_mac_phy_info *mac_phy_info; 4866 u32 hw_mode_id, phy_bit_map; 4867 u8 hw_idx; 4868 4869 mac_phy_info = &svc_ext_info->mac_phy_info[0]; 4870 mac_phy_cap = svc_rdy_ext->mac_phy_caps; 4871 4872 for (hw_idx = 0; hw_idx < svc_ext_info->num_hw_modes; hw_idx++) { 4873 hw_mode_cap = &svc_rdy_ext->hw_mode_caps[hw_idx]; 4874 hw_mode_id = __le32_to_cpu(hw_mode_cap->hw_mode_id); 4875 phy_bit_map = __le32_to_cpu(hw_mode_cap->phy_id_map); 4876 4877 while (phy_bit_map) { 4878 ath12k_wmi_save_mac_phy_info(ab, mac_phy_cap, mac_phy_info); 4879 mac_phy_info->hw_mode_config_type = 4880 le32_get_bits(hw_mode_cap->hw_mode_config_type, 4881 WMI_HW_MODE_CAP_CFG_TYPE); 4882 ath12k_dbg(ab, ATH12K_DBG_WMI, 4883 "hw_idx %u hw_mode_id %u hw_mode_config_type %u supported_bands %u phy_id %u 2 GHz [%u - %u] 5 GHz [%u - %u]\n", 4884 hw_idx, hw_mode_id, 4885 mac_phy_info->hw_mode_config_type, 4886 mac_phy_info->supported_bands, mac_phy_info->phy_id, 4887 mac_phy_info->hw_freq_range.low_2ghz_freq, 4888 mac_phy_info->hw_freq_range.high_2ghz_freq, 4889 mac_phy_info->hw_freq_range.low_5ghz_freq, 4890 mac_phy_info->hw_freq_range.high_5ghz_freq); 4891 4892 mac_phy_cap++; 4893 mac_phy_info++; 4894 4895 phy_bit_map >>= 1; 4896 } 4897 } 4898 } 4899 4900 static int 
ath12k_wmi_svc_rdy_ext_parse(struct ath12k_base *ab, 4901 u16 tag, u16 len, 4902 const void *ptr, void *data) 4903 { 4904 struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0]; 4905 struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data; 4906 int ret; 4907 4908 switch (tag) { 4909 case WMI_TAG_SERVICE_READY_EXT_EVENT: 4910 ret = ath12k_pull_svc_ready_ext(wmi_handle, ptr, 4911 &svc_rdy_ext->arg); 4912 if (ret) { 4913 ath12k_warn(ab, "unable to extract ext params\n"); 4914 return ret; 4915 } 4916 break; 4917 4918 case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS: 4919 svc_rdy_ext->hw_caps = ptr; 4920 svc_rdy_ext->arg.num_hw_modes = 4921 le32_to_cpu(svc_rdy_ext->hw_caps->num_hw_modes); 4922 break; 4923 4924 case WMI_TAG_SOC_HAL_REG_CAPABILITIES: 4925 ret = ath12k_wmi_ext_soc_hal_reg_caps_parse(ab, len, ptr, 4926 svc_rdy_ext); 4927 if (ret) 4928 return ret; 4929 break; 4930 4931 case WMI_TAG_ARRAY_STRUCT: 4932 if (!svc_rdy_ext->hw_mode_done) { 4933 ret = ath12k_wmi_hw_mode_caps(ab, len, ptr, svc_rdy_ext); 4934 if (ret) 4935 return ret; 4936 4937 svc_rdy_ext->hw_mode_done = true; 4938 } else if (!svc_rdy_ext->mac_phy_done) { 4939 svc_rdy_ext->n_mac_phy_caps = 0; 4940 ret = ath12k_wmi_tlv_iter(ab, ptr, len, 4941 ath12k_wmi_mac_phy_caps_parse, 4942 svc_rdy_ext); 4943 if (ret) { 4944 ath12k_warn(ab, "failed to parse tlv %d\n", ret); 4945 return ret; 4946 } 4947 4948 ath12k_wmi_save_all_mac_phy_info(ab, svc_rdy_ext); 4949 4950 svc_rdy_ext->mac_phy_done = true; 4951 } else if (!svc_rdy_ext->ext_hal_reg_done) { 4952 ret = ath12k_wmi_ext_hal_reg_caps(ab, len, ptr, svc_rdy_ext); 4953 if (ret) 4954 return ret; 4955 4956 svc_rdy_ext->ext_hal_reg_done = true; 4957 } else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) { 4958 svc_rdy_ext->mac_phy_chainmask_combo_done = true; 4959 } else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) { 4960 svc_rdy_ext->mac_phy_chainmask_cap_done = true; 4961 } else if (!svc_rdy_ext->oem_dma_ring_cap_done) { 4962 svc_rdy_ext->oem_dma_ring_cap_done = true; 4963 } else if (!svc_rdy_ext->dma_ring_cap_done) { 4964 ret = ath12k_wmi_dma_ring_caps(ab, len, ptr, 4965 &svc_rdy_ext->dma_caps_parse); 4966 if (ret) 4967 return ret; 4968 4969 svc_rdy_ext->dma_ring_cap_done = true; 4970 } 4971 break; 4972 4973 default: 4974 break; 4975 } 4976 return 0; 4977 } 4978 4979 static int ath12k_service_ready_ext_event(struct ath12k_base *ab, 4980 struct sk_buff *skb) 4981 { 4982 struct ath12k_wmi_svc_rdy_ext_parse svc_rdy_ext = { }; 4983 int ret; 4984 4985 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, 4986 ath12k_wmi_svc_rdy_ext_parse, 4987 &svc_rdy_ext); 4988 if (ret) { 4989 ath12k_warn(ab, "failed to parse tlv %d\n", ret); 4990 goto err; 4991 } 4992 4993 if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map)) 4994 complete(&ab->wmi_ab.service_ready); 4995 4996 kfree(svc_rdy_ext.mac_phy_caps); 4997 return 0; 4998 4999 err: 5000 kfree(svc_rdy_ext.mac_phy_caps); 5001 ath12k_wmi_free_dbring_caps(ab); 5002 return ret; 5003 } 5004 5005 static int ath12k_pull_svc_ready_ext2(struct ath12k_wmi_pdev *wmi_handle, 5006 const void *ptr, 5007 struct ath12k_wmi_svc_rdy_ext2_arg *arg) 5008 { 5009 const struct wmi_service_ready_ext2_event *ev = ptr; 5010 5011 if (!ev) 5012 return -EINVAL; 5013 5014 arg->reg_db_version = le32_to_cpu(ev->reg_db_version); 5015 arg->hw_min_max_tx_power_2ghz = le32_to_cpu(ev->hw_min_max_tx_power_2ghz); 5016 arg->hw_min_max_tx_power_5ghz = le32_to_cpu(ev->hw_min_max_tx_power_5ghz); 5017 arg->chwidth_num_peer_caps = le32_to_cpu(ev->chwidth_num_peer_caps); 5018 
arg->preamble_puncture_bw = le32_to_cpu(ev->preamble_puncture_bw); 5019 arg->max_user_per_ppdu_ofdma = le32_to_cpu(ev->max_user_per_ppdu_ofdma); 5020 arg->max_user_per_ppdu_mumimo = le32_to_cpu(ev->max_user_per_ppdu_mumimo); 5021 arg->target_cap_flags = le32_to_cpu(ev->target_cap_flags); 5022 return 0; 5023 } 5024 5025 static void ath12k_wmi_eht_caps_parse(struct ath12k_pdev *pdev, u32 band, 5026 const __le32 cap_mac_info[], 5027 const __le32 cap_phy_info[], 5028 const __le32 supp_mcs[], 5029 const struct ath12k_wmi_ppe_threshold_params *ppet, 5030 __le32 cap_info_internal) 5031 { 5032 struct ath12k_band_cap *cap_band = &pdev->cap.band[band]; 5033 u32 support_320mhz; 5034 u8 i; 5035 5036 if (band == NL80211_BAND_6GHZ) 5037 support_320mhz = cap_band->eht_cap_phy_info[0] & 5038 IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ; 5039 5040 for (i = 0; i < WMI_MAX_EHTCAP_MAC_SIZE; i++) 5041 cap_band->eht_cap_mac_info[i] = le32_to_cpu(cap_mac_info[i]); 5042 5043 for (i = 0; i < WMI_MAX_EHTCAP_PHY_SIZE; i++) 5044 cap_band->eht_cap_phy_info[i] = le32_to_cpu(cap_phy_info[i]); 5045 5046 if (band == NL80211_BAND_6GHZ) 5047 cap_band->eht_cap_phy_info[0] |= support_320mhz; 5048 5049 cap_band->eht_mcs_20_only = le32_to_cpu(supp_mcs[0]); 5050 cap_band->eht_mcs_80 = le32_to_cpu(supp_mcs[1]); 5051 if (band != NL80211_BAND_2GHZ) { 5052 cap_band->eht_mcs_160 = le32_to_cpu(supp_mcs[2]); 5053 cap_band->eht_mcs_320 = le32_to_cpu(supp_mcs[3]); 5054 } 5055 5056 cap_band->eht_ppet.numss_m1 = le32_to_cpu(ppet->numss_m1); 5057 cap_band->eht_ppet.ru_bit_mask = le32_to_cpu(ppet->ru_info); 5058 for (i = 0; i < WMI_MAX_NUM_SS; i++) 5059 cap_band->eht_ppet.ppet16_ppet8_ru3_ru0[i] = 5060 le32_to_cpu(ppet->ppet16_ppet8_ru3_ru0[i]); 5061 5062 cap_band->eht_cap_info_internal = le32_to_cpu(cap_info_internal); 5063 } 5064 5065 static int 5066 ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab, 5067 const struct ath12k_wmi_caps_ext_params *caps, 5068 struct ath12k_pdev *pdev) 5069 { 5070 u32 bands; 5071 int i; 5072 5073 if (ab->hw_params->single_pdev_only) { 5074 for (i = 0; i < ab->fw_pdev_count; i++) { 5075 struct ath12k_fw_pdev *fw_pdev = &ab->fw_pdev[i]; 5076 5077 if (fw_pdev->pdev_id == ath12k_wmi_caps_ext_get_pdev_id(caps) && 5078 fw_pdev->phy_id == le32_to_cpu(caps->phy_id)) { 5079 bands = fw_pdev->supported_bands; 5080 break; 5081 } 5082 } 5083 5084 if (i == ab->fw_pdev_count) 5085 return -EINVAL; 5086 } else { 5087 bands = pdev->cap.supported_bands; 5088 } 5089 5090 if (bands & WMI_HOST_WLAN_2GHZ_CAP) { 5091 ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_2GHZ, 5092 caps->eht_cap_mac_info_2ghz, 5093 caps->eht_cap_phy_info_2ghz, 5094 caps->eht_supp_mcs_ext_2ghz, 5095 &caps->eht_ppet_2ghz, 5096 caps->eht_cap_info_internal); 5097 } 5098 5099 if (bands & WMI_HOST_WLAN_5GHZ_CAP) { 5100 ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_5GHZ, 5101 caps->eht_cap_mac_info_5ghz, 5102 caps->eht_cap_phy_info_5ghz, 5103 caps->eht_supp_mcs_ext_5ghz, 5104 &caps->eht_ppet_5ghz, 5105 caps->eht_cap_info_internal); 5106 5107 ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_6GHZ, 5108 caps->eht_cap_mac_info_5ghz, 5109 caps->eht_cap_phy_info_5ghz, 5110 caps->eht_supp_mcs_ext_5ghz, 5111 &caps->eht_ppet_5ghz, 5112 caps->eht_cap_info_internal); 5113 } 5114 5115 pdev->cap.eml_cap = le32_to_cpu(caps->eml_capability); 5116 pdev->cap.mld_cap = le32_to_cpu(caps->mld_capability); 5117 5118 return 0; 5119 } 5120 5121 static int ath12k_wmi_tlv_mac_phy_caps_ext(struct ath12k_base *ab, u16 tag, 5122 u16 len, const void *ptr, 5123 void *data) 5124 { 5125 const struct 
ath12k_wmi_caps_ext_params *caps = ptr;
5126 struct ath12k_band_cap *cap_band;
5127 u32 support_320mhz;
5128 int i = 0, ret;
5129
5130 if (tag != WMI_TAG_MAC_PHY_CAPABILITIES_EXT)
5131 return -EPROTO;
5132
5133 if (ab->hw_params->single_pdev_only) {
5134 if (caps->hw_mode_id == WMI_HOST_HW_MODE_SINGLE) {
5135 support_320mhz = le32_to_cpu(caps->eht_cap_phy_info_5ghz[0]) &
5136 IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
5137 cap_band = &ab->pdevs[0].cap.band[NL80211_BAND_6GHZ];
5138 cap_band->eht_cap_phy_info[0] |= support_320mhz;
5139 }
5140
5141 if (ab->wmi_ab.preferred_hw_mode != le32_to_cpu(caps->hw_mode_id))
5142 return 0;
5143 } else {
5144 for (i = 0; i < ab->num_radios; i++) {
5145 if (ab->pdevs[i].pdev_id ==
5146 ath12k_wmi_caps_ext_get_pdev_id(caps))
5147 break;
5148 }
5149
5150 if (i == ab->num_radios)
5151 return -EINVAL;
5152 }
5153
5154 ret = ath12k_wmi_tlv_mac_phy_caps_ext_parse(ab, caps, &ab->pdevs[i]);
5155 if (ret) {
5156 ath12k_warn(ab,
5157 "failed to parse extended MAC PHY capabilities for pdev %d: %d\n",
5158 ab->pdevs[i].pdev_id, ret);
5159 return ret;
5160 }
5161
5162 return 0;
5163 }
5164
5165 static void
5166 ath12k_wmi_update_freq_info(struct ath12k_base *ab,
5167 struct ath12k_svc_ext_mac_phy_info *mac_cap,
5168 enum ath12k_hw_mode mode,
5169 u32 phy_id)
5170 {
5171 struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
5172 struct ath12k_hw_mode_freq_range_arg *mac_range;
5173
5174 mac_range = &hw_mode_info->freq_range_caps[mode][phy_id];
5175
5176 if (mac_cap->supported_bands & WMI_HOST_WLAN_2GHZ_CAP) {
5177 mac_range->low_2ghz_freq = max_t(u32,
5178 mac_cap->hw_freq_range.low_2ghz_freq,
5179 ATH12K_MIN_2GHZ_FREQ);
5180 mac_range->high_2ghz_freq = mac_cap->hw_freq_range.high_2ghz_freq ?
5181 min_t(u32,
5182 mac_cap->hw_freq_range.high_2ghz_freq,
5183 ATH12K_MAX_2GHZ_FREQ) :
5184 ATH12K_MAX_2GHZ_FREQ;
5185 }
5186
5187 if (mac_cap->supported_bands & WMI_HOST_WLAN_5GHZ_CAP) {
5188 mac_range->low_5ghz_freq = max_t(u32,
5189 mac_cap->hw_freq_range.low_5ghz_freq,
5190 ATH12K_MIN_5GHZ_FREQ);
5191 mac_range->high_5ghz_freq = mac_cap->hw_freq_range.high_5ghz_freq ?
5192 min_t(u32, 5193 mac_cap->hw_freq_range.high_5ghz_freq, 5194 ATH12K_MAX_6GHZ_FREQ) : 5195 ATH12K_MAX_6GHZ_FREQ; 5196 } 5197 } 5198 5199 static bool 5200 ath12k_wmi_all_phy_range_updated(struct ath12k_base *ab, 5201 enum ath12k_hw_mode hwmode) 5202 { 5203 struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info; 5204 struct ath12k_hw_mode_freq_range_arg *mac_range; 5205 u8 phy_id; 5206 5207 for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) { 5208 mac_range = &hw_mode_info->freq_range_caps[hwmode][phy_id]; 5209 /* modify SBS/DBS range only when both phy for DBS are filled */ 5210 if (!mac_range->low_2ghz_freq && !mac_range->low_5ghz_freq) 5211 return false; 5212 } 5213 5214 return true; 5215 } 5216 5217 static void ath12k_wmi_update_dbs_freq_info(struct ath12k_base *ab) 5218 { 5219 struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info; 5220 struct ath12k_hw_mode_freq_range_arg *mac_range; 5221 u8 phy_id; 5222 5223 mac_range = hw_mode_info->freq_range_caps[ATH12K_HW_MODE_DBS]; 5224 /* Reset 5 GHz range for shared mac for DBS */ 5225 for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) { 5226 if (mac_range[phy_id].low_2ghz_freq && 5227 mac_range[phy_id].low_5ghz_freq) { 5228 mac_range[phy_id].low_5ghz_freq = 0; 5229 mac_range[phy_id].high_5ghz_freq = 0; 5230 } 5231 } 5232 } 5233 5234 static u32 5235 ath12k_wmi_get_highest_5ghz_freq_from_range(struct ath12k_hw_mode_freq_range_arg *range) 5236 { 5237 u32 highest_freq = 0; 5238 u8 phy_id; 5239 5240 for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) { 5241 if (range[phy_id].high_5ghz_freq > highest_freq) 5242 highest_freq = range[phy_id].high_5ghz_freq; 5243 } 5244 5245 return highest_freq ? highest_freq : ATH12K_MAX_6GHZ_FREQ; 5246 } 5247 5248 static u32 5249 ath12k_wmi_get_lowest_5ghz_freq_from_range(struct ath12k_hw_mode_freq_range_arg *range) 5250 { 5251 u32 lowest_freq = 0; 5252 u8 phy_id; 5253 5254 for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) { 5255 if ((!lowest_freq && range[phy_id].low_5ghz_freq) || 5256 range[phy_id].low_5ghz_freq < lowest_freq) 5257 lowest_freq = range[phy_id].low_5ghz_freq; 5258 } 5259 5260 return lowest_freq ? 
lowest_freq : ATH12K_MIN_5GHZ_FREQ; 5261 } 5262 5263 static void 5264 ath12k_wmi_fill_upper_share_sbs_freq(struct ath12k_base *ab, 5265 u16 sbs_range_sep, 5266 struct ath12k_hw_mode_freq_range_arg *ref_freq) 5267 { 5268 struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info; 5269 struct ath12k_hw_mode_freq_range_arg *upper_sbs_freq_range; 5270 u8 phy_id; 5271 5272 upper_sbs_freq_range = 5273 hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS_UPPER_SHARE]; 5274 5275 for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) { 5276 upper_sbs_freq_range[phy_id].low_2ghz_freq = 5277 ref_freq[phy_id].low_2ghz_freq; 5278 upper_sbs_freq_range[phy_id].high_2ghz_freq = 5279 ref_freq[phy_id].high_2ghz_freq; 5280 5281 /* update for shared mac */ 5282 if (upper_sbs_freq_range[phy_id].low_2ghz_freq) { 5283 upper_sbs_freq_range[phy_id].low_5ghz_freq = sbs_range_sep + 10; 5284 upper_sbs_freq_range[phy_id].high_5ghz_freq = 5285 ath12k_wmi_get_highest_5ghz_freq_from_range(ref_freq); 5286 } else { 5287 upper_sbs_freq_range[phy_id].low_5ghz_freq = 5288 ath12k_wmi_get_lowest_5ghz_freq_from_range(ref_freq); 5289 upper_sbs_freq_range[phy_id].high_5ghz_freq = sbs_range_sep; 5290 } 5291 } 5292 } 5293 5294 static void 5295 ath12k_wmi_fill_lower_share_sbs_freq(struct ath12k_base *ab, 5296 u16 sbs_range_sep, 5297 struct ath12k_hw_mode_freq_range_arg *ref_freq) 5298 { 5299 struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info; 5300 struct ath12k_hw_mode_freq_range_arg *lower_sbs_freq_range; 5301 u8 phy_id; 5302 5303 lower_sbs_freq_range = 5304 hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS_LOWER_SHARE]; 5305 5306 for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) { 5307 lower_sbs_freq_range[phy_id].low_2ghz_freq = 5308 ref_freq[phy_id].low_2ghz_freq; 5309 lower_sbs_freq_range[phy_id].high_2ghz_freq = 5310 ref_freq[phy_id].high_2ghz_freq; 5311 5312 /* update for shared mac */ 5313 if (lower_sbs_freq_range[phy_id].low_2ghz_freq) { 5314 lower_sbs_freq_range[phy_id].low_5ghz_freq = 5315 ath12k_wmi_get_lowest_5ghz_freq_from_range(ref_freq); 5316 lower_sbs_freq_range[phy_id].high_5ghz_freq = sbs_range_sep; 5317 } else { 5318 lower_sbs_freq_range[phy_id].low_5ghz_freq = sbs_range_sep + 10; 5319 lower_sbs_freq_range[phy_id].high_5ghz_freq = 5320 ath12k_wmi_get_highest_5ghz_freq_from_range(ref_freq); 5321 } 5322 } 5323 } 5324 5325 static const char *ath12k_wmi_hw_mode_to_str(enum ath12k_hw_mode hw_mode) 5326 { 5327 static const char * const mode_str[] = { 5328 [ATH12K_HW_MODE_SMM] = "SMM", 5329 [ATH12K_HW_MODE_DBS] = "DBS", 5330 [ATH12K_HW_MODE_SBS] = "SBS", 5331 [ATH12K_HW_MODE_SBS_UPPER_SHARE] = "SBS_UPPER_SHARE", 5332 [ATH12K_HW_MODE_SBS_LOWER_SHARE] = "SBS_LOWER_SHARE", 5333 }; 5334 5335 if (hw_mode >= ARRAY_SIZE(mode_str)) 5336 return "Unknown"; 5337 5338 return mode_str[hw_mode]; 5339 } 5340 5341 static void 5342 ath12k_wmi_dump_freq_range_per_mac(struct ath12k_base *ab, 5343 struct ath12k_hw_mode_freq_range_arg *freq_range, 5344 enum ath12k_hw_mode hw_mode) 5345 { 5346 u8 i; 5347 5348 for (i = 0; i < MAX_RADIOS; i++) 5349 if (freq_range[i].low_2ghz_freq || freq_range[i].low_5ghz_freq) 5350 ath12k_dbg(ab, ATH12K_DBG_WMI, 5351 "frequency range: %s(%d) mac %d 2 GHz [%d - %d] 5 GHz [%d - %d]", 5352 ath12k_wmi_hw_mode_to_str(hw_mode), 5353 hw_mode, i, 5354 freq_range[i].low_2ghz_freq, 5355 freq_range[i].high_2ghz_freq, 5356 freq_range[i].low_5ghz_freq, 5357 freq_range[i].high_5ghz_freq); 5358 } 5359 5360 static void ath12k_wmi_dump_freq_range(struct ath12k_base *ab) 5361 { 5362 struct 
ath12k_hw_mode_freq_range_arg *freq_range; 5363 u8 i; 5364 5365 for (i = ATH12K_HW_MODE_SMM; i < ATH12K_HW_MODE_MAX; i++) { 5366 freq_range = ab->wmi_ab.hw_mode_info.freq_range_caps[i]; 5367 ath12k_wmi_dump_freq_range_per_mac(ab, freq_range, i); 5368 } 5369 } 5370 5371 static int ath12k_wmi_modify_sbs_freq(struct ath12k_base *ab, u8 phy_id) 5372 { 5373 struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info; 5374 struct ath12k_hw_mode_freq_range_arg *sbs_mac_range, *shared_mac_range; 5375 struct ath12k_hw_mode_freq_range_arg *non_shared_range; 5376 u8 shared_phy_id; 5377 5378 sbs_mac_range = &hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS][phy_id]; 5379 5380 /* if SBS mac range has both 2.4 and 5 GHz ranges, i.e. shared phy_id 5381 * keep the range as it is in SBS 5382 */ 5383 if (sbs_mac_range->low_2ghz_freq && sbs_mac_range->low_5ghz_freq) 5384 return 0; 5385 5386 if (sbs_mac_range->low_2ghz_freq && !sbs_mac_range->low_5ghz_freq) { 5387 ath12k_err(ab, "Invalid DBS/SBS mode with only 2.4Ghz"); 5388 ath12k_wmi_dump_freq_range_per_mac(ab, sbs_mac_range, ATH12K_HW_MODE_SBS); 5389 return -EINVAL; 5390 } 5391 5392 non_shared_range = sbs_mac_range; 5393 /* if SBS mac range has only 5 GHz then it's the non-shared phy, so 5394 * modify the range as per the shared mac. 5395 */ 5396 shared_phy_id = phy_id ? 0 : 1; 5397 shared_mac_range = 5398 &hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS][shared_phy_id]; 5399 5400 if (shared_mac_range->low_5ghz_freq > non_shared_range->low_5ghz_freq) { 5401 ath12k_dbg(ab, ATH12K_DBG_WMI, "high 5 GHz shared"); 5402 /* If the shared mac lower 5 GHz frequency is greater than 5403 * non-shared mac lower 5 GHz frequency then the shared mac has 5404 * high 5 GHz shared with 2.4 GHz. So non-shared mac's 5 GHz high 5405 * freq should be less than the shared mac's low 5 GHz freq. 5406 */ 5407 if (non_shared_range->high_5ghz_freq >= 5408 shared_mac_range->low_5ghz_freq) 5409 non_shared_range->high_5ghz_freq = 5410 max_t(u32, shared_mac_range->low_5ghz_freq - 10, 5411 non_shared_range->low_5ghz_freq); 5412 } else if (shared_mac_range->high_5ghz_freq < 5413 non_shared_range->high_5ghz_freq) { 5414 ath12k_dbg(ab, ATH12K_DBG_WMI, "low 5 GHz shared"); 5415 /* If the shared mac high 5 GHz frequency is less than 5416 * non-shared mac high 5 GHz frequency then the shared mac has 5417 * low 5 GHz shared with 2.4 GHz. So non-shared mac's 5 GHz low 5418 * freq should be greater than the shared mac's high 5 GHz freq. 5419 */ 5420 if (shared_mac_range->high_5ghz_freq >= 5421 non_shared_range->low_5ghz_freq) 5422 non_shared_range->low_5ghz_freq = 5423 min_t(u32, shared_mac_range->high_5ghz_freq + 10, 5424 non_shared_range->high_5ghz_freq); 5425 } else { 5426 ath12k_warn(ab, "invalid SBS range with all 5 GHz shared"); 5427 return -EINVAL; 5428 } 5429 5430 return 0; 5431 } 5432 5433 static void ath12k_wmi_update_sbs_freq_info(struct ath12k_base *ab) 5434 { 5435 struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info; 5436 struct ath12k_hw_mode_freq_range_arg *mac_range; 5437 u16 sbs_range_sep; 5438 u8 phy_id; 5439 int ret; 5440 5441 mac_range = hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS]; 5442 5443 /* If sbs_lower_band_end_freq has a value, then the frequency range 5444 * will be split using that value. 
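 * For example (purely illustrative numbers), if firmware reported
 * sbs_lower_band_end_freq as 5730 MHz, the lower-share mode would end the
 * shared MAC's 5 GHz range at 5730 MHz while the upper-share mode would
 * start it at 5740 MHz (sbs_range_sep + 10), matching the fill helpers
 * above.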
5445 */ 5446 sbs_range_sep = ab->wmi_ab.sbs_lower_band_end_freq; 5447 if (sbs_range_sep) { 5448 ath12k_wmi_fill_upper_share_sbs_freq(ab, sbs_range_sep, 5449 mac_range); 5450 ath12k_wmi_fill_lower_share_sbs_freq(ab, sbs_range_sep, 5451 mac_range); 5452 /* Hardware specifies the range boundary with sbs_range_sep, 5453 * (i.e. the boundary between 5 GHz high and 5 GHz low), 5454 * reset the original one to make sure it will not get used. 5455 */ 5456 memset(mac_range, 0, sizeof(*mac_range) * MAX_RADIOS); 5457 return; 5458 } 5459 5460 /* If sbs_lower_band_end_freq is not set that means firmware will send one 5461 * shared mac range and one non-shared mac range. so update that freq. 5462 */ 5463 for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) { 5464 ret = ath12k_wmi_modify_sbs_freq(ab, phy_id); 5465 if (ret) { 5466 memset(mac_range, 0, sizeof(*mac_range) * MAX_RADIOS); 5467 break; 5468 } 5469 } 5470 } 5471 5472 static void 5473 ath12k_wmi_update_mac_freq_info(struct ath12k_base *ab, 5474 enum wmi_host_hw_mode_config_type hw_config_type, 5475 u32 phy_id, 5476 struct ath12k_svc_ext_mac_phy_info *mac_cap) 5477 { 5478 if (phy_id >= MAX_RADIOS) { 5479 ath12k_err(ab, "mac more than two not supported: %d", phy_id); 5480 return; 5481 } 5482 5483 ath12k_dbg(ab, ATH12K_DBG_WMI, 5484 "hw_mode_cfg %d mac %d band 0x%x SBS cutoff freq %d 2 GHz [%d - %d] 5 GHz [%d - %d]", 5485 hw_config_type, phy_id, mac_cap->supported_bands, 5486 ab->wmi_ab.sbs_lower_band_end_freq, 5487 mac_cap->hw_freq_range.low_2ghz_freq, 5488 mac_cap->hw_freq_range.high_2ghz_freq, 5489 mac_cap->hw_freq_range.low_5ghz_freq, 5490 mac_cap->hw_freq_range.high_5ghz_freq); 5491 5492 switch (hw_config_type) { 5493 case WMI_HOST_HW_MODE_SINGLE: 5494 if (phy_id) { 5495 ath12k_dbg(ab, ATH12K_DBG_WMI, "mac phy 1 is not supported"); 5496 break; 5497 } 5498 ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SMM, phy_id); 5499 break; 5500 5501 case WMI_HOST_HW_MODE_DBS: 5502 if (!ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_DBS)) 5503 ath12k_wmi_update_freq_info(ab, mac_cap, 5504 ATH12K_HW_MODE_DBS, phy_id); 5505 break; 5506 case WMI_HOST_HW_MODE_DBS_SBS: 5507 case WMI_HOST_HW_MODE_DBS_OR_SBS: 5508 ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_DBS, phy_id); 5509 if (ab->wmi_ab.sbs_lower_band_end_freq || 5510 mac_cap->hw_freq_range.low_5ghz_freq || 5511 mac_cap->hw_freq_range.low_2ghz_freq) 5512 ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SBS, 5513 phy_id); 5514 5515 if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_DBS)) 5516 ath12k_wmi_update_dbs_freq_info(ab); 5517 if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS)) 5518 ath12k_wmi_update_sbs_freq_info(ab); 5519 break; 5520 case WMI_HOST_HW_MODE_SBS: 5521 case WMI_HOST_HW_MODE_SBS_PASSIVE: 5522 ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SBS, phy_id); 5523 if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS)) 5524 ath12k_wmi_update_sbs_freq_info(ab); 5525 5526 break; 5527 default: 5528 break; 5529 } 5530 } 5531 5532 static bool ath12k_wmi_sbs_range_present(struct ath12k_base *ab) 5533 { 5534 if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS) || 5535 (ab->wmi_ab.sbs_lower_band_end_freq && 5536 ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS_LOWER_SHARE) && 5537 ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS_UPPER_SHARE))) 5538 return true; 5539 5540 return false; 5541 } 5542 5543 static int ath12k_wmi_update_hw_mode_list(struct ath12k_base *ab) 5544 { 5545 struct ath12k_svc_ext_info *svc_ext_info = 
&ab->wmi_ab.svc_ext_info; 5546 struct ath12k_hw_mode_info *info = &ab->wmi_ab.hw_mode_info; 5547 enum wmi_host_hw_mode_config_type hw_config_type; 5548 struct ath12k_svc_ext_mac_phy_info *tmp; 5549 bool dbs_mode = false, sbs_mode = false; 5550 u32 i, j = 0; 5551 5552 if (!svc_ext_info->num_hw_modes) { 5553 ath12k_err(ab, "invalid number of hw modes"); 5554 return -EINVAL; 5555 } 5556 5557 ath12k_dbg(ab, ATH12K_DBG_WMI, "updated HW mode list: num modes %d", 5558 svc_ext_info->num_hw_modes); 5559 5560 memset(info->freq_range_caps, 0, sizeof(info->freq_range_caps)); 5561 5562 for (i = 0; i < svc_ext_info->num_hw_modes; i++) { 5563 if (j >= ATH12K_MAX_MAC_PHY_CAP) 5564 return -EINVAL; 5565 5566 /* Update for MAC0 */ 5567 tmp = &svc_ext_info->mac_phy_info[j++]; 5568 hw_config_type = tmp->hw_mode_config_type; 5569 ath12k_wmi_update_mac_freq_info(ab, hw_config_type, tmp->phy_id, tmp); 5570 5571 /* SBS and DBS have dual MAC. Up to 2 MACs are considered. */ 5572 if (hw_config_type == WMI_HOST_HW_MODE_DBS || 5573 hw_config_type == WMI_HOST_HW_MODE_SBS_PASSIVE || 5574 hw_config_type == WMI_HOST_HW_MODE_SBS || 5575 hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS) { 5576 if (j >= ATH12K_MAX_MAC_PHY_CAP) 5577 return -EINVAL; 5578 /* Update for MAC1 */ 5579 tmp = &svc_ext_info->mac_phy_info[j++]; 5580 ath12k_wmi_update_mac_freq_info(ab, hw_config_type, 5581 tmp->phy_id, tmp); 5582 5583 if (hw_config_type == WMI_HOST_HW_MODE_DBS || 5584 hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS) 5585 dbs_mode = true; 5586 5587 if (ath12k_wmi_sbs_range_present(ab) && 5588 (hw_config_type == WMI_HOST_HW_MODE_SBS_PASSIVE || 5589 hw_config_type == WMI_HOST_HW_MODE_SBS || 5590 hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS)) 5591 sbs_mode = true; 5592 } 5593 } 5594 5595 info->support_dbs = dbs_mode; 5596 info->support_sbs = sbs_mode; 5597 5598 ath12k_wmi_dump_freq_range(ab); 5599 5600 return 0; 5601 } 5602 5603 static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab, 5604 u16 tag, u16 len, 5605 const void *ptr, void *data) 5606 { 5607 const struct ath12k_wmi_dbs_or_sbs_cap_params *dbs_or_sbs_caps; 5608 struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0]; 5609 struct ath12k_wmi_svc_rdy_ext2_parse *parse = data; 5610 int ret; 5611 5612 switch (tag) { 5613 case WMI_TAG_SERVICE_READY_EXT2_EVENT: 5614 ret = ath12k_pull_svc_ready_ext2(wmi_handle, ptr, 5615 &parse->arg); 5616 if (ret) { 5617 ath12k_warn(ab, 5618 "failed to extract wmi service ready ext2 parameters: %d\n", 5619 ret); 5620 return ret; 5621 } 5622 5623 ab->wmi_ab.dp_peer_meta_data_ver = 5624 u32_get_bits(parse->arg.target_cap_flags, 5625 WMI_TARGET_CAP_FLAGS_RX_PEER_METADATA_VERSION); 5626 break; 5627 5628 case WMI_TAG_ARRAY_STRUCT: 5629 if (!parse->dma_ring_cap_done) { 5630 ret = ath12k_wmi_dma_ring_caps(ab, len, ptr, 5631 &parse->dma_caps_parse); 5632 if (ret) 5633 return ret; 5634 5635 parse->dma_ring_cap_done = true; 5636 } else if (!parse->spectral_bin_scaling_done) { 5637 /* TODO: This is a place-holder as WMI tag for 5638 * spectral scaling is before 5639 * WMI_TAG_MAC_PHY_CAPABILITIES_EXT 5640 */ 5641 parse->spectral_bin_scaling_done = true; 5642 } else if (!parse->mac_phy_caps_ext_done) { 5643 ret = ath12k_wmi_tlv_iter(ab, ptr, len, 5644 ath12k_wmi_tlv_mac_phy_caps_ext, 5645 parse); 5646 if (ret) { 5647 ath12k_warn(ab, "failed to parse extended MAC PHY capabilities WMI TLV: %d\n", 5648 ret); 5649 return ret; 5650 } 5651 5652 parse->mac_phy_caps_ext_done = true; 5653 } else if (!parse->hal_reg_caps_ext2_done) { 5654 parse->hal_reg_caps_ext2_done = 
true; 5655 } else if (!parse->scan_radio_caps_ext2_done) { 5656 parse->scan_radio_caps_ext2_done = true; 5657 } else if (!parse->twt_caps_done) { 5658 parse->twt_caps_done = true; 5659 } else if (!parse->htt_msdu_idx_to_qtype_map_done) { 5660 parse->htt_msdu_idx_to_qtype_map_done = true; 5661 } else if (!parse->dbs_or_sbs_cap_ext_done) { 5662 dbs_or_sbs_caps = ptr; 5663 ab->wmi_ab.sbs_lower_band_end_freq = 5664 __le32_to_cpu(dbs_or_sbs_caps->sbs_lower_band_end_freq); 5665 5666 ath12k_dbg(ab, ATH12K_DBG_WMI, "sbs_lower_band_end_freq %u\n", 5667 ab->wmi_ab.sbs_lower_band_end_freq); 5668 5669 ret = ath12k_wmi_update_hw_mode_list(ab); 5670 if (ret) { 5671 ath12k_warn(ab, "failed to update hw mode list: %d\n", 5672 ret); 5673 return ret; 5674 } 5675 5676 parse->dbs_or_sbs_cap_ext_done = true; 5677 } 5678 5679 break; 5680 default: 5681 break; 5682 } 5683 5684 return 0; 5685 } 5686 5687 static int ath12k_service_ready_ext2_event(struct ath12k_base *ab, 5688 struct sk_buff *skb) 5689 { 5690 struct ath12k_wmi_svc_rdy_ext2_parse svc_rdy_ext2 = { }; 5691 int ret; 5692 5693 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, 5694 ath12k_wmi_svc_rdy_ext2_parse, 5695 &svc_rdy_ext2); 5696 if (ret) { 5697 ath12k_warn(ab, "failed to parse ext2 event tlv %d\n", ret); 5698 goto err; 5699 } 5700 5701 complete(&ab->wmi_ab.service_ready); 5702 5703 return 0; 5704 5705 err: 5706 ath12k_wmi_free_dbring_caps(ab); 5707 return ret; 5708 } 5709 5710 static int ath12k_pull_vdev_start_resp_tlv(struct ath12k_base *ab, struct sk_buff *skb, 5711 struct wmi_vdev_start_resp_event *vdev_rsp) 5712 { 5713 const void **tb; 5714 const struct wmi_vdev_start_resp_event *ev; 5715 int ret; 5716 5717 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 5718 if (IS_ERR(tb)) { 5719 ret = PTR_ERR(tb); 5720 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 5721 return ret; 5722 } 5723 5724 ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT]; 5725 if (!ev) { 5726 ath12k_warn(ab, "failed to fetch vdev start resp ev"); 5727 kfree(tb); 5728 return -EPROTO; 5729 } 5730 5731 *vdev_rsp = *ev; 5732 5733 kfree(tb); 5734 return 0; 5735 } 5736 5737 static struct ath12k_reg_rule 5738 *create_ext_reg_rules_from_wmi(u32 num_reg_rules, 5739 struct ath12k_wmi_reg_rule_ext_params *wmi_reg_rule) 5740 { 5741 struct ath12k_reg_rule *reg_rule_ptr; 5742 u32 count; 5743 5744 reg_rule_ptr = kzalloc((num_reg_rules * sizeof(*reg_rule_ptr)), 5745 GFP_ATOMIC); 5746 5747 if (!reg_rule_ptr) 5748 return NULL; 5749 5750 for (count = 0; count < num_reg_rules; count++) { 5751 reg_rule_ptr[count].start_freq = 5752 le32_get_bits(wmi_reg_rule[count].freq_info, 5753 REG_RULE_START_FREQ); 5754 reg_rule_ptr[count].end_freq = 5755 le32_get_bits(wmi_reg_rule[count].freq_info, 5756 REG_RULE_END_FREQ); 5757 reg_rule_ptr[count].max_bw = 5758 le32_get_bits(wmi_reg_rule[count].bw_pwr_info, 5759 REG_RULE_MAX_BW); 5760 reg_rule_ptr[count].reg_power = 5761 le32_get_bits(wmi_reg_rule[count].bw_pwr_info, 5762 REG_RULE_REG_PWR); 5763 reg_rule_ptr[count].ant_gain = 5764 le32_get_bits(wmi_reg_rule[count].bw_pwr_info, 5765 REG_RULE_ANT_GAIN); 5766 reg_rule_ptr[count].flags = 5767 le32_get_bits(wmi_reg_rule[count].flag_info, 5768 REG_RULE_FLAGS); 5769 reg_rule_ptr[count].psd_flag = 5770 le32_get_bits(wmi_reg_rule[count].psd_power_info, 5771 REG_RULE_PSD_INFO); 5772 reg_rule_ptr[count].psd_eirp = 5773 le32_get_bits(wmi_reg_rule[count].psd_power_info, 5774 REG_RULE_PSD_EIRP); 5775 } 5776 5777 return reg_rule_ptr; 5778 } 5779 5780 static u8 ath12k_wmi_ignore_num_extra_rules(struct 
ath12k_wmi_reg_rule_ext_params *rule, 5781 u32 num_reg_rules) 5782 { 5783 u8 num_invalid_5ghz_rules = 0; 5784 u32 count, start_freq; 5785 5786 for (count = 0; count < num_reg_rules; count++) { 5787 start_freq = le32_get_bits(rule[count].freq_info, REG_RULE_START_FREQ); 5788 5789 if (start_freq >= ATH12K_MIN_6GHZ_FREQ) 5790 num_invalid_5ghz_rules++; 5791 } 5792 5793 return num_invalid_5ghz_rules; 5794 } 5795 5796 static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab, 5797 struct sk_buff *skb, 5798 struct ath12k_reg_info *reg_info) 5799 { 5800 const void **tb; 5801 const struct wmi_reg_chan_list_cc_ext_event *ev; 5802 struct ath12k_wmi_reg_rule_ext_params *ext_wmi_reg_rule; 5803 u32 num_2g_reg_rules, num_5g_reg_rules; 5804 u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE]; 5805 u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE]; 5806 u8 num_invalid_5ghz_ext_rules; 5807 u32 total_reg_rules = 0; 5808 int ret, i, j; 5809 5810 ath12k_dbg(ab, ATH12K_DBG_WMI, "processing regulatory ext channel list\n"); 5811 5812 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 5813 if (IS_ERR(tb)) { 5814 ret = PTR_ERR(tb); 5815 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 5816 return ret; 5817 } 5818 5819 ev = tb[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT]; 5820 if (!ev) { 5821 ath12k_warn(ab, "failed to fetch reg chan list ext update ev\n"); 5822 kfree(tb); 5823 return -EPROTO; 5824 } 5825 5826 reg_info->num_2g_reg_rules = le32_to_cpu(ev->num_2g_reg_rules); 5827 reg_info->num_5g_reg_rules = le32_to_cpu(ev->num_5g_reg_rules); 5828 reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] = 5829 le32_to_cpu(ev->num_6g_reg_rules_ap_lpi); 5830 reg_info->num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP] = 5831 le32_to_cpu(ev->num_6g_reg_rules_ap_sp); 5832 reg_info->num_6g_reg_rules_ap[WMI_REG_VLP_AP] = 5833 le32_to_cpu(ev->num_6g_reg_rules_ap_vlp); 5834 5835 for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { 5836 reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] = 5837 le32_to_cpu(ev->num_6g_reg_rules_cl_lpi[i]); 5838 reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] = 5839 le32_to_cpu(ev->num_6g_reg_rules_cl_sp[i]); 5840 reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] = 5841 le32_to_cpu(ev->num_6g_reg_rules_cl_vlp[i]); 5842 } 5843 5844 num_2g_reg_rules = reg_info->num_2g_reg_rules; 5845 total_reg_rules += num_2g_reg_rules; 5846 num_5g_reg_rules = reg_info->num_5g_reg_rules; 5847 total_reg_rules += num_5g_reg_rules; 5848 5849 if (num_2g_reg_rules > MAX_REG_RULES || num_5g_reg_rules > MAX_REG_RULES) { 5850 ath12k_warn(ab, "Num reg rules for 2G/5G exceeds max limit (num_2g_reg_rules: %d num_5g_reg_rules: %d max_rules: %d)\n", 5851 num_2g_reg_rules, num_5g_reg_rules, MAX_REG_RULES); 5852 kfree(tb); 5853 return -EINVAL; 5854 } 5855 5856 for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) { 5857 num_6g_reg_rules_ap[i] = reg_info->num_6g_reg_rules_ap[i]; 5858 5859 if (num_6g_reg_rules_ap[i] > MAX_6GHZ_REG_RULES) { 5860 ath12k_warn(ab, "Num 6G reg rules for AP mode(%d) exceeds max limit (num_6g_reg_rules_ap: %d, max_rules: %d)\n", 5861 i, num_6g_reg_rules_ap[i], MAX_6GHZ_REG_RULES); 5862 kfree(tb); 5863 return -EINVAL; 5864 } 5865 5866 total_reg_rules += num_6g_reg_rules_ap[i]; 5867 } 5868 5869 for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { 5870 num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] = 5871 reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i]; 5872 total_reg_rules += num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i]; 5873 5874 num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] = 5875 
reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i]; 5876 total_reg_rules += num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i]; 5877 5878 num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] = 5879 reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i]; 5880 total_reg_rules += num_6g_reg_rules_cl[WMI_REG_VLP_AP][i]; 5881 5882 if (num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] > MAX_6GHZ_REG_RULES || 5883 num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] > MAX_6GHZ_REG_RULES || 5884 num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] > MAX_6GHZ_REG_RULES) { 5885 ath12k_warn(ab, "Num 6g client reg rules exceeds max limit, for client(type: %d)\n", 5886 i); 5887 kfree(tb); 5888 return -EINVAL; 5889 } 5890 } 5891 5892 if (!total_reg_rules) { 5893 ath12k_warn(ab, "No reg rules available\n"); 5894 kfree(tb); 5895 return -EINVAL; 5896 } 5897 5898 memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN); 5899 5900 reg_info->dfs_region = le32_to_cpu(ev->dfs_region); 5901 reg_info->phybitmap = le32_to_cpu(ev->phybitmap); 5902 reg_info->num_phy = le32_to_cpu(ev->num_phy); 5903 reg_info->phy_id = le32_to_cpu(ev->phy_id); 5904 reg_info->ctry_code = le32_to_cpu(ev->country_id); 5905 reg_info->reg_dmn_pair = le32_to_cpu(ev->domain_code); 5906 5907 switch (le32_to_cpu(ev->status_code)) { 5908 case WMI_REG_SET_CC_STATUS_PASS: 5909 reg_info->status_code = REG_SET_CC_STATUS_PASS; 5910 break; 5911 case WMI_REG_CURRENT_ALPHA2_NOT_FOUND: 5912 reg_info->status_code = REG_CURRENT_ALPHA2_NOT_FOUND; 5913 break; 5914 case WMI_REG_INIT_ALPHA2_NOT_FOUND: 5915 reg_info->status_code = REG_INIT_ALPHA2_NOT_FOUND; 5916 break; 5917 case WMI_REG_SET_CC_CHANGE_NOT_ALLOWED: 5918 reg_info->status_code = REG_SET_CC_CHANGE_NOT_ALLOWED; 5919 break; 5920 case WMI_REG_SET_CC_STATUS_NO_MEMORY: 5921 reg_info->status_code = REG_SET_CC_STATUS_NO_MEMORY; 5922 break; 5923 case WMI_REG_SET_CC_STATUS_FAIL: 5924 reg_info->status_code = REG_SET_CC_STATUS_FAIL; 5925 break; 5926 } 5927 5928 reg_info->is_ext_reg_event = true; 5929 5930 reg_info->min_bw_2g = le32_to_cpu(ev->min_bw_2g); 5931 reg_info->max_bw_2g = le32_to_cpu(ev->max_bw_2g); 5932 reg_info->min_bw_5g = le32_to_cpu(ev->min_bw_5g); 5933 reg_info->max_bw_5g = le32_to_cpu(ev->max_bw_5g); 5934 reg_info->min_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->min_bw_6g_ap_lpi); 5935 reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->max_bw_6g_ap_lpi); 5936 reg_info->min_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->min_bw_6g_ap_sp); 5937 reg_info->max_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->max_bw_6g_ap_sp); 5938 reg_info->min_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->min_bw_6g_ap_vlp); 5939 reg_info->max_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->max_bw_6g_ap_vlp); 5940 5941 for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { 5942 reg_info->min_bw_6g_client[WMI_REG_INDOOR_AP][i] = 5943 le32_to_cpu(ev->min_bw_6g_client_lpi[i]); 5944 reg_info->max_bw_6g_client[WMI_REG_INDOOR_AP][i] = 5945 le32_to_cpu(ev->max_bw_6g_client_lpi[i]); 5946 reg_info->min_bw_6g_client[WMI_REG_STD_POWER_AP][i] = 5947 le32_to_cpu(ev->min_bw_6g_client_sp[i]); 5948 reg_info->max_bw_6g_client[WMI_REG_STD_POWER_AP][i] = 5949 le32_to_cpu(ev->max_bw_6g_client_sp[i]); 5950 reg_info->min_bw_6g_client[WMI_REG_VLP_AP][i] = 5951 le32_to_cpu(ev->min_bw_6g_client_vlp[i]); 5952 reg_info->max_bw_6g_client[WMI_REG_VLP_AP][i] = 5953 le32_to_cpu(ev->max_bw_6g_client_vlp[i]); 5954 } 5955 5956 ath12k_dbg(ab, ATH12K_DBG_WMI, 5957 "%s:cc_ext %s dfs %d BW: min_2g %d max_2g %d min_5g %d max_5g %d phy_bitmap 0x%x", 5958 __func__, reg_info->alpha2, reg_info->dfs_region, 5959 
reg_info->min_bw_2g, reg_info->max_bw_2g, 5960 reg_info->min_bw_5g, reg_info->max_bw_5g, 5961 reg_info->phybitmap); 5962 5963 ath12k_dbg(ab, ATH12K_DBG_WMI, 5964 "num_2g_reg_rules %d num_5g_reg_rules %d", 5965 num_2g_reg_rules, num_5g_reg_rules); 5966 5967 ath12k_dbg(ab, ATH12K_DBG_WMI, 5968 "num_6g_reg_rules_ap_lpi: %d num_6g_reg_rules_ap_sp: %d num_6g_reg_rules_ap_vlp: %d", 5969 num_6g_reg_rules_ap[WMI_REG_INDOOR_AP], 5970 num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP], 5971 num_6g_reg_rules_ap[WMI_REG_VLP_AP]); 5972 5973 ath12k_dbg(ab, ATH12K_DBG_WMI, 5974 "6g Regular client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d", 5975 num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_DEFAULT_CLIENT], 5976 num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_DEFAULT_CLIENT], 5977 num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_DEFAULT_CLIENT]); 5978 5979 ath12k_dbg(ab, ATH12K_DBG_WMI, 5980 "6g Subordinate client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d", 5981 num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_SUBORDINATE_CLIENT], 5982 num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_SUBORDINATE_CLIENT], 5983 num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_SUBORDINATE_CLIENT]); 5984 5985 ext_wmi_reg_rule = 5986 (struct ath12k_wmi_reg_rule_ext_params *)((u8 *)ev 5987 + sizeof(*ev) 5988 + sizeof(struct wmi_tlv)); 5989 5990 if (num_2g_reg_rules) { 5991 reg_info->reg_rules_2g_ptr = 5992 create_ext_reg_rules_from_wmi(num_2g_reg_rules, 5993 ext_wmi_reg_rule); 5994 5995 if (!reg_info->reg_rules_2g_ptr) { 5996 kfree(tb); 5997 ath12k_warn(ab, "Unable to allocate memory for 2g rules\n"); 5998 return -ENOMEM; 5999 } 6000 } 6001 6002 ext_wmi_reg_rule += num_2g_reg_rules; 6003 6004 /* Firmware might include 6 GHz reg rules in the 5 GHz rule list 6005 * for a few countries along with separate 6 GHz rules. 6006 * Having the same 6 GHz reg rule in both the 5 GHz and 6 GHz rule 6007 * lists makes the intersect check true, and the same rules will be 6008 * shown multiple times in the iw output. 6009 * Hence, avoid parsing 6 GHz rules from the 5 GHz reg rule list. 6010 */ 6011 num_invalid_5ghz_ext_rules = ath12k_wmi_ignore_num_extra_rules(ext_wmi_reg_rule, 6012 num_5g_reg_rules); 6013 6014 if (num_invalid_5ghz_ext_rules) { 6015 ath12k_dbg(ab, ATH12K_DBG_WMI, 6016 "CC: %s, 5 GHz reg rules from fw: %d, invalid 5 GHz rules: %d", 6017 reg_info->alpha2, reg_info->num_5g_reg_rules, 6018 num_invalid_5ghz_ext_rules); 6019 6020 num_5g_reg_rules = num_5g_reg_rules - num_invalid_5ghz_ext_rules; 6021 reg_info->num_5g_reg_rules = num_5g_reg_rules; 6022 } 6023 6024 if (num_5g_reg_rules) { 6025 reg_info->reg_rules_5g_ptr = 6026 create_ext_reg_rules_from_wmi(num_5g_reg_rules, 6027 ext_wmi_reg_rule); 6028 6029 if (!reg_info->reg_rules_5g_ptr) { 6030 kfree(tb); 6031 ath12k_warn(ab, "Unable to allocate memory for 5g rules\n"); 6032 return -ENOMEM; 6033 } 6034 } 6035 6036 /* The number of 5 GHz reg rules was adjusted above, but ext_wmi_reg_rule 6037 * still needs to be advanced past all of the rules firmware reported. 6038 * 6039 * NOTE: num_invalid_5ghz_ext_rules will be 0 in all other cases.
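 * For example (illustrative counts), if firmware reports 10 5 GHz rules
 * and 2 of them start at or above ATH12K_MIN_6GHZ_FREQ, only 8 rules are
 * converted above, but the pointer below must still advance by all 10
 * entries to land on the first 6 GHz AP rule.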
6040 */ 6041 ext_wmi_reg_rule += (num_5g_reg_rules + num_invalid_5ghz_ext_rules); 6042 6043 for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) { 6044 reg_info->reg_rules_6g_ap_ptr[i] = 6045 create_ext_reg_rules_from_wmi(num_6g_reg_rules_ap[i], 6046 ext_wmi_reg_rule); 6047 6048 if (!reg_info->reg_rules_6g_ap_ptr[i]) { 6049 kfree(tb); 6050 ath12k_warn(ab, "Unable to Allocate memory for 6g ap rules\n"); 6051 return -ENOMEM; 6052 } 6053 6054 ext_wmi_reg_rule += num_6g_reg_rules_ap[i]; 6055 } 6056 6057 for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) { 6058 for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { 6059 reg_info->reg_rules_6g_client_ptr[j][i] = 6060 create_ext_reg_rules_from_wmi(num_6g_reg_rules_cl[j][i], 6061 ext_wmi_reg_rule); 6062 6063 if (!reg_info->reg_rules_6g_client_ptr[j][i]) { 6064 kfree(tb); 6065 ath12k_warn(ab, "Unable to Allocate memory for 6g client rules\n"); 6066 return -ENOMEM; 6067 } 6068 6069 ext_wmi_reg_rule += num_6g_reg_rules_cl[j][i]; 6070 } 6071 } 6072 6073 reg_info->client_type = le32_to_cpu(ev->client_type); 6074 reg_info->rnr_tpe_usable = ev->rnr_tpe_usable; 6075 reg_info->unspecified_ap_usable = ev->unspecified_ap_usable; 6076 reg_info->domain_code_6g_ap[WMI_REG_INDOOR_AP] = 6077 le32_to_cpu(ev->domain_code_6g_ap_lpi); 6078 reg_info->domain_code_6g_ap[WMI_REG_STD_POWER_AP] = 6079 le32_to_cpu(ev->domain_code_6g_ap_sp); 6080 reg_info->domain_code_6g_ap[WMI_REG_VLP_AP] = 6081 le32_to_cpu(ev->domain_code_6g_ap_vlp); 6082 6083 for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) { 6084 reg_info->domain_code_6g_client[WMI_REG_INDOOR_AP][i] = 6085 le32_to_cpu(ev->domain_code_6g_client_lpi[i]); 6086 reg_info->domain_code_6g_client[WMI_REG_STD_POWER_AP][i] = 6087 le32_to_cpu(ev->domain_code_6g_client_sp[i]); 6088 reg_info->domain_code_6g_client[WMI_REG_VLP_AP][i] = 6089 le32_to_cpu(ev->domain_code_6g_client_vlp[i]); 6090 } 6091 6092 reg_info->domain_code_6g_super_id = le32_to_cpu(ev->domain_code_6g_super_id); 6093 6094 ath12k_dbg(ab, ATH12K_DBG_WMI, "6g client_type: %d domain_code_6g_super_id: %d", 6095 reg_info->client_type, reg_info->domain_code_6g_super_id); 6096 6097 ath12k_dbg(ab, ATH12K_DBG_WMI, "processed regulatory ext channel list\n"); 6098 6099 kfree(tb); 6100 return 0; 6101 } 6102 6103 static int ath12k_pull_peer_del_resp_ev(struct ath12k_base *ab, struct sk_buff *skb, 6104 struct wmi_peer_delete_resp_event *peer_del_resp) 6105 { 6106 const void **tb; 6107 const struct wmi_peer_delete_resp_event *ev; 6108 int ret; 6109 6110 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6111 if (IS_ERR(tb)) { 6112 ret = PTR_ERR(tb); 6113 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6114 return ret; 6115 } 6116 6117 ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT]; 6118 if (!ev) { 6119 ath12k_warn(ab, "failed to fetch peer delete resp ev"); 6120 kfree(tb); 6121 return -EPROTO; 6122 } 6123 6124 memset(peer_del_resp, 0, sizeof(*peer_del_resp)); 6125 6126 peer_del_resp->vdev_id = ev->vdev_id; 6127 ether_addr_copy(peer_del_resp->peer_macaddr.addr, 6128 ev->peer_macaddr.addr); 6129 6130 kfree(tb); 6131 return 0; 6132 } 6133 6134 static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab, 6135 struct sk_buff *skb, 6136 u32 *vdev_id) 6137 { 6138 const void **tb; 6139 const struct wmi_vdev_delete_resp_event *ev; 6140 int ret; 6141 6142 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6143 if (IS_ERR(tb)) { 6144 ret = PTR_ERR(tb); 6145 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6146 return ret; 6147 } 6148 6149 ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT]; 6150 if (!ev) { 6151 
ath12k_warn(ab, "failed to fetch vdev delete resp ev"); 6152 kfree(tb); 6153 return -EPROTO; 6154 } 6155 6156 *vdev_id = le32_to_cpu(ev->vdev_id); 6157 6158 kfree(tb); 6159 return 0; 6160 } 6161 6162 static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab, 6163 struct sk_buff *skb, 6164 u32 *vdev_id, u32 *tx_status) 6165 { 6166 const void **tb; 6167 const struct wmi_bcn_tx_status_event *ev; 6168 int ret; 6169 6170 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6171 if (IS_ERR(tb)) { 6172 ret = PTR_ERR(tb); 6173 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6174 return ret; 6175 } 6176 6177 ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT]; 6178 if (!ev) { 6179 ath12k_warn(ab, "failed to fetch bcn tx status ev"); 6180 kfree(tb); 6181 return -EPROTO; 6182 } 6183 6184 *vdev_id = le32_to_cpu(ev->vdev_id); 6185 *tx_status = le32_to_cpu(ev->tx_status); 6186 6187 kfree(tb); 6188 return 0; 6189 } 6190 6191 static int ath12k_pull_vdev_stopped_param_tlv(struct ath12k_base *ab, struct sk_buff *skb, 6192 u32 *vdev_id) 6193 { 6194 const void **tb; 6195 const struct wmi_vdev_stopped_event *ev; 6196 int ret; 6197 6198 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6199 if (IS_ERR(tb)) { 6200 ret = PTR_ERR(tb); 6201 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6202 return ret; 6203 } 6204 6205 ev = tb[WMI_TAG_VDEV_STOPPED_EVENT]; 6206 if (!ev) { 6207 ath12k_warn(ab, "failed to fetch vdev stop ev"); 6208 kfree(tb); 6209 return -EPROTO; 6210 } 6211 6212 *vdev_id = le32_to_cpu(ev->vdev_id); 6213 6214 kfree(tb); 6215 return 0; 6216 } 6217 6218 static int ath12k_wmi_tlv_mgmt_rx_parse(struct ath12k_base *ab, 6219 u16 tag, u16 len, 6220 const void *ptr, void *data) 6221 { 6222 struct wmi_tlv_mgmt_rx_parse *parse = data; 6223 6224 switch (tag) { 6225 case WMI_TAG_MGMT_RX_HDR: 6226 parse->fixed = ptr; 6227 break; 6228 case WMI_TAG_ARRAY_BYTE: 6229 if (!parse->frame_buf_done) { 6230 parse->frame_buf = ptr; 6231 parse->frame_buf_done = true; 6232 } 6233 break; 6234 } 6235 return 0; 6236 } 6237 6238 static int ath12k_pull_mgmt_rx_params_tlv(struct ath12k_base *ab, 6239 struct sk_buff *skb, 6240 struct ath12k_wmi_mgmt_rx_arg *hdr) 6241 { 6242 struct wmi_tlv_mgmt_rx_parse parse = { }; 6243 const struct ath12k_wmi_mgmt_rx_params *ev; 6244 const u8 *frame; 6245 int i, ret; 6246 6247 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, 6248 ath12k_wmi_tlv_mgmt_rx_parse, 6249 &parse); 6250 if (ret) { 6251 ath12k_warn(ab, "failed to parse mgmt rx tlv %d\n", ret); 6252 return ret; 6253 } 6254 6255 ev = parse.fixed; 6256 frame = parse.frame_buf; 6257 6258 if (!ev || !frame) { 6259 ath12k_warn(ab, "failed to fetch mgmt rx hdr"); 6260 return -EPROTO; 6261 } 6262 6263 hdr->pdev_id = le32_to_cpu(ev->pdev_id); 6264 hdr->chan_freq = le32_to_cpu(ev->chan_freq); 6265 hdr->channel = le32_to_cpu(ev->channel); 6266 hdr->snr = le32_to_cpu(ev->snr); 6267 hdr->rate = le32_to_cpu(ev->rate); 6268 hdr->phy_mode = le32_to_cpu(ev->phy_mode); 6269 hdr->buf_len = le32_to_cpu(ev->buf_len); 6270 hdr->status = le32_to_cpu(ev->status); 6271 hdr->flags = le32_to_cpu(ev->flags); 6272 hdr->rssi = a_sle32_to_cpu(ev->rssi); 6273 hdr->tsf_delta = le32_to_cpu(ev->tsf_delta); 6274 6275 for (i = 0; i < ATH_MAX_ANTENNA; i++) 6276 hdr->rssi_ctl[i] = le32_to_cpu(ev->rssi_ctl[i]); 6277 6278 if (skb->len < (frame - skb->data) + hdr->buf_len) { 6279 ath12k_warn(ab, "invalid length in mgmt rx hdr ev"); 6280 return -EPROTO; 6281 } 6282 6283 /* shift the sk_buff to point to `frame` */ 6284 skb_trim(skb, 0); 6285 skb_put(skb, frame - 
skb->data); 6286 skb_pull(skb, frame - skb->data); 6287 skb_put(skb, hdr->buf_len); 6288 6289 return 0; 6290 } 6291 6292 static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id, 6293 u32 status, u32 ack_rssi) 6294 { 6295 struct sk_buff *msdu; 6296 struct ieee80211_tx_info *info; 6297 struct ath12k_skb_cb *skb_cb; 6298 int num_mgmt; 6299 6300 spin_lock_bh(&ar->txmgmt_idr_lock); 6301 msdu = idr_find(&ar->txmgmt_idr, desc_id); 6302 6303 if (!msdu) { 6304 ath12k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n", 6305 desc_id); 6306 spin_unlock_bh(&ar->txmgmt_idr_lock); 6307 return -ENOENT; 6308 } 6309 6310 idr_remove(&ar->txmgmt_idr, desc_id); 6311 spin_unlock_bh(&ar->txmgmt_idr_lock); 6312 6313 skb_cb = ATH12K_SKB_CB(msdu); 6314 dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); 6315 6316 info = IEEE80211_SKB_CB(msdu); 6317 memset(&info->status, 0, sizeof(info->status)); 6318 6319 /* skip tx rate update from ieee80211_status*/ 6320 info->status.rates[0].idx = -1; 6321 6322 if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status) { 6323 info->flags |= IEEE80211_TX_STAT_ACK; 6324 info->status.ack_signal = ack_rssi; 6325 info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID; 6326 } 6327 6328 if ((info->flags & IEEE80211_TX_CTL_NO_ACK) && !status) 6329 info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; 6330 6331 ieee80211_tx_status_irqsafe(ath12k_ar_to_hw(ar), msdu); 6332 6333 num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx); 6334 6335 /* WARN when we received this event without doing any mgmt tx */ 6336 if (num_mgmt < 0) 6337 WARN_ON_ONCE(1); 6338 6339 if (!num_mgmt) 6340 wake_up(&ar->txmgmt_empty_waitq); 6341 6342 return 0; 6343 } 6344 6345 static int ath12k_pull_mgmt_tx_compl_param_tlv(struct ath12k_base *ab, 6346 struct sk_buff *skb, 6347 struct wmi_mgmt_tx_compl_event *param) 6348 { 6349 const void **tb; 6350 const struct wmi_mgmt_tx_compl_event *ev; 6351 int ret; 6352 6353 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6354 if (IS_ERR(tb)) { 6355 ret = PTR_ERR(tb); 6356 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6357 return ret; 6358 } 6359 6360 ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT]; 6361 if (!ev) { 6362 ath12k_warn(ab, "failed to fetch mgmt tx compl ev"); 6363 kfree(tb); 6364 return -EPROTO; 6365 } 6366 6367 param->pdev_id = ev->pdev_id; 6368 param->desc_id = ev->desc_id; 6369 param->status = ev->status; 6370 param->ppdu_id = ev->ppdu_id; 6371 param->ack_rssi = ev->ack_rssi; 6372 6373 kfree(tb); 6374 return 0; 6375 } 6376 6377 static void ath12k_wmi_event_scan_started(struct ath12k *ar) 6378 { 6379 lockdep_assert_held(&ar->data_lock); 6380 6381 switch (ar->scan.state) { 6382 case ATH12K_SCAN_IDLE: 6383 case ATH12K_SCAN_RUNNING: 6384 case ATH12K_SCAN_ABORTING: 6385 ath12k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n", 6386 ath12k_scan_state_str(ar->scan.state), 6387 ar->scan.state); 6388 break; 6389 case ATH12K_SCAN_STARTING: 6390 ar->scan.state = ATH12K_SCAN_RUNNING; 6391 6392 if (ar->scan.is_roc) 6393 ieee80211_ready_on_channel(ath12k_ar_to_hw(ar)); 6394 6395 complete(&ar->scan.started); 6396 break; 6397 } 6398 } 6399 6400 static void ath12k_wmi_event_scan_start_failed(struct ath12k *ar) 6401 { 6402 lockdep_assert_held(&ar->data_lock); 6403 6404 switch (ar->scan.state) { 6405 case ATH12K_SCAN_IDLE: 6406 case ATH12K_SCAN_RUNNING: 6407 case ATH12K_SCAN_ABORTING: 6408 ath12k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n", 6409 
ath12k_scan_state_str(ar->scan.state), 6410 ar->scan.state); 6411 break; 6412 case ATH12K_SCAN_STARTING: 6413 complete(&ar->scan.started); 6414 __ath12k_mac_scan_finish(ar); 6415 break; 6416 } 6417 } 6418 6419 static void ath12k_wmi_event_scan_completed(struct ath12k *ar) 6420 { 6421 lockdep_assert_held(&ar->data_lock); 6422 6423 switch (ar->scan.state) { 6424 case ATH12K_SCAN_IDLE: 6425 case ATH12K_SCAN_STARTING: 6426 /* One suspected reason scan can be completed while starting is 6427 * if firmware fails to deliver all scan events to the host, 6428 * e.g. when transport pipe is full. This has been observed 6429 * with spectral scan phyerr events starving wmi transport 6430 * pipe. In such case the "scan completed" event should be (and 6431 * is) ignored by the host as it may be just firmware's scan 6432 * state machine recovering. 6433 */ 6434 ath12k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n", 6435 ath12k_scan_state_str(ar->scan.state), 6436 ar->scan.state); 6437 break; 6438 case ATH12K_SCAN_RUNNING: 6439 case ATH12K_SCAN_ABORTING: 6440 __ath12k_mac_scan_finish(ar); 6441 break; 6442 } 6443 } 6444 6445 static void ath12k_wmi_event_scan_bss_chan(struct ath12k *ar) 6446 { 6447 lockdep_assert_held(&ar->data_lock); 6448 6449 switch (ar->scan.state) { 6450 case ATH12K_SCAN_IDLE: 6451 case ATH12K_SCAN_STARTING: 6452 ath12k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n", 6453 ath12k_scan_state_str(ar->scan.state), 6454 ar->scan.state); 6455 break; 6456 case ATH12K_SCAN_RUNNING: 6457 case ATH12K_SCAN_ABORTING: 6458 ar->scan_channel = NULL; 6459 break; 6460 } 6461 } 6462 6463 static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq) 6464 { 6465 struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 6466 6467 lockdep_assert_held(&ar->data_lock); 6468 6469 switch (ar->scan.state) { 6470 case ATH12K_SCAN_IDLE: 6471 case ATH12K_SCAN_STARTING: 6472 ath12k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n", 6473 ath12k_scan_state_str(ar->scan.state), 6474 ar->scan.state); 6475 break; 6476 case ATH12K_SCAN_RUNNING: 6477 case ATH12K_SCAN_ABORTING: 6478 ar->scan_channel = ieee80211_get_channel(hw->wiphy, freq); 6479 6480 if (ar->scan.is_roc && ar->scan.roc_freq == freq) 6481 complete(&ar->scan.on_channel); 6482 6483 break; 6484 } 6485 } 6486 6487 static const char * 6488 ath12k_wmi_event_scan_type_str(enum wmi_scan_event_type type, 6489 enum wmi_scan_completion_reason reason) 6490 { 6491 switch (type) { 6492 case WMI_SCAN_EVENT_STARTED: 6493 return "started"; 6494 case WMI_SCAN_EVENT_COMPLETED: 6495 switch (reason) { 6496 case WMI_SCAN_REASON_COMPLETED: 6497 return "completed"; 6498 case WMI_SCAN_REASON_CANCELLED: 6499 return "completed [cancelled]"; 6500 case WMI_SCAN_REASON_PREEMPTED: 6501 return "completed [preempted]"; 6502 case WMI_SCAN_REASON_TIMEDOUT: 6503 return "completed [timedout]"; 6504 case WMI_SCAN_REASON_INTERNAL_FAILURE: 6505 return "completed [internal err]"; 6506 case WMI_SCAN_REASON_MAX: 6507 break; 6508 } 6509 return "completed [unknown]"; 6510 case WMI_SCAN_EVENT_BSS_CHANNEL: 6511 return "bss channel"; 6512 case WMI_SCAN_EVENT_FOREIGN_CHAN: 6513 return "foreign channel"; 6514 case WMI_SCAN_EVENT_DEQUEUED: 6515 return "dequeued"; 6516 case WMI_SCAN_EVENT_PREEMPTED: 6517 return "preempted"; 6518 case WMI_SCAN_EVENT_START_FAILED: 6519 return "start failed"; 6520 case WMI_SCAN_EVENT_RESTARTED: 6521 return "restarted"; 6522 case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT: 6523 return 
"foreign channel exit"; 6524 default: 6525 return "unknown"; 6526 } 6527 } 6528 6529 static int ath12k_pull_scan_ev(struct ath12k_base *ab, struct sk_buff *skb, 6530 struct wmi_scan_event *scan_evt_param) 6531 { 6532 const void **tb; 6533 const struct wmi_scan_event *ev; 6534 int ret; 6535 6536 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6537 if (IS_ERR(tb)) { 6538 ret = PTR_ERR(tb); 6539 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6540 return ret; 6541 } 6542 6543 ev = tb[WMI_TAG_SCAN_EVENT]; 6544 if (!ev) { 6545 ath12k_warn(ab, "failed to fetch scan ev"); 6546 kfree(tb); 6547 return -EPROTO; 6548 } 6549 6550 scan_evt_param->event_type = ev->event_type; 6551 scan_evt_param->reason = ev->reason; 6552 scan_evt_param->channel_freq = ev->channel_freq; 6553 scan_evt_param->scan_req_id = ev->scan_req_id; 6554 scan_evt_param->scan_id = ev->scan_id; 6555 scan_evt_param->vdev_id = ev->vdev_id; 6556 scan_evt_param->tsf_timestamp = ev->tsf_timestamp; 6557 6558 kfree(tb); 6559 return 0; 6560 } 6561 6562 static int ath12k_pull_peer_sta_kickout_ev(struct ath12k_base *ab, struct sk_buff *skb, 6563 struct wmi_peer_sta_kickout_arg *arg) 6564 { 6565 const void **tb; 6566 const struct wmi_peer_sta_kickout_event *ev; 6567 int ret; 6568 6569 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6570 if (IS_ERR(tb)) { 6571 ret = PTR_ERR(tb); 6572 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6573 return ret; 6574 } 6575 6576 ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT]; 6577 if (!ev) { 6578 ath12k_warn(ab, "failed to fetch peer sta kickout ev"); 6579 kfree(tb); 6580 return -EPROTO; 6581 } 6582 6583 arg->mac_addr = ev->peer_macaddr.addr; 6584 arg->reason = le32_to_cpu(ev->reason); 6585 arg->rssi = le32_to_cpu(ev->rssi); 6586 6587 kfree(tb); 6588 return 0; 6589 } 6590 6591 static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb, 6592 struct wmi_roam_event *roam_ev) 6593 { 6594 const void **tb; 6595 const struct wmi_roam_event *ev; 6596 int ret; 6597 6598 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6599 if (IS_ERR(tb)) { 6600 ret = PTR_ERR(tb); 6601 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6602 return ret; 6603 } 6604 6605 ev = tb[WMI_TAG_ROAM_EVENT]; 6606 if (!ev) { 6607 ath12k_warn(ab, "failed to fetch roam ev"); 6608 kfree(tb); 6609 return -EPROTO; 6610 } 6611 6612 roam_ev->vdev_id = ev->vdev_id; 6613 roam_ev->reason = ev->reason; 6614 roam_ev->rssi = ev->rssi; 6615 6616 kfree(tb); 6617 return 0; 6618 } 6619 6620 static int freq_to_idx(struct ath12k *ar, int freq) 6621 { 6622 struct ieee80211_supported_band *sband; 6623 struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 6624 int band, ch, idx = 0; 6625 6626 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { 6627 if (!ar->mac.sbands[band].channels) 6628 continue; 6629 6630 sband = hw->wiphy->bands[band]; 6631 if (!sband) 6632 continue; 6633 6634 for (ch = 0; ch < sband->n_channels; ch++, idx++) 6635 if (sband->channels[ch].center_freq == freq) 6636 goto exit; 6637 } 6638 6639 exit: 6640 return idx; 6641 } 6642 6643 static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb, 6644 struct wmi_chan_info_event *ch_info_ev) 6645 { 6646 const void **tb; 6647 const struct wmi_chan_info_event *ev; 6648 int ret; 6649 6650 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6651 if (IS_ERR(tb)) { 6652 ret = PTR_ERR(tb); 6653 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6654 return ret; 6655 } 6656 6657 ev = tb[WMI_TAG_CHAN_INFO_EVENT]; 6658 if (!ev) { 6659 ath12k_warn(ab, 
"failed to fetch chan info ev"); 6660 kfree(tb); 6661 return -EPROTO; 6662 } 6663 6664 ch_info_ev->err_code = ev->err_code; 6665 ch_info_ev->freq = ev->freq; 6666 ch_info_ev->cmd_flags = ev->cmd_flags; 6667 ch_info_ev->noise_floor = ev->noise_floor; 6668 ch_info_ev->rx_clear_count = ev->rx_clear_count; 6669 ch_info_ev->cycle_count = ev->cycle_count; 6670 ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range; 6671 ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp; 6672 ch_info_ev->rx_frame_count = ev->rx_frame_count; 6673 ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt; 6674 ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz; 6675 ch_info_ev->vdev_id = ev->vdev_id; 6676 6677 kfree(tb); 6678 return 0; 6679 } 6680 6681 static int 6682 ath12k_pull_pdev_bss_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb, 6683 struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev) 6684 { 6685 const void **tb; 6686 const struct wmi_pdev_bss_chan_info_event *ev; 6687 int ret; 6688 6689 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6690 if (IS_ERR(tb)) { 6691 ret = PTR_ERR(tb); 6692 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6693 return ret; 6694 } 6695 6696 ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT]; 6697 if (!ev) { 6698 ath12k_warn(ab, "failed to fetch pdev bss chan info ev"); 6699 kfree(tb); 6700 return -EPROTO; 6701 } 6702 6703 bss_ch_info_ev->pdev_id = ev->pdev_id; 6704 bss_ch_info_ev->freq = ev->freq; 6705 bss_ch_info_ev->noise_floor = ev->noise_floor; 6706 bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low; 6707 bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high; 6708 bss_ch_info_ev->cycle_count_low = ev->cycle_count_low; 6709 bss_ch_info_ev->cycle_count_high = ev->cycle_count_high; 6710 bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low; 6711 bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high; 6712 bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low; 6713 bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high; 6714 bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low; 6715 bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high; 6716 6717 kfree(tb); 6718 return 0; 6719 } 6720 6721 static int 6722 ath12k_pull_vdev_install_key_compl_ev(struct ath12k_base *ab, struct sk_buff *skb, 6723 struct wmi_vdev_install_key_complete_arg *arg) 6724 { 6725 const void **tb; 6726 const struct wmi_vdev_install_key_compl_event *ev; 6727 int ret; 6728 6729 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6730 if (IS_ERR(tb)) { 6731 ret = PTR_ERR(tb); 6732 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6733 return ret; 6734 } 6735 6736 ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT]; 6737 if (!ev) { 6738 ath12k_warn(ab, "failed to fetch vdev install key compl ev"); 6739 kfree(tb); 6740 return -EPROTO; 6741 } 6742 6743 arg->vdev_id = le32_to_cpu(ev->vdev_id); 6744 arg->macaddr = ev->peer_macaddr.addr; 6745 arg->key_idx = le32_to_cpu(ev->key_idx); 6746 arg->key_flags = le32_to_cpu(ev->key_flags); 6747 arg->status = le32_to_cpu(ev->status); 6748 6749 kfree(tb); 6750 return 0; 6751 } 6752 6753 static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff *skb, 6754 struct wmi_peer_assoc_conf_arg *peer_assoc_conf) 6755 { 6756 const void **tb; 6757 const struct wmi_peer_assoc_conf_event *ev; 6758 int ret; 6759 6760 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6761 if (IS_ERR(tb)) { 6762 ret = PTR_ERR(tb); 6763 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6764 return ret; 6765 } 6766 6767 ev = 
tb[WMI_TAG_PEER_ASSOC_CONF_EVENT]; 6768 if (!ev) { 6769 ath12k_warn(ab, "failed to fetch peer assoc conf ev"); 6770 kfree(tb); 6771 return -EPROTO; 6772 } 6773 6774 peer_assoc_conf->vdev_id = le32_to_cpu(ev->vdev_id); 6775 peer_assoc_conf->macaddr = ev->peer_macaddr.addr; 6776 6777 kfree(tb); 6778 return 0; 6779 } 6780 6781 static void ath12k_wmi_op_ep_tx_credits(struct ath12k_base *ab) 6782 { 6783 /* try to send pending beacons first. they take priority */ 6784 wake_up(&ab->wmi_ab.tx_credits_wq); 6785 } 6786 6787 static int ath12k_reg_11d_new_cc_event(struct ath12k_base *ab, struct sk_buff *skb) 6788 { 6789 const struct wmi_11d_new_cc_event *ev; 6790 struct ath12k *ar; 6791 struct ath12k_pdev *pdev; 6792 const void **tb; 6793 int ret, i; 6794 6795 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 6796 if (IS_ERR(tb)) { 6797 ret = PTR_ERR(tb); 6798 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 6799 return ret; 6800 } 6801 6802 ev = tb[WMI_TAG_11D_NEW_COUNTRY_EVENT]; 6803 if (!ev) { 6804 kfree(tb); 6805 ath12k_warn(ab, "failed to fetch 11d new cc ev"); 6806 return -EPROTO; 6807 } 6808 6809 spin_lock_bh(&ab->base_lock); 6810 memcpy(&ab->new_alpha2, &ev->new_alpha2, REG_ALPHA2_LEN); 6811 spin_unlock_bh(&ab->base_lock); 6812 6813 ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi 11d new cc %c%c\n", 6814 ab->new_alpha2[0], 6815 ab->new_alpha2[1]); 6816 6817 kfree(tb); 6818 6819 for (i = 0; i < ab->num_radios; i++) { 6820 pdev = &ab->pdevs[i]; 6821 ar = pdev->ar; 6822 ar->state_11d = ATH12K_11D_IDLE; 6823 ar->ah->regd_updated = false; 6824 complete(&ar->completed_11d_scan); 6825 } 6826 6827 queue_work(ab->workqueue, &ab->update_11d_work); 6828 6829 return 0; 6830 } 6831 6832 static void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab, 6833 struct sk_buff *skb) 6834 { 6835 dev_kfree_skb(skb); 6836 } 6837 6838 static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb) 6839 { 6840 struct ath12k_reg_info *reg_info; 6841 struct ath12k *ar = NULL; 6842 u8 pdev_idx = 255; 6843 int ret; 6844 6845 reg_info = kzalloc_obj(*reg_info, GFP_ATOMIC); 6846 if (!reg_info) { 6847 ret = -ENOMEM; 6848 goto fallback; 6849 } 6850 6851 ret = ath12k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info); 6852 if (ret) { 6853 ath12k_warn(ab, "failed to extract regulatory info from received event\n"); 6854 goto mem_free; 6855 } 6856 6857 ret = ath12k_reg_validate_reg_info(ab, reg_info); 6858 if (ret == ATH12K_REG_STATUS_FALLBACK) { 6859 ath12k_warn(ab, "failed to validate reg info %d\n", ret); 6860 /* firmware has successfully switched to the new regd but the host 6861 * cannot continue, so free reg_info and fall back to the old regd 6862 */ 6863 goto mem_free; 6864 } else if (ret == ATH12K_REG_STATUS_DROP) { 6865 /* reg_info is valid but we will not store it and 6866 * will not create a new regd from it 6867 */ 6868 ret = ATH12K_REG_STATUS_VALID; 6869 goto mem_free; 6870 } 6871 6872 /* free the old reg_info if it exists */ 6873 pdev_idx = reg_info->phy_id; 6874 if (ab->reg_info[pdev_idx]) { 6875 ath12k_reg_reset_reg_info(ab->reg_info[pdev_idx]); 6876 kfree(ab->reg_info[pdev_idx]); 6877 } 6878 /* reg_info is valid, so store it for later use 6879 * even if the regd build below fails 6880 */ 6881 ab->reg_info[pdev_idx] = reg_info; 6882 6883 ret = ath12k_reg_handle_chan_list(ab, reg_info, WMI_VDEV_TYPE_UNSPEC, 6884 IEEE80211_REG_UNSET_AP); 6885 if (ret) { 6886 ath12k_warn(ab, "failed to handle chan list %d\n", ret); 6887 goto fallback; 6888 } 6889 6890 goto out; 6891 6892 mem_free: 6893 ath12k_reg_reset_reg_info(reg_info); 6894
kfree(reg_info); 6895 6896 if (ret == ATH12K_REG_STATUS_VALID) 6897 goto out; 6898 6899 fallback: 6900 /* Fallback to older reg (by sending previous country setting 6901 * again if fw has succeeded and we failed to process here. 6902 * The Regdomain should be uniform across driver and fw. Since the 6903 * FW has processed the command and sent a success status, we expect 6904 * this function to succeed as well. If it doesn't, CTRY needs to be 6905 * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent. 6906 */ 6907 /* TODO: This is rare, but still should also be handled */ 6908 WARN_ON(1); 6909 6910 out: 6911 /* In some error cases, even a valid pdev_idx might not be available */ 6912 if (pdev_idx != 255) 6913 ar = ab->pdevs[pdev_idx].ar; 6914 6915 /* During the boot-time update, 'ar' might not be allocated, 6916 * so the completion cannot be marked at that point. 6917 * This boot-time update is handled in ath12k_mac_hw_register() 6918 * before registering the hardware. 6919 */ 6920 if (ar) 6921 complete_all(&ar->regd_update_completed); 6922 6923 return ret; 6924 } 6925 6926 static int ath12k_wmi_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len, 6927 const void *ptr, void *data) 6928 { 6929 struct ath12k_wmi_rdy_parse *rdy_parse = data; 6930 struct wmi_ready_event fixed_param; 6931 struct ath12k_wmi_mac_addr_params *addr_list; 6932 struct ath12k_pdev *pdev; 6933 u32 num_mac_addr; 6934 int i; 6935 6936 switch (tag) { 6937 case WMI_TAG_READY_EVENT: 6938 memset(&fixed_param, 0, sizeof(fixed_param)); 6939 memcpy(&fixed_param, (struct wmi_ready_event *)ptr, 6940 min_t(u16, sizeof(fixed_param), len)); 6941 ab->wlan_init_status = le32_to_cpu(fixed_param.ready_event_min.status); 6942 rdy_parse->num_extra_mac_addr = 6943 le32_to_cpu(fixed_param.ready_event_min.num_extra_mac_addr); 6944 6945 ether_addr_copy(ab->mac_addr, 6946 fixed_param.ready_event_min.mac_addr.addr); 6947 ab->pktlog_defs_checksum = le32_to_cpu(fixed_param.pktlog_defs_checksum); 6948 ab->wmi_ready = true; 6949 break; 6950 case WMI_TAG_ARRAY_FIXED_STRUCT: 6951 addr_list = (struct ath12k_wmi_mac_addr_params *)ptr; 6952 num_mac_addr = rdy_parse->num_extra_mac_addr; 6953 6954 if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios)) 6955 break; 6956 6957 for (i = 0; i < ab->num_radios; i++) { 6958 pdev = &ab->pdevs[i]; 6959 ether_addr_copy(pdev->mac_addr, addr_list[i].addr); 6960 } 6961 ab->pdevs_macaddr_valid = true; 6962 break; 6963 default: 6964 break; 6965 } 6966 6967 return 0; 6968 } 6969 6970 static int ath12k_ready_event(struct ath12k_base *ab, struct sk_buff *skb) 6971 { 6972 struct ath12k_wmi_rdy_parse rdy_parse = { }; 6973 int ret; 6974 6975 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, 6976 ath12k_wmi_rdy_parse, &rdy_parse); 6977 if (ret) { 6978 ath12k_warn(ab, "failed to parse tlv %d\n", ret); 6979 return ret; 6980 } 6981 6982 complete(&ab->wmi_ab.unified_ready); 6983 return 0; 6984 } 6985 6986 static void ath12k_peer_delete_resp_event(struct ath12k_base *ab, struct sk_buff *skb) 6987 { 6988 struct wmi_peer_delete_resp_event peer_del_resp; 6989 struct ath12k *ar; 6990 6991 if (ath12k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) { 6992 ath12k_warn(ab, "failed to extract peer delete resp"); 6993 return; 6994 } 6995 6996 rcu_read_lock(); 6997 ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(peer_del_resp.vdev_id)); 6998 if (!ar) { 6999 ath12k_warn(ab, "invalid vdev id in peer delete resp ev %d", 7000 peer_del_resp.vdev_id); 7001 rcu_read_unlock(); 7002 return; 7003 } 7004 7005 
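/* signal completion of the peer delete response */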
complete(&ar->peer_delete_done); 7006 rcu_read_unlock(); 7007 ath12k_dbg(ab, ATH12K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n", 7008 peer_del_resp.vdev_id, peer_del_resp.peer_macaddr.addr); 7009 } 7010 7011 static void ath12k_vdev_delete_resp_event(struct ath12k_base *ab, 7012 struct sk_buff *skb) 7013 { 7014 struct ath12k *ar; 7015 u32 vdev_id = 0; 7016 7017 if (ath12k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) { 7018 ath12k_warn(ab, "failed to extract vdev delete resp"); 7019 return; 7020 } 7021 7022 rcu_read_lock(); 7023 ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id); 7024 if (!ar) { 7025 ath12k_warn(ab, "invalid vdev id in vdev delete resp ev %d", 7026 vdev_id); 7027 rcu_read_unlock(); 7028 return; 7029 } 7030 7031 complete(&ar->vdev_delete_done); 7032 7033 rcu_read_unlock(); 7034 7035 ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev delete resp for vdev id %d\n", 7036 vdev_id); 7037 } 7038 7039 static const char *ath12k_wmi_vdev_resp_print(u32 vdev_resp_status) 7040 { 7041 switch (vdev_resp_status) { 7042 case WMI_VDEV_START_RESPONSE_INVALID_VDEVID: 7043 return "invalid vdev id"; 7044 case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED: 7045 return "not supported"; 7046 case WMI_VDEV_START_RESPONSE_DFS_VIOLATION: 7047 return "dfs violation"; 7048 case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN: 7049 return "invalid regdomain"; 7050 default: 7051 return "unknown"; 7052 } 7053 } 7054 7055 static void ath12k_vdev_start_resp_event(struct ath12k_base *ab, struct sk_buff *skb) 7056 { 7057 struct wmi_vdev_start_resp_event vdev_start_resp; 7058 struct ath12k *ar; 7059 u32 status; 7060 7061 if (ath12k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) { 7062 ath12k_warn(ab, "failed to extract vdev start resp"); 7063 return; 7064 } 7065 7066 rcu_read_lock(); 7067 ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(vdev_start_resp.vdev_id)); 7068 if (!ar) { 7069 ath12k_warn(ab, "invalid vdev id in vdev start resp ev %d", 7070 vdev_start_resp.vdev_id); 7071 rcu_read_unlock(); 7072 return; 7073 } 7074 7075 ar->last_wmi_vdev_start_status = 0; 7076 7077 status = le32_to_cpu(vdev_start_resp.status); 7078 if (WARN_ON_ONCE(status)) { 7079 ath12k_warn(ab, "vdev start resp error status %d (%s)\n", 7080 status, ath12k_wmi_vdev_resp_print(status)); 7081 ar->last_wmi_vdev_start_status = status; 7082 } 7083 7084 ar->max_allowed_tx_power = (s8)le32_to_cpu(vdev_start_resp.max_allowed_tx_power); 7085 7086 complete(&ar->vdev_setup_done); 7087 7088 rcu_read_unlock(); 7089 7090 ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev start resp for vdev id %d", 7091 vdev_start_resp.vdev_id); 7092 } 7093 7094 static void ath12k_bcn_tx_status_event(struct ath12k_base *ab, struct sk_buff *skb) 7095 { 7096 struct ath12k_link_vif *arvif; 7097 struct ath12k *ar; 7098 u32 vdev_id, tx_status; 7099 7100 if (ath12k_pull_bcn_tx_status_ev(ab, skb, &vdev_id, &tx_status) != 0) { 7101 ath12k_warn(ab, "failed to extract bcn tx status"); 7102 return; 7103 } 7104 7105 guard(rcu)(); 7106 7107 arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_id); 7108 if (!arvif) { 7109 ath12k_warn(ab, "invalid vdev %u in bcn tx status\n", 7110 vdev_id); 7111 return; 7112 } 7113 7114 ar = arvif->ar; 7115 wiphy_work_queue(ath12k_ar_to_hw(ar)->wiphy, &arvif->bcn_tx_work); 7116 } 7117 7118 static void ath12k_vdev_stopped_event(struct ath12k_base *ab, struct sk_buff *skb) 7119 { 7120 struct ath12k *ar; 7121 u32 vdev_id = 0; 7122 7123 if (ath12k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) { 7124 ath12k_warn(ab, "failed to extract vdev stopped event"); 7125 return; 
7126 } 7127 7128 rcu_read_lock(); 7129 ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id); 7130 if (!ar) { 7131 ath12k_warn(ab, "invalid vdev id in vdev stopped ev %d", 7132 vdev_id); 7133 rcu_read_unlock(); 7134 return; 7135 } 7136 7137 complete(&ar->vdev_setup_done); 7138 7139 rcu_read_unlock(); 7140 7141 ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id); 7142 } 7143 7144 static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb) 7145 { 7146 struct ath12k_wmi_mgmt_rx_arg rx_ev = {}; 7147 struct ath12k *ar; 7148 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 7149 struct ieee80211_hdr *hdr; 7150 u16 fc; 7151 struct ieee80211_supported_band *sband; 7152 s32 noise_floor; 7153 7154 if (ath12k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) { 7155 ath12k_warn(ab, "failed to extract mgmt rx event"); 7156 dev_kfree_skb(skb); 7157 return; 7158 } 7159 7160 memset(status, 0, sizeof(*status)); 7161 7162 ath12k_dbg(ab, ATH12K_DBG_MGMT, "mgmt rx event status %08x\n", 7163 rx_ev.status); 7164 7165 rcu_read_lock(); 7166 ar = ath12k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id); 7167 7168 if (!ar) { 7169 ath12k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n", 7170 rx_ev.pdev_id); 7171 dev_kfree_skb(skb); 7172 goto exit; 7173 } 7174 7175 if ((test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) || 7176 (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT | 7177 WMI_RX_STATUS_ERR_KEY_CACHE_MISS | 7178 WMI_RX_STATUS_ERR_CRC))) { 7179 dev_kfree_skb(skb); 7180 goto exit; 7181 } 7182 7183 if (rx_ev.status & WMI_RX_STATUS_ERR_MIC) 7184 status->flag |= RX_FLAG_MMIC_ERROR; 7185 7186 if (rx_ev.chan_freq >= ATH12K_MIN_6GHZ_FREQ && 7187 rx_ev.chan_freq <= ATH12K_MAX_6GHZ_FREQ) { 7188 status->band = NL80211_BAND_6GHZ; 7189 status->freq = rx_ev.chan_freq; 7190 } else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) { 7191 status->band = NL80211_BAND_2GHZ; 7192 } else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5GHZ_CHAN) { 7193 status->band = NL80211_BAND_5GHZ; 7194 } else { 7195 /* Shouldn't happen unless list of advertised channels to 7196 * mac80211 has been changed. 7197 */ 7198 WARN_ON_ONCE(1); 7199 dev_kfree_skb(skb); 7200 goto exit; 7201 } 7202 7203 if (rx_ev.phy_mode == MODE_11B && 7204 (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ)) 7205 ath12k_dbg(ab, ATH12K_DBG_WMI, 7206 "wmi mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band); 7207 7208 sband = &ar->mac.sbands[status->band]; 7209 7210 if (status->band != NL80211_BAND_6GHZ) 7211 status->freq = ieee80211_channel_to_frequency(rx_ev.channel, 7212 status->band); 7213 7214 spin_lock_bh(&ar->data_lock); 7215 noise_floor = ath12k_pdev_get_noise_floor(ar); 7216 spin_unlock_bh(&ar->data_lock); 7217 7218 status->signal = rx_ev.snr + noise_floor; 7219 status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100); 7220 7221 hdr = (struct ieee80211_hdr *)skb->data; 7222 fc = le16_to_cpu(hdr->frame_control); 7223 7224 /* Firmware is guaranteed to report all essential management frames via 7225 * WMI while it can deliver some extra via HTT. Since there can be 7226 * duplicates split the reporting wrt monitor/sniffing. 7227 */ 7228 status->flag |= RX_FLAG_SKIP_MONITOR; 7229 7230 /* In case of PMF, FW delivers decrypted frames with Protected Bit set 7231 * including group privacy action frames. 
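 * Such frames are reported to mac80211 with RX_FLAG_DECRYPTED; for
 * non-robust management frames the IV/MMIC are additionally marked as
 * stripped and the Protected bit is cleared so mac80211 does not attempt
 * to decrypt them again.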
7232 */ 7233 if (ieee80211_has_protected(hdr->frame_control)) { 7234 status->flag |= RX_FLAG_DECRYPTED; 7235 7236 if (!ieee80211_is_robust_mgmt_frame(skb)) { 7237 status->flag |= RX_FLAG_IV_STRIPPED | 7238 RX_FLAG_MMIC_STRIPPED; 7239 hdr->frame_control = __cpu_to_le16(fc & 7240 ~IEEE80211_FCTL_PROTECTED); 7241 } 7242 } 7243 7244 if (ieee80211_is_beacon(hdr->frame_control)) 7245 ath12k_mac_handle_beacon(ar, skb); 7246 7247 ath12k_dbg(ab, ATH12K_DBG_MGMT, 7248 "event mgmt rx skb %p len %d ftype %02x stype %02x\n", 7249 skb, skb->len, 7250 fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE); 7251 7252 ath12k_dbg(ab, ATH12K_DBG_MGMT, 7253 "event mgmt rx freq %d band %d snr %d, rate_idx %d\n", 7254 status->freq, status->band, status->signal, 7255 status->rate_idx); 7256 7257 ieee80211_rx_ni(ath12k_ar_to_hw(ar), skb); 7258 7259 exit: 7260 rcu_read_unlock(); 7261 } 7262 7263 static void ath12k_mgmt_tx_compl_event(struct ath12k_base *ab, struct sk_buff *skb) 7264 { 7265 struct wmi_mgmt_tx_compl_event tx_compl_param = {}; 7266 struct ath12k *ar; 7267 7268 if (ath12k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) { 7269 ath12k_warn(ab, "failed to extract mgmt tx compl event"); 7270 return; 7271 } 7272 7273 rcu_read_lock(); 7274 ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(tx_compl_param.pdev_id)); 7275 if (!ar) { 7276 ath12k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n", 7277 tx_compl_param.pdev_id); 7278 goto exit; 7279 } 7280 7281 wmi_process_mgmt_tx_comp(ar, le32_to_cpu(tx_compl_param.desc_id), 7282 le32_to_cpu(tx_compl_param.status), 7283 le32_to_cpu(tx_compl_param.ack_rssi)); 7284 7285 ath12k_dbg(ab, ATH12K_DBG_MGMT, 7286 "mgmt tx compl ev pdev_id %d, desc_id %d, status %d", 7287 tx_compl_param.pdev_id, tx_compl_param.desc_id, 7288 tx_compl_param.status); 7289 7290 exit: 7291 rcu_read_unlock(); 7292 } 7293 7294 static struct ath12k *ath12k_get_ar_on_scan_state(struct ath12k_base *ab, 7295 u32 vdev_id, 7296 enum ath12k_scan_state state) 7297 { 7298 int i; 7299 struct ath12k_pdev *pdev; 7300 struct ath12k *ar; 7301 7302 for (i = 0; i < ab->num_radios; i++) { 7303 pdev = rcu_dereference(ab->pdevs_active[i]); 7304 if (pdev && pdev->ar) { 7305 ar = pdev->ar; 7306 7307 spin_lock_bh(&ar->data_lock); 7308 if (ar->scan.state == state && 7309 ar->scan.arvif && 7310 ar->scan.arvif->vdev_id == vdev_id) { 7311 spin_unlock_bh(&ar->data_lock); 7312 return ar; 7313 } 7314 spin_unlock_bh(&ar->data_lock); 7315 } 7316 } 7317 return NULL; 7318 } 7319 7320 static void ath12k_scan_event(struct ath12k_base *ab, struct sk_buff *skb) 7321 { 7322 struct ath12k *ar; 7323 struct wmi_scan_event scan_ev = {}; 7324 7325 if (ath12k_pull_scan_ev(ab, skb, &scan_ev) != 0) { 7326 ath12k_warn(ab, "failed to extract scan event"); 7327 return; 7328 } 7329 7330 rcu_read_lock(); 7331 7332 /* In case the scan was cancelled, ex. during interface teardown, 7333 * the interface will not be found in active interfaces. 7334 * Rather, in such scenarios, iterate over the active pdev's to 7335 * search 'ar' if the corresponding 'ar' scan is ABORTING and the 7336 * aborting scan's vdev id matches this event info. 
7337 */ 7338 if (le32_to_cpu(scan_ev.event_type) == WMI_SCAN_EVENT_COMPLETED && 7339 le32_to_cpu(scan_ev.reason) == WMI_SCAN_REASON_CANCELLED) { 7340 ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id), 7341 ATH12K_SCAN_ABORTING); 7342 if (!ar) 7343 ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id), 7344 ATH12K_SCAN_RUNNING); 7345 } else { 7346 ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(scan_ev.vdev_id)); 7347 } 7348 7349 if (!ar) { 7350 ath12k_warn(ab, "Received scan event for unknown vdev"); 7351 rcu_read_unlock(); 7352 return; 7353 } 7354 7355 spin_lock_bh(&ar->data_lock); 7356 7357 ath12k_dbg(ab, ATH12K_DBG_WMI, 7358 "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n", 7359 ath12k_wmi_event_scan_type_str(le32_to_cpu(scan_ev.event_type), 7360 le32_to_cpu(scan_ev.reason)), 7361 le32_to_cpu(scan_ev.event_type), 7362 le32_to_cpu(scan_ev.reason), 7363 le32_to_cpu(scan_ev.channel_freq), 7364 le32_to_cpu(scan_ev.scan_req_id), 7365 le32_to_cpu(scan_ev.scan_id), 7366 le32_to_cpu(scan_ev.vdev_id), 7367 ath12k_scan_state_str(ar->scan.state), ar->scan.state); 7368 7369 switch (le32_to_cpu(scan_ev.event_type)) { 7370 case WMI_SCAN_EVENT_STARTED: 7371 ath12k_wmi_event_scan_started(ar); 7372 break; 7373 case WMI_SCAN_EVENT_COMPLETED: 7374 ath12k_wmi_event_scan_completed(ar); 7375 break; 7376 case WMI_SCAN_EVENT_BSS_CHANNEL: 7377 ath12k_wmi_event_scan_bss_chan(ar); 7378 break; 7379 case WMI_SCAN_EVENT_FOREIGN_CHAN: 7380 ath12k_wmi_event_scan_foreign_chan(ar, le32_to_cpu(scan_ev.channel_freq)); 7381 break; 7382 case WMI_SCAN_EVENT_START_FAILED: 7383 ath12k_warn(ab, "received scan start failure event\n"); 7384 ath12k_wmi_event_scan_start_failed(ar); 7385 break; 7386 case WMI_SCAN_EVENT_DEQUEUED: 7387 __ath12k_mac_scan_finish(ar); 7388 break; 7389 case WMI_SCAN_EVENT_PREEMPTED: 7390 case WMI_SCAN_EVENT_RESTARTED: 7391 case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT: 7392 default: 7393 break; 7394 } 7395 7396 spin_unlock_bh(&ar->data_lock); 7397 7398 rcu_read_unlock(); 7399 } 7400 7401 static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff *skb) 7402 { 7403 struct wmi_peer_sta_kickout_arg arg = {}; 7404 struct ath12k_link_vif *arvif; 7405 struct ieee80211_sta *sta; 7406 struct ath12k_sta *ahsta; 7407 struct ath12k_link_sta *arsta; 7408 struct ath12k *ar; 7409 7410 if (ath12k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) { 7411 ath12k_warn(ab, "failed to extract peer sta kickout event"); 7412 return; 7413 } 7414 7415 rcu_read_lock(); 7416 7417 spin_lock_bh(&ab->base_lock); 7418 7419 arsta = ath12k_link_sta_find_by_addr(ab, arg.mac_addr); 7420 7421 if (!arsta) { 7422 ath12k_warn(ab, "arsta not found %pM\n", 7423 arg.mac_addr); 7424 goto exit; 7425 } 7426 7427 arvif = arsta->arvif; 7428 if (!arvif) { 7429 ath12k_warn(ab, "invalid arvif in peer sta kickout ev for STA %pM", 7430 arg.mac_addr); 7431 goto exit; 7432 } 7433 7434 ar = arvif->ar; 7435 ahsta = arsta->ahsta; 7436 sta = ath12k_ahsta_to_sta(ahsta); 7437 7438 ath12k_dbg(ab, ATH12K_DBG_WMI, 7439 "peer sta kickout event %pM reason: %d rssi: %d\n", 7440 arg.mac_addr, arg.reason, arg.rssi); 7441 7442 switch (arg.reason) { 7443 case WMI_PEER_STA_KICKOUT_REASON_INACTIVITY: 7444 if (arvif->ahvif->vif->type == NL80211_IFTYPE_STATION) { 7445 ath12k_mac_handle_beacon_miss(ar, arvif); 7446 break; 7447 } 7448 fallthrough; 7449 default: 7450 ieee80211_report_low_ack(sta, 10); 7451 } 7452 7453 exit: 7454 spin_unlock_bh(&ab->base_lock); 7455 rcu_read_unlock(); 7456 } 7457 7458 
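/* Roam event handler: firmware reports a roam trigger (beacon miss, better
 * AP found, low RSSI, etc.) for a vdev. Only the beacon-miss reason is acted
 * upon here, by forwarding it to ath12k_mac_handle_beacon_miss(); the other
 * reasons are logged and currently left unhandled.
 */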
static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb) 7459 { 7460 struct ath12k_link_vif *arvif; 7461 struct wmi_roam_event roam_ev = {}; 7462 struct ath12k *ar; 7463 u32 vdev_id; 7464 u8 roam_reason; 7465 7466 if (ath12k_pull_roam_ev(ab, skb, &roam_ev) != 0) { 7467 ath12k_warn(ab, "failed to extract roam event"); 7468 return; 7469 } 7470 7471 vdev_id = le32_to_cpu(roam_ev.vdev_id); 7472 roam_reason = u32_get_bits(le32_to_cpu(roam_ev.reason), 7473 WMI_ROAM_REASON_MASK); 7474 7475 ath12k_dbg(ab, ATH12K_DBG_WMI, 7476 "wmi roam event vdev %u reason %d rssi %d\n", 7477 vdev_id, roam_reason, roam_ev.rssi); 7478 7479 guard(rcu)(); 7480 arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_id); 7481 if (!arvif) { 7482 ath12k_warn(ab, "invalid vdev id in roam ev %d", vdev_id); 7483 return; 7484 } 7485 7486 ar = arvif->ar; 7487 7488 if (roam_reason >= WMI_ROAM_REASON_MAX) 7489 ath12k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n", 7490 roam_reason, vdev_id); 7491 7492 switch (roam_reason) { 7493 case WMI_ROAM_REASON_BEACON_MISS: 7494 ath12k_mac_handle_beacon_miss(ar, arvif); 7495 break; 7496 case WMI_ROAM_REASON_BETTER_AP: 7497 case WMI_ROAM_REASON_LOW_RSSI: 7498 case WMI_ROAM_REASON_SUITABLE_AP_FOUND: 7499 case WMI_ROAM_REASON_HO_FAILED: 7500 ath12k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n", 7501 roam_reason, vdev_id); 7502 break; 7503 } 7504 } 7505 7506 static void ath12k_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb) 7507 { 7508 struct wmi_chan_info_event ch_info_ev = {}; 7509 struct ath12k *ar; 7510 struct survey_info *survey; 7511 int idx; 7512 /* HW channel counters frequency value in hertz */ 7513 u32 cc_freq_hz = ab->cc_freq_hz; 7514 7515 if (ath12k_pull_chan_info_ev(ab, skb, &ch_info_ev) != 0) { 7516 ath12k_warn(ab, "failed to extract chan info event"); 7517 return; 7518 } 7519 7520 ath12k_dbg(ab, ATH12K_DBG_WMI, 7521 "chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n", 7522 ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq, 7523 ch_info_ev.cmd_flags, ch_info_ev.noise_floor, 7524 ch_info_ev.rx_clear_count, ch_info_ev.cycle_count, 7525 ch_info_ev.mac_clk_mhz); 7526 7527 if (le32_to_cpu(ch_info_ev.cmd_flags) == WMI_CHAN_INFO_END_RESP) { 7528 ath12k_dbg(ab, ATH12K_DBG_WMI, "chan info report completed\n"); 7529 return; 7530 } 7531 7532 rcu_read_lock(); 7533 ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(ch_info_ev.vdev_id)); 7534 if (!ar) { 7535 ath12k_warn(ab, "invalid vdev id in chan info ev %d", 7536 ch_info_ev.vdev_id); 7537 rcu_read_unlock(); 7538 return; 7539 } 7540 spin_lock_bh(&ar->data_lock); 7541 7542 switch (ar->scan.state) { 7543 case ATH12K_SCAN_IDLE: 7544 case ATH12K_SCAN_STARTING: 7545 ath12k_warn(ab, "received chan info event without a scan request, ignoring\n"); 7546 goto exit; 7547 case ATH12K_SCAN_RUNNING: 7548 case ATH12K_SCAN_ABORTING: 7549 break; 7550 } 7551 7552 idx = freq_to_idx(ar, le32_to_cpu(ch_info_ev.freq)); 7553 if (idx >= ARRAY_SIZE(ar->survey)) { 7554 ath12k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n", 7555 ch_info_ev.freq, idx); 7556 goto exit; 7557 } 7558 7559 /* If FW provides MAC clock frequency in Mhz, overriding the initialized 7560 * HW channel counters frequency value 7561 */ 7562 if (ch_info_ev.mac_clk_mhz) 7563 cc_freq_hz = (le32_to_cpu(ch_info_ev.mac_clk_mhz) * 1000); 7564 7565 if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_START_RESP) { 7566 survey = &ar->survey[idx]; 7567 
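/* A start-response for this channel begins a fresh measurement cycle:
 * clear the survey record before filling in the noise floor and the
 * cycle/rx-clear counters reported by this event.
 */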
memset(survey, 0, sizeof(*survey)); 7568 survey->noise = le32_to_cpu(ch_info_ev.noise_floor); 7569 survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME | 7570 SURVEY_INFO_TIME_BUSY; 7571 survey->time = div_u64(le32_to_cpu(ch_info_ev.cycle_count), cc_freq_hz); 7572 survey->time_busy = div_u64(le32_to_cpu(ch_info_ev.rx_clear_count), 7573 cc_freq_hz); 7574 } 7575 exit: 7576 spin_unlock_bh(&ar->data_lock); 7577 rcu_read_unlock(); 7578 } 7579 7580 static void 7581 ath12k_pdev_bss_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb) 7582 { 7583 struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {}; 7584 struct survey_info *survey; 7585 struct ath12k *ar; 7586 u32 cc_freq_hz = ab->cc_freq_hz; 7587 u64 busy, total, tx, rx, rx_bss; 7588 int idx; 7589 7590 if (ath12k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) { 7591 ath12k_warn(ab, "failed to extract pdev bss chan info event"); 7592 return; 7593 } 7594 7595 busy = (u64)(le32_to_cpu(bss_ch_info_ev.rx_clear_count_high)) << 32 | 7596 le32_to_cpu(bss_ch_info_ev.rx_clear_count_low); 7597 7598 total = (u64)(le32_to_cpu(bss_ch_info_ev.cycle_count_high)) << 32 | 7599 le32_to_cpu(bss_ch_info_ev.cycle_count_low); 7600 7601 tx = (u64)(le32_to_cpu(bss_ch_info_ev.tx_cycle_count_high)) << 32 | 7602 le32_to_cpu(bss_ch_info_ev.tx_cycle_count_low); 7603 7604 rx = (u64)(le32_to_cpu(bss_ch_info_ev.rx_cycle_count_high)) << 32 | 7605 le32_to_cpu(bss_ch_info_ev.rx_cycle_count_low); 7606 7607 rx_bss = (u64)(le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_high)) << 32 | 7608 le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_low); 7609 7610 ath12k_dbg(ab, ATH12K_DBG_WMI, 7611 "pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n", 7612 bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq, 7613 bss_ch_info_ev.noise_floor, busy, total, 7614 tx, rx, rx_bss); 7615 7616 rcu_read_lock(); 7617 ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(bss_ch_info_ev.pdev_id)); 7618 7619 if (!ar) { 7620 ath12k_warn(ab, "invalid pdev id %d in bss_chan_info event\n", 7621 bss_ch_info_ev.pdev_id); 7622 rcu_read_unlock(); 7623 return; 7624 } 7625 7626 spin_lock_bh(&ar->data_lock); 7627 idx = freq_to_idx(ar, le32_to_cpu(bss_ch_info_ev.freq)); 7628 if (idx >= ARRAY_SIZE(ar->survey)) { 7629 ath12k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n", 7630 bss_ch_info_ev.freq, idx); 7631 goto exit; 7632 } 7633 7634 survey = &ar->survey[idx]; 7635 7636 survey->noise = le32_to_cpu(bss_ch_info_ev.noise_floor); 7637 survey->time = div_u64(total, cc_freq_hz); 7638 survey->time_busy = div_u64(busy, cc_freq_hz); 7639 survey->time_rx = div_u64(rx_bss, cc_freq_hz); 7640 survey->time_tx = div_u64(tx, cc_freq_hz); 7641 survey->filled |= (SURVEY_INFO_NOISE_DBM | 7642 SURVEY_INFO_TIME | 7643 SURVEY_INFO_TIME_BUSY | 7644 SURVEY_INFO_TIME_RX | 7645 SURVEY_INFO_TIME_TX); 7646 exit: 7647 spin_unlock_bh(&ar->data_lock); 7648 complete(&ar->bss_survey_done); 7649 7650 rcu_read_unlock(); 7651 } 7652 7653 static void ath12k_vdev_install_key_compl_event(struct ath12k_base *ab, 7654 struct sk_buff *skb) 7655 { 7656 struct wmi_vdev_install_key_complete_arg install_key_compl = {}; 7657 struct ath12k *ar; 7658 7659 if (ath12k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) { 7660 ath12k_warn(ab, "failed to extract install key compl event"); 7661 return; 7662 } 7663 7664 ath12k_dbg(ab, ATH12K_DBG_WMI, 7665 "vdev install key ev idx %d flags %08x macaddr %pM status %d\n", 7666 install_key_compl.key_idx, 
install_key_compl.key_flags, 7667 install_key_compl.macaddr, install_key_compl.status); 7668 7669 rcu_read_lock(); 7670 ar = ath12k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id); 7671 if (!ar) { 7672 ath12k_warn(ab, "invalid vdev id in install key compl ev %d", 7673 install_key_compl.vdev_id); 7674 rcu_read_unlock(); 7675 return; 7676 } 7677 7678 ar->install_key_status = 0; 7679 7680 if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) { 7681 ath12k_warn(ab, "install key failed for %pM status %d\n", 7682 install_key_compl.macaddr, install_key_compl.status); 7683 ar->install_key_status = install_key_compl.status; 7684 } 7685 7686 complete(&ar->install_key_done); 7687 rcu_read_unlock(); 7688 } 7689 7690 static int ath12k_wmi_tlv_services_parser(struct ath12k_base *ab, 7691 u16 tag, u16 len, 7692 const void *ptr, 7693 void *data) 7694 { 7695 const struct wmi_service_available_event *ev; 7696 u16 wmi_ext2_service_words; 7697 __le32 *wmi_ext2_service_bitmap; 7698 int i, j; 7699 u16 expected_len; 7700 7701 expected_len = WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(u32); 7702 if (len < expected_len) { 7703 ath12k_warn(ab, "invalid length %d for the WMI services available tag 0x%x\n", 7704 len, tag); 7705 return -EINVAL; 7706 } 7707 7708 switch (tag) { 7709 case WMI_TAG_SERVICE_AVAILABLE_EVENT: 7710 ev = (struct wmi_service_available_event *)ptr; 7711 for (i = 0, j = WMI_MAX_SERVICE; 7712 i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE; 7713 i++) { 7714 do { 7715 if (le32_to_cpu(ev->wmi_service_segment_bitmap[i]) & 7716 BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32)) 7717 set_bit(j, ab->wmi_ab.svc_map); 7718 } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32); 7719 } 7720 7721 ath12k_dbg(ab, ATH12K_DBG_WMI, 7722 "wmi_ext_service_bitmap 0x%x 0x%x 0x%x 0x%x", 7723 ev->wmi_service_segment_bitmap[0], 7724 ev->wmi_service_segment_bitmap[1], 7725 ev->wmi_service_segment_bitmap[2], 7726 ev->wmi_service_segment_bitmap[3]); 7727 break; 7728 case WMI_TAG_ARRAY_UINT32: 7729 wmi_ext2_service_bitmap = (__le32 *)ptr; 7730 wmi_ext2_service_words = len / sizeof(u32); 7731 for (i = 0, j = WMI_MAX_EXT_SERVICE; 7732 i < wmi_ext2_service_words && j < WMI_MAX_EXT2_SERVICE; 7733 i++) { 7734 do { 7735 if (__le32_to_cpu(wmi_ext2_service_bitmap[i]) & 7736 BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32)) 7737 set_bit(j, ab->wmi_ab.svc_map); 7738 } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32); 7739 ath12k_dbg(ab, ATH12K_DBG_WMI, 7740 "wmi_ext2_service bitmap 0x%08x\n", 7741 __le32_to_cpu(wmi_ext2_service_bitmap[i])); 7742 } 7743 7744 break; 7745 } 7746 return 0; 7747 } 7748 7749 static int ath12k_service_available_event(struct ath12k_base *ab, struct sk_buff *skb) 7750 { 7751 int ret; 7752 7753 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, 7754 ath12k_wmi_tlv_services_parser, 7755 NULL); 7756 return ret; 7757 } 7758 7759 static void ath12k_peer_assoc_conf_event(struct ath12k_base *ab, struct sk_buff *skb) 7760 { 7761 struct wmi_peer_assoc_conf_arg peer_assoc_conf = {}; 7762 struct ath12k *ar; 7763 7764 if (ath12k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) { 7765 ath12k_warn(ab, "failed to extract peer assoc conf event"); 7766 return; 7767 } 7768 7769 ath12k_dbg(ab, ATH12K_DBG_WMI, 7770 "peer assoc conf ev vdev id %d macaddr %pM\n", 7771 peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr); 7772 7773 rcu_read_lock(); 7774 ar = ath12k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id); 7775 7776 if (!ar) { 7777 ath12k_warn(ab, "invalid vdev id in peer assoc conf ev %d", 7778 
peer_assoc_conf.vdev_id); 7779 rcu_read_unlock(); 7780 return; 7781 } 7782 7783 complete(&ar->peer_assoc_done); 7784 rcu_read_unlock(); 7785 } 7786 7787 static void 7788 ath12k_wmi_fw_vdev_stats_dump(struct ath12k *ar, 7789 struct ath12k_fw_stats *fw_stats, 7790 char *buf, u32 *length) 7791 { 7792 const struct ath12k_fw_stats_vdev *vdev; 7793 u32 buf_len = ATH12K_FW_STATS_BUF_SIZE; 7794 struct ath12k_link_vif *arvif; 7795 u32 len = *length; 7796 u8 *vif_macaddr; 7797 int i; 7798 7799 len += scnprintf(buf + len, buf_len - len, "\n"); 7800 len += scnprintf(buf + len, buf_len - len, "%30s\n", 7801 "ath12k VDEV stats"); 7802 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 7803 "================="); 7804 7805 list_for_each_entry(vdev, &fw_stats->vdevs, list) { 7806 arvif = ath12k_mac_get_arvif(ar, vdev->vdev_id); 7807 if (!arvif) 7808 continue; 7809 vif_macaddr = arvif->ahvif->vif->addr; 7810 7811 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7812 "VDEV ID", vdev->vdev_id); 7813 len += scnprintf(buf + len, buf_len - len, "%30s %pM\n", 7814 "VDEV MAC address", vif_macaddr); 7815 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7816 "beacon snr", vdev->beacon_snr); 7817 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7818 "data snr", vdev->data_snr); 7819 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7820 "num rx frames", vdev->num_rx_frames); 7821 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7822 "num rts fail", vdev->num_rts_fail); 7823 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7824 "num rts success", vdev->num_rts_success); 7825 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7826 "num rx err", vdev->num_rx_err); 7827 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7828 "num rx discard", vdev->num_rx_discard); 7829 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7830 "num tx not acked", vdev->num_tx_not_acked); 7831 7832 for (i = 0 ; i < WLAN_MAX_AC; i++) 7833 len += scnprintf(buf + len, buf_len - len, 7834 "%25s [%02d] %u\n", 7835 "num tx frames", i, 7836 vdev->num_tx_frames[i]); 7837 7838 for (i = 0 ; i < WLAN_MAX_AC; i++) 7839 len += scnprintf(buf + len, buf_len - len, 7840 "%25s [%02d] %u\n", 7841 "num tx frames retries", i, 7842 vdev->num_tx_frames_retries[i]); 7843 7844 for (i = 0 ; i < WLAN_MAX_AC; i++) 7845 len += scnprintf(buf + len, buf_len - len, 7846 "%25s [%02d] %u\n", 7847 "num tx frames failures", i, 7848 vdev->num_tx_frames_failures[i]); 7849 7850 for (i = 0 ; i < MAX_TX_RATE_VALUES; i++) 7851 len += scnprintf(buf + len, buf_len - len, 7852 "%25s [%02d] 0x%08x\n", 7853 "tx rate history", i, 7854 vdev->tx_rate_history[i]); 7855 for (i = 0 ; i < MAX_TX_RATE_VALUES; i++) 7856 len += scnprintf(buf + len, buf_len - len, 7857 "%25s [%02d] %u\n", 7858 "beacon rssi history", i, 7859 vdev->beacon_rssi_history[i]); 7860 7861 len += scnprintf(buf + len, buf_len - len, "\n"); 7862 *length = len; 7863 } 7864 } 7865 7866 static void 7867 ath12k_wmi_fw_bcn_stats_dump(struct ath12k *ar, 7868 struct ath12k_fw_stats *fw_stats, 7869 char *buf, u32 *length) 7870 { 7871 const struct ath12k_fw_stats_bcn *bcn; 7872 u32 buf_len = ATH12K_FW_STATS_BUF_SIZE; 7873 struct ath12k_link_vif *arvif; 7874 u32 len = *length; 7875 size_t num_bcn; 7876 7877 num_bcn = list_count_nodes(&fw_stats->bcn); 7878 7879 len += scnprintf(buf + len, buf_len - len, "\n"); 7880 len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n", 7881 "ath12k Beacon stats", num_bcn); 7882 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 7883 
"==================="); 7884 7885 list_for_each_entry(bcn, &fw_stats->bcn, list) { 7886 arvif = ath12k_mac_get_arvif(ar, bcn->vdev_id); 7887 if (!arvif) 7888 continue; 7889 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7890 "VDEV ID", bcn->vdev_id); 7891 len += scnprintf(buf + len, buf_len - len, "%30s %pM\n", 7892 "VDEV MAC address", arvif->ahvif->vif->addr); 7893 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 7894 "================"); 7895 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7896 "Num of beacon tx success", bcn->tx_bcn_succ_cnt); 7897 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 7898 "Num of beacon tx failures", bcn->tx_bcn_outage_cnt); 7899 7900 len += scnprintf(buf + len, buf_len - len, "\n"); 7901 *length = len; 7902 } 7903 } 7904 7905 static void 7906 ath12k_wmi_fw_pdev_base_stats_dump(const struct ath12k_fw_stats_pdev *pdev, 7907 char *buf, u32 *length, u64 fw_soc_drop_cnt) 7908 { 7909 u32 len = *length; 7910 u32 buf_len = ATH12K_FW_STATS_BUF_SIZE; 7911 7912 len = scnprintf(buf + len, buf_len - len, "\n"); 7913 len += scnprintf(buf + len, buf_len - len, "%30s\n", 7914 "ath12k PDEV stats"); 7915 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 7916 "================="); 7917 7918 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7919 "Channel noise floor", pdev->ch_noise_floor); 7920 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7921 "Channel TX power", pdev->chan_tx_power); 7922 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7923 "TX frame count", pdev->tx_frame_count); 7924 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7925 "RX frame count", pdev->rx_frame_count); 7926 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7927 "RX clear count", pdev->rx_clear_count); 7928 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7929 "Cycle count", pdev->cycle_count); 7930 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7931 "PHY error count", pdev->phy_err_count); 7932 len += scnprintf(buf + len, buf_len - len, "%30s %10llu\n", 7933 "soc drop count", fw_soc_drop_cnt); 7934 7935 *length = len; 7936 } 7937 7938 static void 7939 ath12k_wmi_fw_pdev_tx_stats_dump(const struct ath12k_fw_stats_pdev *pdev, 7940 char *buf, u32 *length) 7941 { 7942 u32 len = *length; 7943 u32 buf_len = ATH12K_FW_STATS_BUF_SIZE; 7944 7945 len += scnprintf(buf + len, buf_len - len, "\n%30s\n", 7946 "ath12k PDEV TX stats"); 7947 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 7948 "===================="); 7949 7950 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7951 "HTT cookies queued", pdev->comp_queued); 7952 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7953 "HTT cookies disp.", pdev->comp_delivered); 7954 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7955 "MSDU queued", pdev->msdu_enqued); 7956 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7957 "MPDU queued", pdev->mpdu_enqued); 7958 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7959 "MSDUs dropped", pdev->wmm_drop); 7960 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7961 "Local enqued", pdev->local_enqued); 7962 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7963 "Local freed", pdev->local_freed); 7964 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7965 "HW queued", pdev->hw_queued); 7966 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7967 "PPDUs reaped", pdev->hw_reaped); 7968 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 
7969 "Num underruns", pdev->underrun); 7970 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7971 "PPDUs cleaned", pdev->tx_abort); 7972 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 7973 "MPDUs requeued", pdev->mpdus_requed); 7974 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7975 "Excessive retries", pdev->tx_ko); 7976 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7977 "HW rate", pdev->data_rc); 7978 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7979 "Sched self triggers", pdev->self_triggers); 7980 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7981 "Dropped due to SW retries", 7982 pdev->sw_retry_failure); 7983 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7984 "Illegal rate phy errors", 7985 pdev->illgl_rate_phy_err); 7986 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7987 "PDEV continuous xretry", pdev->pdev_cont_xretry); 7988 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7989 "TX timeout", pdev->pdev_tx_timeout); 7990 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7991 "PDEV resets", pdev->pdev_resets); 7992 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7993 "Stateless TIDs alloc failures", 7994 pdev->stateless_tid_alloc_failure); 7995 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7996 "PHY underrun", pdev->phy_underrun); 7997 len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", 7998 "MPDU is more than txop limit", pdev->txop_ovf); 7999 *length = len; 8000 } 8001 8002 static void 8003 ath12k_wmi_fw_pdev_rx_stats_dump(const struct ath12k_fw_stats_pdev *pdev, 8004 char *buf, u32 *length) 8005 { 8006 u32 len = *length; 8007 u32 buf_len = ATH12K_FW_STATS_BUF_SIZE; 8008 8009 len += scnprintf(buf + len, buf_len - len, "\n%30s\n", 8010 "ath12k PDEV RX stats"); 8011 len += scnprintf(buf + len, buf_len - len, "%30s\n\n", 8012 "===================="); 8013 8014 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8015 "Mid PPDU route change", 8016 pdev->mid_ppdu_route_change); 8017 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8018 "Tot. 
number of statuses", pdev->status_rcvd); 8019 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8020 "Extra frags on rings 0", pdev->r0_frags); 8021 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8022 "Extra frags on rings 1", pdev->r1_frags); 8023 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8024 "Extra frags on rings 2", pdev->r2_frags); 8025 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8026 "Extra frags on rings 3", pdev->r3_frags); 8027 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8028 "MSDUs delivered to HTT", pdev->htt_msdus); 8029 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8030 "MPDUs delivered to HTT", pdev->htt_mpdus); 8031 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8032 "MSDUs delivered to stack", pdev->loc_msdus); 8033 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8034 "MPDUs delivered to stack", pdev->loc_mpdus); 8035 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8036 "Oversized AMSUs", pdev->oversize_amsdu); 8037 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8038 "PHY errors", pdev->phy_errs); 8039 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8040 "PHY errors drops", pdev->phy_err_drop); 8041 len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", 8042 "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs); 8043 *length = len; 8044 } 8045 8046 static void 8047 ath12k_wmi_fw_pdev_stats_dump(struct ath12k *ar, 8048 struct ath12k_fw_stats *fw_stats, 8049 char *buf, u32 *length) 8050 { 8051 const struct ath12k_fw_stats_pdev *pdev; 8052 u32 len = *length; 8053 8054 pdev = list_first_entry_or_null(&fw_stats->pdevs, 8055 struct ath12k_fw_stats_pdev, list); 8056 if (!pdev) { 8057 ath12k_warn(ar->ab, "failed to get pdev stats\n"); 8058 return; 8059 } 8060 8061 ath12k_wmi_fw_pdev_base_stats_dump(pdev, buf, &len, 8062 ar->ab->fw_soc_drop_count); 8063 ath12k_wmi_fw_pdev_tx_stats_dump(pdev, buf, &len); 8064 ath12k_wmi_fw_pdev_rx_stats_dump(pdev, buf, &len); 8065 8066 *length = len; 8067 } 8068 8069 void ath12k_wmi_fw_stats_dump(struct ath12k *ar, 8070 struct ath12k_fw_stats *fw_stats, 8071 u32 stats_id, char *buf) 8072 { 8073 u32 len = 0; 8074 u32 buf_len = ATH12K_FW_STATS_BUF_SIZE; 8075 8076 spin_lock_bh(&ar->data_lock); 8077 8078 switch (stats_id) { 8079 case WMI_REQUEST_VDEV_STAT: 8080 ath12k_wmi_fw_vdev_stats_dump(ar, fw_stats, buf, &len); 8081 break; 8082 case WMI_REQUEST_BCN_STAT: 8083 ath12k_wmi_fw_bcn_stats_dump(ar, fw_stats, buf, &len); 8084 break; 8085 case WMI_REQUEST_PDEV_STAT: 8086 ath12k_wmi_fw_pdev_stats_dump(ar, fw_stats, buf, &len); 8087 break; 8088 default: 8089 break; 8090 } 8091 8092 spin_unlock_bh(&ar->data_lock); 8093 8094 if (len >= buf_len) 8095 buf[len - 1] = 0; 8096 else 8097 buf[len] = 0; 8098 } 8099 8100 static void 8101 ath12k_wmi_pull_vdev_stats(const struct wmi_vdev_stats_params *src, 8102 struct ath12k_fw_stats_vdev *dst) 8103 { 8104 int i; 8105 8106 dst->vdev_id = le32_to_cpu(src->vdev_id); 8107 dst->beacon_snr = le32_to_cpu(src->beacon_snr); 8108 dst->data_snr = le32_to_cpu(src->data_snr); 8109 dst->num_rx_frames = le32_to_cpu(src->num_rx_frames); 8110 dst->num_rts_fail = le32_to_cpu(src->num_rts_fail); 8111 dst->num_rts_success = le32_to_cpu(src->num_rts_success); 8112 dst->num_rx_err = le32_to_cpu(src->num_rx_err); 8113 dst->num_rx_discard = le32_to_cpu(src->num_rx_discard); 8114 dst->num_tx_not_acked = le32_to_cpu(src->num_tx_not_acked); 8115 8116 for (i = 0; i < WLAN_MAX_AC; i++) 8117 dst->num_tx_frames[i] = 8118 
le32_to_cpu(src->num_tx_frames[i]); 8119 8120 for (i = 0; i < WLAN_MAX_AC; i++) 8121 dst->num_tx_frames_retries[i] = 8122 le32_to_cpu(src->num_tx_frames_retries[i]); 8123 8124 for (i = 0; i < WLAN_MAX_AC; i++) 8125 dst->num_tx_frames_failures[i] = 8126 le32_to_cpu(src->num_tx_frames_failures[i]); 8127 8128 for (i = 0; i < MAX_TX_RATE_VALUES; i++) 8129 dst->tx_rate_history[i] = 8130 le32_to_cpu(src->tx_rate_history[i]); 8131 8132 for (i = 0; i < MAX_TX_RATE_VALUES; i++) 8133 dst->beacon_rssi_history[i] = 8134 le32_to_cpu(src->beacon_rssi_history[i]); 8135 } 8136 8137 static void 8138 ath12k_wmi_pull_bcn_stats(const struct ath12k_wmi_bcn_stats_params *src, 8139 struct ath12k_fw_stats_bcn *dst) 8140 { 8141 dst->vdev_id = le32_to_cpu(src->vdev_id); 8142 dst->tx_bcn_succ_cnt = le32_to_cpu(src->tx_bcn_succ_cnt); 8143 dst->tx_bcn_outage_cnt = le32_to_cpu(src->tx_bcn_outage_cnt); 8144 } 8145 8146 static void 8147 ath12k_wmi_pull_pdev_stats_base(const struct ath12k_wmi_pdev_base_stats_params *src, 8148 struct ath12k_fw_stats_pdev *dst) 8149 { 8150 dst->ch_noise_floor = a_sle32_to_cpu(src->chan_nf); 8151 dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count); 8152 dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count); 8153 dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count); 8154 dst->cycle_count = __le32_to_cpu(src->cycle_count); 8155 dst->phy_err_count = __le32_to_cpu(src->phy_err_count); 8156 dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr); 8157 } 8158 8159 static void 8160 ath12k_wmi_pull_pdev_stats_tx(const struct ath12k_wmi_pdev_tx_stats_params *src, 8161 struct ath12k_fw_stats_pdev *dst) 8162 { 8163 dst->comp_queued = a_sle32_to_cpu(src->comp_queued); 8164 dst->comp_delivered = a_sle32_to_cpu(src->comp_delivered); 8165 dst->msdu_enqued = a_sle32_to_cpu(src->msdu_enqued); 8166 dst->mpdu_enqued = a_sle32_to_cpu(src->mpdu_enqued); 8167 dst->wmm_drop = a_sle32_to_cpu(src->wmm_drop); 8168 dst->local_enqued = a_sle32_to_cpu(src->local_enqued); 8169 dst->local_freed = a_sle32_to_cpu(src->local_freed); 8170 dst->hw_queued = a_sle32_to_cpu(src->hw_queued); 8171 dst->hw_reaped = a_sle32_to_cpu(src->hw_reaped); 8172 dst->underrun = a_sle32_to_cpu(src->underrun); 8173 dst->tx_abort = a_sle32_to_cpu(src->tx_abort); 8174 dst->mpdus_requed = a_sle32_to_cpu(src->mpdus_requed); 8175 dst->tx_ko = __le32_to_cpu(src->tx_ko); 8176 dst->data_rc = __le32_to_cpu(src->data_rc); 8177 dst->self_triggers = __le32_to_cpu(src->self_triggers); 8178 dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure); 8179 dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err); 8180 dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry); 8181 dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout); 8182 dst->pdev_resets = __le32_to_cpu(src->pdev_resets); 8183 dst->stateless_tid_alloc_failure = 8184 __le32_to_cpu(src->stateless_tid_alloc_failure); 8185 dst->phy_underrun = __le32_to_cpu(src->phy_underrun); 8186 dst->txop_ovf = __le32_to_cpu(src->txop_ovf); 8187 } 8188 8189 static void 8190 ath12k_wmi_pull_pdev_stats_rx(const struct ath12k_wmi_pdev_rx_stats_params *src, 8191 struct ath12k_fw_stats_pdev *dst) 8192 { 8193 dst->mid_ppdu_route_change = 8194 a_sle32_to_cpu(src->mid_ppdu_route_change); 8195 dst->status_rcvd = a_sle32_to_cpu(src->status_rcvd); 8196 dst->r0_frags = a_sle32_to_cpu(src->r0_frags); 8197 dst->r1_frags = a_sle32_to_cpu(src->r1_frags); 8198 dst->r2_frags = a_sle32_to_cpu(src->r2_frags); 8199 dst->r3_frags = a_sle32_to_cpu(src->r3_frags); 8200 dst->htt_msdus = 
a_sle32_to_cpu(src->htt_msdus); 8201 dst->htt_mpdus = a_sle32_to_cpu(src->htt_mpdus); 8202 dst->loc_msdus = a_sle32_to_cpu(src->loc_msdus); 8203 dst->loc_mpdus = a_sle32_to_cpu(src->loc_mpdus); 8204 dst->oversize_amsdu = a_sle32_to_cpu(src->oversize_amsdu); 8205 dst->phy_errs = a_sle32_to_cpu(src->phy_errs); 8206 dst->phy_err_drop = a_sle32_to_cpu(src->phy_err_drop); 8207 dst->mpdu_errs = a_sle32_to_cpu(src->mpdu_errs); 8208 } 8209 8210 static int ath12k_wmi_tlv_fw_stats_data_parse(struct ath12k_base *ab, 8211 struct wmi_tlv_fw_stats_parse *parse, 8212 const void *ptr, 8213 u16 len) 8214 { 8215 const struct wmi_stats_event *ev = parse->ev; 8216 struct ath12k_fw_stats *stats = parse->stats; 8217 struct ath12k *ar; 8218 struct ath12k_link_vif *arvif; 8219 struct ath12k_link_sta *arsta; 8220 int i, ret = 0; 8221 const void *data = ptr; 8222 8223 if (!ev) { 8224 ath12k_warn(ab, "failed to fetch update stats ev"); 8225 return -EPROTO; 8226 } 8227 8228 if (!stats) 8229 return -EINVAL; 8230 8231 rcu_read_lock(); 8232 8233 stats->pdev_id = le32_to_cpu(ev->pdev_id); 8234 ar = ath12k_mac_get_ar_by_pdev_id(ab, stats->pdev_id); 8235 if (!ar) { 8236 ath12k_warn(ab, "invalid pdev id %d in update stats event\n", 8237 le32_to_cpu(ev->pdev_id)); 8238 ret = -EPROTO; 8239 goto exit; 8240 } 8241 8242 for (i = 0; i < le32_to_cpu(ev->num_vdev_stats); i++) { 8243 const struct wmi_vdev_stats_params *src; 8244 struct ath12k_fw_stats_vdev *dst; 8245 8246 src = data; 8247 if (len < sizeof(*src)) { 8248 ret = -EPROTO; 8249 goto exit; 8250 } 8251 8252 arvif = ath12k_mac_get_arvif(ar, le32_to_cpu(src->vdev_id)); 8253 if (arvif) { 8254 spin_lock_bh(&ab->base_lock); 8255 arsta = ath12k_link_sta_find_by_addr(ab, arvif->bssid); 8256 if (arsta) { 8257 arsta->rssi_beacon = le32_to_cpu(src->beacon_snr); 8258 ath12k_dbg(ab, ATH12K_DBG_WMI, 8259 "wmi stats vdev id %d snr %d\n", 8260 src->vdev_id, src->beacon_snr); 8261 } else { 8262 ath12k_warn(ab, 8263 "not found link sta with bssid %pM for vdev stat\n", 8264 arvif->bssid); 8265 } 8266 spin_unlock_bh(&ab->base_lock); 8267 } 8268 8269 data += sizeof(*src); 8270 len -= sizeof(*src); 8271 dst = kzalloc_obj(*dst, GFP_ATOMIC); 8272 if (!dst) 8273 continue; 8274 ath12k_wmi_pull_vdev_stats(src, dst); 8275 stats->stats_id = WMI_REQUEST_VDEV_STAT; 8276 list_add_tail(&dst->list, &stats->vdevs); 8277 } 8278 for (i = 0; i < le32_to_cpu(ev->num_bcn_stats); i++) { 8279 const struct ath12k_wmi_bcn_stats_params *src; 8280 struct ath12k_fw_stats_bcn *dst; 8281 8282 src = data; 8283 if (len < sizeof(*src)) { 8284 ret = -EPROTO; 8285 goto exit; 8286 } 8287 8288 data += sizeof(*src); 8289 len -= sizeof(*src); 8290 dst = kzalloc_obj(*dst, GFP_ATOMIC); 8291 if (!dst) 8292 continue; 8293 ath12k_wmi_pull_bcn_stats(src, dst); 8294 stats->stats_id = WMI_REQUEST_BCN_STAT; 8295 list_add_tail(&dst->list, &stats->bcn); 8296 } 8297 for (i = 0; i < le32_to_cpu(ev->num_pdev_stats); i++) { 8298 const struct ath12k_wmi_pdev_stats_params *src; 8299 struct ath12k_fw_stats_pdev *dst; 8300 8301 src = data; 8302 if (len < sizeof(*src)) { 8303 ret = -EPROTO; 8304 goto exit; 8305 } 8306 8307 stats->stats_id = WMI_REQUEST_PDEV_STAT; 8308 8309 data += sizeof(*src); 8310 len -= sizeof(*src); 8311 8312 dst = kzalloc_obj(*dst, GFP_ATOMIC); 8313 if (!dst) 8314 continue; 8315 8316 ath12k_wmi_pull_pdev_stats_base(&src->base, dst); 8317 ath12k_wmi_pull_pdev_stats_tx(&src->tx, dst); 8318 ath12k_wmi_pull_pdev_stats_rx(&src->rx, dst); 8319 list_add_tail(&dst->list, &stats->pdevs); 8320 } 8321 8322 exit: 8323 rcu_read_unlock(); 
8324 return ret; 8325 } 8326 8327 static int ath12k_wmi_tlv_rssi_chain_parse(struct ath12k_base *ab, 8328 u16 tag, u16 len, 8329 const void *ptr, void *data) 8330 { 8331 const struct wmi_rssi_stat_params *stats_rssi = ptr; 8332 struct wmi_tlv_fw_stats_parse *parse = data; 8333 const struct wmi_stats_event *ev = parse->ev; 8334 struct ath12k_fw_stats *stats = parse->stats; 8335 struct ath12k_link_vif *arvif; 8336 struct ath12k_link_sta *arsta; 8337 struct ath12k *ar; 8338 int vdev_id; 8339 int j; 8340 8341 if (!ev) { 8342 ath12k_warn(ab, "failed to fetch update stats ev"); 8343 return -EPROTO; 8344 } 8345 8346 if (tag != WMI_TAG_RSSI_STATS) 8347 return -EPROTO; 8348 8349 if (!stats) 8350 return -EINVAL; 8351 8352 stats->pdev_id = le32_to_cpu(ev->pdev_id); 8353 vdev_id = le32_to_cpu(stats_rssi->vdev_id); 8354 guard(rcu)(); 8355 ar = ath12k_mac_get_ar_by_pdev_id(ab, stats->pdev_id); 8356 if (!ar) { 8357 ath12k_warn(ab, "invalid pdev id %d in rssi chain parse\n", 8358 stats->pdev_id); 8359 return -EPROTO; 8360 } 8361 8362 arvif = ath12k_mac_get_arvif(ar, vdev_id); 8363 if (!arvif) { 8364 ath12k_warn(ab, "not found vif for vdev id %d\n", vdev_id); 8365 return -EPROTO; 8366 } 8367 8368 ath12k_dbg(ab, ATH12K_DBG_WMI, 8369 "stats bssid %pM vif %p\n", 8370 arvif->bssid, arvif->ahvif->vif); 8371 8372 guard(spinlock_bh)(&ab->base_lock); 8373 arsta = ath12k_link_sta_find_by_addr(ab, arvif->bssid); 8374 if (!arsta) { 8375 ath12k_warn(ab, 8376 "not found link sta with bssid %pM for rssi chain\n", 8377 arvif->bssid); 8378 return -EPROTO; 8379 } 8380 8381 BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) > 8382 ARRAY_SIZE(stats_rssi->rssi_avg_beacon)); 8383 8384 for (j = 0; j < ARRAY_SIZE(arsta->chain_signal); j++) 8385 arsta->chain_signal[j] = le32_to_cpu(stats_rssi->rssi_avg_beacon[j]); 8386 8387 stats->stats_id = WMI_REQUEST_RSSI_PER_CHAIN_STAT; 8388 8389 return 0; 8390 } 8391 8392 static int ath12k_wmi_tlv_fw_stats_parse(struct ath12k_base *ab, 8393 u16 tag, u16 len, 8394 const void *ptr, void *data) 8395 { 8396 struct wmi_tlv_fw_stats_parse *parse = data; 8397 int ret = 0; 8398 8399 switch (tag) { 8400 case WMI_TAG_STATS_EVENT: 8401 parse->ev = ptr; 8402 break; 8403 case WMI_TAG_ARRAY_BYTE: 8404 ret = ath12k_wmi_tlv_fw_stats_data_parse(ab, parse, ptr, len); 8405 break; 8406 case WMI_TAG_PER_CHAIN_RSSI_STATS: 8407 parse->rssi = ptr; 8408 if (le32_to_cpu(parse->ev->stats_id) & WMI_REQUEST_RSSI_PER_CHAIN_STAT) 8409 parse->rssi_num = le32_to_cpu(parse->rssi->num_per_chain_rssi); 8410 break; 8411 case WMI_TAG_ARRAY_STRUCT: 8412 if (parse->rssi_num && !parse->chain_rssi_done) { 8413 ret = ath12k_wmi_tlv_iter(ab, ptr, len, 8414 ath12k_wmi_tlv_rssi_chain_parse, 8415 parse); 8416 if (ret) 8417 return ret; 8418 8419 parse->chain_rssi_done = true; 8420 } 8421 break; 8422 default: 8423 break; 8424 } 8425 return ret; 8426 } 8427 8428 static int ath12k_wmi_pull_fw_stats(struct ath12k_base *ab, struct sk_buff *skb, 8429 struct ath12k_fw_stats *stats) 8430 { 8431 struct wmi_tlv_fw_stats_parse parse = {}; 8432 8433 stats->stats_id = 0; 8434 parse.stats = stats; 8435 8436 return ath12k_wmi_tlv_iter(ab, skb->data, skb->len, 8437 ath12k_wmi_tlv_fw_stats_parse, 8438 &parse); 8439 } 8440 8441 static void ath12k_wmi_fw_stats_process(struct ath12k *ar, 8442 struct ath12k_fw_stats *stats) 8443 { 8444 struct ath12k_base *ab = ar->ab; 8445 struct ath12k_pdev *pdev; 8446 bool is_end = true; 8447 size_t total_vdevs_started = 0; 8448 int i; 8449 8450 if (stats->stats_id == WMI_REQUEST_VDEV_STAT) { 8451 if (list_empty(&stats->vdevs)) { 
8452 ath12k_warn(ab, "empty vdev stats"); 8453 return; 8454 } 8455 /* FW sends all the active VDEV stats irrespective of PDEV, 8456 * hence limit until the count of all VDEVs started 8457 */ 8458 rcu_read_lock(); 8459 for (i = 0; i < ab->num_radios; i++) { 8460 pdev = rcu_dereference(ab->pdevs_active[i]); 8461 if (pdev && pdev->ar) 8462 total_vdevs_started += pdev->ar->num_started_vdevs; 8463 } 8464 rcu_read_unlock(); 8465 8466 if (total_vdevs_started) 8467 is_end = ((++ar->fw_stats.num_vdev_recvd) == 8468 total_vdevs_started); 8469 8470 list_splice_tail_init(&stats->vdevs, 8471 &ar->fw_stats.vdevs); 8472 8473 if (is_end) 8474 complete(&ar->fw_stats_done); 8475 8476 return; 8477 } 8478 8479 if (stats->stats_id == WMI_REQUEST_BCN_STAT) { 8480 if (list_empty(&stats->bcn)) { 8481 ath12k_warn(ab, "empty beacon stats"); 8482 return; 8483 } 8484 8485 list_splice_tail_init(&stats->bcn, 8486 &ar->fw_stats.bcn); 8487 complete(&ar->fw_stats_done); 8488 } 8489 } 8490 8491 static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb) 8492 { 8493 struct ath12k_fw_stats stats = {}; 8494 struct ath12k *ar; 8495 int ret; 8496 8497 INIT_LIST_HEAD(&stats.pdevs); 8498 INIT_LIST_HEAD(&stats.vdevs); 8499 INIT_LIST_HEAD(&stats.bcn); 8500 8501 ret = ath12k_wmi_pull_fw_stats(ab, skb, &stats); 8502 if (ret) { 8503 ath12k_warn(ab, "failed to pull fw stats: %d\n", ret); 8504 goto free; 8505 } 8506 8507 ath12k_dbg(ab, ATH12K_DBG_WMI, "event update stats"); 8508 8509 rcu_read_lock(); 8510 ar = ath12k_mac_get_ar_by_pdev_id(ab, stats.pdev_id); 8511 if (!ar) { 8512 rcu_read_unlock(); 8513 ath12k_warn(ab, "failed to get ar for pdev_id %d: %d\n", 8514 stats.pdev_id, ret); 8515 goto free; 8516 } 8517 8518 spin_lock_bh(&ar->data_lock); 8519 8520 /* Handle WMI_REQUEST_PDEV_STAT status update */ 8521 if (stats.stats_id == WMI_REQUEST_PDEV_STAT) { 8522 list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs); 8523 complete(&ar->fw_stats_done); 8524 goto complete; 8525 } 8526 8527 /* Handle WMI_REQUEST_RSSI_PER_CHAIN_STAT status update */ 8528 if (stats.stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) { 8529 complete(&ar->fw_stats_done); 8530 goto complete; 8531 } 8532 8533 /* Handle WMI_REQUEST_VDEV_STAT and WMI_REQUEST_BCN_STAT updates. */ 8534 ath12k_wmi_fw_stats_process(ar, &stats); 8535 8536 complete: 8537 complete(&ar->fw_stats_complete); 8538 spin_unlock_bh(&ar->data_lock); 8539 rcu_read_unlock(); 8540 8541 /* Since the stats's pdev, vdev and beacon list are spliced and reinitialised 8542 * at this point, no need to free the individual list. 8543 */ 8544 return; 8545 8546 free: 8547 ath12k_fw_stats_free(&stats); 8548 } 8549 8550 /* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned 8551 * is not part of BDF CTL(Conformance test limits) table entries. 
8552 */
8553 static void ath12k_pdev_ctl_failsafe_check_event(struct ath12k_base *ab,
8554 struct sk_buff *skb)
8555 {
8556 const void **tb;
8557 const struct wmi_pdev_ctl_failsafe_chk_event *ev;
8558 int ret;
8559
8560 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8561 if (IS_ERR(tb)) {
8562 ret = PTR_ERR(tb);
8563 ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
8564 return;
8565 }
8566
8567 ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT];
8568 if (!ev) {
8569 ath12k_warn(ab, "failed to fetch pdev ctl failsafe check ev");
8570 kfree(tb);
8571 return;
8572 }
8573
8574 ath12k_dbg(ab, ATH12K_DBG_WMI,
8575 "pdev ctl failsafe check ev status %d\n",
8576 ev->ctl_failsafe_status);
8577
8578 /* If ctl_failsafe_status is set to 1, FW will max out the Transmit power
8579 * to 10 dBm else the CTL power entry in the BDF would be picked up.
8580 */
8581 if (ev->ctl_failsafe_status != 0)
8582 ath12k_warn(ab, "pdev ctl failsafe failure status %d",
8583 ev->ctl_failsafe_status);
8584
8585 kfree(tb);
8586 }
8587
8588 static void
8589 ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
8590 const struct ath12k_wmi_pdev_csa_event *ev,
8591 const u32 *vdev_ids)
8592 {
8593 u32 current_switch_count = le32_to_cpu(ev->current_switch_count);
8594 u32 num_vdevs = le32_to_cpu(ev->num_vdevs);
8595 struct ieee80211_bss_conf *conf;
8596 struct ath12k_link_vif *arvif;
8597 struct ath12k_vif *ahvif;
8598 int i;
8599
8600 rcu_read_lock();
8601 for (i = 0; i < num_vdevs; i++) {
8602 arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);
8603
8604 if (!arvif) {
8605 ath12k_warn(ab, "Recvd csa status for unknown vdev %d",
8606 vdev_ids[i]);
8607 continue;
8608 }
8609 ahvif = arvif->ahvif;
8610
8611 if (arvif->link_id >= IEEE80211_MLD_MAX_NUM_LINKS) {
8612 ath12k_warn(ab, "Invalid CSA switch count event link id: %d\n",
8613 arvif->link_id);
8614 continue;
8615 }
8616
8617 conf = rcu_dereference(ahvif->vif->link_conf[arvif->link_id]);
8618 if (!conf) {
8619 ath12k_warn(ab, "unable to access bss link conf in process csa for vif %pM link %u\n",
8620 ahvif->vif->addr, arvif->link_id);
8621 continue;
8622 }
8623
8624 if (!arvif->is_up || !conf->csa_active)
8625 continue;
8626
8627 /* Finish CSA when counter reaches zero */
8628 if (!current_switch_count) {
8629 ieee80211_csa_finish(ahvif->vif, arvif->link_id);
8630 arvif->current_cntdown_counter = 0;
8631 } else if (current_switch_count > 1) {
8632 /* If the count in event is not what we expect, don't update the
8633 * mac80211 count.
Since during beacon Tx failure, count in the 8634 * firmware will not decrement and this event will come with the 8635 * previous count value again 8636 */ 8637 if (current_switch_count != arvif->current_cntdown_counter) 8638 continue; 8639 8640 arvif->current_cntdown_counter = 8641 ieee80211_beacon_update_cntdwn(ahvif->vif, 8642 arvif->link_id); 8643 } 8644 } 8645 rcu_read_unlock(); 8646 } 8647 8648 static void 8649 ath12k_wmi_pdev_csa_switch_count_status_event(struct ath12k_base *ab, 8650 struct sk_buff *skb) 8651 { 8652 const void **tb; 8653 const struct ath12k_wmi_pdev_csa_event *ev; 8654 const u32 *vdev_ids; 8655 int ret; 8656 8657 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8658 if (IS_ERR(tb)) { 8659 ret = PTR_ERR(tb); 8660 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 8661 return; 8662 } 8663 8664 ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT]; 8665 vdev_ids = tb[WMI_TAG_ARRAY_UINT32]; 8666 8667 if (!ev || !vdev_ids) { 8668 ath12k_warn(ab, "failed to fetch pdev csa switch count ev"); 8669 kfree(tb); 8670 return; 8671 } 8672 8673 ath12k_dbg(ab, ATH12K_DBG_WMI, 8674 "pdev csa switch count %d for pdev %d, num_vdevs %d", 8675 ev->current_switch_count, ev->pdev_id, 8676 ev->num_vdevs); 8677 8678 ath12k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids); 8679 8680 kfree(tb); 8681 } 8682 8683 static void 8684 ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff *skb) 8685 { 8686 const void **tb; 8687 struct ath12k_mac_get_any_chanctx_conf_arg arg; 8688 const struct ath12k_wmi_pdev_radar_event *ev; 8689 struct ath12k *ar; 8690 int ret; 8691 8692 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8693 if (IS_ERR(tb)) { 8694 ret = PTR_ERR(tb); 8695 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 8696 return; 8697 } 8698 8699 ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT]; 8700 8701 if (!ev) { 8702 ath12k_warn(ab, "failed to fetch pdev dfs radar detected ev"); 8703 kfree(tb); 8704 return; 8705 } 8706 8707 ath12k_dbg(ab, ATH12K_DBG_WMI, 8708 "pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d", 8709 ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width, 8710 ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp, 8711 ev->freq_offset, ev->sidx); 8712 8713 rcu_read_lock(); 8714 8715 ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id)); 8716 8717 if (!ar) { 8718 ath12k_warn(ab, "radar detected in invalid pdev %d\n", 8719 ev->pdev_id); 8720 goto exit; 8721 } 8722 8723 arg.ar = ar; 8724 arg.chanctx_conf = NULL; 8725 ieee80211_iter_chan_contexts_atomic(ath12k_ar_to_hw(ar), 8726 ath12k_mac_get_any_chanctx_conf_iter, &arg); 8727 if (!arg.chanctx_conf) { 8728 ath12k_warn(ab, "failed to find valid chanctx_conf in radar detected event\n"); 8729 goto exit; 8730 } 8731 8732 ath12k_dbg(ar->ab, ATH12K_DBG_REG, "DFS Radar Detected in pdev %d\n", 8733 ev->pdev_id); 8734 8735 if (ar->dfs_block_radar_events) 8736 ath12k_info(ab, "DFS Radar detected, but ignored as requested\n"); 8737 else 8738 ieee80211_radar_detected(ath12k_ar_to_hw(ar), arg.chanctx_conf); 8739 8740 exit: 8741 rcu_read_unlock(); 8742 8743 kfree(tb); 8744 } 8745 8746 static void ath12k_tm_wmi_event_segmented(struct ath12k_base *ab, u32 cmd_id, 8747 struct sk_buff *skb) 8748 { 8749 const struct ath12k_wmi_ftm_event *ev; 8750 const void **tb; 8751 int ret; 8752 u16 length; 8753 8754 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8755 8756 if 
(IS_ERR(tb)) { 8757 ret = PTR_ERR(tb); 8758 ath12k_warn(ab, "failed to parse ftm event tlv: %d\n", ret); 8759 return; 8760 } 8761 8762 ev = tb[WMI_TAG_ARRAY_BYTE]; 8763 if (!ev) { 8764 ath12k_warn(ab, "failed to fetch ftm msg\n"); 8765 kfree(tb); 8766 return; 8767 } 8768 8769 length = skb->len - TLV_HDR_SIZE; 8770 ath12k_tm_process_event(ab, cmd_id, ev, length); 8771 kfree(tb); 8772 tb = NULL; 8773 } 8774 8775 static void 8776 ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab, 8777 struct sk_buff *skb) 8778 { 8779 const struct wmi_pdev_temperature_event *ev; 8780 struct ath12k *ar; 8781 const void **tb; 8782 int temp; 8783 u32 pdev_id; 8784 8785 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8786 if (IS_ERR(tb)) { 8787 ath12k_warn(ab, "failed to parse tlv: %ld\n", PTR_ERR(tb)); 8788 return; 8789 } 8790 8791 ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT]; 8792 if (!ev) { 8793 ath12k_warn(ab, "failed to fetch pdev temp ev\n"); 8794 kfree(tb); 8795 return; 8796 } 8797 8798 temp = a_sle32_to_cpu(ev->temp); 8799 pdev_id = le32_to_cpu(ev->pdev_id); 8800 8801 kfree(tb); 8802 8803 ath12k_dbg(ab, ATH12K_DBG_WMI, 8804 "pdev temperature ev temp %d pdev_id %u\n", 8805 temp, pdev_id); 8806 8807 rcu_read_lock(); 8808 8809 ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id); 8810 if (!ar) { 8811 ath12k_warn(ab, "invalid pdev id %u in pdev temperature ev\n", 8812 pdev_id); 8813 goto exit; 8814 } 8815 8816 ath12k_thermal_event_temperature(ar, temp); 8817 8818 exit: 8819 rcu_read_unlock(); 8820 } 8821 8822 static void ath12k_fils_discovery_event(struct ath12k_base *ab, 8823 struct sk_buff *skb) 8824 { 8825 const void **tb; 8826 const struct wmi_fils_discovery_event *ev; 8827 int ret; 8828 8829 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8830 if (IS_ERR(tb)) { 8831 ret = PTR_ERR(tb); 8832 ath12k_warn(ab, 8833 "failed to parse FILS discovery event tlv %d\n", 8834 ret); 8835 return; 8836 } 8837 8838 ev = tb[WMI_TAG_HOST_SWFDA_EVENT]; 8839 if (!ev) { 8840 ath12k_warn(ab, "failed to fetch FILS discovery event\n"); 8841 kfree(tb); 8842 return; 8843 } 8844 8845 ath12k_warn(ab, 8846 "FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n", 8847 ev->vdev_id, ev->fils_tt, ev->tbtt); 8848 8849 kfree(tb); 8850 } 8851 8852 static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab, 8853 struct sk_buff *skb) 8854 { 8855 const void **tb; 8856 const struct wmi_probe_resp_tx_status_event *ev; 8857 int ret; 8858 8859 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8860 if (IS_ERR(tb)) { 8861 ret = PTR_ERR(tb); 8862 ath12k_warn(ab, 8863 "failed to parse probe response transmission status event tlv: %d\n", 8864 ret); 8865 return; 8866 } 8867 8868 ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT]; 8869 if (!ev) { 8870 ath12k_warn(ab, 8871 "failed to fetch probe response transmission status event"); 8872 kfree(tb); 8873 return; 8874 } 8875 8876 if (ev->tx_status) 8877 ath12k_warn(ab, 8878 "Probe response transmission failed for vdev_id %u, status %u\n", 8879 ev->vdev_id, ev->tx_status); 8880 8881 kfree(tb); 8882 } 8883 8884 static int ath12k_wmi_p2p_noa_event(struct ath12k_base *ab, 8885 struct sk_buff *skb) 8886 { 8887 const void **tb; 8888 const struct wmi_p2p_noa_event *ev; 8889 const struct ath12k_wmi_p2p_noa_info *noa; 8890 struct ath12k *ar; 8891 int ret, vdev_id; 8892 8893 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8894 if (IS_ERR(tb)) { 8895 ret = PTR_ERR(tb); 8896 ath12k_warn(ab, "failed to parse P2P NoA TLV: %d\n", ret); 8897 return 
ret; 8898 } 8899 8900 ev = tb[WMI_TAG_P2P_NOA_EVENT]; 8901 noa = tb[WMI_TAG_P2P_NOA_INFO]; 8902 8903 if (!ev || !noa) { 8904 ret = -EPROTO; 8905 goto out; 8906 } 8907 8908 vdev_id = __le32_to_cpu(ev->vdev_id); 8909 8910 ath12k_dbg(ab, ATH12K_DBG_WMI, 8911 "wmi tlv p2p noa vdev_id %i descriptors %u\n", 8912 vdev_id, le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM)); 8913 8914 rcu_read_lock(); 8915 ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id); 8916 if (!ar) { 8917 ath12k_warn(ab, "invalid vdev id %d in P2P NoA event\n", 8918 vdev_id); 8919 ret = -EINVAL; 8920 goto unlock; 8921 } 8922 8923 ath12k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa); 8924 8925 ret = 0; 8926 8927 unlock: 8928 rcu_read_unlock(); 8929 out: 8930 kfree(tb); 8931 return ret; 8932 } 8933 8934 static void ath12k_rfkill_state_change_event(struct ath12k_base *ab, 8935 struct sk_buff *skb) 8936 { 8937 const struct wmi_rfkill_state_change_event *ev; 8938 const void **tb; 8939 int ret; 8940 8941 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8942 if (IS_ERR(tb)) { 8943 ret = PTR_ERR(tb); 8944 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 8945 return; 8946 } 8947 8948 ev = tb[WMI_TAG_RFKILL_EVENT]; 8949 if (!ev) { 8950 kfree(tb); 8951 return; 8952 } 8953 8954 ath12k_dbg(ab, ATH12K_DBG_MAC, 8955 "wmi tlv rfkill state change gpio %d type %d radio_state %d\n", 8956 le32_to_cpu(ev->gpio_pin_num), 8957 le32_to_cpu(ev->int_type), 8958 le32_to_cpu(ev->radio_state)); 8959 8960 spin_lock_bh(&ab->base_lock); 8961 ab->rfkill_radio_on = (ev->radio_state == cpu_to_le32(WMI_RFKILL_RADIO_STATE_ON)); 8962 spin_unlock_bh(&ab->base_lock); 8963 8964 queue_work(ab->workqueue, &ab->rfkill_work); 8965 kfree(tb); 8966 } 8967 8968 static void 8969 ath12k_wmi_diag_event(struct ath12k_base *ab, struct sk_buff *skb) 8970 { 8971 trace_ath12k_wmi_diag(ab, skb->data, skb->len); 8972 } 8973 8974 static void ath12k_wmi_twt_enable_event(struct ath12k_base *ab, 8975 struct sk_buff *skb) 8976 { 8977 const void **tb; 8978 const struct wmi_twt_enable_event *ev; 8979 int ret; 8980 8981 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 8982 if (IS_ERR(tb)) { 8983 ret = PTR_ERR(tb); 8984 ath12k_warn(ab, "failed to parse wmi twt enable status event tlv: %d\n", 8985 ret); 8986 return; 8987 } 8988 8989 ev = tb[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT]; 8990 if (!ev) { 8991 ath12k_warn(ab, "failed to fetch twt enable wmi event\n"); 8992 goto exit; 8993 } 8994 8995 ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt enable event pdev id %u status %u\n", 8996 le32_to_cpu(ev->pdev_id), 8997 le32_to_cpu(ev->status)); 8998 8999 exit: 9000 kfree(tb); 9001 } 9002 9003 static void ath12k_wmi_twt_disable_event(struct ath12k_base *ab, 9004 struct sk_buff *skb) 9005 { 9006 const void **tb; 9007 const struct wmi_twt_disable_event *ev; 9008 int ret; 9009 9010 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 9011 if (IS_ERR(tb)) { 9012 ret = PTR_ERR(tb); 9013 ath12k_warn(ab, "failed to parse wmi twt disable status event tlv: %d\n", 9014 ret); 9015 return; 9016 } 9017 9018 ev = tb[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT]; 9019 if (!ev) { 9020 ath12k_warn(ab, "failed to fetch twt disable wmi event\n"); 9021 goto exit; 9022 } 9023 9024 ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt disable event pdev id %d status %u\n", 9025 le32_to_cpu(ev->pdev_id), 9026 le32_to_cpu(ev->status)); 9027 9028 exit: 9029 kfree(tb); 9030 } 9031 9032 static int ath12k_wmi_wow_wakeup_host_parse(struct ath12k_base *ab, 9033 u16 tag, u16 len, 9034 const void *ptr, void *data) 9035 { 9036 const struct 
wmi_wow_ev_pg_fault_param *pf_param; 9037 const struct wmi_wow_ev_param *param; 9038 struct wmi_wow_ev_arg *arg = data; 9039 int pf_len; 9040 9041 switch (tag) { 9042 case WMI_TAG_WOW_EVENT_INFO: 9043 param = ptr; 9044 arg->wake_reason = le32_to_cpu(param->wake_reason); 9045 ath12k_dbg(ab, ATH12K_DBG_WMI, "wow wakeup host reason %d %s\n", 9046 arg->wake_reason, wow_reason(arg->wake_reason)); 9047 break; 9048 9049 case WMI_TAG_ARRAY_BYTE: 9050 if (arg && arg->wake_reason == WOW_REASON_PAGE_FAULT) { 9051 pf_param = ptr; 9052 pf_len = le32_to_cpu(pf_param->len); 9053 if (pf_len > len - sizeof(pf_len) || 9054 pf_len < 0) { 9055 ath12k_warn(ab, "invalid wo reason page fault buffer len %d\n", 9056 pf_len); 9057 return -EINVAL; 9058 } 9059 ath12k_dbg(ab, ATH12K_DBG_WMI, "wow_reason_page_fault len %d\n", 9060 pf_len); 9061 ath12k_dbg_dump(ab, ATH12K_DBG_WMI, 9062 "wow_reason_page_fault packet present", 9063 "wow_pg_fault ", 9064 pf_param->data, 9065 pf_len); 9066 } 9067 break; 9068 default: 9069 break; 9070 } 9071 9072 return 0; 9073 } 9074 9075 static void ath12k_wmi_event_wow_wakeup_host(struct ath12k_base *ab, struct sk_buff *skb) 9076 { 9077 struct wmi_wow_ev_arg arg = { }; 9078 int ret; 9079 9080 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, 9081 ath12k_wmi_wow_wakeup_host_parse, 9082 &arg); 9083 if (ret) { 9084 ath12k_warn(ab, "failed to parse wmi wow wakeup host event tlv: %d\n", 9085 ret); 9086 return; 9087 } 9088 9089 complete(&ab->wow.wakeup_completed); 9090 } 9091 9092 static void ath12k_wmi_gtk_offload_status_event(struct ath12k_base *ab, 9093 struct sk_buff *skb) 9094 { 9095 const struct wmi_gtk_offload_status_event *ev; 9096 struct ath12k_link_vif *arvif; 9097 __be64 replay_ctr_be; 9098 u64 replay_ctr; 9099 const void **tb; 9100 int ret; 9101 9102 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 9103 if (IS_ERR(tb)) { 9104 ret = PTR_ERR(tb); 9105 ath12k_warn(ab, "failed to parse tlv: %d\n", ret); 9106 return; 9107 } 9108 9109 ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT]; 9110 if (!ev) { 9111 ath12k_warn(ab, "failed to fetch gtk offload status ev"); 9112 kfree(tb); 9113 return; 9114 } 9115 9116 rcu_read_lock(); 9117 arvif = ath12k_mac_get_arvif_by_vdev_id(ab, le32_to_cpu(ev->vdev_id)); 9118 if (!arvif) { 9119 rcu_read_unlock(); 9120 ath12k_warn(ab, "failed to get arvif for vdev_id:%d\n", 9121 le32_to_cpu(ev->vdev_id)); 9122 kfree(tb); 9123 return; 9124 } 9125 9126 replay_ctr = le64_to_cpu(ev->replay_ctr); 9127 arvif->rekey_data.replay_ctr = replay_ctr; 9128 ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi gtk offload event refresh_cnt %d replay_ctr %llu\n", 9129 le32_to_cpu(ev->refresh_cnt), replay_ctr); 9130 9131 /* supplicant expects big-endian replay counter */ 9132 replay_ctr_be = cpu_to_be64(replay_ctr); 9133 9134 ieee80211_gtk_rekey_notify(arvif->ahvif->vif, arvif->bssid, 9135 (void *)&replay_ctr_be, GFP_ATOMIC); 9136 9137 rcu_read_unlock(); 9138 9139 kfree(tb); 9140 } 9141 9142 static void ath12k_wmi_event_mlo_setup_complete(struct ath12k_base *ab, 9143 struct sk_buff *skb) 9144 { 9145 const struct wmi_mlo_setup_complete_event *ev; 9146 struct ath12k *ar = NULL; 9147 struct ath12k_pdev *pdev; 9148 const void **tb; 9149 int ret, i; 9150 9151 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 9152 if (IS_ERR(tb)) { 9153 ret = PTR_ERR(tb); 9154 ath12k_warn(ab, "failed to parse mlo setup complete event tlv: %d\n", 9155 ret); 9156 return; 9157 } 9158 9159 ev = tb[WMI_TAG_MLO_SETUP_COMPLETE_EVENT]; 9160 if (!ev) { 9161 ath12k_warn(ab, "failed to fetch mlo setup complete event\n"); 9162 
kfree(tb); 9163 return; 9164 } 9165 9166 if (le32_to_cpu(ev->pdev_id) > ab->num_radios) 9167 goto skip_lookup; 9168 9169 for (i = 0; i < ab->num_radios; i++) { 9170 pdev = &ab->pdevs[i]; 9171 if (pdev && pdev->pdev_id == le32_to_cpu(ev->pdev_id)) { 9172 ar = pdev->ar; 9173 break; 9174 } 9175 } 9176 9177 skip_lookup: 9178 if (!ar) { 9179 ath12k_warn(ab, "invalid pdev_id %d status %u in setup complete event\n", 9180 ev->pdev_id, ev->status); 9181 goto out; 9182 } 9183 9184 ar->mlo_setup_status = le32_to_cpu(ev->status); 9185 complete(&ar->mlo_setup_done); 9186 9187 out: 9188 kfree(tb); 9189 } 9190 9191 static void ath12k_wmi_event_teardown_complete(struct ath12k_base *ab, 9192 struct sk_buff *skb) 9193 { 9194 const struct wmi_mlo_teardown_complete_event *ev; 9195 const void **tb; 9196 int ret; 9197 9198 tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); 9199 if (IS_ERR(tb)) { 9200 ret = PTR_ERR(tb); 9201 ath12k_warn(ab, "failed to parse teardown complete event tlv: %d\n", ret); 9202 return; 9203 } 9204 9205 ev = tb[WMI_TAG_MLO_TEARDOWN_COMPLETE]; 9206 if (!ev) { 9207 ath12k_warn(ab, "failed to fetch teardown complete event\n"); 9208 kfree(tb); 9209 return; 9210 } 9211 9212 kfree(tb); 9213 } 9214 9215 #ifdef CONFIG_ATH12K_DEBUGFS 9216 static int ath12k_wmi_tpc_stats_copy_buffer(struct ath12k_base *ab, 9217 const void *ptr, u16 tag, u16 len, 9218 struct wmi_tpc_stats_arg *tpc_stats) 9219 { 9220 u32 len1, len2, len3, len4; 9221 s16 *dst_ptr; 9222 s8 *dst_ptr_ctl; 9223 9224 len1 = le32_to_cpu(tpc_stats->max_reg_allowed_power.tpc_reg_pwr.reg_array_len); 9225 len2 = le32_to_cpu(tpc_stats->rates_array1.tpc_rates_array.rate_array_len); 9226 len3 = le32_to_cpu(tpc_stats->rates_array2.tpc_rates_array.rate_array_len); 9227 len4 = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.ctl_array_len); 9228 9229 switch (tpc_stats->event_count) { 9230 case ATH12K_TPC_STATS_CONFIG_REG_PWR_EVENT: 9231 if (len1 > len) 9232 return -ENOBUFS; 9233 9234 if (tpc_stats->tlvs_rcvd & WMI_TPC_REG_PWR_ALLOWED) { 9235 dst_ptr = tpc_stats->max_reg_allowed_power.reg_pwr_array; 9236 memcpy(dst_ptr, ptr, len1); 9237 } 9238 break; 9239 case ATH12K_TPC_STATS_RATES_EVENT1: 9240 if (len2 > len) 9241 return -ENOBUFS; 9242 9243 if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY1) { 9244 dst_ptr = tpc_stats->rates_array1.rate_array; 9245 memcpy(dst_ptr, ptr, len2); 9246 } 9247 break; 9248 case ATH12K_TPC_STATS_RATES_EVENT2: 9249 if (len3 > len) 9250 return -ENOBUFS; 9251 9252 if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY2) { 9253 dst_ptr = tpc_stats->rates_array2.rate_array; 9254 memcpy(dst_ptr, ptr, len3); 9255 } 9256 break; 9257 case ATH12K_TPC_STATS_CTL_TABLE_EVENT: 9258 if (len4 > len) 9259 return -ENOBUFS; 9260 9261 if (tpc_stats->tlvs_rcvd & WMI_TPC_CTL_PWR_ARRAY) { 9262 dst_ptr_ctl = tpc_stats->ctl_array.ctl_pwr_table; 9263 memcpy(dst_ptr_ctl, ptr, len4); 9264 } 9265 break; 9266 } 9267 return 0; 9268 } 9269 9270 static int ath12k_tpc_get_reg_pwr(struct ath12k_base *ab, 9271 struct wmi_tpc_stats_arg *tpc_stats, 9272 struct wmi_max_reg_power_fixed_params *ev) 9273 { 9274 struct wmi_max_reg_power_allowed_arg *reg_pwr; 9275 u32 total_size; 9276 9277 ath12k_dbg(ab, ATH12K_DBG_WMI, 9278 "Received reg power array type %d length %d for tpc stats\n", 9279 ev->reg_power_type, ev->reg_array_len); 9280 9281 switch (le32_to_cpu(ev->reg_power_type)) { 9282 case TPC_STATS_REG_PWR_ALLOWED_TYPE: 9283 reg_pwr = &tpc_stats->max_reg_allowed_power; 9284 break; 9285 default: 9286 return -EINVAL; 9287 } 9288 9289 /* Each entry is 2 byte hence multiplying 
	 * the indices with 2
	 */
	total_size = le32_to_cpu(ev->d1) * le32_to_cpu(ev->d2) *
		     le32_to_cpu(ev->d3) * le32_to_cpu(ev->d4) * 2;
	if (le32_to_cpu(ev->reg_array_len) != total_size) {
		ath12k_warn(ab,
			    "Total size and reg_array_len doesn't match for tpc stats\n");
		return -EINVAL;
	}

	memcpy(&reg_pwr->tpc_reg_pwr, ev, sizeof(struct wmi_max_reg_power_fixed_params));

	reg_pwr->reg_pwr_array = kzalloc(le32_to_cpu(reg_pwr->tpc_reg_pwr.reg_array_len),
					 GFP_ATOMIC);
	if (!reg_pwr->reg_pwr_array)
		return -ENOMEM;

	tpc_stats->tlvs_rcvd |= WMI_TPC_REG_PWR_ALLOWED;

	return 0;
}

static int ath12k_tpc_get_rate_array(struct ath12k_base *ab,
				     struct wmi_tpc_stats_arg *tpc_stats,
				     struct wmi_tpc_rates_array_fixed_params *ev)
{
	struct wmi_tpc_rates_array_arg *rates_array;
	u32 flag = 0, rate_array_len;

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "Received rates array type %d length %d for tpc stats\n",
		   ev->rate_array_type, ev->rate_array_len);

	switch (le32_to_cpu(ev->rate_array_type)) {
	case ATH12K_TPC_STATS_RATES_ARRAY1:
		rates_array = &tpc_stats->rates_array1;
		flag = WMI_TPC_RATES_ARRAY1;
		break;
	case ATH12K_TPC_STATS_RATES_ARRAY2:
		rates_array = &tpc_stats->rates_array2;
		flag = WMI_TPC_RATES_ARRAY2;
		break;
	default:
		ath12k_warn(ab,
			    "Received invalid type of rates array for tpc stats\n");
		return -EINVAL;
	}
	memcpy(&rates_array->tpc_rates_array, ev,
	       sizeof(struct wmi_tpc_rates_array_fixed_params));
	rate_array_len = le32_to_cpu(rates_array->tpc_rates_array.rate_array_len);
	rates_array->rate_array = kzalloc(rate_array_len, GFP_ATOMIC);
	if (!rates_array->rate_array)
		return -ENOMEM;

	tpc_stats->tlvs_rcvd |= flag;
	return 0;
}

static int ath12k_tpc_get_ctl_pwr_tbl(struct ath12k_base *ab,
				      struct wmi_tpc_stats_arg *tpc_stats,
				      struct wmi_tpc_ctl_pwr_fixed_params *ev)
{
	struct wmi_tpc_ctl_pwr_table_arg *ctl_array;
	u32 total_size, ctl_array_len, flag = 0;

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "Received ctl array type %d length %d for tpc stats\n",
		   ev->ctl_array_type, ev->ctl_array_len);

	switch (le32_to_cpu(ev->ctl_array_type)) {
	case ATH12K_TPC_STATS_CTL_ARRAY:
		ctl_array = &tpc_stats->ctl_array;
		flag = WMI_TPC_CTL_PWR_ARRAY;
		break;
	default:
		ath12k_warn(ab,
			    "Received invalid type of ctl pwr table for tpc stats\n");
		return -EINVAL;
	}

	total_size = le32_to_cpu(ev->d1) * le32_to_cpu(ev->d2) *
		     le32_to_cpu(ev->d3) * le32_to_cpu(ev->d4);
	if (le32_to_cpu(ev->ctl_array_len) != total_size) {
		ath12k_warn(ab,
			    "Total size and ctl_array_len doesn't match for tpc stats\n");
		return -EINVAL;
	}

	memcpy(&ctl_array->tpc_ctl_pwr, ev, sizeof(struct wmi_tpc_ctl_pwr_fixed_params));
	ctl_array_len = le32_to_cpu(ctl_array->tpc_ctl_pwr.ctl_array_len);
	ctl_array->ctl_pwr_table = kzalloc(ctl_array_len, GFP_ATOMIC);
	if (!ctl_array->ctl_pwr_table)
		return -ENOMEM;

	tpc_stats->tlvs_rcvd |= flag;
	return 0;
}

static int ath12k_wmi_tpc_stats_subtlv_parser(struct ath12k_base *ab,
					      u16 tag, u16 len,
					      const void *ptr, void *data)
{
	struct wmi_tpc_rates_array_fixed_params *tpc_rates_array;
	struct wmi_max_reg_power_fixed_params *tpc_reg_pwr;
	struct wmi_tpc_ctl_pwr_fixed_params *tpc_ctl_pwr;
	struct
wmi_tpc_stats_arg *tpc_stats = data; 9394 struct wmi_tpc_config_params *tpc_config; 9395 int ret = 0; 9396 9397 if (!tpc_stats) { 9398 ath12k_warn(ab, "tpc stats memory unavailable\n"); 9399 return -EINVAL; 9400 } 9401 9402 switch (tag) { 9403 case WMI_TAG_TPC_STATS_CONFIG_EVENT: 9404 tpc_config = (struct wmi_tpc_config_params *)ptr; 9405 memcpy(&tpc_stats->tpc_config, tpc_config, 9406 sizeof(struct wmi_tpc_config_params)); 9407 break; 9408 case WMI_TAG_TPC_STATS_REG_PWR_ALLOWED: 9409 tpc_reg_pwr = (struct wmi_max_reg_power_fixed_params *)ptr; 9410 ret = ath12k_tpc_get_reg_pwr(ab, tpc_stats, tpc_reg_pwr); 9411 break; 9412 case WMI_TAG_TPC_STATS_RATES_ARRAY: 9413 tpc_rates_array = (struct wmi_tpc_rates_array_fixed_params *)ptr; 9414 ret = ath12k_tpc_get_rate_array(ab, tpc_stats, tpc_rates_array); 9415 break; 9416 case WMI_TAG_TPC_STATS_CTL_PWR_TABLE_EVENT: 9417 tpc_ctl_pwr = (struct wmi_tpc_ctl_pwr_fixed_params *)ptr; 9418 ret = ath12k_tpc_get_ctl_pwr_tbl(ab, tpc_stats, tpc_ctl_pwr); 9419 break; 9420 default: 9421 ath12k_warn(ab, 9422 "Received invalid tag for tpc stats in subtlvs\n"); 9423 return -EINVAL; 9424 } 9425 return ret; 9426 } 9427 9428 static int ath12k_wmi_tpc_stats_event_parser(struct ath12k_base *ab, 9429 u16 tag, u16 len, 9430 const void *ptr, void *data) 9431 { 9432 struct wmi_tpc_stats_arg *tpc_stats = (struct wmi_tpc_stats_arg *)data; 9433 int ret; 9434 9435 switch (tag) { 9436 case WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM: 9437 ret = 0; 9438 /* Fixed param is already processed*/ 9439 break; 9440 case WMI_TAG_ARRAY_STRUCT: 9441 /* len 0 is expected for array of struct when there 9442 * is no content of that type to pack inside that tlv 9443 */ 9444 if (len == 0) 9445 return 0; 9446 ret = ath12k_wmi_tlv_iter(ab, ptr, len, 9447 ath12k_wmi_tpc_stats_subtlv_parser, 9448 tpc_stats); 9449 break; 9450 case WMI_TAG_ARRAY_INT16: 9451 if (len == 0) 9452 return 0; 9453 ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr, 9454 WMI_TAG_ARRAY_INT16, 9455 len, tpc_stats); 9456 break; 9457 case WMI_TAG_ARRAY_BYTE: 9458 if (len == 0) 9459 return 0; 9460 ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr, 9461 WMI_TAG_ARRAY_BYTE, 9462 len, tpc_stats); 9463 break; 9464 default: 9465 ath12k_warn(ab, "Received invalid tag for tpc stats\n"); 9466 ret = -EINVAL; 9467 break; 9468 } 9469 return ret; 9470 } 9471 9472 void ath12k_wmi_free_tpc_stats_mem(struct ath12k *ar) 9473 { 9474 struct wmi_tpc_stats_arg *tpc_stats = ar->debug.tpc_stats; 9475 9476 lockdep_assert_held(&ar->data_lock); 9477 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "tpc stats mem free\n"); 9478 if (tpc_stats) { 9479 kfree(tpc_stats->max_reg_allowed_power.reg_pwr_array); 9480 kfree(tpc_stats->rates_array1.rate_array); 9481 kfree(tpc_stats->rates_array2.rate_array); 9482 kfree(tpc_stats->ctl_array.ctl_pwr_table); 9483 kfree(tpc_stats); 9484 ar->debug.tpc_stats = NULL; 9485 } 9486 } 9487 9488 static void ath12k_wmi_process_tpc_stats(struct ath12k_base *ab, 9489 struct sk_buff *skb) 9490 { 9491 struct ath12k_wmi_pdev_tpc_stats_event_fixed_params *fixed_param; 9492 struct wmi_tpc_stats_arg *tpc_stats; 9493 const struct wmi_tlv *tlv; 9494 void *ptr = skb->data; 9495 struct ath12k *ar; 9496 u16 tlv_tag; 9497 u32 event_count; 9498 int ret; 9499 9500 if (!skb->data) { 9501 ath12k_warn(ab, "No data present in tpc stats event\n"); 9502 return; 9503 } 9504 9505 if (skb->len < (sizeof(*fixed_param) + TLV_HDR_SIZE)) { 9506 ath12k_warn(ab, "TPC stats event size invalid\n"); 9507 return; 9508 } 9509 9510 tlv = (struct wmi_tlv *)ptr; 9511 tlv_tag = 
le32_get_bits(tlv->header, WMI_TLV_TAG); 9512 ptr += sizeof(*tlv); 9513 9514 if (tlv_tag != WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM) { 9515 ath12k_warn(ab, "TPC stats without fixed param tlv at start\n"); 9516 return; 9517 } 9518 9519 fixed_param = (struct ath12k_wmi_pdev_tpc_stats_event_fixed_params *)ptr; 9520 rcu_read_lock(); 9521 ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(fixed_param->pdev_id) + 1); 9522 if (!ar) { 9523 ath12k_warn(ab, "Failed to get ar for tpc stats\n"); 9524 rcu_read_unlock(); 9525 return; 9526 } 9527 spin_lock_bh(&ar->data_lock); 9528 if (!ar->debug.tpc_request) { 9529 /* Event is received either without request or the 9530 * timeout, if memory is already allocated free it 9531 */ 9532 if (ar->debug.tpc_stats) { 9533 ath12k_warn(ab, "Freeing memory for tpc_stats\n"); 9534 ath12k_wmi_free_tpc_stats_mem(ar); 9535 } 9536 goto unlock; 9537 } 9538 9539 event_count = le32_to_cpu(fixed_param->event_count); 9540 if (event_count == 0) { 9541 if (ar->debug.tpc_stats) { 9542 ath12k_warn(ab, 9543 "Invalid tpc memory present\n"); 9544 goto unlock; 9545 } 9546 ar->debug.tpc_stats = 9547 kzalloc_obj(struct wmi_tpc_stats_arg, GFP_ATOMIC); 9548 if (!ar->debug.tpc_stats) { 9549 ath12k_warn(ab, 9550 "Failed to allocate memory for tpc stats\n"); 9551 goto unlock; 9552 } 9553 } 9554 9555 tpc_stats = ar->debug.tpc_stats; 9556 if (!tpc_stats) { 9557 ath12k_warn(ab, "tpc stats memory unavailable\n"); 9558 goto unlock; 9559 } 9560 9561 if (!(event_count == 0)) { 9562 if (event_count != tpc_stats->event_count + 1) { 9563 ath12k_warn(ab, 9564 "Invalid tpc event received\n"); 9565 goto unlock; 9566 } 9567 } 9568 tpc_stats->pdev_id = le32_to_cpu(fixed_param->pdev_id); 9569 tpc_stats->end_of_event = le32_to_cpu(fixed_param->end_of_event); 9570 tpc_stats->event_count = le32_to_cpu(fixed_param->event_count); 9571 ath12k_dbg(ab, ATH12K_DBG_WMI, 9572 "tpc stats event_count %d\n", 9573 tpc_stats->event_count); 9574 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, 9575 ath12k_wmi_tpc_stats_event_parser, 9576 tpc_stats); 9577 if (ret) { 9578 ath12k_wmi_free_tpc_stats_mem(ar); 9579 ath12k_warn(ab, "failed to parse tpc_stats tlv: %d\n", ret); 9580 goto unlock; 9581 } 9582 9583 if (tpc_stats->end_of_event) 9584 complete(&ar->debug.tpc_complete); 9585 9586 unlock: 9587 spin_unlock_bh(&ar->data_lock); 9588 rcu_read_unlock(); 9589 } 9590 #else 9591 static void ath12k_wmi_process_tpc_stats(struct ath12k_base *ab, 9592 struct sk_buff *skb) 9593 { 9594 } 9595 #endif 9596 9597 static int 9598 ath12k_wmi_rssi_dbm_conv_info_evt_subtlv_parser(struct ath12k_base *ab, 9599 u16 tag, u16 len, 9600 const void *ptr, void *data) 9601 { 9602 const struct ath12k_wmi_rssi_dbm_conv_temp_info_params *temp_info; 9603 const struct ath12k_wmi_rssi_dbm_conv_info_params *param_info; 9604 struct ath12k_wmi_rssi_dbm_conv_info_arg *rssi_info = data; 9605 struct ath12k_wmi_rssi_dbm_conv_param_arg param_arg; 9606 s32 nf_hw_dbm[ATH12K_MAX_NUM_NF_HW_DBM]; 9607 u8 num_20mhz_segments; 9608 s8 min_nf, *nf_ptr; 9609 int i, j; 9610 9611 switch (tag) { 9612 case WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO: 9613 if (len < sizeof(*param_info)) { 9614 ath12k_warn(ab, 9615 "RSSI dbm conv subtlv 0x%x invalid len %d rcvd", 9616 tag, len); 9617 return -EINVAL; 9618 } 9619 9620 param_info = ptr; 9621 9622 param_arg.curr_bw = le32_to_cpu(param_info->curr_bw); 9623 param_arg.curr_rx_chainmask = le32_to_cpu(param_info->curr_rx_chainmask); 9624 9625 /* The received array is actually a 2D byte-array for per chain, 9626 * per 20MHz subband. 
		 * Each 32-bit word packs four signed byte values; unpack them here.
		 */
		nf_ptr = &param_arg.nf_hw_dbm[0][0];

		for (i = 0; i < ATH12K_MAX_NUM_NF_HW_DBM; i++) {
			nf_hw_dbm[i] = a_sle32_to_cpu(param_info->nf_hw_dbm[i]);

			for (j = 0; j < 4; j++) {
				*nf_ptr = (nf_hw_dbm[i] >> (j * 8)) & 0xFF;
				nf_ptr++;
			}
		}

		switch (param_arg.curr_bw) {
		case WMI_CHAN_WIDTH_20:
			num_20mhz_segments = 1;
			break;
		case WMI_CHAN_WIDTH_40:
			num_20mhz_segments = 2;
			break;
		case WMI_CHAN_WIDTH_80:
			num_20mhz_segments = 4;
			break;
		case WMI_CHAN_WIDTH_160:
			num_20mhz_segments = 8;
			break;
		case WMI_CHAN_WIDTH_320:
			num_20mhz_segments = 16;
			break;
		default:
			ath12k_warn(ab, "Invalid current bandwidth %d in RSSI dbm event",
				    param_arg.curr_bw);
			/* In the error case, still consider the primary 20 MHz segment
			 * since that is much better than dropping the whole event
			 */
			num_20mhz_segments = 1;
		}

		min_nf = ATH12K_DEFAULT_NOISE_FLOOR;

		for (i = 0; i < ATH12K_MAX_NUM_ANTENNA; i++) {
			if (!(param_arg.curr_rx_chainmask & BIT(i)))
				continue;

			for (j = 0; j < num_20mhz_segments; j++) {
				if (param_arg.nf_hw_dbm[i][j] < min_nf)
					min_nf = param_arg.nf_hw_dbm[i][j];
			}
		}

		rssi_info->min_nf_dbm = min_nf;
		rssi_info->nf_dbm_present = true;
		break;
	case WMI_TAG_RSSI_DBM_CONVERSION_TEMP_OFFSET_INFO:
		if (len < sizeof(*temp_info)) {
			ath12k_warn(ab,
				    "RSSI dbm conv subtlv 0x%x invalid len %d rcvd",
				    tag, len);
			return -EINVAL;
		}

		temp_info = ptr;
		rssi_info->temp_offset = a_sle32_to_cpu(temp_info->offset);
		rssi_info->temp_offset_present = true;
		break;
	default:
		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "Unknown subtlv 0x%x in RSSI dbm conversion event\n", tag);
	}

	return 0;
}

static int
ath12k_wmi_rssi_dbm_conv_info_event_parser(struct ath12k_base *ab,
					   u16 tag, u16 len,
					   const void *ptr, void *data)
{
	int ret = 0;

	switch (tag) {
	case WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO_FIXED_PARAM:
		/* Fixed param is already processed */
		break;
	case WMI_TAG_ARRAY_STRUCT:
		/* len 0 is expected for array of struct when there
		 * is no content of that type inside that tlv
		 */
		if (len == 0)
			return 0;

		ret = ath12k_wmi_tlv_iter(ab, ptr, len,
					  ath12k_wmi_rssi_dbm_conv_info_evt_subtlv_parser,
					  data);
		break;
	default:
		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "Received invalid tag 0x%x for RSSI dbm conv info event\n",
			   tag);
		break;
	}

	return ret;
}

static int
ath12k_wmi_rssi_dbm_conv_info_process_fixed_param(struct ath12k_base *ab, u8 *ptr,
						   size_t len, u32 *pdev_id)
{
	struct ath12k_wmi_rssi_dbm_conv_info_fixed_params *fixed_param;
	const struct wmi_tlv *tlv;
	u16 tlv_tag;

	if (len < (sizeof(*fixed_param) + TLV_HDR_SIZE)) {
		ath12k_warn(ab, "invalid RSSI dbm conv event size %zu\n", len);
		return -EINVAL;
	}

	tlv = (struct wmi_tlv *)ptr;
	tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
	ptr += sizeof(*tlv);

	if (tlv_tag != WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO_FIXED_PARAM) {
		ath12k_warn(ab, "RSSI dbm conv event received without fixed param tlv\n");
		return -EINVAL;
	}

	fixed_param = (struct ath12k_wmi_rssi_dbm_conv_info_fixed_params *)ptr;
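	/* Only the pdev_id is needed from the fixed param here; the per-chain
	 * noise floor and the temperature offset are taken from the sub-TLVs
	 * parsed afterwards.
	 */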
*pdev_id = le32_to_cpu(fixed_param->pdev_id); 9756 9757 return 0; 9758 } 9759 9760 static void 9761 ath12k_wmi_update_rssi_offsets(struct ath12k *ar, 9762 struct ath12k_wmi_rssi_dbm_conv_info_arg *rssi_info) 9763 { 9764 struct ath12k_pdev_rssi_offsets *info = &ar->rssi_info; 9765 9766 lockdep_assert_held(&ar->data_lock); 9767 9768 if (rssi_info->temp_offset_present) 9769 info->temp_offset = rssi_info->temp_offset; 9770 9771 if (rssi_info->nf_dbm_present) 9772 info->min_nf_dbm = rssi_info->min_nf_dbm; 9773 9774 info->noise_floor = info->min_nf_dbm + info->temp_offset; 9775 } 9776 9777 static void 9778 ath12k_wmi_rssi_dbm_conversion_params_info_event(struct ath12k_base *ab, 9779 struct sk_buff *skb) 9780 { 9781 struct ath12k_wmi_rssi_dbm_conv_info_arg rssi_info; 9782 struct ath12k *ar; 9783 s32 noise_floor; 9784 u32 pdev_id; 9785 int ret; 9786 9787 ret = ath12k_wmi_rssi_dbm_conv_info_process_fixed_param(ab, skb->data, skb->len, 9788 &pdev_id); 9789 if (ret) { 9790 ath12k_warn(ab, "failed to parse fixed param in RSSI dbm conv event: %d\n", 9791 ret); 9792 return; 9793 } 9794 9795 rcu_read_lock(); 9796 ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id); 9797 /* If pdev is not active, ignore the event */ 9798 if (!ar) 9799 goto out_unlock; 9800 9801 ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, 9802 ath12k_wmi_rssi_dbm_conv_info_event_parser, 9803 &rssi_info); 9804 if (ret) { 9805 ath12k_warn(ab, "unable to parse RSSI dbm conversion event\n"); 9806 goto out_unlock; 9807 } 9808 9809 spin_lock_bh(&ar->data_lock); 9810 ath12k_wmi_update_rssi_offsets(ar, &rssi_info); 9811 noise_floor = ath12k_pdev_get_noise_floor(ar); 9812 spin_unlock_bh(&ar->data_lock); 9813 9814 ath12k_dbg(ab, ATH12K_DBG_WMI, 9815 "RSSI noise floor updated, new value is %d dbm\n", noise_floor); 9816 out_unlock: 9817 rcu_read_unlock(); 9818 } 9819 9820 static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb) 9821 { 9822 struct wmi_cmd_hdr *cmd_hdr; 9823 enum wmi_tlv_event_id id; 9824 9825 cmd_hdr = (struct wmi_cmd_hdr *)skb->data; 9826 id = le32_get_bits(cmd_hdr->cmd_id, WMI_CMD_HDR_CMD_ID); 9827 9828 if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr))) 9829 goto out; 9830 9831 switch (id) { 9832 /* Process all the WMI events here */ 9833 case WMI_SERVICE_READY_EVENTID: 9834 ath12k_service_ready_event(ab, skb); 9835 break; 9836 case WMI_SERVICE_READY_EXT_EVENTID: 9837 ath12k_service_ready_ext_event(ab, skb); 9838 break; 9839 case WMI_SERVICE_READY_EXT2_EVENTID: 9840 ath12k_service_ready_ext2_event(ab, skb); 9841 break; 9842 case WMI_REG_CHAN_LIST_CC_EXT_EVENTID: 9843 ath12k_reg_chan_list_event(ab, skb); 9844 break; 9845 case WMI_READY_EVENTID: 9846 ath12k_ready_event(ab, skb); 9847 break; 9848 case WMI_PEER_DELETE_RESP_EVENTID: 9849 ath12k_peer_delete_resp_event(ab, skb); 9850 break; 9851 case WMI_VDEV_START_RESP_EVENTID: 9852 ath12k_vdev_start_resp_event(ab, skb); 9853 break; 9854 case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID: 9855 ath12k_bcn_tx_status_event(ab, skb); 9856 break; 9857 case WMI_VDEV_STOPPED_EVENTID: 9858 ath12k_vdev_stopped_event(ab, skb); 9859 break; 9860 case WMI_MGMT_RX_EVENTID: 9861 ath12k_mgmt_rx_event(ab, skb); 9862 /* mgmt_rx_event() owns the skb now! 
*/ 9863 return; 9864 case WMI_MGMT_TX_COMPLETION_EVENTID: 9865 ath12k_mgmt_tx_compl_event(ab, skb); 9866 break; 9867 case WMI_SCAN_EVENTID: 9868 ath12k_scan_event(ab, skb); 9869 break; 9870 case WMI_PEER_STA_KICKOUT_EVENTID: 9871 ath12k_peer_sta_kickout_event(ab, skb); 9872 break; 9873 case WMI_ROAM_EVENTID: 9874 ath12k_roam_event(ab, skb); 9875 break; 9876 case WMI_CHAN_INFO_EVENTID: 9877 ath12k_chan_info_event(ab, skb); 9878 break; 9879 case WMI_PDEV_BSS_CHAN_INFO_EVENTID: 9880 ath12k_pdev_bss_chan_info_event(ab, skb); 9881 break; 9882 case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID: 9883 ath12k_vdev_install_key_compl_event(ab, skb); 9884 break; 9885 case WMI_SERVICE_AVAILABLE_EVENTID: 9886 ath12k_service_available_event(ab, skb); 9887 break; 9888 case WMI_PEER_ASSOC_CONF_EVENTID: 9889 ath12k_peer_assoc_conf_event(ab, skb); 9890 break; 9891 case WMI_UPDATE_STATS_EVENTID: 9892 ath12k_update_stats_event(ab, skb); 9893 break; 9894 case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID: 9895 ath12k_pdev_ctl_failsafe_check_event(ab, skb); 9896 break; 9897 case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID: 9898 ath12k_wmi_pdev_csa_switch_count_status_event(ab, skb); 9899 break; 9900 case WMI_PDEV_TEMPERATURE_EVENTID: 9901 ath12k_wmi_pdev_temperature_event(ab, skb); 9902 break; 9903 case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID: 9904 ath12k_wmi_pdev_dma_ring_buf_release_event(ab, skb); 9905 break; 9906 case WMI_HOST_FILS_DISCOVERY_EVENTID: 9907 ath12k_fils_discovery_event(ab, skb); 9908 break; 9909 case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID: 9910 ath12k_probe_resp_tx_status_event(ab, skb); 9911 break; 9912 case WMI_RFKILL_STATE_CHANGE_EVENTID: 9913 ath12k_rfkill_state_change_event(ab, skb); 9914 break; 9915 case WMI_TWT_ENABLE_EVENTID: 9916 ath12k_wmi_twt_enable_event(ab, skb); 9917 break; 9918 case WMI_TWT_DISABLE_EVENTID: 9919 ath12k_wmi_twt_disable_event(ab, skb); 9920 break; 9921 case WMI_P2P_NOA_EVENTID: 9922 ath12k_wmi_p2p_noa_event(ab, skb); 9923 break; 9924 case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID: 9925 ath12k_wmi_pdev_dfs_radar_detected_event(ab, skb); 9926 break; 9927 case WMI_VDEV_DELETE_RESP_EVENTID: 9928 ath12k_vdev_delete_resp_event(ab, skb); 9929 break; 9930 case WMI_DIAG_EVENTID: 9931 ath12k_wmi_diag_event(ab, skb); 9932 break; 9933 case WMI_WOW_WAKEUP_HOST_EVENTID: 9934 ath12k_wmi_event_wow_wakeup_host(ab, skb); 9935 break; 9936 case WMI_GTK_OFFLOAD_STATUS_EVENTID: 9937 ath12k_wmi_gtk_offload_status_event(ab, skb); 9938 break; 9939 case WMI_MLO_SETUP_COMPLETE_EVENTID: 9940 ath12k_wmi_event_mlo_setup_complete(ab, skb); 9941 break; 9942 case WMI_MLO_TEARDOWN_COMPLETE_EVENTID: 9943 ath12k_wmi_event_teardown_complete(ab, skb); 9944 break; 9945 case WMI_HALPHY_STATS_CTRL_PATH_EVENTID: 9946 ath12k_wmi_process_tpc_stats(ab, skb); 9947 break; 9948 case WMI_11D_NEW_COUNTRY_EVENTID: 9949 ath12k_reg_11d_new_cc_event(ab, skb); 9950 break; 9951 case WMI_PDEV_RSSI_DBM_CONVERSION_PARAMS_INFO_EVENTID: 9952 ath12k_wmi_rssi_dbm_conversion_params_info_event(ab, skb); 9953 break; 9954 case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID: 9955 ath12k_wmi_obss_color_collision_event(ab, skb); 9956 break; 9957 /* add Unsupported events (rare) here */ 9958 case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID: 9959 case WMI_PEER_OPER_MODE_CHANGE_EVENTID: 9960 case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID: 9961 ath12k_dbg(ab, ATH12K_DBG_WMI, 9962 "ignoring unsupported event 0x%x\n", id); 9963 break; 9964 /* add Unsupported events (frequent) here */ 9965 case WMI_PDEV_GET_HALPHY_CAL_STATUS_EVENTID: 9966 case WMI_MGMT_RX_FW_CONSUMED_EVENTID: 9967 /* 
debug might flood hence silently ignore (no-op) */ 9968 break; 9969 case WMI_PDEV_UTF_EVENTID: 9970 if (test_bit(ATH12K_FLAG_FTM_SEGMENTED, &ab->dev_flags)) 9971 ath12k_tm_wmi_event_segmented(ab, id, skb); 9972 else 9973 ath12k_tm_wmi_event_unsegmented(ab, id, skb); 9974 break; 9975 default: 9976 ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id); 9977 break; 9978 } 9979 9980 out: 9981 dev_kfree_skb(skb); 9982 } 9983 9984 static int ath12k_connect_pdev_htc_service(struct ath12k_base *ab, 9985 u32 pdev_idx) 9986 { 9987 int status; 9988 static const u32 svc_id[] = { 9989 ATH12K_HTC_SVC_ID_WMI_CONTROL, 9990 ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1, 9991 ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2 9992 }; 9993 struct ath12k_htc_svc_conn_req conn_req = {}; 9994 struct ath12k_htc_svc_conn_resp conn_resp = {}; 9995 9996 /* these fields are the same for all service endpoints */ 9997 conn_req.ep_ops.ep_tx_complete = ath12k_wmi_htc_tx_complete; 9998 conn_req.ep_ops.ep_rx_complete = ath12k_wmi_op_rx; 9999 conn_req.ep_ops.ep_tx_credits = ath12k_wmi_op_ep_tx_credits; 10000 10001 /* connect to control service */ 10002 conn_req.service_id = svc_id[pdev_idx]; 10003 10004 status = ath12k_htc_connect_service(&ab->htc, &conn_req, &conn_resp); 10005 if (status) { 10006 ath12k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n", 10007 status); 10008 return status; 10009 } 10010 10011 ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid; 10012 ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid; 10013 ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len; 10014 10015 return 0; 10016 } 10017 10018 static int 10019 ath12k_wmi_send_unit_test_cmd(struct ath12k *ar, 10020 const struct wmi_unit_test_arg *ut) 10021 { 10022 struct ath12k_wmi_pdev *wmi = ar->wmi; 10023 struct wmi_unit_test_cmd *cmd; 10024 int buf_len, arg_len; 10025 struct sk_buff *skb; 10026 struct wmi_tlv *tlv; 10027 __le32 *ut_cmd_args; 10028 void *ptr; 10029 int ret; 10030 int i; 10031 10032 arg_len = sizeof(*ut_cmd_args) * ut->num_args; 10033 buf_len = sizeof(*cmd) + arg_len + TLV_HDR_SIZE; 10034 10035 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len); 10036 if (!skb) 10037 return -ENOMEM; 10038 10039 ptr = skb->data; 10040 cmd = ptr; 10041 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_UNIT_TEST_CMD, 10042 sizeof(*cmd)); 10043 cmd->vdev_id = cpu_to_le32(ut->vdev_id); 10044 cmd->module_id = cpu_to_le32(ut->module_id); 10045 cmd->num_args = cpu_to_le32(ut->num_args); 10046 cmd->diag_token = cpu_to_le32(ut->diag_token); 10047 10048 ptr += sizeof(*cmd); 10049 tlv = ptr; 10050 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len); 10051 10052 ptr += TLV_HDR_SIZE; 10053 ut_cmd_args = ptr; 10054 for (i = 0; i < ut->num_args; i++) 10055 ut_cmd_args[i] = cpu_to_le32(ut->args[i]); 10056 10057 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 10058 "WMI unit test : module %d vdev %d n_args %d token %d\n", 10059 ut->module_id, ut->vdev_id, ut->num_args, ut->diag_token); 10060 10061 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID); 10062 10063 if (ret) { 10064 ath12k_warn(ar->ab, "failed to send WMI_UNIT_TEST CMD :%d\n", 10065 ret); 10066 dev_kfree_skb(skb); 10067 } 10068 10069 return ret; 10070 } 10071 10072 int ath12k_wmi_simulate_radar(struct ath12k *ar) 10073 { 10074 struct ath12k_link_vif *arvif; 10075 struct wmi_unit_test_arg wmi_ut = {}; 10076 bool arvif_found = false; 10077 10078 list_for_each_entry(arvif, &ar->arvifs, list) { 10079 if (arvif->is_started && arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP) { 10080 arvif_found = true; 10081 break; 
10082 } 10083 } 10084 10085 if (!arvif_found) 10086 return -EINVAL; 10087 10088 wmi_ut.args[DFS_TEST_CMDID] = 0; 10089 wmi_ut.args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id; 10090 /* 10091 * Currently we could pass segment_id(b0 - b1), chirp(b2) 10092 * freq offset (b3 - b10) to unit test. For simulation 10093 * purpose this can be set to 0 which is valid. 10094 */ 10095 wmi_ut.args[DFS_TEST_RADAR_PARAM] = 0; 10096 10097 wmi_ut.vdev_id = arvif->vdev_id; 10098 wmi_ut.module_id = DFS_UNIT_TEST_MODULE; 10099 wmi_ut.num_args = DFS_MAX_TEST_ARGS; 10100 wmi_ut.diag_token = DFS_UNIT_TEST_TOKEN; 10101 10102 ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Triggering Radar Simulation\n"); 10103 10104 return ath12k_wmi_send_unit_test_cmd(ar, &wmi_ut); 10105 } 10106 10107 int ath12k_wmi_send_tpc_stats_request(struct ath12k *ar, 10108 enum wmi_halphy_ctrl_path_stats_id tpc_stats_type) 10109 { 10110 struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params *cmd; 10111 struct ath12k_wmi_pdev *wmi = ar->wmi; 10112 struct sk_buff *skb; 10113 struct wmi_tlv *tlv; 10114 __le32 *pdev_id; 10115 u32 buf_len; 10116 void *ptr; 10117 int ret; 10118 10119 buf_len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(u32) + TLV_HDR_SIZE + TLV_HDR_SIZE; 10120 10121 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len); 10122 if (!skb) 10123 return -ENOMEM; 10124 cmd = (struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params *)skb->data; 10125 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HALPHY_CTRL_PATH_CMD_FIXED_PARAM, 10126 sizeof(*cmd)); 10127 10128 cmd->stats_id_mask = cpu_to_le32(WMI_REQ_CTRL_PATH_PDEV_TX_STAT); 10129 cmd->action = cpu_to_le32(WMI_REQUEST_CTRL_PATH_STAT_GET); 10130 cmd->subid = cpu_to_le32(tpc_stats_type); 10131 10132 ptr = skb->data + sizeof(*cmd); 10133 10134 /* The below TLV arrays optionally follow this fixed param TLV structure 10135 * 1. ARRAY_UINT32 pdev_ids[] 10136 * If this array is present and non-zero length, stats should only 10137 * be provided from the pdevs identified in the array. 10138 * 2. ARRAY_UNIT32 vdev_ids[] 10139 * If this array is present and non-zero length, stats should only 10140 * be provided from the vdevs identified in the array. 10141 * 3. 
ath12k_wmi_mac_addr_params peer_macaddr[]; 10142 * If this array is present and non-zero length, stats should only 10143 * be provided from the peers with the MAC addresses specified 10144 * in the array 10145 */ 10146 tlv = ptr; 10147 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32)); 10148 ptr += TLV_HDR_SIZE; 10149 10150 pdev_id = ptr; 10151 *pdev_id = cpu_to_le32(ath12k_mac_get_target_pdev_id(ar)); 10152 ptr += sizeof(*pdev_id); 10153 10154 tlv = ptr; 10155 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0); 10156 ptr += TLV_HDR_SIZE; 10157 10158 tlv = ptr; 10159 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, 0); 10160 ptr += TLV_HDR_SIZE; 10161 10162 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_REQUEST_HALPHY_CTRL_PATH_STATS_CMDID); 10163 if (ret) { 10164 ath12k_warn(ar->ab, 10165 "failed to submit WMI_REQUEST_STATS_CTRL_PATH_CMDID\n"); 10166 dev_kfree_skb(skb); 10167 return ret; 10168 } 10169 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI get TPC STATS sent on pdev %d\n", 10170 ar->pdev->pdev_id); 10171 10172 return ret; 10173 } 10174 10175 int ath12k_wmi_connect(struct ath12k_base *ab) 10176 { 10177 u32 i; 10178 u8 wmi_ep_count; 10179 10180 wmi_ep_count = ab->htc.wmi_ep_count; 10181 if (wmi_ep_count > ab->hw_params->max_radios) 10182 return -1; 10183 10184 for (i = 0; i < wmi_ep_count; i++) 10185 ath12k_connect_pdev_htc_service(ab, i); 10186 10187 return 0; 10188 } 10189 10190 static void ath12k_wmi_pdev_detach(struct ath12k_base *ab, u8 pdev_id) 10191 { 10192 if (WARN_ON(pdev_id >= MAX_RADIOS)) 10193 return; 10194 10195 /* TODO: Deinit any pdev specific wmi resource */ 10196 } 10197 10198 int ath12k_wmi_pdev_attach(struct ath12k_base *ab, 10199 u8 pdev_id) 10200 { 10201 struct ath12k_wmi_pdev *wmi_handle; 10202 10203 if (pdev_id >= ab->hw_params->max_radios) 10204 return -EINVAL; 10205 10206 wmi_handle = &ab->wmi_ab.wmi[pdev_id]; 10207 10208 wmi_handle->wmi_ab = &ab->wmi_ab; 10209 10210 ab->wmi_ab.ab = ab; 10211 /* TODO: Init remaining resource specific to pdev */ 10212 10213 return 0; 10214 } 10215 10216 int ath12k_wmi_attach(struct ath12k_base *ab) 10217 { 10218 int ret; 10219 10220 ret = ath12k_wmi_pdev_attach(ab, 0); 10221 if (ret) 10222 return ret; 10223 10224 ab->wmi_ab.ab = ab; 10225 ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX; 10226 10227 /* It's overwritten when service_ext_ready is handled */ 10228 if (ab->hw_params->single_pdev_only) 10229 ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE; 10230 10231 /* TODO: Init remaining wmi soc resources required */ 10232 init_completion(&ab->wmi_ab.service_ready); 10233 init_completion(&ab->wmi_ab.unified_ready); 10234 10235 return 0; 10236 } 10237 10238 void ath12k_wmi_detach(struct ath12k_base *ab) 10239 { 10240 int i; 10241 10242 /* TODO: Deinit wmi resource specific to SOC as required */ 10243 10244 for (i = 0; i < ab->htc.wmi_ep_count; i++) 10245 ath12k_wmi_pdev_detach(ab, i); 10246 10247 ath12k_wmi_free_dbring_caps(ab); 10248 } 10249 10250 int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar, struct wmi_hw_data_filter_arg *arg) 10251 { 10252 struct wmi_hw_data_filter_cmd *cmd; 10253 struct sk_buff *skb; 10254 int len; 10255 10256 len = sizeof(*cmd); 10257 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); 10258 10259 if (!skb) 10260 return -ENOMEM; 10261 10262 cmd = (struct wmi_hw_data_filter_cmd *)skb->data; 10263 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HW_DATA_FILTER_CMD, 10264 sizeof(*cmd)); 10265 cmd->vdev_id = cpu_to_le32(arg->vdev_id); 10266 cmd->enable = 
cpu_to_le32(arg->enable ? 1 : 0); 10267 10268 /* Set all modes in case of disable */ 10269 if (arg->enable) 10270 cmd->hw_filter_bitmap = cpu_to_le32(arg->hw_filter_bitmap); 10271 else 10272 cmd->hw_filter_bitmap = cpu_to_le32((u32)~0U); 10273 10274 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 10275 "wmi hw data filter enable %d filter_bitmap 0x%x\n", 10276 arg->enable, arg->hw_filter_bitmap); 10277 10278 return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID); 10279 } 10280 10281 int ath12k_wmi_wow_host_wakeup_ind(struct ath12k *ar) 10282 { 10283 struct wmi_wow_host_wakeup_cmd *cmd; 10284 struct sk_buff *skb; 10285 size_t len; 10286 10287 len = sizeof(*cmd); 10288 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); 10289 if (!skb) 10290 return -ENOMEM; 10291 10292 cmd = (struct wmi_wow_host_wakeup_cmd *)skb->data; 10293 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD, 10294 sizeof(*cmd)); 10295 10296 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow host wakeup ind\n"); 10297 10298 return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID); 10299 } 10300 10301 int ath12k_wmi_wow_enable(struct ath12k *ar) 10302 { 10303 struct wmi_wow_enable_cmd *cmd; 10304 struct sk_buff *skb; 10305 int len; 10306 10307 len = sizeof(*cmd); 10308 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); 10309 if (!skb) 10310 return -ENOMEM; 10311 10312 cmd = (struct wmi_wow_enable_cmd *)skb->data; 10313 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ENABLE_CMD, 10314 sizeof(*cmd)); 10315 10316 cmd->enable = cpu_to_le32(1); 10317 cmd->pause_iface_config = cpu_to_le32(WOW_IFACE_PAUSE_ENABLED); 10318 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow enable\n"); 10319 10320 return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID); 10321 } 10322 10323 int ath12k_wmi_wow_add_wakeup_event(struct ath12k *ar, u32 vdev_id, 10324 enum wmi_wow_wakeup_event event, 10325 u32 enable) 10326 { 10327 struct wmi_wow_add_del_event_cmd *cmd; 10328 struct sk_buff *skb; 10329 size_t len; 10330 10331 len = sizeof(*cmd); 10332 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); 10333 if (!skb) 10334 return -ENOMEM; 10335 10336 cmd = (struct wmi_wow_add_del_event_cmd *)skb->data; 10337 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_DEL_EVT_CMD, 10338 sizeof(*cmd)); 10339 cmd->vdev_id = cpu_to_le32(vdev_id); 10340 cmd->is_add = cpu_to_le32(enable); 10341 cmd->event_bitmap = cpu_to_le32((1 << event)); 10342 10343 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n", 10344 wow_wakeup_event(event), enable, vdev_id); 10345 10346 return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID); 10347 } 10348 10349 int ath12k_wmi_wow_add_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id, 10350 const u8 *pattern, const u8 *mask, 10351 int pattern_len, int pattern_offset) 10352 { 10353 struct wmi_wow_add_pattern_cmd *cmd; 10354 struct wmi_wow_bitmap_pattern_params *bitmap; 10355 struct wmi_tlv *tlv; 10356 struct sk_buff *skb; 10357 void *ptr; 10358 size_t len; 10359 10360 len = sizeof(*cmd) + 10361 sizeof(*tlv) + /* array struct */ 10362 sizeof(*bitmap) + /* bitmap */ 10363 sizeof(*tlv) + /* empty ipv4 sync */ 10364 sizeof(*tlv) + /* empty ipv6 sync */ 10365 sizeof(*tlv) + /* empty magic */ 10366 sizeof(*tlv) + /* empty info timeout */ 10367 sizeof(*tlv) + sizeof(u32); /* ratelimit interval */ 10368 10369 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); 10370 if (!skb) 10371 return -ENOMEM; 10372 10373 /* cmd */ 10374 ptr = 
skb->data; 10375 cmd = ptr; 10376 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_PATTERN_CMD, 10377 sizeof(*cmd)); 10378 cmd->vdev_id = cpu_to_le32(vdev_id); 10379 cmd->pattern_id = cpu_to_le32(pattern_id); 10380 cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN); 10381 10382 ptr += sizeof(*cmd); 10383 10384 /* bitmap */ 10385 tlv = ptr; 10386 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, sizeof(*bitmap)); 10387 10388 ptr += sizeof(*tlv); 10389 10390 bitmap = ptr; 10391 bitmap->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_BITMAP_PATTERN_T, 10392 sizeof(*bitmap)); 10393 memcpy(bitmap->patternbuf, pattern, pattern_len); 10394 memcpy(bitmap->bitmaskbuf, mask, pattern_len); 10395 bitmap->pattern_offset = cpu_to_le32(pattern_offset); 10396 bitmap->pattern_len = cpu_to_le32(pattern_len); 10397 bitmap->bitmask_len = cpu_to_le32(pattern_len); 10398 bitmap->pattern_id = cpu_to_le32(pattern_id); 10399 10400 ptr += sizeof(*bitmap); 10401 10402 /* ipv4 sync */ 10403 tlv = ptr; 10404 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0); 10405 10406 ptr += sizeof(*tlv); 10407 10408 /* ipv6 sync */ 10409 tlv = ptr; 10410 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0); 10411 10412 ptr += sizeof(*tlv); 10413 10414 /* magic */ 10415 tlv = ptr; 10416 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0); 10417 10418 ptr += sizeof(*tlv); 10419 10420 /* pattern info timeout */ 10421 tlv = ptr; 10422 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0); 10423 10424 ptr += sizeof(*tlv); 10425 10426 /* ratelimit interval */ 10427 tlv = ptr; 10428 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32)); 10429 10430 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d pattern_len %d\n", 10431 vdev_id, pattern_id, pattern_offset, pattern_len); 10432 10433 ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow pattern: ", 10434 bitmap->patternbuf, pattern_len); 10435 ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow bitmask: ", 10436 bitmap->bitmaskbuf, pattern_len); 10437 10438 return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID); 10439 } 10440 10441 int ath12k_wmi_wow_del_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id) 10442 { 10443 struct wmi_wow_del_pattern_cmd *cmd; 10444 struct sk_buff *skb; 10445 size_t len; 10446 10447 len = sizeof(*cmd); 10448 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); 10449 if (!skb) 10450 return -ENOMEM; 10451 10452 cmd = (struct wmi_wow_del_pattern_cmd *)skb->data; 10453 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_DEL_PATTERN_CMD, 10454 sizeof(*cmd)); 10455 cmd->vdev_id = cpu_to_le32(vdev_id); 10456 cmd->pattern_id = cpu_to_le32(pattern_id); 10457 cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN); 10458 10459 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n", 10460 vdev_id, pattern_id); 10461 10462 return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID); 10463 } 10464 10465 static struct sk_buff * 10466 ath12k_wmi_op_gen_config_pno_start(struct ath12k *ar, u32 vdev_id, 10467 struct wmi_pno_scan_req_arg *pno) 10468 { 10469 struct nlo_configured_params *nlo_list; 10470 size_t len, nlo_list_len, channel_list_len; 10471 struct wmi_wow_nlo_config_cmd *cmd; 10472 __le32 *channel_list; 10473 struct wmi_tlv *tlv; 10474 struct sk_buff *skb; 10475 void *ptr; 10476 u32 i; 10477 10478 len = sizeof(*cmd) + 10479 sizeof(*tlv) + 10480 /* TLV place holder for array of structures 10481 * 
nlo_configured_params(nlo_list) 10482 */ 10483 sizeof(*tlv); 10484 /* TLV place holder for array of uint32 channel_list */ 10485 10486 channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count; 10487 len += channel_list_len; 10488 10489 nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count; 10490 len += nlo_list_len; 10491 10492 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); 10493 if (!skb) 10494 return ERR_PTR(-ENOMEM); 10495 10496 ptr = skb->data; 10497 cmd = ptr; 10498 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, sizeof(*cmd)); 10499 10500 cmd->vdev_id = cpu_to_le32(pno->vdev_id); 10501 cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN); 10502 10503 /* current FW does not support min-max range for dwell time */ 10504 cmd->active_dwell_time = cpu_to_le32(pno->active_max_time); 10505 cmd->passive_dwell_time = cpu_to_le32(pno->passive_max_time); 10506 10507 if (pno->do_passive_scan) 10508 cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE); 10509 10510 cmd->fast_scan_period = cpu_to_le32(pno->fast_scan_period); 10511 cmd->slow_scan_period = cpu_to_le32(pno->slow_scan_period); 10512 cmd->fast_scan_max_cycles = cpu_to_le32(pno->fast_scan_max_cycles); 10513 cmd->delay_start_time = cpu_to_le32(pno->delay_start_time); 10514 10515 if (pno->enable_pno_scan_randomization) { 10516 cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ | 10517 WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ); 10518 ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr); 10519 ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask); 10520 } 10521 10522 ptr += sizeof(*cmd); 10523 10524 /* nlo_configured_params(nlo_list) */ 10525 cmd->no_of_ssids = cpu_to_le32(pno->uc_networks_count); 10526 tlv = ptr; 10527 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, nlo_list_len); 10528 10529 ptr += sizeof(*tlv); 10530 nlo_list = ptr; 10531 for (i = 0; i < pno->uc_networks_count; i++) { 10532 tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header); 10533 tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE, 10534 sizeof(*nlo_list)); 10535 10536 nlo_list[i].ssid.valid = cpu_to_le32(1); 10537 nlo_list[i].ssid.ssid.ssid_len = 10538 cpu_to_le32(pno->a_networks[i].ssid.ssid_len); 10539 memcpy(nlo_list[i].ssid.ssid.ssid, 10540 pno->a_networks[i].ssid.ssid, 10541 le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len)); 10542 10543 if (pno->a_networks[i].rssi_threshold && 10544 pno->a_networks[i].rssi_threshold > -300) { 10545 nlo_list[i].rssi_cond.valid = cpu_to_le32(1); 10546 nlo_list[i].rssi_cond.rssi = 10547 cpu_to_le32(pno->a_networks[i].rssi_threshold); 10548 } 10549 10550 nlo_list[i].bcast_nw_type.valid = cpu_to_le32(1); 10551 nlo_list[i].bcast_nw_type.bcast_nw_type = 10552 cpu_to_le32(pno->a_networks[i].bcast_nw_type); 10553 } 10554 10555 ptr += nlo_list_len; 10556 cmd->num_of_channels = cpu_to_le32(pno->a_networks[0].channel_count); 10557 tlv = ptr; 10558 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, channel_list_len); 10559 ptr += sizeof(*tlv); 10560 channel_list = ptr; 10561 10562 for (i = 0; i < pno->a_networks[0].channel_count; i++) 10563 channel_list[i] = cpu_to_le32(pno->a_networks[0].channels[i]); 10564 10565 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n", 10566 vdev_id); 10567 10568 return skb; 10569 } 10570 10571 static struct sk_buff *ath12k_wmi_op_gen_config_pno_stop(struct ath12k *ar, 10572 u32 vdev_id) 10573 { 10574 struct wmi_wow_nlo_config_cmd *cmd; 10575 struct sk_buff *skb; 10576 size_t len; 10577 10578 len = 
sizeof(*cmd); 10579 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); 10580 if (!skb) 10581 return ERR_PTR(-ENOMEM); 10582 10583 cmd = (struct wmi_wow_nlo_config_cmd *)skb->data; 10584 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, len); 10585 10586 cmd->vdev_id = cpu_to_le32(vdev_id); 10587 cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_STOP); 10588 10589 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 10590 "wmi tlv stop pno config vdev_id %d\n", vdev_id); 10591 return skb; 10592 } 10593 10594 int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id, 10595 struct wmi_pno_scan_req_arg *pno_scan) 10596 { 10597 struct sk_buff *skb; 10598 10599 if (pno_scan->enable) 10600 skb = ath12k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan); 10601 else 10602 skb = ath12k_wmi_op_gen_config_pno_stop(ar, vdev_id); 10603 10604 if (IS_ERR_OR_NULL(skb)) 10605 return -ENOMEM; 10606 10607 return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID); 10608 } 10609 10610 static void ath12k_wmi_fill_ns_offload(struct ath12k *ar, 10611 struct wmi_arp_ns_offload_arg *offload, 10612 void **ptr, 10613 bool enable, 10614 bool ext) 10615 { 10616 struct wmi_ns_offload_params *ns; 10617 struct wmi_tlv *tlv; 10618 void *buf_ptr = *ptr; 10619 u32 ns_cnt, ns_ext_tuples; 10620 int i, max_offloads; 10621 10622 ns_cnt = offload->ipv6_count; 10623 10624 tlv = buf_ptr; 10625 10626 if (ext) { 10627 ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS; 10628 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 10629 ns_ext_tuples * sizeof(*ns)); 10630 i = WMI_MAX_NS_OFFLOADS; 10631 max_offloads = offload->ipv6_count; 10632 } else { 10633 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 10634 WMI_MAX_NS_OFFLOADS * sizeof(*ns)); 10635 i = 0; 10636 max_offloads = WMI_MAX_NS_OFFLOADS; 10637 } 10638 10639 buf_ptr += sizeof(*tlv); 10640 10641 for (; i < max_offloads; i++) { 10642 ns = buf_ptr; 10643 ns->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NS_OFFLOAD_TUPLE, 10644 sizeof(*ns)); 10645 10646 if (enable) { 10647 if (i < ns_cnt) 10648 ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_VALID); 10649 10650 memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16); 10651 memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16); 10652 10653 if (offload->ipv6_type[i]) 10654 ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_IS_IPV6_ANYCAST); 10655 10656 memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN); 10657 10658 if (!is_zero_ether_addr(ns->target_mac.addr)) 10659 ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_MAC_VALID); 10660 10661 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 10662 "wmi index %d ns_solicited %pI6 target %pI6", 10663 i, ns->solicitation_ipaddr, 10664 ns->target_ipaddr[0]); 10665 } 10666 10667 buf_ptr += sizeof(*ns); 10668 } 10669 10670 *ptr = buf_ptr; 10671 } 10672 10673 static void ath12k_wmi_fill_arp_offload(struct ath12k *ar, 10674 struct wmi_arp_ns_offload_arg *offload, 10675 void **ptr, 10676 bool enable) 10677 { 10678 struct wmi_arp_offload_params *arp; 10679 struct wmi_tlv *tlv; 10680 void *buf_ptr = *ptr; 10681 int i; 10682 10683 /* fill arp tuple */ 10684 tlv = buf_ptr; 10685 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 10686 WMI_MAX_ARP_OFFLOADS * sizeof(*arp)); 10687 buf_ptr += sizeof(*tlv); 10688 10689 for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) { 10690 arp = buf_ptr; 10691 arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARP_OFFLOAD_TUPLE, 10692 sizeof(*arp)); 10693 10694 if (enable && i < offload->ipv4_count) { 10695 /* Copy the target ip addr and flags */ 10696 arp->flags = 
cpu_to_le32(WMI_ARPOL_FLAGS_VALID); 10697 memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4); 10698 10699 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi arp offload address %pI4", 10700 arp->target_ipaddr); 10701 } 10702 10703 buf_ptr += sizeof(*arp); 10704 } 10705 10706 *ptr = buf_ptr; 10707 } 10708 10709 int ath12k_wmi_arp_ns_offload(struct ath12k *ar, 10710 struct ath12k_link_vif *arvif, 10711 struct wmi_arp_ns_offload_arg *offload, 10712 bool enable) 10713 { 10714 struct wmi_set_arp_ns_offload_cmd *cmd; 10715 struct wmi_tlv *tlv; 10716 struct sk_buff *skb; 10717 void *buf_ptr; 10718 size_t len; 10719 u8 ns_cnt, ns_ext_tuples = 0; 10720 10721 ns_cnt = offload->ipv6_count; 10722 10723 len = sizeof(*cmd) + 10724 sizeof(*tlv) + 10725 WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_params) + 10726 sizeof(*tlv) + 10727 WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_params); 10728 10729 if (ns_cnt > WMI_MAX_NS_OFFLOADS) { 10730 ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS; 10731 len += sizeof(*tlv) + 10732 ns_ext_tuples * sizeof(struct wmi_ns_offload_params); 10733 } 10734 10735 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); 10736 if (!skb) 10737 return -ENOMEM; 10738 10739 buf_ptr = skb->data; 10740 cmd = buf_ptr; 10741 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_ARP_NS_OFFLOAD_CMD, 10742 sizeof(*cmd)); 10743 cmd->flags = cpu_to_le32(0); 10744 cmd->vdev_id = cpu_to_le32(arvif->vdev_id); 10745 cmd->num_ns_ext_tuples = cpu_to_le32(ns_ext_tuples); 10746 10747 buf_ptr += sizeof(*cmd); 10748 10749 ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 0); 10750 ath12k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable); 10751 10752 if (ns_ext_tuples) 10753 ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 1); 10754 10755 return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID); 10756 } 10757 10758 int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar, 10759 struct ath12k_link_vif *arvif, bool enable) 10760 { 10761 struct ath12k_rekey_data *rekey_data = &arvif->rekey_data; 10762 struct wmi_gtk_rekey_offload_cmd *cmd; 10763 struct sk_buff *skb; 10764 __le64 replay_ctr; 10765 int len; 10766 10767 len = sizeof(*cmd); 10768 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); 10769 if (!skb) 10770 return -ENOMEM; 10771 10772 cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data; 10773 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd)); 10774 cmd->vdev_id = cpu_to_le32(arvif->vdev_id); 10775 10776 if (enable) { 10777 cmd->flags = cpu_to_le32(GTK_OFFLOAD_ENABLE_OPCODE); 10778 10779 /* the length in rekey_data and cmd is equal */ 10780 memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck)); 10781 memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek)); 10782 10783 replay_ctr = cpu_to_le64(rekey_data->replay_ctr); 10784 memcpy(cmd->replay_ctr, &replay_ctr, 10785 sizeof(replay_ctr)); 10786 } else { 10787 cmd->flags = cpu_to_le32(GTK_OFFLOAD_DISABLE_OPCODE); 10788 } 10789 10790 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "offload gtk rekey vdev: %d %d\n", 10791 arvif->vdev_id, enable); 10792 return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID); 10793 } 10794 10795 int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar, 10796 struct ath12k_link_vif *arvif) 10797 { 10798 struct wmi_gtk_rekey_offload_cmd *cmd; 10799 struct sk_buff *skb; 10800 int len; 10801 10802 len = sizeof(*cmd); 10803 skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); 10804 if (!skb) 10805 return -ENOMEM; 10806 10807 cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data; 10808 
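	/* Query the current GTK offload state; firmware is expected to report
	 * the latest replay counter via WMI_GTK_OFFLOAD_STATUS_EVENTID, which
	 * is handled by ath12k_wmi_gtk_offload_status_event() above.
	 */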
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd)); 10809 cmd->vdev_id = cpu_to_le32(arvif->vdev_id); 10810 cmd->flags = cpu_to_le32(GTK_OFFLOAD_REQUEST_STATUS_OPCODE); 10811 10812 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "get gtk rekey vdev_id: %d\n", 10813 arvif->vdev_id); 10814 return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID); 10815 } 10816 10817 int ath12k_wmi_sta_keepalive(struct ath12k *ar, 10818 const struct wmi_sta_keepalive_arg *arg) 10819 { 10820 struct wmi_sta_keepalive_arp_resp_params *arp; 10821 struct ath12k_wmi_pdev *wmi = ar->wmi; 10822 struct wmi_sta_keepalive_cmd *cmd; 10823 struct sk_buff *skb; 10824 size_t len; 10825 10826 len = sizeof(*cmd) + sizeof(*arp); 10827 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 10828 if (!skb) 10829 return -ENOMEM; 10830 10831 cmd = (struct wmi_sta_keepalive_cmd *)skb->data; 10832 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALIVE_CMD, sizeof(*cmd)); 10833 cmd->vdev_id = cpu_to_le32(arg->vdev_id); 10834 cmd->enabled = cpu_to_le32(arg->enabled); 10835 cmd->interval = cpu_to_le32(arg->interval); 10836 cmd->method = cpu_to_le32(arg->method); 10837 10838 arp = (struct wmi_sta_keepalive_arp_resp_params *)(cmd + 1); 10839 arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALVE_ARP_RESPONSE, 10840 sizeof(*arp)); 10841 if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE || 10842 arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) { 10843 arp->src_ip4_addr = cpu_to_le32(arg->src_ip4_addr); 10844 arp->dest_ip4_addr = cpu_to_le32(arg->dest_ip4_addr); 10845 ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr); 10846 } 10847 10848 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 10849 "wmi sta keepalive vdev %d enabled %d method %d interval %d\n", 10850 arg->vdev_id, arg->enabled, arg->method, arg->interval); 10851 10852 return ath12k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID); 10853 } 10854 10855 int ath12k_wmi_mlo_setup(struct ath12k *ar, struct wmi_mlo_setup_arg *mlo_params) 10856 { 10857 struct wmi_mlo_setup_cmd *cmd; 10858 struct ath12k_wmi_pdev *wmi = ar->wmi; 10859 u32 *partner_links, num_links; 10860 int i, ret, buf_len, arg_len; 10861 struct sk_buff *skb; 10862 struct wmi_tlv *tlv; 10863 void *ptr; 10864 10865 num_links = mlo_params->num_partner_links; 10866 arg_len = num_links * sizeof(u32); 10867 buf_len = sizeof(*cmd) + TLV_HDR_SIZE + arg_len; 10868 10869 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len); 10870 if (!skb) 10871 return -ENOMEM; 10872 10873 cmd = (struct wmi_mlo_setup_cmd *)skb->data; 10874 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_SETUP_CMD, 10875 sizeof(*cmd)); 10876 cmd->mld_group_id = mlo_params->group_id; 10877 cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id); 10878 ptr = skb->data + sizeof(*cmd); 10879 10880 tlv = ptr; 10881 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len); 10882 ptr += TLV_HDR_SIZE; 10883 10884 partner_links = ptr; 10885 for (i = 0; i < num_links; i++) 10886 partner_links[i] = mlo_params->partner_link_id[i]; 10887 10888 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_SETUP_CMDID); 10889 if (ret) { 10890 ath12k_warn(ar->ab, "failed to submit WMI_MLO_SETUP_CMDID command: %d\n", 10891 ret); 10892 dev_kfree_skb(skb); 10893 return ret; 10894 } 10895 10896 return 0; 10897 } 10898 10899 int ath12k_wmi_mlo_ready(struct ath12k *ar) 10900 { 10901 struct wmi_mlo_ready_cmd *cmd; 10902 struct ath12k_wmi_pdev *wmi = ar->wmi; 10903 struct sk_buff *skb; 10904 int ret, len; 10905 10906 len = sizeof(*cmd); 10907 skb = 
ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 10908 if (!skb) 10909 return -ENOMEM; 10910 10911 cmd = (struct wmi_mlo_ready_cmd *)skb->data; 10912 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_READY_CMD, 10913 sizeof(*cmd)); 10914 cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id); 10915 10916 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_READY_CMDID); 10917 if (ret) { 10918 ath12k_warn(ar->ab, "failed to submit WMI_MLO_READY_CMDID command: %d\n", 10919 ret); 10920 dev_kfree_skb(skb); 10921 return ret; 10922 } 10923 10924 return 0; 10925 } 10926 10927 int ath12k_wmi_mlo_teardown(struct ath12k *ar) 10928 { 10929 struct wmi_mlo_teardown_cmd *cmd; 10930 struct ath12k_wmi_pdev *wmi = ar->wmi; 10931 struct sk_buff *skb; 10932 int ret, len; 10933 10934 len = sizeof(*cmd); 10935 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 10936 if (!skb) 10937 return -ENOMEM; 10938 10939 cmd = (struct wmi_mlo_teardown_cmd *)skb->data; 10940 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_TEARDOWN_CMD, 10941 sizeof(*cmd)); 10942 cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id); 10943 cmd->reason_code = WMI_MLO_TEARDOWN_SSR_REASON; 10944 10945 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_TEARDOWN_CMDID); 10946 if (ret) { 10947 ath12k_warn(ar->ab, "failed to submit WMI MLO teardown command: %d\n", 10948 ret); 10949 dev_kfree_skb(skb); 10950 return ret; 10951 } 10952 10953 return 0; 10954 } 10955 10956 bool ath12k_wmi_supports_6ghz_cc_ext(struct ath12k *ar) 10957 { 10958 return test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT, 10959 ar->ab->wmi_ab.svc_map) && ar->supports_6ghz; 10960 } 10961 10962 int ath12k_wmi_send_vdev_set_tpc_power(struct ath12k *ar, 10963 u32 vdev_id, 10964 struct ath12k_reg_tpc_power_info *param) 10965 { 10966 struct wmi_vdev_set_tpc_power_cmd *cmd; 10967 struct ath12k_wmi_pdev *wmi = ar->wmi; 10968 struct wmi_vdev_ch_power_params *ch; 10969 int i, ret, len, array_len; 10970 struct sk_buff *skb; 10971 struct wmi_tlv *tlv; 10972 u8 *ptr; 10973 10974 array_len = sizeof(*ch) * param->num_pwr_levels; 10975 len = sizeof(*cmd) + TLV_HDR_SIZE + array_len; 10976 10977 skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); 10978 if (!skb) 10979 return -ENOMEM; 10980 10981 ptr = skb->data; 10982 10983 cmd = (struct wmi_vdev_set_tpc_power_cmd *)ptr; 10984 cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_TPC_POWER_CMD, 10985 sizeof(*cmd)); 10986 cmd->vdev_id = cpu_to_le32(vdev_id); 10987 cmd->psd_power = cpu_to_le32(param->is_psd_power); 10988 cmd->eirp_power = cpu_to_le32(param->eirp_power); 10989 cmd->power_type_6ghz = cpu_to_le32(param->ap_power_type); 10990 10991 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 10992 "tpc vdev id %d is psd power %d eirp power %d 6 ghz power type %d\n", 10993 vdev_id, param->is_psd_power, param->eirp_power, param->ap_power_type); 10994 10995 ptr += sizeof(*cmd); 10996 tlv = (struct wmi_tlv *)ptr; 10997 tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, array_len); 10998 10999 ptr += TLV_HDR_SIZE; 11000 ch = (struct wmi_vdev_ch_power_params *)ptr; 11001 11002 for (i = 0; i < param->num_pwr_levels; i++, ch++) { 11003 ch->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CH_POWER_INFO, 11004 sizeof(*ch)); 11005 ch->chan_cfreq = cpu_to_le32(param->chan_power_info[i].chan_cfreq); 11006 ch->tx_power = cpu_to_le32(param->chan_power_info[i].tx_power); 11007 11008 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "tpc chan freq %d TX power %d\n", 11009 ch->chan_cfreq, ch->tx_power); 11010 } 11011 11012 ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_TPC_POWER_CMDID); 11013 if (ret) { 11014 
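		/* The skb is not consumed when the send fails, so free it here
		 * to avoid leaking it.
		 */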

int ath12k_wmi_send_vdev_set_tpc_power(struct ath12k *ar,
				       u32 vdev_id,
				       struct ath12k_reg_tpc_power_info *param)
{
	struct wmi_vdev_set_tpc_power_cmd *cmd;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_ch_power_params *ch;
	int i, ret, len, array_len;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	u8 *ptr;

	array_len = sizeof(*ch) * param->num_pwr_levels;
	len = sizeof(*cmd) + TLV_HDR_SIZE + array_len;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	ptr = skb->data;

	cmd = (struct wmi_vdev_set_tpc_power_cmd *)ptr;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_TPC_POWER_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->psd_power = cpu_to_le32(param->is_psd_power);
	cmd->eirp_power = cpu_to_le32(param->eirp_power);
	cmd->power_type_6ghz = cpu_to_le32(param->ap_power_type);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "tpc vdev id %d is psd power %d eirp power %d 6 ghz power type %d\n",
		   vdev_id, param->is_psd_power, param->eirp_power, param->ap_power_type);

	ptr += sizeof(*cmd);
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, array_len);

	ptr += TLV_HDR_SIZE;
	ch = (struct wmi_vdev_ch_power_params *)ptr;

	for (i = 0; i < param->num_pwr_levels; i++, ch++) {
		ch->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CH_POWER_INFO,
							sizeof(*ch));
		ch->chan_cfreq = cpu_to_le32(param->chan_power_info[i].chan_cfreq);
		ch->tx_power = cpu_to_le32(param->chan_power_info[i].tx_power);

		ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "tpc chan freq %d TX power %d\n",
			   ch->chan_cfreq, ch->tx_power);
	}

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_TPC_POWER_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_VDEV_SET_TPC_POWER_CMDID\n");
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}

static int
ath12k_wmi_fill_disallowed_bmap(struct ath12k_base *ab,
				struct wmi_disallowed_mlo_mode_bitmap_params *dislw_bmap,
				struct wmi_mlo_link_set_active_arg *arg)
{
	struct wmi_ml_disallow_mode_bmap_arg *dislw_bmap_arg;
	u8 i;

	if (arg->num_disallow_mode_comb >
	    ARRAY_SIZE(arg->disallow_bmap)) {
		ath12k_warn(ab, "invalid num_disallow_mode_comb: %d",
			    arg->num_disallow_mode_comb);
		return -EINVAL;
	}

	dislw_bmap_arg = &arg->disallow_bmap[0];
	for (i = 0; i < arg->num_disallow_mode_comb; i++) {
		dislw_bmap->tlv_header =
			ath12k_wmi_tlv_cmd_hdr(0, sizeof(*dislw_bmap));
		dislw_bmap->disallowed_mode_bitmap =
			cpu_to_le32(dislw_bmap_arg->disallowed_mode);
		dislw_bmap->ieee_link_id_comb =
			le32_encode_bits(dislw_bmap_arg->ieee_link_id[0],
					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_1) |
			le32_encode_bits(dislw_bmap_arg->ieee_link_id[1],
					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_2) |
			le32_encode_bits(dislw_bmap_arg->ieee_link_id[2],
					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_3) |
			le32_encode_bits(dislw_bmap_arg->ieee_link_id[3],
					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_4);

		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "entry %d disallowed_mode %d ieee_link_id_comb 0x%x",
			   i, dislw_bmap_arg->disallowed_mode,
			   dislw_bmap_arg->ieee_link_id_comb);
		dislw_bmap++;
		dislw_bmap_arg++;
	}

	return 0;
}
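
/*
 * WMI_MLO_LINK_SET_ACTIVE_CMDID buffer layout built by the function below,
 * derived from its length computation and TLV writes:
 *
 *	fixed params (wmi_mlo_link_set_active_cmd)
 *	ARRAY_STRUCT: link number params (entries only for *_LINK_NUM modes)
 *	ARRAY_UINT32: vdev id bitmap
 *	ARRAY_UINT32: inactive vdev id bitmap (ACTIVE_INACTIVE mode), else empty
 *	ARRAY_UINT32: ieee_link_id_bitmap (always empty here)
 *	ARRAY_UINT32: ieee_link_id_bitmap2 (always empty here)
 *	ARRAY_STRUCT: disallowed MLO mode bitmap combinations
 */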

int ath12k_wmi_send_mlo_link_set_active_cmd(struct ath12k_base *ab,
					    struct wmi_mlo_link_set_active_arg *arg)
{
	struct wmi_disallowed_mlo_mode_bitmap_params *disallowed_mode_bmap;
	struct wmi_mlo_set_active_link_number_params *link_num_param;
	u32 num_link_num_param = 0, num_vdev_bitmap = 0;
	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
	struct wmi_mlo_link_set_active_cmd *cmd;
	u32 num_inactive_vdev_bitmap = 0;
	u32 num_disallow_mode_comb = 0;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	__le32 *vdev_bitmap;
	void *buf_ptr;
	int i, ret;
	u32 len;

	if (!arg->num_vdev_bitmap && !arg->num_link_entry) {
		ath12k_warn(ab, "Invalid num_vdev_bitmap and num_link_entry");
		return -EINVAL;
	}

	switch (arg->force_mode) {
	case WMI_MLO_LINK_FORCE_MODE_ACTIVE_LINK_NUM:
	case WMI_MLO_LINK_FORCE_MODE_INACTIVE_LINK_NUM:
		num_link_num_param = arg->num_link_entry;
		fallthrough;
	case WMI_MLO_LINK_FORCE_MODE_ACTIVE:
	case WMI_MLO_LINK_FORCE_MODE_INACTIVE:
	case WMI_MLO_LINK_FORCE_MODE_NO_FORCE:
		num_vdev_bitmap = arg->num_vdev_bitmap;
		break;
	case WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE:
		num_vdev_bitmap = arg->num_vdev_bitmap;
		num_inactive_vdev_bitmap = arg->num_inactive_vdev_bitmap;
		break;
	default:
		ath12k_warn(ab, "Invalid force mode: %u", arg->force_mode);
		return -EINVAL;
	}

	num_disallow_mode_comb = arg->num_disallow_mode_comb;
	len = sizeof(*cmd) +
	      TLV_HDR_SIZE + sizeof(*link_num_param) * num_link_num_param +
	      TLV_HDR_SIZE + sizeof(*vdev_bitmap) * num_vdev_bitmap +
	      TLV_HDR_SIZE + TLV_HDR_SIZE + TLV_HDR_SIZE +
	      TLV_HDR_SIZE + sizeof(*disallowed_mode_bmap) * num_disallow_mode_comb;
	if (arg->force_mode == WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE)
		len += sizeof(*vdev_bitmap) * num_inactive_vdev_bitmap;

	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_mlo_link_set_active_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_LINK_SET_ACTIVE_CMD,
						 sizeof(*cmd));
	cmd->force_mode = cpu_to_le32(arg->force_mode);
	cmd->reason = cpu_to_le32(arg->reason);
	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "mode %d reason %d num_link_num_param %d num_vdev_bitmap %d inactive %d num_disallow_mode_comb %d",
		   arg->force_mode, arg->reason, num_link_num_param,
		   num_vdev_bitmap, num_inactive_vdev_bitmap,
		   num_disallow_mode_comb);

	buf_ptr = skb->data + sizeof(*cmd);
	tlv = buf_ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
					 sizeof(*link_num_param) * num_link_num_param);
	buf_ptr += TLV_HDR_SIZE;

	if (num_link_num_param) {
		cmd->ctrl_flags =
			le32_encode_bits(arg->ctrl_flags.dync_force_link_num ? 1 : 0,
					 CRTL_F_DYNC_FORCE_LINK_NUM);

		link_num_param = buf_ptr;
		for (i = 0; i < num_link_num_param; i++) {
			link_num_param->tlv_header =
				ath12k_wmi_tlv_cmd_hdr(0, sizeof(*link_num_param));
			link_num_param->num_of_link =
				cpu_to_le32(arg->link_num[i].num_of_link);
			link_num_param->vdev_type =
				cpu_to_le32(arg->link_num[i].vdev_type);
			link_num_param->vdev_subtype =
				cpu_to_le32(arg->link_num[i].vdev_subtype);
			link_num_param->home_freq =
				cpu_to_le32(arg->link_num[i].home_freq);
			ath12k_dbg(ab, ATH12K_DBG_WMI,
				   "entry %d num_of_link %d vdev type %d subtype %d freq %d control_flags %d",
				   i, arg->link_num[i].num_of_link,
				   arg->link_num[i].vdev_type,
				   arg->link_num[i].vdev_subtype,
				   arg->link_num[i].home_freq,
				   __le32_to_cpu(cmd->ctrl_flags));
			link_num_param++;
		}

		buf_ptr += sizeof(*link_num_param) * num_link_num_param;
	}

	tlv = buf_ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32,
					 sizeof(*vdev_bitmap) * num_vdev_bitmap);
	buf_ptr += TLV_HDR_SIZE;

	if (num_vdev_bitmap) {
		vdev_bitmap = buf_ptr;
		for (i = 0; i < num_vdev_bitmap; i++) {
			vdev_bitmap[i] = cpu_to_le32(arg->vdev_bitmap[i]);
			ath12k_dbg(ab, ATH12K_DBG_WMI, "entry %d vdev_id_bitmap 0x%x",
				   i, arg->vdev_bitmap[i]);
		}

		buf_ptr += sizeof(*vdev_bitmap) * num_vdev_bitmap;
	}

	if (arg->force_mode == WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE) {
		tlv = buf_ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32,
						 sizeof(*vdev_bitmap) *
						 num_inactive_vdev_bitmap);
		buf_ptr += TLV_HDR_SIZE;

		if (num_inactive_vdev_bitmap) {
			vdev_bitmap = buf_ptr;
			for (i = 0; i < num_inactive_vdev_bitmap; i++) {
				vdev_bitmap[i] =
					cpu_to_le32(arg->inactive_vdev_bitmap[i]);
				ath12k_dbg(ab, ATH12K_DBG_WMI,
					   "entry %d inactive_vdev_id_bitmap 0x%x",
					   i, arg->inactive_vdev_bitmap[i]);
			}

			buf_ptr += sizeof(*vdev_bitmap) * num_inactive_vdev_bitmap;
		}
	} else {
		/* add empty vdev bitmap2 tlv */
		tlv = buf_ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
		buf_ptr += TLV_HDR_SIZE;
	}

	/* add empty ieee_link_id_bitmap tlv */
	tlv = buf_ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
	buf_ptr += TLV_HDR_SIZE;

	/* add empty ieee_link_id_bitmap2 tlv */
	tlv = buf_ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
	buf_ptr += TLV_HDR_SIZE;

	tlv = buf_ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
					 sizeof(*disallowed_mode_bmap) *
					 arg->num_disallow_mode_comb);
	buf_ptr += TLV_HDR_SIZE;

	ret = ath12k_wmi_fill_disallowed_bmap(ab, buf_ptr, arg);
	if (ret)
		goto free_skb;

	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_MLO_LINK_SET_ACTIVE_CMDID);
	if (ret) {
		ath12k_warn(ab,
			    "failed to send WMI_MLO_LINK_SET_ACTIVE_CMDID: %d\n", ret);
		goto free_skb;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI, "WMI mlo link set active cmd");

	return ret;

free_skb:
	dev_kfree_skb(skb);
	return ret;
}
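
/*
 * Example (illustrative only, not part of the driver): forcing the links
 * backing two vdevs active via ath12k_wmi_send_mlo_link_set_active_cmd().
 * The reason value stands in for whatever wmi_mlo_link_force_reason the
 * caller has, the vdev ids are placeholders, and no disallowed-mode
 * combinations are supplied.
 *
 *	struct wmi_mlo_link_set_active_arg arg = {
 *		.force_mode = WMI_MLO_LINK_FORCE_MODE_ACTIVE,
 *		.reason = reason,
 *		.num_vdev_bitmap = 1,
 *	};
 *
 *	arg.vdev_bitmap[0] = BIT(vdev_id_a) | BIT(vdev_id_b);
 *	ret = ath12k_wmi_send_mlo_link_set_active_cmd(ab, &arg);
 */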