xref: /src/sys/contrib/dev/athk/ath12k/wmi.c (revision a96550206e4bde15bf615ff2127b80404a7ec41f)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4  * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
5  */
6 #include <linux/skbuff.h>
7 #include <linux/ctype.h>
8 #include <net/mac80211.h>
9 #include <net/cfg80211.h>
10 #include <linux/completion.h>
11 #include <linux/if_ether.h>
12 #include <linux/types.h>
13 #include <linux/pci.h>
14 #include <linux/uuid.h>
15 #include <linux/time.h>
16 #include <linux/of.h>
17 #include <linux/cleanup.h>
18 #include "core.h"
19 #include "debugfs.h"
20 #include "debug.h"
21 #include "mac.h"
22 #include "hw.h"
23 #include "peer.h"
24 #include "p2p.h"
25 #include "testmode.h"
26 
/* Parse state for the WMI service ready event TLVs. */
struct ath12k_wmi_svc_ready_parse {
	bool wmi_svc_bitmap_done;	/* service bitmap TLV already consumed */
};

/* Accumulated state while parsing a WMI firmware stats event. */
struct wmi_tlv_fw_stats_parse {
	const struct wmi_stats_event *ev;
	struct ath12k_fw_stats *stats;
	const struct wmi_per_chain_rssi_stat_params *rssi;
	int rssi_num;
	bool chain_rssi_done;
};

/* DMA ring capability TLV array and its element count. */
struct ath12k_wmi_dma_ring_caps_parse {
	struct ath12k_wmi_dma_ring_caps_params *dma_ring_caps;
	u32 n_dma_ring_caps;
};

/* Host-byte-order copy of selected fields from the fixed portion of the
 * service ready ext event.
 */
struct ath12k_wmi_service_ext_arg {
	u32 default_conc_scan_config_bits;
	u32 default_fw_config_bits;
	struct ath12k_wmi_ppe_threshold_arg ppet;
	u32 he_cap_info;
	u32 mpdu_density;
	u32 max_bssid_rx_filters;
	u32 num_hw_modes;
	u32 num_phy;
};

/* Parse state for the multi-TLV service ready ext event. The *_done
 * flags record which TLV groups have already been consumed so that
 * repeated WMI_TAG_ARRAY_STRUCT containers can be told apart.
 */
struct ath12k_wmi_svc_rdy_ext_parse {
	struct ath12k_wmi_service_ext_arg arg;
	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps;
	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
	u32 n_hw_mode_caps;
	u32 tot_phy_id;
	struct ath12k_wmi_hw_mode_cap_params pref_hw_mode_caps;
	struct ath12k_wmi_mac_phy_caps_params *mac_phy_caps;
	u32 n_mac_phy_caps;
	const struct ath12k_wmi_soc_hal_reg_caps_params *soc_hal_reg_caps;
	const struct ath12k_wmi_hal_reg_caps_ext_params *ext_hal_reg_caps;
	u32 n_ext_hal_reg_caps;
	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
	bool hw_mode_done;
	bool mac_phy_done;
	bool ext_hal_reg_done;
	bool mac_phy_chainmask_combo_done;
	bool mac_phy_chainmask_cap_done;
	bool oem_dma_ring_cap_done;
	bool dma_ring_cap_done;
};

/* Host-byte-order copy of selected fields from the service ready ext2
 * event.
 */
struct ath12k_wmi_svc_rdy_ext2_arg {
	u32 reg_db_version;
	u32 hw_min_max_tx_power_2ghz;
	u32 hw_min_max_tx_power_5ghz;
	u32 chwidth_num_peer_caps;
	u32 preamble_puncture_bw;
	u32 max_user_per_ppdu_ofdma;
	u32 max_user_per_ppdu_mumimo;
	u32 target_cap_flags;
	u32 eht_cap_mac_info[WMI_MAX_EHTCAP_MAC_SIZE];
	u32 max_num_linkview_peers;
	u32 max_num_msduq_supported_per_tid;
	u32 default_num_msduq_supported_per_tid;
};

/* Parse state for the service ready ext2 event TLV groups. */
struct ath12k_wmi_svc_rdy_ext2_parse {
	struct ath12k_wmi_svc_rdy_ext2_arg arg;
	struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
	bool dma_ring_cap_done;
	bool spectral_bin_scaling_done;
	bool mac_phy_caps_ext_done;
	bool hal_reg_caps_ext2_done;
	bool scan_radio_caps_ext2_done;
	bool twt_caps_done;
	bool htt_msdu_idx_to_qtype_map_done;
	bool dbs_or_sbs_cap_ext_done;
};

/* Parse state for the WMI ready event. */
struct ath12k_wmi_rdy_parse {
	u32 num_extra_mac_addr;
};

/* Pointers and counters collected while parsing a DMA buffer release
 * event.
 */
struct ath12k_wmi_dma_buf_release_arg {
	struct ath12k_wmi_dma_buf_release_fixed_params fixed;
	const struct ath12k_wmi_dma_buf_release_entry_params *buf_entry;
	const struct ath12k_wmi_dma_buf_release_meta_data_params *meta_data;
	u32 num_buf_entry;
	u32 num_meta;
	bool buf_entry_done;
	bool meta_data_done;
};

/* Per-tag TLV validation policy; see ath12k_wmi_tlv_policies[]. */
struct ath12k_wmi_tlv_policy {
	size_t min_len;
};

/* Parse state for a WMI management rx event. */
struct wmi_tlv_mgmt_rx_parse {
	const struct ath12k_wmi_mgmt_rx_params *fixed;
	const u8 *frame_buf;
	bool frame_buf_done;
};
128 
/* Minimum payload length accepted for each known TLV tag. A received
 * TLV shorter than its entry here is rejected by ath12k_wmi_tlv_iter();
 * tags absent from the table (or with min_len 0) are not length-checked.
 */
static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
	[WMI_TAG_ARRAY_BYTE] = { .min_len = 0 },
	[WMI_TAG_ARRAY_UINT32] = { .min_len = 0 },
	[WMI_TAG_SERVICE_READY_EVENT] = {
		.min_len = sizeof(struct wmi_service_ready_event) },
	[WMI_TAG_SERVICE_READY_EXT_EVENT] = {
		.min_len = sizeof(struct wmi_service_ready_ext_event) },
	[WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS] = {
		.min_len = sizeof(struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params) },
	[WMI_TAG_SOC_HAL_REG_CAPABILITIES] = {
		.min_len = sizeof(struct ath12k_wmi_soc_hal_reg_caps_params) },
	[WMI_TAG_VDEV_START_RESPONSE_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_start_resp_event) },
	[WMI_TAG_PEER_DELETE_RESP_EVENT] = {
		.min_len = sizeof(struct wmi_peer_delete_resp_event) },
	[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT] = {
		.min_len = sizeof(struct wmi_bcn_tx_status_event) },
	[WMI_TAG_VDEV_STOPPED_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_stopped_event) },
	[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT] = {
		.min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) },
	[WMI_TAG_MGMT_RX_HDR] = {
		.min_len = sizeof(struct ath12k_wmi_mgmt_rx_params) },
	[WMI_TAG_MGMT_TX_COMPL_EVENT] = {
		.min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
	[WMI_TAG_SCAN_EVENT] = {
		.min_len = sizeof(struct wmi_scan_event) },
	[WMI_TAG_PEER_STA_KICKOUT_EVENT] = {
		.min_len = sizeof(struct wmi_peer_sta_kickout_event) },
	[WMI_TAG_ROAM_EVENT] = {
		.min_len = sizeof(struct wmi_roam_event) },
	[WMI_TAG_CHAN_INFO_EVENT] = {
		.min_len = sizeof(struct wmi_chan_info_event) },
	[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT] = {
		.min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
	[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
	[WMI_TAG_READY_EVENT] = {
		.min_len = sizeof(struct ath12k_wmi_ready_event_min_params) },
	[WMI_TAG_SERVICE_AVAILABLE_EVENT] = {
		.min_len = sizeof(struct wmi_service_available_event) },
	[WMI_TAG_PEER_ASSOC_CONF_EVENT] = {
		.min_len = sizeof(struct wmi_peer_assoc_conf_event) },
	[WMI_TAG_RFKILL_EVENT] = {
		.min_len = sizeof(struct wmi_rfkill_state_change_event) },
	[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] = {
		.min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
	[WMI_TAG_HOST_SWFDA_EVENT] = {
		.min_len = sizeof(struct wmi_fils_discovery_event) },
	[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
		.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
	[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
	[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_twt_enable_event) },
	[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT] = {
		.min_len = sizeof(struct wmi_twt_disable_event) },
	[WMI_TAG_P2P_NOA_INFO] = {
		.min_len = sizeof(struct ath12k_wmi_p2p_noa_info) },
	[WMI_TAG_P2P_NOA_EVENT] = {
		.min_len = sizeof(struct wmi_p2p_noa_event) },
	[WMI_TAG_11D_NEW_COUNTRY_EVENT] = {
		.min_len = sizeof(struct wmi_11d_new_cc_event) },
	[WMI_TAG_PER_CHAIN_RSSI_STATS] = {
		.min_len = sizeof(struct wmi_per_chain_rssi_stat_params) },
	[WMI_TAG_OBSS_COLOR_COLLISION_EVT] = {
		.min_len = sizeof(struct wmi_obss_color_collision_event) },
};
197 
/* Build a little-endian TLV header word: @cmd (the tag) packed into the
 * WMI_TLV_TAG field and @len (payload length, not counting this header)
 * into the WMI_TLV_LEN field.
 */
__le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
{
	return le32_encode_bits(cmd, WMI_TLV_TAG) |
		le32_encode_bits(len, WMI_TLV_LEN);
}
203 
/* As ath12k_wmi_tlv_hdr(), but @len is the total TLV size including the
 * header; the header size is subtracted to yield the payload length.
 */
static __le32 ath12k_wmi_tlv_cmd_hdr(u32 cmd, u32 len)
{
	return ath12k_wmi_tlv_hdr(cmd, len - TLV_HDR_SIZE);
}
208 
/* Fill @config with the QCN9274 (multi-radio) target defaults used for
 * the WMI_INIT command, scaling per-radio quantities by ab->num_radios.
 */
void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
			     struct ath12k_wmi_resource_config_arg *config)
{
	config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS(ab);
	config->num_peers = ab->num_radios *
		ath12k_core_get_max_peers_per_radio(ab);
	config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
	config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
	/* Enable every RF chain the target reports, for both tx and rx */
	config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;

	if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
		config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
	else
		config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;

	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
	config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
	config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
	config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
	config->num_wds_entries = TARGET_NUM_WDS_ENTRIES;
	config->dma_burst_size = TARGET_DMA_BURST_SIZE;
	config->rx_skip_defrag_timeout_dup_detection_check =
		TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
	config->vow_config = TARGET_VOW_CONFIG;
	config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
	config->num_msdu_desc = TARGET_NUM_MSDU_DESC;
	config->beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
	config->rx_batchmode = TARGET_RX_BATCHMODE;
	/* Indicates host supports peer map v3 and unmap v2 support */
	config->peer_map_unmap_version = 0x32;
	config->twt_ap_pdev_count = ab->num_radios;
	config->twt_ap_sta_count = 1000;
	config->ema_max_vap_cnt = ab->num_radios;
	config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD;
	/* Grow the beacon offload budget to cover the EMA vaps as well */
	config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt;

	if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map))
		config->peer_metadata_ver = ATH12K_PEER_METADATA_V1B;
}
259 
ath12k_wmi_init_wcn7850(struct ath12k_base * ab,struct ath12k_wmi_resource_config_arg * config)260 void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
261 			     struct ath12k_wmi_resource_config_arg *config)
262 {
263 	config->num_vdevs = 4;
264 	config->num_peers = 16;
265 	config->num_tids = 32;
266 
267 	config->num_offload_peers = 3;
268 	config->num_offload_reorder_buffs = 3;
269 	config->num_peer_keys = TARGET_NUM_PEER_KEYS;
270 	config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
271 	config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
272 	config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
273 	config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
274 	config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
275 	config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
276 	config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
277 	config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
278 	config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
279 	config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
280 	config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
281 	config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
282 	config->num_mcast_groups = 0;
283 	config->num_mcast_table_elems = 0;
284 	config->mcast2ucast_mode = 0;
285 	config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
286 	config->num_wds_entries = 0;
287 	config->dma_burst_size = 0;
288 	config->rx_skip_defrag_timeout_dup_detection_check = 0;
289 	config->vow_config = TARGET_VOW_CONFIG;
290 	config->gtk_offload_max_vdev = 2;
291 	config->num_msdu_desc = 0x400;
292 	config->beacon_tx_offload_max_vdev = 2;
293 	config->rx_batchmode = TARGET_RX_BATCHMODE;
294 
295 	config->peer_map_unmap_version = 0x1;
296 	config->use_pdev_id = 1;
297 	config->max_frag_entries = 0xa;
298 	config->num_tdls_vdevs = 0x1;
299 	config->num_tdls_conn_table_entries = 8;
300 	config->beacon_tx_offload_max_vdev = 0x2;
301 	config->num_multicast_filter_entries = 0x20;
302 	config->num_wow_filters = 0x16;
303 	config->num_keep_alive_pattern = 0;
304 }
305 
/* Map a WMI_HOST_HW_MODE_* id to its matching *_PRI priority constant. */
#define PRIMAP(_hw_mode_) \
	[_hw_mode_] = _hw_mode_##_PRI

/* Priority of each hardware mode, indexed by WMI_HOST_HW_MODE_* id;
 * used when choosing a preferred hw mode among those the target
 * advertises.
 */
static const int ath12k_hw_mode_pri_map[] = {
	PRIMAP(WMI_HOST_HW_MODE_SINGLE),
	PRIMAP(WMI_HOST_HW_MODE_DBS),
	PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
	PRIMAP(WMI_HOST_HW_MODE_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
	/* keep last */
	PRIMAP(WMI_HOST_HW_MODE_MAX),
};
319 
/* Walk a buffer of WMI TLVs, validating each header and checking the
 * payload against the per-tag minimum length in ath12k_wmi_tlv_policies,
 * then invoke @iter with each tag, payload length and payload pointer.
 *
 * Returns 0 on success, -EINVAL on a truncated or undersized TLV, or
 * the first non-zero value returned by @iter.
 *
 * NOTE: FreeBSD builds use 'const u8 *' instead of 'const void *' for
 * the cursor because void pointer arithmetic is a GNU extension.
 */
static int
#if defined(__linux__)
ath12k_wmi_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
#elif defined(__FreeBSD__)
ath12k_wmi_tlv_iter(struct ath12k_base *ab, const u8 *ptr, size_t len,
#endif
		    int (*iter)(struct ath12k_base *ab, u16 tag, u16 len,
				const void *ptr, void *data),
		    void *data)
{
#if defined(__linux__)
	const void *begin = ptr;
#elif defined(__FreeBSD__)
	const u8 *begin = ptr;
#endif
	const struct wmi_tlv *tlv;
	u16 tlv_tag, tlv_len;
	int ret;

	while (len > 0) {
		/* Need at least a full TLV header remaining */
		if (len < sizeof(*tlv)) {
			ath12k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}

#if defined(__linux__)
		tlv = ptr;
#elif defined(__FreeBSD__)
		tlv = (const void *)ptr;
#endif
		tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
		tlv_len = le32_get_bits(tlv->header, WMI_TLV_LEN);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		/* Declared payload must fit in what is left of the buffer */
		if (tlv_len > len) {
			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}

		/* Enforce the per-tag minimum payload length, if one exists */
		if (tlv_tag < ARRAY_SIZE(ath12k_wmi_tlv_policies) &&
		    ath12k_wmi_tlv_policies[tlv_tag].min_len &&
		    ath12k_wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
			ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
				   tlv_tag, ptr - begin, tlv_len,
				   ath12k_wmi_tlv_policies[tlv_tag].min_len);
			return -EINVAL;
		}

		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}

	return 0;
}
381 
/* TLV iterator callback that records each payload pointer into the
 * caller's tag-indexed table (@data, an array of WMI_TAG_MAX slots).
 * Tags at or beyond WMI_TAG_MAX are silently ignored.
 */
static int ath12k_wmi_tlv_iter_parse(struct ath12k_base *ab, u16 tag, u16 len,
				     const void *ptr, void *data)
{
	const void **tb = data;

	if (tag >= WMI_TAG_MAX)
		return 0;

	tb[tag] = ptr;

	return 0;
}
392 
/* Parse a TLV buffer into @tb, a WMI_TAG_MAX-sized tag-indexed array of
 * payload pointers (entries for absent tags are left untouched).
 * Returns 0 or the error from ath12k_wmi_tlv_iter().
 */
static int ath12k_wmi_tlv_parse(struct ath12k_base *ar, const void **tb,
				const void *ptr, size_t len)
{
	return ath12k_wmi_tlv_iter(ar, ptr, len, ath12k_wmi_tlv_iter_parse,
				   (void *)tb);
}
399 
400 static const void **
ath12k_wmi_tlv_parse_alloc(struct ath12k_base * ab,struct sk_buff * skb,gfp_t gfp)401 ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab,
402 			   struct sk_buff *skb, gfp_t gfp)
403 {
404 	const void **tb;
405 	int ret;
406 
407 	tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp);
408 	if (!tb)
409 		return ERR_PTR(-ENOMEM);
410 
411 	ret = ath12k_wmi_tlv_parse(ab, tb, skb->data, skb->len);
412 	if (ret) {
413 		kfree(tb);
414 		return ERR_PTR(ret);
415 	}
416 
417 	return tb;
418 }
419 
/* Prepend the WMI command header to @skb and hand the frame to HTC on
 * this pdev's endpoint without waiting for tx credits. On HTC failure
 * the header is pulled back off so the caller can retry with the same
 * skb (see ath12k_wmi_cmd_send()).
 *
 * Returns 0 on success or a negative errno from skb_push()/HTC.
 */
static int ath12k_wmi_cmd_send_nowait(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
				      u32 cmd_id)
{
	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_cmd_hdr *cmd_hdr;
	int ret;

	/* Reserve room for the WMI command header in front of the payload */
	if (!skb_push(skb, sizeof(struct wmi_cmd_hdr)))
		return -ENOMEM;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	cmd_hdr->cmd_id = le32_encode_bits(cmd_id, WMI_CMD_HDR_CMD_ID);

	memset(skb_cb, 0, sizeof(*skb_cb));
	ret = ath12k_htc_send(&ab->htc, wmi->eid, skb);

	if (ret)
		goto err_pull;

	return 0;

err_pull:
	/* Undo the header push so the skb is unchanged for a retry */
	skb_pull(skb, sizeof(struct wmi_cmd_hdr));
	return ret;
}
446 
/* Send a WMI command, sleeping (up to WMI_SEND_TIMEOUT_HZ) until HTC tx
 * credits become available. -EAGAIN from the nowait path keeps us on the
 * credit waitqueue; a crash flush in progress aborts with -ESHUTDOWN.
 *
 * Returns 0 on success, -EAGAIN if the wait timed out, or another
 * negative errno from the send path. May sleep.
 */
int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
			u32 cmd_id)
{
	struct ath12k_wmi_base *wmi_ab = wmi->wmi_ab;
	int ret = -EOPNOTSUPP;

	might_sleep();

	/* The condition is a statement expression: each wakeup retries the
	 * send, and the wait ends once the result is anything but -EAGAIN.
	 */
	wait_event_timeout(wmi_ab->tx_credits_wq, ({
		ret = ath12k_wmi_cmd_send_nowait(wmi, skb, cmd_id);

		if (ret && test_bit(ATH12K_FLAG_CRASH_FLUSH, &wmi_ab->ab->dev_flags))
			ret = -ESHUTDOWN;

		(ret != -EAGAIN);
	}), WMI_SEND_TIMEOUT_HZ);

	if (ret == -EAGAIN)
		ath12k_warn(wmi_ab->ab, "wmi command %d timeout\n", cmd_id);

	return ret;
}
469 
/* Copy the fixed portion of a service ready ext event (@ptr) into @arg,
 * converting all fields to host byte order.
 *
 * Returns 0, or -EINVAL if the TLV was not present (@ptr is NULL).
 */
static int ath12k_pull_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
				     const void *ptr,
				     struct ath12k_wmi_service_ext_arg *arg)
{
	const struct wmi_service_ready_ext_event *ev = ptr;
	int i;

	if (!ev)
		return -EINVAL;

	/* Move this to host based bitmap */
	arg->default_conc_scan_config_bits =
		le32_to_cpu(ev->default_conc_scan_config_bits);
	arg->default_fw_config_bits = le32_to_cpu(ev->default_fw_config_bits);
	arg->he_cap_info = le32_to_cpu(ev->he_cap_info);
	arg->mpdu_density = le32_to_cpu(ev->mpdu_density);
	arg->max_bssid_rx_filters = le32_to_cpu(ev->max_bssid_rx_filters);
	arg->ppet.numss_m1 = le32_to_cpu(ev->ppet.numss_m1);
	arg->ppet.ru_bit_mask = le32_to_cpu(ev->ppet.ru_info);

	for (i = 0; i < WMI_MAX_NUM_SS; i++)
		arg->ppet.ppet16_ppet8_ru3_ru0[i] =
			le32_to_cpu(ev->ppet.ppet16_ppet8_ru3_ru0[i]);

	return 0;
}
496 
/* Locate the MAC/PHY capability entry for (@hw_mode_id, @phy_id) inside
 * the parsed service ready ext TLVs (@svc) and populate @pdev's
 * capabilities (per-band HT/VHT/HE info, chain masks, PPE thresholds)
 * plus the next free ab->fw_pdev slot.
 *
 * The phy index is found by summing the phy counts (fls of phy_id_map)
 * of all hw modes preceding the requested one, then adding @phy_id.
 *
 * Returns 0 on success, -EINVAL if the hw mode or phy is out of range,
 * required TLVs are missing, or the entry supports neither 2 GHz nor
 * 5 GHz.
 */
static int
ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
				      struct ath12k_wmi_svc_rdy_ext_parse *svc,
				      u8 hw_mode_id, u8 phy_id,
				      struct ath12k_pdev *pdev)
{
	const struct ath12k_wmi_mac_phy_caps_params *mac_caps;
	const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps = svc->hw_caps;
	const struct ath12k_wmi_hw_mode_cap_params *wmi_hw_mode_caps = svc->hw_mode_caps;
	const struct ath12k_wmi_mac_phy_caps_params *wmi_mac_phy_caps = svc->mac_phy_caps;
	struct ath12k_base *ab = wmi_handle->wmi_ab->ab;
	struct ath12k_band_cap *cap_band;
	struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
	struct ath12k_fw_pdev *fw_pdev;
	u32 phy_map;
	u32 hw_idx, phy_idx = 0;
	int i;

	if (!hw_caps || !wmi_hw_mode_caps || !svc->soc_hal_reg_caps)
		return -EINVAL;

	/* Accumulate the phy count of every hw mode before the target one */
	for (hw_idx = 0; hw_idx < le32_to_cpu(hw_caps->num_hw_modes); hw_idx++) {
		if (hw_mode_id == le32_to_cpu(wmi_hw_mode_caps[hw_idx].hw_mode_id))
			break;

		phy_map = le32_to_cpu(wmi_hw_mode_caps[hw_idx].phy_id_map);
		phy_idx = fls(phy_map);
	}

	if (hw_idx == le32_to_cpu(hw_caps->num_hw_modes))
		return -EINVAL;

	phy_idx += phy_id;
	if (phy_id >= le32_to_cpu(svc->soc_hal_reg_caps->num_phy))
		return -EINVAL;

	mac_caps = wmi_mac_phy_caps + phy_idx;

	pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
	pdev->hw_link_id = ath12k_wmi_mac_phy_get_hw_link_id(mac_caps);
	pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands);
	pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);

	/* Record this firmware pdev in the next free slot */
	fw_pdev = &ab->fw_pdev[ab->fw_pdev_count];
	fw_pdev->supported_bands = le32_to_cpu(mac_caps->supported_bands);
	fw_pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
	fw_pdev->phy_id = le32_to_cpu(mac_caps->phy_id);
	ab->fw_pdev_count++;

	/* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
	 * band to band for a single radio, need to see how this should be
	 * handled.
	 */
	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2GHZ_CAP) {
		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_2g);
		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_2g);
	} else if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5GHZ_CAP) {
		pdev_cap->vht_cap = le32_to_cpu(mac_caps->vht_cap_info_5g);
		pdev_cap->vht_mcs = le32_to_cpu(mac_caps->vht_supp_mcs_5g);
		pdev_cap->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_5g);
		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_5g);
		pdev_cap->nss_ratio_enabled =
			WMI_NSS_RATIO_EN_DIS_GET(mac_caps->nss_ratio);
		pdev_cap->nss_ratio_info =
			WMI_NSS_RATIO_INFO_GET(mac_caps->nss_ratio);
	} else {
		return -EINVAL;
	}

	/* tx/rx chainmask reported from fw depends on the actual hw chains used,
	 * For example, for 4x4 capable macphys, first 4 chains can be used for first
	 * mac and the remaining 4 chains can be used for the second mac or vice-versa.
	 * In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0
	 * will be advertised for second mac or vice-versa. Compute the shift value
	 * for tx/rx chainmask which will be used to advertise supported ht/vht rates to
	 * mac80211.
	 */
	pdev_cap->tx_chain_mask_shift =
			find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32);
	pdev_cap->rx_chain_mask_shift =
			find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);

	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2GHZ_CAP) {
		cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
		cap_band->max_bw_supported = le32_to_cpu(mac_caps->max_bw_supported_2g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_2g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_2g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_2g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_2g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_2g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet2g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet2g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet2g.ppet16_ppet8_ru3_ru0[i]);
	}

	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5GHZ_CAP) {
		cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
		cap_band->max_bw_supported =
			le32_to_cpu(mac_caps->max_bw_supported_5g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);

		/* The 6 GHz band reuses the firmware's 5 GHz capability set */
		cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
		cap_band->max_bw_supported =
			le32_to_cpu(mac_caps->max_bw_supported_5g);
		cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
		cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
		cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
		cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
		for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
			cap_band->he_cap_phy_info[i] =
				le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);

		cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
		cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);

		for (i = 0; i < WMI_MAX_NUM_SS; i++)
			cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
				le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);
	}

	return 0;
}
641 
642 static int
ath12k_pull_reg_cap_svc_rdy_ext(struct ath12k_wmi_pdev * wmi_handle,const struct ath12k_wmi_soc_hal_reg_caps_params * reg_caps,const struct ath12k_wmi_hal_reg_caps_ext_params * ext_caps,u8 phy_idx,struct ath12k_wmi_hal_reg_capabilities_ext_arg * param)643 ath12k_pull_reg_cap_svc_rdy_ext(struct ath12k_wmi_pdev *wmi_handle,
644 				const struct ath12k_wmi_soc_hal_reg_caps_params *reg_caps,
645 				const struct ath12k_wmi_hal_reg_caps_ext_params *ext_caps,
646 				u8 phy_idx,
647 				struct ath12k_wmi_hal_reg_capabilities_ext_arg *param)
648 {
649 	const struct ath12k_wmi_hal_reg_caps_ext_params *ext_reg_cap;
650 
651 	if (!reg_caps || !ext_caps)
652 		return -EINVAL;
653 
654 	if (phy_idx >= le32_to_cpu(reg_caps->num_phy))
655 		return -EINVAL;
656 
657 	ext_reg_cap = &ext_caps[phy_idx];
658 
659 	param->phy_id = le32_to_cpu(ext_reg_cap->phy_id);
660 	param->eeprom_reg_domain = le32_to_cpu(ext_reg_cap->eeprom_reg_domain);
661 	param->eeprom_reg_domain_ext =
662 		le32_to_cpu(ext_reg_cap->eeprom_reg_domain_ext);
663 	param->regcap1 = le32_to_cpu(ext_reg_cap->regcap1);
664 	param->regcap2 = le32_to_cpu(ext_reg_cap->regcap2);
665 	/* check if param->wireless_mode is needed */
666 	param->low_2ghz_chan = le32_to_cpu(ext_reg_cap->low_2ghz_chan);
667 	param->high_2ghz_chan = le32_to_cpu(ext_reg_cap->high_2ghz_chan);
668 	param->low_5ghz_chan = le32_to_cpu(ext_reg_cap->low_5ghz_chan);
669 	param->high_5ghz_chan = le32_to_cpu(ext_reg_cap->high_5ghz_chan);
670 
671 	return 0;
672 }
673 
ath12k_pull_service_ready_tlv(struct ath12k_base * ab,const void * evt_buf,struct ath12k_wmi_target_cap_arg * cap)674 static int ath12k_pull_service_ready_tlv(struct ath12k_base *ab,
675 					 const void *evt_buf,
676 					 struct ath12k_wmi_target_cap_arg *cap)
677 {
678 	const struct wmi_service_ready_event *ev = evt_buf;
679 
680 	if (!ev) {
681 		ath12k_err(ab, "%s: failed by NULL param\n",
682 			   __func__);
683 		return -EINVAL;
684 	}
685 
686 	cap->phy_capability = le32_to_cpu(ev->phy_capability);
687 	cap->max_frag_entry = le32_to_cpu(ev->max_frag_entry);
688 	cap->num_rf_chains = le32_to_cpu(ev->num_rf_chains);
689 	cap->ht_cap_info = le32_to_cpu(ev->ht_cap_info);
690 	cap->vht_cap_info = le32_to_cpu(ev->vht_cap_info);
691 	cap->vht_supp_mcs = le32_to_cpu(ev->vht_supp_mcs);
692 	cap->hw_min_tx_power = le32_to_cpu(ev->hw_min_tx_power);
693 	cap->hw_max_tx_power = le32_to_cpu(ev->hw_max_tx_power);
694 	cap->sys_cap_info = le32_to_cpu(ev->sys_cap_info);
695 	cap->min_pkt_size_enable = le32_to_cpu(ev->min_pkt_size_enable);
696 	cap->max_bcn_ie_size = le32_to_cpu(ev->max_bcn_ie_size);
697 	cap->max_num_scan_channels = le32_to_cpu(ev->max_num_scan_channels);
698 	cap->max_supported_macs = le32_to_cpu(ev->max_supported_macs);
699 	cap->wmi_fw_sub_feat_caps = le32_to_cpu(ev->wmi_fw_sub_feat_caps);
700 	cap->txrx_chainmask = le32_to_cpu(ev->txrx_chainmask);
701 	cap->default_dbs_hw_mode_index = le32_to_cpu(ev->default_dbs_hw_mode_index);
702 	cap->num_msdu_desc = le32_to_cpu(ev->num_msdu_desc);
703 
704 	return 0;
705 }
706 
/* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in
 * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each
 * 4-byte word.
 */
static void ath12k_wmi_service_bitmap_copy(struct ath12k_wmi_pdev *wmi,
					   const u32 *wmi_svc_bm)
{
	int i, j;

	/* j runs over service ids; the inner do/while consumes exactly
	 * WMI_SERVICE_BITS_IN_SIZE32 bits of word i before advancing.
	 */
	for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
		do {
			if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
				set_bit(j, wmi->wmi_ab->svc_map);
		} while (++j % WMI_SERVICE_BITS_IN_SIZE32);
	}
}
723 
/* TLV iterator callback for the service ready event: pulls the target
 * capabilities from the fixed TLV and the service bitmap from the first
 * WMI_TAG_ARRAY_UINT32 TLV (later uint32 arrays are ignored via the
 * wmi_svc_bitmap_done flag). Unknown tags are skipped.
 */
static int ath12k_wmi_svc_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
				    const void *ptr, void *data)
{
	struct ath12k_wmi_svc_ready_parse *svc_ready = data;
	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
	u16 expect_len;

	switch (tag) {
	case WMI_TAG_SERVICE_READY_EVENT:
		if (ath12k_pull_service_ready_tlv(ab, ptr, &ab->target_caps))
			return -EINVAL;
		break;

	case WMI_TAG_ARRAY_UINT32:
		if (!svc_ready->wmi_svc_bitmap_done) {
			expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32);
			if (len < expect_len) {
				ath12k_warn(ab, "invalid len %d for the tag 0x%x\n",
					    len, tag);
				return -EINVAL;
			}

			ath12k_wmi_service_bitmap_copy(wmi_handle, ptr);

			svc_ready->wmi_svc_bitmap_done = true;
		}
		break;
	default:
		break;
	}

	return 0;
}
757 
ath12k_service_ready_event(struct ath12k_base * ab,struct sk_buff * skb)758 static int ath12k_service_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
759 {
760 	struct ath12k_wmi_svc_ready_parse svc_ready = { };
761 	int ret;
762 
763 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
764 				  ath12k_wmi_svc_rdy_parse,
765 				  &svc_ready);
766 	if (ret) {
767 		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
768 		return ret;
769 	}
770 
771 	return 0;
772 }
773 
ath12k_wmi_mgmt_get_freq(struct ath12k * ar,struct ieee80211_tx_info * info)774 static u32 ath12k_wmi_mgmt_get_freq(struct ath12k *ar,
775 				    struct ieee80211_tx_info *info)
776 {
777 	struct ath12k_base *ab = ar->ab;
778 	u32 freq = 0;
779 
780 	if (ab->hw_params->single_pdev_only &&
781 	    ar->scan.is_roc &&
782 	    (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
783 		freq = ar->scan.roc_freq;
784 
785 	return freq;
786 }
787 
/* Allocate a zeroed skb for a WMI command. @len is rounded up to a
 * 4-byte multiple and WMI_SKB_HEADROOM is reserved in front so the
 * command header can be pushed later. Returns NULL on allocation
 * failure.
 */
struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_ab, u32 len)
{
	struct ath12k_base *ab = wmi_ab->ab;
	u32 aligned_len = roundup(len, 4);
	struct sk_buff *skb;

	skb = ath12k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + aligned_len);
	if (!skb)
		return NULL;

	skb_reserve(skb, WMI_SKB_HEADROOM);
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath12k_warn(ab, "unaligned WMI skb data\n");

	memset(skb_put(skb, aligned_len), 0, aligned_len);

	return skb;
}
807 
/* Send a management frame to firmware via WMI_MGMT_TX_SEND_CMDID.
 *
 * The frame is referenced by DMA address (taken from the frame's skb
 * control block, mapped by the caller) and additionally the first
 * WMI_MGMT_SEND_DOWNLD_LEN bytes are copied inline as a byte-array TLV.
 * For MLD interfaces whose frame the hw_ops hook classifies as link
 * agnostic, two extra TLVs (TX send params + MLO send params) are appended.
 *
 * Command layout: [cmd][ARRAY_BYTE tlv + frame bytes]
 *                 [+ TX_SEND_PARAMS tlv][+ ARRAY_STRUCT tlv + MLO params]
 *
 * Returns 0 on success or a negative error.  The WMI skb is freed on send
 * failure; the caller retains ownership of @frame in all cases.
 */
int ath12k_wmi_mgmt_send(struct ath12k_link_vif *arvif, u32 buf_id,
			 struct sk_buff *frame)
{
	struct ath12k *ar = arvif->ar;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_mgmt_send_cmd *cmd;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)frame->data;
	struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
	int cmd_len = sizeof(struct ath12k_wmi_mgmt_send_tx_params);
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)hdr;
	struct ath12k_wmi_mlo_mgmt_send_params *ml_params;
	struct ath12k_base *ab = ar->ab;
	struct wmi_tlv *frame_tlv, *tlv;
	struct ath12k_skb_cb *skb_cb;
	u32 buf_len, buf_len_aligned;
	u32 vdev_id = arvif->vdev_id;
	bool link_agnostic = false;
	struct sk_buff *skb;
	int ret, len;
	void *ptr;

	/* Only the head of the frame is downloaded inline; the full frame
	 * is fetched by firmware via paddr below.
	 */
	buf_len = min_t(int, frame->len, WMI_MGMT_SEND_DOWNLD_LEN);

	buf_len_aligned = roundup(buf_len, sizeof(u32));

	len = sizeof(*cmd) + sizeof(*frame_tlv) + buf_len_aligned;

	if (ieee80211_vif_is_mld(vif)) {
		skb_cb = ATH12K_SKB_CB(frame);
		if ((skb_cb->flags & ATH12K_SKB_MLO_STA) &&
		    ab->hw_params->hw_ops->is_frame_link_agnostic &&
		    ab->hw_params->hw_ops->is_frame_link_agnostic(arvif, mgmt)) {
			/* Room for the extra TX params + MLO params TLVs */
			len += cmd_len + TLV_HDR_SIZE + sizeof(*ml_params);
			ath12k_generic_dbg(ATH12K_DBG_MGMT,
					   "Sending Mgmt Frame fc 0x%0x as link agnostic",
					   mgmt->frame_control);
			link_agnostic = true;
		}
	}

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_mgmt_send_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MGMT_TX_SEND_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->desc_id = cpu_to_le32(buf_id);
	cmd->chanfreq = cpu_to_le32(ath12k_wmi_mgmt_get_freq(ar, info));
	cmd->paddr_lo = cpu_to_le32(lower_32_bits(ATH12K_SKB_CB(frame)->paddr));
	cmd->paddr_hi = cpu_to_le32(upper_32_bits(ATH12K_SKB_CB(frame)->paddr));
	cmd->frame_len = cpu_to_le32(frame->len);
	cmd->buf_len = cpu_to_le32(buf_len);
	cmd->tx_params_valid = 0;

	frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
	frame_tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, buf_len_aligned);

	memcpy(frame_tlv->value, frame->data, buf_len);

	if (!link_agnostic)
		goto send;

	ptr = skb->data + sizeof(*cmd) + sizeof(*frame_tlv) + buf_len_aligned;

	tlv = ptr;

	/* Tx params not used currently */
	tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TX_SEND_PARAMS, cmd_len);
	ptr += cmd_len;

	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, sizeof(*ml_params));
	ptr += TLV_HDR_SIZE;

	ml_params = ptr;
	ml_params->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_TX_SEND_PARAMS,
						       sizeof(*ml_params));

	/* Let firmware pick the link to transmit on */
	ml_params->hw_link_id = cpu_to_le32(WMI_MGMT_LINK_AGNOSTIC_ID);

send:
	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
901 
/* Request firmware statistics (WMI_REQUEST_STATS_CMDID) for the given
 * stats bitmap, vdev and pdev.  Returns 0 or a negative error; the command
 * skb is freed on send failure.
 */
int ath12k_wmi_send_stats_request_cmd(struct ath12k *ar, u32 stats_id,
				      u32 vdev_id, u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_request_stats_cmd *req;
	struct sk_buff *msg;
	int status;

	msg = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*req));
	if (!msg)
		return -ENOMEM;

	req = (struct wmi_request_stats_cmd *)msg->data;
	req->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REQUEST_STATS_CMD,
						 sizeof(*req));
	req->stats_id = cpu_to_le32(stats_id);
	req->vdev_id = cpu_to_le32(vdev_id);
	req->pdev_id = cpu_to_le32(pdev_id);

	status = ath12k_wmi_cmd_send(wmi, msg, WMI_REQUEST_STATS_CMDID);
	if (status) {
		ath12k_warn(ar->ab, "failed to send WMI_REQUEST_STATS cmd\n");
		dev_kfree_skb(msg);
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI request stats 0x%x vdev id %d pdev id %d\n",
		   stats_id, vdev_id, pdev_id);

	return status;
}
934 
/* Create a firmware vdev (WMI_VDEV_CREATE_CMDID).
 *
 * Command layout: [cmd][ARRAY_STRUCT tlv][2 x txrx_streams (2G, 5G)]
 *                 [+ ARRAY_STRUCT tlv + MLO create params, ML vdevs only]
 *
 * An MLO params TLV is appended only when @args->mld_addr is a valid
 * unicast MAC address.  Returns 0 or a negative error; the command skb is
 * freed on send failure.
 */
int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
			   struct ath12k_wmi_vdev_create_arg *args)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_create_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_vdev_txrx_streams_params *txrx_streams;
	bool is_ml_vdev = is_valid_ether_addr(args->mld_addr);
	struct wmi_vdev_create_mlo_params *ml_params;
	struct wmi_tlv *tlv;
	int ret, len;
#if defined(__linux__)
	void *ptr;
#elif defined(__FreeBSD__)
	u8 *ptr;
#endif

	/* It can be optimized by sending tx/rx chain configuration
	 * only for supported bands instead of always sending it for
	 * both the bands.
	 */
	len = sizeof(*cmd) + TLV_HDR_SIZE +
		(WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams)) +
		(is_ml_vdev ? TLV_HDR_SIZE + sizeof(*ml_params) : 0);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_create_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CREATE_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(args->if_id);
	cmd->vdev_type = cpu_to_le32(args->type);
	cmd->vdev_subtype = cpu_to_le32(args->subtype);
	cmd->num_cfg_txrx_streams = cpu_to_le32(WMI_NUM_SUPPORTED_BAND_MAX);
	cmd->pdev_id = cpu_to_le32(args->pdev_id);
	cmd->mbssid_flags = cpu_to_le32(args->mbssid_flags);
	cmd->mbssid_tx_vdev_id = cpu_to_le32(args->mbssid_tx_vdev_id);
	cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
	ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);

	if (args->if_stats_id != ATH12K_INVAL_VDEV_STATS_ID)
		cmd->vdev_stats_id_valid = cpu_to_le32(BIT(0));

	/* TLVs follow the fixed-size command header */
	ptr = skb->data + sizeof(*cmd);
	len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);

#if defined(__linux__)
	tlv = ptr;
#elif defined(__FreeBSD__)
	tlv = (void *)ptr;
#endif
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);

	ptr += TLV_HDR_SIZE;
#if defined(__linux__)
	txrx_streams = ptr;
#elif defined(__FreeBSD__)
	txrx_streams = (void *)ptr;
#endif
	/* First entry: 2 GHz chain config */
	len = sizeof(*txrx_streams);
	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
							  len);
	txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_2G);
	txrx_streams->supported_tx_streams =
				cpu_to_le32(args->chains[NL80211_BAND_2GHZ].tx);
	txrx_streams->supported_rx_streams =
				cpu_to_le32(args->chains[NL80211_BAND_2GHZ].rx);

	/* Second entry: 5 GHz chain config */
	txrx_streams++;
	txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
							  len);
	txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_5G);
	txrx_streams->supported_tx_streams =
				cpu_to_le32(args->chains[NL80211_BAND_5GHZ].tx);
	txrx_streams->supported_rx_streams =
				cpu_to_le32(args->chains[NL80211_BAND_5GHZ].rx);

	ptr += WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);

	if (is_ml_vdev) {
		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
						 sizeof(*ml_params));
		ptr += TLV_HDR_SIZE;
		ml_params = ptr;

		ml_params->tlv_header =
			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_CREATE_PARAMS,
					       sizeof(*ml_params));
		ether_addr_copy(ml_params->mld_macaddr.addr, args->mld_addr);
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
		   args->if_id, args->type, args->subtype,
		   macaddr, args->pdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to submit WMI_VDEV_CREATE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
1044 
/* Delete a firmware vdev (WMI_VDEV_DELETE_CMDID).  Returns 0 or a negative
 * error; the command skb is freed on send failure.
 */
int ath12k_wmi_vdev_delete(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_delete_cmd *del;
	struct sk_buff *msg;
	int status;

	msg = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*del));
	if (!msg)
		return -ENOMEM;

	del = (struct wmi_vdev_delete_cmd *)msg->data;
	del->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DELETE_CMD,
						 sizeof(*del));
	del->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id);

	status = ath12k_wmi_cmd_send(wmi, msg, WMI_VDEV_DELETE_CMDID);
	if (status) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n");
		dev_kfree_skb(msg);
	}

	return status;
}
1071 
/* Stop a running vdev (WMI_VDEV_STOP_CMDID).  Returns 0 or a negative
 * error; the command skb is freed on send failure.
 */
int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_stop_cmd *stop;
	struct sk_buff *msg;
	int status;

	msg = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*stop));
	if (!msg)
		return -ENOMEM;

	stop = (struct wmi_vdev_stop_cmd *)msg->data;
	stop->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_STOP_CMD,
						  sizeof(*stop));
	stop->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev stop id 0x%x\n", vdev_id);

	status = ath12k_wmi_cmd_send(wmi, msg, WMI_VDEV_STOP_CMDID);
	if (status) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n");
		dev_kfree_skb(msg);
	}

	return status;
}
1099 
/* Bring a vdev down (WMI_VDEV_DOWN_CMDID).  Returns 0 or a negative error;
 * the command skb is freed on send failure.
 */
int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_down_cmd *down;
	struct sk_buff *msg;
	int status;

	msg = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*down));
	if (!msg)
		return -ENOMEM;

	down = (struct wmi_vdev_down_cmd *)msg->data;
	down->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DOWN_CMD,
						  sizeof(*down));
	down->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev down id 0x%x\n", vdev_id);

	status = ath12k_wmi_cmd_send(wmi, msg, WMI_VDEV_DOWN_CMDID);
	if (status) {
		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n");
		dev_kfree_skb(msg);
	}

	return status;
}
1127 
ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params * chan,struct wmi_vdev_start_req_arg * arg)1128 static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
1129 				       struct wmi_vdev_start_req_arg *arg)
1130 {
1131 	u32 center_freq1 = arg->band_center_freq1;
1132 
1133 	memset(chan, 0, sizeof(*chan));
1134 
1135 	chan->mhz = cpu_to_le32(arg->freq);
1136 	chan->band_center_freq1 = cpu_to_le32(center_freq1);
1137 	if (arg->mode == MODE_11BE_EHT320) {
1138 		if (arg->freq > center_freq1)
1139 			chan->band_center_freq1 = cpu_to_le32(center_freq1 + 80);
1140 		else
1141 			chan->band_center_freq1 = cpu_to_le32(center_freq1 - 80);
1142 
1143 		chan->band_center_freq2 = cpu_to_le32(center_freq1);
1144 
1145 	} else if (arg->mode == MODE_11BE_EHT160 ||
1146 		   arg->mode == MODE_11AX_HE160) {
1147 		if (arg->freq > center_freq1)
1148 			chan->band_center_freq1 = cpu_to_le32(center_freq1 + 40);
1149 		else
1150 			chan->band_center_freq1 = cpu_to_le32(center_freq1 - 40);
1151 
1152 		chan->band_center_freq2 = cpu_to_le32(center_freq1);
1153 	} else {
1154 		chan->band_center_freq2 = 0;
1155 	}
1156 
1157 	chan->info |= le32_encode_bits(arg->mode, WMI_CHAN_INFO_MODE);
1158 	if (arg->passive)
1159 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
1160 	if (arg->allow_ibss)
1161 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ADHOC_ALLOWED);
1162 	if (arg->allow_ht)
1163 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
1164 	if (arg->allow_vht)
1165 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
1166 	if (arg->allow_he)
1167 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
1168 	if (arg->ht40plus)
1169 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_HT40_PLUS);
1170 	if (arg->chan_radar)
1171 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
1172 	if (arg->freq2_radar)
1173 		chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS_FREQ2);
1174 
1175 	chan->reg_info_1 = le32_encode_bits(arg->max_power,
1176 					    WMI_CHAN_REG_INFO1_MAX_PWR) |
1177 		le32_encode_bits(arg->max_reg_power,
1178 				 WMI_CHAN_REG_INFO1_MAX_REG_PWR);
1179 
1180 	chan->reg_info_2 = le32_encode_bits(arg->max_antenna_gain,
1181 					    WMI_CHAN_REG_INFO2_ANT_MAX) |
1182 		le32_encode_bits(arg->max_power, WMI_CHAN_REG_INFO2_MAX_TX_PWR);
1183 }
1184 
/* Start or restart a vdev (WMI_VDEV_START_REQUEST_CMDID /
 * WMI_VDEV_RESTART_REQUEST_CMDID).
 *
 * Command layout: [cmd][channel TLV][empty ARRAY_STRUCT tlv (NoA)]
 *                 [+ MLO start params + partner link array, start-only].
 *
 * SSID/hidden/PMF flags and the MLO TLVs apply only on fresh start
 * (@restart == false).  Returns 0 or a negative error; the command skb is
 * freed on send failure.
 */
int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
			  bool restart)
{
	struct wmi_vdev_start_mlo_params *ml_params;
	struct wmi_partner_link_info *partner_info;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_start_request_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_channel_params *chan;
	struct wmi_tlv *tlv;
#if defined(__linux__)
	void *ptr;
#elif defined(__FreeBSD__)
	u8 *ptr;
#endif
	int ret, len, i, ml_arg_size = 0;

	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
		return -EINVAL;

	len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;

	if (!restart && arg->ml.enabled) {
		ml_arg_size = TLV_HDR_SIZE + sizeof(*ml_params) +
			      TLV_HDR_SIZE + (arg->ml.num_partner_links *
					      sizeof(*partner_info));
		len += ml_arg_size;
	}
	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_START_REQUEST_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->beacon_interval = cpu_to_le32(arg->bcn_intval);
	cmd->bcn_tx_rate = cpu_to_le32(arg->bcn_tx_rate);
	cmd->dtim_period = cpu_to_le32(arg->dtim_period);
	cmd->num_noa_descriptors = cpu_to_le32(arg->num_noa_descriptors);
	cmd->preferred_rx_streams = cpu_to_le32(arg->pref_rx_streams);
	cmd->preferred_tx_streams = cpu_to_le32(arg->pref_tx_streams);
	cmd->cac_duration_ms = cpu_to_le32(arg->cac_duration_ms);
	cmd->regdomain = cpu_to_le32(arg->regdomain);
	cmd->he_ops = cpu_to_le32(arg->he_ops);
	cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
	cmd->mbssid_flags = cpu_to_le32(arg->mbssid_flags);
	cmd->mbssid_tx_vdev_id = cpu_to_le32(arg->mbssid_tx_vdev_id);

	/* SSID and privacy flags only matter on a fresh start */
	if (!restart) {
		if (arg->ssid) {
			cmd->ssid.ssid_len = cpu_to_le32(arg->ssid_len);
			memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
		}
		if (arg->hidden_ssid)
			cmd->flags |= cpu_to_le32(WMI_VDEV_START_HIDDEN_SSID);
		if (arg->pmf_enabled)
			cmd->flags |= cpu_to_le32(WMI_VDEV_START_PMF_ENABLED);
	}

	cmd->flags |= cpu_to_le32(WMI_VDEV_START_LDPC_RX_ENABLED);

	ptr = skb->data + sizeof(*cmd);
#if defined(__linux__)
	chan = ptr;
#elif defined(__FreeBSD__)
	chan = (void *)ptr;
#endif

	ath12k_wmi_put_wmi_channel(chan, arg);

	chan->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
						  sizeof(*chan));
	ptr += sizeof(*chan);

#if defined(__linux__)
	tlv = ptr;
#elif defined(__FreeBSD__)
	tlv = (void *)ptr;
#endif
	/* Zero-length NoA descriptor array; kept for TLV layout */
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);

	/* Note: This is a nested TLV containing:
	 * [wmi_tlv][ath12k_wmi_p2p_noa_descriptor][wmi_tlv]..
	 */

	ptr += sizeof(*tlv);

	if (ml_arg_size) {
		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
						 sizeof(*ml_params));
		ptr += TLV_HDR_SIZE;

		ml_params = ptr;

		ml_params->tlv_header =
			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_START_PARAMS,
					       sizeof(*ml_params));

		ml_params->flags = le32_encode_bits(arg->ml.enabled,
						    ATH12K_WMI_FLAG_MLO_ENABLED) |
				   le32_encode_bits(arg->ml.assoc_link,
						    ATH12K_WMI_FLAG_MLO_ASSOC_LINK) |
				   le32_encode_bits(arg->ml.mcast_link,
						    ATH12K_WMI_FLAG_MLO_MCAST_VDEV) |
				   le32_encode_bits(arg->ml.link_add,
						    ATH12K_WMI_FLAG_MLO_LINK_ADD);

		/* NOTE(review): ml_params->flags is __le32 but printed with
		 * %x — value shown is byte-swapped on big-endian; confirm.
		 */
		ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %d start ml flags 0x%x\n",
			   arg->vdev_id, ml_params->flags);

		ptr += sizeof(*ml_params);

		tlv = ptr;
		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
						 arg->ml.num_partner_links *
						 sizeof(*partner_info));
		ptr += TLV_HDR_SIZE;

		partner_info = ptr;

		for (i = 0; i < arg->ml.num_partner_links; i++) {
			partner_info->tlv_header =
				ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PARTNER_LINK_PARAMS,
						       sizeof(*partner_info));
			partner_info->vdev_id =
				cpu_to_le32(arg->ml.partner_info[i].vdev_id);
			partner_info->hw_link_id =
				cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
			ether_addr_copy(partner_info->vdev_addr.addr,
					arg->ml.partner_info[i].addr);

			/* NOTE(review): __le32 fields printed with %d — same
			 * endianness caveat as above; confirm.
			 */
			ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "partner vdev %d hw_link_id %d macaddr%pM\n",
				   partner_info->vdev_id, partner_info->hw_link_id,
				   partner_info->vdev_addr.addr);

			partner_info++;
		}

		ptr = partner_info;
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
		   restart ? "restart" : "start", arg->vdev_id,
		   arg->freq, arg->mode);

	if (restart)
		ret = ath12k_wmi_cmd_send(wmi, skb,
					  WMI_VDEV_RESTART_REQUEST_CMDID);
	else
		ret = ath12k_wmi_cmd_send(wmi, skb,
					  WMI_VDEV_START_REQUEST_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit vdev_%s cmd\n",
			    restart ? "restart" : "start");
		dev_kfree_skb(skb);
	}

	return ret;
}
1346 
ath12k_wmi_vdev_up(struct ath12k * ar,struct ath12k_wmi_vdev_up_params * params)1347 int ath12k_wmi_vdev_up(struct ath12k *ar, struct ath12k_wmi_vdev_up_params *params)
1348 {
1349 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1350 	struct wmi_vdev_up_cmd *cmd;
1351 	struct sk_buff *skb;
1352 	int ret;
1353 
1354 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1355 	if (!skb)
1356 		return -ENOMEM;
1357 
1358 	cmd = (struct wmi_vdev_up_cmd *)skb->data;
1359 
1360 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_UP_CMD,
1361 						 sizeof(*cmd));
1362 	cmd->vdev_id = cpu_to_le32(params->vdev_id);
1363 	cmd->vdev_assoc_id = cpu_to_le32(params->aid);
1364 
1365 	ether_addr_copy(cmd->vdev_bssid.addr, params->bssid);
1366 
1367 	if (params->tx_bssid) {
1368 		ether_addr_copy(cmd->tx_vdev_bssid.addr, params->tx_bssid);
1369 		cmd->nontx_profile_idx = cpu_to_le32(params->nontx_profile_idx);
1370 		cmd->nontx_profile_cnt = cpu_to_le32(params->nontx_profile_cnt);
1371 	}
1372 
1373 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1374 		   "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
1375 		   params->vdev_id, params->aid, params->bssid);
1376 
1377 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
1378 	if (ret) {
1379 		ath12k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n");
1380 		dev_kfree_skb(skb);
1381 	}
1382 
1383 	return ret;
1384 }
1385 
/* Create a firmware peer (WMI_PEER_CREATE_CMDID).
 *
 * Command layout: [cmd][ARRAY_STRUCT tlv][MLO peer create params].
 * The MLO params TLV is always present; its flags field is nonzero only
 * when @arg->ml_enabled is set (the buffer is zeroed by the allocator).
 *
 * Returns 0 or a negative error; the command skb is freed on send failure.
 */
int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar,
				    struct ath12k_wmi_peer_create_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_create_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;
	struct wmi_peer_create_mlo_params *ml_param;
	void *ptr;
	struct wmi_tlv *tlv;

	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*ml_param);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_create_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_CREATE_CMD,
						 sizeof(*cmd));

	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_addr);
	cmd->peer_type = cpu_to_le32(arg->peer_type);
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);

	ptr = skb->data + sizeof(*cmd);
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
					 sizeof(*ml_param));
	ptr += TLV_HDR_SIZE;
	ml_param = ptr;
	ml_param->tlv_header =
			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_CREATE_PARAMS,
					       sizeof(*ml_param));
	if (arg->ml_enabled)
		ml_param->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);

	ptr += sizeof(*ml_param);

	/* NOTE(review): ml_param->flags is __le32 printed with %x — value is
	 * byte-swapped on big-endian hosts; confirm.
	 */
	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI peer create vdev_id %d peer_addr %pM ml_flags 0x%x\n",
		   arg->vdev_id, arg->peer_addr, ml_param->flags);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
1437 
/* Delete a firmware peer (WMI_PEER_DELETE_CMDID).  Returns 0 or a negative
 * error; the command skb is freed on send failure.
 */
int ath12k_wmi_send_peer_delete_cmd(struct ath12k *ar,
				    const u8 *peer_addr, u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_delete_cmd *del;
	struct sk_buff *msg;
	int status;

	msg = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*del));
	if (!msg)
		return -ENOMEM;

	del = (struct wmi_peer_delete_cmd *)msg->data;
	del->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_DELETE_CMD,
						 sizeof(*del));
	ether_addr_copy(del->peer_macaddr.addr, peer_addr);
	del->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI peer delete vdev_id %d peer_addr %pM\n",
		   vdev_id,  peer_addr);

	status = ath12k_wmi_cmd_send(wmi, msg, WMI_PEER_DELETE_CMDID);
	if (status) {
		ath12k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n");
		dev_kfree_skb(msg);
	}

	return status;
}
1469 
ath12k_wmi_send_pdev_set_regdomain(struct ath12k * ar,struct ath12k_wmi_pdev_set_regdomain_arg * arg)1470 int ath12k_wmi_send_pdev_set_regdomain(struct ath12k *ar,
1471 				       struct ath12k_wmi_pdev_set_regdomain_arg *arg)
1472 {
1473 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1474 	struct wmi_pdev_set_regdomain_cmd *cmd;
1475 	struct sk_buff *skb;
1476 	int ret;
1477 
1478 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1479 	if (!skb)
1480 		return -ENOMEM;
1481 
1482 	cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
1483 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_REGDOMAIN_CMD,
1484 						 sizeof(*cmd));
1485 
1486 	cmd->reg_domain = cpu_to_le32(arg->current_rd_in_use);
1487 	cmd->reg_domain_2g = cpu_to_le32(arg->current_rd_2g);
1488 	cmd->reg_domain_5g = cpu_to_le32(arg->current_rd_5g);
1489 	cmd->conformance_test_limit_2g = cpu_to_le32(arg->ctl_2g);
1490 	cmd->conformance_test_limit_5g = cpu_to_le32(arg->ctl_5g);
1491 	cmd->dfs_domain = cpu_to_le32(arg->dfs_domain);
1492 	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
1493 
1494 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1495 		   "WMI pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n",
1496 		   arg->current_rd_in_use, arg->current_rd_2g,
1497 		   arg->current_rd_5g, arg->dfs_domain, arg->pdev_id);
1498 
1499 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
1500 	if (ret) {
1501 		ath12k_warn(ar->ab,
1502 			    "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n");
1503 		dev_kfree_skb(skb);
1504 	}
1505 
1506 	return ret;
1507 }
1508 
/* Set a per-peer firmware parameter (WMI_PEER_SET_PARAM_CMDID).
 * Returns 0 or a negative error; the command skb is freed on send failure.
 */
int ath12k_wmi_set_peer_param(struct ath12k *ar, const u8 *peer_addr,
			      u32 vdev_id, u32 param_id, u32 param_val)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_set_param_cmd *set;
	struct sk_buff *msg;
	int status;

	msg = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*set));
	if (!msg)
		return -ENOMEM;

	set = (struct wmi_peer_set_param_cmd *)msg->data;
	set->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_SET_PARAM_CMD,
						 sizeof(*set));
	ether_addr_copy(set->peer_macaddr.addr, peer_addr);
	set->vdev_id = cpu_to_le32(vdev_id);
	set->param_id = cpu_to_le32(param_id);
	set->param_value = cpu_to_le32(param_val);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %d peer 0x%pM set param %d value %d\n",
		   vdev_id, peer_addr, param_id, param_val);

	status = ath12k_wmi_cmd_send(wmi, msg, WMI_PEER_SET_PARAM_CMDID);
	if (status) {
		ath12k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n");
		dev_kfree_skb(msg);
	}

	return status;
}
1541 
/* Flush queued frames for the given peer TIDs
 * (WMI_PEER_FLUSH_TIDS_CMDID).  Returns 0 or a negative error; the
 * command skb is freed on send failure.
 */
int ath12k_wmi_send_peer_flush_tids_cmd(struct ath12k *ar,
					u8 peer_addr[ETH_ALEN],
					u32 peer_tid_bitmap,
					u8 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_flush_tids_cmd *flush;
	struct sk_buff *msg;
	int status;

	msg = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*flush));
	if (!msg)
		return -ENOMEM;

	flush = (struct wmi_peer_flush_tids_cmd *)msg->data;
	flush->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_FLUSH_TIDS_CMD,
						   sizeof(*flush));
	ether_addr_copy(flush->peer_macaddr.addr, peer_addr);
	flush->peer_tid_bitmap = cpu_to_le32(peer_tid_bitmap);
	flush->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI peer flush vdev_id %d peer_addr %pM tids %08x\n",
		   vdev_id, peer_addr, peer_tid_bitmap);

	status = ath12k_wmi_cmd_send(wmi, msg, WMI_PEER_FLUSH_TIDS_CMDID);
	if (status) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_FLUSH_TIDS cmd\n");
		dev_kfree_skb(msg);
	}

	return status;
}
1577 
/* Hand the RX reorder queue for one peer/TID to firmware
 * (WMI_PEER_REORDER_QUEUE_SETUP_CMDID).  @paddr is the DMA address of the
 * queue descriptor; the block-ack window is programmed only when
 * @ba_window_size_valid is nonzero.  Returns 0 or a negative error; the
 * command skb is freed on send failure.
 */
int ath12k_wmi_peer_rx_reorder_queue_setup(struct ath12k *ar,
					   int vdev_id, const u8 *addr,
					   dma_addr_t paddr, u8 tid,
					   u8 ba_window_size_valid,
					   u32 ba_window_size)
{
	struct wmi_peer_reorder_queue_setup_cmd *setup;
	struct sk_buff *msg;
	int status;

	msg = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*setup));
	if (!msg)
		return -ENOMEM;

	setup = (struct wmi_peer_reorder_queue_setup_cmd *)msg->data;
	setup->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_SETUP_CMD,
						   sizeof(*setup));
	ether_addr_copy(setup->peer_macaddr.addr, addr);
	setup->vdev_id = cpu_to_le32(vdev_id);
	setup->tid = cpu_to_le32(tid);
	setup->queue_ptr_lo = cpu_to_le32(lower_32_bits(paddr));
	setup->queue_ptr_hi = cpu_to_le32(upper_32_bits(paddr));
	setup->queue_no = cpu_to_le32(tid);
	setup->ba_window_size_valid = cpu_to_le32(ba_window_size_valid);
	setup->ba_window_size = cpu_to_le32(ba_window_size);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n",
		   addr, vdev_id, tid);

	status = ath12k_wmi_cmd_send(ar->wmi, msg,
				     WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
	if (status) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n");
		dev_kfree_skb(msg);
	}

	return status;
}
1619 
1620 int
ath12k_wmi_rx_reord_queue_remove(struct ath12k * ar,struct ath12k_wmi_rx_reorder_queue_remove_arg * arg)1621 ath12k_wmi_rx_reord_queue_remove(struct ath12k *ar,
1622 				 struct ath12k_wmi_rx_reorder_queue_remove_arg *arg)
1623 {
1624 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1625 	struct wmi_peer_reorder_queue_remove_cmd *cmd;
1626 	struct sk_buff *skb;
1627 	int ret;
1628 
1629 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1630 	if (!skb)
1631 		return -ENOMEM;
1632 
1633 	cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data;
1634 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_REMOVE_CMD,
1635 						 sizeof(*cmd));
1636 
1637 	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr);
1638 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
1639 	cmd->tid_mask = cpu_to_le32(arg->peer_tid_bitmap);
1640 
1641 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1642 		   "%s: peer_macaddr %pM vdev_id %d, tid_map %d", __func__,
1643 		   arg->peer_macaddr, arg->vdev_id, arg->peer_tid_bitmap);
1644 
1645 	ret = ath12k_wmi_cmd_send(wmi, skb,
1646 				  WMI_PEER_REORDER_QUEUE_REMOVE_CMDID);
1647 	if (ret) {
1648 		ath12k_warn(ar->ab,
1649 			    "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID");
1650 		dev_kfree_skb(skb);
1651 	}
1652 
1653 	return ret;
1654 }
1655 
/* Set a per-pdev firmware parameter (WMI_PDEV_SET_PARAM_CMDID).
 * Returns 0 or a negative error; the command skb is freed on send failure.
 */
int ath12k_wmi_pdev_set_param(struct ath12k *ar, u32 param_id,
			      u32 param_value, u8 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_set_param_cmd *set;
	struct sk_buff *msg;
	int status;

	msg = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*set));
	if (!msg)
		return -ENOMEM;

	set = (struct wmi_pdev_set_param_cmd *)msg->data;
	set->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_PARAM_CMD,
						 sizeof(*set));
	set->pdev_id = cpu_to_le32(pdev_id);
	set->param_id = cpu_to_le32(param_id);
	set->param_value = cpu_to_le32(param_value);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev set param %d pdev id %d value %d\n",
		   param_id, pdev_id, param_value);

	status = ath12k_wmi_cmd_send(wmi, msg, WMI_PDEV_SET_PARAM_CMDID);
	if (status) {
		ath12k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
		dev_kfree_skb(msg);
	}

	return status;
}
1687 
/* Enable or disable STA powersave for a vdev
 * (WMI_STA_POWERSAVE_MODE_CMDID).  @enable is the sta_ps_mode value
 * written verbatim into the command.  Returns 0 or a negative error; the
 * command skb is freed on send failure.
 */
int ath12k_wmi_pdev_set_ps_mode(struct ath12k *ar, int vdev_id, u32 enable)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_set_ps_mode_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_MODE_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->sta_ps_mode = cpu_to_le32(enable);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev set psmode %d vdev id %d\n",
		   enable, vdev_id);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
	if (ret) {
		/* Was "WMI_PDEV_SET_PARAM" — copy-paste from
		 * ath12k_wmi_pdev_set_param; name the command actually sent.
		 */
		ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_MODE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
1717 
/* Ask firmware to suspend the given pdev via WMI_PDEV_SUSPEND_CMDID.
 * On send failure the skb is freed here.
 */
int ath12k_wmi_pdev_suspend(struct ath12k *ar, u32 suspend_opt,
			    u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_suspend_cmd *cmd;
	struct sk_buff *skb;
	int status;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	/* One fixed-size TLV describing the suspend request. */
	cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SUSPEND_CMD,
						 sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(pdev_id);
	cmd->suspend_opt = cpu_to_le32(suspend_opt);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev suspend pdev_id %d\n", pdev_id);

	status = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID);
	if (status) {
		ath12k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n");
		dev_kfree_skb(skb);
	}

	return status;
}
1749 
/* Ask firmware to resume the given pdev via WMI_PDEV_RESUME_CMDID.
 * On send failure the skb is freed here.
 */
int ath12k_wmi_pdev_resume(struct ath12k *ar, u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_pdev_resume_cmd *cmd;
	struct sk_buff *buf;
	int err;

	buf = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!buf)
		return -ENOMEM;

	/* Resume takes only the pdev identifier. */
	cmd = (struct wmi_pdev_resume_cmd *)buf->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_RESUME_CMD,
						 sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI pdev resume pdev id %d\n", pdev_id);

	err = ath12k_wmi_cmd_send(wmi, buf, WMI_PDEV_RESUME_CMDID);
	if (err) {
		ath12k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n");
		dev_kfree_skb(buf);
	}

	return err;
}
1778 
1779 /* TODO FW Support for the cmd is not available yet.
1780  * Can be tested once the command and corresponding
1781  * event is implemented in FW
1782  */
ath12k_wmi_pdev_bss_chan_info_request(struct ath12k * ar,enum wmi_bss_chan_info_req_type type)1783 int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar,
1784 					  enum wmi_bss_chan_info_req_type type)
1785 {
1786 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1787 	struct wmi_pdev_bss_chan_info_req_cmd *cmd;
1788 	struct sk_buff *skb;
1789 	int ret;
1790 
1791 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1792 	if (!skb)
1793 		return -ENOMEM;
1794 
1795 	cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data;
1796 
1797 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
1798 						 sizeof(*cmd));
1799 	cmd->req_type = cpu_to_le32(type);
1800 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
1801 
1802 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1803 		   "WMI bss chan info req type %d\n", type);
1804 
1805 	ret = ath12k_wmi_cmd_send(wmi, skb,
1806 				  WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID);
1807 	if (ret) {
1808 		ath12k_warn(ar->ab,
1809 			    "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n");
1810 		dev_kfree_skb(skb);
1811 	}
1812 
1813 	return ret;
1814 }
1815 
/* Set an AP-side powersave parameter for the peer @peer_addr via
 * WMI_AP_PS_PEER_PARAM_CMDID.  On send failure the skb is freed here.
 */
int ath12k_wmi_send_set_ap_ps_param_cmd(struct ath12k *ar, u8 *peer_addr,
					struct ath12k_wmi_ap_ps_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_ap_ps_peer_cmd *cmd;
	struct sk_buff *buf;
	int err;

	buf = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!buf)
		return -ENOMEM;

	cmd = (struct wmi_ap_ps_peer_cmd *)buf->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_AP_PS_PEER_CMD,
						 sizeof(*cmd));
	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->value = cpu_to_le32(arg->value);
	cmd->param = cpu_to_le32(arg->param);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI set ap ps vdev id %d peer %pM param %d value %d\n",
		   arg->vdev_id, peer_addr, arg->param, arg->value);

	err = ath12k_wmi_cmd_send(wmi, buf, WMI_AP_PS_PEER_PARAM_CMDID);
	if (err) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n");
		dev_kfree_skb(buf);
	}

	return err;
}
1850 
/* Set a station powersave parameter on @vdev_id via
 * WMI_STA_POWERSAVE_PARAM_CMDID.  On send failure the skb is freed here.
 */
int ath12k_wmi_set_sta_ps_param(struct ath12k *ar, u32 vdev_id,
				u32 param, u32 param_value)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_sta_powersave_param_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_PARAM_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->param = cpu_to_le32(param);
	cmd->value = cpu_to_le32(param_value);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI set sta ps vdev_id %d param %d value %d\n",
		   vdev_id, param, param_value);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
	if (ret) {
		/* Added missing trailing newline so the log line is not
		 * concatenated with the next message.
		 */
		ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
1883 
/* Deliberately crash/hang the firmware (debug facility) via
 * WMI_FORCE_FW_HANG_CMDID.  @type selects the hang flavor and
 * @delay_time_ms how long firmware waits before triggering it.
 * On send failure the skb is freed here.
 */
int ath12k_wmi_force_fw_hang_cmd(struct ath12k *ar, u32 type, u32 delay_time_ms)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_force_fw_hang_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FORCE_FW_HANG_CMD,
						 len);

	cmd->type = cpu_to_le32(type);
	cmd->delay_time_ms = cpu_to_le32(delay_time_ms);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID);

	if (ret) {
		/* Added missing trailing newline so the log line is not
		 * concatenated with the next message.
		 */
		ath12k_warn(ar->ab, "Failed to send WMI_FORCE_FW_HANG_CMDID\n");
		dev_kfree_skb(skb);
	}
	return ret;
}
1912 
/* Program a single vdev-level parameter into firmware via
 * WMI_VDEV_SET_PARAM_CMDID.  On send failure the skb is freed here.
 */
int ath12k_wmi_vdev_set_param_cmd(struct ath12k *ar, u32 vdev_id,
				  u32 param_id, u32 param_value)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_set_param_cmd *cmd;
	struct sk_buff *buf;
	int err;

	buf = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!buf)
		return -ENOMEM;

	/* Single fixed-size TLV carrying the (vdev, param, value) tuple. */
	cmd = (struct wmi_vdev_set_param_cmd *)buf->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_PARAM_CMD,
						 sizeof(*cmd));
	cmd->param_value = cpu_to_le32(param_value);
	cmd->param_id = cpu_to_le32(param_id);
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev id 0x%x set param %d value %d\n",
		   vdev_id, param_id, param_value);

	err = ath12k_wmi_cmd_send(wmi, buf, WMI_VDEV_SET_PARAM_CMDID);
	if (err) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_VDEV_SET_PARAM_CMDID\n");
		dev_kfree_skb(buf);
	}

	return err;
}
1946 
ath12k_wmi_send_pdev_temperature_cmd(struct ath12k * ar)1947 int ath12k_wmi_send_pdev_temperature_cmd(struct ath12k *ar)
1948 {
1949 	struct ath12k_wmi_pdev *wmi = ar->wmi;
1950 	struct wmi_get_pdev_temperature_cmd *cmd;
1951 	struct sk_buff *skb;
1952 	int ret;
1953 
1954 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
1955 	if (!skb)
1956 		return -ENOMEM;
1957 
1958 	cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data;
1959 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_GET_TEMPERATURE_CMD,
1960 						 sizeof(*cmd));
1961 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
1962 
1963 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
1964 		   "WMI pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id);
1965 
1966 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID);
1967 	if (ret) {
1968 		ath12k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n");
1969 		dev_kfree_skb(skb);
1970 	}
1971 
1972 	return ret;
1973 }
1974 
/* Control firmware beacon offload for @vdev_id via
 * WMI_BCN_OFFLOAD_CTRL_CMDID.  On send failure the skb is freed here.
 */
int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar,
					    u32 vdev_id, u32 bcn_ctrl_op)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_bcn_offload_ctrl_cmd *cmd;
	struct sk_buff *buf;
	int err;

	buf = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!buf)
		return -ENOMEM;

	cmd = (struct wmi_bcn_offload_ctrl_cmd *)buf->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_OFFLOAD_CTRL_CMD,
						 sizeof(*cmd));
	cmd->bcn_ctrl_op = cpu_to_le32(bcn_ctrl_op);
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI bcn ctrl offload vdev id %d ctrl_op %d\n",
		   vdev_id, bcn_ctrl_op);

	err = ath12k_wmi_cmd_send(wmi, buf, WMI_BCN_OFFLOAD_CTRL_CMDID);
	if (err) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n");
		dev_kfree_skb(buf);
	}

	return err;
}
2007 
/* Install the P2P IE carried in GO beacons for @vdev_id via
 * WMI_P2P_GO_SET_BEACON_IE.  @p2p_ie points at a raw 802.11 information
 * element (ID, length, body).  On send failure the skb is freed here.
 */
int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
			     const u8 *p2p_ie)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_p2p_go_set_beacon_ie_cmd *cmd;
	size_t p2p_ie_len, aligned_len;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
#if defined(__linux__)
	void *ptr;
#elif defined(__FreeBSD__)
	/* FreeBSD disallows arithmetic on void *; use u8 * instead. */
	u8 *ptr;
#endif
	int ret, len;

	/* IE length byte plus the two-byte (ID, length) header. */
	p2p_ie_len = p2p_ie[1] + 2;
	/* TLV payloads are padded to 4-byte multiples. */
	aligned_len = roundup(p2p_ie_len, sizeof(u32));

	/* Layout: fixed cmd TLV, then an ARRAY_BYTE TLV with the IE. */
	len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	ptr = skb->data;
	cmd = ptr;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_P2P_GO_SET_BEACON_IE,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	/* Firmware uses ie_buf_len (unpadded) as the real IE length. */
	cmd->ie_buf_len = cpu_to_le32(p2p_ie_len);

	ptr += sizeof(*cmd);
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE,
					     aligned_len);
	/* Only p2p_ie_len bytes are meaningful; the pad stays as allocated. */
	memcpy(tlv->value, p2p_ie, p2p_ie_len);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_P2P_GO_SET_BEACON_IE);
	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_P2P_GO_SET_BEACON_IE\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
2053 
/* Push a beacon template for @arvif to firmware via WMI_BCN_TMPL_CMDID.
 * @offs supplies mac80211's offsets into the template (TIM, CSA
 * countdown, MBSSID); @bcn holds the raw beacon frame; @ema_args, when
 * non-NULL, describes this template's position in an EMA (enhanced
 * multi-BSSID advertisement) rotation.  On send failure the skb is
 * freed here.
 */
int ath12k_wmi_bcn_tmpl(struct ath12k_link_vif *arvif,
			struct ieee80211_mutable_offsets *offs,
			struct sk_buff *bcn,
			struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args)
{
	struct ath12k *ar = arvif->ar;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = ar->ab;
	struct wmi_bcn_tmpl_cmd *cmd;
	struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info;
	struct ath12k_vif *ahvif = arvif->ahvif;
	struct ieee80211_bss_conf *conf;
	u32 vdev_id = arvif->vdev_id;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u32 ema_params = 0;
	void *ptr;
	int ret, len;
	/* Beacon bytes are padded to a 4-byte multiple inside the TLV. */
	size_t aligned_len = roundup(bcn->len, 4);

	conf = ath12k_mac_get_link_bss_conf(arvif);
	if (!conf) {
		ath12k_warn(ab,
			    "unable to access bss link conf in beacon template command for vif %pM link %u\n",
			    ahvif->vif->addr, arvif->link_id);
		return -EINVAL;
	}

	/* Layout: cmd TLV, bcn_prb_info TLV, then ARRAY_BYTE with beacon. */
	len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_bcn_tmpl_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_TMPL_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->tim_ie_offset = cpu_to_le32(offs->tim_offset);

	if (conf->csa_active) {
		/* Offsets of the (ext) channel switch countdown bytes in the
		 * template, so firmware can decrement them per beacon.
		 */
		cmd->csa_switch_count_offset =
				cpu_to_le32(offs->cntdwn_counter_offs[0]);
		cmd->ext_csa_switch_count_offset =
				cpu_to_le32(offs->cntdwn_counter_offs[1]);
		cmd->csa_event_bitmap = cpu_to_le32(0xFFFFFFFF);
		/* Snapshot the initial countdown value from the template. */
		arvif->current_cntdown_counter = bcn->data[offs->cntdwn_counter_offs[0]];
	}

	cmd->buf_len = cpu_to_le32(bcn->len);
	cmd->mbssid_ie_offset = cpu_to_le32(offs->mbssid_off);
	if (ema_args) {
		/* Encode this template's count/index plus first/last flags
		 * into the packed ema_params word.
		 */
		u32p_replace_bits(&ema_params, ema_args->bcn_cnt, WMI_EMA_BEACON_CNT);
		u32p_replace_bits(&ema_params, ema_args->bcn_index, WMI_EMA_BEACON_IDX);
		if (ema_args->bcn_index == 0)
			u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_FIRST);
		if (ema_args->bcn_index + 1 == ema_args->bcn_cnt)
			u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_LAST);
		cmd->ema_params = cpu_to_le32(ema_params);
	}
	/* Advertise beacon protection support to firmware when enabled. */
	cmd->feature_enable_bitmap =
		cpu_to_le32(u32_encode_bits(arvif->beacon_prot,
					    WMI_BEACON_PROTECTION_EN_BIT));

	ptr = skb->data + sizeof(*cmd);

#if defined(__linux__)
	bcn_prb_info = ptr;
#elif defined(__FreeBSD__)
	bcn_prb_info = (void *)ptr;
#endif
	len = sizeof(*bcn_prb_info);
	bcn_prb_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
							  len);
	/* caps/erp are unused here; zeroed placeholder TLV. */
	bcn_prb_info->caps = 0;
	bcn_prb_info->erp = 0;

	ptr += sizeof(*bcn_prb_info);

#if defined(__linux__)
	tlv = ptr;
#elif defined(__FreeBSD__)
	tlv = (void *)ptr;
#endif
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
	memcpy(tlv->value, bcn->data, bcn->len);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
	if (ret) {
		ath12k_warn(ab, "failed to send WMI_BCN_TMPL_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
2149 
/* Install a cipher key for a peer on a vdev via
 * WMI_VDEV_INSTALL_KEY_CMDID.  Key material is appended as an
 * ARRAY_BYTE TLV after the fixed command TLV.  On send failure the skb
 * is freed here.
 */
int ath12k_wmi_vdev_install_key(struct ath12k *ar,
				struct wmi_vdev_install_key_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_install_key_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	int ret, len, key_len_aligned;

	/* WMI_TAG_ARRAY_BYTE needs to be aligned with 4, the actual key
	 * length is specified in cmd->key_len.
	 */
	key_len_aligned = roundup(arg->key_len, 4);

	len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_INSTALL_KEY_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
	cmd->key_idx = cpu_to_le32(arg->key_idx);
	cmd->key_flags = cpu_to_le32(arg->key_flags);
	cmd->key_cipher = cpu_to_le32(arg->key_cipher);
	cmd->key_len = cpu_to_le32(arg->key_len);
	cmd->key_txmic_len = cpu_to_le32(arg->key_txmic_len);
	cmd->key_rxmic_len = cpu_to_le32(arg->key_rxmic_len);

	/* Receive sequence counter is optional; zero means "not provided"
	 * and the field is left as allocated.
	 */
	if (arg->key_rsc_counter)
		cmd->key_rsc_counter = cpu_to_le64(arg->key_rsc_counter);

	/* Append the key bytes; trailing pad (up to 3 bytes) is ignored by
	 * firmware since cmd->key_len carries the true length.
	 */
	tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, key_len_aligned);
	memcpy(tlv->value, arg->key_data, arg->key_len);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev install key idx %d cipher %d len %d\n",
		   arg->key_idx, arg->key_cipher, arg->key_len);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_VDEV_INSTALL_KEY cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
2202 
/* Translate the host-side peer association argument into the WMI
 * peer_flags / peer_flags_ext bitmaps of @cmd.  Both fields are reset
 * first, so this fully determines them.  @hw_crypto_disabled reflects
 * ATH12K_FLAG_HW_CRYPTO_DISABLED and affects only the AUTH suppression
 * below.
 */
static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
				       struct ath12k_wmi_peer_assoc_arg *arg,
				       bool hw_crypto_disabled)
{
	cmd->peer_flags = 0;
	cmd->peer_flags_ext = 0;

	/* Capability flags are only meaningful once WME data is set. */
	if (arg->is_wme_set) {
		if (arg->qos_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_QOS);
		if (arg->apsd_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_APSD);
		if (arg->ht_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_HT);
		if (arg->bw_40)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_40MHZ);
		if (arg->bw_80)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_80MHZ);
		if (arg->bw_160)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ);
		/* 320 MHz and EHT only exist in the extended flag word. */
		if (arg->bw_320)
			cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_320MHZ);

		/* Typically if STBC is enabled for VHT it should be enabled
		 * for HT as well
		 **/
		if (arg->stbc_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_STBC);

		/* Typically if LDPC is enabled for VHT it should be enabled
		 * for HT as well
		 **/
		if (arg->ldpc_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_LDPC);

		if (arg->static_mimops_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_STATIC_MIMOPS);
		if (arg->dynamic_mimops_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_DYN_MIMOPS);
		if (arg->spatial_mux_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_SPATIAL_MUX);
		if (arg->vht_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_VHT);
		if (arg->he_flag)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_HE);
		if (arg->twt_requester)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_REQ);
		if (arg->twt_responder)
			cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_RESP);
		if (arg->eht_flag)
			cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_EHT);
	}

	/* Suppress authorization for all AUTH modes that need 4-way handshake
	 * (during re-association).
	 * Authorization will be done for these modes on key installation.
	 */
	if (arg->auth_flag)
		cmd->peer_flags |= cpu_to_le32(WMI_PEER_AUTH);
	if (arg->need_ptk_4_way) {
		cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_PTK_4_WAY);
		/* Clear AUTH again only for real (re)assoc with HW crypto. */
		if (!hw_crypto_disabled && arg->is_assoc)
			cmd->peer_flags &= cpu_to_le32(~WMI_PEER_AUTH);
	}
	if (arg->need_gtk_2_way)
		cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_GTK_2_WAY);
	/* safe mode bypass the 4-way handshake */
	if (arg->safe_mode_enabled)
		cmd->peer_flags &= cpu_to_le32(~(WMI_PEER_NEED_PTK_4_WAY |
						 WMI_PEER_NEED_GTK_2_WAY));

	if (arg->is_pmf_enabled)
		cmd->peer_flags |= cpu_to_le32(WMI_PEER_PMF);

	/* Disable AMSDU for station transmit, if user configures it */
	/* Disable AMSDU for AP transmit to 11n Stations, if user configures
	 * it
	 * if (arg->amsdu_disable) Add after FW support
	 **/

	/* Target asserts if node is marked HT and all MCS is set to 0.
	 * Mark the node as non-HT if all the mcs rates are disabled through
	 * iwpriv
	 **/
	if (arg->peer_ht_rates.num_rates == 0)
		cmd->peer_flags &= cpu_to_le32(~WMI_PEER_HT);
}
2290 
/* Build and send WMI_PEER_ASSOC_CMDID for @arg.  The command is a
 * sequence of TLVs appended in this fixed order: fixed cmd struct,
 * legacy rates (ARRAY_BYTE), HT rates (ARRAY_BYTE), VHT rate set,
 * HE rate sets (ARRAY_STRUCT), MLO params (ARRAY_STRUCT, possibly
 * empty), EHT rate sets (ARRAY_STRUCT), and MLO partner links
 * (ARRAY_STRUCT, possibly empty).  Empty MLO TLVs are still emitted as
 * zero-length headers to keep the TLV stream well formed.  On send
 * failure the skb is freed here.
 */
int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
				   struct ath12k_wmi_peer_assoc_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_peer_assoc_complete_cmd *cmd;
	struct ath12k_wmi_vht_rate_set_params *mcs;
	struct ath12k_wmi_he_rate_set_params *he_mcs;
	struct ath12k_wmi_eht_rate_set_params *eht_mcs;
	struct wmi_peer_assoc_mlo_params *ml_params;
	struct wmi_peer_assoc_mlo_partner_info_params *partner_info;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
#if defined(__linux__)
	void *ptr;
#elif defined(__FreeBSD__)
	/* FreeBSD disallows arithmetic on void *; use u8 * instead. */
	u8 *ptr;
#endif
	u32 peer_legacy_rates_align, eml_pad_delay, eml_trans_delay;
	u32 peer_ht_rates_align, eml_trans_timeout;
	int i, ret, len;
	u16 eml_cap;
	__le32 v;

	/* Rate arrays are byte arrays padded to 4-byte multiples. */
	peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates,
					  sizeof(u32));
	peer_ht_rates_align = roundup(arg->peer_ht_rates.num_rates,
				      sizeof(u32));

	len = sizeof(*cmd) +
	      TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) +
	      TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
	      sizeof(*mcs) + TLV_HDR_SIZE +
	      (sizeof(*he_mcs) * arg->peer_he_mcs_count) +
	      TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count);

	/* The two MLO TLVs are always present; without MLO they are just
	 * empty headers.
	 */
	if (arg->ml.enabled)
		len += TLV_HDR_SIZE + sizeof(*ml_params) +
		       TLV_HDR_SIZE + (arg->ml.num_partner_links * sizeof(*partner_info));
	else
		len += (2 * TLV_HDR_SIZE);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	ptr = skb->data;

#if defined(__linux__)
	cmd = ptr;
#elif defined(__FreeBSD__)
	cmd = (void *)ptr;
#endif
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_ASSOC_COMPLETE_CMD,
						 sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(arg->vdev_id);

	cmd->peer_new_assoc = cpu_to_le32(arg->peer_new_assoc);
	cmd->peer_associd = cpu_to_le32(arg->peer_associd);
	cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);

	ath12k_wmi_copy_peer_flags(cmd, arg,
				   test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED,
					    &ar->ab->dev_flags));

	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_mac);

	cmd->peer_rate_caps = cpu_to_le32(arg->peer_rate_caps);
	cmd->peer_caps = cpu_to_le32(arg->peer_caps);
	cmd->peer_listen_intval = cpu_to_le32(arg->peer_listen_intval);
	cmd->peer_ht_caps = cpu_to_le32(arg->peer_ht_caps);
	cmd->peer_max_mpdu = cpu_to_le32(arg->peer_max_mpdu);
	cmd->peer_mpdu_density = cpu_to_le32(arg->peer_mpdu_density);
	cmd->peer_vht_caps = cpu_to_le32(arg->peer_vht_caps);
	cmd->peer_phymode = cpu_to_le32(arg->peer_phymode);

	/* Update 11ax capabilities */
	cmd->peer_he_cap_info = cpu_to_le32(arg->peer_he_cap_macinfo[0]);
	cmd->peer_he_cap_info_ext = cpu_to_le32(arg->peer_he_cap_macinfo[1]);
	cmd->peer_he_cap_info_internal = cpu_to_le32(arg->peer_he_cap_macinfo_internal);
	cmd->peer_he_caps_6ghz = cpu_to_le32(arg->peer_he_caps_6ghz);
	cmd->peer_he_ops = cpu_to_le32(arg->peer_he_ops);
	for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
		cmd->peer_he_cap_phy[i] =
			cpu_to_le32(arg->peer_he_cap_phyinfo[i]);
	cmd->peer_ppet.numss_m1 = cpu_to_le32(arg->peer_ppet.numss_m1);
	cmd->peer_ppet.ru_info = cpu_to_le32(arg->peer_ppet.ru_bit_mask);
	for (i = 0; i < WMI_MAX_NUM_SS; i++)
		cmd->peer_ppet.ppet16_ppet8_ru3_ru0[i] =
			cpu_to_le32(arg->peer_ppet.ppet16_ppet8_ru3_ru0[i]);

	/* Update 11be capabilities */
	memcpy_and_pad(cmd->peer_eht_cap_mac, sizeof(cmd->peer_eht_cap_mac),
		       arg->peer_eht_cap_mac, sizeof(arg->peer_eht_cap_mac),
		       0);
	memcpy_and_pad(cmd->peer_eht_cap_phy, sizeof(cmd->peer_eht_cap_phy),
		       arg->peer_eht_cap_phy, sizeof(arg->peer_eht_cap_phy),
		       0);
	memcpy_and_pad(&cmd->peer_eht_ppet, sizeof(cmd->peer_eht_ppet),
		       &arg->peer_eht_ppet, sizeof(arg->peer_eht_ppet), 0);

	/* Update peer legacy rate information */
	ptr += sizeof(*cmd);

#if defined(__linux__)
	tlv = ptr;
#elif defined(__FreeBSD__)
	tlv = (void *)ptr;
#endif
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_legacy_rates_align);

	ptr += TLV_HDR_SIZE;

	cmd->num_peer_legacy_rates = cpu_to_le32(arg->peer_legacy_rates.num_rates);
	memcpy(ptr, arg->peer_legacy_rates.rates,
	       arg->peer_legacy_rates.num_rates);

	/* Update peer HT rate information */
	ptr += peer_legacy_rates_align;

#if defined(__linux__)
	tlv = ptr;
#elif defined(__FreeBSD__)
	tlv = (void *)ptr;
#endif
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_ht_rates_align);
	ptr += TLV_HDR_SIZE;
	cmd->num_peer_ht_rates = cpu_to_le32(arg->peer_ht_rates.num_rates);
	memcpy(ptr, arg->peer_ht_rates.rates,
	       arg->peer_ht_rates.num_rates);

	/* VHT Rates */
	ptr += peer_ht_rates_align;

#if defined(__linux__)
	mcs = ptr;
#elif defined(__FreeBSD__)
	mcs = (void *)ptr;
#endif

	mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VHT_RATE_SET,
						 sizeof(*mcs));

	cmd->peer_nss = cpu_to_le32(arg->peer_nss);

	/* Update bandwidth-NSS mapping */
	cmd->peer_bw_rxnss_override = 0;
	cmd->peer_bw_rxnss_override |= cpu_to_le32(arg->peer_bw_rxnss_override);

	if (arg->vht_capable) {
		/* Firmware interprets mcs->tx_mcs_set field as peer's
		 * RX capability
		 */
		mcs->rx_max_rate = cpu_to_le32(arg->tx_max_rate);
		mcs->rx_mcs_set = cpu_to_le32(arg->tx_mcs_set);
		mcs->tx_max_rate = cpu_to_le32(arg->rx_max_rate);
		mcs->tx_mcs_set = cpu_to_le32(arg->rx_mcs_set);
	}

	/* HE Rates */
	cmd->peer_he_mcs = cpu_to_le32(arg->peer_he_mcs_count);
	cmd->min_data_rate = cpu_to_le32(arg->min_data_rate);

	ptr += sizeof(*mcs);

	len = arg->peer_he_mcs_count * sizeof(*he_mcs);

#if defined(__linux__)
	tlv = ptr;
#elif defined(__FreeBSD__)
	tlv = (void *)ptr;
#endif
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
	ptr += TLV_HDR_SIZE;

	/* Loop through the HE rate set */
	for (i = 0; i < arg->peer_he_mcs_count; i++) {
#if defined(__linux__)
		he_mcs = ptr;
#elif defined(__FreeBSD__)
		he_mcs = (void *)ptr;
#endif
		he_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET,
							    sizeof(*he_mcs));

		he_mcs->rx_mcs_set = cpu_to_le32(arg->peer_he_rx_mcs_set[i]);
		he_mcs->tx_mcs_set = cpu_to_le32(arg->peer_he_tx_mcs_set[i]);
		ptr += sizeof(*he_mcs);
	}

	/* MLO params TLV; zero-length header when MLO is disabled. */
#if defined(__linux__)
	tlv = ptr;
#elif defined(__FreeBSD__)
	tlv = (void *)ptr;
#endif
	len = arg->ml.enabled ? sizeof(*ml_params) : 0;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
	ptr += TLV_HDR_SIZE;
	if (!len)
		goto skip_ml_params;

	ml_params = ptr;
	ml_params->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_ASSOC_PARAMS,
						       len);
	ml_params->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);

	if (arg->ml.assoc_link)
		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);

	if (arg->ml.primary_umac)
		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);

	if (arg->ml.logical_link_idx_valid)
		ml_params->flags |=
			cpu_to_le32(ATH12K_WMI_FLAG_MLO_LOGICAL_LINK_IDX_VALID);

	if (arg->ml.peer_id_valid)
		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PEER_ID_VALID);

	ether_addr_copy(ml_params->mld_addr.addr, arg->ml.mld_addr);
	ml_params->logical_link_idx = cpu_to_le32(arg->ml.logical_link_idx);
	ml_params->ml_peer_id = cpu_to_le32(arg->ml.ml_peer_id);
	ml_params->ieee_link_id = cpu_to_le32(arg->ml.ieee_link_id);

	/* Pass EMLSR capability-derived timing to firmware when the peer
	 * advertises EMLSR support.
	 */
	eml_cap = arg->ml.eml_cap;
	if (u16_get_bits(eml_cap, IEEE80211_EML_CAP_EMLSR_SUPP)) {
		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_EMLSR_SUPPORT);
		/* Padding delay */
		eml_pad_delay = ieee80211_emlsr_pad_delay_in_us(eml_cap);
		ml_params->emlsr_padding_delay_us = cpu_to_le32(eml_pad_delay);
		/* Transition delay */
		eml_trans_delay = ieee80211_emlsr_trans_delay_in_us(eml_cap);
		ml_params->emlsr_trans_delay_us = cpu_to_le32(eml_trans_delay);
		/* Transition timeout */
		eml_trans_timeout = ieee80211_eml_trans_timeout_in_us(eml_cap);
		ml_params->emlsr_trans_timeout_us =
					cpu_to_le32(eml_trans_timeout);
		ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi peer %pM emlsr padding delay %u, trans delay %u trans timeout %u",
			   arg->peer_mac, eml_pad_delay, eml_trans_delay,
			   eml_trans_timeout);
	}

	ptr += sizeof(*ml_params);

skip_ml_params:
	/* Loop through the EHT rate set */
	len = arg->peer_eht_mcs_count * sizeof(*eht_mcs);
#if defined(__linux__)
	tlv = ptr;
#elif defined(__FreeBSD__)
	tlv = (void *)ptr;
#endif
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
	ptr += TLV_HDR_SIZE;

	for (i = 0; i < arg->peer_eht_mcs_count; i++) {
#if defined(__linux__)
		eht_mcs = ptr;
#elif defined(__FreeBSD__)
		eht_mcs = (void *)ptr;
#endif
		eht_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_EHT_RATE_SET,
							     sizeof(*eht_mcs));

		eht_mcs->rx_mcs_set = cpu_to_le32(arg->peer_eht_rx_mcs_set[i]);
		eht_mcs->tx_mcs_set = cpu_to_le32(arg->peer_eht_tx_mcs_set[i]);
		ptr += sizeof(*eht_mcs);
	}

	/* Update MCS15 capability */
	if (arg->eht_disable_mcs15)
		cmd->peer_eht_ops = cpu_to_le32(IEEE80211_EHT_OPER_MCS15_DISABLE);

#if defined(__linux__)
	tlv = ptr;
#elif defined(__FreeBSD__)
	tlv = (void *)ptr;
#endif
	len = arg->ml.enabled ? arg->ml.num_partner_links * sizeof(*partner_info) : 0;
	/* fill ML Partner links */
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
	ptr += TLV_HDR_SIZE;

	if (len == 0)
		goto send;

	for (i = 0; i < arg->ml.num_partner_links; i++) {
		/* NOTE: this local 'cmd' (a TLV tag value) shadows the outer
		 * command pointer for the duration of the loop body.
		 */
		u32 cmd = WMI_TAG_MLO_PARTNER_LINK_PARAMS_PEER_ASSOC;

		partner_info = ptr;
		partner_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(cmd,
								  sizeof(*partner_info));
		partner_info->vdev_id = cpu_to_le32(arg->ml.partner_info[i].vdev_id);
		partner_info->hw_link_id =
			cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
		partner_info->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);

		if (arg->ml.partner_info[i].assoc_link)
			partner_info->flags |=
				cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);

		if (arg->ml.partner_info[i].primary_umac)
			partner_info->flags |=
				cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);

		if (arg->ml.partner_info[i].logical_link_idx_valid) {
			v = cpu_to_le32(ATH12K_WMI_FLAG_MLO_LINK_ID_VALID);
			partner_info->flags |= v;
		}

		partner_info->logical_link_idx =
			cpu_to_le32(arg->ml.partner_info[i].logical_link_idx);
		ptr += sizeof(*partner_info);
	}

send:
	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x peer_eht_ops %x\n",
		   cmd->vdev_id, cmd->peer_associd, arg->peer_mac,
		   cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
		   cmd->peer_listen_intval, cmd->peer_ht_caps,
		   cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode,
		   cmd->peer_mpdu_density,
		   cmd->peer_vht_caps, cmd->peer_he_cap_info,
		   cmd->peer_he_ops, cmd->peer_he_cap_info_ext,
		   cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1],
		   cmd->peer_he_cap_phy[2],
		   cmd->peer_bw_rxnss_override, cmd->peer_flags_ext,
		   cmd->peer_eht_cap_mac[0], cmd->peer_eht_cap_mac[1],
		   cmd->peer_eht_cap_phy[0], cmd->peer_eht_cap_phy[1],
		   cmd->peer_eht_cap_phy[2], cmd->peer_eht_ops);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PEER_ASSOC_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
2632 
ath12k_wmi_start_scan_init(struct ath12k * ar,struct ath12k_wmi_scan_req_arg * arg)2633 void ath12k_wmi_start_scan_init(struct ath12k *ar,
2634 				struct ath12k_wmi_scan_req_arg *arg)
2635 {
2636 	/* setup commonly used values */
2637 	arg->scan_req_id = 1;
2638 	arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
2639 	arg->dwell_time_active = 50;
2640 	arg->dwell_time_active_2g = 0;
2641 	arg->dwell_time_passive = 150;
2642 	arg->dwell_time_active_6g = 70;
2643 	arg->dwell_time_passive_6g = 70;
2644 	arg->min_rest_time = 50;
2645 	arg->max_rest_time = 500;
2646 	arg->repeat_probe_time = 0;
2647 	arg->probe_spacing_time = 0;
2648 	arg->idle_time = 0;
2649 	arg->max_scan_time = 20000;
2650 	arg->probe_delay = 5;
2651 	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
2652 				  WMI_SCAN_EVENT_COMPLETED |
2653 				  WMI_SCAN_EVENT_BSS_CHANNEL |
2654 				  WMI_SCAN_EVENT_FOREIGN_CHAN |
2655 				  WMI_SCAN_EVENT_DEQUEUED;
2656 	arg->scan_f_chan_stat_evnt = 1;
2657 	arg->num_bssid = 1;
2658 
2659 	/* fill bssid_list[0] with 0xff, otherwise bssid and RA will be
2660 	 * ZEROs in probe request
2661 	 */
2662 	eth_broadcast_addr(arg->bssid_list[0].addr);
2663 }
2664 
/* Translate the boolean event-subscription and control-flag fields of
 * @arg into the little-endian notify_scan_events and scan_ctrl_flags
 * fields of the WMI start-scan command @cmd.  Event bits are ORed on
 * top of whatever the caller already placed in notify_scan_events;
 * scan_ctrl_flags is reset here before being filled.
 */
static void ath12k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
						   struct ath12k_wmi_scan_req_arg *arg)
{
	/* Scan events subscription */
	if (arg->scan_ev_started)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_STARTED);
	if (arg->scan_ev_completed)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_COMPLETED);
	if (arg->scan_ev_bss_chan)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_BSS_CHANNEL);
	if (arg->scan_ev_foreign_chan)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN);
	if (arg->scan_ev_dequeued)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_DEQUEUED);
	if (arg->scan_ev_preempted)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_PREEMPTED);
	if (arg->scan_ev_start_failed)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_START_FAILED);
	if (arg->scan_ev_restarted)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESTARTED);
	if (arg->scan_ev_foreign_chn_exit)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT);
	if (arg->scan_ev_suspended)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_SUSPENDED);
	if (arg->scan_ev_resumed)
		cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESUMED);

	/* Set scan control flags (overwrites any previous value) */
	cmd->scan_ctrl_flags = 0;
	if (arg->scan_f_passive)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_PASSIVE);
	if (arg->scan_f_strict_passive_pch)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN);
	if (arg->scan_f_promisc_mode)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROMISCUOS);
	if (arg->scan_f_capture_phy_err)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CAPTURE_PHY_ERROR);
	if (arg->scan_f_half_rate)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_HALF_RATE_SUPPORT);
	if (arg->scan_f_quarter_rate)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT);
	if (arg->scan_f_cck_rates)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_CCK_RATES);
	if (arg->scan_f_ofdm_rates)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_OFDM_RATES);
	if (arg->scan_f_chan_stat_evnt)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CHAN_STAT_EVENT);
	if (arg->scan_f_filter_prb_req)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
	if (arg->scan_f_bcast_probe)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_BCAST_PROBE_REQ);
	if (arg->scan_f_offchan_mgmt_tx)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_MGMT_TX);
	if (arg->scan_f_offchan_data_tx)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_DATA_TX);
	if (arg->scan_f_force_active_dfs_chn)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS);
	if (arg->scan_f_add_tpc_ie_in_probe)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ);
	if (arg->scan_f_add_ds_ie_in_probe)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ);
	if (arg->scan_f_add_spoofed_mac_in_probe)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ);
	if (arg->scan_f_add_rand_seq_in_probe)
		cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ);
	if (arg->scan_f_en_ie_whitelist_in_probe)
		cmd->scan_ctrl_flags |=
			cpu_to_le32(WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ);

	/* adaptive dwell mode is a multi-bit field, not a flag */
	cmd->scan_ctrl_flags |= le32_encode_bits(arg->adaptive_dwell_time_mode,
						 WMI_SCAN_DWELL_MODE_MASK);
}
2737 
ath12k_wmi_send_scan_start_cmd(struct ath12k * ar,struct ath12k_wmi_scan_req_arg * arg)2738 int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar,
2739 				   struct ath12k_wmi_scan_req_arg *arg)
2740 {
2741 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2742 	struct wmi_start_scan_cmd *cmd;
2743 	struct ath12k_wmi_ssid_params *ssid = NULL;
2744 	struct ath12k_wmi_mac_addr_params *bssid;
2745 	struct sk_buff *skb;
2746 	struct wmi_tlv *tlv;
2747 #if defined(__linux__)
2748 	void *ptr;
2749 #elif defined(__FreeBSD__)
2750 	u8 *ptr;
2751 #endif
2752 	int i, ret, len;
2753 	u32 *tmp_ptr, extraie_len_with_pad = 0;
2754 	struct ath12k_wmi_hint_short_ssid_arg *s_ssid = NULL;
2755 	struct ath12k_wmi_hint_bssid_arg *hint_bssid = NULL;
2756 
2757 	len = sizeof(*cmd);
2758 
2759 	len += TLV_HDR_SIZE;
2760 	if (arg->num_chan)
2761 		len += arg->num_chan * sizeof(u32);
2762 
2763 	len += TLV_HDR_SIZE;
2764 	if (arg->num_ssids)
2765 		len += arg->num_ssids * sizeof(*ssid);
2766 
2767 	len += TLV_HDR_SIZE;
2768 	if (arg->num_bssid)
2769 		len += sizeof(*bssid) * arg->num_bssid;
2770 
2771 	if (arg->num_hint_bssid)
2772 		len += TLV_HDR_SIZE +
2773 		       arg->num_hint_bssid * sizeof(*hint_bssid);
2774 
2775 	if (arg->num_hint_s_ssid)
2776 		len += TLV_HDR_SIZE +
2777 		       arg->num_hint_s_ssid * sizeof(*s_ssid);
2778 
2779 	len += TLV_HDR_SIZE;
2780 	if (arg->extraie.len)
2781 		extraie_len_with_pad =
2782 			roundup(arg->extraie.len, sizeof(u32));
2783 	if (extraie_len_with_pad <= (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len)) {
2784 		len += extraie_len_with_pad;
2785 	} else {
2786 		ath12k_warn(ar->ab, "discard large size %d bytes extraie for scan start\n",
2787 			    arg->extraie.len);
2788 		extraie_len_with_pad = 0;
2789 	}
2790 
2791 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
2792 	if (!skb)
2793 		return -ENOMEM;
2794 
2795 	ptr = skb->data;
2796 
2797 #if defined(__linux__)
2798 	cmd = ptr;
2799 #elif defined(__FreeBSD__)
2800 	cmd = (void *)ptr;
2801 #endif
2802 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_START_SCAN_CMD,
2803 						 sizeof(*cmd));
2804 
2805 	cmd->scan_id = cpu_to_le32(arg->scan_id);
2806 	cmd->scan_req_id = cpu_to_le32(arg->scan_req_id);
2807 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2808 	if (ar->state_11d == ATH12K_11D_PREPARING)
2809 		arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM;
2810 	else
2811 		arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
2812 	cmd->notify_scan_events = cpu_to_le32(arg->notify_scan_events);
2813 
2814 	ath12k_wmi_copy_scan_event_cntrl_flags(cmd, arg);
2815 
2816 	cmd->dwell_time_active = cpu_to_le32(arg->dwell_time_active);
2817 	cmd->dwell_time_active_2g = cpu_to_le32(arg->dwell_time_active_2g);
2818 	cmd->dwell_time_passive = cpu_to_le32(arg->dwell_time_passive);
2819 	cmd->dwell_time_active_6g = cpu_to_le32(arg->dwell_time_active_6g);
2820 	cmd->dwell_time_passive_6g = cpu_to_le32(arg->dwell_time_passive_6g);
2821 	cmd->min_rest_time = cpu_to_le32(arg->min_rest_time);
2822 	cmd->max_rest_time = cpu_to_le32(arg->max_rest_time);
2823 	cmd->repeat_probe_time = cpu_to_le32(arg->repeat_probe_time);
2824 	cmd->probe_spacing_time = cpu_to_le32(arg->probe_spacing_time);
2825 	cmd->idle_time = cpu_to_le32(arg->idle_time);
2826 	cmd->max_scan_time = cpu_to_le32(arg->max_scan_time);
2827 	cmd->probe_delay = cpu_to_le32(arg->probe_delay);
2828 	cmd->burst_duration = cpu_to_le32(arg->burst_duration);
2829 	cmd->num_chan = cpu_to_le32(arg->num_chan);
2830 	cmd->num_bssid = cpu_to_le32(arg->num_bssid);
2831 	cmd->num_ssids = cpu_to_le32(arg->num_ssids);
2832 	cmd->ie_len = cpu_to_le32(arg->extraie.len);
2833 	cmd->n_probes = cpu_to_le32(arg->n_probes);
2834 
2835 	ptr += sizeof(*cmd);
2836 
2837 	len = arg->num_chan * sizeof(u32);
2838 
2839 #if defined(__linux__)
2840 	tlv = ptr;
2841 #elif defined(__FreeBSD__)
2842 	tlv = (void *)ptr;
2843 #endif
2844 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, len);
2845 	ptr += TLV_HDR_SIZE;
2846 	tmp_ptr = (u32 *)ptr;
2847 
2848 	memcpy(tmp_ptr, arg->chan_list, arg->num_chan * 4);
2849 
2850 	ptr += len;
2851 
2852 	len = arg->num_ssids * sizeof(*ssid);
2853 #if defined(__linux__)
2854 	tlv = ptr;
2855 #elif defined(__FreeBSD__)
2856 	tlv = (void *)ptr;
2857 #endif
2858 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2859 
2860 	ptr += TLV_HDR_SIZE;
2861 
2862 	if (arg->num_ssids) {
2863 #if defined(__linux__)
2864 		ssid = ptr;
2865 #elif defined(__FreeBSD__)
2866 		ssid = (void *)ptr;
2867 #endif
2868 		for (i = 0; i < arg->num_ssids; ++i) {
2869 			ssid->ssid_len = cpu_to_le32(arg->ssid[i].ssid_len);
2870 			memcpy(ssid->ssid, arg->ssid[i].ssid,
2871 			       arg->ssid[i].ssid_len);
2872 			ssid++;
2873 		}
2874 	}
2875 
2876 	ptr += (arg->num_ssids * sizeof(*ssid));
2877 	len = arg->num_bssid * sizeof(*bssid);
2878 #if defined(__linux__)
2879 	tlv = ptr;
2880 #elif defined(__FreeBSD__)
2881 	tlv = (void *)ptr;
2882 #endif
2883 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2884 
2885 	ptr += TLV_HDR_SIZE;
2886 #if defined(__linux__)
2887 	bssid = ptr;
2888 #elif defined(__FreeBSD__)
2889 	bssid = (void *)ptr;
2890 #endif
2891 
2892 	if (arg->num_bssid) {
2893 		for (i = 0; i < arg->num_bssid; ++i) {
2894 			ether_addr_copy(bssid->addr,
2895 					arg->bssid_list[i].addr);
2896 			bssid++;
2897 		}
2898 	}
2899 
2900 	ptr += arg->num_bssid * sizeof(*bssid);
2901 
2902 	len = extraie_len_with_pad;
2903 #if defined(__linux__)
2904 	tlv = ptr;
2905 #elif defined(__FreeBSD__)
2906 	tlv = (void *)ptr;
2907 #endif
2908 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len);
2909 	ptr += TLV_HDR_SIZE;
2910 
2911 	if (extraie_len_with_pad)
2912 		memcpy(ptr, arg->extraie.ptr,
2913 		       arg->extraie.len);
2914 
2915 	ptr += extraie_len_with_pad;
2916 
2917 	if (arg->num_hint_s_ssid) {
2918 		len = arg->num_hint_s_ssid * sizeof(*s_ssid);
2919 #if defined(__linux__)
2920 		tlv = ptr;
2921 #elif defined(__FreeBSD__)
2922 		tlv = (void *)ptr;
2923 #endif
2924 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2925 		ptr += TLV_HDR_SIZE;
2926 #if defined(__linux__)
2927 		s_ssid = ptr;
2928 #elif defined(__FreeBSD__)
2929 		s_ssid = (void *)ptr;
2930 #endif
2931 		for (i = 0; i < arg->num_hint_s_ssid; ++i) {
2932 			s_ssid->freq_flags = arg->hint_s_ssid[i].freq_flags;
2933 			s_ssid->short_ssid = arg->hint_s_ssid[i].short_ssid;
2934 			s_ssid++;
2935 		}
2936 		ptr += len;
2937 	}
2938 
2939 	if (arg->num_hint_bssid) {
2940 		len = arg->num_hint_bssid * sizeof(struct ath12k_wmi_hint_bssid_arg);
2941 #if defined(__linux__)
2942 		tlv = ptr;
2943 #elif defined(__FreeBSD__)
2944 		tlv = (void *)ptr;
2945 #endif
2946 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
2947 		ptr += TLV_HDR_SIZE;
2948 #if defined(__linux__)
2949 		hint_bssid = ptr;
2950 #elif defined(__FreeBSD__)
2951 		hint_bssid = (void *)ptr;
2952 #endif
2953 		for (i = 0; i < arg->num_hint_bssid; ++i) {
2954 			hint_bssid->freq_flags =
2955 				arg->hint_bssid[i].freq_flags;
2956 			ether_addr_copy(&arg->hint_bssid[i].bssid.addr[0],
2957 					&hint_bssid->bssid.addr[0]);
2958 			hint_bssid++;
2959 		}
2960 	}
2961 
2962 	ret = ath12k_wmi_cmd_send(wmi, skb,
2963 				  WMI_START_SCAN_CMDID);
2964 	if (ret) {
2965 		ath12k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n");
2966 		dev_kfree_skb(skb);
2967 	}
2968 
2969 	return ret;
2970 }
2971 
ath12k_wmi_send_scan_stop_cmd(struct ath12k * ar,struct ath12k_wmi_scan_cancel_arg * arg)2972 int ath12k_wmi_send_scan_stop_cmd(struct ath12k *ar,
2973 				  struct ath12k_wmi_scan_cancel_arg *arg)
2974 {
2975 	struct ath12k_wmi_pdev *wmi = ar->wmi;
2976 	struct wmi_stop_scan_cmd *cmd;
2977 	struct sk_buff *skb;
2978 	int ret;
2979 
2980 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
2981 	if (!skb)
2982 		return -ENOMEM;
2983 
2984 	cmd = (struct wmi_stop_scan_cmd *)skb->data;
2985 
2986 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STOP_SCAN_CMD,
2987 						 sizeof(*cmd));
2988 
2989 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
2990 	cmd->requestor = cpu_to_le32(arg->requester);
2991 	cmd->scan_id = cpu_to_le32(arg->scan_id);
2992 	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
2993 	/* stop the scan with the corresponding scan_id */
2994 	if (arg->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
2995 		/* Cancelling all scans */
2996 		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ALL);
2997 	} else if (arg->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
2998 		/* Cancelling VAP scans */
2999 		cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_VAP_ALL);
3000 	} else if (arg->req_type == WLAN_SCAN_CANCEL_SINGLE) {
3001 		/* Cancelling specific scan */
3002 		cmd->req_type = WMI_SCAN_STOP_ONE;
3003 	} else {
3004 		ath12k_warn(ar->ab, "invalid scan cancel req_type %d",
3005 			    arg->req_type);
3006 		dev_kfree_skb(skb);
3007 		return -EINVAL;
3008 	}
3009 
3010 	ret = ath12k_wmi_cmd_send(wmi, skb,
3011 				  WMI_STOP_SCAN_CMDID);
3012 	if (ret) {
3013 		ath12k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n");
3014 		dev_kfree_skb(skb);
3015 	}
3016 
3017 	return ret;
3018 }
3019 
/* Push the channel list in @arg to firmware via WMI_SCAN_CHAN_LIST_CMDID.
 *
 * The list is split into as many WMI messages as needed to stay under
 * max_msg_len; every chunk after the first sets the append flag so the
 * firmware extends rather than replaces its list.  Consumes
 * arg->nallchans as chunks are sent.  Returns 0 on success or a
 * negative error code; the in-flight skb is freed on send failure.
 */
int ath12k_wmi_send_scan_chan_list_cmd(struct ath12k *ar,
				       struct ath12k_wmi_scan_chan_list_arg *arg)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_scan_chan_list_cmd *cmd;
	struct sk_buff *skb;
	struct ath12k_wmi_channel_params *chan_info;
	struct ath12k_wmi_channel_arg *channel_arg;
	struct wmi_tlv *tlv;
#if defined(__linux__)
	void *ptr;
#elif defined(__FreeBSD__)
	u8 *ptr;
#endif
	int i, ret, len;
	u16 num_send_chans, num_sends = 0, max_chan_limit = 0;
	__le32 *reg1, *reg2;

	channel_arg = &arg->channel[0];
	while (arg->nallchans) {
		/* how many channel entries fit in one WMI message */
		len = sizeof(*cmd) + TLV_HDR_SIZE;
		max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) /
			sizeof(*chan_info);

		num_send_chans = min(arg->nallchans, max_chan_limit);

		arg->nallchans -= num_send_chans;
		len += sizeof(*chan_info) * num_send_chans;

		skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
		if (!skb)
			return -ENOMEM;

		cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
		cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SCAN_CHAN_LIST_CMD,
							 sizeof(*cmd));
		cmd->pdev_id = cpu_to_le32(arg->pdev_id);
		cmd->num_scan_chans = cpu_to_le32(num_send_chans);
		/* chunks after the first append to the firmware's list */
		if (num_sends)
			cmd->flags |= cpu_to_le32(WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG);

		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
			   "WMI no.of chan = %d len = %d pdev_id = %d num_sends = %d\n",
			   num_send_chans, len, cmd->pdev_id, num_sends);

		ptr = skb->data + sizeof(*cmd);

		len = sizeof(*chan_info) * num_send_chans;
#if defined(__linux__)
		tlv = ptr;
#elif defined(__FreeBSD__)
		tlv = (void *)ptr;
#endif
		tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_STRUCT,
						     len);
		ptr += TLV_HDR_SIZE;

		for (i = 0; i < num_send_chans; ++i) {
#if defined(__linux__)
			chan_info = ptr;
#elif defined(__FreeBSD__)
			chan_info = (void *)ptr;
#endif
			memset(chan_info, 0, sizeof(*chan_info));
			len = sizeof(*chan_info);
			chan_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
								       len);

			reg1 = &chan_info->reg_info_1;
			reg2 = &chan_info->reg_info_2;
			chan_info->mhz = cpu_to_le32(channel_arg->mhz);
			chan_info->band_center_freq1 = cpu_to_le32(channel_arg->cfreq1);
			chan_info->band_center_freq2 = cpu_to_le32(channel_arg->cfreq2);

			if (channel_arg->is_chan_passive)
				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
			/* highest supported PHY gets priority: HE > VHT > HT */
			if (channel_arg->allow_he)
				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
			else if (channel_arg->allow_vht)
				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
			else if (channel_arg->allow_ht)
				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
			if (channel_arg->half_rate)
				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_HALF_RATE);
			if (channel_arg->quarter_rate)
				chan_info->info |=
					cpu_to_le32(WMI_CHAN_INFO_QUARTER_RATE);

			if (channel_arg->psc_channel)
				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PSC);

			if (channel_arg->dfs_set)
				chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);

			chan_info->info |= le32_encode_bits(channel_arg->phy_mode,
							    WMI_CHAN_INFO_MODE);
			/* regulatory power limits and class packed into
			 * the two reg_info words
			 */
			*reg1 |= le32_encode_bits(channel_arg->minpower,
						  WMI_CHAN_REG_INFO1_MIN_PWR);
			*reg1 |= le32_encode_bits(channel_arg->maxpower,
						  WMI_CHAN_REG_INFO1_MAX_PWR);
			*reg1 |= le32_encode_bits(channel_arg->maxregpower,
						  WMI_CHAN_REG_INFO1_MAX_REG_PWR);
			*reg1 |= le32_encode_bits(channel_arg->reg_class_id,
						  WMI_CHAN_REG_INFO1_REG_CLS);
			*reg2 |= le32_encode_bits(channel_arg->antennamax,
						  WMI_CHAN_REG_INFO2_ANT_MAX);
			*reg2 |= le32_encode_bits(channel_arg->maxregpower,
						  WMI_CHAN_REG_INFO2_MAX_TX_PWR);

			ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
				   "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n",
				   i, chan_info->mhz, chan_info->info);

			ptr += sizeof(*chan_info);

			channel_arg++;
		}

		ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
		if (ret) {
			ath12k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
			dev_kfree_skb(skb);
			return ret;
		}

		num_sends++;
	}

	return 0;
}
3150 
/* Program per-access-category WMM parameters (AIFS, CW min/max, TXOP,
 * ACM, no-ack) for @vdev_id via WMI_VDEV_SET_WMM_PARAMS_CMDID.
 */
int ath12k_wmi_send_wmm_update_cmd(struct ath12k *ar, u32 vdev_id,
				   struct wmi_wmm_params_all_arg *param)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_vdev_set_wmm_params_cmd *cmd;
	struct wmi_wmm_params *wmm_param;
	struct wmi_wmm_params_arg *ac_arg[WME_NUM_AC];
	struct sk_buff *skb;
	int ret, ac;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->wmm_param_type = 0;

	/* parameter source for each access category, indexed by the
	 * contiguous WME_AC_* values
	 */
	ac_arg[WME_AC_BE] = &param->ac_be;
	ac_arg[WME_AC_BK] = &param->ac_bk;
	ac_arg[WME_AC_VI] = &param->ac_vi;
	ac_arg[WME_AC_VO] = &param->ac_vo;

	for (ac = 0; ac < WME_NUM_AC; ac++) {
		struct wmi_wmm_params_arg *src = ac_arg[ac];

		wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac];
		wmm_param->tlv_header =
			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
					       sizeof(*wmm_param));

		wmm_param->aifs = cpu_to_le32(src->aifs);
		wmm_param->cwmin = cpu_to_le32(src->cwmin);
		wmm_param->cwmax = cpu_to_le32(src->cwmax);
		wmm_param->txoplimit = cpu_to_le32(src->txop);
		wmm_param->acm = cpu_to_le32(src->acm);
		wmm_param->no_ack = cpu_to_le32(src->no_ack);

		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
			   "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
			   ac, wmm_param->aifs, wmm_param->cwmin,
			   wmm_param->cwmax, wmm_param->txoplimit,
			   wmm_param->acm, wmm_param->no_ack);
	}

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_VDEV_SET_WMM_PARAMS_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID");
		dev_kfree_skb(skb);
	}

	return ret;
}
3216 
/* Send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID for @pdev_id.
 * Returns 0 on success or a negative error code; the message is
 * freed here on send failure.
 */
int ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath12k *ar,
						  u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_dfs_phyerr_offload_cmd *cmd;
	struct sk_buff *msg;
	int err;

	msg = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!msg)
		return -ENOMEM;

	cmd = (struct wmi_dfs_phyerr_offload_cmd *)msg->data;
	cmd->tlv_header =
		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD,
				       sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(pdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI dfs phy err offload enable pdev id %d\n", pdev_id);

	err = ath12k_wmi_cmd_send(wmi, msg,
				  WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
	if (err) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n");
		dev_kfree_skb(msg);
	}

	return err;
}
3249 
/* Send an opaque BIOS parameter blob to firmware via
 * WMI_PDEV_SET_BIOS_INTERFACE_CMDID.  @buf/@buf_len is copied into a
 * 32-bit padded byte-array TLV.
 *
 * Returns 0 on success or a negative error code (fix: the original
 * unconditionally returned 0, swallowing send failures).
 */
int ath12k_wmi_set_bios_cmd(struct ath12k_base *ab, u32 param_id,
			    const u8 *buf, size_t buf_len)
{
	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
	struct wmi_pdev_set_bios_interface_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u8 *ptr;
	u32 len, len_aligned;
	int ret;

	/* byte-array TLV payload must be padded to a 32-bit multiple */
	len_aligned = roundup(buf_len, sizeof(u32));
	len = sizeof(*cmd) + TLV_HDR_SIZE + len_aligned;

	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_bios_interface_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD,
						 sizeof(*cmd));
	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
	cmd->param_type_id = cpu_to_le32(param_id);
	cmd->length = cpu_to_le32(buf_len);

	ptr = skb->data + sizeof(*cmd);
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len_aligned);
	ptr += TLV_HDR_SIZE;
	memcpy(ptr, buf, buf_len);

	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
				  skb,
				  WMI_PDEV_SET_BIOS_INTERFACE_CMDID);
	if (ret) {
		ath12k_warn(ab,
			    "failed to send WMI_PDEV_SET_BIOS_INTERFACE_CMDID parameter id %d: %d\n",
			    param_id, ret);
		dev_kfree_skb(skb);
	}

	/* propagate the send result, matching the sibling BIOS helpers */
	return ret;
}
3293 
ath12k_wmi_set_bios_sar_cmd(struct ath12k_base * ab,const u8 * psar_table)3294 int ath12k_wmi_set_bios_sar_cmd(struct ath12k_base *ab, const u8 *psar_table)
3295 {
3296 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
3297 	struct wmi_pdev_set_bios_sar_table_cmd *cmd;
3298 	struct wmi_tlv *tlv;
3299 	struct sk_buff *skb;
3300 	int ret;
3301 	u8 *buf_ptr;
3302 	u32 len, sar_table_len_aligned, sar_dbs_backoff_len_aligned;
3303 	const u8 *psar_value = psar_table + ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET;
3304 	const u8 *pdbs_value = psar_table + ATH12K_ACPI_DBS_BACKOFF_DATA_OFFSET;
3305 
3306 	sar_table_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_TABLE_LEN, sizeof(u32));
3307 	sar_dbs_backoff_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN,
3308 					      sizeof(u32));
3309 	len = sizeof(*cmd) + TLV_HDR_SIZE + sar_table_len_aligned +
3310 		TLV_HDR_SIZE + sar_dbs_backoff_len_aligned;
3311 
3312 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
3313 	if (!skb)
3314 		return -ENOMEM;
3315 
3316 	cmd = (struct wmi_pdev_set_bios_sar_table_cmd *)skb->data;
3317 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD,
3318 						 sizeof(*cmd));
3319 	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
3320 	cmd->sar_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_TABLE_LEN);
3321 	cmd->dbs_backoff_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN);
3322 
3323 	buf_ptr = skb->data + sizeof(*cmd);
3324 	tlv = (struct wmi_tlv *)buf_ptr;
3325 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
3326 					 sar_table_len_aligned);
3327 	buf_ptr += TLV_HDR_SIZE;
3328 	memcpy(buf_ptr, psar_value, ATH12K_ACPI_BIOS_SAR_TABLE_LEN);
3329 
3330 	buf_ptr += sar_table_len_aligned;
3331 	tlv = (struct wmi_tlv *)buf_ptr;
3332 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
3333 					 sar_dbs_backoff_len_aligned);
3334 	buf_ptr += TLV_HDR_SIZE;
3335 	memcpy(buf_ptr, pdbs_value, ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN);
3336 
3337 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
3338 				  skb,
3339 				  WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID);
3340 	if (ret) {
3341 		ath12k_warn(ab,
3342 			    "failed to send WMI_PDEV_SET_BIOS_INTERFACE_CMDID %d\n",
3343 			    ret);
3344 		dev_kfree_skb(skb);
3345 	}
3346 
3347 	return ret;
3348 }
3349 
ath12k_wmi_set_bios_geo_cmd(struct ath12k_base * ab,const u8 * pgeo_table)3350 int ath12k_wmi_set_bios_geo_cmd(struct ath12k_base *ab, const u8 *pgeo_table)
3351 {
3352 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
3353 	struct wmi_pdev_set_bios_geo_table_cmd *cmd;
3354 	struct wmi_tlv *tlv;
3355 	struct sk_buff *skb;
3356 	int ret;
3357 	u8 *buf_ptr;
3358 	u32 len, sar_geo_len_aligned;
3359 	const u8 *pgeo_value = pgeo_table + ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET;
3360 
3361 	sar_geo_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN, sizeof(u32));
3362 	len = sizeof(*cmd) + TLV_HDR_SIZE + sar_geo_len_aligned;
3363 
3364 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
3365 	if (!skb)
3366 		return -ENOMEM;
3367 
3368 	cmd = (struct wmi_pdev_set_bios_geo_table_cmd *)skb->data;
3369 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD,
3370 						 sizeof(*cmd));
3371 	cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
3372 	cmd->geo_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN);
3373 
3374 	buf_ptr = skb->data + sizeof(*cmd);
3375 	tlv = (struct wmi_tlv *)buf_ptr;
3376 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, sar_geo_len_aligned);
3377 	buf_ptr += TLV_HDR_SIZE;
3378 	memcpy(buf_ptr, pgeo_value, ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN);
3379 
3380 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
3381 				  skb,
3382 				  WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID);
3383 	if (ret) {
3384 		ath12k_warn(ab,
3385 			    "failed to send WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID %d\n",
3386 			    ret);
3387 		dev_kfree_skb(skb);
3388 	}
3389 
3390 	return ret;
3391 }
3392 
/* Send WMI_DELBA_SEND_CMDID to tear down the block-ack session for
 * @tid with peer @mac on @vdev_id.  Returns 0 or a negative error;
 * the message is freed here on send failure.
 */
int ath12k_wmi_delba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 initiator, u32 reason)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_delba_send_cmd *cmd;
	struct sk_buff *msg;
	int err;

	msg = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!msg)
		return -ENOMEM;

	cmd = (struct wmi_delba_send_cmd *)msg->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DELBA_SEND_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, mac);
	cmd->tid = cpu_to_le32(tid);
	cmd->initiator = cpu_to_le32(initiator);
	cmd->reasoncode = cpu_to_le32(reason);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
		   vdev_id, mac, tid, initiator, reason);

	err = ath12k_wmi_cmd_send(wmi, msg, WMI_DELBA_SEND_CMDID);

	if (err) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_DELBA_SEND_CMDID cmd\n");
		dev_kfree_skb(msg);
	}

	return err;
}
3428 
/* Send WMI_ADDBA_SET_RESP_CMDID carrying the host's ADDBA response
 * status for @tid with peer @mac on @vdev_id.  Returns 0 or a
 * negative error; the message is freed here on send failure.
 */
int ath12k_wmi_addba_set_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac,
			      u32 tid, u32 status)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_addba_setresponse_cmd *cmd;
	struct sk_buff *msg;
	int err;

	msg = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!msg)
		return -ENOMEM;

	cmd = (struct wmi_addba_setresponse_cmd *)msg->data;
	cmd->tlv_header =
		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SETRESPONSE_CMD,
				       sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, mac);
	cmd->tid = cpu_to_le32(tid);
	cmd->statuscode = cpu_to_le32(status);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
		   vdev_id, mac, tid, status);

	err = ath12k_wmi_cmd_send(wmi, msg, WMI_ADDBA_SET_RESP_CMDID);

	if (err) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n");
		dev_kfree_skb(msg);
	}

	return err;
}
3464 
/* Send WMI_ADDBA_SEND_CMDID to start an ADDBA negotiation for @tid
 * with peer @mac on @vdev_id, advertising @buf_size.  Returns 0 or a
 * negative error; the message is freed here on send failure.
 */
int ath12k_wmi_addba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 buf_size)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_addba_send_cmd *cmd;
	struct sk_buff *msg;
	int err;

	msg = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!msg)
		return -ENOMEM;

	cmd = (struct wmi_addba_send_cmd *)msg->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SEND_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, mac);
	cmd->tid = cpu_to_le32(tid);
	cmd->buffersize = cpu_to_le32(buf_size);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
		   vdev_id, mac, tid, buf_size);

	err = ath12k_wmi_cmd_send(wmi, msg, WMI_ADDBA_SEND_CMDID);

	if (err) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_ADDBA_SEND_CMDID cmd\n");
		dev_kfree_skb(msg);
	}

	return err;
}
3499 
/* Send WMI_ADDBA_CLEAR_RESP_CMDID: clear any previously configured ADDBA
 * response state for peer @mac on @vdev_id.
 * Returns 0 on success or a negative errno; the skb is freed on failure.
 */
int ath12k_wmi_addba_clear_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac)
{
	struct wmi_addba_clear_resp_cmd *cmd;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_CLEAR_RESP_CMD,
						 sizeof(*cmd));
	ether_addr_copy(cmd->peer_macaddr.addr, mac);
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
		   vdev_id, mac);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}
3532 
ath12k_wmi_send_init_country_cmd(struct ath12k * ar,struct ath12k_wmi_init_country_arg * arg)3533 int ath12k_wmi_send_init_country_cmd(struct ath12k *ar,
3534 				     struct ath12k_wmi_init_country_arg *arg)
3535 {
3536 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3537 	struct wmi_init_country_cmd *cmd;
3538 	struct sk_buff *skb;
3539 	int ret;
3540 
3541 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3542 	if (!skb)
3543 		return -ENOMEM;
3544 
3545 	cmd = (struct wmi_init_country_cmd *)skb->data;
3546 	cmd->tlv_header =
3547 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_INIT_COUNTRY_CMD,
3548 				       sizeof(*cmd));
3549 
3550 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
3551 
3552 	switch (arg->flags) {
3553 	case ALPHA_IS_SET:
3554 		cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_ALPHA;
3555 		memcpy(&cmd->cc_info.alpha2, arg->cc_info.alpha2, 3);
3556 		break;
3557 	case CC_IS_SET:
3558 		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE);
3559 		cmd->cc_info.country_code =
3560 			cpu_to_le32(arg->cc_info.country_code);
3561 		break;
3562 	case REGDMN_IS_SET:
3563 		cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_REGDOMAIN);
3564 		cmd->cc_info.regdom_id = cpu_to_le32(arg->cc_info.regdom_id);
3565 		break;
3566 	default:
3567 		ret = -EINVAL;
3568 		goto out;
3569 	}
3570 
3571 	ret = ath12k_wmi_cmd_send(wmi, skb,
3572 				  WMI_SET_INIT_COUNTRY_CMDID);
3573 
3574 out:
3575 	if (ret) {
3576 		ath12k_warn(ar->ab,
3577 			    "failed to send WMI_SET_INIT_COUNTRY CMD :%d\n",
3578 			    ret);
3579 		dev_kfree_skb(skb);
3580 	}
3581 
3582 	return ret;
3583 }
3584 
ath12k_wmi_send_set_current_country_cmd(struct ath12k * ar,struct wmi_set_current_country_arg * arg)3585 int ath12k_wmi_send_set_current_country_cmd(struct ath12k *ar,
3586 					    struct wmi_set_current_country_arg *arg)
3587 {
3588 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3589 	struct wmi_set_current_country_cmd *cmd;
3590 	struct sk_buff *skb;
3591 	int ret;
3592 
3593 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3594 	if (!skb)
3595 		return -ENOMEM;
3596 
3597 	cmd = (struct wmi_set_current_country_cmd *)skb->data;
3598 	cmd->tlv_header =
3599 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_CURRENT_COUNTRY_CMD,
3600 				       sizeof(*cmd));
3601 
3602 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
3603 	memcpy(&cmd->new_alpha2, &arg->alpha2, sizeof(arg->alpha2));
3604 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SET_CURRENT_COUNTRY_CMDID);
3605 
3606 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3607 		   "set current country pdev id %d alpha2 %c%c\n",
3608 		   ar->pdev->pdev_id,
3609 		   arg->alpha2[0],
3610 		   arg->alpha2[1]);
3611 
3612 	if (ret) {
3613 		ath12k_warn(ar->ab,
3614 			    "failed to send WMI_SET_CURRENT_COUNTRY_CMDID: %d\n", ret);
3615 		dev_kfree_skb(skb);
3616 	}
3617 
3618 	return ret;
3619 }
3620 
ath12k_wmi_send_11d_scan_start_cmd(struct ath12k * ar,struct wmi_11d_scan_start_arg * arg)3621 int ath12k_wmi_send_11d_scan_start_cmd(struct ath12k *ar,
3622 				       struct wmi_11d_scan_start_arg *arg)
3623 {
3624 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3625 	struct wmi_11d_scan_start_cmd *cmd;
3626 	struct sk_buff *skb;
3627 	int ret;
3628 
3629 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
3630 	if (!skb)
3631 		return -ENOMEM;
3632 
3633 	cmd = (struct wmi_11d_scan_start_cmd *)skb->data;
3634 	cmd->tlv_header =
3635 		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_11D_SCAN_START_CMD,
3636 				       sizeof(*cmd));
3637 
3638 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
3639 	cmd->scan_period_msec = cpu_to_le32(arg->scan_period_msec);
3640 	cmd->start_interval_msec = cpu_to_le32(arg->start_interval_msec);
3641 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_START_CMDID);
3642 
3643 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
3644 		   "send 11d scan start vdev id %d period %d ms internal %d ms\n",
3645 		   arg->vdev_id, arg->scan_period_msec,
3646 		   arg->start_interval_msec);
3647 
3648 	if (ret) {
3649 		ath12k_warn(ar->ab,
3650 			    "failed to send WMI_11D_SCAN_START_CMDID: %d\n", ret);
3651 		dev_kfree_skb(skb);
3652 	}
3653 
3654 	return ret;
3655 }
3656 
/* Stop the periodic 802.11d scan on a vdev via WMI_11D_SCAN_STOP_CMDID.
 * Returns 0 on success or a negative errno; the skb is freed on failure.
 */
int ath12k_wmi_send_11d_scan_stop_cmd(struct ath12k *ar, u32 vdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_11d_scan_stop_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_11d_scan_stop_cmd *)skb->data;
	cmd->tlv_header =
		ath12k_wmi_tlv_cmd_hdr(WMI_TAG_11D_SCAN_STOP_CMD,
				       sizeof(*cmd));

	cmd->vdev_id = cpu_to_le32(vdev_id);
	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_STOP_CMDID);

	/* Fix: print the host-order vdev_id, not the __le32 wire field,
	 * which would be byte-swapped on big-endian hosts.
	 */
	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "send 11d scan stop vdev id %d\n",
		   vdev_id);

	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send WMI_11D_SCAN_STOP_CMDID: %d\n", ret);
		dev_kfree_skb(skb);
	}

	return ret;
}
3688 
/* Enable TWT (Target Wake Time) on a pdev via WMI_TWT_ENABLE_CMDID,
 * programming the driver's default TWT scheduler tuning parameters
 * (ATH12K_TWT_DEF_* constants).
 *
 * Returns 0 on success or a negative errno; the skb is freed on failure.
 */
int
ath12k_wmi_send_twt_enable_cmd(struct ath12k *ar, u32 pdev_id)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_twt_enable_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_twt_enable_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_ENABLE_CMD,
						 len);
	cmd->pdev_id = cpu_to_le32(pdev_id);
	/* All remaining fields are firmware scheduler knobs filled with the
	 * driver-wide defaults; every wire field is little-endian.
	 */
	cmd->sta_cong_timer_ms = cpu_to_le32(ATH12K_TWT_DEF_STA_CONG_TIMER_MS);
	cmd->default_slot_size = cpu_to_le32(ATH12K_TWT_DEF_DEFAULT_SLOT_SIZE);
	cmd->congestion_thresh_setup =
		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_SETUP);
	cmd->congestion_thresh_teardown =
		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_TEARDOWN);
	cmd->congestion_thresh_critical =
		cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_CRITICAL);
	cmd->interference_thresh_teardown =
		cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN);
	cmd->interference_thresh_setup =
		cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_SETUP);
	cmd->min_no_sta_setup = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_SETUP);
	cmd->min_no_sta_teardown = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_TEARDOWN);
	cmd->no_of_bcast_mcast_slots =
		cpu_to_le32(ATH12K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS);
	cmd->min_no_twt_slots = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_TWT_SLOTS);
	cmd->max_no_sta_twt = cpu_to_le32(ATH12K_TWT_DEF_MAX_NO_STA_TWT);
	cmd->mode_check_interval = cpu_to_le32(ATH12K_TWT_DEF_MODE_CHECK_INTERVAL);
	cmd->add_sta_slot_interval = cpu_to_le32(ATH12K_TWT_DEF_ADD_STA_SLOT_INTERVAL);
	cmd->remove_sta_slot_interval =
		cpu_to_le32(ATH12K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL);
	/* TODO add MBSSID support */
	cmd->mbss_support = 0;

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_TWT_ENABLE_CMDID);
	if (ret) {
		ath12k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID");
		dev_kfree_skb(skb);
	}
	return ret;
}
3741 
3742 int
ath12k_wmi_send_twt_disable_cmd(struct ath12k * ar,u32 pdev_id)3743 ath12k_wmi_send_twt_disable_cmd(struct ath12k *ar, u32 pdev_id)
3744 {
3745 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3746 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3747 	struct wmi_twt_disable_params_cmd *cmd;
3748 	struct sk_buff *skb;
3749 	int ret, len;
3750 
3751 	len = sizeof(*cmd);
3752 
3753 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3754 	if (!skb)
3755 		return -ENOMEM;
3756 
3757 	cmd = (struct wmi_twt_disable_params_cmd *)skb->data;
3758 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_DISABLE_CMD,
3759 						 len);
3760 	cmd->pdev_id = cpu_to_le32(pdev_id);
3761 
3762 	ret = ath12k_wmi_cmd_send(wmi, skb,
3763 				  WMI_TWT_DISABLE_CMDID);
3764 	if (ret) {
3765 		ath12k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID");
3766 		dev_kfree_skb(skb);
3767 	}
3768 	return ret;
3769 }
3770 
3771 int
ath12k_wmi_send_obss_spr_cmd(struct ath12k * ar,u32 vdev_id,struct ieee80211_he_obss_pd * he_obss_pd)3772 ath12k_wmi_send_obss_spr_cmd(struct ath12k *ar, u32 vdev_id,
3773 			     struct ieee80211_he_obss_pd *he_obss_pd)
3774 {
3775 	struct ath12k_wmi_pdev *wmi = ar->wmi;
3776 	struct ath12k_base *ab = wmi->wmi_ab->ab;
3777 	struct wmi_obss_spatial_reuse_params_cmd *cmd;
3778 	struct sk_buff *skb;
3779 	int ret, len;
3780 
3781 	len = sizeof(*cmd);
3782 
3783 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
3784 	if (!skb)
3785 		return -ENOMEM;
3786 
3787 	cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data;
3788 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD,
3789 						 len);
3790 	cmd->vdev_id = cpu_to_le32(vdev_id);
3791 	cmd->enable = cpu_to_le32(he_obss_pd->enable);
3792 	cmd->obss_min = a_cpu_to_sle32(he_obss_pd->min_offset);
3793 	cmd->obss_max = a_cpu_to_sle32(he_obss_pd->max_offset);
3794 
3795 	ret = ath12k_wmi_cmd_send(wmi, skb,
3796 				  WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID);
3797 	if (ret) {
3798 		ath12k_warn(ab,
3799 			    "Failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID");
3800 		dev_kfree_skb(skb);
3801 	}
3802 	return ret;
3803 }
3804 
/* Configure BSS-color collision detection on a vdev via
 * WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID. @enable selects between the
 * detection-on and detection-off event types. Returns 0 on success or a
 * negative errno; the skb is freed on failure.
 */
int ath12k_wmi_obss_color_cfg_cmd(struct ath12k *ar, u32 vdev_id,
				  u8 bss_color, u32 period,
				  bool enable)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_obss_color_collision_cfg_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG,
						 len);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->evt_type = enable ? cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION) :
		cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION_DISABLE);
	cmd->current_bss_color = cpu_to_le32(bss_color);
	cmd->detection_period_ms = cpu_to_le32(period);
	cmd->scan_period_ms = cpu_to_le32(ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS);
	cmd->free_slot_expiry_time_ms = 0;
	cmd->flags = 0;

	/* Fix: the cmd fields are __le32 wire values; convert back to host
	 * order before printing so the log is correct on big-endian hosts.
	 */
	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi_send_obss_color_collision_cfg id %d type %d bss_color %d detect_period %d scan_period %d\n",
		   le32_to_cpu(cmd->vdev_id), le32_to_cpu(cmd->evt_type),
		   le32_to_cpu(cmd->current_bss_color),
		   le32_to_cpu(cmd->detection_period_ms),
		   le32_to_cpu(cmd->scan_period_ms));

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID);
	if (ret) {
		ath12k_warn(ab, "Failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID");
		dev_kfree_skb(skb);
	}
	return ret;
}
3846 
/* Enable or disable BSS color change announcement handling on a vdev via
 * WMI_BSS_COLOR_CHANGE_ENABLE_CMDID. Returns 0 on success or a negative
 * errno; the skb is freed on failure.
 */
int ath12k_wmi_send_bss_color_change_enable_cmd(struct ath12k *ar, u32 vdev_id,
						bool enable)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct ath12k_base *ab = wmi->wmi_ab->ab;
	struct wmi_bss_color_change_enable_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BSS_COLOR_CHANGE_ENABLE,
						 len);
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->enable = enable ? cpu_to_le32(1) : 0;

	/* Fix: print host-order values instead of the __le32 wire fields,
	 * which would be byte-swapped on big-endian hosts.
	 */
	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi_send_bss_color_change_enable id %d enable %d\n",
		   vdev_id, enable);

	ret = ath12k_wmi_cmd_send(wmi, skb,
				  WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
	if (ret) {
		ath12k_warn(ab, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID");
		dev_kfree_skb(skb);
	}
	return ret;
}
3880 
/* Upload a FILS discovery frame template to firmware. The template bytes
 * are carried in a trailing WMI_TAG_ARRAY_BYTE TLV padded to a 4-byte
 * boundary; buf_len records the unpadded length. Returns 0 on success or
 * a negative errno; the skb is freed on failure.
 */
int ath12k_wmi_fils_discovery_tmpl(struct ath12k *ar, u32 vdev_id,
				   struct sk_buff *tmpl)
{
	struct wmi_fils_discovery_tmpl_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t padded_len;
	void *ptr;
	int ret;

	padded_len = roundup(tmpl->len, 4);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %i set FILS discovery template\n", vdev_id);

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab,
				   sizeof(*cmd) + TLV_HDR_SIZE + padded_len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_fils_discovery_tmpl_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FILS_DISCOVERY_TMPL_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->buf_len = cpu_to_le32(tmpl->len);

	ptr = skb->data + sizeof(*cmd);
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, padded_len);
	memcpy(tlv->value, tmpl->data, tmpl->len);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_FILS_DISCOVERY_TMPL_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "WMI vdev %i failed to send FILS discovery template command\n",
			    vdev_id);
		dev_kfree_skb(skb);
	}

	return ret;
}
3921 
/* Upload a probe response frame template to firmware via
 * WMI_PRB_TMPL_CMDID. The message layout is: command header, a
 * bcn_prb_info TLV (caps/erp left zero), then the template bytes in a
 * WMI_TAG_ARRAY_BYTE TLV padded to a 4-byte boundary; buf_len carries
 * the unpadded template length.
 *
 * Returns 0 on success or a negative errno; the skb is freed on failure.
 */
int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id,
			       struct sk_buff *tmpl)
{
	struct wmi_probe_tmpl_cmd *cmd;
	struct ath12k_wmi_bcn_prb_info_params *probe_info;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
#if defined(__linux__)
	void *ptr;
#elif defined(__FreeBSD__)
	/* FreeBSD: u8 * to keep the pointer arithmetic below standard C. */
	u8 *ptr;
#endif
	int ret, len;
	size_t aligned_len = roundup(tmpl->len, 4);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %i set probe response template\n", vdev_id);

	len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_probe_tmpl_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PRB_TMPL_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->buf_len = cpu_to_le32(tmpl->len);

	/* Cursor walks the TLVs laid out after the fixed command header. */
	ptr = skb->data + sizeof(*cmd);

#if defined(__linux__)
	probe_info = ptr;
#elif defined(__FreeBSD__)
	probe_info = (void *)ptr;
#endif
	/* len is reused here for the bcn_prb_info TLV size. */
	len = sizeof(*probe_info);
	probe_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
							len);
	probe_info->caps = 0;
	probe_info->erp = 0;

	ptr += sizeof(*probe_info);

#if defined(__linux__)
	tlv = ptr;
#elif defined(__FreeBSD__)
	tlv = (void *)ptr;
#endif
	/* Template payload; padding bytes beyond tmpl->len stay zeroed by
	 * the skb allocator.
	 */
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
	memcpy(tlv->value, tmpl->data, tmpl->len);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "WMI vdev %i failed to send probe response template command\n",
			    vdev_id);
		dev_kfree_skb(skb);
	}
	return ret;
}
3984 
/* Enable/configure FILS discovery or unsolicited broadcast probe response
 * transmission on a vdev (config selects the latter when true), with the
 * given interval in TUs. Returns 0 on success or a negative errno; the
 * skb is freed on failure.
 */
int ath12k_wmi_fils_discovery(struct ath12k *ar, u32 vdev_id, u32 interval,
			      bool unsol_bcast_probe_resp_enabled)
{
	struct wmi_fils_discovery_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI vdev %i set %s interval to %u TU\n",
		   vdev_id, unsol_bcast_probe_resp_enabled ?
		   "unsolicited broadcast probe response" : "FILS discovery",
		   interval);

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_fils_discovery_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ENABLE_FILS_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->interval = cpu_to_le32(interval);
	cmd->config = cpu_to_le32(unsol_bcast_probe_resp_enabled);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID);
	if (!ret)
		return 0;

	ath12k_warn(ar->ab,
		    "WMI vdev %i failed to send FILS discovery enable/disable command\n",
		    vdev_id);
	dev_kfree_skb(skb);
	return ret;
}
4019 
/* Handle a WMI OBSS color collision event from firmware: parse the TLV,
 * look up the reporting vdev and, for a detection event, forward the
 * observed color bitmap to mac80211.
 *
 * Runs in the WMI event path (atomic context: GFP_ATOMIC parse, RCU read
 * lock for the arvif lookup). The tb array is auto-kfree'd via __free()
 * when it goes out of scope, so all returns below are leak-free.
 */
static void
ath12k_wmi_obss_color_collision_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	const struct wmi_obss_color_collision_event *ev;
	struct ath12k_link_vif *arvif;
	u32 vdev_id, evt_type;
	u64 bitmap;

	const void **tb __free(kfree) = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ath12k_warn(ab, "failed to parse OBSS color collision tlv %ld\n",
			    PTR_ERR(tb));
		return;
	}

	ev = tb[WMI_TAG_OBSS_COLOR_COLLISION_EVT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch OBSS color collision event\n");
		return;
	}

	vdev_id = le32_to_cpu(ev->vdev_id);
	evt_type = le32_to_cpu(ev->evt_type);
	bitmap = le64_to_cpu(ev->obss_color_bitmap);

	/* Hold the RCU read lock for the remainder of the function; the
	 * arvif returned below is only valid under it.
	 */
	guard(rcu)();

	arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_id);
	if (!arvif) {
		ath12k_warn(ab, "no arvif found for vdev %u in OBSS color collision event\n",
			    vdev_id);
		return;
	}

	switch (evt_type) {
	case WMI_BSS_COLOR_COLLISION_DETECTION:
		ieee80211_obss_color_collision_notify(arvif->ahvif->vif,
						      bitmap,
						      arvif->link_id);
		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "obss color collision detected vdev %u event %d bitmap %016llx\n",
			   vdev_id, evt_type, bitmap);
		break;
	case WMI_BSS_COLOR_COLLISION_DISABLE:
	case WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY:
	case WMI_BSS_COLOR_FREE_SLOT_AVAILABLE:
		/* Informational event types; nothing to forward. */
		break;
	default:
		ath12k_warn(ab, "unknown OBSS color collision event type %d\n", evt_type);
	}
}
4071 
4072 static void
ath12k_fill_band_to_mac_param(struct ath12k_base * soc,struct ath12k_wmi_pdev_band_arg * arg)4073 ath12k_fill_band_to_mac_param(struct ath12k_base  *soc,
4074 			      struct ath12k_wmi_pdev_band_arg *arg)
4075 {
4076 	u8 i;
4077 	struct ath12k_wmi_hal_reg_capabilities_ext_arg *hal_reg_cap;
4078 	struct ath12k_pdev *pdev;
4079 
4080 	for (i = 0; i < soc->num_radios; i++) {
4081 		pdev = &soc->pdevs[i];
4082 		hal_reg_cap = &soc->hal_reg_cap[i];
4083 		arg[i].pdev_id = pdev->pdev_id;
4084 
4085 		switch (pdev->cap.supported_bands) {
4086 		case WMI_HOST_WLAN_2GHZ_5GHZ_CAP:
4087 			arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
4088 			arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
4089 			break;
4090 		case WMI_HOST_WLAN_2GHZ_CAP:
4091 			arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
4092 			arg[i].end_freq = hal_reg_cap->high_2ghz_chan;
4093 			break;
4094 		case WMI_HOST_WLAN_5GHZ_CAP:
4095 			arg[i].start_freq = hal_reg_cap->low_5ghz_chan;
4096 			arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
4097 			break;
4098 		default:
4099 			break;
4100 		}
4101 	}
4102 }
4103 
/* Translate the host-order resource configuration (tg_cfg) into the
 * little-endian WMI resource-config TLV body (wmi_cfg) sent with
 * WMI_INIT_CMDID. This is a field-for-field endian conversion; the only
 * composed fields are flag1, flags2 and host_service_flags, noted below.
 */
static void
ath12k_wmi_copy_resource_config(struct ath12k_base *ab,
				struct ath12k_wmi_resource_config_params *wmi_cfg,
				struct ath12k_wmi_resource_config_arg *tg_cfg)
{
	wmi_cfg->num_vdevs = cpu_to_le32(tg_cfg->num_vdevs);
	wmi_cfg->num_peers = cpu_to_le32(tg_cfg->num_peers);
	wmi_cfg->num_offload_peers = cpu_to_le32(tg_cfg->num_offload_peers);
	wmi_cfg->num_offload_reorder_buffs =
		cpu_to_le32(tg_cfg->num_offload_reorder_buffs);
	wmi_cfg->num_peer_keys = cpu_to_le32(tg_cfg->num_peer_keys);
	wmi_cfg->num_tids = cpu_to_le32(tg_cfg->num_tids);
	wmi_cfg->ast_skid_limit = cpu_to_le32(tg_cfg->ast_skid_limit);
	wmi_cfg->tx_chain_mask = cpu_to_le32(tg_cfg->tx_chain_mask);
	wmi_cfg->rx_chain_mask = cpu_to_le32(tg_cfg->rx_chain_mask);
	wmi_cfg->rx_timeout_pri[0] = cpu_to_le32(tg_cfg->rx_timeout_pri[0]);
	wmi_cfg->rx_timeout_pri[1] = cpu_to_le32(tg_cfg->rx_timeout_pri[1]);
	wmi_cfg->rx_timeout_pri[2] = cpu_to_le32(tg_cfg->rx_timeout_pri[2]);
	wmi_cfg->rx_timeout_pri[3] = cpu_to_le32(tg_cfg->rx_timeout_pri[3]);
	wmi_cfg->rx_decap_mode = cpu_to_le32(tg_cfg->rx_decap_mode);
	wmi_cfg->scan_max_pending_req = cpu_to_le32(tg_cfg->scan_max_pending_req);
	wmi_cfg->bmiss_offload_max_vdev = cpu_to_le32(tg_cfg->bmiss_offload_max_vdev);
	wmi_cfg->roam_offload_max_vdev = cpu_to_le32(tg_cfg->roam_offload_max_vdev);
	wmi_cfg->roam_offload_max_ap_profiles =
		cpu_to_le32(tg_cfg->roam_offload_max_ap_profiles);
	wmi_cfg->num_mcast_groups = cpu_to_le32(tg_cfg->num_mcast_groups);
	wmi_cfg->num_mcast_table_elems = cpu_to_le32(tg_cfg->num_mcast_table_elems);
	wmi_cfg->mcast2ucast_mode = cpu_to_le32(tg_cfg->mcast2ucast_mode);
	wmi_cfg->tx_dbg_log_size = cpu_to_le32(tg_cfg->tx_dbg_log_size);
	wmi_cfg->num_wds_entries = cpu_to_le32(tg_cfg->num_wds_entries);
	wmi_cfg->dma_burst_size = cpu_to_le32(tg_cfg->dma_burst_size);
	wmi_cfg->mac_aggr_delim = cpu_to_le32(tg_cfg->mac_aggr_delim);
	wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
		cpu_to_le32(tg_cfg->rx_skip_defrag_timeout_dup_detection_check);
	wmi_cfg->vow_config = cpu_to_le32(tg_cfg->vow_config);
	wmi_cfg->gtk_offload_max_vdev = cpu_to_le32(tg_cfg->gtk_offload_max_vdev);
	wmi_cfg->num_msdu_desc = cpu_to_le32(tg_cfg->num_msdu_desc);
	wmi_cfg->max_frag_entries = cpu_to_le32(tg_cfg->max_frag_entries);
	wmi_cfg->num_tdls_vdevs = cpu_to_le32(tg_cfg->num_tdls_vdevs);
	wmi_cfg->num_tdls_conn_table_entries =
		cpu_to_le32(tg_cfg->num_tdls_conn_table_entries);
	wmi_cfg->beacon_tx_offload_max_vdev =
		cpu_to_le32(tg_cfg->beacon_tx_offload_max_vdev);
	wmi_cfg->num_multicast_filter_entries =
		cpu_to_le32(tg_cfg->num_multicast_filter_entries);
	wmi_cfg->num_wow_filters = cpu_to_le32(tg_cfg->num_wow_filters);
	wmi_cfg->num_keep_alive_pattern = cpu_to_le32(tg_cfg->num_keep_alive_pattern);
	wmi_cfg->keep_alive_pattern_size = cpu_to_le32(tg_cfg->keep_alive_pattern_size);
	wmi_cfg->max_tdls_concurrent_sleep_sta =
		cpu_to_le32(tg_cfg->max_tdls_concurrent_sleep_sta);
	wmi_cfg->max_tdls_concurrent_buffer_sta =
		cpu_to_le32(tg_cfg->max_tdls_concurrent_buffer_sta);
	wmi_cfg->wmi_send_separate = cpu_to_le32(tg_cfg->wmi_send_separate);
	wmi_cfg->num_ocb_vdevs = cpu_to_le32(tg_cfg->num_ocb_vdevs);
	wmi_cfg->num_ocb_channels = cpu_to_le32(tg_cfg->num_ocb_channels);
	wmi_cfg->num_ocb_schedules = cpu_to_le32(tg_cfg->num_ocb_schedules);
	wmi_cfg->bpf_instruction_size = cpu_to_le32(tg_cfg->bpf_instruction_size);
	wmi_cfg->max_bssid_rx_filters = cpu_to_le32(tg_cfg->max_bssid_rx_filters);
	wmi_cfg->use_pdev_id = cpu_to_le32(tg_cfg->use_pdev_id);
	/* flag1 mixes the configured ATF setting with always-on feature
	 * bits (64-bit BSS channel info, ACK RSSI reporting).
	 */
	wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config |
				     WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64 |
				     WMI_RSRC_CFG_FLAG1_ACK_RSSI);
	wmi_cfg->peer_map_unmap_version = cpu_to_le32(tg_cfg->peer_map_unmap_version);
	wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
	wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
	wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count);
	wmi_cfg->flags2 = le32_encode_bits(tg_cfg->peer_metadata_ver,
					   WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION);
	/* Advertise extended reg-cc event support; REO queue reference
	 * support is added below only on hardware that provides a LUT.
	 */
	wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported <<
				WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
	if (ab->hw_params->reoq_lut_support)
		wmi_cfg->host_service_flags |=
			cpu_to_le32(1 << WMI_RSRC_CFG_HOST_SVC_FLAG_REO_QREF_SUPPORT_BIT);
	wmi_cfg->ema_max_vap_cnt = cpu_to_le32(tg_cfg->ema_max_vap_cnt);
	wmi_cfg->ema_max_profile_period = cpu_to_le32(tg_cfg->ema_max_profile_period);
	wmi_cfg->flags2 |= cpu_to_le32(WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET);
}
4181 
ath12k_init_cmd_send(struct ath12k_wmi_pdev * wmi,struct ath12k_wmi_init_cmd_arg * arg)4182 static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi,
4183 				struct ath12k_wmi_init_cmd_arg *arg)
4184 {
4185 	struct ath12k_base *ab = wmi->wmi_ab->ab;
4186 	struct sk_buff *skb;
4187 	struct wmi_init_cmd *cmd;
4188 	struct ath12k_wmi_resource_config_params *cfg;
4189 	struct ath12k_wmi_pdev_set_hw_mode_cmd *hw_mode;
4190 	struct ath12k_wmi_pdev_band_to_mac_params *band_to_mac;
4191 	struct ath12k_wmi_host_mem_chunk_params *host_mem_chunks;
4192 	struct wmi_tlv *tlv;
4193 	size_t ret, len;
4194 #if defined(__linux__)
4195 	void *ptr;
4196 #elif defined(__FreeBSD__)
4197 	u8 *ptr;
4198 #endif
4199 	u32 hw_mode_len = 0;
4200 	u16 idx;
4201 
4202 	if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX)
4203 		hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
4204 			      (arg->num_band_to_mac * sizeof(*band_to_mac));
4205 
4206 	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
4207 	      (arg->num_mem_chunks ? (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0);
4208 
4209 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
4210 	if (!skb)
4211 		return -ENOMEM;
4212 
4213 	cmd = (struct wmi_init_cmd *)skb->data;
4214 
4215 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_INIT_CMD,
4216 						 sizeof(*cmd));
4217 
4218 	ptr = skb->data + sizeof(*cmd);
4219 #if defined(__linux__)
4220 	cfg = ptr;
4221 #elif defined(__FreeBSD__)
4222 	cfg = (void *)ptr;
4223 #endif
4224 
4225 	ath12k_wmi_copy_resource_config(ab, cfg, &arg->res_cfg);
4226 
4227 	cfg->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_RESOURCE_CONFIG,
4228 						 sizeof(*cfg));
4229 
4230 	ptr += sizeof(*cfg);
4231 #if defined(__linux__)
4232 	host_mem_chunks = ptr + TLV_HDR_SIZE;
4233 #elif defined(__FreeBSD__)
4234 	host_mem_chunks = (void *)(ptr + TLV_HDR_SIZE);
4235 #endif
4236 	len = sizeof(struct ath12k_wmi_host_mem_chunk_params);
4237 
4238 	for (idx = 0; idx < arg->num_mem_chunks; ++idx) {
4239 		host_mem_chunks[idx].tlv_header =
4240 			ath12k_wmi_tlv_hdr(WMI_TAG_WLAN_HOST_MEMORY_CHUNK,
4241 					   len);
4242 
4243 		host_mem_chunks[idx].ptr = cpu_to_le32(arg->mem_chunks[idx].paddr);
4244 		host_mem_chunks[idx].size = cpu_to_le32(arg->mem_chunks[idx].len);
4245 		host_mem_chunks[idx].req_id = cpu_to_le32(arg->mem_chunks[idx].req_id);
4246 
4247 		ath12k_dbg(ab, ATH12K_DBG_WMI,
4248 #if defined(__linux__)
4249 			   "WMI host mem chunk req_id %d paddr 0x%llx len %d\n",
4250 #elif defined(__FreeBSD__)
4251 			   "WMI host mem chunk req_id %d paddr 0x%jx len %d\n",
4252 #endif
4253 			   arg->mem_chunks[idx].req_id,
4254 #if defined(__linux__)
4255 			   (u64)arg->mem_chunks[idx].paddr,
4256 #elif defined(__FreeBSD__)
4257 			   (uintmax_t)arg->mem_chunks[idx].paddr,
4258 #endif
4259 			   arg->mem_chunks[idx].len);
4260 	}
4261 	cmd->num_host_mem_chunks = cpu_to_le32(arg->num_mem_chunks);
4262 	len = sizeof(struct ath12k_wmi_host_mem_chunk_params) * arg->num_mem_chunks;
4263 
4264 	/* num_mem_chunks is zero */
4265 #if defined(__linux__)
4266 	tlv = ptr;
4267 #elif defined(__FreeBSD__)
4268 	tlv = (void *)ptr;
4269 #endif
4270 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
4271 	ptr += TLV_HDR_SIZE + len;
4272 
4273 	if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
4274 		hw_mode = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)ptr;
4275 		hw_mode->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
4276 							     sizeof(*hw_mode));
4277 
4278 		hw_mode->hw_mode_index = cpu_to_le32(arg->hw_mode_id);
4279 		hw_mode->num_band_to_mac = cpu_to_le32(arg->num_band_to_mac);
4280 
4281 		ptr += sizeof(*hw_mode);
4282 
4283 		len = arg->num_band_to_mac * sizeof(*band_to_mac);
4284 #if defined(__linux__)
4285 		tlv = ptr;
4286 #elif defined(__FreeBSD__)
4287 		tlv = (void *)ptr;
4288 #endif
4289 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
4290 
4291 		ptr += TLV_HDR_SIZE;
4292 		len = sizeof(*band_to_mac);
4293 
4294 		for (idx = 0; idx < arg->num_band_to_mac; idx++) {
4295 			band_to_mac = (void *)ptr;
4296 
4297 			band_to_mac->tlv_header =
4298 				ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BAND_TO_MAC,
4299 						       len);
4300 			band_to_mac->pdev_id = cpu_to_le32(arg->band_to_mac[idx].pdev_id);
4301 			band_to_mac->start_freq =
4302 				cpu_to_le32(arg->band_to_mac[idx].start_freq);
4303 			band_to_mac->end_freq =
4304 				cpu_to_le32(arg->band_to_mac[idx].end_freq);
4305 			ptr += sizeof(*band_to_mac);
4306 		}
4307 	}
4308 
4309 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID);
4310 	if (ret) {
4311 		ath12k_warn(ab, "failed to send WMI_INIT_CMDID\n");
4312 		dev_kfree_skb(skb);
4313 	}
4314 
4315 	return ret;
4316 }
4317 
ath12k_wmi_pdev_lro_cfg(struct ath12k * ar,int pdev_id)4318 int ath12k_wmi_pdev_lro_cfg(struct ath12k *ar,
4319 			    int pdev_id)
4320 {
4321 	struct ath12k_wmi_pdev_lro_config_cmd *cmd;
4322 	struct sk_buff *skb;
4323 	int ret;
4324 
4325 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
4326 	if (!skb)
4327 		return -ENOMEM;
4328 
4329 	cmd = (struct ath12k_wmi_pdev_lro_config_cmd *)skb->data;
4330 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_LRO_INFO_CMD,
4331 						 sizeof(*cmd));
4332 
4333 	get_random_bytes(cmd->th_4, sizeof(cmd->th_4));
4334 	get_random_bytes(cmd->th_6, sizeof(cmd->th_6));
4335 
4336 	cmd->pdev_id = cpu_to_le32(pdev_id);
4337 
4338 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
4339 		   "WMI lro cfg cmd pdev_id 0x%x\n", pdev_id);
4340 
4341 	ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_LRO_CONFIG_CMDID);
4342 	if (ret) {
4343 		ath12k_warn(ar->ab,
4344 			    "failed to send lro cfg req wmi cmd\n");
4345 		goto err;
4346 	}
4347 
4348 	return 0;
4349 err:
4350 	dev_kfree_skb(skb);
4351 	return ret;
4352 }
4353 
ath12k_wmi_wait_for_service_ready(struct ath12k_base * ab)4354 int ath12k_wmi_wait_for_service_ready(struct ath12k_base *ab)
4355 {
4356 	unsigned long time_left;
4357 
4358 	time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready,
4359 						WMI_SERVICE_READY_TIMEOUT_HZ);
4360 	if (!time_left)
4361 		return -ETIMEDOUT;
4362 
4363 	return 0;
4364 }
4365 
ath12k_wmi_wait_for_unified_ready(struct ath12k_base * ab)4366 int ath12k_wmi_wait_for_unified_ready(struct ath12k_base *ab)
4367 {
4368 	unsigned long time_left;
4369 
4370 	time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready,
4371 						WMI_SERVICE_READY_TIMEOUT_HZ);
4372 	if (!time_left)
4373 		return -ETIMEDOUT;
4374 
4375 	return 0;
4376 }
4377 
ath12k_wmi_set_hw_mode(struct ath12k_base * ab,enum wmi_host_hw_mode_config_type mode)4378 int ath12k_wmi_set_hw_mode(struct ath12k_base *ab,
4379 			   enum wmi_host_hw_mode_config_type mode)
4380 {
4381 	struct ath12k_wmi_pdev_set_hw_mode_cmd *cmd;
4382 	struct sk_buff *skb;
4383 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
4384 	int len;
4385 	int ret;
4386 
4387 	len = sizeof(*cmd);
4388 
4389 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
4390 	if (!skb)
4391 		return -ENOMEM;
4392 
4393 	cmd = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)skb->data;
4394 
4395 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
4396 						 sizeof(*cmd));
4397 
4398 	cmd->pdev_id = WMI_PDEV_ID_SOC;
4399 	cmd->hw_mode_index = cpu_to_le32(mode);
4400 
4401 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_PDEV_SET_HW_MODE_CMDID);
4402 	if (ret) {
4403 		ath12k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n");
4404 		dev_kfree_skb(skb);
4405 	}
4406 
4407 	return ret;
4408 }
4409 
int ath12k_wmi_cmd_init(struct ath12k_base *ab)
{
	/* Assemble the WMI INIT command arguments (resource config, host
	 * memory chunks, hw mode, band-to-mac map) and send them to fw.
	 */
	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
	struct ath12k_wmi_init_cmd_arg arg = {};

	/* advertise host support for extended reg country-code events
	 * only when the firmware offers the matching service
	 */
	if (test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
		     ab->wmi_ab.svc_map))
		arg.res_cfg.is_reg_cc_ext_event_supported = true;

	/* hw-specific hook fills in the resource-config defaults; fields
	 * of res_cfg are read below, so this must come first
	 */
	ab->hw_params->wmi_init(ab, &arg.res_cfg);
	ab->wow.wmi_conf_rx_decap_mode = arg.res_cfg.rx_decap_mode;

	arg.num_mem_chunks = wmi_ab->num_mem_chunks;
	arg.hw_mode_id = wmi_ab->preferred_hw_mode;
	arg.mem_chunks = wmi_ab->mem_chunks;

	/* single-pdev targets take no explicit hw mode in INIT */
	if (ab->hw_params->single_pdev_only)
		arg.hw_mode_id = WMI_HOST_HW_MODE_MAX;

	arg.num_band_to_mac = ab->num_radios;
	ath12k_fill_band_to_mac_param(ab, arg.band_to_mac);

	ab->dp.peer_metadata_ver = arg.res_cfg.peer_metadata_ver;

	return ath12k_init_cmd_send(&wmi_ab->wmi[0], &arg);
}
4436 
int ath12k_wmi_vdev_spectral_conf(struct ath12k *ar,
				  struct ath12k_wmi_vdev_spectral_conf_arg *arg)
{
	/* Marshal the spectral scan configuration for a vdev into a WMI
	 * TLV command and send WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID.
	 * Pure field-by-field host-to-LE copy; no field is transformed.
	 */
	struct ath12k_wmi_vdev_spectral_conf_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath12k_wmi_vdev_spectral_conf_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->scan_count = cpu_to_le32(arg->scan_count);
	cmd->scan_period = cpu_to_le32(arg->scan_period);
	cmd->scan_priority = cpu_to_le32(arg->scan_priority);
	cmd->scan_fft_size = cpu_to_le32(arg->scan_fft_size);
	cmd->scan_gc_ena = cpu_to_le32(arg->scan_gc_ena);
	cmd->scan_restart_ena = cpu_to_le32(arg->scan_restart_ena);
	cmd->scan_noise_floor_ref = cpu_to_le32(arg->scan_noise_floor_ref);
	cmd->scan_init_delay = cpu_to_le32(arg->scan_init_delay);
	cmd->scan_nb_tone_thr = cpu_to_le32(arg->scan_nb_tone_thr);
	cmd->scan_str_bin_thr = cpu_to_le32(arg->scan_str_bin_thr);
	cmd->scan_wb_rpt_mode = cpu_to_le32(arg->scan_wb_rpt_mode);
	cmd->scan_rssi_rpt_mode = cpu_to_le32(arg->scan_rssi_rpt_mode);
	cmd->scan_rssi_thr = cpu_to_le32(arg->scan_rssi_thr);
	cmd->scan_pwr_format = cpu_to_le32(arg->scan_pwr_format);
	cmd->scan_rpt_mode = cpu_to_le32(arg->scan_rpt_mode);
	cmd->scan_bin_scale = cpu_to_le32(arg->scan_bin_scale);
	cmd->scan_dbm_adj = cpu_to_le32(arg->scan_dbm_adj);
	cmd->scan_chn_mask = cpu_to_le32(arg->scan_chn_mask);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI spectral scan config cmd vdev_id 0x%x\n",
		   arg->vdev_id);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
				  WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send spectral scan config wmi cmd\n");
		goto err;
	}

	return 0;
err:
	/* skb ownership remains ours when cmd_send fails */
	dev_kfree_skb(skb);
	return ret;
}
4488 
int ath12k_wmi_vdev_spectral_enable(struct ath12k *ar, u32 vdev_id,
				    u32 trigger, u32 enable)
{
	/* Send WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID with the given
	 * trigger/enable controls for one vdev.
	 */
	struct ath12k_wmi_vdev_spectral_enable_cmd *en_cmd;
	struct sk_buff *msg;
	int ret;

	msg = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*en_cmd));
	if (!msg)
		return -ENOMEM;

	en_cmd = (struct ath12k_wmi_vdev_spectral_enable_cmd *)msg->data;
	en_cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD,
						    sizeof(*en_cmd));
	en_cmd->vdev_id = cpu_to_le32(vdev_id);
	en_cmd->trigger_cmd = cpu_to_le32(trigger);
	en_cmd->enable_cmd = cpu_to_le32(enable);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI spectral enable cmd vdev id 0x%x\n",
		   vdev_id);

	ret = ath12k_wmi_cmd_send(ar->wmi, msg,
				  WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID);
	if (!ret)
		return 0;

	/* on failure the skb is still owned by us */
	ath12k_warn(ar->ab,
		    "failed to send spectral enable wmi cmd\n");
	dev_kfree_skb(msg);
	return ret;
}
4525 
int ath12k_wmi_pdev_dma_ring_cfg(struct ath12k *ar,
				 struct ath12k_wmi_pdev_dma_ring_cfg_arg *arg)
{
	/* Marshal a DMA (direct buffer) ring configuration request into a
	 * WMI TLV command and send WMI_PDEV_DMA_RING_CFG_REQ_CMDID.
	 * Pure field-by-field host-to-LE copy of the ring addresses,
	 * head/tail index addresses, and sizing parameters.
	 */
	struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DMA_RING_CFG_REQ,
						 sizeof(*cmd));

	cmd->pdev_id = cpu_to_le32(arg->pdev_id);
	cmd->module_id = cpu_to_le32(arg->module_id);
	cmd->base_paddr_lo = cpu_to_le32(arg->base_paddr_lo);
	cmd->base_paddr_hi = cpu_to_le32(arg->base_paddr_hi);
	cmd->head_idx_paddr_lo = cpu_to_le32(arg->head_idx_paddr_lo);
	cmd->head_idx_paddr_hi = cpu_to_le32(arg->head_idx_paddr_hi);
	cmd->tail_idx_paddr_lo = cpu_to_le32(arg->tail_idx_paddr_lo);
	cmd->tail_idx_paddr_hi = cpu_to_le32(arg->tail_idx_paddr_hi);
	cmd->num_elems = cpu_to_le32(arg->num_elems);
	cmd->buf_size = cpu_to_le32(arg->buf_size);
	cmd->num_resp_per_event = cpu_to_le32(arg->num_resp_per_event);
	cmd->event_timeout_ms = cpu_to_le32(arg->event_timeout_ms);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI DMA ring cfg req cmd pdev_id 0x%x\n",
		   arg->pdev_id);

	ret = ath12k_wmi_cmd_send(ar->wmi, skb,
				  WMI_PDEV_DMA_RING_CFG_REQ_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to send dma ring cfg req wmi cmd\n");
		goto err;
	}

	return 0;
err:
	/* skb ownership remains ours when cmd_send fails */
	dev_kfree_skb(skb);
	return ret;
}
4571 
static int ath12k_wmi_dma_buf_entry_parse(struct ath12k_base *soc,
					  u16 tag, u16 len,
					  const void *ptr, void *data)
{
	/* TLV iterator callback: validate and count one buffer-release
	 * entry. The fixed header (parsed earlier into arg->fixed) bounds
	 * how many entries may follow.
	 */
	struct ath12k_wmi_dma_buf_release_arg *arg = data;
	u32 max_entries = le32_to_cpu(arg->fixed.num_buf_release_entry);

	if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY)
		return -EPROTO;

	if (arg->num_buf_entry >= max_entries)
		return -ENOBUFS;

	arg->num_buf_entry++;

	return 0;
}
4587 
static int ath12k_wmi_dma_buf_meta_parse(struct ath12k_base *soc,
					 u16 tag, u16 len,
					 const void *ptr, void *data)
{
	/* TLV iterator callback: validate and count one spectral meta-data
	 * entry, bounded by the count advertised in the fixed header.
	 */
	struct ath12k_wmi_dma_buf_release_arg *arg = data;
	u32 max_meta = le32_to_cpu(arg->fixed.num_meta_data_entry);

	if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA)
		return -EPROTO;

	if (arg->num_meta >= max_meta)
		return -ENOBUFS;

	arg->num_meta++;

	return 0;
}
4604 
static int ath12k_wmi_dma_buf_parse(struct ath12k_base *ab,
				    u16 tag, u16 len,
				    const void *ptr, void *data)
{
	/* Top-level TLV parser for the DMA buffer release event.
	 *
	 * The event carries one fixed-params TLV plus two
	 * WMI_TAG_ARRAY_STRUCT TLVs whose meaning depends purely on
	 * order: first the released buffer entries, then the spectral
	 * meta data. buf_entry_done/meta_data_done track which array has
	 * already been consumed, so this callback is stateful across the
	 * iteration.
	 */
	struct ath12k_wmi_dma_buf_release_arg *arg = data;
	const struct ath12k_wmi_dma_buf_release_fixed_params *fixed;
	u32 pdev_id;
	int ret;

	switch (tag) {
	case WMI_TAG_DMA_BUF_RELEASE:
		fixed = ptr;
		arg->fixed = *fixed;
		/* translate the firmware MAC id into the host pdev id */
		pdev_id = DP_HW2SW_MACID(le32_to_cpu(fixed->pdev_id));
		arg->fixed.pdev_id = cpu_to_le32(pdev_id);
		break;
	case WMI_TAG_ARRAY_STRUCT:
		if (!arg->buf_entry_done) {
			/* first array: the released buffer entries */
			arg->num_buf_entry = 0;
			arg->buf_entry = ptr;

			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
						  ath12k_wmi_dma_buf_entry_parse,
						  arg);
			if (ret) {
				ath12k_warn(ab, "failed to parse dma buf entry tlv %d\n",
					    ret);
				return ret;
			}

			arg->buf_entry_done = true;
		} else if (!arg->meta_data_done) {
			/* second array: spectral meta-data entries */
			arg->num_meta = 0;
			arg->meta_data = ptr;

			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
						  ath12k_wmi_dma_buf_meta_parse,
						  arg);
			if (ret) {
				ath12k_warn(ab, "failed to parse dma buf meta tlv %d\n",
					    ret);
				return ret;
			}

			arg->meta_data_done = true;
		}
		break;
	default:
		/* unknown tags are skipped, not treated as errors */
		break;
	}
	return 0;
}
4657 
ath12k_wmi_pdev_dma_ring_buf_release_event(struct ath12k_base * ab,struct sk_buff * skb)4658 static void ath12k_wmi_pdev_dma_ring_buf_release_event(struct ath12k_base *ab,
4659 						       struct sk_buff *skb)
4660 {
4661 	struct ath12k_wmi_dma_buf_release_arg arg = {};
4662 	struct ath12k_dbring_buf_release_event param;
4663 	int ret;
4664 
4665 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
4666 				  ath12k_wmi_dma_buf_parse,
4667 				  &arg);
4668 	if (ret) {
4669 		ath12k_warn(ab, "failed to parse dma buf release tlv %d\n", ret);
4670 		return;
4671 	}
4672 
4673 	param.fixed = arg.fixed;
4674 	param.buf_entry = arg.buf_entry;
4675 	param.num_buf_entry = arg.num_buf_entry;
4676 	param.meta_data = arg.meta_data;
4677 	param.num_meta = arg.num_meta;
4678 
4679 	ret = ath12k_dbring_buffer_release_event(ab, &param);
4680 	if (ret) {
4681 		ath12k_warn(ab, "failed to handle dma buf release event %d\n", ret);
4682 		return;
4683 	}
4684 }
4685 
static int ath12k_wmi_hw_mode_caps_parse(struct ath12k_base *soc,
					 u16 tag, u16 len,
					 const void *ptr, void *data)
{
	/* TLV iterator callback: account one hw-mode capability entry and
	 * accumulate the total PHY count across all hw modes.
	 */
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	struct ath12k_wmi_hw_mode_cap_params *cap;

	if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
		return -EPROTO;

	if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->arg.num_hw_modes)
		return -ENOBUFS;

	/* ptr addresses the hw_mode_id member; recover the full struct */
	cap = container_of(ptr, struct ath12k_wmi_hw_mode_cap_params,
			   hw_mode_id);
	svc_rdy_ext->n_hw_mode_caps++;

	/* fls() of the phy bitmap yields the PHY count for this mode
	 * (assumes the map is contiguous from bit 0)
	 */
	svc_rdy_ext->tot_phy_id += fls(le32_to_cpu(cap->phy_id_map));

	return 0;
}
4709 
static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc,
				   u16 len, const void *ptr, void *data)
{
	/* Parse the hw-mode capability array and select the preferred hw
	 * mode: each known mode is ranked via ath12k_hw_mode_pri_map
	 * (lower value wins) against the current preference, and the
	 * winner's capabilities are cached in pref_hw_mode_caps for the
	 * later MAC/PHY capability extraction.
	 */
	struct ath12k_svc_ext_info *svc_ext_info = &soc->wmi_ab.svc_ext_info;
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
	enum wmi_host_hw_mode_config_type mode, pref;
	u32 i;
	int ret;

	svc_rdy_ext->n_hw_mode_caps = 0;
	svc_rdy_ext->hw_mode_caps = ptr;

	/* count the entries and total PHYs via the per-entry callback */
	ret = ath12k_wmi_tlv_iter(soc, ptr, len,
				  ath12k_wmi_hw_mode_caps_parse,
				  svc_rdy_ext);
	if (ret) {
		ath12k_warn(soc, "failed to parse tlv %d\n", ret);
		return ret;
	}

	for (i = 0 ; i < svc_rdy_ext->n_hw_mode_caps; i++) {
		hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
		mode = le32_to_cpu(hw_mode_caps->hw_mode_id);

		/* skip modes the host does not know about */
		if (mode >= WMI_HOST_HW_MODE_MAX)
			continue;

		pref = soc->wmi_ab.preferred_hw_mode;

		if (ath12k_hw_mode_pri_map[mode] < ath12k_hw_mode_pri_map[pref]) {
			svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
			soc->wmi_ab.preferred_hw_mode = mode;
		}
	}

	svc_ext_info->num_hw_modes = svc_rdy_ext->n_hw_mode_caps;

	ath12k_dbg(soc, ATH12K_DBG_WMI, "num hw modes %u preferred_hw_mode %d\n",
		   svc_ext_info->num_hw_modes, soc->wmi_ab.preferred_hw_mode);

	/* firmware advertised no mode usable by the host */
	if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
		return -EINVAL;

	return 0;
}
4756 
static int ath12k_wmi_mac_phy_caps_parse(struct ath12k_base *soc,
					 u16 tag, u16 len,
					 const void *ptr, void *data)
{
	/* TLV iterator callback: copy one MAC/PHY capability entry.
	 *
	 * On the first entry a flat array sized for tot_phy_id entries is
	 * allocated (GFP_ATOMIC: runs from the WMI event path); it is
	 * freed by ath12k_service_ready_ext_event() once parsing is done.
	 *
	 * NOTE(review): the allocation is tot_phy_id * clamped-len, but
	 * mac_phy_caps + n advances by full struct stride; this relies on
	 * the firmware TLV length matching sizeof(struct
	 * ath12k_wmi_mac_phy_caps_params) — if it were ever smaller, the
	 * later copies would land past the allocation. Verify against the
	 * firmware interface.
	 */
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;

	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
		return -EPROTO;

	if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
		return -ENOBUFS;

	/* never copy more than the host structure can hold */
	len = min_t(u16, len, sizeof(struct ath12k_wmi_mac_phy_caps_params));
	if (!svc_rdy_ext->n_mac_phy_caps) {
		svc_rdy_ext->mac_phy_caps = kzalloc((svc_rdy_ext->tot_phy_id) * len,
						    GFP_ATOMIC);
		if (!svc_rdy_ext->mac_phy_caps)
			return -ENOMEM;
	}

	memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len);
	svc_rdy_ext->n_mac_phy_caps++;
	return 0;
}
4781 
static int ath12k_wmi_ext_hal_reg_caps_parse(struct ath12k_base *soc,
					     u16 tag, u16 len,
					     const void *ptr, void *data)
{
	/* TLV iterator callback: count one extended HAL register
	 * capability entry, bounded by the PHY count from the SOC HAL
	 * register capability TLV.
	 */
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	u32 num_phy = svc_rdy_ext->arg.num_phy;

	if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
		return -EPROTO;

	if (svc_rdy_ext->n_ext_hal_reg_caps >= num_phy)
		return -ENOBUFS;

	svc_rdy_ext->n_ext_hal_reg_caps++;

	return 0;
}
4797 
static int ath12k_wmi_ext_hal_reg_caps(struct ath12k_base *soc,
				       u16 len, const void *ptr, void *data)
{
	/* Parse the extended HAL register capability array and store one
	 * regulatory capability record per PHY into soc->hal_reg_cap,
	 * indexed by the PHY id each record reports.
	 */
	struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	struct ath12k_wmi_hal_reg_capabilities_ext_arg reg_cap;
	int ret;
	u32 i;

	svc_rdy_ext->n_ext_hal_reg_caps = 0;
	svc_rdy_ext->ext_hal_reg_caps = ptr;
	/* validate tags and count entries before extracting anything */
	ret = ath12k_wmi_tlv_iter(soc, ptr, len,
				  ath12k_wmi_ext_hal_reg_caps_parse,
				  svc_rdy_ext);
	if (ret) {
		ath12k_warn(soc, "failed to parse tlv %d\n", ret);
		return ret;
	}

	for (i = 0; i < svc_rdy_ext->arg.num_phy; i++) {
		ret = ath12k_pull_reg_cap_svc_rdy_ext(wmi_handle,
						      svc_rdy_ext->soc_hal_reg_caps,
						      svc_rdy_ext->ext_hal_reg_caps, i,
						      &reg_cap);
		if (ret) {
			ath12k_warn(soc, "failed to extract reg cap %d\n", i);
			return ret;
		}

		/* phy_id indexes the fixed-size hal_reg_cap array */
		if (reg_cap.phy_id >= MAX_RADIOS) {
			ath12k_warn(soc, "unexpected phy id %u\n", reg_cap.phy_id);
			return -EINVAL;
		}

		soc->hal_reg_cap[reg_cap.phy_id] = reg_cap;
	}
	return 0;
}
4836 
static int ath12k_wmi_ext_soc_hal_reg_caps_parse(struct ath12k_base *soc,
						 u16 len, const void *ptr,
						 void *data)
{
	/* Handle the SOC HAL register capability TLV: record the total PHY
	 * count, then walk the preferred hw mode's PHY bitmap and extract
	 * one MAC/PHY capability per set bit into a host pdev slot.
	 */
	struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	u8 hw_mode_id = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.hw_mode_id);
	u32 phy_id_map;
	int pdev_index = 0;
	int ret;

	svc_rdy_ext->soc_hal_reg_caps = ptr;
	svc_rdy_ext->arg.num_phy = le32_to_cpu(svc_rdy_ext->soc_hal_reg_caps->num_phy);

	soc->num_radios = 0;
	phy_id_map = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.phy_id_map);
	soc->fw_pdev_count = 0;

	/* one radio per set bit, bounded by the host pdev array size */
	while (phy_id_map && soc->num_radios < MAX_RADIOS) {
		ret = ath12k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
							    svc_rdy_ext,
							    hw_mode_id, soc->num_radios,
							    &soc->pdevs[pdev_index]);
		if (ret) {
			ath12k_warn(soc, "failed to extract mac caps, idx :%d\n",
				    soc->num_radios);
			return ret;
		}

		soc->num_radios++;

		/* For single_pdev_only targets,
		 * save mac_phy capability in the same pdev
		 */
		if (soc->hw_params->single_pdev_only)
			pdev_index = 0;
		else
			pdev_index = soc->num_radios;

		/* TODO: mac_phy_cap prints */
		phy_id_map >>= 1;
	}

	/* single-pdev firmware exposes exactly one host pdev */
	if (soc->hw_params->single_pdev_only) {
		soc->num_radios = 1;
		soc->pdevs[0].pdev_id = 0;
	}

	return 0;
}
4887 
static int ath12k_wmi_dma_ring_caps_parse(struct ath12k_base *soc,
					  u16 tag, u16 len,
					  const void *ptr, void *data)
{
	/* TLV iterator callback: only count DMA ring capability entries;
	 * anything else in this array is a protocol error.
	 */
	struct ath12k_wmi_dma_ring_caps_parse *parse = data;

	if (tag == WMI_TAG_DMA_RING_CAPABILITIES) {
		parse->n_dma_ring_caps++;
		return 0;
	}

	return -EPROTO;
}
4900 
static int ath12k_wmi_alloc_dbring_caps(struct ath12k_base *ab,
					u32 num_cap)
{
	/* Allocate a zeroed table of num_cap direct-buffer ring capability
	 * entries. GFP_ATOMIC because this runs from the WMI event parsing
	 * path. kcalloc() replaces the open-coded multiply + kzalloc() so
	 * the size computation is overflow-checked; semantics (zeroed
	 * memory, NULL on failure) are otherwise identical.
	 */
	void *ptr;

	ptr = kcalloc(num_cap, sizeof(struct ath12k_dbring_cap), GFP_ATOMIC);
	if (!ptr)
		return -ENOMEM;

	ab->db_caps = ptr;
	ab->num_db_cap = num_cap;

	return 0;
}
4917 
ath12k_wmi_free_dbring_caps(struct ath12k_base * ab)4918 static void ath12k_wmi_free_dbring_caps(struct ath12k_base *ab)
4919 {
4920 	kfree(ab->db_caps);
4921 	ab->db_caps = NULL;
4922 	ab->num_db_cap = 0;
4923 }
4924 
static int ath12k_wmi_dma_ring_caps(struct ath12k_base *ab,
				    u16 len, const void *ptr, void *data)
{
	/* Parse the DMA ring capability array: count the entries, allocate
	 * the host capability table once, and convert each firmware record
	 * (module id, pdev id, element/buffer sizing) to host byte order.
	 */
	struct ath12k_wmi_dma_ring_caps_parse *dma_caps_parse = data;
#if defined(__linux__)
	struct ath12k_wmi_dma_ring_caps_params *dma_caps;
#elif defined(__FreeBSD__)
	const struct ath12k_wmi_dma_ring_caps_params *dma_caps;
#endif
	struct ath12k_dbring_cap *dir_buff_caps;
	int ret;
	u32 i;

	dma_caps_parse->n_dma_ring_caps = 0;
#if defined(__linux__)
	dma_caps = (struct ath12k_wmi_dma_ring_caps_params *)ptr;
#elif defined(__FreeBSD__)
	dma_caps = (const struct ath12k_wmi_dma_ring_caps_params *)ptr;
#endif
	/* first pass only counts (and tag-validates) the entries */
	ret = ath12k_wmi_tlv_iter(ab, ptr, len,
				  ath12k_wmi_dma_ring_caps_parse,
				  dma_caps_parse);
	if (ret) {
		ath12k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret);
		return ret;
	}

	if (!dma_caps_parse->n_dma_ring_caps)
		return 0;

	/* caps may arrive in both ext and ext2 events; keep the first set */
	if (ab->num_db_cap) {
		ath12k_warn(ab, "Already processed, so ignoring dma ring caps\n");
		return 0;
	}

	ret = ath12k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps);
	if (ret)
		return ret;

	dir_buff_caps = ab->db_caps;
	for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
		if (le32_to_cpu(dma_caps[i].module_id) >= WMI_DIRECT_BUF_MAX) {
			ath12k_warn(ab, "Invalid module id %d\n",
				    le32_to_cpu(dma_caps[i].module_id));
			ret = -EINVAL;
			goto free_dir_buff;
		}

		dir_buff_caps[i].id = le32_to_cpu(dma_caps[i].module_id);
		/* firmware MAC id -> host pdev id */
		dir_buff_caps[i].pdev_id =
			DP_HW2SW_MACID(le32_to_cpu(dma_caps[i].pdev_id));
		dir_buff_caps[i].min_elem = le32_to_cpu(dma_caps[i].min_elem);
		dir_buff_caps[i].min_buf_sz = le32_to_cpu(dma_caps[i].min_buf_sz);
		dir_buff_caps[i].min_buf_align = le32_to_cpu(dma_caps[i].min_buf_align);
	}

	return 0;

free_dir_buff:
	/* drop the partially-filled table on any invalid entry */
	ath12k_wmi_free_dbring_caps(ab);
	return ret;
}
4987 
static void
ath12k_wmi_save_mac_phy_info(struct ath12k_base *ab,
			     const struct ath12k_wmi_mac_phy_caps_params *mac_phy_cap,
			     struct ath12k_svc_ext_mac_phy_info *mac_phy_info)
{
	/* Copy one firmware MAC/PHY capability record (PHY id, supported
	 * bands, 2/5 GHz frequency limits) into the host service-ext info
	 * structure, converting each field to host byte order.
	 */
	mac_phy_info->phy_id = __le32_to_cpu(mac_phy_cap->phy_id);
	mac_phy_info->supported_bands = __le32_to_cpu(mac_phy_cap->supported_bands);
	mac_phy_info->hw_freq_range.low_2ghz_freq =
					__le32_to_cpu(mac_phy_cap->low_2ghz_chan_freq);
	mac_phy_info->hw_freq_range.high_2ghz_freq =
					__le32_to_cpu(mac_phy_cap->high_2ghz_chan_freq);
	mac_phy_info->hw_freq_range.low_5ghz_freq =
					__le32_to_cpu(mac_phy_cap->low_5ghz_chan_freq);
	mac_phy_info->hw_freq_range.high_5ghz_freq =
					__le32_to_cpu(mac_phy_cap->high_5ghz_chan_freq);
}
5004 
static void
ath12k_wmi_save_all_mac_phy_info(struct ath12k_base *ab,
				 struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext)
{
	/* Flatten the parsed MAC/PHY capability entries into the
	 * service-ext info table, one record per (hw mode, PHY bit) pair.
	 *
	 * NOTE(review): mac_phy_cap is advanced linearly while iterating
	 * hw modes and their phy bitmaps — this assumes the caps array
	 * holds one entry per set PHY bit, in the order the hw modes were
	 * advertised; verify against the firmware TLV layout.
	 */
	struct ath12k_svc_ext_info *svc_ext_info = &ab->wmi_ab.svc_ext_info;
	const struct ath12k_wmi_mac_phy_caps_params *mac_phy_cap;
	const struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap;
	struct ath12k_svc_ext_mac_phy_info *mac_phy_info;
	u32 hw_mode_id, phy_bit_map;
	u8 hw_idx;

	mac_phy_info = &svc_ext_info->mac_phy_info[0];
	mac_phy_cap = svc_rdy_ext->mac_phy_caps;

	for (hw_idx = 0; hw_idx < svc_ext_info->num_hw_modes; hw_idx++) {
		hw_mode_cap = &svc_rdy_ext->hw_mode_caps[hw_idx];
		hw_mode_id = __le32_to_cpu(hw_mode_cap->hw_mode_id);
		phy_bit_map = __le32_to_cpu(hw_mode_cap->phy_id_map);

		/* one mac_phy_caps entry is consumed per set bit */
		while (phy_bit_map) {
			ath12k_wmi_save_mac_phy_info(ab, mac_phy_cap, mac_phy_info);
			mac_phy_info->hw_mode_config_type =
					le32_get_bits(hw_mode_cap->hw_mode_config_type,
						      WMI_HW_MODE_CAP_CFG_TYPE);
			ath12k_dbg(ab, ATH12K_DBG_WMI,
				   "hw_idx %u hw_mode_id %u hw_mode_config_type %u supported_bands %u phy_id %u 2 GHz [%u - %u] 5 GHz [%u - %u]\n",
				   hw_idx, hw_mode_id,
				   mac_phy_info->hw_mode_config_type,
				   mac_phy_info->supported_bands, mac_phy_info->phy_id,
				   mac_phy_info->hw_freq_range.low_2ghz_freq,
				   mac_phy_info->hw_freq_range.high_2ghz_freq,
				   mac_phy_info->hw_freq_range.low_5ghz_freq,
				   mac_phy_info->hw_freq_range.high_5ghz_freq);

			mac_phy_cap++;
			mac_phy_info++;

			phy_bit_map >>= 1;
		}
	}
}
5046 
static int ath12k_wmi_svc_rdy_ext_parse(struct ath12k_base *ab,
					u16 tag, u16 len,
					const void *ptr, void *data)
{
	/* Top-level TLV parser for the SERVICE_READY_EXT event.
	 *
	 * The event's WMI_TAG_ARRAY_STRUCT TLVs are positional: they
	 * arrive in the fixed order hw-mode caps, mac/phy caps, ext HAL
	 * reg caps, chainmask combos, chainmask caps, OEM DMA ring caps,
	 * DMA ring caps. The *_done flags in svc_rdy_ext track which of
	 * these arrays has already been consumed, so this callback is
	 * stateful across the iteration.
	 */
	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
	struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
	int ret;

	switch (tag) {
	case WMI_TAG_SERVICE_READY_EXT_EVENT:
		/* fixed parameters of the event */
		ret = ath12k_pull_svc_ready_ext(wmi_handle, ptr,
						&svc_rdy_ext->arg);
		if (ret) {
			ath12k_warn(ab, "unable to extract ext params\n");
			return ret;
		}
		break;

	case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
		svc_rdy_ext->hw_caps = ptr;
		svc_rdy_ext->arg.num_hw_modes =
			le32_to_cpu(svc_rdy_ext->hw_caps->num_hw_modes);
		break;

	case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
		ret = ath12k_wmi_ext_soc_hal_reg_caps_parse(ab, len, ptr,
							    svc_rdy_ext);
		if (ret)
			return ret;
		break;

	case WMI_TAG_ARRAY_STRUCT:
		if (!svc_rdy_ext->hw_mode_done) {
			/* 1st array: hw mode capabilities */
			ret = ath12k_wmi_hw_mode_caps(ab, len, ptr, svc_rdy_ext);
			if (ret)
				return ret;

			svc_rdy_ext->hw_mode_done = true;
		} else if (!svc_rdy_ext->mac_phy_done) {
			/* 2nd array: per-PHY MAC capabilities */
			svc_rdy_ext->n_mac_phy_caps = 0;
			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
						  ath12k_wmi_mac_phy_caps_parse,
						  svc_rdy_ext);
			if (ret) {
				ath12k_warn(ab, "failed to parse tlv %d\n", ret);
				return ret;
			}

			ath12k_wmi_save_all_mac_phy_info(ab, svc_rdy_ext);

			svc_rdy_ext->mac_phy_done = true;
		} else if (!svc_rdy_ext->ext_hal_reg_done) {
			/* 3rd array: extended HAL register capabilities */
			ret = ath12k_wmi_ext_hal_reg_caps(ab, len, ptr, svc_rdy_ext);
			if (ret)
				return ret;

			svc_rdy_ext->ext_hal_reg_done = true;
		} else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
			/* 4th-6th arrays are acknowledged but not parsed */
			svc_rdy_ext->mac_phy_chainmask_combo_done = true;
		} else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
			svc_rdy_ext->mac_phy_chainmask_cap_done = true;
		} else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
			svc_rdy_ext->oem_dma_ring_cap_done = true;
		} else if (!svc_rdy_ext->dma_ring_cap_done) {
			/* 7th array: direct-buffer DMA ring capabilities */
			ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
						       &svc_rdy_ext->dma_caps_parse);
			if (ret)
				return ret;

			svc_rdy_ext->dma_ring_cap_done = true;
		}
		break;

	default:
		break;
	}
	return 0;
}
5125 
ath12k_service_ready_ext_event(struct ath12k_base * ab,struct sk_buff * skb)5126 static int ath12k_service_ready_ext_event(struct ath12k_base *ab,
5127 					  struct sk_buff *skb)
5128 {
5129 	struct ath12k_wmi_svc_rdy_ext_parse svc_rdy_ext = { };
5130 	int ret;
5131 
5132 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
5133 				  ath12k_wmi_svc_rdy_ext_parse,
5134 				  &svc_rdy_ext);
5135 	if (ret) {
5136 		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
5137 		goto err;
5138 	}
5139 
5140 	if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map))
5141 		complete(&ab->wmi_ab.service_ready);
5142 
5143 	kfree(svc_rdy_ext.mac_phy_caps);
5144 	return 0;
5145 
5146 err:
5147 	kfree(svc_rdy_ext.mac_phy_caps);
5148 	ath12k_wmi_free_dbring_caps(ab);
5149 	return ret;
5150 }
5151 
ath12k_pull_svc_ready_ext2(struct ath12k_wmi_pdev * wmi_handle,const void * ptr,struct ath12k_wmi_svc_rdy_ext2_arg * arg)5152 static int ath12k_pull_svc_ready_ext2(struct ath12k_wmi_pdev *wmi_handle,
5153 				      const void *ptr,
5154 				      struct ath12k_wmi_svc_rdy_ext2_arg *arg)
5155 {
5156 	const struct wmi_service_ready_ext2_event *ev = ptr;
5157 
5158 	if (!ev)
5159 		return -EINVAL;
5160 
5161 	arg->reg_db_version = le32_to_cpu(ev->reg_db_version);
5162 	arg->hw_min_max_tx_power_2ghz = le32_to_cpu(ev->hw_min_max_tx_power_2ghz);
5163 	arg->hw_min_max_tx_power_5ghz = le32_to_cpu(ev->hw_min_max_tx_power_5ghz);
5164 	arg->chwidth_num_peer_caps = le32_to_cpu(ev->chwidth_num_peer_caps);
5165 	arg->preamble_puncture_bw = le32_to_cpu(ev->preamble_puncture_bw);
5166 	arg->max_user_per_ppdu_ofdma = le32_to_cpu(ev->max_user_per_ppdu_ofdma);
5167 	arg->max_user_per_ppdu_mumimo = le32_to_cpu(ev->max_user_per_ppdu_mumimo);
5168 	arg->target_cap_flags = le32_to_cpu(ev->target_cap_flags);
5169 	return 0;
5170 }
5171 
static void ath12k_wmi_eht_caps_parse(struct ath12k_pdev *pdev, u32 band,
				      const __le32 cap_mac_info[],
				      const __le32 cap_phy_info[],
				      const __le32 supp_mcs[],
				      const struct ath12k_wmi_ppe_threshold_params *ppet,
				       __le32 cap_info_internal)
{
	/* Convert the firmware EHT capability words for one band into the
	 * host capability structure.
	 *
	 * For 6 GHz, a 320 MHz support bit may already have been OR'ed
	 * into this band's phy info (see the single-pdev handling in
	 * ath12k_wmi_tlv_mac_phy_caps_ext_parse), so it is saved before
	 * the overwrite below and re-applied afterwards.
	 */
	struct ath12k_band_cap *cap_band = &pdev->cap.band[band];
	/* fix: initialize so the variable is never read uninitialized;
	 * previously it was set and read under two separately-duplicated
	 * band checks, a fragile maybe-uninitialized pattern
	 */
	u32 support_320mhz = 0;
	u8 i;

	if (band == NL80211_BAND_6GHZ)
		support_320mhz = cap_band->eht_cap_phy_info[0] &
					IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;

	for (i = 0; i < WMI_MAX_EHTCAP_MAC_SIZE; i++)
		cap_band->eht_cap_mac_info[i] = le32_to_cpu(cap_mac_info[i]);

	for (i = 0; i < WMI_MAX_EHTCAP_PHY_SIZE; i++)
		cap_band->eht_cap_phy_info[i] = le32_to_cpu(cap_phy_info[i]);

	/* re-apply the preserved 320 MHz flag after the overwrite */
	if (band == NL80211_BAND_6GHZ)
		cap_band->eht_cap_phy_info[0] |= support_320mhz;

	cap_band->eht_mcs_20_only = le32_to_cpu(supp_mcs[0]);
	cap_band->eht_mcs_80 = le32_to_cpu(supp_mcs[1]);
	/* the 160/320 MHz MCS words are not populated for 2 GHz */
	if (band != NL80211_BAND_2GHZ) {
		cap_band->eht_mcs_160 = le32_to_cpu(supp_mcs[2]);
		cap_band->eht_mcs_320 = le32_to_cpu(supp_mcs[3]);
	}

	cap_band->eht_ppet.numss_m1 = le32_to_cpu(ppet->numss_m1);
	cap_band->eht_ppet.ru_bit_mask = le32_to_cpu(ppet->ru_info);
	for (i = 0; i < WMI_MAX_NUM_SS; i++)
		cap_band->eht_ppet.ppet16_ppet8_ru3_ru0[i] =
			le32_to_cpu(ppet->ppet16_ppet8_ru3_ru0[i]);

	cap_band->eht_cap_info_internal = le32_to_cpu(cap_info_internal);
}
5211 
5212 static int
ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base * ab,const struct ath12k_wmi_caps_ext_params * caps,struct ath12k_pdev * pdev)5213 ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab,
5214 				      const struct ath12k_wmi_caps_ext_params *caps,
5215 				      struct ath12k_pdev *pdev)
5216 {
5217 	struct ath12k_band_cap *cap_band;
5218 	u32 bands, support_320mhz;
5219 	int i;
5220 
5221 	if (ab->hw_params->single_pdev_only) {
5222 		if (caps->hw_mode_id == WMI_HOST_HW_MODE_SINGLE) {
5223 			support_320mhz = le32_to_cpu(caps->eht_cap_phy_info_5ghz[0]) &
5224 				IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
5225 			cap_band = &pdev->cap.band[NL80211_BAND_6GHZ];
5226 			cap_band->eht_cap_phy_info[0] |= support_320mhz;
5227 			return 0;
5228 		}
5229 
5230 		for (i = 0; i < ab->fw_pdev_count; i++) {
5231 			struct ath12k_fw_pdev *fw_pdev = &ab->fw_pdev[i];
5232 
5233 			if (fw_pdev->pdev_id == ath12k_wmi_caps_ext_get_pdev_id(caps) &&
5234 			    fw_pdev->phy_id == le32_to_cpu(caps->phy_id)) {
5235 				bands = fw_pdev->supported_bands;
5236 				break;
5237 			}
5238 		}
5239 
5240 		if (i == ab->fw_pdev_count)
5241 			return -EINVAL;
5242 	} else {
5243 		bands = pdev->cap.supported_bands;
5244 	}
5245 
5246 	if (bands & WMI_HOST_WLAN_2GHZ_CAP) {
5247 		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_2GHZ,
5248 					  caps->eht_cap_mac_info_2ghz,
5249 					  caps->eht_cap_phy_info_2ghz,
5250 					  caps->eht_supp_mcs_ext_2ghz,
5251 					  &caps->eht_ppet_2ghz,
5252 					  caps->eht_cap_info_internal);
5253 	}
5254 
5255 	if (bands & WMI_HOST_WLAN_5GHZ_CAP) {
5256 		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_5GHZ,
5257 					  caps->eht_cap_mac_info_5ghz,
5258 					  caps->eht_cap_phy_info_5ghz,
5259 					  caps->eht_supp_mcs_ext_5ghz,
5260 					  &caps->eht_ppet_5ghz,
5261 					  caps->eht_cap_info_internal);
5262 
5263 		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_6GHZ,
5264 					  caps->eht_cap_mac_info_5ghz,
5265 					  caps->eht_cap_phy_info_5ghz,
5266 					  caps->eht_supp_mcs_ext_5ghz,
5267 					  &caps->eht_ppet_5ghz,
5268 					  caps->eht_cap_info_internal);
5269 	}
5270 
5271 	pdev->cap.eml_cap = le32_to_cpu(caps->eml_capability);
5272 	pdev->cap.mld_cap = le32_to_cpu(caps->mld_capability);
5273 
5274 	return 0;
5275 }
5276 
/* TLV iterator callback for WMI_TAG_MAC_PHY_CAPABILITIES_EXT entries.
 *
 * Locates the pdev the capability entry belongs to and hands it off to
 * ath12k_wmi_tlv_mac_phy_caps_ext_parse(). In single-pdev mode entries
 * for hw modes other than the preferred one (or SINGLE) are skipped.
 *
 * Returns 0 on success or when the entry is intentionally skipped,
 * -EPROTO on an unexpected tag, -EINVAL if no pdev matches, or the
 * error from the parse helper.
 */
static int ath12k_wmi_tlv_mac_phy_caps_ext(struct ath12k_base *ab, u16 tag,
					   u16 len, const void *ptr,
					   void *data)
{
	const struct ath12k_wmi_caps_ext_params *caps = ptr;
	int i = 0, ret;

	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES_EXT)
		return -EPROTO;

	if (ab->hw_params->single_pdev_only) {
		if (ab->wmi_ab.preferred_hw_mode != le32_to_cpu(caps->hw_mode_id) &&
		    caps->hw_mode_id != WMI_HOST_HW_MODE_SINGLE)
			return 0;
	} else {
		for (i = 0; i < ab->num_radios; i++) {
			if (ab->pdevs[i].pdev_id ==
			    ath12k_wmi_caps_ext_get_pdev_id(caps))
				break;
		}

		if (i == ab->num_radios)
			return -EINVAL;
	}

	ret = ath12k_wmi_tlv_mac_phy_caps_ext_parse(ab, caps, &ab->pdevs[i]);
	if (ret) {
		/* Fix: the pdev id and the error code were previously passed
		 * in the wrong order for the "for pdev %d: %d" format string.
		 */
		ath12k_warn(ab,
			    "failed to parse extended MAC PHY capabilities for pdev %d: %d\n",
			    ab->pdevs[i].pdev_id, ret);
		return ret;
	}

	return 0;
}
5312 
5313 static void
ath12k_wmi_update_freq_info(struct ath12k_base * ab,struct ath12k_svc_ext_mac_phy_info * mac_cap,enum ath12k_hw_mode mode,u32 phy_id)5314 ath12k_wmi_update_freq_info(struct ath12k_base *ab,
5315 			    struct ath12k_svc_ext_mac_phy_info *mac_cap,
5316 			    enum ath12k_hw_mode mode,
5317 			    u32 phy_id)
5318 {
5319 	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
5320 	struct ath12k_hw_mode_freq_range_arg *mac_range;
5321 
5322 	mac_range = &hw_mode_info->freq_range_caps[mode][phy_id];
5323 
5324 	if (mac_cap->supported_bands & WMI_HOST_WLAN_2GHZ_CAP) {
5325 		mac_range->low_2ghz_freq = max_t(u32,
5326 						 mac_cap->hw_freq_range.low_2ghz_freq,
5327 						 ATH12K_MIN_2GHZ_FREQ);
5328 		mac_range->high_2ghz_freq = mac_cap->hw_freq_range.high_2ghz_freq ?
5329 					    min_t(u32,
5330 						  mac_cap->hw_freq_range.high_2ghz_freq,
5331 						  ATH12K_MAX_2GHZ_FREQ) :
5332 					    ATH12K_MAX_2GHZ_FREQ;
5333 	}
5334 
5335 	if (mac_cap->supported_bands & WMI_HOST_WLAN_5GHZ_CAP) {
5336 		mac_range->low_5ghz_freq = max_t(u32,
5337 						 mac_cap->hw_freq_range.low_5ghz_freq,
5338 						 ATH12K_MIN_5GHZ_FREQ);
5339 		mac_range->high_5ghz_freq = mac_cap->hw_freq_range.high_5ghz_freq ?
5340 					    min_t(u32,
5341 						  mac_cap->hw_freq_range.high_5ghz_freq,
5342 						  ATH12K_MAX_6GHZ_FREQ) :
5343 					    ATH12K_MAX_6GHZ_FREQ;
5344 	}
5345 }
5346 
5347 static bool
ath12k_wmi_all_phy_range_updated(struct ath12k_base * ab,enum ath12k_hw_mode hwmode)5348 ath12k_wmi_all_phy_range_updated(struct ath12k_base *ab,
5349 				 enum ath12k_hw_mode hwmode)
5350 {
5351 	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
5352 	struct ath12k_hw_mode_freq_range_arg *mac_range;
5353 	u8 phy_id;
5354 
5355 	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
5356 		mac_range = &hw_mode_info->freq_range_caps[hwmode][phy_id];
5357 		/* modify SBS/DBS range only when both phy for DBS are filled */
5358 		if (!mac_range->low_2ghz_freq && !mac_range->low_5ghz_freq)
5359 			return false;
5360 	}
5361 
5362 	return true;
5363 }
5364 
ath12k_wmi_update_dbs_freq_info(struct ath12k_base * ab)5365 static void ath12k_wmi_update_dbs_freq_info(struct ath12k_base *ab)
5366 {
5367 	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
5368 	struct ath12k_hw_mode_freq_range_arg *mac_range;
5369 	u8 phy_id;
5370 
5371 	mac_range = hw_mode_info->freq_range_caps[ATH12K_HW_MODE_DBS];
5372 	/* Reset 5 GHz range for shared mac for DBS */
5373 	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
5374 		if (mac_range[phy_id].low_2ghz_freq &&
5375 		    mac_range[phy_id].low_5ghz_freq) {
5376 			mac_range[phy_id].low_5ghz_freq = 0;
5377 			mac_range[phy_id].high_5ghz_freq = 0;
5378 		}
5379 	}
5380 }
5381 
5382 static u32
ath12k_wmi_get_highest_5ghz_freq_from_range(struct ath12k_hw_mode_freq_range_arg * range)5383 ath12k_wmi_get_highest_5ghz_freq_from_range(struct ath12k_hw_mode_freq_range_arg *range)
5384 {
5385 	u32 highest_freq = 0;
5386 	u8 phy_id;
5387 
5388 	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
5389 		if (range[phy_id].high_5ghz_freq > highest_freq)
5390 			highest_freq = range[phy_id].high_5ghz_freq;
5391 	}
5392 
5393 	return highest_freq ? highest_freq : ATH12K_MAX_6GHZ_FREQ;
5394 }
5395 
5396 static u32
ath12k_wmi_get_lowest_5ghz_freq_from_range(struct ath12k_hw_mode_freq_range_arg * range)5397 ath12k_wmi_get_lowest_5ghz_freq_from_range(struct ath12k_hw_mode_freq_range_arg *range)
5398 {
5399 	u32 lowest_freq = 0;
5400 	u8 phy_id;
5401 
5402 	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
5403 		if ((!lowest_freq && range[phy_id].low_5ghz_freq) ||
5404 		    range[phy_id].low_5ghz_freq < lowest_freq)
5405 			lowest_freq = range[phy_id].low_5ghz_freq;
5406 	}
5407 
5408 	return lowest_freq ? lowest_freq : ATH12K_MIN_5GHZ_FREQ;
5409 }
5410 
5411 static void
ath12k_wmi_fill_upper_share_sbs_freq(struct ath12k_base * ab,u16 sbs_range_sep,struct ath12k_hw_mode_freq_range_arg * ref_freq)5412 ath12k_wmi_fill_upper_share_sbs_freq(struct ath12k_base *ab,
5413 				     u16 sbs_range_sep,
5414 				     struct ath12k_hw_mode_freq_range_arg *ref_freq)
5415 {
5416 	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
5417 	struct ath12k_hw_mode_freq_range_arg *upper_sbs_freq_range;
5418 	u8 phy_id;
5419 
5420 	upper_sbs_freq_range =
5421 			hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS_UPPER_SHARE];
5422 
5423 	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
5424 		upper_sbs_freq_range[phy_id].low_2ghz_freq =
5425 						ref_freq[phy_id].low_2ghz_freq;
5426 		upper_sbs_freq_range[phy_id].high_2ghz_freq =
5427 						ref_freq[phy_id].high_2ghz_freq;
5428 
5429 		/* update for shared mac */
5430 		if (upper_sbs_freq_range[phy_id].low_2ghz_freq) {
5431 			upper_sbs_freq_range[phy_id].low_5ghz_freq = sbs_range_sep + 10;
5432 			upper_sbs_freq_range[phy_id].high_5ghz_freq =
5433 				ath12k_wmi_get_highest_5ghz_freq_from_range(ref_freq);
5434 		} else {
5435 			upper_sbs_freq_range[phy_id].low_5ghz_freq =
5436 				ath12k_wmi_get_lowest_5ghz_freq_from_range(ref_freq);
5437 			upper_sbs_freq_range[phy_id].high_5ghz_freq = sbs_range_sep;
5438 		}
5439 	}
5440 }
5441 
5442 static void
ath12k_wmi_fill_lower_share_sbs_freq(struct ath12k_base * ab,u16 sbs_range_sep,struct ath12k_hw_mode_freq_range_arg * ref_freq)5443 ath12k_wmi_fill_lower_share_sbs_freq(struct ath12k_base *ab,
5444 				     u16 sbs_range_sep,
5445 				     struct ath12k_hw_mode_freq_range_arg *ref_freq)
5446 {
5447 	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
5448 	struct ath12k_hw_mode_freq_range_arg *lower_sbs_freq_range;
5449 	u8 phy_id;
5450 
5451 	lower_sbs_freq_range =
5452 			hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS_LOWER_SHARE];
5453 
5454 	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
5455 		lower_sbs_freq_range[phy_id].low_2ghz_freq =
5456 						ref_freq[phy_id].low_2ghz_freq;
5457 		lower_sbs_freq_range[phy_id].high_2ghz_freq =
5458 						ref_freq[phy_id].high_2ghz_freq;
5459 
5460 		/* update for shared mac */
5461 		if (lower_sbs_freq_range[phy_id].low_2ghz_freq) {
5462 			lower_sbs_freq_range[phy_id].low_5ghz_freq =
5463 				ath12k_wmi_get_lowest_5ghz_freq_from_range(ref_freq);
5464 			lower_sbs_freq_range[phy_id].high_5ghz_freq = sbs_range_sep;
5465 		} else {
5466 			lower_sbs_freq_range[phy_id].low_5ghz_freq = sbs_range_sep + 10;
5467 			lower_sbs_freq_range[phy_id].high_5ghz_freq =
5468 				ath12k_wmi_get_highest_5ghz_freq_from_range(ref_freq);
5469 		}
5470 	}
5471 }
5472 
ath12k_wmi_hw_mode_to_str(enum ath12k_hw_mode hw_mode)5473 static const char *ath12k_wmi_hw_mode_to_str(enum ath12k_hw_mode hw_mode)
5474 {
5475 	static const char * const mode_str[] = {
5476 		[ATH12K_HW_MODE_SMM] = "SMM",
5477 		[ATH12K_HW_MODE_DBS] = "DBS",
5478 		[ATH12K_HW_MODE_SBS] = "SBS",
5479 		[ATH12K_HW_MODE_SBS_UPPER_SHARE] = "SBS_UPPER_SHARE",
5480 		[ATH12K_HW_MODE_SBS_LOWER_SHARE] = "SBS_LOWER_SHARE",
5481 	};
5482 
5483 	if (hw_mode >= ARRAY_SIZE(mode_str))
5484 		return "Unknown";
5485 
5486 	return mode_str[hw_mode];
5487 }
5488 
5489 static void
ath12k_wmi_dump_freq_range_per_mac(struct ath12k_base * ab,struct ath12k_hw_mode_freq_range_arg * freq_range,enum ath12k_hw_mode hw_mode)5490 ath12k_wmi_dump_freq_range_per_mac(struct ath12k_base *ab,
5491 				   struct ath12k_hw_mode_freq_range_arg *freq_range,
5492 				   enum ath12k_hw_mode hw_mode)
5493 {
5494 	u8 i;
5495 
5496 	for (i = 0; i < MAX_RADIOS; i++)
5497 		if (freq_range[i].low_2ghz_freq || freq_range[i].low_5ghz_freq)
5498 			ath12k_dbg(ab, ATH12K_DBG_WMI,
5499 				   "frequency range: %s(%d) mac %d 2 GHz [%d - %d] 5 GHz [%d - %d]",
5500 				   ath12k_wmi_hw_mode_to_str(hw_mode),
5501 				   hw_mode, i,
5502 				   freq_range[i].low_2ghz_freq,
5503 				   freq_range[i].high_2ghz_freq,
5504 				   freq_range[i].low_5ghz_freq,
5505 				   freq_range[i].high_5ghz_freq);
5506 }
5507 
ath12k_wmi_dump_freq_range(struct ath12k_base * ab)5508 static void ath12k_wmi_dump_freq_range(struct ath12k_base *ab)
5509 {
5510 	struct ath12k_hw_mode_freq_range_arg *freq_range;
5511 	u8 i;
5512 
5513 	for (i = ATH12K_HW_MODE_SMM; i < ATH12K_HW_MODE_MAX; i++) {
5514 		freq_range = ab->wmi_ab.hw_mode_info.freq_range_caps[i];
5515 		ath12k_wmi_dump_freq_range_per_mac(ab, freq_range, i);
5516 	}
5517 }
5518 
/* Trim the 5 GHz range of a non-shared SBS mac so it does not overlap the
 * shared mac's 5 GHz range (keeping a 10 MHz gap between them).
 *
 * The mac that has both a 2.4 GHz and a 5 GHz range is the shared mac and
 * is left untouched. A mac with only 2.4 GHz is an invalid SBS layout.
 *
 * Returns 0 on success, -EINVAL for invalid range combinations.
 */
static int ath12k_wmi_modify_sbs_freq(struct ath12k_base *ab, u8 phy_id)
{
	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
	struct ath12k_hw_mode_freq_range_arg *non_shared, *shared;
	u8 shared_phy_id;

	non_shared = &hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS][phy_id];

	/* if SBS mac range has both 2.4 and 5 GHz ranges, i.e. shared phy_id
	 * keep the range as it is in SBS
	 */
	if (non_shared->low_2ghz_freq && non_shared->low_5ghz_freq)
		return 0;

	if (non_shared->low_2ghz_freq && !non_shared->low_5ghz_freq) {
		ath12k_err(ab, "Invalid DBS/SBS mode with only 2.4Ghz");
		ath12k_wmi_dump_freq_range_per_mac(ab, non_shared,
						   ATH12K_HW_MODE_SBS);
		return -EINVAL;
	}

	/* This mac is 5 GHz only, i.e. the non-shared phy; clip its range
	 * against the other (shared) mac.
	 */
	shared_phy_id = phy_id ? 0 : 1;
	shared = &hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS][shared_phy_id];

	if (shared->low_5ghz_freq > non_shared->low_5ghz_freq) {
		ath12k_dbg(ab, ATH12K_DBG_WMI, "high 5 GHz shared");
		/* If the shared mac lower 5 GHz frequency is greater than
		 * non-shared mac lower 5 GHz frequency then the shared mac has
		 * high 5 GHz shared with 2.4 GHz. So non-shared mac's 5 GHz high
		 * freq should be less than the shared mac's low 5 GHz freq.
		 */
		if (non_shared->high_5ghz_freq >= shared->low_5ghz_freq)
			non_shared->high_5ghz_freq =
				max_t(u32, shared->low_5ghz_freq - 10,
				      non_shared->low_5ghz_freq);
	} else if (shared->high_5ghz_freq < non_shared->high_5ghz_freq) {
		ath12k_dbg(ab, ATH12K_DBG_WMI, "low 5 GHz shared");
		/* If the shared mac high 5 GHz frequency is less than
		 * non-shared mac high 5 GHz frequency then the shared mac has
		 * low 5 GHz shared with 2.4 GHz. So non-shared mac's 5 GHz low
		 * freq should be greater than the shared mac's high 5 GHz freq.
		 */
		if (shared->high_5ghz_freq >= non_shared->low_5ghz_freq)
			non_shared->low_5ghz_freq =
				min_t(u32, shared->high_5ghz_freq + 10,
				      non_shared->high_5ghz_freq);
	} else {
		ath12k_warn(ab, "invalid SBS range with all 5 GHz shared");
		return -EINVAL;
	}

	return 0;
}
5580 
ath12k_wmi_update_sbs_freq_info(struct ath12k_base * ab)5581 static void ath12k_wmi_update_sbs_freq_info(struct ath12k_base *ab)
5582 {
5583 	struct ath12k_hw_mode_info *hw_mode_info = &ab->wmi_ab.hw_mode_info;
5584 	struct ath12k_hw_mode_freq_range_arg *mac_range;
5585 	u16 sbs_range_sep;
5586 	u8 phy_id;
5587 	int ret;
5588 
5589 	mac_range = hw_mode_info->freq_range_caps[ATH12K_HW_MODE_SBS];
5590 
5591 	/* If sbs_lower_band_end_freq has a value, then the frequency range
5592 	 * will be split using that value.
5593 	 */
5594 	sbs_range_sep = ab->wmi_ab.sbs_lower_band_end_freq;
5595 	if (sbs_range_sep) {
5596 		ath12k_wmi_fill_upper_share_sbs_freq(ab, sbs_range_sep,
5597 						     mac_range);
5598 		ath12k_wmi_fill_lower_share_sbs_freq(ab, sbs_range_sep,
5599 						     mac_range);
5600 		/* Hardware specifies the range boundary with sbs_range_sep,
5601 		 * (i.e. the boundary between 5 GHz high and 5 GHz low),
5602 		 * reset the original one to make sure it will not get used.
5603 		 */
5604 		memset(mac_range, 0, sizeof(*mac_range) * MAX_RADIOS);
5605 		return;
5606 	}
5607 
5608 	/* If sbs_lower_band_end_freq is not set that means firmware will send one
5609 	 * shared mac range and one non-shared mac range. so update that freq.
5610 	 */
5611 	for (phy_id = 0; phy_id < MAX_RADIOS; phy_id++) {
5612 		ret = ath12k_wmi_modify_sbs_freq(ab, phy_id);
5613 		if (ret) {
5614 			memset(mac_range, 0, sizeof(*mac_range) * MAX_RADIOS);
5615 			break;
5616 		}
5617 	}
5618 }
5619 
5620 static void
ath12k_wmi_update_mac_freq_info(struct ath12k_base * ab,enum wmi_host_hw_mode_config_type hw_config_type,u32 phy_id,struct ath12k_svc_ext_mac_phy_info * mac_cap)5621 ath12k_wmi_update_mac_freq_info(struct ath12k_base *ab,
5622 				enum wmi_host_hw_mode_config_type hw_config_type,
5623 				u32 phy_id,
5624 				struct ath12k_svc_ext_mac_phy_info *mac_cap)
5625 {
5626 	if (phy_id >= MAX_RADIOS) {
5627 		ath12k_err(ab, "mac more than two not supported: %d", phy_id);
5628 		return;
5629 	}
5630 
5631 	ath12k_dbg(ab, ATH12K_DBG_WMI,
5632 		   "hw_mode_cfg %d mac %d band 0x%x SBS cutoff freq %d 2 GHz [%d - %d] 5 GHz [%d - %d]",
5633 		   hw_config_type, phy_id, mac_cap->supported_bands,
5634 		   ab->wmi_ab.sbs_lower_band_end_freq,
5635 		   mac_cap->hw_freq_range.low_2ghz_freq,
5636 		   mac_cap->hw_freq_range.high_2ghz_freq,
5637 		   mac_cap->hw_freq_range.low_5ghz_freq,
5638 		   mac_cap->hw_freq_range.high_5ghz_freq);
5639 
5640 	switch (hw_config_type) {
5641 	case WMI_HOST_HW_MODE_SINGLE:
5642 		if (phy_id) {
5643 			ath12k_dbg(ab, ATH12K_DBG_WMI, "mac phy 1 is not supported");
5644 			break;
5645 		}
5646 		ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SMM, phy_id);
5647 		break;
5648 
5649 	case WMI_HOST_HW_MODE_DBS:
5650 		if (!ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_DBS))
5651 			ath12k_wmi_update_freq_info(ab, mac_cap,
5652 						    ATH12K_HW_MODE_DBS, phy_id);
5653 		break;
5654 	case WMI_HOST_HW_MODE_DBS_SBS:
5655 	case WMI_HOST_HW_MODE_DBS_OR_SBS:
5656 		ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_DBS, phy_id);
5657 		if (ab->wmi_ab.sbs_lower_band_end_freq ||
5658 		    mac_cap->hw_freq_range.low_5ghz_freq ||
5659 		    mac_cap->hw_freq_range.low_2ghz_freq)
5660 			ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SBS,
5661 						    phy_id);
5662 
5663 		if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_DBS))
5664 			ath12k_wmi_update_dbs_freq_info(ab);
5665 		if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS))
5666 			ath12k_wmi_update_sbs_freq_info(ab);
5667 		break;
5668 	case WMI_HOST_HW_MODE_SBS:
5669 	case WMI_HOST_HW_MODE_SBS_PASSIVE:
5670 		ath12k_wmi_update_freq_info(ab, mac_cap, ATH12K_HW_MODE_SBS, phy_id);
5671 		if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS))
5672 			ath12k_wmi_update_sbs_freq_info(ab);
5673 
5674 		break;
5675 	default:
5676 		break;
5677 	}
5678 }
5679 
ath12k_wmi_sbs_range_present(struct ath12k_base * ab)5680 static bool ath12k_wmi_sbs_range_present(struct ath12k_base *ab)
5681 {
5682 	if (ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS) ||
5683 	    (ab->wmi_ab.sbs_lower_band_end_freq &&
5684 	     ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS_LOWER_SHARE) &&
5685 	     ath12k_wmi_all_phy_range_updated(ab, ATH12K_HW_MODE_SBS_UPPER_SHARE)))
5686 		return true;
5687 
5688 	return false;
5689 }
5690 
ath12k_wmi_update_hw_mode_list(struct ath12k_base * ab)5691 static int ath12k_wmi_update_hw_mode_list(struct ath12k_base *ab)
5692 {
5693 	struct ath12k_svc_ext_info *svc_ext_info = &ab->wmi_ab.svc_ext_info;
5694 	struct ath12k_hw_mode_info *info = &ab->wmi_ab.hw_mode_info;
5695 	enum wmi_host_hw_mode_config_type hw_config_type;
5696 	struct ath12k_svc_ext_mac_phy_info *tmp;
5697 	bool dbs_mode = false, sbs_mode = false;
5698 	u32 i, j = 0;
5699 
5700 	if (!svc_ext_info->num_hw_modes) {
5701 		ath12k_err(ab, "invalid number of hw modes");
5702 		return -EINVAL;
5703 	}
5704 
5705 	ath12k_dbg(ab, ATH12K_DBG_WMI, "updated HW mode list: num modes %d",
5706 		   svc_ext_info->num_hw_modes);
5707 
5708 	memset(info->freq_range_caps, 0, sizeof(info->freq_range_caps));
5709 
5710 	for (i = 0; i < svc_ext_info->num_hw_modes; i++) {
5711 		if (j >= ATH12K_MAX_MAC_PHY_CAP)
5712 			return -EINVAL;
5713 
5714 		/* Update for MAC0 */
5715 		tmp = &svc_ext_info->mac_phy_info[j++];
5716 		hw_config_type = tmp->hw_mode_config_type;
5717 		ath12k_wmi_update_mac_freq_info(ab, hw_config_type, tmp->phy_id, tmp);
5718 
5719 		/* SBS and DBS have dual MAC. Up to 2 MACs are considered. */
5720 		if (hw_config_type == WMI_HOST_HW_MODE_DBS ||
5721 		    hw_config_type == WMI_HOST_HW_MODE_SBS_PASSIVE ||
5722 		    hw_config_type == WMI_HOST_HW_MODE_SBS ||
5723 		    hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS) {
5724 			if (j >= ATH12K_MAX_MAC_PHY_CAP)
5725 				return -EINVAL;
5726 			/* Update for MAC1 */
5727 			tmp = &svc_ext_info->mac_phy_info[j++];
5728 			ath12k_wmi_update_mac_freq_info(ab, hw_config_type,
5729 							tmp->phy_id, tmp);
5730 
5731 			if (hw_config_type == WMI_HOST_HW_MODE_DBS ||
5732 			    hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS)
5733 				dbs_mode = true;
5734 
5735 			if (ath12k_wmi_sbs_range_present(ab) &&
5736 			    (hw_config_type == WMI_HOST_HW_MODE_SBS_PASSIVE ||
5737 			     hw_config_type == WMI_HOST_HW_MODE_SBS ||
5738 			     hw_config_type == WMI_HOST_HW_MODE_DBS_OR_SBS))
5739 				sbs_mode = true;
5740 		}
5741 	}
5742 
5743 	info->support_dbs = dbs_mode;
5744 	info->support_sbs = sbs_mode;
5745 
5746 	ath12k_wmi_dump_freq_range(ab);
5747 
5748 	return 0;
5749 }
5750 
/* TLV iterator callback for the WMI service-ready-ext2 event.
 *
 * The event carries a fixed sequence of WMI_TAG_ARRAY_STRUCT sub-arrays;
 * the "*_done" flags in @data track which position in that sequence the
 * current array occupies, so the else-if order below must mirror the
 * firmware's TLV layout exactly.
 */
static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab,
					 u16 tag, u16 len,
					 const void *ptr, void *data)
{
	const struct ath12k_wmi_dbs_or_sbs_cap_params *dbs_or_sbs_caps;
	struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
	struct ath12k_wmi_svc_rdy_ext2_parse *parse = data;
	int ret;

	switch (tag) {
	case WMI_TAG_SERVICE_READY_EXT2_EVENT:
		ret = ath12k_pull_svc_ready_ext2(wmi_handle, ptr,
						 &parse->arg);
		if (ret) {
			ath12k_warn(ab,
				    "failed to extract wmi service ready ext2 parameters: %d\n",
				    ret);
			return ret;
		}
		break;

	case WMI_TAG_ARRAY_STRUCT:
		if (!parse->dma_ring_cap_done) {
			ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
						       &parse->dma_caps_parse);
			if (ret)
				return ret;

			parse->dma_ring_cap_done = true;
		} else if (!parse->spectral_bin_scaling_done) {
			/* TODO: This is a place-holder as WMI tag for
			 * spectral scaling is before
			 * WMI_TAG_MAC_PHY_CAPABILITIES_EXT
			 */
			parse->spectral_bin_scaling_done = true;
		} else if (!parse->mac_phy_caps_ext_done) {
			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
						  ath12k_wmi_tlv_mac_phy_caps_ext,
						  parse);
			if (ret) {
				ath12k_warn(ab, "failed to parse extended MAC PHY capabilities WMI TLV: %d\n",
					    ret);
				return ret;
			}

			parse->mac_phy_caps_ext_done = true;
		} else if (!parse->hal_reg_caps_ext2_done) {
			parse->hal_reg_caps_ext2_done = true;
		} else if (!parse->scan_radio_caps_ext2_done) {
			parse->scan_radio_caps_ext2_done = true;
		} else if (!parse->twt_caps_done) {
			parse->twt_caps_done = true;
		} else if (!parse->htt_msdu_idx_to_qtype_map_done) {
			parse->htt_msdu_idx_to_qtype_map_done = true;
		} else if (!parse->dbs_or_sbs_cap_ext_done) {
			dbs_or_sbs_caps = ptr;
			ab->wmi_ab.sbs_lower_band_end_freq =
				__le32_to_cpu(dbs_or_sbs_caps->sbs_lower_band_end_freq);

			ath12k_dbg(ab, ATH12K_DBG_WMI, "sbs_lower_band_end_freq %u\n",
				   ab->wmi_ab.sbs_lower_band_end_freq);

			ret = ath12k_wmi_update_hw_mode_list(ab);
			if (ret) {
				ath12k_warn(ab, "failed to update hw mode list: %d\n",
					    ret);
				return ret;
			}

			parse->dbs_or_sbs_cap_ext_done = true;
		}

		break;

	default:
		break;
	}

	return 0;
}
5830 
ath12k_service_ready_ext2_event(struct ath12k_base * ab,struct sk_buff * skb)5831 static int ath12k_service_ready_ext2_event(struct ath12k_base *ab,
5832 					   struct sk_buff *skb)
5833 {
5834 	struct ath12k_wmi_svc_rdy_ext2_parse svc_rdy_ext2 = { };
5835 	int ret;
5836 
5837 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
5838 				  ath12k_wmi_svc_rdy_ext2_parse,
5839 				  &svc_rdy_ext2);
5840 	if (ret) {
5841 		ath12k_warn(ab, "failed to parse ext2 event tlv %d\n", ret);
5842 		goto err;
5843 	}
5844 
5845 	complete(&ab->wmi_ab.service_ready);
5846 
5847 	return 0;
5848 
5849 err:
5850 	ath12k_wmi_free_dbring_caps(ab);
5851 	return ret;
5852 }
5853 
ath12k_pull_vdev_start_resp_tlv(struct ath12k_base * ab,struct sk_buff * skb,struct wmi_vdev_start_resp_event * vdev_rsp)5854 static int ath12k_pull_vdev_start_resp_tlv(struct ath12k_base *ab, struct sk_buff *skb,
5855 					   struct wmi_vdev_start_resp_event *vdev_rsp)
5856 {
5857 	const void **tb;
5858 	const struct wmi_vdev_start_resp_event *ev;
5859 	int ret;
5860 
5861 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5862 	if (IS_ERR(tb)) {
5863 		ret = PTR_ERR(tb);
5864 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5865 		return ret;
5866 	}
5867 
5868 	ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
5869 	if (!ev) {
5870 		ath12k_warn(ab, "failed to fetch vdev start resp ev");
5871 		kfree(tb);
5872 		return -EPROTO;
5873 	}
5874 
5875 	*vdev_rsp = *ev;
5876 
5877 	kfree(tb);
5878 	return 0;
5879 }
5880 
5881 static struct ath12k_reg_rule
create_ext_reg_rules_from_wmi(u32 num_reg_rules,struct ath12k_wmi_reg_rule_ext_params * wmi_reg_rule)5882 *create_ext_reg_rules_from_wmi(u32 num_reg_rules,
5883 #if defined(__linux__)
5884 			       struct ath12k_wmi_reg_rule_ext_params *wmi_reg_rule)
5885 #elif defined(__FreeBSD__)
5886 			       const struct ath12k_wmi_reg_rule_ext_params *wmi_reg_rule)
5887 #endif
5888 {
5889 	struct ath12k_reg_rule *reg_rule_ptr;
5890 	u32 count;
5891 
5892 	reg_rule_ptr = kzalloc((num_reg_rules * sizeof(*reg_rule_ptr)),
5893 			       GFP_ATOMIC);
5894 
5895 	if (!reg_rule_ptr)
5896 		return NULL;
5897 
5898 	for (count = 0; count < num_reg_rules; count++) {
5899 		reg_rule_ptr[count].start_freq =
5900 			le32_get_bits(wmi_reg_rule[count].freq_info,
5901 				      REG_RULE_START_FREQ);
5902 		reg_rule_ptr[count].end_freq =
5903 			le32_get_bits(wmi_reg_rule[count].freq_info,
5904 				      REG_RULE_END_FREQ);
5905 		reg_rule_ptr[count].max_bw =
5906 			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
5907 				      REG_RULE_MAX_BW);
5908 		reg_rule_ptr[count].reg_power =
5909 			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
5910 				      REG_RULE_REG_PWR);
5911 		reg_rule_ptr[count].ant_gain =
5912 			le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
5913 				      REG_RULE_ANT_GAIN);
5914 		reg_rule_ptr[count].flags =
5915 			le32_get_bits(wmi_reg_rule[count].flag_info,
5916 				      REG_RULE_FLAGS);
5917 		reg_rule_ptr[count].psd_flag =
5918 			le32_get_bits(wmi_reg_rule[count].psd_power_info,
5919 				      REG_RULE_PSD_INFO);
5920 		reg_rule_ptr[count].psd_eirp =
5921 			le32_get_bits(wmi_reg_rule[count].psd_power_info,
5922 				      REG_RULE_PSD_EIRP);
5923 	}
5924 
5925 	return reg_rule_ptr;
5926 }
5927 
ath12k_wmi_ignore_num_extra_rules(struct ath12k_wmi_reg_rule_ext_params * rule,u32 num_reg_rules)5928 static u8 ath12k_wmi_ignore_num_extra_rules(struct ath12k_wmi_reg_rule_ext_params *rule,
5929 					    u32 num_reg_rules)
5930 {
5931 	u8 num_invalid_5ghz_rules = 0;
5932 	u32 count, start_freq;
5933 
5934 	for (count = 0; count < num_reg_rules; count++) {
5935 		start_freq = le32_get_bits(rule[count].freq_info, REG_RULE_START_FREQ);
5936 
5937 		if (start_freq >= ATH12K_MIN_6GHZ_FREQ)
5938 			num_invalid_5ghz_rules++;
5939 	}
5940 
5941 	return num_invalid_5ghz_rules;
5942 }
5943 
ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base * ab,struct sk_buff * skb,struct ath12k_reg_info * reg_info)5944 static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
5945 						   struct sk_buff *skb,
5946 						   struct ath12k_reg_info *reg_info)
5947 {
5948 	const void **tb;
5949 	const struct wmi_reg_chan_list_cc_ext_event *ev;
5950 #if defined(__linux__)
5951 	struct ath12k_wmi_reg_rule_ext_params *ext_wmi_reg_rule;
5952 #elif defined(__FreeBSD__)
5953 	const struct ath12k_wmi_reg_rule_ext_params *ext_wmi_reg_rule;
5954 #endif
5955 	u32 num_2g_reg_rules, num_5g_reg_rules;
5956 	u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
5957 	u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
5958 	u8 num_invalid_5ghz_ext_rules;
5959 	u32 total_reg_rules = 0;
5960 	int ret, i, j;
5961 
5962 	ath12k_dbg(ab, ATH12K_DBG_WMI, "processing regulatory ext channel list\n");
5963 
5964 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
5965 	if (IS_ERR(tb)) {
5966 		ret = PTR_ERR(tb);
5967 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
5968 		return ret;
5969 	}
5970 
5971 	ev = tb[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT];
5972 	if (!ev) {
5973 		ath12k_warn(ab, "failed to fetch reg chan list ext update ev\n");
5974 		kfree(tb);
5975 		return -EPROTO;
5976 	}
5977 
5978 	reg_info->num_2g_reg_rules = le32_to_cpu(ev->num_2g_reg_rules);
5979 	reg_info->num_5g_reg_rules = le32_to_cpu(ev->num_5g_reg_rules);
5980 	reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] =
5981 		le32_to_cpu(ev->num_6g_reg_rules_ap_lpi);
5982 	reg_info->num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP] =
5983 		le32_to_cpu(ev->num_6g_reg_rules_ap_sp);
5984 	reg_info->num_6g_reg_rules_ap[WMI_REG_VLP_AP] =
5985 		le32_to_cpu(ev->num_6g_reg_rules_ap_vlp);
5986 
5987 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
5988 		reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
5989 			le32_to_cpu(ev->num_6g_reg_rules_cl_lpi[i]);
5990 		reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
5991 			le32_to_cpu(ev->num_6g_reg_rules_cl_sp[i]);
5992 		reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
5993 			le32_to_cpu(ev->num_6g_reg_rules_cl_vlp[i]);
5994 	}
5995 
5996 	num_2g_reg_rules = reg_info->num_2g_reg_rules;
5997 	total_reg_rules += num_2g_reg_rules;
5998 	num_5g_reg_rules = reg_info->num_5g_reg_rules;
5999 	total_reg_rules += num_5g_reg_rules;
6000 
6001 	if (num_2g_reg_rules > MAX_REG_RULES || num_5g_reg_rules > MAX_REG_RULES) {
6002 		ath12k_warn(ab, "Num reg rules for 2G/5G exceeds max limit (num_2g_reg_rules: %d num_5g_reg_rules: %d max_rules: %d)\n",
6003 			    num_2g_reg_rules, num_5g_reg_rules, MAX_REG_RULES);
6004 		kfree(tb);
6005 		return -EINVAL;
6006 	}
6007 
6008 	for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
6009 		num_6g_reg_rules_ap[i] = reg_info->num_6g_reg_rules_ap[i];
6010 
6011 		if (num_6g_reg_rules_ap[i] > MAX_6GHZ_REG_RULES) {
6012 			ath12k_warn(ab, "Num 6G reg rules for AP mode(%d) exceeds max limit (num_6g_reg_rules_ap: %d, max_rules: %d)\n",
6013 				    i, num_6g_reg_rules_ap[i], MAX_6GHZ_REG_RULES);
6014 			kfree(tb);
6015 			return -EINVAL;
6016 		}
6017 
6018 		total_reg_rules += num_6g_reg_rules_ap[i];
6019 	}
6020 
6021 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
6022 		num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
6023 				reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];
6024 		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];
6025 
6026 		num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
6027 				reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];
6028 		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];
6029 
6030 		num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
6031 				reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
6032 		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
6033 
6034 		if (num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] > MAX_6GHZ_REG_RULES ||
6035 		    num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] > MAX_6GHZ_REG_RULES ||
6036 		    num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] >  MAX_6GHZ_REG_RULES) {
6037 			ath12k_warn(ab, "Num 6g client reg rules exceeds max limit, for client(type: %d)\n",
6038 				    i);
6039 			kfree(tb);
6040 			return -EINVAL;
6041 		}
6042 	}
6043 
6044 	if (!total_reg_rules) {
6045 		ath12k_warn(ab, "No reg rules available\n");
6046 		kfree(tb);
6047 		return -EINVAL;
6048 	}
6049 
6050 	memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN);
6051 
6052 	reg_info->dfs_region = le32_to_cpu(ev->dfs_region);
6053 	reg_info->phybitmap = le32_to_cpu(ev->phybitmap);
6054 	reg_info->num_phy = le32_to_cpu(ev->num_phy);
6055 	reg_info->phy_id = le32_to_cpu(ev->phy_id);
6056 	reg_info->ctry_code = le32_to_cpu(ev->country_id);
6057 	reg_info->reg_dmn_pair = le32_to_cpu(ev->domain_code);
6058 
6059 	switch (le32_to_cpu(ev->status_code)) {
6060 	case WMI_REG_SET_CC_STATUS_PASS:
6061 		reg_info->status_code = REG_SET_CC_STATUS_PASS;
6062 		break;
6063 	case WMI_REG_CURRENT_ALPHA2_NOT_FOUND:
6064 		reg_info->status_code = REG_CURRENT_ALPHA2_NOT_FOUND;
6065 		break;
6066 	case WMI_REG_INIT_ALPHA2_NOT_FOUND:
6067 		reg_info->status_code = REG_INIT_ALPHA2_NOT_FOUND;
6068 		break;
6069 	case WMI_REG_SET_CC_CHANGE_NOT_ALLOWED:
6070 		reg_info->status_code = REG_SET_CC_CHANGE_NOT_ALLOWED;
6071 		break;
6072 	case WMI_REG_SET_CC_STATUS_NO_MEMORY:
6073 		reg_info->status_code = REG_SET_CC_STATUS_NO_MEMORY;
6074 		break;
6075 	case WMI_REG_SET_CC_STATUS_FAIL:
6076 		reg_info->status_code = REG_SET_CC_STATUS_FAIL;
6077 		break;
6078 	}
6079 
6080 	reg_info->is_ext_reg_event = true;
6081 
6082 	reg_info->min_bw_2g = le32_to_cpu(ev->min_bw_2g);
6083 	reg_info->max_bw_2g = le32_to_cpu(ev->max_bw_2g);
6084 	reg_info->min_bw_5g = le32_to_cpu(ev->min_bw_5g);
6085 	reg_info->max_bw_5g = le32_to_cpu(ev->max_bw_5g);
6086 	reg_info->min_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->min_bw_6g_ap_lpi);
6087 	reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->max_bw_6g_ap_lpi);
6088 	reg_info->min_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->min_bw_6g_ap_sp);
6089 	reg_info->max_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->max_bw_6g_ap_sp);
6090 	reg_info->min_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->min_bw_6g_ap_vlp);
6091 	reg_info->max_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->max_bw_6g_ap_vlp);
6092 
6093 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
6094 		reg_info->min_bw_6g_client[WMI_REG_INDOOR_AP][i] =
6095 			le32_to_cpu(ev->min_bw_6g_client_lpi[i]);
6096 		reg_info->max_bw_6g_client[WMI_REG_INDOOR_AP][i] =
6097 			le32_to_cpu(ev->max_bw_6g_client_lpi[i]);
6098 		reg_info->min_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
6099 			le32_to_cpu(ev->min_bw_6g_client_sp[i]);
6100 		reg_info->max_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
6101 			le32_to_cpu(ev->max_bw_6g_client_sp[i]);
6102 		reg_info->min_bw_6g_client[WMI_REG_VLP_AP][i] =
6103 			le32_to_cpu(ev->min_bw_6g_client_vlp[i]);
6104 		reg_info->max_bw_6g_client[WMI_REG_VLP_AP][i] =
6105 			le32_to_cpu(ev->max_bw_6g_client_vlp[i]);
6106 	}
6107 
6108 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6109 		   "%s:cc_ext %s dfs %d BW: min_2g %d max_2g %d min_5g %d max_5g %d phy_bitmap 0x%x",
6110 		   __func__, reg_info->alpha2, reg_info->dfs_region,
6111 		   reg_info->min_bw_2g, reg_info->max_bw_2g,
6112 		   reg_info->min_bw_5g, reg_info->max_bw_5g,
6113 		   reg_info->phybitmap);
6114 
6115 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6116 		   "num_2g_reg_rules %d num_5g_reg_rules %d",
6117 		   num_2g_reg_rules, num_5g_reg_rules);
6118 
6119 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6120 		   "num_6g_reg_rules_ap_lpi: %d num_6g_reg_rules_ap_sp: %d num_6g_reg_rules_ap_vlp: %d",
6121 		   num_6g_reg_rules_ap[WMI_REG_INDOOR_AP],
6122 		   num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP],
6123 		   num_6g_reg_rules_ap[WMI_REG_VLP_AP]);
6124 
6125 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6126 		   "6g Regular client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
6127 		   num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_DEFAULT_CLIENT],
6128 		   num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_DEFAULT_CLIENT],
6129 		   num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_DEFAULT_CLIENT]);
6130 
6131 	ath12k_dbg(ab, ATH12K_DBG_WMI,
6132 		   "6g Subordinate client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
6133 		   num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_SUBORDINATE_CLIENT],
6134 		   num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_SUBORDINATE_CLIENT],
6135 		   num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_SUBORDINATE_CLIENT]);
6136 
6137 	ext_wmi_reg_rule =
6138 #if defined(__linux__)
6139 		(struct ath12k_wmi_reg_rule_ext_params *)((u8 *)ev
6140 #elif defined(__FreeBSD__)
6141 		(const struct ath12k_wmi_reg_rule_ext_params *)((const u8 *)ev
6142 #endif
6143 			+ sizeof(*ev)
6144 			+ sizeof(struct wmi_tlv));
6145 
6146 	if (num_2g_reg_rules) {
6147 		reg_info->reg_rules_2g_ptr =
6148 			create_ext_reg_rules_from_wmi(num_2g_reg_rules,
6149 						      ext_wmi_reg_rule);
6150 
6151 		if (!reg_info->reg_rules_2g_ptr) {
6152 			kfree(tb);
6153 			ath12k_warn(ab, "Unable to Allocate memory for 2g rules\n");
6154 			return -ENOMEM;
6155 		}
6156 	}
6157 
6158 	ext_wmi_reg_rule += num_2g_reg_rules;
6159 
6160 	/* Firmware might include 6 GHz reg rule in 5 GHz rule list
6161 	 * for few countries along with separate 6 GHz rule.
6162 	 * Having same 6 GHz reg rule in 5 GHz and 6 GHz rules list
6163 	 * causes intersect check to be true, and same rules will be
6164 	 * shown multiple times in iw cmd.
6165 	 * Hence, avoid parsing 6 GHz rule from 5 GHz reg rule list
6166 	 */
6167 	num_invalid_5ghz_ext_rules = ath12k_wmi_ignore_num_extra_rules(ext_wmi_reg_rule,
6168 								       num_5g_reg_rules);
6169 
6170 	if (num_invalid_5ghz_ext_rules) {
6171 		ath12k_dbg(ab, ATH12K_DBG_WMI,
6172 			   "CC: %s 5 GHz reg rules number %d from fw, %d number of invalid 5 GHz rules",
6173 			   reg_info->alpha2, reg_info->num_5g_reg_rules,
6174 			   num_invalid_5ghz_ext_rules);
6175 
6176 		num_5g_reg_rules = num_5g_reg_rules - num_invalid_5ghz_ext_rules;
6177 		reg_info->num_5g_reg_rules = num_5g_reg_rules;
6178 	}
6179 
6180 	if (num_5g_reg_rules) {
6181 		reg_info->reg_rules_5g_ptr =
6182 			create_ext_reg_rules_from_wmi(num_5g_reg_rules,
6183 						      ext_wmi_reg_rule);
6184 
6185 		if (!reg_info->reg_rules_5g_ptr) {
6186 			kfree(tb);
6187 			ath12k_warn(ab, "Unable to Allocate memory for 5g rules\n");
6188 			return -ENOMEM;
6189 		}
6190 	}
6191 
6192 	/* We have adjusted the number of 5 GHz reg rules above. But still those
6193 	 * many rules needs to be adjusted in ext_wmi_reg_rule.
6194 	 *
6195 	 * NOTE: num_invalid_5ghz_ext_rules will be 0 for rest other cases.
6196 	 */
6197 	ext_wmi_reg_rule += (num_5g_reg_rules + num_invalid_5ghz_ext_rules);
6198 
6199 	for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
6200 		reg_info->reg_rules_6g_ap_ptr[i] =
6201 			create_ext_reg_rules_from_wmi(num_6g_reg_rules_ap[i],
6202 						      ext_wmi_reg_rule);
6203 
6204 		if (!reg_info->reg_rules_6g_ap_ptr[i]) {
6205 			kfree(tb);
6206 			ath12k_warn(ab, "Unable to Allocate memory for 6g ap rules\n");
6207 			return -ENOMEM;
6208 		}
6209 
6210 		ext_wmi_reg_rule += num_6g_reg_rules_ap[i];
6211 	}
6212 
6213 	for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) {
6214 		for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
6215 			reg_info->reg_rules_6g_client_ptr[j][i] =
6216 				create_ext_reg_rules_from_wmi(num_6g_reg_rules_cl[j][i],
6217 							      ext_wmi_reg_rule);
6218 
6219 			if (!reg_info->reg_rules_6g_client_ptr[j][i]) {
6220 				kfree(tb);
6221 				ath12k_warn(ab, "Unable to Allocate memory for 6g client rules\n");
6222 				return -ENOMEM;
6223 			}
6224 
6225 			ext_wmi_reg_rule += num_6g_reg_rules_cl[j][i];
6226 		}
6227 	}
6228 
6229 	reg_info->client_type = le32_to_cpu(ev->client_type);
6230 	reg_info->rnr_tpe_usable = ev->rnr_tpe_usable;
6231 	reg_info->unspecified_ap_usable = ev->unspecified_ap_usable;
6232 	reg_info->domain_code_6g_ap[WMI_REG_INDOOR_AP] =
6233 		le32_to_cpu(ev->domain_code_6g_ap_lpi);
6234 	reg_info->domain_code_6g_ap[WMI_REG_STD_POWER_AP] =
6235 		le32_to_cpu(ev->domain_code_6g_ap_sp);
6236 	reg_info->domain_code_6g_ap[WMI_REG_VLP_AP] =
6237 		le32_to_cpu(ev->domain_code_6g_ap_vlp);
6238 
6239 	for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
6240 		reg_info->domain_code_6g_client[WMI_REG_INDOOR_AP][i] =
6241 			le32_to_cpu(ev->domain_code_6g_client_lpi[i]);
6242 		reg_info->domain_code_6g_client[WMI_REG_STD_POWER_AP][i] =
6243 			le32_to_cpu(ev->domain_code_6g_client_sp[i]);
6244 		reg_info->domain_code_6g_client[WMI_REG_VLP_AP][i] =
6245 			le32_to_cpu(ev->domain_code_6g_client_vlp[i]);
6246 	}
6247 
6248 	reg_info->domain_code_6g_super_id = le32_to_cpu(ev->domain_code_6g_super_id);
6249 
6250 	ath12k_dbg(ab, ATH12K_DBG_WMI, "6g client_type: %d domain_code_6g_super_id: %d",
6251 		   reg_info->client_type, reg_info->domain_code_6g_super_id);
6252 
6253 	ath12k_dbg(ab, ATH12K_DBG_WMI, "processed regulatory ext channel list\n");
6254 
6255 	kfree(tb);
6256 	return 0;
6257 }
6258 
ath12k_pull_peer_del_resp_ev(struct ath12k_base * ab,struct sk_buff * skb,struct wmi_peer_delete_resp_event * peer_del_resp)6259 static int ath12k_pull_peer_del_resp_ev(struct ath12k_base *ab, struct sk_buff *skb,
6260 					struct wmi_peer_delete_resp_event *peer_del_resp)
6261 {
6262 	const void **tb;
6263 	const struct wmi_peer_delete_resp_event *ev;
6264 	int ret;
6265 
6266 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6267 	if (IS_ERR(tb)) {
6268 		ret = PTR_ERR(tb);
6269 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6270 		return ret;
6271 	}
6272 
6273 	ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
6274 	if (!ev) {
6275 		ath12k_warn(ab, "failed to fetch peer delete resp ev");
6276 		kfree(tb);
6277 		return -EPROTO;
6278 	}
6279 
6280 	memset(peer_del_resp, 0, sizeof(*peer_del_resp));
6281 
6282 	peer_del_resp->vdev_id = ev->vdev_id;
6283 	ether_addr_copy(peer_del_resp->peer_macaddr.addr,
6284 			ev->peer_macaddr.addr);
6285 
6286 	kfree(tb);
6287 	return 0;
6288 }
6289 
/* Extract the vdev id from a WMI vdev delete response event.
 *
 * Returns 0 on success, -EPROTO if the expected TLV is missing, or a
 * negative error from the TLV parser.
 */
static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab,
					struct sk_buff *skb,
					u32 *vdev_id)
{
	const struct wmi_vdev_delete_resp_event *ev;
	const void **tb;
	int ret = 0;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch vdev delete resp ev");
		ret = -EPROTO;
	} else {
		*vdev_id = le32_to_cpu(ev->vdev_id);
	}

	kfree(tb);
	return ret;
}
6317 
/* Extract vdev id and tx status from a WMI offloaded beacon tx status
 * event.
 *
 * Returns 0 on success, -EPROTO if the expected TLV is missing, or a
 * negative error from the TLV parser.
 */
static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab,
					struct sk_buff *skb,
					u32 *vdev_id, u32 *tx_status)
{
	const struct wmi_bcn_tx_status_event *ev;
	const void **tb;
	int ret = 0;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch bcn tx status ev");
		ret = -EPROTO;
	} else {
		*vdev_id = le32_to_cpu(ev->vdev_id);
		*tx_status = le32_to_cpu(ev->tx_status);
	}

	kfree(tb);
	return ret;
}
6346 
/* Extract the vdev id from a WMI vdev stopped event.
 *
 * Returns 0 on success, -EPROTO if the expected TLV is missing, or a
 * negative error from the TLV parser.
 */
static int ath12k_pull_vdev_stopped_param_tlv(struct ath12k_base *ab, struct sk_buff *skb,
					      u32 *vdev_id)
{
	const struct wmi_vdev_stopped_event *ev;
	const void **tb;
	int ret = 0;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch vdev stop ev");
		ret = -EPROTO;
	} else {
		*vdev_id = le32_to_cpu(ev->vdev_id);
	}

	kfree(tb);
	return ret;
}
6373 
/* TLV iterator callback for mgmt rx events: records the fixed rx header
 * and the first byte-array TLV (the frame body) in @data. Additional
 * byte arrays are ignored once frame_buf_done is set.
 */
static int ath12k_wmi_tlv_mgmt_rx_parse(struct ath12k_base *ab,
					u16 tag, u16 len,
					const void *ptr, void *data)
{
	struct wmi_tlv_mgmt_rx_parse *parse = data;

	if (tag == WMI_TAG_MGMT_RX_HDR) {
		parse->fixed = ptr;
	} else if (tag == WMI_TAG_ARRAY_BYTE && !parse->frame_buf_done) {
		/* only the first byte array carries the frame */
		parse->frame_buf = ptr;
		parse->frame_buf_done = true;
	}

	return 0;
}
6393 
/* Parse a WMI mgmt rx event: fill @hdr from the fixed header TLV and
 * reshape @skb so its data points at the management frame body reported
 * by firmware.
 *
 * Returns 0 on success, -EPROTO when the mandatory TLVs are missing or
 * the reported buffer length exceeds the skb, or a negative error from
 * the TLV iterator.
 */
static int ath12k_pull_mgmt_rx_params_tlv(struct ath12k_base *ab,
					  struct sk_buff *skb,
					  struct ath12k_wmi_mgmt_rx_arg *hdr)
{
	struct wmi_tlv_mgmt_rx_parse parse = { };
	const struct ath12k_wmi_mgmt_rx_params *ev;
	const u8 *frame;
	int i, ret;

	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath12k_wmi_tlv_mgmt_rx_parse,
				  &parse);
	if (ret) {
		ath12k_warn(ab, "failed to parse mgmt rx tlv %d\n", ret);
		return ret;
	}

	ev = parse.fixed;
	frame = parse.frame_buf;

	/* both the fixed header TLV and the frame byte array are mandatory */
	if (!ev || !frame) {
		ath12k_warn(ab, "failed to fetch mgmt rx hdr");
		return -EPROTO;
	}

	hdr->pdev_id = le32_to_cpu(ev->pdev_id);
	hdr->chan_freq = le32_to_cpu(ev->chan_freq);
	hdr->channel = le32_to_cpu(ev->channel);
	hdr->snr = le32_to_cpu(ev->snr);
	hdr->rate = le32_to_cpu(ev->rate);
	hdr->phy_mode = le32_to_cpu(ev->phy_mode);
	hdr->buf_len = le32_to_cpu(ev->buf_len);
	hdr->status = le32_to_cpu(ev->status);
	hdr->flags = le32_to_cpu(ev->flags);
	hdr->rssi = a_sle32_to_cpu(ev->rssi);
	hdr->tsf_delta = le32_to_cpu(ev->tsf_delta);

	for (i = 0; i < ATH_MAX_ANTENNA; i++)
		hdr->rssi_ctl[i] = le32_to_cpu(ev->rssi_ctl[i]);

	/* reject a firmware-reported frame length that would run past
	 * the end of the received skb
	 */
	if (skb->len < (frame - skb->data) + hdr->buf_len) {
		ath12k_warn(ab, "invalid length in mgmt rx hdr ev");
		return -EPROTO;
	}

	/* shift the sk_buff to point to `frame` */
	skb_trim(skb, 0);
	skb_put(skb, frame - skb->data);
	skb_pull(skb, frame - skb->data);
	skb_put(skb, hdr->buf_len);

	return 0;
}
6447 
/* Complete a previously queued management frame transmission.
 *
 * Looks up the msdu for @desc_id in the tx-mgmt IDR, removes it, unmaps
 * its DMA buffer and reports tx status (with @ack_rssi when acked) to
 * mac80211. @status == 0 means the frame was transmitted successfully.
 *
 * Returns 0 on success, -ENOENT when no pending frame matches @desc_id.
 */
static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id,
				    u32 status, u32 ack_rssi)
{
	struct sk_buff *msdu;
	struct ieee80211_tx_info *info;
	struct ath12k_skb_cb *skb_cb;
	int num_mgmt;

	/* lookup and removal must both happen under txmgmt_idr_lock */
	spin_lock_bh(&ar->txmgmt_idr_lock);
	msdu = idr_find(&ar->txmgmt_idr, desc_id);

	if (!msdu) {
		ath12k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n",
			    desc_id);
		spin_unlock_bh(&ar->txmgmt_idr_lock);
		return -ENOENT;
	}

	idr_remove(&ar->txmgmt_idr, desc_id);
	spin_unlock_bh(&ar->txmgmt_idr_lock);

	skb_cb = ATH12K_SKB_CB(msdu);
	dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);

	info = IEEE80211_SKB_CB(msdu);
	memset(&info->status, 0, sizeof(info->status));

	/* skip tx rate update from ieee80211_status*/
	info->status.rates[0].idx = -1;

	/* frame expected an ack and firmware reported success */
	if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status) {
		info->flags |= IEEE80211_TX_STAT_ACK;
		info->status.ack_signal = ack_rssi;
		info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
	}

	/* no-ack frame that went out successfully */
	if ((info->flags & IEEE80211_TX_CTL_NO_ACK) && !status)
		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;

	ieee80211_tx_status_irqsafe(ath12k_ar_to_hw(ar), msdu);

	num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);

	/* WARN when we received this event without doing any mgmt tx */
	if (num_mgmt < 0)
		WARN_ON_ONCE(1);

	/* last pending frame completed: wake anyone flushing mgmt tx */
	if (!num_mgmt)
		wake_up(&ar->txmgmt_empty_waitq);

	return 0;
}
6500 
ath12k_pull_mgmt_tx_compl_param_tlv(struct ath12k_base * ab,struct sk_buff * skb,struct wmi_mgmt_tx_compl_event * param)6501 static int ath12k_pull_mgmt_tx_compl_param_tlv(struct ath12k_base *ab,
6502 					       struct sk_buff *skb,
6503 					       struct wmi_mgmt_tx_compl_event *param)
6504 {
6505 	const void **tb;
6506 	const struct wmi_mgmt_tx_compl_event *ev;
6507 	int ret;
6508 
6509 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6510 	if (IS_ERR(tb)) {
6511 		ret = PTR_ERR(tb);
6512 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6513 		return ret;
6514 	}
6515 
6516 	ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT];
6517 	if (!ev) {
6518 		ath12k_warn(ab, "failed to fetch mgmt tx compl ev");
6519 		kfree(tb);
6520 		return -EPROTO;
6521 	}
6522 
6523 	param->pdev_id = ev->pdev_id;
6524 	param->desc_id = ev->desc_id;
6525 	param->status = ev->status;
6526 	param->ppdu_id = ev->ppdu_id;
6527 	param->ack_rssi = ev->ack_rssi;
6528 
6529 	kfree(tb);
6530 	return 0;
6531 }
6532 
/* Handle WMI_SCAN_EVENT_STARTED: move the scan state machine from
 * STARTING to RUNNING and complete ar->scan.started for the waiter in
 * the scan start path.
 *
 * Caller must hold ar->data_lock (asserted below).
 */
static void ath12k_wmi_event_scan_started(struct ath12k *ar)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH12K_SCAN_IDLE:
	case ATH12K_SCAN_RUNNING:
	case ATH12K_SCAN_ABORTING:
		/* "started" is only expected while in STARTING */
		ath12k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n",
			    ath12k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH12K_SCAN_STARTING:
		ar->scan.state = ATH12K_SCAN_RUNNING;

		/* remain-on-channel requests are serviced via scan;
		 * tell mac80211 the channel is ready in that case
		 */
		if (ar->scan.is_roc)
			ieee80211_ready_on_channel(ath12k_ar_to_hw(ar));

		complete(&ar->scan.started);
		break;
	}
}
6555 
/* Handle WMI_SCAN_EVENT_START_FAILED: unblock the waiter on
 * ar->scan.started and tear the scan down via
 * __ath12k_mac_scan_finish().
 *
 * Caller must hold ar->data_lock (asserted below).
 */
static void ath12k_wmi_event_scan_start_failed(struct ath12k *ar)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH12K_SCAN_IDLE:
	case ATH12K_SCAN_RUNNING:
	case ATH12K_SCAN_ABORTING:
		/* a start failure is only expected while in STARTING */
		ath12k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n",
			    ath12k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH12K_SCAN_STARTING:
		complete(&ar->scan.started);
		__ath12k_mac_scan_finish(ar);
		break;
	}
}
6574 
/* Handle WMI_SCAN_EVENT_COMPLETED: finish an in-progress or aborting
 * scan via __ath12k_mac_scan_finish().
 *
 * Caller must hold ar->data_lock (asserted below).
 */
static void ath12k_wmi_event_scan_completed(struct ath12k *ar)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH12K_SCAN_IDLE:
	case ATH12K_SCAN_STARTING:
		/* One suspected reason scan can be completed while starting is
		 * if firmware fails to deliver all scan events to the host,
		 * e.g. when transport pipe is full. This has been observed
		 * with spectral scan phyerr events starving wmi transport
		 * pipe. In such case the "scan completed" event should be (and
		 * is) ignored by the host as it may be just firmware's scan
		 * state machine recovering.
		 */
		ath12k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n",
			    ath12k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH12K_SCAN_RUNNING:
	case ATH12K_SCAN_ABORTING:
		__ath12k_mac_scan_finish(ar);
		break;
	}
}
6600 
ath12k_wmi_event_scan_bss_chan(struct ath12k * ar)6601 static void ath12k_wmi_event_scan_bss_chan(struct ath12k *ar)
6602 {
6603 	lockdep_assert_held(&ar->data_lock);
6604 
6605 	switch (ar->scan.state) {
6606 	case ATH12K_SCAN_IDLE:
6607 	case ATH12K_SCAN_STARTING:
6608 		ath12k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n",
6609 			    ath12k_scan_state_str(ar->scan.state),
6610 			    ar->scan.state);
6611 		break;
6612 	case ATH12K_SCAN_RUNNING:
6613 	case ATH12K_SCAN_ABORTING:
6614 		ar->scan_channel = NULL;
6615 		break;
6616 	}
6617 }
6618 
/* Handle WMI_SCAN_EVENT_FOREIGN_CHAN: record the off-channel frequency
 * firmware tuned to, and complete the remain-on-channel waiter if this
 * is the requested RoC frequency.
 *
 * Caller must hold ar->data_lock (asserted below).
 */
static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq)
{
	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);

	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH12K_SCAN_IDLE:
	case ATH12K_SCAN_STARTING:
		/* foreign channel events are only expected mid-scan */
		ath12k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
			    ath12k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH12K_SCAN_RUNNING:
	case ATH12K_SCAN_ABORTING:
		ar->scan_channel = ieee80211_get_channel(hw->wiphy, freq);

		if (ar->scan.is_roc && ar->scan.roc_freq == freq)
			complete(&ar->scan.on_channel);

		break;
	}
}
6642 
6643 static const char *
ath12k_wmi_event_scan_type_str(enum wmi_scan_event_type type,enum wmi_scan_completion_reason reason)6644 ath12k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
6645 			       enum wmi_scan_completion_reason reason)
6646 {
6647 	switch (type) {
6648 	case WMI_SCAN_EVENT_STARTED:
6649 		return "started";
6650 	case WMI_SCAN_EVENT_COMPLETED:
6651 		switch (reason) {
6652 		case WMI_SCAN_REASON_COMPLETED:
6653 			return "completed";
6654 		case WMI_SCAN_REASON_CANCELLED:
6655 			return "completed [cancelled]";
6656 		case WMI_SCAN_REASON_PREEMPTED:
6657 			return "completed [preempted]";
6658 		case WMI_SCAN_REASON_TIMEDOUT:
6659 			return "completed [timedout]";
6660 		case WMI_SCAN_REASON_INTERNAL_FAILURE:
6661 			return "completed [internal err]";
6662 		case WMI_SCAN_REASON_MAX:
6663 			break;
6664 		}
6665 		return "completed [unknown]";
6666 	case WMI_SCAN_EVENT_BSS_CHANNEL:
6667 		return "bss channel";
6668 	case WMI_SCAN_EVENT_FOREIGN_CHAN:
6669 		return "foreign channel";
6670 	case WMI_SCAN_EVENT_DEQUEUED:
6671 		return "dequeued";
6672 	case WMI_SCAN_EVENT_PREEMPTED:
6673 		return "preempted";
6674 	case WMI_SCAN_EVENT_START_FAILED:
6675 		return "start failed";
6676 	case WMI_SCAN_EVENT_RESTARTED:
6677 		return "restarted";
6678 	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
6679 		return "foreign channel exit";
6680 	default:
6681 		return "unknown";
6682 	}
6683 }
6684 
ath12k_pull_scan_ev(struct ath12k_base * ab,struct sk_buff * skb,struct wmi_scan_event * scan_evt_param)6685 static int ath12k_pull_scan_ev(struct ath12k_base *ab, struct sk_buff *skb,
6686 			       struct wmi_scan_event *scan_evt_param)
6687 {
6688 	const void **tb;
6689 	const struct wmi_scan_event *ev;
6690 	int ret;
6691 
6692 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6693 	if (IS_ERR(tb)) {
6694 		ret = PTR_ERR(tb);
6695 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6696 		return ret;
6697 	}
6698 
6699 	ev = tb[WMI_TAG_SCAN_EVENT];
6700 	if (!ev) {
6701 		ath12k_warn(ab, "failed to fetch scan ev");
6702 		kfree(tb);
6703 		return -EPROTO;
6704 	}
6705 
6706 	scan_evt_param->event_type = ev->event_type;
6707 	scan_evt_param->reason = ev->reason;
6708 	scan_evt_param->channel_freq = ev->channel_freq;
6709 	scan_evt_param->scan_req_id = ev->scan_req_id;
6710 	scan_evt_param->scan_id = ev->scan_id;
6711 	scan_evt_param->vdev_id = ev->vdev_id;
6712 	scan_evt_param->tsf_timestamp = ev->tsf_timestamp;
6713 
6714 	kfree(tb);
6715 	return 0;
6716 }
6717 
ath12k_pull_peer_sta_kickout_ev(struct ath12k_base * ab,struct sk_buff * skb,struct wmi_peer_sta_kickout_arg * arg)6718 static int ath12k_pull_peer_sta_kickout_ev(struct ath12k_base *ab, struct sk_buff *skb,
6719 					   struct wmi_peer_sta_kickout_arg *arg)
6720 {
6721 	const void **tb;
6722 	const struct wmi_peer_sta_kickout_event *ev;
6723 	int ret;
6724 
6725 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6726 	if (IS_ERR(tb)) {
6727 		ret = PTR_ERR(tb);
6728 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6729 		return ret;
6730 	}
6731 
6732 	ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT];
6733 	if (!ev) {
6734 		ath12k_warn(ab, "failed to fetch peer sta kickout ev");
6735 		kfree(tb);
6736 		return -EPROTO;
6737 	}
6738 
6739 	arg->mac_addr = ev->peer_macaddr.addr;
6740 	arg->reason = le32_to_cpu(ev->reason);
6741 	arg->rssi = le32_to_cpu(ev->rssi);
6742 
6743 	kfree(tb);
6744 	return 0;
6745 }
6746 
ath12k_pull_roam_ev(struct ath12k_base * ab,struct sk_buff * skb,struct wmi_roam_event * roam_ev)6747 static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb,
6748 			       struct wmi_roam_event *roam_ev)
6749 {
6750 	const void **tb;
6751 	const struct wmi_roam_event *ev;
6752 	int ret;
6753 
6754 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6755 	if (IS_ERR(tb)) {
6756 		ret = PTR_ERR(tb);
6757 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6758 		return ret;
6759 	}
6760 
6761 	ev = tb[WMI_TAG_ROAM_EVENT];
6762 	if (!ev) {
6763 		ath12k_warn(ab, "failed to fetch roam ev");
6764 		kfree(tb);
6765 		return -EPROTO;
6766 	}
6767 
6768 	roam_ev->vdev_id = ev->vdev_id;
6769 	roam_ev->reason = ev->reason;
6770 	roam_ev->rssi = ev->rssi;
6771 
6772 	kfree(tb);
6773 	return 0;
6774 }
6775 
freq_to_idx(struct ath12k * ar,int freq)6776 static int freq_to_idx(struct ath12k *ar, int freq)
6777 {
6778 	struct ieee80211_supported_band *sband;
6779 	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
6780 	int band, ch, idx = 0;
6781 
6782 	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
6783 		if (!ar->mac.sbands[band].channels)
6784 			continue;
6785 
6786 		sband = hw->wiphy->bands[band];
6787 		if (!sband)
6788 			continue;
6789 
6790 		for (ch = 0; ch < sband->n_channels; ch++, idx++)
6791 			if (sband->channels[ch].center_freq == freq)
6792 				goto exit;
6793 	}
6794 
6795 exit:
6796 	return idx;
6797 }
6798 
ath12k_pull_chan_info_ev(struct ath12k_base * ab,struct sk_buff * skb,struct wmi_chan_info_event * ch_info_ev)6799 static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
6800 				    struct wmi_chan_info_event *ch_info_ev)
6801 {
6802 	const void **tb;
6803 	const struct wmi_chan_info_event *ev;
6804 	int ret;
6805 
6806 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6807 	if (IS_ERR(tb)) {
6808 		ret = PTR_ERR(tb);
6809 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6810 		return ret;
6811 	}
6812 
6813 	ev = tb[WMI_TAG_CHAN_INFO_EVENT];
6814 	if (!ev) {
6815 		ath12k_warn(ab, "failed to fetch chan info ev");
6816 		kfree(tb);
6817 		return -EPROTO;
6818 	}
6819 
6820 	ch_info_ev->err_code = ev->err_code;
6821 	ch_info_ev->freq = ev->freq;
6822 	ch_info_ev->cmd_flags = ev->cmd_flags;
6823 	ch_info_ev->noise_floor = ev->noise_floor;
6824 	ch_info_ev->rx_clear_count = ev->rx_clear_count;
6825 	ch_info_ev->cycle_count = ev->cycle_count;
6826 	ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
6827 	ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
6828 	ch_info_ev->rx_frame_count = ev->rx_frame_count;
6829 	ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
6830 	ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
6831 	ch_info_ev->vdev_id = ev->vdev_id;
6832 
6833 	kfree(tb);
6834 	return 0;
6835 }
6836 
6837 static int
ath12k_pull_pdev_bss_chan_info_ev(struct ath12k_base * ab,struct sk_buff * skb,struct wmi_pdev_bss_chan_info_event * bss_ch_info_ev)6838 ath12k_pull_pdev_bss_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
6839 				  struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev)
6840 {
6841 	const void **tb;
6842 	const struct wmi_pdev_bss_chan_info_event *ev;
6843 	int ret;
6844 
6845 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6846 	if (IS_ERR(tb)) {
6847 		ret = PTR_ERR(tb);
6848 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6849 		return ret;
6850 	}
6851 
6852 	ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT];
6853 	if (!ev) {
6854 		ath12k_warn(ab, "failed to fetch pdev bss chan info ev");
6855 		kfree(tb);
6856 		return -EPROTO;
6857 	}
6858 
6859 	bss_ch_info_ev->pdev_id = ev->pdev_id;
6860 	bss_ch_info_ev->freq = ev->freq;
6861 	bss_ch_info_ev->noise_floor = ev->noise_floor;
6862 	bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low;
6863 	bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high;
6864 	bss_ch_info_ev->cycle_count_low = ev->cycle_count_low;
6865 	bss_ch_info_ev->cycle_count_high = ev->cycle_count_high;
6866 	bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low;
6867 	bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high;
6868 	bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low;
6869 	bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high;
6870 	bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low;
6871 	bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high;
6872 
6873 	kfree(tb);
6874 	return 0;
6875 }
6876 
6877 static int
ath12k_pull_vdev_install_key_compl_ev(struct ath12k_base * ab,struct sk_buff * skb,struct wmi_vdev_install_key_complete_arg * arg)6878 ath12k_pull_vdev_install_key_compl_ev(struct ath12k_base *ab, struct sk_buff *skb,
6879 				      struct wmi_vdev_install_key_complete_arg *arg)
6880 {
6881 	const void **tb;
6882 	const struct wmi_vdev_install_key_compl_event *ev;
6883 	int ret;
6884 
6885 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6886 	if (IS_ERR(tb)) {
6887 		ret = PTR_ERR(tb);
6888 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6889 		return ret;
6890 	}
6891 
6892 	ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT];
6893 	if (!ev) {
6894 		ath12k_warn(ab, "failed to fetch vdev install key compl ev");
6895 		kfree(tb);
6896 		return -EPROTO;
6897 	}
6898 
6899 	arg->vdev_id = le32_to_cpu(ev->vdev_id);
6900 	arg->macaddr = ev->peer_macaddr.addr;
6901 	arg->key_idx = le32_to_cpu(ev->key_idx);
6902 	arg->key_flags = le32_to_cpu(ev->key_flags);
6903 	arg->status = le32_to_cpu(ev->status);
6904 
6905 	kfree(tb);
6906 	return 0;
6907 }
6908 
ath12k_pull_peer_assoc_conf_ev(struct ath12k_base * ab,struct sk_buff * skb,struct wmi_peer_assoc_conf_arg * peer_assoc_conf)6909 static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff *skb,
6910 					  struct wmi_peer_assoc_conf_arg *peer_assoc_conf)
6911 {
6912 	const void **tb;
6913 	const struct wmi_peer_assoc_conf_event *ev;
6914 	int ret;
6915 
6916 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6917 	if (IS_ERR(tb)) {
6918 		ret = PTR_ERR(tb);
6919 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6920 		return ret;
6921 	}
6922 
6923 	ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
6924 	if (!ev) {
6925 		ath12k_warn(ab, "failed to fetch peer assoc conf ev");
6926 		kfree(tb);
6927 		return -EPROTO;
6928 	}
6929 
6930 	peer_assoc_conf->vdev_id = le32_to_cpu(ev->vdev_id);
6931 	peer_assoc_conf->macaddr = ev->peer_macaddr.addr;
6932 
6933 	kfree(tb);
6934 	return 0;
6935 }
6936 
/* Validate a WMI pdev temperature event.
 *
 * NOTE(review): the assignment to @ev below only repoints the local
 * (const) parameter at the TLV payload; nothing is copied back through
 * the caller's pointer. As written, this function merely checks that
 * the temperature TLV is present — confirm whether the caller expects
 * the event contents to be filled in.
 *
 * Returns 0 when the TLV is present, -EPROTO when it is missing, or a
 * negative error from the TLV parser.
 */
static int
ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, struct sk_buff *skb,
			 const struct wmi_pdev_temperature_event *ev)
{
	const void **tb;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch pdev temp ev");
		kfree(tb);
		return -EPROTO;
	}

	kfree(tb);
	return 0;
}
6961 
/* HTC endpoint callback: tx credits became available, wake up anyone
 * waiting on the WMI tx credits wait queue.
 */
static void ath12k_wmi_op_ep_tx_credits(struct ath12k_base *ab)
{
	/* try to send pending beacons first. they take priority */
	wake_up(&ab->wmi_ab.tx_credits_wq);
}
6967 
/* Handle a WMI 11d new-country event: record the new alpha2 country
 * code, reset the per-radio 11d scan state and kick the deferred 11d
 * regdomain update work.
 *
 * Returns 0 on success or a negative errno on TLV parse failure.
 */
ath12k_reg_11d_new_cc_event(struct ath12k_base * ab,struct sk_buff * skb)6968 static int ath12k_reg_11d_new_cc_event(struct ath12k_base *ab, struct sk_buff *skb)
6969 {
6970 	const struct wmi_11d_new_cc_event *ev;
6971 	struct ath12k *ar;
6972 	struct ath12k_pdev *pdev;
6973 	const void **tb;
6974 	int ret, i;
6975 
6976 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
6977 	if (IS_ERR(tb)) {
6978 		ret = PTR_ERR(tb);
6979 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
6980 		return ret;
6981 	}
6982 
6983 	ev = tb[WMI_TAG_11D_NEW_COUNTRY_EVENT];
6984 	if (!ev) {
6985 		kfree(tb);
6986 		ath12k_warn(ab, "failed to fetch 11d new cc ev");
6987 		return -EPROTO;
6988 	}
6989 
	/* new_alpha2 is shared state consumed by the update_11d_work */
6990 	spin_lock_bh(&ab->base_lock);
6991 	memcpy(&ab->new_alpha2, &ev->new_alpha2, REG_ALPHA2_LEN);
6992 	spin_unlock_bh(&ab->base_lock);
6993 
6994 	ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi 11d new cc %c%c\n",
6995 		   ab->new_alpha2[0],
6996 		   ab->new_alpha2[1]);
6997 
6998 	kfree(tb);
6999 
	/* A country change invalidates any in-progress 11d scan: mark each
	 * radio idle, force a regdomain refresh and release waiters.
	 */
7000 	for (i = 0; i < ab->num_radios; i++) {
7001 		pdev = &ab->pdevs[i];
7002 		ar = pdev->ar;
7003 		ar->state_11d = ATH12K_11D_IDLE;
7004 		ar->ah->regd_updated = false;
7005 		complete(&ar->completed_11d_scan);
7006 	}
7007 
7008 	queue_work(ab->workqueue, &ab->update_11d_work);
7009 
7010 	return 0;
7011 }
7012 
/* HTC TX-completion callback for WMI command buffers: the skb is no
 * longer referenced by the transport once this runs, so just free it.
 */
ath12k_wmi_htc_tx_complete(struct ath12k_base * ab,struct sk_buff * skb)7013 static void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab,
7014 				       struct sk_buff *skb)
7015 {
7016 	dev_kfree_skb(skb);
7017 }
7018 
/* Handle a WMI regulatory channel-list event: extract and validate the
 * new regulatory info, stash it per-pdev and build a new regdomain.
 * On unrecoverable failure the old regdomain is kept (fallback path).
 *
 * Returns 0/ATH12K_REG_STATUS_* on success paths, negative errno on
 * allocation or extraction failure.
 */
ath12k_reg_chan_list_event(struct ath12k_base * ab,struct sk_buff * skb)7019 static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb)
7020 {
7021 	struct ath12k_reg_info *reg_info;
7022 	struct ath12k *ar = NULL;
	/* 255 = "no pdev resolved yet"; checked before indexing ab->pdevs */
7023 	u8 pdev_idx = 255;
7024 	int ret;
7025 
7026 	reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
7027 	if (!reg_info) {
7028 		ret = -ENOMEM;
7029 		goto fallback;
7030 	}
7031 
7032 	ret = ath12k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);
7033 	if (ret) {
7034 		ath12k_warn(ab, "failed to extract regulatory info from received event\n");
7035 		goto mem_free;
7036 	}
7037 
7038 	ret = ath12k_reg_validate_reg_info(ab, reg_info);
7039 	if (ret == ATH12K_REG_STATUS_FALLBACK) {
7040 		ath12k_warn(ab, "failed to validate reg info %d\n", ret);
7041 		/* firmware has successfully switches to new regd but host can not
7042 		 * continue, so free reginfo and fallback to old regd
7043 		 */
7044 		goto mem_free;
7045 	} else if (ret == ATH12K_REG_STATUS_DROP) {
7046 		/* reg info is valid but we will not store it and
7047 		 * not going to create new regd for it
7048 		 */
7049 		ret = ATH12K_REG_STATUS_VALID;
7050 		goto mem_free;
7051 	}
7052 
7053 	/* free old reg_info if it exist */
7054 	pdev_idx = reg_info->phy_id;
7055 	if (ab->reg_info[pdev_idx]) {
7056 		ath12k_reg_reset_reg_info(ab->reg_info[pdev_idx]);
7057 		kfree(ab->reg_info[pdev_idx]);
7058 	}
7059 	/* reg_info is valid, we store it for later use
7060 	 * even below regd build failed
7061 	 */
7062 	ab->reg_info[pdev_idx] = reg_info;
7063 
7064 	ret = ath12k_reg_handle_chan_list(ab, reg_info, WMI_VDEV_TYPE_UNSPEC,
7065 					  IEEE80211_REG_UNSET_AP);
7066 	if (ret) {
7067 		ath12k_warn(ab, "failed to handle chan list %d\n", ret);
7068 		goto fallback;
7069 	}
7070 
7071 	goto out;
7072 
	/* Error path for reg_info that was never stored in ab->reg_info */
7073 mem_free:
7074 	ath12k_reg_reset_reg_info(reg_info);
7075 	kfree(reg_info);
7076 
7077 	if (ret == ATH12K_REG_STATUS_VALID)
7078 		goto out;
7079 
7080 fallback:
7081 	/* Fallback to older reg (by sending previous country setting
7082 	 * again if fw has succeeded and we failed to process here.
7083 	 * The Regdomain should be uniform across driver and fw. Since the
7084 	 * FW has processed the command and sent a success status, we expect
7085 	 * this function to succeed as well. If it doesn't, CTRY needs to be
7086 	 * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
7087 	 */
7088 	/* TODO: This is rare, but still should also be handled */
7089 	WARN_ON(1);
7090 
7091 out:
7092 	/* In some error cases, even a valid pdev_idx might not be available */
7093 	if (pdev_idx != 255)
7094 		ar = ab->pdevs[pdev_idx].ar;
7095 
7096 	/* During the boot-time update, 'ar' might not be allocated,
7097 	 * so the completion cannot be marked at that point.
7098 	 * This boot-time update is handled in ath12k_mac_hw_register()
7099 	 * before registering the hardware.
7100 	 */
7101 	if (ar)
7102 		complete_all(&ar->regd_update_completed);
7103 
7104 	return ret;
7105 }
7106 
/* TLV iterator callback for the WMI_READY event. Captures the firmware
 * init status and base MAC address from the fixed READY_EVENT TLV, and
 * optionally per-pdev MAC addresses from the trailing fixed-struct array.
 * Always returns 0 (unknown tags are skipped).
 */
ath12k_wmi_rdy_parse(struct ath12k_base * ab,u16 tag,u16 len,const void * ptr,void * data)7107 static int ath12k_wmi_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
7108 				const void *ptr, void *data)
7109 {
7110 	struct ath12k_wmi_rdy_parse *rdy_parse = data;
7111 	struct wmi_ready_event fixed_param;
7112 #if defined(__linux__)
7113 	struct ath12k_wmi_mac_addr_params *addr_list;
7114 #elif defined(__FreeBSD__)
7115 	const struct ath12k_wmi_mac_addr_params *addr_list;
7116 #endif
7117 	struct ath12k_pdev *pdev;
7118 	u32 num_mac_addr;
7119 	int i;
7120 
7121 	switch (tag) {
7122 	case WMI_TAG_READY_EVENT:
		/* Copy at most sizeof(fixed_param): older firmware may send
		 * a shorter event, the remainder stays zeroed.
		 */
7123 		memset(&fixed_param, 0, sizeof(fixed_param));
7124 #if defined(__linux__)
7125 		memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
7126 #elif defined(__FreeBSD__)
7127 		memcpy(&fixed_param, (const struct wmi_ready_event *)ptr,
7128 #endif
7129 		       min_t(u16, sizeof(fixed_param), len));
7130 		ab->wlan_init_status = le32_to_cpu(fixed_param.ready_event_min.status);
7131 		rdy_parse->num_extra_mac_addr =
7132 			le32_to_cpu(fixed_param.ready_event_min.num_extra_mac_addr);
7133 
7134 		ether_addr_copy(ab->mac_addr,
7135 				fixed_param.ready_event_min.mac_addr.addr);
7136 		ab->pktlog_defs_checksum = le32_to_cpu(fixed_param.pktlog_defs_checksum);
7137 		ab->wmi_ready = true;
7138 		break;
7139 	case WMI_TAG_ARRAY_FIXED_STRUCT:
7140 #if defined(__linux__)
7141 		addr_list = (struct ath12k_wmi_mac_addr_params *)ptr;
7142 #elif defined(__FreeBSD__)
7143 		addr_list = (const struct ath12k_wmi_mac_addr_params *)ptr;
7144 #endif
7145 		num_mac_addr = rdy_parse->num_extra_mac_addr;
7146 
		/* Per-pdev addresses only make sense for multi-radio chips
		 * and when firmware supplied one address per radio.
		 */
7147 		if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios))
7148 			break;
7149 
7150 		for (i = 0; i < ab->num_radios; i++) {
7151 			pdev = &ab->pdevs[i];
7152 			ether_addr_copy(pdev->mac_addr, addr_list[i].addr);
7153 		}
7154 		ab->pdevs_macaddr_valid = true;
7155 		break;
7156 	default:
7157 		break;
7158 	}
7159 
7160 	return 0;
7161 }
7162 
ath12k_ready_event(struct ath12k_base * ab,struct sk_buff * skb)7163 static int ath12k_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
7164 {
7165 	struct ath12k_wmi_rdy_parse rdy_parse = { };
7166 	int ret;
7167 
7168 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
7169 				  ath12k_wmi_rdy_parse, &rdy_parse);
7170 	if (ret) {
7171 		ath12k_warn(ab, "failed to parse tlv %d\n", ret);
7172 		return ret;
7173 	}
7174 
7175 	complete(&ab->wmi_ab.unified_ready);
7176 	return 0;
7177 }
7178 
ath12k_peer_delete_resp_event(struct ath12k_base * ab,struct sk_buff * skb)7179 static void ath12k_peer_delete_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
7180 {
7181 	struct wmi_peer_delete_resp_event peer_del_resp;
7182 	struct ath12k *ar;
7183 
7184 	if (ath12k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) {
7185 		ath12k_warn(ab, "failed to extract peer delete resp");
7186 		return;
7187 	}
7188 
7189 	rcu_read_lock();
7190 	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(peer_del_resp.vdev_id));
7191 	if (!ar) {
7192 		ath12k_warn(ab, "invalid vdev id in peer delete resp ev %d",
7193 			    peer_del_resp.vdev_id);
7194 		rcu_read_unlock();
7195 		return;
7196 	}
7197 
7198 	complete(&ar->peer_delete_done);
7199 	rcu_read_unlock();
7200 	ath12k_dbg(ab, ATH12K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n",
7201 		   peer_del_resp.vdev_id, peer_del_resp.peer_macaddr.addr);
7202 }
7203 
ath12k_vdev_delete_resp_event(struct ath12k_base * ab,struct sk_buff * skb)7204 static void ath12k_vdev_delete_resp_event(struct ath12k_base *ab,
7205 					  struct sk_buff *skb)
7206 {
7207 	struct ath12k *ar;
7208 	u32 vdev_id = 0;
7209 
7210 	if (ath12k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) {
7211 		ath12k_warn(ab, "failed to extract vdev delete resp");
7212 		return;
7213 	}
7214 
7215 	rcu_read_lock();
7216 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
7217 	if (!ar) {
7218 		ath12k_warn(ab, "invalid vdev id in vdev delete resp ev %d",
7219 			    vdev_id);
7220 		rcu_read_unlock();
7221 		return;
7222 	}
7223 
7224 	complete(&ar->vdev_delete_done);
7225 
7226 	rcu_read_unlock();
7227 
7228 	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev delete resp for vdev id %d\n",
7229 		   vdev_id);
7230 }
7231 
/* Map a WMI vdev-start response status code to a human-readable string
 * for log messages. Unrecognized codes map to "unknown".
 */
static const char *ath12k_wmi_vdev_resp_print(u32 vdev_resp_status)
{
	if (vdev_resp_status == WMI_VDEV_START_RESPONSE_INVALID_VDEVID)
		return "invalid vdev id";
	if (vdev_resp_status == WMI_VDEV_START_RESPONSE_NOT_SUPPORTED)
		return "not supported";
	if (vdev_resp_status == WMI_VDEV_START_RESPONSE_DFS_VIOLATION)
		return "dfs violation";
	if (vdev_resp_status == WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN)
		return "invalid regdomain";

	return "unknown";
}
7247 
ath12k_vdev_start_resp_event(struct ath12k_base * ab,struct sk_buff * skb)7248 static void ath12k_vdev_start_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
7249 {
7250 	struct wmi_vdev_start_resp_event vdev_start_resp;
7251 	struct ath12k *ar;
7252 	u32 status;
7253 
7254 	if (ath12k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) {
7255 		ath12k_warn(ab, "failed to extract vdev start resp");
7256 		return;
7257 	}
7258 
7259 	rcu_read_lock();
7260 	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(vdev_start_resp.vdev_id));
7261 	if (!ar) {
7262 		ath12k_warn(ab, "invalid vdev id in vdev start resp ev %d",
7263 			    vdev_start_resp.vdev_id);
7264 		rcu_read_unlock();
7265 		return;
7266 	}
7267 
7268 	ar->last_wmi_vdev_start_status = 0;
7269 
7270 	status = le32_to_cpu(vdev_start_resp.status);
7271 	if (WARN_ON_ONCE(status)) {
7272 		ath12k_warn(ab, "vdev start resp error status %d (%s)\n",
7273 			    status, ath12k_wmi_vdev_resp_print(status));
7274 		ar->last_wmi_vdev_start_status = status;
7275 	}
7276 
7277 	ar->max_allowed_tx_power = (s8)le32_to_cpu(vdev_start_resp.max_allowed_tx_power);
7278 
7279 	complete(&ar->vdev_setup_done);
7280 
7281 	rcu_read_unlock();
7282 
7283 	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev start resp for vdev id %d",
7284 		   vdev_start_resp.vdev_id);
7285 }
7286 
ath12k_bcn_tx_status_event(struct ath12k_base * ab,struct sk_buff * skb)7287 static void ath12k_bcn_tx_status_event(struct ath12k_base *ab, struct sk_buff *skb)
7288 {
7289 	struct ath12k_link_vif *arvif;
7290 	struct ath12k *ar;
7291 	u32 vdev_id, tx_status;
7292 
7293 	if (ath12k_pull_bcn_tx_status_ev(ab, skb, &vdev_id, &tx_status) != 0) {
7294 		ath12k_warn(ab, "failed to extract bcn tx status");
7295 		return;
7296 	}
7297 
7298 	guard(rcu)();
7299 
7300 	arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_id);
7301 	if (!arvif) {
7302 		ath12k_warn(ab, "invalid vdev %u in bcn tx status\n",
7303 			    vdev_id);
7304 		return;
7305 	}
7306 
7307 	ar = arvif->ar;
7308 	wiphy_work_queue(ath12k_ar_to_hw(ar)->wiphy, &arvif->bcn_tx_work);
7309 }
7310 
ath12k_vdev_stopped_event(struct ath12k_base * ab,struct sk_buff * skb)7311 static void ath12k_vdev_stopped_event(struct ath12k_base *ab, struct sk_buff *skb)
7312 {
7313 	struct ath12k *ar;
7314 	u32 vdev_id = 0;
7315 
7316 	if (ath12k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) {
7317 		ath12k_warn(ab, "failed to extract vdev stopped event");
7318 		return;
7319 	}
7320 
7321 	rcu_read_lock();
7322 	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
7323 	if (!ar) {
7324 		ath12k_warn(ab, "invalid vdev id in vdev stopped ev %d",
7325 			    vdev_id);
7326 		rcu_read_unlock();
7327 		return;
7328 	}
7329 
7330 	complete(&ar->vdev_setup_done);
7331 
7332 	rcu_read_unlock();
7333 
7334 	ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id);
7335 }
7336 
/* Handle a WMI management-RX event: pull the RX metadata, build the
 * mac80211 RX status (band/freq/signal/rate, decryption flags), strip
 * the protected bit for non-robust PMF frames, and hand the frame to
 * mac80211. The skb is consumed on every path.
 */
ath12k_mgmt_rx_event(struct ath12k_base * ab,struct sk_buff * skb)7337 static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
7338 {
7339 	struct ath12k_wmi_mgmt_rx_arg rx_ev = {};
7340 	struct ath12k *ar;
7341 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
7342 	struct ieee80211_hdr *hdr;
7343 	u16 fc;
7344 	struct ieee80211_supported_band *sband;
7345 	s32 noise_floor;
7346 
7347 	if (ath12k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) {
7348 		ath12k_warn(ab, "failed to extract mgmt rx event");
7349 		dev_kfree_skb(skb);
7350 		return;
7351 	}
7352 
	/* The RX control block may hold stale TX-path data; start clean */
7353 	memset(status, 0, sizeof(*status));
7354 
7355 	ath12k_dbg(ab, ATH12K_DBG_MGMT, "mgmt rx event status %08x\n",
7356 		   rx_ev.status);
7357 
7358 	rcu_read_lock();
7359 	ar = ath12k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id);
7360 
7361 	if (!ar) {
7362 		ath12k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n",
7363 			    rx_ev.pdev_id);
7364 		dev_kfree_skb(skb);
7365 		goto exit;
7366 	}
7367 
	/* Drop frames during CAC and frames firmware flagged as corrupt */
7368 	if ((test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) ||
7369 	    (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
7370 			     WMI_RX_STATUS_ERR_KEY_CACHE_MISS |
7371 			     WMI_RX_STATUS_ERR_CRC))) {
7372 		dev_kfree_skb(skb);
7373 		goto exit;
7374 	}
7375 
7376 	if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
7377 		status->flag |= RX_FLAG_MMIC_ERROR;
7378 
	/* Derive the band: 6 GHz from the frequency, 2.4/5 GHz from the
	 * channel number; anything else means our advertised channel list
	 * and the firmware disagree.
	 */
7379 	if (rx_ev.chan_freq >= ATH12K_MIN_6GHZ_FREQ &&
7380 	    rx_ev.chan_freq <= ATH12K_MAX_6GHZ_FREQ) {
7381 		status->band = NL80211_BAND_6GHZ;
7382 		status->freq = rx_ev.chan_freq;
7383 	} else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
7384 		status->band = NL80211_BAND_2GHZ;
7385 	} else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5GHZ_CHAN) {
7386 		status->band = NL80211_BAND_5GHZ;
7387 	} else {
7388 		/* Shouldn't happen unless list of advertised channels to
7389 		 * mac80211 has been changed.
7390 		 */
7391 		WARN_ON_ONCE(1);
7392 		dev_kfree_skb(skb);
7393 		goto exit;
7394 	}
7395 
7396 	if (rx_ev.phy_mode == MODE_11B &&
7397 	    (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ))
7398 		ath12k_dbg(ab, ATH12K_DBG_WMI,
7399 			   "wmi mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band);
7400 
7401 	sband = &ar->mac.sbands[status->band];
7402 
7403 	if (status->band != NL80211_BAND_6GHZ)
7404 		status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
7405 							      status->band);
7406 
	/* snr is relative to the noise floor; convert to absolute dBm */
7407 	spin_lock_bh(&ar->data_lock);
7408 	noise_floor = ath12k_pdev_get_noise_floor(ar);
7409 	spin_unlock_bh(&ar->data_lock);
7410 
7411 	status->signal = rx_ev.snr + noise_floor;
7412 	status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
7413 
7414 	hdr = (struct ieee80211_hdr *)skb->data;
7415 	fc = le16_to_cpu(hdr->frame_control);
7416 
7417 	/* Firmware is guaranteed to report all essential management frames via
7418 	 * WMI while it can deliver some extra via HTT. Since there can be
7419 	 * duplicates split the reporting wrt monitor/sniffing.
7420 	 */
7421 	status->flag |= RX_FLAG_SKIP_MONITOR;
7422 
7423 	/* In case of PMF, FW delivers decrypted frames with Protected Bit set
7424 	 * including group privacy action frames.
7425 	 */
7426 	if (ieee80211_has_protected(hdr->frame_control)) {
7427 		status->flag |= RX_FLAG_DECRYPTED;
7428 
7429 		if (!ieee80211_is_robust_mgmt_frame(skb)) {
7430 			status->flag |= RX_FLAG_IV_STRIPPED |
7431 					RX_FLAG_MMIC_STRIPPED;
7432 			hdr->frame_control = __cpu_to_le16(fc &
7433 					     ~IEEE80211_FCTL_PROTECTED);
7434 		}
7435 	}
7436 
7437 	if (ieee80211_is_beacon(hdr->frame_control))
7438 		ath12k_mac_handle_beacon(ar, skb);
7439 
7440 	ath12k_dbg(ab, ATH12K_DBG_MGMT,
7441 		   "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
7442 		   skb, skb->len,
7443 		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
7444 
7445 	ath12k_dbg(ab, ATH12K_DBG_MGMT,
7446 		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
7447 		   status->freq, status->band, status->signal,
7448 		   status->rate_idx);
7449 
	/* Ownership of the skb passes to mac80211 here */
7450 	ieee80211_rx_ni(ath12k_ar_to_hw(ar), skb);
7451 
7452 exit:
7453 	rcu_read_unlock();
7454 }
7455 
ath12k_mgmt_tx_compl_event(struct ath12k_base * ab,struct sk_buff * skb)7456 static void ath12k_mgmt_tx_compl_event(struct ath12k_base *ab, struct sk_buff *skb)
7457 {
7458 	struct wmi_mgmt_tx_compl_event tx_compl_param = {};
7459 	struct ath12k *ar;
7460 
7461 	if (ath12k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) {
7462 		ath12k_warn(ab, "failed to extract mgmt tx compl event");
7463 		return;
7464 	}
7465 
7466 	rcu_read_lock();
7467 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(tx_compl_param.pdev_id));
7468 	if (!ar) {
7469 		ath12k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n",
7470 			    tx_compl_param.pdev_id);
7471 		goto exit;
7472 	}
7473 
7474 	wmi_process_mgmt_tx_comp(ar, le32_to_cpu(tx_compl_param.desc_id),
7475 				 le32_to_cpu(tx_compl_param.status),
7476 				 le32_to_cpu(tx_compl_param.ack_rssi));
7477 
7478 	ath12k_dbg(ab, ATH12K_DBG_MGMT,
7479 		   "mgmt tx compl ev pdev_id %d, desc_id %d, status %d",
7480 		   tx_compl_param.pdev_id, tx_compl_param.desc_id,
7481 		   tx_compl_param.status);
7482 
7483 exit:
7484 	rcu_read_unlock();
7485 }
7486 
/* Find the radio whose scan is in @state for @vdev_id, scanning all
 * active pdevs. Each radio's scan state is sampled under its data_lock.
 * Caller must hold the RCU read lock. Returns NULL when no radio
 * matches.
 */
static struct ath12k *ath12k_get_ar_on_scan_state(struct ath12k_base *ab,
						  u32 vdev_id,
						  enum ath12k_scan_state state)
{
	struct ath12k_pdev *pdev;
	struct ath12k *ar;
	bool match;
	int idx;

	for (idx = 0; idx < ab->num_radios; idx++) {
		pdev = rcu_dereference(ab->pdevs_active[idx]);
		if (!pdev || !pdev->ar)
			continue;

		ar = pdev->ar;

		spin_lock_bh(&ar->data_lock);
		match = ar->scan.state == state &&
			ar->scan.arvif &&
			ar->scan.arvif->vdev_id == vdev_id;
		spin_unlock_bh(&ar->data_lock);

		if (match)
			return ar;
	}

	return NULL;
}
7512 
/* Handle a WMI scan event: locate the radio that owns the scan and
 * advance the driver's scan state machine under that radio's data_lock.
 */
ath12k_scan_event(struct ath12k_base * ab,struct sk_buff * skb)7513 static void ath12k_scan_event(struct ath12k_base *ab, struct sk_buff *skb)
7514 {
7515 	struct ath12k *ar;
7516 	struct wmi_scan_event scan_ev = {};
7517 
7518 	if (ath12k_pull_scan_ev(ab, skb, &scan_ev) != 0) {
7519 		ath12k_warn(ab, "failed to extract scan event");
7520 		return;
7521 	}
7522 
7523 	rcu_read_lock();
7524 
7525 	/* In case the scan was cancelled, ex. during interface teardown,
7526 	 * the interface will not be found in active interfaces.
7527 	 * Rather, in such scenarios, iterate over the active pdev's to
7528 	 * search 'ar' if the corresponding 'ar' scan is ABORTING and the
7529 	 * aborting scan's vdev id matches this event info.
7530 	 */
7531 	if (le32_to_cpu(scan_ev.event_type) == WMI_SCAN_EVENT_COMPLETED &&
7532 	    le32_to_cpu(scan_ev.reason) == WMI_SCAN_REASON_CANCELLED) {
7533 		ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id),
7534 						 ATH12K_SCAN_ABORTING);
7535 		if (!ar)
7536 			ar = ath12k_get_ar_on_scan_state(ab, le32_to_cpu(scan_ev.vdev_id),
7537 							 ATH12K_SCAN_RUNNING);
7538 	} else {
7539 		ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(scan_ev.vdev_id));
7540 	}
7541 
7542 	if (!ar) {
7543 		ath12k_warn(ab, "Received scan event for unknown vdev");
7544 		rcu_read_unlock();
7545 		return;
7546 	}
7547 
	/* data_lock serializes scan.state transitions with other paths */
7548 	spin_lock_bh(&ar->data_lock);
7549 
7550 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7551 		   "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
7552 		   ath12k_wmi_event_scan_type_str(le32_to_cpu(scan_ev.event_type),
7553 						  le32_to_cpu(scan_ev.reason)),
7554 		   le32_to_cpu(scan_ev.event_type),
7555 		   le32_to_cpu(scan_ev.reason),
7556 		   le32_to_cpu(scan_ev.channel_freq),
7557 		   le32_to_cpu(scan_ev.scan_req_id),
7558 		   le32_to_cpu(scan_ev.scan_id),
7559 		   le32_to_cpu(scan_ev.vdev_id),
7560 		   ath12k_scan_state_str(ar->scan.state), ar->scan.state);
7561 
7562 	switch (le32_to_cpu(scan_ev.event_type)) {
7563 	case WMI_SCAN_EVENT_STARTED:
7564 		ath12k_wmi_event_scan_started(ar);
7565 		break;
7566 	case WMI_SCAN_EVENT_COMPLETED:
7567 		ath12k_wmi_event_scan_completed(ar);
7568 		break;
7569 	case WMI_SCAN_EVENT_BSS_CHANNEL:
7570 		ath12k_wmi_event_scan_bss_chan(ar);
7571 		break;
7572 	case WMI_SCAN_EVENT_FOREIGN_CHAN:
7573 		ath12k_wmi_event_scan_foreign_chan(ar, le32_to_cpu(scan_ev.channel_freq));
7574 		break;
7575 	case WMI_SCAN_EVENT_START_FAILED:
7576 		ath12k_warn(ab, "received scan start failure event\n");
7577 		ath12k_wmi_event_scan_start_failed(ar);
7578 		break;
7579 	case WMI_SCAN_EVENT_DEQUEUED:
7580 		__ath12k_mac_scan_finish(ar);
7581 		break;
	/* Remaining event types carry no state transition for the driver */
7582 	case WMI_SCAN_EVENT_PREEMPTED:
7583 	case WMI_SCAN_EVENT_RESTARTED:
7584 	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
7585 	default:
7586 		break;
7587 	}
7588 
7589 	spin_unlock_bh(&ar->data_lock);
7590 
7591 	rcu_read_unlock();
7592 }
7593 
/* Handle a WMI peer STA-kickout event: resolve the peer (validating the
 * MLO link for multi-link STAs) and either report a beacon miss (STA
 * mode, inactivity) or a low-ack condition to mac80211. The entire
 * lookup runs under ab->base_lock to keep the peer entry stable.
 */
ath12k_peer_sta_kickout_event(struct ath12k_base * ab,struct sk_buff * skb)7594 static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff *skb)
7595 {
7596 	struct wmi_peer_sta_kickout_arg arg = {};
7597 	struct ath12k_link_vif *arvif;
7598 	struct ieee80211_sta *sta;
7599 	struct ath12k_peer *peer;
7600 	unsigned int link_id;
7601 	struct ath12k *ar;
7602 
7603 	if (ath12k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
7604 		ath12k_warn(ab, "failed to extract peer sta kickout event");
7605 		return;
7606 	}
7607 
7608 	rcu_read_lock();
7609 
7610 	spin_lock_bh(&ab->base_lock);
7611 
7612 	peer = ath12k_peer_find_by_addr(ab, arg.mac_addr);
7613 
7614 	if (!peer) {
7615 		ath12k_warn(ab, "peer not found %pM\n",
7616 			    arg.mac_addr);
7617 		goto exit;
7618 	}
7619 
7620 	arvif = ath12k_mac_get_arvif_by_vdev_id(ab, peer->vdev_id);
7621 	if (!arvif) {
7622 		ath12k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
7623 			    peer->vdev_id);
7624 		goto exit;
7625 	}
7626 
7627 	ar = arvif->ar;
7628 
	/* For MLO peers resolve the station via its link address and make
	 * sure the link the firmware kicked matches the peer's link.
	 */
7629 	if (peer->mlo) {
7630 		sta = ieee80211_find_sta_by_link_addrs(ath12k_ar_to_hw(ar),
7631 						       arg.mac_addr,
7632 						       NULL, &link_id);
7633 		if (peer->link_id != link_id) {
7634 			ath12k_warn(ab,
7635 				    "Spurious quick kickout for MLO STA %pM with invalid link_id, peer: %d, sta: %d\n",
7636 				    arg.mac_addr, peer->link_id, link_id);
7637 			goto exit;
7638 		}
7639 	} else {
7640 		sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
7641 						   arg.mac_addr, NULL);
7642 	}
7643 	if (!sta) {
7644 		ath12k_warn(ab, "Spurious quick kickout for %sSTA %pM\n",
7645 			    peer->mlo ? "MLO " : "", arg.mac_addr);
7646 		goto exit;
7647 	}
7648 
7649 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7650 		   "peer sta kickout event %pM reason: %d rssi: %d\n",
7651 		   arg.mac_addr, arg.reason, arg.rssi);
7652 
7653 	switch (arg.reason) {
7654 	case WMI_PEER_STA_KICKOUT_REASON_INACTIVITY:
		/* Inactivity on a STA interface is treated as beacon loss;
		 * all other cases fall through to the low-ack report.
		 */
7655 		if (arvif->ahvif->vif->type == NL80211_IFTYPE_STATION) {
7656 			ath12k_mac_handle_beacon_miss(ar, arvif);
7657 			break;
7658 		}
7659 		fallthrough;
7660 	default:
7661 		ieee80211_report_low_ack(sta, 10);
7662 	}
7663 
7664 exit:
7665 	spin_unlock_bh(&ab->base_lock);
7666 	rcu_read_unlock();
7667 }
7668 
ath12k_roam_event(struct ath12k_base * ab,struct sk_buff * skb)7669 static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb)
7670 {
7671 	struct ath12k_link_vif *arvif;
7672 	struct wmi_roam_event roam_ev = {};
7673 	struct ath12k *ar;
7674 	u32 vdev_id;
7675 	u8 roam_reason;
7676 
7677 	if (ath12k_pull_roam_ev(ab, skb, &roam_ev) != 0) {
7678 		ath12k_warn(ab, "failed to extract roam event");
7679 		return;
7680 	}
7681 
7682 	vdev_id = le32_to_cpu(roam_ev.vdev_id);
7683 	roam_reason = u32_get_bits(le32_to_cpu(roam_ev.reason),
7684 				   WMI_ROAM_REASON_MASK);
7685 
7686 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7687 		   "wmi roam event vdev %u reason %d rssi %d\n",
7688 		   vdev_id, roam_reason, roam_ev.rssi);
7689 
7690 	guard(rcu)();
7691 	arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_id);
7692 	if (!arvif) {
7693 		ath12k_warn(ab, "invalid vdev id in roam ev %d", vdev_id);
7694 		return;
7695 	}
7696 
7697 	ar = arvif->ar;
7698 
7699 	if (roam_reason >= WMI_ROAM_REASON_MAX)
7700 		ath12k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
7701 			    roam_reason, vdev_id);
7702 
7703 	switch (roam_reason) {
7704 	case WMI_ROAM_REASON_BEACON_MISS:
7705 		ath12k_mac_handle_beacon_miss(ar, arvif);
7706 		break;
7707 	case WMI_ROAM_REASON_BETTER_AP:
7708 	case WMI_ROAM_REASON_LOW_RSSI:
7709 	case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
7710 	case WMI_ROAM_REASON_HO_FAILED:
7711 		ath12k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n",
7712 			    roam_reason, vdev_id);
7713 		break;
7714 	}
7715 }
7716 
/* Handle a WMI channel-info event emitted during scans: on the START
 * response for a channel, (re)initialize that channel's survey entry
 * with noise floor and time/busy counters.
 */
ath12k_chan_info_event(struct ath12k_base * ab,struct sk_buff * skb)7717 static void ath12k_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
7718 {
7719 	struct wmi_chan_info_event ch_info_ev = {};
7720 	struct ath12k *ar;
7721 	struct survey_info *survey;
7722 	int idx;
7723 	/* HW channel counters frequency value in hertz */
7724 	u32 cc_freq_hz = ab->cc_freq_hz;
7725 
7726 	if (ath12k_pull_chan_info_ev(ab, skb, &ch_info_ev) != 0) {
7727 		ath12k_warn(ab, "failed to extract chan info event");
7728 		return;
7729 	}
7730 
7731 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7732 		   "chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n",
7733 		   ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq,
7734 		   ch_info_ev.cmd_flags, ch_info_ev.noise_floor,
7735 		   ch_info_ev.rx_clear_count, ch_info_ev.cycle_count,
7736 		   ch_info_ev.mac_clk_mhz);
7737 
	/* END_RESP marks the close of the report sequence; nothing to store */
7738 	if (le32_to_cpu(ch_info_ev.cmd_flags) == WMI_CHAN_INFO_END_RESP) {
7739 		ath12k_dbg(ab, ATH12K_DBG_WMI, "chan info report completed\n");
7740 		return;
7741 	}
7742 
7743 	rcu_read_lock();
7744 	ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(ch_info_ev.vdev_id));
7745 	if (!ar) {
7746 		ath12k_warn(ab, "invalid vdev id in chan info ev %d",
7747 			    ch_info_ev.vdev_id);
7748 		rcu_read_unlock();
7749 		return;
7750 	}
	/* data_lock guards both scan.state and the survey table */
7751 	spin_lock_bh(&ar->data_lock);
7752 
7753 	switch (ar->scan.state) {
7754 	case ATH12K_SCAN_IDLE:
7755 	case ATH12K_SCAN_STARTING:
7756 		ath12k_warn(ab, "received chan info event without a scan request, ignoring\n");
7757 		goto exit;
7758 	case ATH12K_SCAN_RUNNING:
7759 	case ATH12K_SCAN_ABORTING:
7760 		break;
7761 	}
7762 
7763 	idx = freq_to_idx(ar, le32_to_cpu(ch_info_ev.freq));
7764 	if (idx >= ARRAY_SIZE(ar->survey)) {
7765 		ath12k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n",
7766 			    ch_info_ev.freq, idx);
7767 		goto exit;
7768 	}
7769 
7770 	/* If FW provides MAC clock frequency in Mhz, overriding the initialized
7771 	 * HW channel counters frequency value
7772 	 */
7773 	if (ch_info_ev.mac_clk_mhz)
7774 		cc_freq_hz = (le32_to_cpu(ch_info_ev.mac_clk_mhz) * 1000);
7775 
7776 	if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_START_RESP) {
7777 		survey = &ar->survey[idx];
7778 		memset(survey, 0, sizeof(*survey));
7779 		survey->noise = le32_to_cpu(ch_info_ev.noise_floor);
7780 		survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
7781 				 SURVEY_INFO_TIME_BUSY;
		/* Convert HW cycle counters to time using the counter clock */
7782 		survey->time = div_u64(le32_to_cpu(ch_info_ev.cycle_count), cc_freq_hz);
7783 		survey->time_busy = div_u64(le32_to_cpu(ch_info_ev.rx_clear_count),
7784 					    cc_freq_hz);
7785 	}
7786 exit:
7787 	spin_unlock_bh(&ar->data_lock);
7788 	rcu_read_unlock();
7789 }
7790 
7791 static void
/* Handle a WMI pdev BSS channel-info event: reassemble the 64-bit
 * cycle counters from their hi/lo halves, convert them to survey
 * time/busy/rx/tx figures for the channel, and wake the waiter that
 * requested the BSS survey.
 */
ath12k_pdev_bss_chan_info_event(struct ath12k_base * ab,struct sk_buff * skb)7792 ath12k_pdev_bss_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
7793 {
7794 	struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {};
7795 	struct survey_info *survey;
7796 	struct ath12k *ar;
	/* HW channel counter clock, used to convert cycle counts to time */
7797 	u32 cc_freq_hz = ab->cc_freq_hz;
7798 	u64 busy, total, tx, rx, rx_bss;
7799 	int idx;
7800 
7801 	if (ath12k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) {
7802 		ath12k_warn(ab, "failed to extract pdev bss chan info event");
7803 		return;
7804 	}
7805 
	/* Each counter arrives split into two 32-bit little-endian halves */
7806 	busy = (u64)(le32_to_cpu(bss_ch_info_ev.rx_clear_count_high)) << 32 |
7807 		le32_to_cpu(bss_ch_info_ev.rx_clear_count_low);
7808 
7809 	total = (u64)(le32_to_cpu(bss_ch_info_ev.cycle_count_high)) << 32 |
7810 		le32_to_cpu(bss_ch_info_ev.cycle_count_low);
7811 
7812 	tx = (u64)(le32_to_cpu(bss_ch_info_ev.tx_cycle_count_high)) << 32 |
7813 		le32_to_cpu(bss_ch_info_ev.tx_cycle_count_low);
7814 
7815 	rx = (u64)(le32_to_cpu(bss_ch_info_ev.rx_cycle_count_high)) << 32 |
7816 		le32_to_cpu(bss_ch_info_ev.rx_cycle_count_low);
7817 
7818 	rx_bss = (u64)(le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_high)) << 32 |
7819 		le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_low);
7820 
	/* FreeBSD lacks %llu for u64; use %ju with uintmax_t casts there */
7821 	ath12k_dbg(ab, ATH12K_DBG_WMI,
7822 #if defined(__linux__)
7823 		   "pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
7824 		   bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq,
7825 		   bss_ch_info_ev.noise_floor, busy, total,
7826 		   tx, rx, rx_bss);
7827 #elif defined(__FreeBSD__)
7828 		   "pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %ju total %ju tx %ju rx %ju rx_bss %ju\n",
7829 		   bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq,
7830 		   bss_ch_info_ev.noise_floor, (uintmax_t)busy, (uintmax_t)total,
7831 		   (uintmax_t)tx, (uintmax_t)rx, (uintmax_t)rx_bss);
7832 #endif
7833 
7834 	rcu_read_lock();
7835 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(bss_ch_info_ev.pdev_id));
7836 
7837 	if (!ar) {
7838 		ath12k_warn(ab, "invalid pdev id %d in bss_chan_info event\n",
7839 			    bss_ch_info_ev.pdev_id);
7840 		rcu_read_unlock();
7841 		return;
7842 	}
7843 
7844 	spin_lock_bh(&ar->data_lock);
7845 	idx = freq_to_idx(ar, le32_to_cpu(bss_ch_info_ev.freq));
7846 	if (idx >= ARRAY_SIZE(ar->survey)) {
7847 		ath12k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
7848 			    bss_ch_info_ev.freq, idx);
7849 		goto exit;
7850 	}
7851 
7852 	survey = &ar->survey[idx];
7853 
7854 	survey->noise     = le32_to_cpu(bss_ch_info_ev.noise_floor);
7855 	survey->time      = div_u64(total, cc_freq_hz);
7856 	survey->time_busy = div_u64(busy, cc_freq_hz);
7857 	survey->time_rx   = div_u64(rx_bss, cc_freq_hz);
7858 	survey->time_tx   = div_u64(tx, cc_freq_hz);
7859 	survey->filled   |= (SURVEY_INFO_NOISE_DBM |
7860 			     SURVEY_INFO_TIME |
7861 			     SURVEY_INFO_TIME_BUSY |
7862 			     SURVEY_INFO_TIME_RX |
7863 			     SURVEY_INFO_TIME_TX);
7864 exit:
7865 	spin_unlock_bh(&ar->data_lock);
	/* Waiter is released even when the survey entry was not updated */
7866 	complete(&ar->bss_survey_done);
7867 
7868 	rcu_read_unlock();
7869 }
7870 
/* Handle WMI vdev install-key completion: record the firmware's key
 * install status on the owning radio and wake the waiter blocked on
 * ar->install_key_done.
 */
static void ath12k_vdev_install_key_compl_event(struct ath12k_base *ab,
						struct sk_buff *skb)
{
	struct wmi_vdev_install_key_complete_arg install_key_compl = {};
	struct ath12k *ar;

	if (ath12k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) {
		ath12k_warn(ab, "failed to extract install key compl event");
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "vdev install key ev idx %d flags %08x macaddr %pM status %d\n",
		   install_key_compl.key_idx, install_key_compl.key_flags,
		   install_key_compl.macaddr, install_key_compl.status);

	/* RCU protects the vdev-id -> ar lookup against concurrent teardown */
	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id);
	if (!ar) {
		ath12k_warn(ab, "invalid vdev id in install key compl ev %d",
			    install_key_compl.vdev_id);
		rcu_read_unlock();
		return;
	}

	ar->install_key_status = 0;

	if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) {
		ath12k_warn(ab, "install key failed for %pM status %d\n",
			    install_key_compl.macaddr, install_key_compl.status);
		ar->install_key_status = install_key_compl.status;
	}

	/* install_key_status must be published before waking the waiter */
	complete(&ar->install_key_done);
	rcu_read_unlock();
}
7907 
/* TLV parser for WMI service-availability events.  Services beyond the
 * base 128 arrive as packed u32 bitmaps; translate the set bits into
 * ab->wmi_ab.svc_map so the rest of the driver can use test_bit().
 */
static int ath12k_wmi_tlv_services_parser(struct ath12k_base *ab,
					  u16 tag, u16 len,
					  const void *ptr,
					  void *data)
{
	const struct wmi_service_available_event *ev;
	u16 wmi_ext2_service_words;
#if defined(__linux__)
	__le32 *wmi_ext2_service_bitmap;
#elif defined(__FreeBSD__)
	const __le32 *wmi_ext2_service_bitmap;
#endif
	int i, j;
	u16 expected_len;

	/* Both handled tags carry at least one full bitmap segment */
	expected_len = WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(u32);
	if (len < expected_len) {
		ath12k_warn(ab, "invalid length %d for the WMI services available tag 0x%x\n",
			    len, tag);
		return -EINVAL;
	}

	switch (tag) {
	case WMI_TAG_SERVICE_AVAILABLE_EVENT:
		/* Ext services: bits WMI_MAX_SERVICE..WMI_MAX_EXT_SERVICE-1 */
#if defined(__linux__)
		ev = (struct wmi_service_available_event *)ptr;
#elif defined(__FreeBSD__)
		ev = (const struct wmi_service_available_event *)ptr;
#endif
		/* i indexes the 32-bit segments, j is the global service
		 * number; the inner loop walks the 32 bits of segment i.
		 */
		for (i = 0, j = WMI_MAX_SERVICE;
		     i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
		     i++) {
			do {
				if (le32_to_cpu(ev->wmi_service_segment_bitmap[i]) &
				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
					set_bit(j, ab->wmi_ab.svc_map);
			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
		}

		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "wmi_ext_service_bitmap 0x%x 0x%x 0x%x 0x%x",
			   ev->wmi_service_segment_bitmap[0],
			   ev->wmi_service_segment_bitmap[1],
			   ev->wmi_service_segment_bitmap[2],
			   ev->wmi_service_segment_bitmap[3]);
		break;
	case WMI_TAG_ARRAY_UINT32:
		/* Ext2 services: bits WMI_MAX_EXT_SERVICE..WMI_MAX_EXT2_SERVICE-1,
		 * delivered as a raw u32 array whose TLV length sets the count.
		 */
#if defined(__linux__)
		wmi_ext2_service_bitmap = (__le32 *)ptr;
#elif defined(__FreeBSD__)
		wmi_ext2_service_bitmap = (const __le32 *)ptr;
#endif
		wmi_ext2_service_words = len / sizeof(u32);
		for (i = 0, j = WMI_MAX_EXT_SERVICE;
		     i < wmi_ext2_service_words && j < WMI_MAX_EXT2_SERVICE;
		     i++) {
			do {
				if (__le32_to_cpu(wmi_ext2_service_bitmap[i]) &
				    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
					set_bit(j, ab->wmi_ab.svc_map);
			} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
			ath12k_dbg(ab, ATH12K_DBG_WMI,
				   "wmi_ext2_service bitmap 0x%08x\n",
				   __le32_to_cpu(wmi_ext2_service_bitmap[i]));
		}

		break;
	}
	return 0;
}
7978 
ath12k_service_available_event(struct ath12k_base * ab,struct sk_buff * skb)7979 static int ath12k_service_available_event(struct ath12k_base *ab, struct sk_buff *skb)
7980 {
7981 	int ret;
7982 
7983 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
7984 				  ath12k_wmi_tlv_services_parser,
7985 				  NULL);
7986 	return ret;
7987 }
7988 
ath12k_peer_assoc_conf_event(struct ath12k_base * ab,struct sk_buff * skb)7989 static void ath12k_peer_assoc_conf_event(struct ath12k_base *ab, struct sk_buff *skb)
7990 {
7991 	struct wmi_peer_assoc_conf_arg peer_assoc_conf = {};
7992 	struct ath12k *ar;
7993 
7994 	if (ath12k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) {
7995 		ath12k_warn(ab, "failed to extract peer assoc conf event");
7996 		return;
7997 	}
7998 
7999 	ath12k_dbg(ab, ATH12K_DBG_WMI,
8000 		   "peer assoc conf ev vdev id %d macaddr %pM\n",
8001 		   peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr);
8002 
8003 	rcu_read_lock();
8004 	ar = ath12k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id);
8005 
8006 	if (!ar) {
8007 		ath12k_warn(ab, "invalid vdev id in peer assoc conf ev %d",
8008 			    peer_assoc_conf.vdev_id);
8009 		rcu_read_unlock();
8010 		return;
8011 	}
8012 
8013 	complete(&ar->peer_assoc_done);
8014 	rcu_read_unlock();
8015 }
8016 
/* Append formatted VDEV firmware stats to the debugfs buffer.
 *
 * Walks fw_stats->vdevs and prints one record per VDEV that still has a
 * matching link vif.  *length is the starting offset into buf and is
 * advanced to the new end offset.  NOTE: *length is only updated inside
 * the loop, so if no record is printed the header text written above is
 * not accounted for in the caller's offset.
 */
static void
ath12k_wmi_fw_vdev_stats_dump(struct ath12k *ar,
			      struct ath12k_fw_stats *fw_stats,
			      char *buf, u32 *length)
{
	const struct ath12k_fw_stats_vdev *vdev;
	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
	struct ath12k_link_vif *arvif;
	u32 len = *length;
	u8 *vif_macaddr;
	int i;

	len += scnprintf(buf + len, buf_len - len, "\n");
	len += scnprintf(buf + len, buf_len - len, "%30s\n",
			 "ath12k VDEV stats");
	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
			 "=================");

	list_for_each_entry(vdev, &fw_stats->vdevs, list) {
		/* Skip VDEVs whose vif has gone away since the snapshot */
		arvif = ath12k_mac_get_arvif(ar, vdev->vdev_id);
		if (!arvif)
			continue;
		vif_macaddr = arvif->ahvif->vif->addr;

		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
				 "VDEV ID", vdev->vdev_id);
		len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
				 "VDEV MAC address", vif_macaddr);
		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
				 "beacon snr", vdev->beacon_snr);
		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
				 "data snr", vdev->data_snr);
		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
				 "num rx frames", vdev->num_rx_frames);
		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
				 "num rts fail", vdev->num_rts_fail);
		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
				 "num rts success", vdev->num_rts_success);
		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
				 "num rx err", vdev->num_rx_err);
		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
				 "num rx discard", vdev->num_rx_discard);
		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
				 "num tx not acked", vdev->num_tx_not_acked);

		/* Per-access-category counters */
		for (i = 0 ; i < WLAN_MAX_AC; i++)
			len += scnprintf(buf + len, buf_len - len,
					"%25s [%02d] %u\n",
					"num tx frames", i,
					vdev->num_tx_frames[i]);

		for (i = 0 ; i < WLAN_MAX_AC; i++)
			len += scnprintf(buf + len, buf_len - len,
					"%25s [%02d] %u\n",
					"num tx frames retries", i,
					vdev->num_tx_frames_retries[i]);

		for (i = 0 ; i < WLAN_MAX_AC; i++)
			len += scnprintf(buf + len, buf_len - len,
					"%25s [%02d] %u\n",
					"num tx frames failures", i,
					vdev->num_tx_frames_failures[i]);

		for (i = 0 ; i < MAX_TX_RATE_VALUES; i++)
			len += scnprintf(buf + len, buf_len - len,
					"%25s [%02d] 0x%08x\n",
					"tx rate history", i,
					vdev->tx_rate_history[i]);
		for (i = 0 ; i < MAX_TX_RATE_VALUES; i++)
			len += scnprintf(buf + len, buf_len - len,
					"%25s [%02d] %u\n",
					"beacon rssi history", i,
					vdev->beacon_rssi_history[i]);

		len += scnprintf(buf + len, buf_len - len, "\n");
		*length = len;
	}
}
8095 
8096 static void
ath12k_wmi_fw_bcn_stats_dump(struct ath12k * ar,struct ath12k_fw_stats * fw_stats,char * buf,u32 * length)8097 ath12k_wmi_fw_bcn_stats_dump(struct ath12k *ar,
8098 			     struct ath12k_fw_stats *fw_stats,
8099 			     char *buf, u32 *length)
8100 {
8101 	const struct ath12k_fw_stats_bcn *bcn;
8102 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
8103 	struct ath12k_link_vif *arvif;
8104 	u32 len = *length;
8105 	size_t num_bcn;
8106 
8107 	num_bcn = list_count_nodes(&fw_stats->bcn);
8108 
8109 	len += scnprintf(buf + len, buf_len - len, "\n");
8110 	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
8111 			 "ath12k Beacon stats", num_bcn);
8112 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8113 			 "===================");
8114 
8115 	list_for_each_entry(bcn, &fw_stats->bcn, list) {
8116 		arvif = ath12k_mac_get_arvif(ar, bcn->vdev_id);
8117 		if (!arvif)
8118 			continue;
8119 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8120 				 "VDEV ID", bcn->vdev_id);
8121 		len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
8122 				 "VDEV MAC address", arvif->ahvif->vif->addr);
8123 		len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8124 				 "================");
8125 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8126 				 "Num of beacon tx success", bcn->tx_bcn_succ_cnt);
8127 		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
8128 				 "Num of beacon tx failures", bcn->tx_bcn_outage_cnt);
8129 
8130 		len += scnprintf(buf + len, buf_len - len, "\n");
8131 		*length = len;
8132 	}
8133 }
8134 
8135 static void
ath12k_wmi_fw_pdev_base_stats_dump(const struct ath12k_fw_stats_pdev * pdev,char * buf,u32 * length,u64 fw_soc_drop_cnt)8136 ath12k_wmi_fw_pdev_base_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
8137 				   char *buf, u32 *length, u64 fw_soc_drop_cnt)
8138 {
8139 	u32 len = *length;
8140 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
8141 
8142 	len = scnprintf(buf + len, buf_len - len, "\n");
8143 	len += scnprintf(buf + len, buf_len - len, "%30s\n",
8144 			"ath12k PDEV stats");
8145 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8146 			"=================");
8147 
8148 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8149 			"Channel noise floor", pdev->ch_noise_floor);
8150 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8151 			"Channel TX power", pdev->chan_tx_power);
8152 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8153 			"TX frame count", pdev->tx_frame_count);
8154 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8155 			"RX frame count", pdev->rx_frame_count);
8156 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8157 			"RX clear count", pdev->rx_clear_count);
8158 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8159 			"Cycle count", pdev->cycle_count);
8160 	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
8161 			"PHY error count", pdev->phy_err_count);
8162 	len += scnprintf(buf + len, buf_len - len, "%30s %10llu\n",
8163 			"soc drop count", fw_soc_drop_cnt);
8164 
8165 	*length = len;
8166 }
8167 
/* Append the PDEV TX-path counters to the stats buffer starting at
 * offset *length, advancing *length to the new end offset.  Signed
 * counters print with %10d, unsigned ones with %10u.
 */
static void
ath12k_wmi_fw_pdev_tx_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
				 char *buf, u32 *length)
{
	u32 len = *length;
	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;

	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
			 "ath12k PDEV TX stats");
	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
			 "====================");

	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "HTT cookies queued", pdev->comp_queued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "HTT cookies disp.", pdev->comp_delivered);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MSDU queued", pdev->msdu_enqued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDU queued", pdev->mpdu_enqued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MSDUs dropped", pdev->wmm_drop);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Local enqued", pdev->local_enqued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Local freed", pdev->local_freed);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "HW queued", pdev->hw_queued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PPDUs reaped", pdev->hw_reaped);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Num underruns", pdev->underrun);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PPDUs cleaned", pdev->tx_abort);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDUs requeued", pdev->mpdus_requed);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Excessive retries", pdev->tx_ko);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "HW rate", pdev->data_rc);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Sched self triggers", pdev->self_triggers);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Dropped due to SW retries",
			 pdev->sw_retry_failure);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Illegal rate phy errors",
			 pdev->illgl_rate_phy_err);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "PDEV continuous xretry", pdev->pdev_cont_xretry);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "TX timeout", pdev->pdev_tx_timeout);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "PDEV resets", pdev->pdev_resets);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Stateless TIDs alloc failures",
			 pdev->stateless_tid_alloc_failure);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "PHY underrun", pdev->phy_underrun);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "MPDU is more than txop limit", pdev->txop_ovf);
	*length = len;
}
8231 
8232 static void
ath12k_wmi_fw_pdev_rx_stats_dump(const struct ath12k_fw_stats_pdev * pdev,char * buf,u32 * length)8233 ath12k_wmi_fw_pdev_rx_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
8234 				 char *buf, u32 *length)
8235 {
8236 	u32 len = *length;
8237 	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
8238 
8239 	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
8240 			 "ath12k PDEV RX stats");
8241 	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
8242 			 "====================");
8243 
8244 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8245 			 "Mid PPDU route change",
8246 			 pdev->mid_ppdu_route_change);
8247 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8248 			 "Tot. number of statuses", pdev->status_rcvd);
8249 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8250 			 "Extra frags on rings 0", pdev->r0_frags);
8251 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8252 			 "Extra frags on rings 1", pdev->r1_frags);
8253 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8254 			 "Extra frags on rings 2", pdev->r2_frags);
8255 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8256 			 "Extra frags on rings 3", pdev->r3_frags);
8257 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8258 			 "MSDUs delivered to HTT", pdev->htt_msdus);
8259 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8260 			 "MPDUs delivered to HTT", pdev->htt_mpdus);
8261 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8262 			 "MSDUs delivered to stack", pdev->loc_msdus);
8263 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8264 			 "MPDUs delivered to stack", pdev->loc_mpdus);
8265 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8266 			 "Oversized AMSUs", pdev->oversize_amsdu);
8267 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8268 			 "PHY errors", pdev->phy_errs);
8269 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8270 			 "PHY errors drops", pdev->phy_err_drop);
8271 	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
8272 			 "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
8273 	*length = len;
8274 }
8275 
8276 static void
ath12k_wmi_fw_pdev_stats_dump(struct ath12k * ar,struct ath12k_fw_stats * fw_stats,char * buf,u32 * length)8277 ath12k_wmi_fw_pdev_stats_dump(struct ath12k *ar,
8278 			      struct ath12k_fw_stats *fw_stats,
8279 			      char *buf, u32 *length)
8280 {
8281 	const struct ath12k_fw_stats_pdev *pdev;
8282 	u32 len = *length;
8283 
8284 	pdev = list_first_entry_or_null(&fw_stats->pdevs,
8285 					struct ath12k_fw_stats_pdev, list);
8286 	if (!pdev) {
8287 		ath12k_warn(ar->ab, "failed to get pdev stats\n");
8288 		return;
8289 	}
8290 
8291 	ath12k_wmi_fw_pdev_base_stats_dump(pdev, buf, &len,
8292 					   ar->ab->fw_soc_drop_count);
8293 	ath12k_wmi_fw_pdev_tx_stats_dump(pdev, buf, &len);
8294 	ath12k_wmi_fw_pdev_rx_stats_dump(pdev, buf, &len);
8295 
8296 	*length = len;
8297 }
8298 
/* Render the requested firmware stats category into buf (a buffer of
 * ATH12K_FW_STATS_BUF_SIZE bytes) and NUL-terminate it.  The fw_stats
 * lists are protected by ar->data_lock while dumping.
 */
void ath12k_wmi_fw_stats_dump(struct ath12k *ar,
			      struct ath12k_fw_stats *fw_stats,
			      u32 stats_id, char *buf)
{
	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
	u32 len = 0;

	spin_lock_bh(&ar->data_lock);

	if (stats_id == WMI_REQUEST_VDEV_STAT)
		ath12k_wmi_fw_vdev_stats_dump(ar, fw_stats, buf, &len);
	else if (stats_id == WMI_REQUEST_BCN_STAT)
		ath12k_wmi_fw_bcn_stats_dump(ar, fw_stats, buf, &len);
	else if (stats_id == WMI_REQUEST_PDEV_STAT)
		ath12k_wmi_fw_pdev_stats_dump(ar, fw_stats, buf, &len);

	spin_unlock_bh(&ar->data_lock);

	/* Terminate at the end of the text, clamping if the buffer filled */
	buf[len >= buf_len ? len - 1 : len] = 0;
}
8329 
8330 static void
ath12k_wmi_pull_vdev_stats(const struct wmi_vdev_stats_params * src,struct ath12k_fw_stats_vdev * dst)8331 ath12k_wmi_pull_vdev_stats(const struct wmi_vdev_stats_params *src,
8332 			   struct ath12k_fw_stats_vdev *dst)
8333 {
8334 	int i;
8335 
8336 	dst->vdev_id = le32_to_cpu(src->vdev_id);
8337 	dst->beacon_snr = le32_to_cpu(src->beacon_snr);
8338 	dst->data_snr = le32_to_cpu(src->data_snr);
8339 	dst->num_rx_frames = le32_to_cpu(src->num_rx_frames);
8340 	dst->num_rts_fail = le32_to_cpu(src->num_rts_fail);
8341 	dst->num_rts_success = le32_to_cpu(src->num_rts_success);
8342 	dst->num_rx_err = le32_to_cpu(src->num_rx_err);
8343 	dst->num_rx_discard = le32_to_cpu(src->num_rx_discard);
8344 	dst->num_tx_not_acked = le32_to_cpu(src->num_tx_not_acked);
8345 
8346 	for (i = 0; i < WLAN_MAX_AC; i++)
8347 		dst->num_tx_frames[i] =
8348 			le32_to_cpu(src->num_tx_frames[i]);
8349 
8350 	for (i = 0; i < WLAN_MAX_AC; i++)
8351 		dst->num_tx_frames_retries[i] =
8352 			le32_to_cpu(src->num_tx_frames_retries[i]);
8353 
8354 	for (i = 0; i < WLAN_MAX_AC; i++)
8355 		dst->num_tx_frames_failures[i] =
8356 			le32_to_cpu(src->num_tx_frames_failures[i]);
8357 
8358 	for (i = 0; i < MAX_TX_RATE_VALUES; i++)
8359 		dst->tx_rate_history[i] =
8360 			le32_to_cpu(src->tx_rate_history[i]);
8361 
8362 	for (i = 0; i < MAX_TX_RATE_VALUES; i++)
8363 		dst->beacon_rssi_history[i] =
8364 			le32_to_cpu(src->beacon_rssi_history[i]);
8365 }
8366 
8367 static void
ath12k_wmi_pull_bcn_stats(const struct ath12k_wmi_bcn_stats_params * src,struct ath12k_fw_stats_bcn * dst)8368 ath12k_wmi_pull_bcn_stats(const struct ath12k_wmi_bcn_stats_params *src,
8369 			  struct ath12k_fw_stats_bcn *dst)
8370 {
8371 	dst->vdev_id = le32_to_cpu(src->vdev_id);
8372 	dst->tx_bcn_succ_cnt = le32_to_cpu(src->tx_bcn_succ_cnt);
8373 	dst->tx_bcn_outage_cnt = le32_to_cpu(src->tx_bcn_outage_cnt);
8374 }
8375 
8376 static void
ath12k_wmi_pull_pdev_stats_base(const struct ath12k_wmi_pdev_base_stats_params * src,struct ath12k_fw_stats_pdev * dst)8377 ath12k_wmi_pull_pdev_stats_base(const struct ath12k_wmi_pdev_base_stats_params *src,
8378 				struct ath12k_fw_stats_pdev *dst)
8379 {
8380 	dst->ch_noise_floor = a_sle32_to_cpu(src->chan_nf);
8381 	dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
8382 	dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
8383 	dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
8384 	dst->cycle_count = __le32_to_cpu(src->cycle_count);
8385 	dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
8386 	dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
8387 }
8388 
/* Convert the PDEV TX stats record to host order.  a_sle32_to_cpu()
 * decodes the signed little-endian counters, __le32_to_cpu() the
 * unsigned ones.
 */
static void
ath12k_wmi_pull_pdev_stats_tx(const struct ath12k_wmi_pdev_tx_stats_params *src,
			      struct ath12k_fw_stats_pdev *dst)
{
	dst->comp_queued = a_sle32_to_cpu(src->comp_queued);
	dst->comp_delivered = a_sle32_to_cpu(src->comp_delivered);
	dst->msdu_enqued = a_sle32_to_cpu(src->msdu_enqued);
	dst->mpdu_enqued = a_sle32_to_cpu(src->mpdu_enqued);
	dst->wmm_drop = a_sle32_to_cpu(src->wmm_drop);
	dst->local_enqued = a_sle32_to_cpu(src->local_enqued);
	dst->local_freed = a_sle32_to_cpu(src->local_freed);
	dst->hw_queued = a_sle32_to_cpu(src->hw_queued);
	dst->hw_reaped = a_sle32_to_cpu(src->hw_reaped);
	dst->underrun = a_sle32_to_cpu(src->underrun);
	dst->tx_abort = a_sle32_to_cpu(src->tx_abort);
	dst->mpdus_requed = a_sle32_to_cpu(src->mpdus_requed);
	dst->tx_ko = __le32_to_cpu(src->tx_ko);
	dst->data_rc = __le32_to_cpu(src->data_rc);
	dst->self_triggers = __le32_to_cpu(src->self_triggers);
	dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
	dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
	dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
	dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
	dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
	dst->stateless_tid_alloc_failure =
		__le32_to_cpu(src->stateless_tid_alloc_failure);
	dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
	dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
}
8418 
/* Convert the PDEV RX stats record to host order; all fields are signed
 * little-endian (a_sle32) counters.
 */
static void
ath12k_wmi_pull_pdev_stats_rx(const struct ath12k_wmi_pdev_rx_stats_params *src,
			      struct ath12k_fw_stats_pdev *dst)
{
	dst->mid_ppdu_route_change =
		a_sle32_to_cpu(src->mid_ppdu_route_change);
	dst->status_rcvd = a_sle32_to_cpu(src->status_rcvd);
	dst->r0_frags = a_sle32_to_cpu(src->r0_frags);
	dst->r1_frags = a_sle32_to_cpu(src->r1_frags);
	dst->r2_frags = a_sle32_to_cpu(src->r2_frags);
	dst->r3_frags = a_sle32_to_cpu(src->r3_frags);
	dst->htt_msdus = a_sle32_to_cpu(src->htt_msdus);
	dst->htt_mpdus = a_sle32_to_cpu(src->htt_mpdus);
	dst->loc_msdus = a_sle32_to_cpu(src->loc_msdus);
	dst->loc_mpdus = a_sle32_to_cpu(src->loc_mpdus);
	dst->oversize_amsdu = a_sle32_to_cpu(src->oversize_amsdu);
	dst->phy_errs = a_sle32_to_cpu(src->phy_errs);
	dst->phy_err_drop = a_sle32_to_cpu(src->phy_err_drop);
	dst->mpdu_errs = a_sle32_to_cpu(src->mpdu_errs);
}
8439 
/* Parse the WMI_TAG_ARRAY_BYTE payload of an update-stats event.
 *
 * The byte array packs num_vdev_stats VDEV records, then num_bcn_stats
 * beacon records, then num_pdev_stats PDEV records.  Each record is
 * length-checked, converted to host order and appended as a freshly
 * allocated node on the matching parse->stats list.  Returns 0 or a
 * negative errno on malformed input.
 */
static int ath12k_wmi_tlv_fw_stats_data_parse(struct ath12k_base *ab,
					      struct wmi_tlv_fw_stats_parse *parse,
					      const void *ptr,
					      u16 len)
{
	const struct wmi_stats_event *ev = parse->ev;
	struct ath12k_fw_stats *stats = parse->stats;
	struct ath12k *ar;
	struct ath12k_link_vif *arvif;
	struct ieee80211_sta *sta;
	struct ath12k_sta *ahsta;
	struct ath12k_link_sta *arsta;
	int i, ret = 0;
	const void *data = ptr;

	/* The WMI_TAG_STATS_EVENT header TLV must have been seen first */
	if (!ev) {
		ath12k_warn(ab, "failed to fetch update stats ev");
		return -EPROTO;
	}

	if (!stats)
		return -EINVAL;

	rcu_read_lock();

	stats->pdev_id = le32_to_cpu(ev->pdev_id);
	ar = ath12k_mac_get_ar_by_pdev_id(ab, stats->pdev_id);
	if (!ar) {
		ath12k_warn(ab, "invalid pdev id %d in update stats event\n",
			    le32_to_cpu(ev->pdev_id));
		ret = -EPROTO;
		goto exit;
	}

	for (i = 0; i < le32_to_cpu(ev->num_vdev_stats); i++) {
		const struct wmi_vdev_stats_params *src;
		struct ath12k_fw_stats_vdev *dst;

		src = data;
		/* Bounds check before touching the record */
		if (len < sizeof(*src)) {
			ret = -EPROTO;
			goto exit;
		}

		/* Opportunistically update the beacon RSSI on the station
		 * matching this vdev's BSSID, if it is still known.
		 */
		arvif = ath12k_mac_get_arvif(ar, le32_to_cpu(src->vdev_id));
		if (arvif) {
			sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
							   arvif->bssid,
							   NULL);
			if (sta) {
				ahsta = ath12k_sta_to_ahsta(sta);
				arsta = &ahsta->deflink;
				arsta->rssi_beacon = le32_to_cpu(src->beacon_snr);
				ath12k_dbg(ab, ATH12K_DBG_WMI,
					   "wmi stats vdev id %d snr %d\n",
					   src->vdev_id, src->beacon_snr);
			} else {
				ath12k_dbg(ab, ATH12K_DBG_WMI,
					   "not found station bssid %pM for vdev stat\n",
					   arvif->bssid);
			}
		}

		data += sizeof(*src);
		len -= sizeof(*src);
		/* GFP_ATOMIC: we are inside rcu_read_lock() */
		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;
		ath12k_wmi_pull_vdev_stats(src, dst);
		stats->stats_id = WMI_REQUEST_VDEV_STAT;
		list_add_tail(&dst->list, &stats->vdevs);
	}
	for (i = 0; i < le32_to_cpu(ev->num_bcn_stats); i++) {
		const struct ath12k_wmi_bcn_stats_params *src;
		struct ath12k_fw_stats_bcn *dst;

		src = data;
		if (len < sizeof(*src)) {
			ret = -EPROTO;
			goto exit;
		}

		data += sizeof(*src);
		len -= sizeof(*src);
		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;
		ath12k_wmi_pull_bcn_stats(src, dst);
		stats->stats_id = WMI_REQUEST_BCN_STAT;
		list_add_tail(&dst->list, &stats->bcn);
	}
	for (i = 0; i < le32_to_cpu(ev->num_pdev_stats); i++) {
		const struct ath12k_wmi_pdev_stats_params *src;
		struct ath12k_fw_stats_pdev *dst;

		src = data;
		if (len < sizeof(*src)) {
			ret = -EPROTO;
			goto exit;
		}

		stats->stats_id = WMI_REQUEST_PDEV_STAT;

		data += sizeof(*src);
		len -= sizeof(*src);

		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;

		/* One record carries the base, TX and RX sub-blocks */
		ath12k_wmi_pull_pdev_stats_base(&src->base, dst);
		ath12k_wmi_pull_pdev_stats_tx(&src->tx, dst);
		ath12k_wmi_pull_pdev_stats_rx(&src->rx, dst);
		list_add_tail(&dst->list, &stats->pdevs);
	}

exit:
	rcu_read_unlock();
	return ret;
}
8560 
/* Parse one WMI_TAG_RSSI_STATS TLV of a per-chain RSSI stats array and
 * copy the per-chain average beacon RSSI onto the station matching the
 * reporting vdev's BSSID.  Returns 0 or a negative errno.
 *
 * guard(rcu)() (linux/cleanup.h) takes the RCU read lock for the rest of
 * the function and drops it automatically on every return path.
 */
static int ath12k_wmi_tlv_rssi_chain_parse(struct ath12k_base *ab,
					   u16 tag, u16 len,
					   const void *ptr, void *data)
{
	const struct wmi_rssi_stat_params *stats_rssi = ptr;
	struct wmi_tlv_fw_stats_parse *parse = data;
	const struct wmi_stats_event *ev = parse->ev;
	struct ath12k_fw_stats *stats = parse->stats;
	struct ath12k_link_vif *arvif;
	struct ath12k_link_sta *arsta;
	struct ieee80211_sta *sta;
	struct ath12k_sta *ahsta;
	struct ath12k *ar;
	int vdev_id;
	int j;

	/* The WMI_TAG_STATS_EVENT header TLV must have been seen first */
	if (!ev) {
		ath12k_warn(ab, "failed to fetch update stats ev");
		return -EPROTO;
	}

	if (tag != WMI_TAG_RSSI_STATS)
		return -EPROTO;

	if (!stats)
		return -EINVAL;

	stats->pdev_id = le32_to_cpu(ev->pdev_id);
	vdev_id = le32_to_cpu(stats_rssi->vdev_id);
	guard(rcu)();
	ar = ath12k_mac_get_ar_by_pdev_id(ab, stats->pdev_id);
	if (!ar) {
		ath12k_warn(ab, "invalid pdev id %d in rssi chain parse\n",
			    stats->pdev_id);
		return -EPROTO;
	}

	arvif = ath12k_mac_get_arvif(ar, vdev_id);
	if (!arvif) {
		ath12k_warn(ab, "not found vif for vdev id %d\n", vdev_id);
		return -EPROTO;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "stats bssid %pM vif %p\n",
		   arvif->bssid, arvif->ahvif->vif);

	sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
					   arvif->bssid,
					   NULL);
	if (!sta) {
		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "not found station of bssid %pM for rssi chain\n",
			   arvif->bssid);
		return -EPROTO;
	}

	ahsta = ath12k_sta_to_ahsta(sta);
	arsta = &ahsta->deflink;

	/* Firmware must report at least as many chains as we store */
	BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
		     ARRAY_SIZE(stats_rssi->rssi_avg_beacon));

	for (j = 0; j < ARRAY_SIZE(arsta->chain_signal); j++)
		arsta->chain_signal[j] = le32_to_cpu(stats_rssi->rssi_avg_beacon[j]);

	stats->stats_id = WMI_REQUEST_RSSI_PER_CHAIN_STAT;

	return 0;
}
8631 
/* Top-level TLV dispatcher for an update-stats event.  Records the stats
 * header, parses the packed byte array, and handles the optional
 * per-chain RSSI section.  Returns 0 or a negative errno.
 */
static int ath12k_wmi_tlv_fw_stats_parse(struct ath12k_base *ab,
					 u16 tag, u16 len,
					 const void *ptr, void *data)
{
	struct wmi_tlv_fw_stats_parse *parse = data;
	int ret = 0;

	switch (tag) {
	case WMI_TAG_STATS_EVENT:
		parse->ev = ptr;
		break;
	case WMI_TAG_ARRAY_BYTE:
		ret = ath12k_wmi_tlv_fw_stats_data_parse(ab, parse, ptr, len);
		break;
	case WMI_TAG_PER_CHAIN_RSSI_STATS:
		/* The stats header TLV must precede this section; a
		 * malformed event would otherwise cause a NULL deref of
		 * parse->ev below (the data parser has the same guard).
		 */
		if (!parse->ev) {
			ath12k_warn(ab, "failed to fetch update stats ev");
			return -EPROTO;
		}
		parse->rssi = ptr;
		if (le32_to_cpu(parse->ev->stats_id) & WMI_REQUEST_RSSI_PER_CHAIN_STAT)
			parse->rssi_num = le32_to_cpu(parse->rssi->num_per_chain_rssi);
		break;
	case WMI_TAG_ARRAY_STRUCT:
		/* Only one per-chain RSSI array is consumed per event */
		if (parse->rssi_num && !parse->chain_rssi_done) {
			ret = ath12k_wmi_tlv_iter(ab, ptr, len,
						  ath12k_wmi_tlv_rssi_chain_parse,
						  parse);
			if (ret)
				return ret;

			parse->chain_rssi_done = true;
		}
		break;
	default:
		break;
	}
	return ret;
}
8667 
ath12k_wmi_pull_fw_stats(struct ath12k_base * ab,struct sk_buff * skb,struct ath12k_fw_stats * stats)8668 static int ath12k_wmi_pull_fw_stats(struct ath12k_base *ab, struct sk_buff *skb,
8669 				    struct ath12k_fw_stats *stats)
8670 {
8671 	struct wmi_tlv_fw_stats_parse parse = {};
8672 
8673 	stats->stats_id = 0;
8674 	parse.stats = stats;
8675 
8676 	return ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
8677 				   ath12k_wmi_tlv_fw_stats_parse,
8678 				   &parse);
8679 }
8680 
/* Merge a parsed VDEV/BCN stats update into ar->fw_stats and decide
 * whether the pending request is complete.
 *
 * Called from ath12k_update_stats_event() with ar->data_lock held.
 * VDEV stats may be split across several events, so fw_stats_done is
 * only signalled once as many VDEV records have arrived as there are
 * started VDEVs across all radios.
 */
static void ath12k_wmi_fw_stats_process(struct ath12k *ar,
					struct ath12k_fw_stats *stats)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_pdev *pdev;
	bool is_end = true;
	size_t total_vdevs_started = 0;
	int i;

	if (stats->stats_id == WMI_REQUEST_VDEV_STAT) {
		if (list_empty(&stats->vdevs)) {
			ath12k_warn(ab, "empty vdev stats");
			return;
		}
		/* FW sends all the active VDEV stats irrespective of PDEV,
		 * hence limit until the count of all VDEVs started
		 */
		rcu_read_lock();
		for (i = 0; i < ab->num_radios; i++) {
			pdev = rcu_dereference(ab->pdevs_active[i]);
			if (pdev && pdev->ar)
				total_vdevs_started += pdev->ar->num_started_vdevs;
		}
		rcu_read_unlock();

		/* is_end stays true when no VDEV is started at all */
		if (total_vdevs_started)
			is_end = ((++ar->fw_stats.num_vdev_recvd) ==
				  total_vdevs_started);

		list_splice_tail_init(&stats->vdevs,
				      &ar->fw_stats.vdevs);

		if (is_end)
			complete(&ar->fw_stats_done);

		return;
	}

	if (stats->stats_id == WMI_REQUEST_BCN_STAT) {
		if (list_empty(&stats->bcn)) {
			ath12k_warn(ab, "empty beacon stats");
			return;
		}

		/* Beacon stats are treated as complete after one event */
		list_splice_tail_init(&stats->bcn,
				      &ar->fw_stats.bcn);
		complete(&ar->fw_stats_done);
	}
}
8730 
/* Handler for WMI_UPDATE_STATS_EVENTID: parse the event, route it to the
 * owning radio and signal waiters of the matching stats request.
 */
static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct ath12k_fw_stats stats = {};
	struct ath12k *ar;
	int ret;

	INIT_LIST_HEAD(&stats.pdevs);
	INIT_LIST_HEAD(&stats.vdevs);
	INIT_LIST_HEAD(&stats.bcn);

	ret = ath12k_wmi_pull_fw_stats(ab, skb, &stats);
	if (ret) {
		ath12k_warn(ab, "failed to pull fw stats: %d\n", ret);
		goto free;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI, "event update stats");

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_pdev_id(ab, stats.pdev_id);
	if (!ar) {
		rcu_read_unlock();
		/* NOTE(review): ret is 0 on this path (pull succeeded), so the
		 * trailing %d always prints 0, not an error code.
		 */
		ath12k_warn(ab, "failed to get ar for pdev_id %d: %d\n",
			    stats.pdev_id, ret);
		goto free;
	}

	/* ar->data_lock protects ar->fw_stats while splicing below */
	spin_lock_bh(&ar->data_lock);

	/* Handle WMI_REQUEST_PDEV_STAT status update */
	if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
		list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs);
		complete(&ar->fw_stats_done);
		goto complete;
	}

	/* Handle WMI_REQUEST_RSSI_PER_CHAIN_STAT status update */
	if (stats.stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) {
		complete(&ar->fw_stats_done);
		goto complete;
	}

	/* Handle WMI_REQUEST_VDEV_STAT and WMI_REQUEST_BCN_STAT updates. */
	ath12k_wmi_fw_stats_process(ar, &stats);

complete:
	complete(&ar->fw_stats_complete);
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();

	/* Since the stats's pdev, vdev and beacon list are spliced and reinitialised
	 * at this point, no need to free the individual list.
	 */
	return;

free:
	ath12k_fw_stats_free(&stats);
}
8789 
8790 /* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
8791  * is not part of BDF CTL(Conformance test limits) table entries.
8792  */
ath12k_pdev_ctl_failsafe_check_event(struct ath12k_base * ab,struct sk_buff * skb)8793 static void ath12k_pdev_ctl_failsafe_check_event(struct ath12k_base *ab,
8794 						 struct sk_buff *skb)
8795 {
8796 	const void **tb;
8797 	const struct wmi_pdev_ctl_failsafe_chk_event *ev;
8798 	int ret;
8799 
8800 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
8801 	if (IS_ERR(tb)) {
8802 		ret = PTR_ERR(tb);
8803 		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
8804 		return;
8805 	}
8806 
8807 	ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT];
8808 	if (!ev) {
8809 		ath12k_warn(ab, "failed to fetch pdev ctl failsafe check ev");
8810 		kfree(tb);
8811 		return;
8812 	}
8813 
8814 	ath12k_dbg(ab, ATH12K_DBG_WMI,
8815 		   "pdev ctl failsafe check ev status %d\n",
8816 		   ev->ctl_failsafe_status);
8817 
8818 	/* If ctl_failsafe_status is set to 1 FW will max out the Transmit power
8819 	 * to 10 dBm else the CTL power entry in the BDF would be picked up.
8820 	 */
8821 	if (ev->ctl_failsafe_status != 0)
8822 		ath12k_warn(ab, "pdev ctl failsafe failure status %d",
8823 			    ev->ctl_failsafe_status);
8824 
8825 	kfree(tb);
8826 }
8827 
8828 static void
ath12k_wmi_process_csa_switch_count_event(struct ath12k_base * ab,const struct ath12k_wmi_pdev_csa_event * ev,const u32 * vdev_ids)8829 ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
8830 					  const struct ath12k_wmi_pdev_csa_event *ev,
8831 					  const u32 *vdev_ids)
8832 {
8833 	u32 current_switch_count = le32_to_cpu(ev->current_switch_count);
8834 	u32 num_vdevs = le32_to_cpu(ev->num_vdevs);
8835 	struct ieee80211_bss_conf *conf;
8836 	struct ath12k_link_vif *arvif;
8837 	struct ath12k_vif *ahvif;
8838 	int i;
8839 
8840 	rcu_read_lock();
8841 	for (i = 0; i < num_vdevs; i++) {
8842 		arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);
8843 
8844 		if (!arvif) {
8845 			ath12k_warn(ab, "Recvd csa status for unknown vdev %d",
8846 				    vdev_ids[i]);
8847 			continue;
8848 		}
8849 		ahvif = arvif->ahvif;
8850 
8851 		if (arvif->link_id >= IEEE80211_MLD_MAX_NUM_LINKS) {
8852 			ath12k_warn(ab, "Invalid CSA switch count even link id: %d\n",
8853 				    arvif->link_id);
8854 			continue;
8855 		}
8856 
8857 		conf = rcu_dereference(ahvif->vif->link_conf[arvif->link_id]);
8858 		if (!conf) {
8859 			ath12k_warn(ab, "unable to access bss link conf in process csa for vif %pM link %u\n",
8860 				    ahvif->vif->addr, arvif->link_id);
8861 			continue;
8862 		}
8863 
8864 		if (!arvif->is_up || !conf->csa_active)
8865 			continue;
8866 
8867 		/* Finish CSA when counter reaches zero */
8868 		if (!current_switch_count) {
8869 			ieee80211_csa_finish(ahvif->vif, arvif->link_id);
8870 			arvif->current_cntdown_counter = 0;
8871 		} else if (current_switch_count > 1) {
8872 			/* If the count in event is not what we expect, don't update the
8873 			 * mac80211 count. Since during beacon Tx failure, count in the
8874 			 * firmware will not decrement and this event will come with the
8875 			 * previous count value again
8876 			 */
8877 			if (current_switch_count != arvif->current_cntdown_counter)
8878 				continue;
8879 
8880 			arvif->current_cntdown_counter =
8881 				ieee80211_beacon_update_cntdwn(ahvif->vif,
8882 							       arvif->link_id);
8883 		}
8884 	}
8885 	rcu_read_unlock();
8886 }
8887 
/* Handler for WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID: validate the TLVs
 * and forward the event plus its vdev id array to the CSA processor.
 */
static void
ath12k_wmi_pdev_csa_switch_count_status_event(struct ath12k_base *ab,
					      struct sk_buff *skb)
{
	const void **tb;
	const struct ath12k_wmi_pdev_csa_event *ev;
	const u32 *vdev_ids;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return;
	}

	/* Both the fixed params and the trailing vdev id array are required */
	ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT];
	vdev_ids = tb[WMI_TAG_ARRAY_UINT32];

	if (!ev || !vdev_ids) {
		ath12k_warn(ab, "failed to fetch pdev csa switch count ev");
		kfree(tb);
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "pdev csa switch count %d for pdev %d, num_vdevs %d",
		   ev->current_switch_count, ev->pdev_id,
		   ev->num_vdevs);

	ath12k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids);

	kfree(tb);
}
8922 
/* Handler for the pdev DFS radar detection event: locate the radio and its
 * active channel context, then notify mac80211 (unless radar events are
 * administratively blocked on this radio).
 */
static void
ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	const void **tb;
	struct ath12k_mac_get_any_chanctx_conf_arg arg;
	const struct ath12k_wmi_pdev_radar_event *ev;
	struct ath12k *ar;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return;
	}

	ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT];

	if (!ev) {
		ath12k_warn(ab, "failed to fetch pdev dfs radar detected ev");
		kfree(tb);
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d",
		   ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width,
		   ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
		   ev->freq_offset, ev->sidx);

	/* RCU protects the ar lookup and spans the chanctx iteration below */
	rcu_read_lock();

	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id));

	if (!ar) {
		ath12k_warn(ab, "radar detected in invalid pdev %d\n",
			    ev->pdev_id);
		goto exit;
	}

	/* Find any active channel context on this radio to report against */
	arg.ar = ar;
	arg.chanctx_conf = NULL;
	ieee80211_iter_chan_contexts_atomic(ath12k_ar_to_hw(ar),
					    ath12k_mac_get_any_chanctx_conf_iter, &arg);
	if (!arg.chanctx_conf) {
		ath12k_warn(ab, "failed to find valid chanctx_conf in radar detected event\n");
		goto exit;
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_REG, "DFS Radar Detected in pdev %d\n",
		   ev->pdev_id);

	/* dfs_block_radar_events is a debug knob: log instead of reporting */
	if (ar->dfs_block_radar_events)
		ath12k_info(ab, "DFS Radar detected, but ignored as requested\n");
	else
		ieee80211_radar_detected(ath12k_ar_to_hw(ar), arg.chanctx_conf);

exit:
	rcu_read_unlock();

	kfree(tb);
}
8985 
/* Handler for segmented factory-test-mode (FTM) WMI events: extract the
 * byte-array payload and hand it to the testmode layer.
 *
 * Fix: removed the dead `tb = NULL;` store after the final kfree() — tb is
 * a local about to go out of scope, so the assignment had no effect.
 */
static void ath12k_tm_wmi_event_segmented(struct ath12k_base *ab, u32 cmd_id,
					  struct sk_buff *skb)
{
	const struct ath12k_wmi_ftm_event *ev;
	const void **tb;
	int ret;
	u16 length;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);

	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse ftm event tlv: %d\n", ret);
		return;
	}

	ev = tb[WMI_TAG_ARRAY_BYTE];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch ftm msg\n");
		kfree(tb);
		return;
	}

	/* Payload length excludes the leading TLV header */
	length = skb->len - TLV_HDR_SIZE;
	ath12k_tm_process_event(ab, cmd_id, ev, length);
	kfree(tb);
}
9014 
/* Handler for the pdev temperature event: extract and log the reading.
 * The ar lookup only validates that the pdev id maps to a known radio;
 * the temperature value itself is not stored anywhere here.
 */
static void
ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
				  struct sk_buff *skb)
{
	struct ath12k *ar;
	struct wmi_pdev_temperature_event ev = {};

	if (ath12k_pull_pdev_temp_ev(ab, skb, &ev) != 0) {
		ath12k_warn(ab, "failed to extract pdev temperature event");
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id);

	rcu_read_lock();

	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev.pdev_id));
	if (!ar) {
		ath12k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id);
		goto exit;
	}

exit:
	rcu_read_unlock();
}
9041 
/* Handler for WMI_TAG_HOST_SWFDA_EVENT: firmware asks the host to supply a
 * FILS discovery frame. Currently only logged as a warning — no frame is
 * provided from here.
 */
static void ath12k_fils_discovery_event(struct ath12k_base *ab,
					struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_fils_discovery_event *ev;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab,
			    "failed to parse FILS discovery event tlv %d\n",
			    ret);
		return;
	}

	ev = tb[WMI_TAG_HOST_SWFDA_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch FILS discovery event\n");
		kfree(tb);
		return;
	}

	ath12k_warn(ab,
		    "FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n",
		    ev->vdev_id, ev->fils_tt, ev->tbtt);

	kfree(tb);
}
9071 
ath12k_probe_resp_tx_status_event(struct ath12k_base * ab,struct sk_buff * skb)9072 static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab,
9073 					      struct sk_buff *skb)
9074 {
9075 	const void **tb;
9076 	const struct wmi_probe_resp_tx_status_event *ev;
9077 	int ret;
9078 
9079 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
9080 	if (IS_ERR(tb)) {
9081 		ret = PTR_ERR(tb);
9082 		ath12k_warn(ab,
9083 			    "failed to parse probe response transmission status event tlv: %d\n",
9084 			    ret);
9085 		return;
9086 	}
9087 
9088 	ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT];
9089 	if (!ev) {
9090 		ath12k_warn(ab,
9091 			    "failed to fetch probe response transmission status event");
9092 		kfree(tb);
9093 		return;
9094 	}
9095 
9096 	if (ev->tx_status)
9097 		ath12k_warn(ab,
9098 			    "Probe response transmission failed for vdev_id %u, status %u\n",
9099 			    ev->vdev_id, ev->tx_status);
9100 
9101 	kfree(tb);
9102 }
9103 
/* Handler for the P2P Notice-of-Absence event: validate both required TLVs
 * and push the updated NoA attribute to the owning vdev.
 *
 * Returns 0 on success, -EPROTO when a mandatory TLV is missing, -EINVAL
 * for an unknown vdev, or the TLV-parse error.
 */
static int ath12k_wmi_p2p_noa_event(struct ath12k_base *ab,
				    struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_p2p_noa_event *ev;
	const struct ath12k_wmi_p2p_noa_info *noa;
	struct ath12k *ar;
	int ret, vdev_id;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse P2P NoA TLV: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_P2P_NOA_EVENT];
	noa = tb[WMI_TAG_P2P_NOA_INFO];

	/* Both the fixed event params and the NoA descriptor are mandatory */
	if (!ev || !noa) {
		ret = -EPROTO;
		goto out;
	}

	vdev_id = __le32_to_cpu(ev->vdev_id);

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "wmi tlv p2p noa vdev_id %i descriptors %u\n",
		   vdev_id, le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM));

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
	if (!ar) {
		ath12k_warn(ab, "invalid vdev id %d in P2P NoA event\n",
			    vdev_id);
		ret = -EINVAL;
		goto unlock;
	}

	ath12k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);

	ret = 0;

unlock:
	rcu_read_unlock();
out:
	kfree(tb);
	return ret;
}
9153 
/* Handler for the rfkill GPIO state change event: record the new radio
 * state under base_lock and kick the rfkill worker to act on it.
 */
static void ath12k_rfkill_state_change_event(struct ath12k_base *ab,
					     struct sk_buff *skb)
{
	const struct wmi_rfkill_state_change_event *ev;
	const void **tb;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return;
	}

	/* NOTE(review): unlike sibling handlers, a missing TLV is dropped
	 * silently here (no warning) — confirm this is intentional.
	 */
	ev = tb[WMI_TAG_RFKILL_EVENT];
	if (!ev) {
		kfree(tb);
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_MAC,
		   "wmi tlv rfkill state change gpio %d type %d radio_state %d\n",
		   le32_to_cpu(ev->gpio_pin_num),
		   le32_to_cpu(ev->int_type),
		   le32_to_cpu(ev->radio_state));

	/* base_lock guards rfkill_radio_on against concurrent readers */
	spin_lock_bh(&ab->base_lock);
	ab->rfkill_radio_on = (ev->radio_state == cpu_to_le32(WMI_RFKILL_RADIO_STATE_ON));
	spin_unlock_bh(&ab->base_lock);

	/* Defer the actual rfkill handling to process context */
	queue_work(ab->workqueue, &ab->rfkill_work);
	kfree(tb);
}
9187 
/* Forward raw WMI diag event payloads to the tracing subsystem unchanged. */
static void
ath12k_wmi_diag_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	trace_ath12k_wmi_diag(ab, skb->data, skb->len);
}
9193 
ath12k_wmi_twt_enable_event(struct ath12k_base * ab,struct sk_buff * skb)9194 static void ath12k_wmi_twt_enable_event(struct ath12k_base *ab,
9195 					struct sk_buff *skb)
9196 {
9197 	const void **tb;
9198 	const struct wmi_twt_enable_event *ev;
9199 	int ret;
9200 
9201 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
9202 	if (IS_ERR(tb)) {
9203 		ret = PTR_ERR(tb);
9204 		ath12k_warn(ab, "failed to parse wmi twt enable status event tlv: %d\n",
9205 			    ret);
9206 		return;
9207 	}
9208 
9209 	ev = tb[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT];
9210 	if (!ev) {
9211 		ath12k_warn(ab, "failed to fetch twt enable wmi event\n");
9212 		goto exit;
9213 	}
9214 
9215 	ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt enable event pdev id %u status %u\n",
9216 		   le32_to_cpu(ev->pdev_id),
9217 		   le32_to_cpu(ev->status));
9218 
9219 exit:
9220 	kfree(tb);
9221 }
9222 
/* Handler for the TWT disable completion event: log the per-pdev status
 * reported by firmware. No host state is modified.
 */
static void ath12k_wmi_twt_disable_event(struct ath12k_base *ab,
					 struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_twt_disable_event *ev;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse wmi twt disable status event tlv: %d\n",
			    ret);
		return;
	}

	ev = tb[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch twt disable wmi event\n");
		goto exit;
	}

	ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt disable event pdev id %d status %u\n",
		   le32_to_cpu(ev->pdev_id),
		   le32_to_cpu(ev->status));

exit:
	kfree(tb);
}
9251 
/* TLV iterator callback for the WOW wakeup-host event: record the wake
 * reason and, for page-fault wakeups, validate and dump the fault buffer.
 *
 * Returns 0 to continue iteration, -EINVAL on a malformed page-fault TLV.
 *
 * Fix: corrected the warning-message typo "invalid wo reason" ->
 * "invalid wow reason".
 */
static int ath12k_wmi_wow_wakeup_host_parse(struct ath12k_base *ab,
					    u16 tag, u16 len,
					    const void *ptr, void *data)
{
	const struct wmi_wow_ev_pg_fault_param *pf_param;
	const struct wmi_wow_ev_param *param;
	struct wmi_wow_ev_arg *arg = data;
	int pf_len;

	switch (tag) {
	case WMI_TAG_WOW_EVENT_INFO:
		param = ptr;
		arg->wake_reason = le32_to_cpu(param->wake_reason);
		ath12k_dbg(ab, ATH12K_DBG_WMI, "wow wakeup host reason %d %s\n",
			   arg->wake_reason, wow_reason(arg->wake_reason));
		break;

	case WMI_TAG_ARRAY_BYTE:
		/* The byte array is only meaningful for page-fault wakeups */
		if (arg && arg->wake_reason == WOW_REASON_PAGE_FAULT) {
			pf_param = ptr;
			pf_len = le32_to_cpu(pf_param->len);
			/* Reject lengths that exceed the TLV payload or are
			 * negative after the u32 -> int conversion.
			 */
			if (pf_len > len - sizeof(pf_len) ||
			    pf_len < 0) {
				ath12k_warn(ab, "invalid wow reason page fault buffer len %d\n",
					    pf_len);
				return -EINVAL;
			}
			ath12k_dbg(ab, ATH12K_DBG_WMI, "wow_reason_page_fault len %d\n",
				   pf_len);
			ath12k_dbg_dump(ab, ATH12K_DBG_WMI,
					"wow_reason_page_fault packet present",
					"wow_pg_fault ",
					pf_param->data,
					pf_len);
		}
		break;
	default:
		break;
	}

	return 0;
}
9294 
ath12k_wmi_event_wow_wakeup_host(struct ath12k_base * ab,struct sk_buff * skb)9295 static void ath12k_wmi_event_wow_wakeup_host(struct ath12k_base *ab, struct sk_buff *skb)
9296 {
9297 	struct wmi_wow_ev_arg arg = { };
9298 	int ret;
9299 
9300 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
9301 				  ath12k_wmi_wow_wakeup_host_parse,
9302 				  &arg);
9303 	if (ret) {
9304 		ath12k_warn(ab, "failed to parse wmi wow wakeup host event tlv: %d\n",
9305 			    ret);
9306 		return;
9307 	}
9308 
9309 	complete(&ab->wow.wakeup_completed);
9310 }
9311 
/* Handler for the GTK offload status event: store the firmware's GTK replay
 * counter on the link vif and notify the supplicant of the rekey.
 */
static void ath12k_wmi_gtk_offload_status_event(struct ath12k_base *ab,
						struct sk_buff *skb)
{
	const struct wmi_gtk_offload_status_event *ev;
	struct ath12k_link_vif *arvif;
	__be64 replay_ctr_be;
	u64 replay_ctr;
	const void **tb;
	int ret;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
		return;
	}

	ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch gtk offload status ev");
		kfree(tb);
		return;
	}

	/* RCU spans both the arvif lookup and the rekey notification below */
	rcu_read_lock();
	arvif = ath12k_mac_get_arvif_by_vdev_id(ab, le32_to_cpu(ev->vdev_id));
	if (!arvif) {
		rcu_read_unlock();
		ath12k_warn(ab, "failed to get arvif for vdev_id:%d\n",
			    le32_to_cpu(ev->vdev_id));
		kfree(tb);
		return;
	}

	replay_ctr = le64_to_cpu(ev->replay_ctr);
	arvif->rekey_data.replay_ctr = replay_ctr;
	ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi gtk offload event refresh_cnt %d replay_ctr %llu\n",
		   le32_to_cpu(ev->refresh_cnt), replay_ctr);

	/* supplicant expects big-endian replay counter */
	replay_ctr_be = cpu_to_be64(replay_ctr);

	ieee80211_gtk_rekey_notify(arvif->ahvif->vif, arvif->bssid,
				   (void *)&replay_ctr_be, GFP_ATOMIC);

	rcu_read_unlock();

	kfree(tb);
}
9361 
/* Handler for the MLO setup complete event: find the radio matching the
 * reported pdev id, record the setup status and wake the waiter.
 */
static void ath12k_wmi_event_mlo_setup_complete(struct ath12k_base *ab,
						struct sk_buff *skb)
{
	const struct wmi_mlo_setup_complete_event *ev;
	struct ath12k *ar = NULL;
	struct ath12k_pdev *pdev;
	const void **tb;
	int ret, i;

	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath12k_warn(ab, "failed to parse mlo setup complete event tlv: %d\n",
			    ret);
		return;
	}

	ev = tb[WMI_TAG_MLO_SETUP_COMPLETE_EVENT];
	if (!ev) {
		ath12k_warn(ab, "failed to fetch mlo setup complete event\n");
		kfree(tb);
		return;
	}

	/* Quick range check before the lookup; the loop below still matches
	 * on the exact pdev_id, so this is only an early-out. NOTE(review):
	 * uses '>' not '>=' — presumably pdev ids are 1-based; confirm.
	 */
	if (le32_to_cpu(ev->pdev_id) > ab->num_radios)
		goto skip_lookup;

	for (i = 0; i < ab->num_radios; i++) {
		pdev = &ab->pdevs[i];
		if (pdev && pdev->pdev_id == le32_to_cpu(ev->pdev_id)) {
			ar = pdev->ar;
			break;
		}
	}

skip_lookup:
	if (!ar) {
		ath12k_warn(ab, "invalid pdev_id %d status %u in setup complete event\n",
			    ev->pdev_id, ev->status);
		goto out;
	}

	ar->mlo_setup_status = le32_to_cpu(ev->status);
	complete(&ar->mlo_setup_done);

out:
	kfree(tb);
}
9410 
ath12k_wmi_event_teardown_complete(struct ath12k_base * ab,struct sk_buff * skb)9411 static void ath12k_wmi_event_teardown_complete(struct ath12k_base *ab,
9412 					       struct sk_buff *skb)
9413 {
9414 	const struct wmi_mlo_teardown_complete_event *ev;
9415 	const void **tb;
9416 	int ret;
9417 
9418 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
9419 	if (IS_ERR(tb)) {
9420 		ret = PTR_ERR(tb);
9421 		ath12k_warn(ab, "failed to parse teardown complete event tlv: %d\n", ret);
9422 		return;
9423 	}
9424 
9425 	ev = tb[WMI_TAG_MLO_TEARDOWN_COMPLETE];
9426 	if (!ev) {
9427 		ath12k_warn(ab, "failed to fetch teardown complete event\n");
9428 		kfree(tb);
9429 		return;
9430 	}
9431 
9432 	kfree(tb);
9433 }
9434 
9435 #ifdef CONFIG_ATH12K_DEBUGFS
/* Copy a raw TPC stats array TLV into the pre-allocated destination buffer
 * selected by the current event_count stage.
 *
 * The destination buffers were sized from the lengths recorded in the
 * earlier fixed-param TLVs; each copy is bounded by the incoming TLV @len.
 * Returns 0 on success or -ENOBUFS when the recorded length exceeds @len.
 */
static int ath12k_wmi_tpc_stats_copy_buffer(struct ath12k_base *ab,
					    const void *ptr, u16 tag, u16 len,
					    struct wmi_tpc_stats_arg *tpc_stats)
{
	u32 len1, len2, len3, len4;
	s16 *dst_ptr;
	s8 *dst_ptr_ctl;

	len1 = le32_to_cpu(tpc_stats->max_reg_allowed_power.tpc_reg_pwr.reg_array_len);
	len2 = le32_to_cpu(tpc_stats->rates_array1.tpc_rates_array.rate_array_len);
	len3 = le32_to_cpu(tpc_stats->rates_array2.tpc_rates_array.rate_array_len);
	len4 = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.ctl_array_len);

	switch (tpc_stats->event_count) {
	case ATH12K_TPC_STATS_CONFIG_REG_PWR_EVENT:
		if (len1 > len)
			return -ENOBUFS;

		/* Copy only if the matching fixed-param TLV was seen */
		if (tpc_stats->tlvs_rcvd & WMI_TPC_REG_PWR_ALLOWED) {
			dst_ptr = tpc_stats->max_reg_allowed_power.reg_pwr_array;
			memcpy(dst_ptr, ptr, len1);
		}
		break;
	case ATH12K_TPC_STATS_RATES_EVENT1:
		if (len2 > len)
			return -ENOBUFS;

		if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY1) {
			dst_ptr = tpc_stats->rates_array1.rate_array;
			memcpy(dst_ptr, ptr, len2);
		}
		break;
	case ATH12K_TPC_STATS_RATES_EVENT2:
		if (len3 > len)
			return -ENOBUFS;

		if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY2) {
			dst_ptr = tpc_stats->rates_array2.rate_array;
			memcpy(dst_ptr, ptr, len3);
		}
		break;
	case ATH12K_TPC_STATS_CTL_TABLE_EVENT:
		if (len4 > len)
			return -ENOBUFS;

		/* CTL table entries are s8, unlike the s16 arrays above */
		if (tpc_stats->tlvs_rcvd & WMI_TPC_CTL_PWR_ARRAY) {
			dst_ptr_ctl = tpc_stats->ctl_array.ctl_pwr_table;
			memcpy(dst_ptr_ctl, ptr, len4);
		}
		break;
	}
	return 0;
}
9489 
/* Process the reg-power fixed-param TLV of a TPC stats event: validate the
 * advertised array length against the four dimension fields, save the
 * params and allocate the destination buffer the later array TLV fills.
 *
 * Returns 0 on success, -EINVAL for an unknown type or length mismatch,
 * -ENOMEM on allocation failure.
 */
static int ath12k_tpc_get_reg_pwr(struct ath12k_base *ab,
				  struct wmi_tpc_stats_arg *tpc_stats,
				  struct wmi_max_reg_power_fixed_params *ev)
{
	struct wmi_max_reg_power_allowed_arg *reg_pwr;
	u32 total_size;

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "Received reg power array type %d length %d for tpc stats\n",
		   ev->reg_power_type, ev->reg_array_len);

	switch (le32_to_cpu(ev->reg_power_type)) {
	case TPC_STATS_REG_PWR_ALLOWED_TYPE:
		reg_pwr = &tpc_stats->max_reg_allowed_power;
		break;
	default:
		return -EINVAL;
	}

	/* Each entry is 2 byte hence multiplying the indices with 2 */
	total_size = le32_to_cpu(ev->d1) * le32_to_cpu(ev->d2) *
		     le32_to_cpu(ev->d3) * le32_to_cpu(ev->d4) * 2;
	if (le32_to_cpu(ev->reg_array_len) != total_size) {
		ath12k_warn(ab,
			    "Total size and reg_array_len doesn't match for tpc stats\n");
		return -EINVAL;
	}

	memcpy(&reg_pwr->tpc_reg_pwr, ev, sizeof(struct wmi_max_reg_power_fixed_params));

	/* GFP_ATOMIC: allocation happens in WMI event (softirq) context */
	reg_pwr->reg_pwr_array = kzalloc(le32_to_cpu(reg_pwr->tpc_reg_pwr.reg_array_len),
					 GFP_ATOMIC);
	if (!reg_pwr->reg_pwr_array)
		return -ENOMEM;

	tpc_stats->tlvs_rcvd |= WMI_TPC_REG_PWR_ALLOWED;

	return 0;
}
9529 
/* Process a rates-array fixed-param TLV of a TPC stats event: select the
 * matching destination (array 1 or 2), save the params and allocate the
 * buffer the later array TLV fills.
 *
 * NOTE(review): unlike the reg-power and ctl-table variants, rate_array_len
 * is not cross-checked against dimension fields before the allocation.
 *
 * Returns 0 on success, -EINVAL for an unknown array type, -ENOMEM on
 * allocation failure.
 */
static int ath12k_tpc_get_rate_array(struct ath12k_base *ab,
				     struct wmi_tpc_stats_arg *tpc_stats,
				     struct wmi_tpc_rates_array_fixed_params *ev)
{
	struct wmi_tpc_rates_array_arg *rates_array;
	u32 flag = 0, rate_array_len;

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "Received rates array type %d length %d for tpc stats\n",
		   ev->rate_array_type, ev->rate_array_len);

	switch (le32_to_cpu(ev->rate_array_type)) {
	case ATH12K_TPC_STATS_RATES_ARRAY1:
		rates_array = &tpc_stats->rates_array1;
		flag = WMI_TPC_RATES_ARRAY1;
		break;
	case ATH12K_TPC_STATS_RATES_ARRAY2:
		rates_array = &tpc_stats->rates_array2;
		flag = WMI_TPC_RATES_ARRAY2;
		break;
	default:
		ath12k_warn(ab,
			    "Received invalid type of rates array for tpc stats\n");
		return -EINVAL;
	}
	memcpy(&rates_array->tpc_rates_array, ev,
	       sizeof(struct wmi_tpc_rates_array_fixed_params));
	rate_array_len = le32_to_cpu(rates_array->tpc_rates_array.rate_array_len);
	/* GFP_ATOMIC: allocation happens in WMI event (softirq) context */
	rates_array->rate_array = kzalloc(rate_array_len, GFP_ATOMIC);
	if (!rates_array->rate_array)
		return -ENOMEM;

	tpc_stats->tlvs_rcvd |= flag;
	return 0;
}
9565 
/* Process the CTL power table fixed-param TLV of a TPC stats event:
 * validate the advertised table length against the four dimension fields,
 * save the params and allocate the buffer the later array TLV fills.
 *
 * Returns 0 on success, -EINVAL for an unknown type or length mismatch,
 * -ENOMEM on allocation failure.
 */
static int ath12k_tpc_get_ctl_pwr_tbl(struct ath12k_base *ab,
				      struct wmi_tpc_stats_arg *tpc_stats,
				      struct wmi_tpc_ctl_pwr_fixed_params *ev)
{
	struct wmi_tpc_ctl_pwr_table_arg *ctl_array;
	u32 total_size, ctl_array_len, flag = 0;

	ath12k_dbg(ab, ATH12K_DBG_WMI,
		   "Received ctl array type %d length %d for tpc stats\n",
		   ev->ctl_array_type, ev->ctl_array_len);

	switch (le32_to_cpu(ev->ctl_array_type)) {
	case ATH12K_TPC_STATS_CTL_ARRAY:
		ctl_array = &tpc_stats->ctl_array;
		flag = WMI_TPC_CTL_PWR_ARRAY;
		break;
	default:
		ath12k_warn(ab,
			    "Received invalid type of ctl pwr table for tpc stats\n");
		return -EINVAL;
	}

	/* CTL entries are 1 byte each, so no x2 scaling as in reg power */
	total_size = le32_to_cpu(ev->d1) * le32_to_cpu(ev->d2) *
		     le32_to_cpu(ev->d3) * le32_to_cpu(ev->d4);
	if (le32_to_cpu(ev->ctl_array_len) != total_size) {
		ath12k_warn(ab,
			    "Total size and ctl_array_len doesn't match for tpc stats\n");
		return -EINVAL;
	}

	memcpy(&ctl_array->tpc_ctl_pwr, ev, sizeof(struct wmi_tpc_ctl_pwr_fixed_params));
	ctl_array_len = le32_to_cpu(ctl_array->tpc_ctl_pwr.ctl_array_len);
	/* GFP_ATOMIC: allocation happens in WMI event (softirq) context */
	ctl_array->ctl_pwr_table = kzalloc(ctl_array_len, GFP_ATOMIC);
	if (!ctl_array->ctl_pwr_table)
		return -ENOMEM;

	tpc_stats->tlvs_rcvd |= flag;
	return 0;
}
9605 
/* Dispatch a TPC stats sub-TLV to the matching fixed-param handler.
 * @data is the wmi_tpc_stats_arg accumulator set up by the caller.
 *
 * Returns the handler's result, or -EINVAL for a missing accumulator or an
 * unrecognised tag.
 */
static int ath12k_wmi_tpc_stats_subtlv_parser(struct ath12k_base *ab,
					      u16 tag, u16 len,
					      const void *ptr, void *data)
{
	struct wmi_tpc_rates_array_fixed_params *tpc_rates_array;
	struct wmi_max_reg_power_fixed_params *tpc_reg_pwr;
	struct wmi_tpc_ctl_pwr_fixed_params *tpc_ctl_pwr;
	struct wmi_tpc_stats_arg *tpc_stats = data;
	struct wmi_tpc_config_params *tpc_config;
	int ret = 0;

	if (!tpc_stats) {
		ath12k_warn(ab, "tpc stats memory unavailable\n");
		return -EINVAL;
	}

	switch (tag) {
	case WMI_TAG_TPC_STATS_CONFIG_EVENT:
		/* Config params are copied verbatim; no extra processing */
		tpc_config = (struct wmi_tpc_config_params *)ptr;
		memcpy(&tpc_stats->tpc_config, tpc_config,
		       sizeof(struct wmi_tpc_config_params));
		break;
	case WMI_TAG_TPC_STATS_REG_PWR_ALLOWED:
		tpc_reg_pwr = (struct wmi_max_reg_power_fixed_params *)ptr;
		ret = ath12k_tpc_get_reg_pwr(ab, tpc_stats, tpc_reg_pwr);
		break;
	case WMI_TAG_TPC_STATS_RATES_ARRAY:
		tpc_rates_array = (struct wmi_tpc_rates_array_fixed_params *)ptr;
		ret = ath12k_tpc_get_rate_array(ab, tpc_stats, tpc_rates_array);
		break;
	case WMI_TAG_TPC_STATS_CTL_PWR_TABLE_EVENT:
		tpc_ctl_pwr = (struct wmi_tpc_ctl_pwr_fixed_params *)ptr;
		ret = ath12k_tpc_get_ctl_pwr_tbl(ab, tpc_stats, tpc_ctl_pwr);
		break;
	default:
		ath12k_warn(ab,
			    "Received invalid tag for tpc stats in subtlvs\n");
		return -EINVAL;
	}
	return ret;
}
9647 
/* Top-level TLV iterator callback for TPC stats events: recurse into
 * struct arrays via the sub-TLV parser, and copy raw int16/byte arrays into
 * the buffers allocated by the fixed-param handlers.
 *
 * Returns 0 on success or a negative error from the delegated handler.
 */
static int ath12k_wmi_tpc_stats_event_parser(struct ath12k_base *ab,
					     u16 tag, u16 len,
					     const void *ptr, void *data)
{
	struct wmi_tpc_stats_arg *tpc_stats = (struct wmi_tpc_stats_arg *)data;
	int ret;

	switch (tag) {
	case WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM:
		ret = 0;
		/* Fixed param is already processed */
		break;
	case WMI_TAG_ARRAY_STRUCT:
		/* len 0 is expected for array of struct when there
		 * is no content of that type to pack inside that tlv
		 */
		if (len == 0)
			return 0;
		ret = ath12k_wmi_tlv_iter(ab, ptr, len,
					  ath12k_wmi_tpc_stats_subtlv_parser,
					  tpc_stats);
		break;
	case WMI_TAG_ARRAY_INT16:
		if (len == 0)
			return 0;
		ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr,
						       WMI_TAG_ARRAY_INT16,
						       len, tpc_stats);
		break;
	case WMI_TAG_ARRAY_BYTE:
		if (len == 0)
			return 0;
		ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr,
						       WMI_TAG_ARRAY_BYTE,
						       len, tpc_stats);
		break;
	default:
		ath12k_warn(ab, "Received invalid tag for tpc stats\n");
		ret = -EINVAL;
		break;
	}
	return ret;
}
9691 
ath12k_wmi_free_tpc_stats_mem(struct ath12k * ar)9692 void ath12k_wmi_free_tpc_stats_mem(struct ath12k *ar)
9693 {
9694 	struct wmi_tpc_stats_arg *tpc_stats = ar->debug.tpc_stats;
9695 
9696 	lockdep_assert_held(&ar->data_lock);
9697 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "tpc stats mem free\n");
9698 	if (tpc_stats) {
9699 		kfree(tpc_stats->max_reg_allowed_power.reg_pwr_array);
9700 		kfree(tpc_stats->rates_array1.rate_array);
9701 		kfree(tpc_stats->rates_array2.rate_array);
9702 		kfree(tpc_stats->ctl_array.ctl_pwr_table);
9703 		kfree(tpc_stats);
9704 		ar->debug.tpc_stats = NULL;
9705 	}
9706 }
9707 
ath12k_wmi_process_tpc_stats(struct ath12k_base * ab,struct sk_buff * skb)9708 static void ath12k_wmi_process_tpc_stats(struct ath12k_base *ab,
9709 					 struct sk_buff *skb)
9710 {
9711 	struct ath12k_wmi_pdev_tpc_stats_event_fixed_params *fixed_param;
9712 	struct wmi_tpc_stats_arg *tpc_stats;
9713 	const struct wmi_tlv *tlv;
9714 	void *ptr = skb->data;
9715 	struct ath12k *ar;
9716 	u16 tlv_tag;
9717 	u32 event_count;
9718 	int ret;
9719 
9720 	if (!skb->data) {
9721 		ath12k_warn(ab, "No data present in tpc stats event\n");
9722 		return;
9723 	}
9724 
9725 	if (skb->len < (sizeof(*fixed_param) + TLV_HDR_SIZE)) {
9726 		ath12k_warn(ab, "TPC stats event size invalid\n");
9727 		return;
9728 	}
9729 
9730 	tlv = (struct wmi_tlv *)ptr;
9731 	tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
9732 	ptr += sizeof(*tlv);
9733 
9734 	if (tlv_tag != WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM) {
9735 		ath12k_warn(ab, "TPC stats without fixed param tlv at start\n");
9736 		return;
9737 	}
9738 
9739 	fixed_param = (struct ath12k_wmi_pdev_tpc_stats_event_fixed_params *)ptr;
9740 	rcu_read_lock();
9741 	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(fixed_param->pdev_id) + 1);
9742 	if (!ar) {
9743 		ath12k_warn(ab, "Failed to get ar for tpc stats\n");
9744 		rcu_read_unlock();
9745 		return;
9746 	}
9747 	spin_lock_bh(&ar->data_lock);
9748 	if (!ar->debug.tpc_request) {
9749 		/* Event is received either without request or the
9750 		 * timeout, if memory is already allocated free it
9751 		 */
9752 		if (ar->debug.tpc_stats) {
9753 			ath12k_warn(ab, "Freeing memory for tpc_stats\n");
9754 			ath12k_wmi_free_tpc_stats_mem(ar);
9755 		}
9756 		goto unlock;
9757 	}
9758 
9759 	event_count = le32_to_cpu(fixed_param->event_count);
9760 	if (event_count == 0) {
9761 		if (ar->debug.tpc_stats) {
9762 			ath12k_warn(ab,
9763 				    "Invalid tpc memory present\n");
9764 			goto unlock;
9765 		}
9766 		ar->debug.tpc_stats =
9767 			kzalloc(sizeof(struct wmi_tpc_stats_arg),
9768 				GFP_ATOMIC);
9769 		if (!ar->debug.tpc_stats) {
9770 			ath12k_warn(ab,
9771 				    "Failed to allocate memory for tpc stats\n");
9772 			goto unlock;
9773 		}
9774 	}
9775 
9776 	tpc_stats = ar->debug.tpc_stats;
9777 	if (!tpc_stats) {
9778 		ath12k_warn(ab, "tpc stats memory unavailable\n");
9779 		goto unlock;
9780 	}
9781 
9782 	if (!(event_count == 0)) {
9783 		if (event_count != tpc_stats->event_count + 1) {
9784 			ath12k_warn(ab,
9785 				    "Invalid tpc event received\n");
9786 			goto unlock;
9787 		}
9788 	}
9789 	tpc_stats->pdev_id = le32_to_cpu(fixed_param->pdev_id);
9790 	tpc_stats->end_of_event = le32_to_cpu(fixed_param->end_of_event);
9791 	tpc_stats->event_count = le32_to_cpu(fixed_param->event_count);
9792 	ath12k_dbg(ab, ATH12K_DBG_WMI,
9793 		   "tpc stats event_count %d\n",
9794 		   tpc_stats->event_count);
9795 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
9796 				  ath12k_wmi_tpc_stats_event_parser,
9797 				  tpc_stats);
9798 	if (ret) {
9799 		ath12k_wmi_free_tpc_stats_mem(ar);
9800 		ath12k_warn(ab, "failed to parse tpc_stats tlv: %d\n", ret);
9801 		goto unlock;
9802 	}
9803 
9804 	if (tpc_stats->end_of_event)
9805 		complete(&ar->debug.tpc_complete);
9806 
9807 unlock:
9808 	spin_unlock_bh(&ar->data_lock);
9809 	rcu_read_unlock();
9810 }
9811 #else
/* Stub for builds where the matching config option of the #if above is
 * disabled (presumably debugfs support): TPC stats events are dropped.
 */
static void ath12k_wmi_process_tpc_stats(struct ath12k_base *ab,
					 struct sk_buff *skb)
{
}
9816 #endif
9817 
/* Parse one sub-TLV of the RSSI dBm conversion event into @data
 * (struct ath12k_wmi_rssi_dbm_conv_info_arg).
 *
 * WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO: carries current bandwidth,
 * rx chainmask and packed per-chain/per-20MHz-subband hardware noise
 * floor values; the minimum over all active chains/segments is stored.
 * WMI_TAG_RSSI_DBM_CONVERSION_TEMP_OFFSET_INFO: temperature offset.
 * Unknown sub-TLVs are logged at debug level and ignored.
 *
 * Returns 0 on success, -EINVAL if a known sub-TLV is too short.
 */
static int
ath12k_wmi_rssi_dbm_conv_info_evt_subtlv_parser(struct ath12k_base *ab,
						u16 tag, u16 len,
						const void *ptr, void *data)
{
	const struct ath12k_wmi_rssi_dbm_conv_temp_info_params *temp_info;
	const struct ath12k_wmi_rssi_dbm_conv_info_params *param_info;
	struct ath12k_wmi_rssi_dbm_conv_info_arg *rssi_info = data;
	struct ath12k_wmi_rssi_dbm_conv_param_arg param_arg;
	s32 nf_hw_dbm[ATH12K_MAX_NUM_NF_HW_DBM];
	u8 num_20mhz_segments;
	s8 min_nf, *nf_ptr;
	int i, j;

	switch (tag) {
	case WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO:
		if (len < sizeof(*param_info)) {
			ath12k_warn(ab,
				    "RSSI dbm conv subtlv 0x%x invalid len %d rcvd",
				    tag, len);
			return -EINVAL;
		}

		param_info = ptr;

		param_arg.curr_bw = le32_to_cpu(param_info->curr_bw);
		param_arg.curr_rx_chainmask = le32_to_cpu(param_info->curr_rx_chainmask);

		/* The received array packs four s8 noise-floor values into
		 * each s32 word; unpack into the per-chain, per-20MHz
		 * subband 2D byte array.
		 */
		nf_ptr = &param_arg.nf_hw_dbm[0][0];

		for (i = 0; i < ATH12K_MAX_NUM_NF_HW_DBM; i++) {
			nf_hw_dbm[i] = a_sle32_to_cpu(param_info->nf_hw_dbm[i]);

			for (j = 0; j < 4; j++) {
				*nf_ptr = (nf_hw_dbm[i] >> (j * 8)) & 0xFF;
				nf_ptr++;
			}
		}

		/* Number of 20 MHz segments covered by the current bw */
		switch (param_arg.curr_bw) {
		case WMI_CHAN_WIDTH_20:
			num_20mhz_segments = 1;
			break;
		case WMI_CHAN_WIDTH_40:
			num_20mhz_segments = 2;
			break;
		case WMI_CHAN_WIDTH_80:
			num_20mhz_segments = 4;
			break;
		case WMI_CHAN_WIDTH_160:
			num_20mhz_segments = 8;
			break;
		case WMI_CHAN_WIDTH_320:
			num_20mhz_segments = 16;
			break;
		default:
			ath12k_warn(ab, "Invalid current bandwidth %d in RSSI dbm event",
				    param_arg.curr_bw);
			/* In error case, still consider the primary 20 MHz segment since
			 * that would be much better than instead of dropping the whole
			 * event
			 */
			num_20mhz_segments = 1;
		}

		min_nf = ATH12K_DEFAULT_NOISE_FLOOR;

		/* Minimum noise floor over all chains present in the rx
		 * chainmask, across the active 20 MHz segments.
		 */
		for (i = 0; i < ATH12K_MAX_NUM_ANTENNA; i++) {
			if (!(param_arg.curr_rx_chainmask & BIT(i)))
				continue;

			for (j = 0; j < num_20mhz_segments; j++) {
				if (param_arg.nf_hw_dbm[i][j] < min_nf)
					min_nf = param_arg.nf_hw_dbm[i][j];
			}
		}

		rssi_info->min_nf_dbm = min_nf;
		rssi_info->nf_dbm_present = true;
		break;
	case WMI_TAG_RSSI_DBM_CONVERSION_TEMP_OFFSET_INFO:
		if (len < sizeof(*temp_info)) {
			ath12k_warn(ab,
				    "RSSI dbm conv subtlv 0x%x invalid len %d rcvd",
				    tag, len);
			return -EINVAL;
		}

		temp_info = ptr;
		rssi_info->temp_offset = a_sle32_to_cpu(temp_info->offset);
		rssi_info->temp_offset_present = true;
		break;
	default:
		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "Unknown subtlv 0x%x in RSSI dbm conversion event\n", tag);
	}

	return 0;
}
9920 
9921 static int
ath12k_wmi_rssi_dbm_conv_info_event_parser(struct ath12k_base * ab,u16 tag,u16 len,const void * ptr,void * data)9922 ath12k_wmi_rssi_dbm_conv_info_event_parser(struct ath12k_base *ab,
9923 					   u16 tag, u16 len,
9924 					   const void *ptr, void *data)
9925 {
9926 	int ret = 0;
9927 
9928 	switch (tag) {
9929 	case WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO_FIXED_PARAM:
9930 		/* Fixed param is already processed*/
9931 		break;
9932 	case WMI_TAG_ARRAY_STRUCT:
9933 		/* len 0 is expected for array of struct when there
9934 		 * is no content of that type inside that tlv
9935 		 */
9936 		if (len == 0)
9937 			return 0;
9938 
9939 		ret = ath12k_wmi_tlv_iter(ab, ptr, len,
9940 					  ath12k_wmi_rssi_dbm_conv_info_evt_subtlv_parser,
9941 					  data);
9942 		break;
9943 	default:
9944 		ath12k_dbg(ab, ATH12K_DBG_WMI,
9945 			   "Received invalid tag 0x%x for RSSI dbm conv info event\n",
9946 			   tag);
9947 		break;
9948 	}
9949 
9950 	return ret;
9951 }
9952 
9953 static int
ath12k_wmi_rssi_dbm_conv_info_process_fixed_param(struct ath12k_base * ab,u8 * ptr,size_t len,int * pdev_id)9954 ath12k_wmi_rssi_dbm_conv_info_process_fixed_param(struct ath12k_base *ab, u8 *ptr,
9955 						  size_t len, int *pdev_id)
9956 {
9957 	struct ath12k_wmi_rssi_dbm_conv_info_fixed_params *fixed_param;
9958 	const struct wmi_tlv *tlv;
9959 	u16 tlv_tag;
9960 
9961 	if (len < (sizeof(*fixed_param) + TLV_HDR_SIZE)) {
9962 		ath12k_warn(ab, "invalid RSSI dbm conv event size %zu\n", len);
9963 		return -EINVAL;
9964 	}
9965 
9966 	tlv = (struct wmi_tlv *)ptr;
9967 	tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
9968 	ptr += sizeof(*tlv);
9969 
9970 	if (tlv_tag != WMI_TAG_RSSI_DBM_CONVERSION_PARAMS_INFO_FIXED_PARAM) {
9971 		ath12k_warn(ab, "RSSI dbm conv event received without fixed param tlv\n");
9972 		return -EINVAL;
9973 	}
9974 
9975 	fixed_param = (struct ath12k_wmi_rssi_dbm_conv_info_fixed_params *)ptr;
9976 	*pdev_id = le32_to_cpu(fixed_param->pdev_id);
9977 
9978 	return 0;
9979 }
9980 
9981 static void
ath12k_wmi_update_rssi_offsets(struct ath12k * ar,struct ath12k_wmi_rssi_dbm_conv_info_arg * rssi_info)9982 ath12k_wmi_update_rssi_offsets(struct ath12k *ar,
9983 			       struct ath12k_wmi_rssi_dbm_conv_info_arg *rssi_info)
9984 {
9985 	struct ath12k_pdev_rssi_offsets *info = &ar->rssi_info;
9986 
9987 	lockdep_assert_held(&ar->data_lock);
9988 
9989 	if (rssi_info->temp_offset_present)
9990 		info->temp_offset = rssi_info->temp_offset;
9991 
9992 	if (rssi_info->nf_dbm_present)
9993 		info->min_nf_dbm = rssi_info->min_nf_dbm;
9994 
9995 	info->noise_floor = info->min_nf_dbm + info->temp_offset;
9996 }
9997 
9998 static void
ath12k_wmi_rssi_dbm_conversion_params_info_event(struct ath12k_base * ab,struct sk_buff * skb)9999 ath12k_wmi_rssi_dbm_conversion_params_info_event(struct ath12k_base *ab,
10000 						 struct sk_buff *skb)
10001 {
10002 	struct ath12k_wmi_rssi_dbm_conv_info_arg rssi_info;
10003 	struct ath12k *ar;
10004 	s32 noise_floor;
10005 	u32 pdev_id;
10006 	int ret;
10007 
10008 	ret = ath12k_wmi_rssi_dbm_conv_info_process_fixed_param(ab, skb->data, skb->len,
10009 								&pdev_id);
10010 	if (ret) {
10011 		ath12k_warn(ab, "failed to parse fixed param in RSSI dbm conv event: %d\n",
10012 			    ret);
10013 		return;
10014 	}
10015 
10016 	rcu_read_lock();
10017 	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
10018 	/* If pdev is not active, ignore the event */
10019 	if (!ar)
10020 		goto out_unlock;
10021 
10022 	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
10023 				  ath12k_wmi_rssi_dbm_conv_info_event_parser,
10024 				  &rssi_info);
10025 	if (ret) {
10026 		ath12k_warn(ab, "unable to parse RSSI dbm conversion event\n");
10027 		goto out_unlock;
10028 	}
10029 
10030 	spin_lock_bh(&ar->data_lock);
10031 	ath12k_wmi_update_rssi_offsets(ar, &rssi_info);
10032 	noise_floor = ath12k_pdev_get_noise_floor(ar);
10033 	spin_unlock_bh(&ar->data_lock);
10034 
10035 	ath12k_dbg(ab, ATH12K_DBG_WMI,
10036 		   "RSSI noise floor updated, new value is %d dbm\n", noise_floor);
10037 out_unlock:
10038 	rcu_read_unlock();
10039 }
10040 
/* HTC rx completion handler for the WMI control endpoint: decode the
 * WMI event id from the command header and dispatch to the matching
 * event handler.  The skb is consumed here (freed at 'out') except for
 * WMI_MGMT_RX_EVENTID, whose handler takes ownership of the buffer.
 */
static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_tlv_event_id id;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	id = le32_get_bits(cmd_hdr->cmd_id, WMI_CMD_HDR_CMD_ID);

	/* strip the WMI header; bail out (and free) if skb is too short */
	if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
		goto out;

	switch (id) {
		/* Process all the WMI events here */
	case WMI_SERVICE_READY_EVENTID:
		ath12k_service_ready_event(ab, skb);
		break;
	case WMI_SERVICE_READY_EXT_EVENTID:
		ath12k_service_ready_ext_event(ab, skb);
		break;
	case WMI_SERVICE_READY_EXT2_EVENTID:
		ath12k_service_ready_ext2_event(ab, skb);
		break;
	case WMI_REG_CHAN_LIST_CC_EXT_EVENTID:
		ath12k_reg_chan_list_event(ab, skb);
		break;
	case WMI_READY_EVENTID:
		ath12k_ready_event(ab, skb);
		break;
	case WMI_PEER_DELETE_RESP_EVENTID:
		ath12k_peer_delete_resp_event(ab, skb);
		break;
	case WMI_VDEV_START_RESP_EVENTID:
		ath12k_vdev_start_resp_event(ab, skb);
		break;
	case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
		ath12k_bcn_tx_status_event(ab, skb);
		break;
	case WMI_VDEV_STOPPED_EVENTID:
		ath12k_vdev_stopped_event(ab, skb);
		break;
	case WMI_MGMT_RX_EVENTID:
		ath12k_mgmt_rx_event(ab, skb);
		/* mgmt_rx_event() owns the skb now! */
		return;
	case WMI_MGMT_TX_COMPLETION_EVENTID:
		ath12k_mgmt_tx_compl_event(ab, skb);
		break;
	case WMI_SCAN_EVENTID:
		ath12k_scan_event(ab, skb);
		break;
	case WMI_PEER_STA_KICKOUT_EVENTID:
		ath12k_peer_sta_kickout_event(ab, skb);
		break;
	case WMI_ROAM_EVENTID:
		ath12k_roam_event(ab, skb);
		break;
	case WMI_CHAN_INFO_EVENTID:
		ath12k_chan_info_event(ab, skb);
		break;
	case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
		ath12k_pdev_bss_chan_info_event(ab, skb);
		break;
	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
		ath12k_vdev_install_key_compl_event(ab, skb);
		break;
	case WMI_SERVICE_AVAILABLE_EVENTID:
		ath12k_service_available_event(ab, skb);
		break;
	case WMI_PEER_ASSOC_CONF_EVENTID:
		ath12k_peer_assoc_conf_event(ab, skb);
		break;
	case WMI_UPDATE_STATS_EVENTID:
		ath12k_update_stats_event(ab, skb);
		break;
	case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
		ath12k_pdev_ctl_failsafe_check_event(ab, skb);
		break;
	case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
		ath12k_wmi_pdev_csa_switch_count_status_event(ab, skb);
		break;
	case WMI_PDEV_TEMPERATURE_EVENTID:
		ath12k_wmi_pdev_temperature_event(ab, skb);
		break;
	case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID:
		ath12k_wmi_pdev_dma_ring_buf_release_event(ab, skb);
		break;
	case WMI_HOST_FILS_DISCOVERY_EVENTID:
		ath12k_fils_discovery_event(ab, skb);
		break;
	case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
		ath12k_probe_resp_tx_status_event(ab, skb);
		break;
	case WMI_RFKILL_STATE_CHANGE_EVENTID:
		ath12k_rfkill_state_change_event(ab, skb);
		break;
	case WMI_TWT_ENABLE_EVENTID:
		ath12k_wmi_twt_enable_event(ab, skb);
		break;
	case WMI_TWT_DISABLE_EVENTID:
		ath12k_wmi_twt_disable_event(ab, skb);
		break;
	case WMI_P2P_NOA_EVENTID:
		ath12k_wmi_p2p_noa_event(ab, skb);
		break;
	case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
		ath12k_wmi_pdev_dfs_radar_detected_event(ab, skb);
		break;
	case WMI_VDEV_DELETE_RESP_EVENTID:
		ath12k_vdev_delete_resp_event(ab, skb);
		break;
	case WMI_DIAG_EVENTID:
		ath12k_wmi_diag_event(ab, skb);
		break;
	case WMI_WOW_WAKEUP_HOST_EVENTID:
		ath12k_wmi_event_wow_wakeup_host(ab, skb);
		break;
	case WMI_GTK_OFFLOAD_STATUS_EVENTID:
		ath12k_wmi_gtk_offload_status_event(ab, skb);
		break;
	case WMI_MLO_SETUP_COMPLETE_EVENTID:
		ath12k_wmi_event_mlo_setup_complete(ab, skb);
		break;
	case WMI_MLO_TEARDOWN_COMPLETE_EVENTID:
		ath12k_wmi_event_teardown_complete(ab, skb);
		break;
	case WMI_HALPHY_STATS_CTRL_PATH_EVENTID:
		ath12k_wmi_process_tpc_stats(ab, skb);
		break;
	case WMI_11D_NEW_COUNTRY_EVENTID:
		ath12k_reg_11d_new_cc_event(ab, skb);
		break;
	case WMI_PDEV_RSSI_DBM_CONVERSION_PARAMS_INFO_EVENTID:
		ath12k_wmi_rssi_dbm_conversion_params_info_event(ab, skb);
		break;
	case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
		ath12k_wmi_obss_color_collision_event(ab, skb);
		break;
	/* add Unsupported events (rare) here */
	case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
	case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
	case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "ignoring unsupported event 0x%x\n", id);
		break;
	/* add Unsupported events (frequent) here */
	case WMI_PDEV_GET_HALPHY_CAL_STATUS_EVENTID:
	case WMI_MGMT_RX_FW_CONSUMED_EVENTID:
		/* debug might flood hence silently ignore (no-op) */
		break;
	case WMI_PDEV_UTF_EVENTID:
		/* factory test mode events; routing depends on whether
		 * firmware delivers them segmented
		 */
		if (test_bit(ATH12K_FLAG_FTM_SEGMENTED, &ab->dev_flags))
			ath12k_tm_wmi_event_segmented(ab, id, skb);
		else
			ath12k_tm_wmi_event_unsegmented(ab, id, skb);
		break;
	default:
		ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
		break;
	}

out:
	dev_kfree_skb(skb);
}
10204 
/* Connect the WMI control HTC service endpoint for pdev @pdev_idx and
 * record the endpoint id and maximum message length for that radio.
 *
 * Returns 0 on success or the error from ath12k_htc_connect_service().
 */
static int ath12k_connect_pdev_htc_service(struct ath12k_base *ab,
					   u32 pdev_idx)
{
	static const u32 svc_id[] = {
		ATH12K_HTC_SVC_ID_WMI_CONTROL,
		ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1,
		ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2
	};
	struct ath12k_htc_svc_conn_req conn_req = {};
	struct ath12k_htc_svc_conn_resp conn_resp = {};
	int ret;

	/* these fields are the same for all service endpoints */
	conn_req.ep_ops.ep_tx_complete = ath12k_wmi_htc_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath12k_wmi_op_rx;
	conn_req.ep_ops.ep_tx_credits = ath12k_wmi_op_ep_tx_credits;

	/* connect to control service */
	conn_req.service_id = svc_id[pdev_idx];

	ret = ath12k_htc_connect_service(&ab->htc, &conn_req, &conn_resp);
	if (ret) {
		ath12k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n",
			    ret);
		return ret;
	}

	ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
	ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid;
	ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len;

	return 0;
}
10238 
/* Build and send a WMI_UNIT_TEST_CMDID command.
 *
 * @ut_cmd supplies the fixed parameters (vdev, module id, arg count,
 * diag token - already little-endian, set by the caller); @test_args
 * supplies num_args u32 values appended as a WMI_TAG_ARRAY_UINT32 TLV.
 *
 * NOTE(review): the args are copied verbatim with no cpu_to_le32 -
 * harmless on little-endian hosts; verify for big-endian.
 *
 * Returns 0 on success or a negative error value; the skb is freed
 * here on send failure.
 */
static int
ath12k_wmi_send_unit_test_cmd(struct ath12k *ar,
			      struct wmi_unit_test_cmd ut_cmd,
			      u32 *test_args)
{
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct wmi_unit_test_cmd *cmd;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
#if defined(__linux__)
	void *ptr;
#elif defined(__FreeBSD__)
	/* u8 * keeps the pointer arithmetic below well-defined */
	u8 *ptr;
#endif
	u32 *ut_cmd_args;
	int buf_len, arg_len;
	int ret;
	int i;

	/* fixed params + one TLV header + the u32 argument array */
	arg_len = sizeof(u32) * le32_to_cpu(ut_cmd.num_args);
	buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_unit_test_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_UNIT_TEST_CMD,
						 sizeof(ut_cmd));

	cmd->vdev_id = ut_cmd.vdev_id;
	cmd->module_id = ut_cmd.module_id;
	cmd->num_args = ut_cmd.num_args;
	cmd->diag_token = ut_cmd.diag_token;

	ptr = skb->data + sizeof(ut_cmd);

#if defined(__linux__)
	tlv = ptr;
#elif defined(__FreeBSD__)
	tlv = (void *)ptr;
#endif
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len);

	ptr += TLV_HDR_SIZE;

#if defined(__linux__)
	ut_cmd_args = ptr;
#elif defined(__FreeBSD__)
	ut_cmd_args = (void *)ptr;
#endif
	for (i = 0; i < le32_to_cpu(ut_cmd.num_args); i++)
		ut_cmd_args[i] = test_args[i];

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "WMI unit test : module %d vdev %d n_args %d token %d\n",
		   cmd->module_id, cmd->vdev_id, cmd->num_args,
		   cmd->diag_token);

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID);

	if (ret) {
		ath12k_warn(ar->ab, "failed to send WMI_UNIT_TEST CMD :%d\n",
			    ret);
		dev_kfree_skb(skb);
	}

	return ret;
}
10308 
ath12k_wmi_simulate_radar(struct ath12k * ar)10309 int ath12k_wmi_simulate_radar(struct ath12k *ar)
10310 {
10311 	struct ath12k_link_vif *arvif;
10312 	u32 dfs_args[DFS_MAX_TEST_ARGS];
10313 	struct wmi_unit_test_cmd wmi_ut;
10314 	bool arvif_found = false;
10315 
10316 	list_for_each_entry(arvif, &ar->arvifs, list) {
10317 		if (arvif->is_started && arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
10318 			arvif_found = true;
10319 			break;
10320 		}
10321 	}
10322 
10323 	if (!arvif_found)
10324 		return -EINVAL;
10325 
10326 	dfs_args[DFS_TEST_CMDID] = 0;
10327 	dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id;
10328 	/* Currently we could pass segment_id(b0 - b1), chirp(b2)
10329 	 * freq offset (b3 - b10) to unit test. For simulation
10330 	 * purpose this can be set to 0 which is valid.
10331 	 */
10332 	dfs_args[DFS_TEST_RADAR_PARAM] = 0;
10333 
10334 	wmi_ut.vdev_id = cpu_to_le32(arvif->vdev_id);
10335 	wmi_ut.module_id = cpu_to_le32(DFS_UNIT_TEST_MODULE);
10336 	wmi_ut.num_args = cpu_to_le32(DFS_MAX_TEST_ARGS);
10337 	wmi_ut.diag_token = cpu_to_le32(DFS_UNIT_TEST_TOKEN);
10338 
10339 	ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Triggering Radar Simulation\n");
10340 
10341 	return ath12k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
10342 }
10343 
/* Request HALPHY ctrl-path TPC stats of @tpc_stats_type for this radio.
 *
 * Builds the fixed param TLV followed by the three optional filter
 * arrays: a one-entry pdev id array (this radio's target pdev id), an
 * empty vdev id array and an empty peer MAC array.
 *
 * Returns 0 on success or a negative error value; the skb is freed
 * here on send failure.
 */
int ath12k_wmi_send_tpc_stats_request(struct ath12k *ar,
				      enum wmi_halphy_ctrl_path_stats_id tpc_stats_type)
{
	struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params *cmd;
	struct ath12k_wmi_pdev *wmi = ar->wmi;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	__le32 *pdev_id;
	u32 buf_len;
	void *ptr;
	int ret;

	/* fixed params + (hdr + one pdev id) + empty vdev hdr + empty peer hdr */
	buf_len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(u32) + TLV_HDR_SIZE + TLV_HDR_SIZE;

	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
	if (!skb)
		return -ENOMEM;
	cmd = (struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HALPHY_CTRL_PATH_CMD_FIXED_PARAM,
						 sizeof(*cmd));

	cmd->stats_id_mask = cpu_to_le32(WMI_REQ_CTRL_PATH_PDEV_TX_STAT);
	cmd->action = cpu_to_le32(WMI_REQUEST_CTRL_PATH_STAT_GET);
	cmd->subid = cpu_to_le32(tpc_stats_type);

	ptr = skb->data + sizeof(*cmd);

	/* The below TLV arrays optionally follow this fixed param TLV structure
	 * 1. ARRAY_UINT32 pdev_ids[]
	 *      If this array is present and non-zero length, stats should only
	 *      be provided from the pdevs identified in the array.
	 * 2. ARRAY_UNIT32 vdev_ids[]
	 *      If this array is present and non-zero length, stats should only
	 *      be provided from the vdevs identified in the array.
	 * 3. ath12k_wmi_mac_addr_params peer_macaddr[];
	 *      If this array is present and non-zero length, stats should only
	 *      be provided from the peers with the MAC addresses specified
	 *      in the array
	 */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32));
	ptr += TLV_HDR_SIZE;

	pdev_id = ptr;
	*pdev_id = cpu_to_le32(ath12k_mac_get_target_pdev_id(ar));
	ptr += sizeof(*pdev_id);

	/* empty vdev id filter array */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
	ptr += TLV_HDR_SIZE;

	/* empty peer MAC filter array */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, 0);
	ptr += TLV_HDR_SIZE;

	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_REQUEST_HALPHY_CTRL_PATH_STATS_CMDID);
	if (ret) {
		ath12k_warn(ar->ab,
			    "failed to submit WMI_REQUEST_STATS_CTRL_PATH_CMDID\n");
		dev_kfree_skb(skb);
		return ret;
	}
	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI get TPC STATS sent on pdev %d\n",
		   ar->pdev->pdev_id);

	return ret;
}
10411 
ath12k_wmi_connect(struct ath12k_base * ab)10412 int ath12k_wmi_connect(struct ath12k_base *ab)
10413 {
10414 	u32 i;
10415 	u8 wmi_ep_count;
10416 
10417 	wmi_ep_count = ab->htc.wmi_ep_count;
10418 	if (wmi_ep_count > ab->hw_params->max_radios)
10419 		return -1;
10420 
10421 	for (i = 0; i < wmi_ep_count; i++)
10422 		ath12k_connect_pdev_htc_service(ab, i);
10423 
10424 	return 0;
10425 }
10426 
/* Tear down per-pdev WMI resources (currently nothing to free; the
 * WARN_ON guards against out-of-range pdev ids).
 */
static void ath12k_wmi_pdev_detach(struct ath12k_base *ab, u8 pdev_id)
{
	if (WARN_ON(pdev_id >= MAX_RADIOS))
		return;

	/* TODO: Deinit any pdev specific wmi resource */
}
10434 
/* Initialize the per-pdev WMI handle for @pdev_id, linking it back to
 * the SoC-level WMI context.
 *
 * Returns 0 on success or -EINVAL for an out-of-range pdev id.
 */
int ath12k_wmi_pdev_attach(struct ath12k_base *ab,
			   u8 pdev_id)
{
	struct ath12k_wmi_pdev *wmi_handle;

	if (pdev_id >= ab->hw_params->max_radios)
		return -EINVAL;

	wmi_handle = &ab->wmi_ab.wmi[pdev_id];
	wmi_handle->wmi_ab = &ab->wmi_ab;
	ab->wmi_ab.ab = ab;

	/* TODO: Init remaining resource specific to pdev */

	return 0;
}
10452 
ath12k_wmi_attach(struct ath12k_base * ab)10453 int ath12k_wmi_attach(struct ath12k_base *ab)
10454 {
10455 	int ret;
10456 
10457 	ret = ath12k_wmi_pdev_attach(ab, 0);
10458 	if (ret)
10459 		return ret;
10460 
10461 	ab->wmi_ab.ab = ab;
10462 	ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
10463 
10464 	/* It's overwritten when service_ext_ready is handled */
10465 	if (ab->hw_params->single_pdev_only)
10466 		ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
10467 
10468 	/* TODO: Init remaining wmi soc resources required */
10469 	init_completion(&ab->wmi_ab.service_ready);
10470 	init_completion(&ab->wmi_ab.unified_ready);
10471 
10472 	return 0;
10473 }
10474 
ath12k_wmi_detach(struct ath12k_base * ab)10475 void ath12k_wmi_detach(struct ath12k_base *ab)
10476 {
10477 	int i;
10478 
10479 	/* TODO: Deinit wmi resource specific to SOC as required */
10480 
10481 	for (i = 0; i < ab->htc.wmi_ep_count; i++)
10482 		ath12k_wmi_pdev_detach(ab, i);
10483 
10484 	ath12k_wmi_free_dbring_caps(ab);
10485 }
10486 
ath12k_wmi_hw_data_filter_cmd(struct ath12k * ar,struct wmi_hw_data_filter_arg * arg)10487 int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar, struct wmi_hw_data_filter_arg *arg)
10488 {
10489 	struct wmi_hw_data_filter_cmd *cmd;
10490 	struct sk_buff *skb;
10491 	int len;
10492 
10493 	len = sizeof(*cmd);
10494 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10495 
10496 	if (!skb)
10497 		return -ENOMEM;
10498 
10499 	cmd = (struct wmi_hw_data_filter_cmd *)skb->data;
10500 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HW_DATA_FILTER_CMD,
10501 						 sizeof(*cmd));
10502 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
10503 	cmd->enable = cpu_to_le32(arg->enable ? 1 : 0);
10504 
10505 	/* Set all modes in case of disable */
10506 	if (arg->enable)
10507 		cmd->hw_filter_bitmap = cpu_to_le32(arg->hw_filter_bitmap);
10508 	else
10509 		cmd->hw_filter_bitmap = cpu_to_le32((u32)~0U);
10510 
10511 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
10512 		   "wmi hw data filter enable %d filter_bitmap 0x%x\n",
10513 		   arg->enable, arg->hw_filter_bitmap);
10514 
10515 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID);
10516 }
10517 
ath12k_wmi_wow_host_wakeup_ind(struct ath12k * ar)10518 int ath12k_wmi_wow_host_wakeup_ind(struct ath12k *ar)
10519 {
10520 	struct wmi_wow_host_wakeup_cmd *cmd;
10521 	struct sk_buff *skb;
10522 	size_t len;
10523 
10524 	len = sizeof(*cmd);
10525 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10526 	if (!skb)
10527 		return -ENOMEM;
10528 
10529 	cmd = (struct wmi_wow_host_wakeup_cmd *)skb->data;
10530 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
10531 						 sizeof(*cmd));
10532 
10533 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
10534 
10535 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID);
10536 }
10537 
ath12k_wmi_wow_enable(struct ath12k * ar)10538 int ath12k_wmi_wow_enable(struct ath12k *ar)
10539 {
10540 	struct wmi_wow_enable_cmd *cmd;
10541 	struct sk_buff *skb;
10542 	int len;
10543 
10544 	len = sizeof(*cmd);
10545 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10546 	if (!skb)
10547 		return -ENOMEM;
10548 
10549 	cmd = (struct wmi_wow_enable_cmd *)skb->data;
10550 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ENABLE_CMD,
10551 						 sizeof(*cmd));
10552 
10553 	cmd->enable = cpu_to_le32(1);
10554 	cmd->pause_iface_config = cpu_to_le32(WOW_IFACE_PAUSE_ENABLED);
10555 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow enable\n");
10556 
10557 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID);
10558 }
10559 
/* Enable or disable a single WoW wakeup @event on @vdev_id.
 * Returns 0 on success or a negative error value.
 */
int ath12k_wmi_wow_add_wakeup_event(struct ath12k *ar, u32 vdev_id,
				    enum wmi_wow_wakeup_event event,
				    u32 enable)
{
	struct wmi_wow_add_del_event_cmd *cmd;
	struct sk_buff *skb;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_wow_add_del_event_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_DEL_EVT_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->is_add = cpu_to_le32(enable);
	cmd->event_bitmap = cpu_to_le32((1 << event));

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
		   wow_wakeup_event(event), enable, vdev_id);

	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID);
}
10585 
/* Program a WoW bitmap wake pattern on @vdev_id.
 *
 * The command carries a single wmi_wow_bitmap_pattern_params entry
 * followed by the empty optional TLV arrays (ipv4 sync, ipv6 sync,
 * magic pattern, info timeout) and a one-entry ratelimit array.
 *
 * NOTE(review): pattern_len bytes are memcpy'd into the fixed-size
 * patternbuf/bitmaskbuf without a local bound check - assumes callers
 * validate the length against the buffer size; confirm at call sites.
 *
 * Returns 0 on success or a negative error value.
 */
int ath12k_wmi_wow_add_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id,
			       const u8 *pattern, const u8 *mask,
			       int pattern_len, int pattern_offset)
{
	struct wmi_wow_add_pattern_cmd *cmd;
	struct wmi_wow_bitmap_pattern_params *bitmap;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	len = sizeof(*cmd) +
	      sizeof(*tlv) +			/* array struct */
	      sizeof(*bitmap) +			/* bitmap */
	      sizeof(*tlv) +			/* empty ipv4 sync */
	      sizeof(*tlv) +			/* empty ipv6 sync */
	      sizeof(*tlv) +			/* empty magic */
	      sizeof(*tlv) +			/* empty info timeout */
	      sizeof(*tlv) + sizeof(u32);	/* ratelimit interval */

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	/* cmd */
	ptr = skb->data;
	cmd = ptr;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_PATTERN_CMD,
						 sizeof(*cmd));
	cmd->vdev_id = cpu_to_le32(vdev_id);
	cmd->pattern_id = cpu_to_le32(pattern_id);
	cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);

	ptr += sizeof(*cmd);

	/* bitmap */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, sizeof(*bitmap));

	ptr += sizeof(*tlv);

	bitmap = ptr;
	bitmap->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_BITMAP_PATTERN_T,
						    sizeof(*bitmap));
	memcpy(bitmap->patternbuf, pattern, pattern_len);
	memcpy(bitmap->bitmaskbuf, mask, pattern_len);
	bitmap->pattern_offset = cpu_to_le32(pattern_offset);
	bitmap->pattern_len = cpu_to_le32(pattern_len);
	bitmap->bitmask_len = cpu_to_le32(pattern_len);
	bitmap->pattern_id = cpu_to_le32(pattern_id);

	ptr += sizeof(*bitmap);

	/* ipv4 sync */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);

	ptr += sizeof(*tlv);

	/* ipv6 sync */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);

	ptr += sizeof(*tlv);

	/* magic */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);

	ptr += sizeof(*tlv);

	/* pattern info timeout */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);

	ptr += sizeof(*tlv);

	/* ratelimit interval */
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32));

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d pattern_len %d\n",
		   vdev_id, pattern_id, pattern_offset, pattern_len);

	ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow pattern: ",
			bitmap->patternbuf, pattern_len);
	ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow bitmask: ",
			bitmap->bitmaskbuf, pattern_len);

	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID);
}
10677 
/* Remove a previously installed WoW bitmap wake pattern from firmware. */
int ath12k_wmi_wow_del_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id)
{
	struct wmi_wow_del_pattern_cmd *cmd;
	struct sk_buff *skb;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_wow_del_pattern_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_DEL_PATTERN_CMD,
						 sizeof(*cmd));
	cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);
	cmd->pattern_id = cpu_to_le32(pattern_id);
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
		   vdev_id, pattern_id);

	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID);
}
10701 
/* ath12k_wmi_op_gen_config_pno_start() - build (but do not send) the NLO
 * config command that starts a PNO (preferred network offload) scan on a
 * vdev.
 *
 * Layout: wmi_wow_nlo_config_cmd, then a struct-array TLV of
 * nlo_configured_params (one per match profile), then a uint32-array TLV of
 * channels.
 *
 * Return: the skb on success or ERR_PTR(-ENOMEM); the caller owns the skb
 * and is responsible for sending or freeing it.
 */
static struct sk_buff *
ath12k_wmi_op_gen_config_pno_start(struct ath12k *ar, u32 vdev_id,
				   struct wmi_pno_scan_req_arg *pno)
{
	struct nlo_configured_params *nlo_list;
	size_t len, nlo_list_len, channel_list_len;
	struct wmi_wow_nlo_config_cmd *cmd;
	__le32 *channel_list;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	u32 i;

	len = sizeof(*cmd) +
	      sizeof(*tlv) +
	      /* TLV place holder for array of structures
	       * nlo_configured_params(nlo_list)
	       */
	      sizeof(*tlv);
	      /* TLV place holder for array of uint32 channel_list */

	/* NOTE(review): the channel list is taken from a_networks[0] only —
	 * presumably all match profiles share one channel list; confirm
	 * against the caller that fills wmi_pno_scan_req_arg.
	 */
	channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count;
	len += channel_list_len;

	nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count;
	len += nlo_list_len;

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = skb->data;
	cmd = ptr;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, sizeof(*cmd));

	/* NOTE(review): the command uses pno->vdev_id while the debug log at
	 * the bottom prints the vdev_id parameter — verify both are always
	 * equal at the call site.
	 */
	cmd->vdev_id = cpu_to_le32(pno->vdev_id);
	cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN);

	/* current FW does not support min-max range for dwell time */
	cmd->active_dwell_time = cpu_to_le32(pno->active_max_time);
	cmd->passive_dwell_time = cpu_to_le32(pno->passive_max_time);

	if (pno->do_passive_scan)
		cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE);

	cmd->fast_scan_period = cpu_to_le32(pno->fast_scan_period);
	cmd->slow_scan_period = cpu_to_le32(pno->slow_scan_period);
	cmd->fast_scan_max_cycles = cpu_to_le32(pno->fast_scan_max_cycles);
	cmd->delay_start_time = cpu_to_le32(pno->delay_start_time);

	/* MAC randomization for probe requests, if requested by the scan */
	if (pno->enable_pno_scan_randomization) {
		cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
					  WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ);
		ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
		ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
	}

	ptr += sizeof(*cmd);

	/* nlo_configured_params(nlo_list) */
	cmd->no_of_ssids = cpu_to_le32(pno->uc_networks_count);
	tlv = ptr;
	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, nlo_list_len);

	ptr += sizeof(*tlv);
	nlo_list = ptr;
	for (i = 0; i < pno->uc_networks_count; i++) {
		/* each list entry carries its own TLV header in-place */
		tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
		tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE,
						     sizeof(*nlo_list));

		nlo_list[i].ssid.valid = cpu_to_le32(1);
		nlo_list[i].ssid.ssid.ssid_len =
			cpu_to_le32(pno->a_networks[i].ssid.ssid_len);
		memcpy(nlo_list[i].ssid.ssid.ssid,
		       pno->a_networks[i].ssid.ssid,
		       le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len));

		/* only forward an RSSI condition for plausible thresholds;
		 * units of rssi_threshold are not visible here — TODO confirm
		 * at the caller what the -300 floor represents.
		 */
		if (pno->a_networks[i].rssi_threshold &&
		    pno->a_networks[i].rssi_threshold > -300) {
			nlo_list[i].rssi_cond.valid = cpu_to_le32(1);
			nlo_list[i].rssi_cond.rssi =
					cpu_to_le32(pno->a_networks[i].rssi_threshold);
		}

		nlo_list[i].bcast_nw_type.valid = cpu_to_le32(1);
		nlo_list[i].bcast_nw_type.bcast_nw_type =
					cpu_to_le32(pno->a_networks[i].bcast_nw_type);
	}

	/* channel list TLV (taken from the first match profile, see above) */
	ptr += nlo_list_len;
	cmd->num_of_channels = cpu_to_le32(pno->a_networks[0].channel_count);
	tlv = ptr;
	tlv->header =  ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, channel_list_len);
	ptr += sizeof(*tlv);
	channel_list = ptr;

	for (i = 0; i < pno->a_networks[0].channel_count; i++)
		channel_list[i] = cpu_to_le32(pno->a_networks[0].channels[i]);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
		   vdev_id);

	return skb;
}
10807 
/* Build (but do not send) the NLO config command that stops PNO on a vdev.
 * Returns the skb or ERR_PTR(-ENOMEM); caller owns the skb.
 */
static struct sk_buff *ath12k_wmi_op_gen_config_pno_stop(struct ath12k *ar,
							 u32 vdev_id)
{
	struct wmi_wow_nlo_config_cmd *cmd;
	struct sk_buff *skb;
	size_t len = sizeof(*cmd);

	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	cmd = (struct wmi_wow_nlo_config_cmd *)skb->data;
	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, len);
	cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_STOP);
	cmd->vdev_id = cpu_to_le32(vdev_id);

	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
		   "wmi tlv stop pno config vdev_id %d\n", vdev_id);
	return skb;
}
10830 
/* Enable or disable PNO on a vdev depending on pno_scan->enable. */
int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id,
			      struct wmi_pno_scan_req_arg  *pno_scan)
{
	struct sk_buff *skb;

	skb = pno_scan->enable ?
	      ath12k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan) :
	      ath12k_wmi_op_gen_config_pno_stop(ar, vdev_id);

	if (IS_ERR_OR_NULL(skb))
		return -ENOMEM;

	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
}
10846 
ath12k_wmi_fill_ns_offload(struct ath12k * ar,struct wmi_arp_ns_offload_arg * offload,void ** ptr,bool enable,bool ext)10847 static void ath12k_wmi_fill_ns_offload(struct ath12k *ar,
10848 				       struct wmi_arp_ns_offload_arg *offload,
10849 				       void **ptr,
10850 				       bool enable,
10851 				       bool ext)
10852 {
10853 	struct wmi_ns_offload_params *ns;
10854 	struct wmi_tlv *tlv;
10855 	void *buf_ptr = *ptr;
10856 	u32 ns_cnt, ns_ext_tuples;
10857 	int i, max_offloads;
10858 
10859 	ns_cnt = offload->ipv6_count;
10860 
10861 	tlv  = buf_ptr;
10862 
10863 	if (ext) {
10864 		ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS;
10865 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
10866 						 ns_ext_tuples * sizeof(*ns));
10867 		i = WMI_MAX_NS_OFFLOADS;
10868 		max_offloads = offload->ipv6_count;
10869 	} else {
10870 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
10871 						 WMI_MAX_NS_OFFLOADS * sizeof(*ns));
10872 		i = 0;
10873 		max_offloads = WMI_MAX_NS_OFFLOADS;
10874 	}
10875 
10876 	buf_ptr += sizeof(*tlv);
10877 
10878 	for (; i < max_offloads; i++) {
10879 		ns = buf_ptr;
10880 		ns->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NS_OFFLOAD_TUPLE,
10881 							sizeof(*ns));
10882 
10883 		if (enable) {
10884 			if (i < ns_cnt)
10885 				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_VALID);
10886 
10887 			memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16);
10888 			memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16);
10889 
10890 			if (offload->ipv6_type[i])
10891 				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_IS_IPV6_ANYCAST);
10892 
10893 			memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN);
10894 
10895 			if (!is_zero_ether_addr(ns->target_mac.addr))
10896 				ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_MAC_VALID);
10897 
10898 			ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
10899 				   "wmi index %d ns_solicited %pI6 target %pI6",
10900 				   i, ns->solicitation_ipaddr,
10901 				   ns->target_ipaddr[0]);
10902 		}
10903 
10904 		buf_ptr += sizeof(*ns);
10905 	}
10906 
10907 	*ptr = buf_ptr;
10908 }
10909 
ath12k_wmi_fill_arp_offload(struct ath12k * ar,struct wmi_arp_ns_offload_arg * offload,void ** ptr,bool enable)10910 static void ath12k_wmi_fill_arp_offload(struct ath12k *ar,
10911 					struct wmi_arp_ns_offload_arg *offload,
10912 					void **ptr,
10913 					bool enable)
10914 {
10915 	struct wmi_arp_offload_params *arp;
10916 	struct wmi_tlv *tlv;
10917 	void *buf_ptr = *ptr;
10918 	int i;
10919 
10920 	/* fill arp tuple */
10921 	tlv = buf_ptr;
10922 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
10923 					 WMI_MAX_ARP_OFFLOADS * sizeof(*arp));
10924 	buf_ptr += sizeof(*tlv);
10925 
10926 	for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) {
10927 		arp = buf_ptr;
10928 		arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARP_OFFLOAD_TUPLE,
10929 							 sizeof(*arp));
10930 
10931 		if (enable && i < offload->ipv4_count) {
10932 			/* Copy the target ip addr and flags */
10933 			arp->flags = cpu_to_le32(WMI_ARPOL_FLAGS_VALID);
10934 			memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4);
10935 
10936 			ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi arp offload address %pI4",
10937 				   arp->target_ipaddr);
10938 		}
10939 
10940 		buf_ptr += sizeof(*arp);
10941 	}
10942 
10943 	*ptr = buf_ptr;
10944 }
10945 
ath12k_wmi_arp_ns_offload(struct ath12k * ar,struct ath12k_link_vif * arvif,struct wmi_arp_ns_offload_arg * offload,bool enable)10946 int ath12k_wmi_arp_ns_offload(struct ath12k *ar,
10947 			      struct ath12k_link_vif *arvif,
10948 			      struct wmi_arp_ns_offload_arg *offload,
10949 			      bool enable)
10950 {
10951 	struct wmi_set_arp_ns_offload_cmd *cmd;
10952 	struct wmi_tlv *tlv;
10953 	struct sk_buff *skb;
10954 	void *buf_ptr;
10955 	size_t len;
10956 	u8 ns_cnt, ns_ext_tuples = 0;
10957 
10958 	ns_cnt = offload->ipv6_count;
10959 
10960 	len = sizeof(*cmd) +
10961 	      sizeof(*tlv) +
10962 	      WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_params) +
10963 	      sizeof(*tlv) +
10964 	      WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_params);
10965 
10966 	if (ns_cnt > WMI_MAX_NS_OFFLOADS) {
10967 		ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS;
10968 		len += sizeof(*tlv) +
10969 		       ns_ext_tuples * sizeof(struct wmi_ns_offload_params);
10970 	}
10971 
10972 	skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
10973 	if (!skb)
10974 		return -ENOMEM;
10975 
10976 	buf_ptr = skb->data;
10977 	cmd = buf_ptr;
10978 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_ARP_NS_OFFLOAD_CMD,
10979 						 sizeof(*cmd));
10980 	cmd->flags = cpu_to_le32(0);
10981 	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
10982 	cmd->num_ns_ext_tuples = cpu_to_le32(ns_ext_tuples);
10983 
10984 	buf_ptr += sizeof(*cmd);
10985 
10986 	ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 0);
10987 	ath12k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable);
10988 
10989 	if (ns_ext_tuples)
10990 		ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 1);
10991 
10992 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID);
10993 }
10994 
ath12k_wmi_gtk_rekey_offload(struct ath12k * ar,struct ath12k_link_vif * arvif,bool enable)10995 int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar,
10996 				 struct ath12k_link_vif *arvif, bool enable)
10997 {
10998 	struct ath12k_rekey_data *rekey_data = &arvif->rekey_data;
10999 	struct wmi_gtk_rekey_offload_cmd *cmd;
11000 	struct sk_buff *skb;
11001 	__le64 replay_ctr;
11002 	int len;
11003 
11004 	len = sizeof(*cmd);
11005 	skb =  ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
11006 	if (!skb)
11007 		return -ENOMEM;
11008 
11009 	cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
11010 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
11011 	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
11012 
11013 	if (enable) {
11014 		cmd->flags = cpu_to_le32(GTK_OFFLOAD_ENABLE_OPCODE);
11015 
11016 		/* the length in rekey_data and cmd is equal */
11017 		memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck));
11018 		memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek));
11019 
11020 		replay_ctr = cpu_to_le64(rekey_data->replay_ctr);
11021 		memcpy(cmd->replay_ctr, &replay_ctr,
11022 		       sizeof(replay_ctr));
11023 	} else {
11024 		cmd->flags = cpu_to_le32(GTK_OFFLOAD_DISABLE_OPCODE);
11025 	}
11026 
11027 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "offload gtk rekey vdev: %d %d\n",
11028 		   arvif->vdev_id, enable);
11029 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
11030 }
11031 
ath12k_wmi_gtk_rekey_getinfo(struct ath12k * ar,struct ath12k_link_vif * arvif)11032 int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar,
11033 				 struct ath12k_link_vif *arvif)
11034 {
11035 	struct wmi_gtk_rekey_offload_cmd *cmd;
11036 	struct sk_buff *skb;
11037 	int len;
11038 
11039 	len = sizeof(*cmd);
11040 	skb =  ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
11041 	if (!skb)
11042 		return -ENOMEM;
11043 
11044 	cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
11045 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
11046 	cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
11047 	cmd->flags = cpu_to_le32(GTK_OFFLOAD_REQUEST_STATUS_OPCODE);
11048 
11049 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "get gtk rekey vdev_id: %d\n",
11050 		   arvif->vdev_id);
11051 	return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
11052 }
11053 
ath12k_wmi_sta_keepalive(struct ath12k * ar,const struct wmi_sta_keepalive_arg * arg)11054 int ath12k_wmi_sta_keepalive(struct ath12k *ar,
11055 			     const struct wmi_sta_keepalive_arg *arg)
11056 {
11057 	struct wmi_sta_keepalive_arp_resp_params *arp;
11058 	struct ath12k_wmi_pdev *wmi = ar->wmi;
11059 	struct wmi_sta_keepalive_cmd *cmd;
11060 	struct sk_buff *skb;
11061 	size_t len;
11062 
11063 	len = sizeof(*cmd) + sizeof(*arp);
11064 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
11065 	if (!skb)
11066 		return -ENOMEM;
11067 
11068 	cmd = (struct wmi_sta_keepalive_cmd *)skb->data;
11069 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALIVE_CMD, sizeof(*cmd));
11070 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
11071 	cmd->enabled = cpu_to_le32(arg->enabled);
11072 	cmd->interval = cpu_to_le32(arg->interval);
11073 	cmd->method = cpu_to_le32(arg->method);
11074 
11075 	arp = (struct wmi_sta_keepalive_arp_resp_params *)(cmd + 1);
11076 	arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALVE_ARP_RESPONSE,
11077 						 sizeof(*arp));
11078 	if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE ||
11079 	    arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) {
11080 		arp->src_ip4_addr = cpu_to_le32(arg->src_ip4_addr);
11081 		arp->dest_ip4_addr = cpu_to_le32(arg->dest_ip4_addr);
11082 		ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
11083 	}
11084 
11085 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
11086 		   "wmi sta keepalive vdev %d enabled %d method %d interval %d\n",
11087 		   arg->vdev_id, arg->enabled, arg->method, arg->interval);
11088 
11089 	return ath12k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
11090 }
11091 
ath12k_wmi_mlo_setup(struct ath12k * ar,struct wmi_mlo_setup_arg * mlo_params)11092 int ath12k_wmi_mlo_setup(struct ath12k *ar, struct wmi_mlo_setup_arg *mlo_params)
11093 {
11094 	struct wmi_mlo_setup_cmd *cmd;
11095 	struct ath12k_wmi_pdev *wmi = ar->wmi;
11096 	u32 *partner_links, num_links;
11097 	int i, ret, buf_len, arg_len;
11098 	struct sk_buff *skb;
11099 	struct wmi_tlv *tlv;
11100 	void *ptr;
11101 
11102 	num_links = mlo_params->num_partner_links;
11103 	arg_len = num_links * sizeof(u32);
11104 	buf_len = sizeof(*cmd) + TLV_HDR_SIZE + arg_len;
11105 
11106 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
11107 	if (!skb)
11108 		return -ENOMEM;
11109 
11110 	cmd = (struct wmi_mlo_setup_cmd *)skb->data;
11111 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_SETUP_CMD,
11112 						 sizeof(*cmd));
11113 	cmd->mld_group_id = mlo_params->group_id;
11114 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
11115 	ptr = skb->data + sizeof(*cmd);
11116 
11117 	tlv = ptr;
11118 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len);
11119 	ptr += TLV_HDR_SIZE;
11120 
11121 	partner_links = ptr;
11122 	for (i = 0; i < num_links; i++)
11123 		partner_links[i] = mlo_params->partner_link_id[i];
11124 
11125 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_SETUP_CMDID);
11126 	if (ret) {
11127 		ath12k_warn(ar->ab, "failed to submit WMI_MLO_SETUP_CMDID command: %d\n",
11128 			    ret);
11129 		dev_kfree_skb(skb);
11130 		return ret;
11131 	}
11132 
11133 	return 0;
11134 }
11135 
ath12k_wmi_mlo_ready(struct ath12k * ar)11136 int ath12k_wmi_mlo_ready(struct ath12k *ar)
11137 {
11138 	struct wmi_mlo_ready_cmd *cmd;
11139 	struct ath12k_wmi_pdev *wmi = ar->wmi;
11140 	struct sk_buff *skb;
11141 	int ret, len;
11142 
11143 	len = sizeof(*cmd);
11144 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
11145 	if (!skb)
11146 		return -ENOMEM;
11147 
11148 	cmd = (struct wmi_mlo_ready_cmd *)skb->data;
11149 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_READY_CMD,
11150 						 sizeof(*cmd));
11151 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
11152 
11153 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_READY_CMDID);
11154 	if (ret) {
11155 		ath12k_warn(ar->ab, "failed to submit WMI_MLO_READY_CMDID command: %d\n",
11156 			    ret);
11157 		dev_kfree_skb(skb);
11158 		return ret;
11159 	}
11160 
11161 	return 0;
11162 }
11163 
ath12k_wmi_mlo_teardown(struct ath12k * ar)11164 int ath12k_wmi_mlo_teardown(struct ath12k *ar)
11165 {
11166 	struct wmi_mlo_teardown_cmd *cmd;
11167 	struct ath12k_wmi_pdev *wmi = ar->wmi;
11168 	struct sk_buff *skb;
11169 	int ret, len;
11170 
11171 	len = sizeof(*cmd);
11172 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
11173 	if (!skb)
11174 		return -ENOMEM;
11175 
11176 	cmd = (struct wmi_mlo_teardown_cmd *)skb->data;
11177 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_TEARDOWN_CMD,
11178 						 sizeof(*cmd));
11179 	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
11180 	cmd->reason_code = WMI_MLO_TEARDOWN_SSR_REASON;
11181 
11182 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_TEARDOWN_CMDID);
11183 	if (ret) {
11184 		ath12k_warn(ar->ab, "failed to submit WMI MLO teardown command: %d\n",
11185 			    ret);
11186 		dev_kfree_skb(skb);
11187 		return ret;
11188 	}
11189 
11190 	return 0;
11191 }
11192 
ath12k_wmi_supports_6ghz_cc_ext(struct ath12k * ar)11193 bool ath12k_wmi_supports_6ghz_cc_ext(struct ath12k *ar)
11194 {
11195 	return test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
11196 			ar->ab->wmi_ab.svc_map) && ar->supports_6ghz;
11197 }
11198 
ath12k_wmi_send_vdev_set_tpc_power(struct ath12k * ar,u32 vdev_id,struct ath12k_reg_tpc_power_info * param)11199 int ath12k_wmi_send_vdev_set_tpc_power(struct ath12k *ar,
11200 				       u32 vdev_id,
11201 				       struct ath12k_reg_tpc_power_info *param)
11202 {
11203 	struct wmi_vdev_set_tpc_power_cmd *cmd;
11204 	struct ath12k_wmi_pdev *wmi = ar->wmi;
11205 	struct wmi_vdev_ch_power_params *ch;
11206 	int i, ret, len, array_len;
11207 	struct sk_buff *skb;
11208 	struct wmi_tlv *tlv;
11209 	u8 *ptr;
11210 
11211 	array_len = sizeof(*ch) * param->num_pwr_levels;
11212 	len = sizeof(*cmd) + TLV_HDR_SIZE + array_len;
11213 
11214 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
11215 	if (!skb)
11216 		return -ENOMEM;
11217 
11218 	ptr = skb->data;
11219 
11220 	cmd = (struct wmi_vdev_set_tpc_power_cmd *)ptr;
11221 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_TPC_POWER_CMD,
11222 						 sizeof(*cmd));
11223 	cmd->vdev_id = cpu_to_le32(vdev_id);
11224 	cmd->psd_power = cpu_to_le32(param->is_psd_power);
11225 	cmd->eirp_power = cpu_to_le32(param->eirp_power);
11226 	cmd->power_type_6ghz = cpu_to_le32(param->ap_power_type);
11227 
11228 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
11229 		   "tpc vdev id %d is psd power %d eirp power %d 6 ghz power type %d\n",
11230 		   vdev_id, param->is_psd_power, param->eirp_power, param->ap_power_type);
11231 
11232 	ptr += sizeof(*cmd);
11233 	tlv = (struct wmi_tlv *)ptr;
11234 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, array_len);
11235 
11236 	ptr += TLV_HDR_SIZE;
11237 	ch = (struct wmi_vdev_ch_power_params *)ptr;
11238 
11239 	for (i = 0; i < param->num_pwr_levels; i++, ch++) {
11240 		ch->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CH_POWER_INFO,
11241 							sizeof(*ch));
11242 		ch->chan_cfreq = cpu_to_le32(param->chan_power_info[i].chan_cfreq);
11243 		ch->tx_power = cpu_to_le32(param->chan_power_info[i].tx_power);
11244 
11245 		ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "tpc chan freq %d TX power %d\n",
11246 			   ch->chan_cfreq, ch->tx_power);
11247 	}
11248 
11249 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_TPC_POWER_CMDID);
11250 	if (ret) {
11251 		ath12k_warn(ar->ab, "failed to send WMI_VDEV_SET_TPC_POWER_CMDID\n");
11252 		dev_kfree_skb(skb);
11253 		return ret;
11254 	}
11255 
11256 	return 0;
11257 }
11258 
/* ath12k_wmi_fill_disallowed_bmap() - populate the pre-reserved array of
 * disallowed-MLO-mode bitmap TLVs at @dislw_bmap from @arg.
 *
 * The caller must already have reserved room for
 * arg->num_disallow_mode_comb entries at @dislw_bmap.
 *
 * Return: 0 on success, -EINVAL when num_disallow_mode_comb exceeds the
 * arg-side array size.
 */
static int
ath12k_wmi_fill_disallowed_bmap(struct ath12k_base *ab,
				struct wmi_disallowed_mlo_mode_bitmap_params *dislw_bmap,
				struct wmi_mlo_link_set_active_arg *arg)
{
	struct wmi_ml_disallow_mode_bmap_arg *dislw_bmap_arg;
	u8 i;

	if (arg->num_disallow_mode_comb >
	    ARRAY_SIZE(arg->disallow_bmap)) {
		ath12k_warn(ab, "invalid num_disallow_mode_comb: %d",
			    arg->num_disallow_mode_comb);
		return -EINVAL;
	}

	dislw_bmap_arg = &arg->disallow_bmap[0];
	for (i = 0; i < arg->num_disallow_mode_comb; i++) {
		/* NOTE(review): TLV tag is 0 — presumably firmware identifies
		 * these entries positionally inside the enclosing array TLV;
		 * confirm against the WMI interface definition.
		 */
		dislw_bmap->tlv_header =
				ath12k_wmi_tlv_cmd_hdr(0, sizeof(*dislw_bmap));
		dislw_bmap->disallowed_mode_bitmap =
				cpu_to_le32(dislw_bmap_arg->disallowed_mode);
		/* pack up to four IEEE link ids into one __le32 field */
		dislw_bmap->ieee_link_id_comb =
			le32_encode_bits(dislw_bmap_arg->ieee_link_id[0],
					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_1) |
			le32_encode_bits(dislw_bmap_arg->ieee_link_id[1],
					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_2) |
			le32_encode_bits(dislw_bmap_arg->ieee_link_id[2],
					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_3) |
			le32_encode_bits(dislw_bmap_arg->ieee_link_id[3],
					 WMI_DISALW_MLO_MODE_BMAP_IEEE_LINK_ID_COMB_4);

		ath12k_dbg(ab, ATH12K_DBG_WMI,
			   "entry %d disallowed_mode %d ieee_link_id_comb 0x%x",
			   i, dislw_bmap_arg->disallowed_mode,
			   dislw_bmap_arg->ieee_link_id_comb);
		dislw_bmap++;
		dislw_bmap_arg++;
	}

	return 0;
}
11300 
ath12k_wmi_send_mlo_link_set_active_cmd(struct ath12k_base * ab,struct wmi_mlo_link_set_active_arg * arg)11301 int ath12k_wmi_send_mlo_link_set_active_cmd(struct ath12k_base *ab,
11302 					    struct wmi_mlo_link_set_active_arg *arg)
11303 {
11304 	struct wmi_disallowed_mlo_mode_bitmap_params *disallowed_mode_bmap;
11305 	struct wmi_mlo_set_active_link_number_params *link_num_param;
11306 	u32 num_link_num_param = 0, num_vdev_bitmap = 0;
11307 	struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
11308 	struct wmi_mlo_link_set_active_cmd *cmd;
11309 	u32 num_inactive_vdev_bitmap = 0;
11310 	u32 num_disallow_mode_comb = 0;
11311 	struct wmi_tlv *tlv;
11312 	struct sk_buff *skb;
11313 	__le32 *vdev_bitmap;
11314 	void *buf_ptr;
11315 	int i, ret;
11316 	u32 len;
11317 
11318 	if (!arg->num_vdev_bitmap && !arg->num_link_entry) {
11319 		ath12k_warn(ab, "Invalid num_vdev_bitmap and num_link_entry");
11320 		return -EINVAL;
11321 	}
11322 
11323 	switch (arg->force_mode) {
11324 	case WMI_MLO_LINK_FORCE_MODE_ACTIVE_LINK_NUM:
11325 	case WMI_MLO_LINK_FORCE_MODE_INACTIVE_LINK_NUM:
11326 		num_link_num_param = arg->num_link_entry;
11327 		fallthrough;
11328 	case WMI_MLO_LINK_FORCE_MODE_ACTIVE:
11329 	case WMI_MLO_LINK_FORCE_MODE_INACTIVE:
11330 	case WMI_MLO_LINK_FORCE_MODE_NO_FORCE:
11331 		num_vdev_bitmap = arg->num_vdev_bitmap;
11332 		break;
11333 	case WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE:
11334 		num_vdev_bitmap = arg->num_vdev_bitmap;
11335 		num_inactive_vdev_bitmap = arg->num_inactive_vdev_bitmap;
11336 		break;
11337 	default:
11338 		ath12k_warn(ab, "Invalid force mode: %u", arg->force_mode);
11339 		return -EINVAL;
11340 	}
11341 
11342 	num_disallow_mode_comb = arg->num_disallow_mode_comb;
11343 	len = sizeof(*cmd) +
11344 	      TLV_HDR_SIZE + sizeof(*link_num_param) * num_link_num_param +
11345 	      TLV_HDR_SIZE + sizeof(*vdev_bitmap) * num_vdev_bitmap +
11346 	      TLV_HDR_SIZE + TLV_HDR_SIZE + TLV_HDR_SIZE +
11347 	      TLV_HDR_SIZE + sizeof(*disallowed_mode_bmap) * num_disallow_mode_comb;
11348 	if (arg->force_mode == WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE)
11349 		len += sizeof(*vdev_bitmap) * num_inactive_vdev_bitmap;
11350 
11351 	skb = ath12k_wmi_alloc_skb(wmi_ab, len);
11352 	if (!skb)
11353 		return -ENOMEM;
11354 
11355 	cmd = (struct wmi_mlo_link_set_active_cmd *)skb->data;
11356 	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_LINK_SET_ACTIVE_CMD,
11357 						 sizeof(*cmd));
11358 	cmd->force_mode = cpu_to_le32(arg->force_mode);
11359 	cmd->reason = cpu_to_le32(arg->reason);
11360 	ath12k_dbg(ab, ATH12K_DBG_WMI,
11361 		   "mode %d reason %d num_link_num_param %d num_vdev_bitmap %d inactive %d num_disallow_mode_comb %d",
11362 		   arg->force_mode, arg->reason, num_link_num_param,
11363 		   num_vdev_bitmap, num_inactive_vdev_bitmap,
11364 		   num_disallow_mode_comb);
11365 
11366 	buf_ptr = skb->data + sizeof(*cmd);
11367 	tlv = buf_ptr;
11368 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
11369 					 sizeof(*link_num_param) * num_link_num_param);
11370 	buf_ptr += TLV_HDR_SIZE;
11371 
11372 	if (num_link_num_param) {
11373 		cmd->ctrl_flags =
11374 			le32_encode_bits(arg->ctrl_flags.dync_force_link_num ? 1 : 0,
11375 					 CRTL_F_DYNC_FORCE_LINK_NUM);
11376 
11377 		link_num_param = buf_ptr;
11378 		for (i = 0; i < num_link_num_param; i++) {
11379 			link_num_param->tlv_header =
11380 				ath12k_wmi_tlv_cmd_hdr(0, sizeof(*link_num_param));
11381 			link_num_param->num_of_link =
11382 				cpu_to_le32(arg->link_num[i].num_of_link);
11383 			link_num_param->vdev_type =
11384 				cpu_to_le32(arg->link_num[i].vdev_type);
11385 			link_num_param->vdev_subtype =
11386 				cpu_to_le32(arg->link_num[i].vdev_subtype);
11387 			link_num_param->home_freq =
11388 				cpu_to_le32(arg->link_num[i].home_freq);
11389 			ath12k_dbg(ab, ATH12K_DBG_WMI,
11390 				   "entry %d num_of_link %d vdev type %d subtype %d freq %d control_flags %d",
11391 				   i, arg->link_num[i].num_of_link,
11392 				   arg->link_num[i].vdev_type,
11393 				   arg->link_num[i].vdev_subtype,
11394 				   arg->link_num[i].home_freq,
11395 				   __le32_to_cpu(cmd->ctrl_flags));
11396 			link_num_param++;
11397 		}
11398 
11399 		buf_ptr += sizeof(*link_num_param) * num_link_num_param;
11400 	}
11401 
11402 	tlv = buf_ptr;
11403 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32,
11404 					 sizeof(*vdev_bitmap) * num_vdev_bitmap);
11405 	buf_ptr += TLV_HDR_SIZE;
11406 
11407 	if (num_vdev_bitmap) {
11408 		vdev_bitmap = buf_ptr;
11409 		for (i = 0; i < num_vdev_bitmap; i++) {
11410 			vdev_bitmap[i] = cpu_to_le32(arg->vdev_bitmap[i]);
11411 			ath12k_dbg(ab, ATH12K_DBG_WMI, "entry %d vdev_id_bitmap 0x%x",
11412 				   i, arg->vdev_bitmap[i]);
11413 		}
11414 
11415 		buf_ptr += sizeof(*vdev_bitmap) * num_vdev_bitmap;
11416 	}
11417 
11418 	if (arg->force_mode == WMI_MLO_LINK_FORCE_MODE_ACTIVE_INACTIVE) {
11419 		tlv = buf_ptr;
11420 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32,
11421 						 sizeof(*vdev_bitmap) *
11422 						 num_inactive_vdev_bitmap);
11423 		buf_ptr += TLV_HDR_SIZE;
11424 
11425 		if (num_inactive_vdev_bitmap) {
11426 			vdev_bitmap = buf_ptr;
11427 			for (i = 0; i < num_inactive_vdev_bitmap; i++) {
11428 				vdev_bitmap[i] =
11429 					cpu_to_le32(arg->inactive_vdev_bitmap[i]);
11430 				ath12k_dbg(ab, ATH12K_DBG_WMI,
11431 					   "entry %d inactive_vdev_id_bitmap 0x%x",
11432 					    i, arg->inactive_vdev_bitmap[i]);
11433 			}
11434 
11435 			buf_ptr += sizeof(*vdev_bitmap) * num_inactive_vdev_bitmap;
11436 		}
11437 	} else {
11438 		/* add empty vdev bitmap2 tlv */
11439 		tlv = buf_ptr;
11440 		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
11441 		buf_ptr += TLV_HDR_SIZE;
11442 	}
11443 
11444 	/* add empty ieee_link_id_bitmap tlv */
11445 	tlv = buf_ptr;
11446 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
11447 	buf_ptr += TLV_HDR_SIZE;
11448 
11449 	/* add empty ieee_link_id_bitmap2 tlv */
11450 	tlv = buf_ptr;
11451 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
11452 	buf_ptr += TLV_HDR_SIZE;
11453 
11454 	tlv = buf_ptr;
11455 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
11456 					 sizeof(*disallowed_mode_bmap) *
11457 					 arg->num_disallow_mode_comb);
11458 	buf_ptr += TLV_HDR_SIZE;
11459 
11460 	ret = ath12k_wmi_fill_disallowed_bmap(ab, buf_ptr, arg);
11461 	if (ret)
11462 		goto free_skb;
11463 
11464 	ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_MLO_LINK_SET_ACTIVE_CMDID);
11465 	if (ret) {
11466 		ath12k_warn(ab,
11467 			    "failed to send WMI_MLO_LINK_SET_ACTIVE_CMDID: %d\n", ret);
11468 		goto free_skb;
11469 	}
11470 
11471 	ath12k_dbg(ab, ATH12K_DBG_WMI, "WMI mlo link set active cmd");
11472 
11473 	return ret;
11474 
11475 free_skb:
11476 	dev_kfree_skb(skb);
11477 	return ret;
11478 }
11479