1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4  * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
5  */
6 
7 #include "core.h"
8 #include "dp_tx.h"
9 #include "debug.h"
10 #include "debugfs.h"
11 #include "hw.h"
12 #include "peer.h"
13 #include "mac.h"
14 
15 static enum hal_tcl_encap_type
16 ath12k_dp_tx_get_encap_type(struct ath12k_base *ab, struct sk_buff *skb)
17 {
18 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
19 
20 	if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
21 		return HAL_TCL_ENCAP_TYPE_RAW;
22 
23 	if (tx_info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
24 		return HAL_TCL_ENCAP_TYPE_ETHERNET;
25 
26 	return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
27 }
28 
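/* "Native WiFi" encap is an 802.11 header without the QoS control field.
 * The helper below deletes that field from a QoS data frame by moving the
 * preceding header bytes forward by IEEE80211_QOS_CTL_LEN and clearing
 * IEEE80211_STYPE_QOS_DATA in the frame control.
 */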
29 static void ath12k_dp_tx_encap_nwifi(struct sk_buff *skb)
30 {
31 	struct ieee80211_hdr *hdr = (void *)skb->data;
32 	u8 *qos_ctl;
33 
34 	if (!ieee80211_is_data_qos(hdr->frame_control))
35 		return;
36 
37 	qos_ctl = ieee80211_get_qos_ctl(hdr);
38 	memmove(skb->data + IEEE80211_QOS_CTL_LEN,
39 		skb->data, (void *)qos_ctl - (void *)skb->data);
40 	skb_pull(skb, IEEE80211_QOS_CTL_LEN);
41 
42 	hdr = (void *)skb->data;
43 	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
44 }
45 
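/* Map an skb to a HAL TID: hw-encapped frames carry the TID in
 * skb->priority, while non-QoS frames use the dedicated non-QoS REO TID.
 */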
46 static u8 ath12k_dp_tx_get_tid(struct sk_buff *skb)
47 {
48 	struct ieee80211_hdr *hdr = (void *)skb->data;
49 	struct ath12k_skb_cb *cb = ATH12K_SKB_CB(skb);
50 
51 	if (cb->flags & ATH12K_SKB_HW_80211_ENCAP)
52 		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
53 	else if (!ieee80211_is_data_qos(hdr->frame_control))
54 		return HAL_DESC_REO_NON_QOS_TID;
55 	else
56 		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
57 }
58 
59 enum hal_encrypt_type ath12k_dp_tx_get_encrypt_type(u32 cipher)
60 {
61 	switch (cipher) {
62 	case WLAN_CIPHER_SUITE_WEP40:
63 		return HAL_ENCRYPT_TYPE_WEP_40;
64 	case WLAN_CIPHER_SUITE_WEP104:
65 		return HAL_ENCRYPT_TYPE_WEP_104;
66 	case WLAN_CIPHER_SUITE_TKIP:
67 		return HAL_ENCRYPT_TYPE_TKIP_MIC;
68 	case WLAN_CIPHER_SUITE_CCMP:
69 		return HAL_ENCRYPT_TYPE_CCMP_128;
70 	case WLAN_CIPHER_SUITE_CCMP_256:
71 		return HAL_ENCRYPT_TYPE_CCMP_256;
72 	case WLAN_CIPHER_SUITE_GCMP:
73 		return HAL_ENCRYPT_TYPE_GCMP_128;
74 	case WLAN_CIPHER_SUITE_GCMP_256:
75 		return HAL_ENCRYPT_TYPE_AES_GCMP_256;
76 	default:
77 		return HAL_ENCRYPT_TYPE_OPEN;
78 	}
79 }
80 
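/* TX descriptors are kept in per-pool free/used lists protected by
 * tx_desc_lock[]; the pool is derived from the skb queue mapping, so
 * contention stays per-queue. The two helpers below move descriptors
 * between those lists.
 */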
81 static void ath12k_dp_tx_release_txbuf(struct ath12k_dp *dp,
82 				       struct ath12k_tx_desc_info *tx_desc,
83 				       u8 pool_id)
84 {
85 	spin_lock_bh(&dp->tx_desc_lock[pool_id]);
86 	tx_desc->skb_ext_desc = NULL;
87 	list_move_tail(&tx_desc->list, &dp->tx_desc_free_list[pool_id]);
88 	spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
89 }
90 
91 static struct ath12k_tx_desc_info *ath12k_dp_tx_assign_buffer(struct ath12k_dp *dp,
92 							      u8 pool_id)
93 {
94 	struct ath12k_tx_desc_info *desc;
95 
96 	spin_lock_bh(&dp->tx_desc_lock[pool_id]);
97 	desc = list_first_entry_or_null(&dp->tx_desc_free_list[pool_id],
98 					struct ath12k_tx_desc_info,
99 					list);
100 	if (!desc) {
101 		spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
102 		ath12k_warn(dp->ab, "failed to allocate data Tx buffer\n");
103 		return NULL;
104 	}
105 
106 	list_move_tail(&desc->list, &dp->tx_desc_used_list[pool_id]);
107 	spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
108 
109 	return desc;
110 }
111 
112 static void ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab,
113 					     struct hal_tx_msdu_ext_desc *tcl_ext_cmd,
114 					     struct hal_tx_info *ti)
115 {
116 	tcl_ext_cmd->info0 = le32_encode_bits(ti->paddr,
117 					      HAL_TX_MSDU_EXT_INFO0_BUF_PTR_LO);
118 	tcl_ext_cmd->info1 = le32_encode_bits(0x0,
119 					      HAL_TX_MSDU_EXT_INFO1_BUF_PTR_HI) |
120 			       le32_encode_bits(ti->data_len,
121 						HAL_TX_MSDU_EXT_INFO1_BUF_LEN);
122 
123 	tcl_ext_cmd->info1 |= le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) |
124 				le32_encode_bits(ti->encap_type,
125 						 HAL_TX_MSDU_EXT_INFO1_ENCAP_TYPE) |
126 				le32_encode_bits(ti->encrypt_type,
127 						 HAL_TX_MSDU_EXT_INFO1_ENCRYPT_TYPE);
128 }
129 
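/* HTT metadata appended behind the frame payload must be 8-byte aligned.
 * The helper below ensures the skb is writable with enough tailroom
 * (copying if necessary via skb_cow_data()), then appends and zeroes the
 * aligned area.
 */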
130 #define HTT_META_DATA_ALIGNMENT 0x8
131 
132 static void *ath12k_dp_metadata_align_skb(struct sk_buff *skb, u8 tail_len)
133 {
134 	struct sk_buff *tail;
135 	void *metadata;
136 
137 	if (unlikely(skb_cow_data(skb, tail_len, &tail) < 0))
138 		return NULL;
139 
140 	metadata = pskb_put(skb, tail, tail_len);
141 	memset(metadata, 0, tail_len);
142 	return metadata;
143 }
144 
145 /* Prepare the HTT metadata used together with an ext MSDU descriptor */
146 static int ath12k_dp_prepare_htt_metadata(struct sk_buff *skb)
147 {
148 	struct hal_tx_msdu_metadata *desc_ext;
149 	u8 htt_desc_size;
150 	/* Size rounded up to a multiple of 8 bytes */
151 	u8 htt_desc_size_aligned;
152 
153 	htt_desc_size = sizeof(struct hal_tx_msdu_metadata);
154 	htt_desc_size_aligned = ALIGN(htt_desc_size, HTT_META_DATA_ALIGNMENT);
155 
156 	desc_ext = ath12k_dp_metadata_align_skb(skb, htt_desc_size_aligned);
157 	if (!desc_ext)
158 		return -ENOMEM;
159 
160 	desc_ext->info0 = le32_encode_bits(1, HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_FLAG) |
161 			  le32_encode_bits(0, HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_TYPE) |
162 			  le32_encode_bits(1,
163 					   HAL_TX_MSDU_METADATA_INFO0_HOST_TX_DESC_POOL);
164 
165 	return 0;
166 }
167 
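/* Some hw_params define an iova_mask: the payload's DMA address must start
 * on an (iova_mask + 1)-byte boundary. The helpers below realign the data
 * in place. For example, with iova_mask = 0x7 and skb->data at offset 3
 * within an 8-byte window, the payload is shifted up by delta1 = 3 bytes
 * into the headroom, or down by delta2 = 5 bytes into the tailroom,
 * whichever side has room; otherwise the skb is reallocated.
 */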
168 static void ath12k_dp_tx_move_payload(struct sk_buff *skb,
169 				      unsigned long delta,
170 				      bool head)
171 {
172 	unsigned long len = skb->len;
173 
174 	if (head) {
175 		skb_push(skb, delta);
176 		memmove(skb->data, skb->data + delta, len);
177 		skb_trim(skb, len);
178 	} else {
179 		skb_put(skb, delta);
180 		memmove(skb->data + delta, skb->data, len);
181 		skb_pull(skb, delta);
182 	}
183 }
184 
185 static int ath12k_dp_tx_align_payload(struct ath12k_base *ab,
186 				      struct sk_buff **pskb)
187 {
188 	u32 iova_mask = ab->hw_params->iova_mask;
189 	unsigned long offset, delta1, delta2;
190 	struct sk_buff *skb2, *skb = *pskb;
191 	unsigned int headroom = skb_headroom(skb);
192 	int tailroom = skb_tailroom(skb);
193 	int ret = 0;
194 
195 	offset = (unsigned long)skb->data & iova_mask;
196 	delta1 = offset;
197 	delta2 = iova_mask - offset + 1;
198 
199 	if (headroom >= delta1) {
200 		ath12k_dp_tx_move_payload(skb, delta1, true);
201 	} else if (tailroom >= delta2) {
202 		ath12k_dp_tx_move_payload(skb, delta2, false);
203 	} else {
204 		skb2 = skb_realloc_headroom(skb, iova_mask);
205 		if (!skb2) {
206 			ret = -ENOMEM;
207 			goto out;
208 		}
209 
210 		dev_kfree_skb_any(skb);
211 
212 		offset = (unsigned long)skb2->data & iova_mask;
213 		if (offset)
214 			ath12k_dp_tx_move_payload(skb2, offset, true);
215 		*pskb = skb2;
216 	}
217 
218 out:
219 	return ret;
220 }
221 
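/* Data TX entry point. In outline: pick a TCL ring (per-CPU by default),
 * take a tx descriptor from the pool's free list, fill hal_tx_info with
 * encap/encrypt type, TID, checksum-offload flags and TCL metadata,
 * optionally attach an MSDU extension descriptor, DMA-map the payload and
 * enqueue the TCL command; if the chosen ring is full, the remaining rings
 * are retried before the frame is dropped.
 */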
222 int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
223 		 struct sk_buff *skb, bool gsn_valid, int mcbc_gsn,
224 		 bool is_mcast)
225 {
226 	struct ath12k_base *ab = ar->ab;
227 	struct ath12k_dp *dp = &ab->dp;
228 	struct hal_tx_info ti = {};
229 	struct ath12k_tx_desc_info *tx_desc;
230 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
231 	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
232 	struct hal_tcl_data_cmd *hal_tcl_desc;
233 	struct hal_tx_msdu_ext_desc *msg;
234 	struct sk_buff *skb_ext_desc = NULL;
235 	struct hal_srng *tcl_ring;
236 	struct ieee80211_hdr *hdr = (void *)skb->data;
237 	struct ath12k_vif *ahvif = arvif->ahvif;
238 	struct dp_tx_ring *tx_ring;
239 	u8 pool_id;
240 	u8 hal_ring_id;
241 	int ret;
242 	u8 ring_selector, ring_map = 0;
243 	bool tcl_ring_retry;
244 	bool msdu_ext_desc = false;
245 	bool add_htt_metadata = false;
246 	u32 iova_mask = ab->hw_params->iova_mask;
247 	bool is_diff_encap = false;
248 	bool is_null_frame = false;
249 
250 	if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
251 		return -ESHUTDOWN;
252 
253 	if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
254 	    !ieee80211_is_data(hdr->frame_control))
255 		return -EOPNOTSUPP;
256 
257 	pool_id = skb_get_queue_mapping(skb) & (ATH12K_HW_MAX_QUEUES - 1);
258 
259 	/* Let the default ring selection be based on the current processor
260 	 * number: one of the TCL rings is selected based on
261 	 * smp_processor_id(). In case that ring
262 	 * is full/busy, we resort to the other available rings.
263 	 * If all rings are full, we drop the packet.
264 	 * TODO: Add throttling logic when all rings are full
265 	 */
266 	ring_selector = ab->hw_params->hw_ops->get_ring_selector(skb);
267 
268 tcl_ring_sel:
269 	tcl_ring_retry = false;
270 	ti.ring_id = ring_selector % ab->hw_params->max_tx_ring;
271 
272 	ring_map |= BIT(ti.ring_id);
273 	ti.rbm_id = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map[ti.ring_id].rbm_id;
274 
275 	tx_ring = &dp->tx_ring[ti.ring_id];
276 
277 	tx_desc = ath12k_dp_tx_assign_buffer(dp, pool_id);
278 	if (!tx_desc)
279 		return -ENOMEM;
280 
281 	ti.bank_id = arvif->bank_id;
282 	ti.meta_data_flags = arvif->tcl_metadata;
283 
284 	if (ahvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
285 	    test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)) {
286 		if (skb_cb->flags & ATH12K_SKB_CIPHER_SET) {
287 			ti.encrypt_type =
288 				ath12k_dp_tx_get_encrypt_type(skb_cb->cipher);
289 
290 			if (ieee80211_has_protected(hdr->frame_control))
291 				skb_put(skb, IEEE80211_CCMP_MIC_LEN);
292 		} else {
293 			ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
294 		}
295 
296 		msdu_ext_desc = true;
297 	}
298 
299 	if (gsn_valid) {
300 		/* Reset and Initialize meta_data_flags with Global Sequence
301 		 * Number (GSN) info.
302 		 */
303 		ti.meta_data_flags =
304 			u32_encode_bits(HTT_TCL_META_DATA_TYPE_GLOBAL_SEQ_NUM,
305 					HTT_TCL_META_DATA_TYPE) |
306 			u32_encode_bits(mcbc_gsn, HTT_TCL_META_DATA_GLOBAL_SEQ_NUM);
307 	}
308 
309 	ti.encap_type = ath12k_dp_tx_get_encap_type(ab, skb);
310 	ti.addr_search_flags = arvif->hal_addr_search_flags;
311 	ti.search_type = arvif->search_type;
312 	ti.type = HAL_TCL_DESC_TYPE_BUFFER;
313 	ti.pkt_offset = 0;
314 	ti.lmac_id = ar->lmac_id;
315 
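	/* Host-reinjected MLO multicast frames are steered to a dedicated
	 * firmware vdev range, hence the base-id offset applied below when
	 * a global sequence number is valid (inferred from the
	 * HTT_TX_MLO_MCAST_HOST_REINJECT_BASE_VDEV_ID name).
	 */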
316 	ti.vdev_id = arvif->vdev_id;
317 	if (gsn_valid)
318 		ti.vdev_id += HTT_TX_MLO_MCAST_HOST_REINJECT_BASE_VDEV_ID;
319 
320 	ti.bss_ast_hash = arvif->ast_hash;
321 	ti.bss_ast_idx = arvif->ast_idx;
322 	ti.dscp_tid_tbl_idx = 0;
323 
324 	if (skb->ip_summed == CHECKSUM_PARTIAL &&
325 	    ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW) {
326 		ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_IP4_CKSUM_EN) |
327 			     u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_UDP4_CKSUM_EN) |
328 			     u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_UDP6_CKSUM_EN) |
329 			     u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TCP4_CKSUM_EN) |
330 			     u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TCP6_CKSUM_EN);
331 	}
332 
333 	ti.flags1 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO3_TID_OVERWRITE);
334 
335 	ti.tid = ath12k_dp_tx_get_tid(skb);
336 
337 	switch (ti.encap_type) {
338 	case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
339 		is_null_frame = ieee80211_is_nullfunc(hdr->frame_control);
340 		if (ahvif->vif->offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED) {
341 			if (skb->protocol == cpu_to_be16(ETH_P_PAE) || is_null_frame)
342 				is_diff_encap = true;
343 
344 			/* Firmware expects an msdu ext descriptor for nwifi/raw
345 			 * packets received in ETH mode. Without it, multicast
346 			 * tx in ETH mode has been observed to fail.
347 			 */
348 			msdu_ext_desc = true;
349 		} else {
350 			ath12k_dp_tx_encap_nwifi(skb);
351 		}
352 		break;
353 	case HAL_TCL_ENCAP_TYPE_RAW:
354 		if (!test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) {
355 			ret = -EINVAL;
356 			goto fail_remove_tx_buf;
357 		}
358 		break;
359 	case HAL_TCL_ENCAP_TYPE_ETHERNET:
360 		/* no need to encap */
361 		break;
362 	case HAL_TCL_ENCAP_TYPE_802_3:
363 	default:
364 		/* TODO: Take care of other encap modes as well */
365 		ret = -EINVAL;
366 		atomic_inc(&ab->device_stats.tx_err.misc_fail);
367 		goto fail_remove_tx_buf;
368 	}
369 
370 	if (iova_mask &&
371 	    (unsigned long)skb->data & iova_mask) {
372 		ret = ath12k_dp_tx_align_payload(ab, &skb);
373 		if (ret) {
374 			ath12k_warn(ab, "failed to align TX buffer %d\n", ret);
375 			/* don't bail out; give the original buffer a
376 			 * chance even though it is unaligned.
377 			 */
378 			goto map;
379 		}
380 
381 		/* hdr points to the wrong place after alignment,
382 		 * so refresh it for later use.
383 		 */
384 		hdr = (void *)skb->data;
385 	}
386 map:
387 	ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
388 	if (dma_mapping_error(ab->dev, ti.paddr)) {
389 		atomic_inc(&ab->device_stats.tx_err.misc_fail);
390 		ath12k_warn(ab, "failed to DMA map data Tx buffer\n");
391 		ret = -ENOMEM;
392 		goto fail_remove_tx_buf;
393 	}
394 
395 	if ((!test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags) &&
396 	     !(skb_cb->flags & ATH12K_SKB_HW_80211_ENCAP) &&
397 	     !(skb_cb->flags & ATH12K_SKB_CIPHER_SET) &&
398 	     ieee80211_has_protected(hdr->frame_control)) ||
399 	    is_diff_encap) {
400 		/* Firmware does not expect metadata for a QoS null
401 		 * nwifi packet received in ETH encap mode.
402 		 */
403 		if (is_null_frame && msdu_ext_desc)
404 			goto skip_htt_meta;
405 
406 		/* Add metadata for sw-encrypted vlan group traffic
407 		 * and EAPOL nwifi packets received in ETH encap mode.
408 		 */
409 		add_htt_metadata = true;
410 		msdu_ext_desc = true;
411 		ti.meta_data_flags |= HTT_TCL_META_DATA_VALID_HTT;
412 skip_htt_meta:
413 		ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TO_FW);
414 		ti.encap_type = HAL_TCL_ENCAP_TYPE_RAW;
415 		ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
416 	}
417 
418 	tx_desc->skb = skb;
419 	tx_desc->mac_id = ar->pdev_idx;
420 	ti.desc_id = tx_desc->desc_id;
421 	ti.data_len = skb->len;
422 	skb_cb->paddr = ti.paddr;
423 	skb_cb->vif = ahvif->vif;
424 	skb_cb->ar = ar;
425 
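	/* When an extension descriptor is used, the TCL command no longer
	 * points at the payload directly: it points at a small
	 * hal_tx_msdu_ext_desc (HAL_TCL_DESC_TYPE_EXT_DESC) that carries the
	 * payload address, length and encap/encrypt overrides, with optional
	 * HTT metadata appended behind it.
	 */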
426 	if (msdu_ext_desc) {
427 		skb_ext_desc = dev_alloc_skb(sizeof(struct hal_tx_msdu_ext_desc));
428 		if (!skb_ext_desc) {
429 			ret = -ENOMEM;
430 			goto fail_unmap_dma;
431 		}
432 
433 		skb_put(skb_ext_desc, sizeof(struct hal_tx_msdu_ext_desc));
434 		memset(skb_ext_desc->data, 0, skb_ext_desc->len);
435 
436 		msg = (struct hal_tx_msdu_ext_desc *)skb_ext_desc->data;
437 		ath12k_hal_tx_cmd_ext_desc_setup(ab, msg, &ti);
438 
439 		if (add_htt_metadata) {
440 			ret = ath12k_dp_prepare_htt_metadata(skb_ext_desc);
441 			if (ret < 0) {
442 				ath12k_dbg(ab, ATH12K_DBG_DP_TX,
443 					   "Failed to add HTT meta data, dropping packet\n");
444 				goto fail_free_ext_skb;
445 			}
446 		}
447 
448 		ti.paddr = dma_map_single(ab->dev, skb_ext_desc->data,
449 					  skb_ext_desc->len, DMA_TO_DEVICE);
450 		ret = dma_mapping_error(ab->dev, ti.paddr);
451 		if (ret)
452 			goto fail_free_ext_skb;
453 
454 		ti.data_len = skb_ext_desc->len;
455 		ti.type = HAL_TCL_DESC_TYPE_EXT_DESC;
456 
457 		skb_cb->paddr_ext_desc = ti.paddr;
458 		tx_desc->skb_ext_desc = skb_ext_desc;
459 	}
460 
461 	hal_ring_id = tx_ring->tcl_data_ring.ring_id;
462 	tcl_ring = &ab->hal.srng_list[hal_ring_id];
463 
464 	spin_lock_bh(&tcl_ring->lock);
465 
466 	ath12k_hal_srng_access_begin(ab, tcl_ring);
467 
468 	hal_tcl_desc = ath12k_hal_srng_src_get_next_entry(ab, tcl_ring);
469 	if (!hal_tcl_desc) {
470 		/* NOTE: It is highly unlikely we'll run out of tcl_ring
471 		 * descriptors, because each one is enqueued directly onto the hw queue.
472 		 */
473 		ath12k_hal_srng_access_end(ab, tcl_ring);
474 		ab->device_stats.tx_err.desc_na[ti.ring_id]++;
475 		spin_unlock_bh(&tcl_ring->lock);
476 		ret = -ENOMEM;
477 
478 		/* Checking another ring for available tcl descriptors now,
479 		 * after failing because this ring is full, is cheaper than
480 		 * checking every ring up front for each pkt tx.
481 		 * Restart ring selection if some rings are not checked yet.
482 		 */
483 		if (ring_map != (BIT(ab->hw_params->max_tx_ring) - 1) &&
484 		    ab->hw_params->tcl_ring_retry) {
485 			tcl_ring_retry = true;
486 			ring_selector++;
487 		}
488 
489 		goto fail_unmap_dma_ext;
490 	}
491 
492 	spin_lock_bh(&arvif->link_stats_lock);
493 	arvif->link_stats.tx_encap_type[ti.encap_type]++;
494 	arvif->link_stats.tx_encrypt_type[ti.encrypt_type]++;
495 	arvif->link_stats.tx_desc_type[ti.type]++;
496 
497 	if (is_mcast)
498 		arvif->link_stats.tx_bcast_mcast++;
499 	else
500 		arvif->link_stats.tx_enqueued++;
501 	spin_unlock_bh(&arvif->link_stats_lock);
502 
503 	ab->device_stats.tx_enqueued[ti.ring_id]++;
504 
505 	ath12k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc, &ti);
506 
507 	ath12k_hal_srng_access_end(ab, tcl_ring);
508 
509 	spin_unlock_bh(&tcl_ring->lock);
510 
511 	ath12k_dbg_dump(ab, ATH12K_DBG_DP_TX, NULL, "dp tx msdu: ",
512 			skb->data, skb->len);
513 
514 	atomic_inc(&ar->dp.num_tx_pending);
515 
516 	return 0;
517 
518 fail_unmap_dma_ext:
519 	if (skb_cb->paddr_ext_desc)
520 		dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
521 				 skb_ext_desc->len,
522 				 DMA_TO_DEVICE);
523 fail_free_ext_skb:
524 	kfree_skb(skb_ext_desc);
525 
526 fail_unmap_dma:
527 	dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
528 
529 fail_remove_tx_buf:
530 	ath12k_dp_tx_release_txbuf(dp, tx_desc, pool_id);
531 
532 	spin_lock_bh(&arvif->link_stats_lock);
533 	arvif->link_stats.tx_dropped++;
534 	spin_unlock_bh(&arvif->link_stats_lock);
535 
536 	if (tcl_ring_retry)
537 		goto tcl_ring_sel;
538 
539 	return ret;
540 }
541 
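/* A minimal, hypothetical caller sketch (illustrative only, not part of
 * the driver): a mac80211 tx path would typically hand the frame to
 * ath12k_dp_tx() and free the skb itself only on failure. The gsn_valid,
 * mcbc_gsn and is_mcast arguments shown here are placeholder values.
 */
#if 0
static void example_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
			  struct sk_buff *skb)
{
	/* unicast frame, no global sequence number */
	int ret = ath12k_dp_tx(ar, arvif, skb, false, 0, false);

	if (ret) {
		ath12k_warn(ar->ab, "failed to transmit frame: %d\n", ret);
		ieee80211_free_txskb(ath12k_ar_to_hw(ar), skb);
	}
}
#endif
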
542 static void ath12k_dp_tx_free_txbuf(struct ath12k_base *ab,
543 				    struct dp_tx_ring *tx_ring,
544 				    struct ath12k_tx_desc_params *desc_params)
545 {
546 	struct ath12k *ar;
547 	struct sk_buff *msdu = desc_params->skb;
548 	struct ath12k_skb_cb *skb_cb;
549 	u8 pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, desc_params->mac_id);
550 
551 	skb_cb = ATH12K_SKB_CB(msdu);
552 	ar = ab->pdevs[pdev_id].ar;
553 
554 	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
555 	if (skb_cb->paddr_ext_desc) {
556 		dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
557 				 desc_params->skb_ext_desc->len, DMA_TO_DEVICE);
558 		dev_kfree_skb_any(desc_params->skb_ext_desc);
559 	}
560 
561 	ieee80211_free_txskb(ar->ah->hw, msdu);
562 
563 	if (atomic_dec_and_test(&ar->dp.num_tx_pending))
564 		wake_up(&ar->dp.tx_empty_waitq);
565 }
566 
567 static void
568 ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab,
569 				 struct ath12k_tx_desc_params *desc_params,
570 				 struct dp_tx_ring *tx_ring,
571 				 struct ath12k_dp_htt_wbm_tx_status *ts,
572 				 u16 peer_id)
573 {
574 	struct ieee80211_tx_info *info;
575 	struct ath12k_link_vif *arvif;
576 	struct ath12k_skb_cb *skb_cb;
577 	struct ieee80211_vif *vif;
578 	struct ath12k_vif *ahvif;
579 	struct ath12k *ar;
580 	struct sk_buff *msdu = desc_params->skb;
581 	s32 noise_floor;
582 	struct ieee80211_tx_status status = {};
583 	struct ath12k_peer *peer;
584 
585 	skb_cb = ATH12K_SKB_CB(msdu);
586 	info = IEEE80211_SKB_CB(msdu);
587 
588 	ar = skb_cb->ar;
589 	ab->device_stats.tx_completed[tx_ring->tcl_data_ring_id]++;
590 
591 	if (atomic_dec_and_test(&ar->dp.num_tx_pending))
592 		wake_up(&ar->dp.tx_empty_waitq);
593 
594 	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
595 	if (skb_cb->paddr_ext_desc) {
596 		dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
597 				 desc_params->skb_ext_desc->len, DMA_TO_DEVICE);
598 		dev_kfree_skb_any(desc_params->skb_ext_desc);
599 	}
600 
601 	vif = skb_cb->vif;
602 	if (vif) {
603 		ahvif = ath12k_vif_to_ahvif(vif);
604 		rcu_read_lock();
605 		arvif = rcu_dereference(ahvif->link[skb_cb->link_id]);
606 		if (arvif) {
607 			spin_lock_bh(&arvif->link_stats_lock);
608 			arvif->link_stats.tx_completed++;
609 			spin_unlock_bh(&arvif->link_stats_lock);
610 		}
611 		rcu_read_unlock();
612 	}
613 
614 	memset(&info->status, 0, sizeof(info->status));
615 
616 	if (ts->acked) {
617 		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
618 			info->flags |= IEEE80211_TX_STAT_ACK;
619 			info->status.ack_signal = ts->ack_rssi;
620 
621 			if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
622 				      ab->wmi_ab.svc_map)) {
623 				spin_lock_bh(&ar->data_lock);
624 				noise_floor = ath12k_pdev_get_noise_floor(ar);
625 				spin_unlock_bh(&ar->data_lock);
626 
627 				info->status.ack_signal += noise_floor;
628 			}
629 
630 			info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
631 		} else {
632 			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
633 		}
634 	}
635 	rcu_read_lock();
636 	spin_lock_bh(&ab->base_lock);
637 	peer = ath12k_peer_find_by_id(ab, peer_id);
638 	if (!peer || !peer->sta) {
639 		ath12k_dbg(ab, ATH12K_DBG_DATA,
640 			   "dp_tx: failed to find the peer with peer_id %d\n", peer_id);
641 		spin_unlock_bh(&ab->base_lock);
642 		ieee80211_free_txskb(ath12k_ar_to_hw(ar), msdu);
643 		goto exit;
644 	} else {
645 		status.sta = peer->sta;
646 	}
647 	spin_unlock_bh(&ab->base_lock);
648 
649 	status.info = info;
650 	status.skb = msdu;
651 	ieee80211_tx_status_ext(ath12k_ar_to_hw(ar), &status);
652 exit:
653 	rcu_read_unlock();
654 }
655 
656 static void
657 ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab, void *desc,
658 				     struct dp_tx_ring *tx_ring,
659 				     struct ath12k_tx_desc_params *desc_params)
660 {
661 	struct htt_tx_wbm_completion *status_desc;
662 	struct ath12k_dp_htt_wbm_tx_status ts = {};
663 	enum hal_wbm_htt_tx_comp_status wbm_status;
664 	u16 peer_id;
665 
666 	status_desc = desc;
667 
668 	wbm_status = le32_get_bits(status_desc->info0,
669 				   HTT_TX_WBM_COMP_INFO0_STATUS);
670 	ab->device_stats.fw_tx_status[wbm_status]++;
671 
672 	switch (wbm_status) {
673 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
674 		ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
675 		ts.ack_rssi = le32_get_bits(status_desc->info2,
676 					    HTT_TX_WBM_COMP_INFO2_ACK_RSSI);
677 
678 		peer_id = le32_get_bits(((struct hal_wbm_completion_ring_tx *)desc)->
679 				info3, HAL_WBM_COMPL_TX_INFO3_PEER_ID);
680 
681 		ath12k_dp_tx_htt_tx_complete_buf(ab, desc_params, tx_ring, &ts, peer_id);
682 		break;
683 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
684 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
685 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
686 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
687 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_VDEVID_MISMATCH:
688 		ath12k_dp_tx_free_txbuf(ab, tx_ring, desc_params);
689 		break;
690 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
691 		/* This event is to be handled only when the driver decides to
692 		 * use WDS offload functionality.
693 		 */
694 		break;
695 	default:
696 		ath12k_warn(ab, "Unknown htt wbm tx status %d\n", wbm_status);
697 		break;
698 	}
699 }
700 
701 static void ath12k_dp_tx_update_txcompl(struct ath12k *ar, struct hal_tx_status *ts)
702 {
703 	struct ath12k_base *ab = ar->ab;
704 	struct ath12k_peer *peer;
705 	struct ieee80211_sta *sta;
706 	struct ath12k_sta *ahsta;
707 	struct ath12k_link_sta *arsta;
708 	struct rate_info txrate = {};
709 	u16 rate, ru_tones;
710 	u8 rate_idx = 0;
711 	int ret;
712 
713 	spin_lock_bh(&ab->base_lock);
714 	peer = ath12k_peer_find_by_id(ab, ts->peer_id);
715 	if (!peer || !peer->sta) {
716 		ath12k_dbg(ab, ATH12K_DBG_DP_TX,
717 			   "failed to find the peer by id %u\n", ts->peer_id);
718 		spin_unlock_bh(&ab->base_lock);
719 		return;
720 	}
721 	sta = peer->sta;
722 	ahsta = ath12k_sta_to_ahsta(sta);
723 	arsta = &ahsta->deflink;
724 
725 	/* Prefer the real NSS value from arsta->last_txrate.nss; if it is
726 	 * invalid, fall back to the NSS value negotiated at assoc.
727 	 */
728 	if (arsta->last_txrate.nss)
729 		txrate.nss = arsta->last_txrate.nss;
730 	else
731 		txrate.nss = arsta->peer_nss;
732 	spin_unlock_bh(&ab->base_lock);
733 
734 	switch (ts->pkt_type) {
735 	case HAL_TX_RATE_STATS_PKT_TYPE_11A:
736 	case HAL_TX_RATE_STATS_PKT_TYPE_11B:
737 		ret = ath12k_mac_hw_ratecode_to_legacy_rate(ts->mcs,
738 							    ts->pkt_type,
739 							    &rate_idx,
740 							    &rate);
741 		if (ret < 0) {
742 			ath12k_warn(ab, "Invalid tx legacy rate %d\n", ret);
743 			return;
744 		}
745 
746 		txrate.legacy = rate;
747 		break;
748 	case HAL_TX_RATE_STATS_PKT_TYPE_11N:
749 		if (ts->mcs > ATH12K_HT_MCS_MAX) {
750 			ath12k_warn(ab, "Invalid HT mcs index %d\n", ts->mcs);
751 			return;
752 		}
753 
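		/* rate_info HT MCS indices fold in the stream count: 0-7 for
		 * 1SS, 8-15 for 2SS and so on, hence the (nss - 1) * 8
		 * offset below.
		 */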
754 		if (txrate.nss != 0)
755 			txrate.mcs = ts->mcs + 8 * (txrate.nss - 1);
756 
757 		txrate.flags = RATE_INFO_FLAGS_MCS;
758 
759 		if (ts->sgi)
760 			txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
761 		break;
762 	case HAL_TX_RATE_STATS_PKT_TYPE_11AC:
763 		if (ts->mcs > ATH12K_VHT_MCS_MAX) {
764 			ath12k_warn(ab, "Invalid VHT mcs index %d\n", ts->mcs);
765 			return;
766 		}
767 
768 		txrate.mcs = ts->mcs;
769 		txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
770 
771 		if (ts->sgi)
772 			txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
773 		break;
774 	case HAL_TX_RATE_STATS_PKT_TYPE_11AX:
775 		if (ts->mcs > ATH12K_HE_MCS_MAX) {
776 			ath12k_warn(ab, "Invalid HE mcs index %d\n", ts->mcs);
777 			return;
778 		}
779 
780 		txrate.mcs = ts->mcs;
781 		txrate.flags = RATE_INFO_FLAGS_HE_MCS;
782 		txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(ts->sgi);
783 		break;
784 	case HAL_TX_RATE_STATS_PKT_TYPE_11BE:
785 		if (ts->mcs > ATH12K_EHT_MCS_MAX) {
786 			ath12k_warn(ab, "Invalid EHT mcs index %d\n", ts->mcs);
787 			return;
788 		}
789 
790 		txrate.mcs = ts->mcs;
791 		txrate.flags = RATE_INFO_FLAGS_EHT_MCS;
792 		txrate.eht_gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(ts->sgi);
793 		break;
794 	default:
795 		ath12k_warn(ab, "Invalid tx pkt type: %d\n", ts->pkt_type);
796 		return;
797 	}
798 
799 	txrate.bw = ath12k_mac_bw_to_mac80211_bw(ts->bw);
800 
801 	if (ts->ofdma && ts->pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
802 		txrate.bw = RATE_INFO_BW_HE_RU;
803 		ru_tones = ath12k_mac_he_convert_tones_to_ru_tones(ts->tones);
804 		txrate.he_ru_alloc =
805 			ath12k_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
806 	}
807 
808 	if (ts->ofdma && ts->pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11BE) {
809 		txrate.bw = RATE_INFO_BW_EHT_RU;
810 		txrate.eht_ru_alloc =
811 			ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(ts->tones);
812 	}
813 
814 	spin_lock_bh(&ab->base_lock);
815 	arsta->txrate = txrate;
816 	spin_unlock_bh(&ab->base_lock);
817 }
818 
819 static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
820 				       struct ath12k_tx_desc_params *desc_params,
821 				       struct hal_tx_status *ts,
822 				       int ring)
823 {
824 	struct ath12k_base *ab = ar->ab;
825 	struct ath12k_hw *ah = ar->ah;
826 	struct ieee80211_tx_info *info;
827 	struct ath12k_link_vif *arvif;
828 	struct ath12k_skb_cb *skb_cb;
829 	struct ieee80211_vif *vif;
830 	struct ath12k_vif *ahvif;
831 	struct sk_buff *msdu = desc_params->skb;
832 	s32 noise_floor;
833 	struct ieee80211_tx_status status = {};
834 	struct ieee80211_rate_status status_rate = {};
835 	struct ath12k_peer *peer;
836 	struct ath12k_link_sta *arsta;
837 	struct ath12k_sta *ahsta;
838 	struct rate_info rate;
839 
840 	if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
841 		/* Must not happen */
842 		return;
843 	}
844 
845 	skb_cb = ATH12K_SKB_CB(msdu);
846 	ab->device_stats.tx_completed[ring]++;
847 
848 	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
849 	if (skb_cb->paddr_ext_desc) {
850 		dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
851 				 desc_params->skb_ext_desc->len, DMA_TO_DEVICE);
852 		dev_kfree_skb_any(desc_params->skb_ext_desc);
853 	}
854 
855 	rcu_read_lock();
856 
857 	if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) {
858 		ieee80211_free_txskb(ah->hw, msdu);
859 		goto exit;
860 	}
861 
862 	if (!skb_cb->vif) {
863 		ieee80211_free_txskb(ah->hw, msdu);
864 		goto exit;
865 	}
866 
867 	vif = skb_cb->vif;
868 	if (vif) {
869 		ahvif = ath12k_vif_to_ahvif(vif);
870 		arvif = rcu_dereference(ahvif->link[skb_cb->link_id]);
871 		if (arvif) {
872 			spin_lock_bh(&arvif->link_stats_lock);
873 			arvif->link_stats.tx_completed++;
874 			spin_unlock_bh(&arvif->link_stats_lock);
875 		}
876 	}
877 
878 	info = IEEE80211_SKB_CB(msdu);
879 	memset(&info->status, 0, sizeof(info->status));
880 
881 	/* skip tx rate update from ieee80211_status */
882 	info->status.rates[0].idx = -1;
883 
884 	switch (ts->status) {
885 	case HAL_WBM_TQM_REL_REASON_FRAME_ACKED:
886 		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
887 			info->flags |= IEEE80211_TX_STAT_ACK;
888 			info->status.ack_signal = ts->ack_rssi;
889 
890 			if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
891 				      ab->wmi_ab.svc_map)) {
892 				spin_lock_bh(&ar->data_lock);
893 				noise_floor = ath12k_pdev_get_noise_floor(ar);
894 				spin_unlock_bh(&ar->data_lock);
895 
896 				info->status.ack_signal += noise_floor;
897 			}
898 
899 			info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
900 		}
901 		break;
902 	case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX:
903 		if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
904 			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
905 			break;
906 		}
907 		fallthrough;
908 	case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU:
909 	case HAL_WBM_TQM_REL_REASON_DROP_THRESHOLD:
910 	case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES:
911 		/* The failure status is due to an internal firmware tx
912 		 * failure, hence drop the frame; do not report its status
913 		 * to the upper layer.
914 		 */
915 		ieee80211_free_txskb(ah->hw, msdu);
916 		goto exit;
917 	default:
918 		ath12k_dbg(ab, ATH12K_DBG_DP_TX, "tx frame is not acked status %d\n",
919 			   ts->status);
920 		break;
921 	}
922 
923 	/* NOTE: Tx rate status reporting. Tx completion status does not have
924 	 * all the information (for example nss) needed to build the tx rate.
925 	 * It might end up being reported out-of-band from HTT stats.
926 	 */
927 
928 	ath12k_dp_tx_update_txcompl(ar, ts);
929 
930 	spin_lock_bh(&ab->base_lock);
931 	peer = ath12k_peer_find_by_id(ab, ts->peer_id);
932 	if (!peer || !peer->sta) {
933 		ath12k_err(ab,
934 			   "dp_tx: failed to find the peer with peer_id %d\n",
935 			   ts->peer_id);
936 		spin_unlock_bh(&ab->base_lock);
937 		ieee80211_free_txskb(ath12k_ar_to_hw(ar), msdu);
938 		goto exit;
939 	}
940 	ahsta = ath12k_sta_to_ahsta(peer->sta);
941 	arsta = &ahsta->deflink;
942 
943 	spin_unlock_bh(&ab->base_lock);
944 
945 	status.sta = peer->sta;
946 	status.info = info;
947 	status.skb = msdu;
948 	rate = arsta->last_txrate;
949 
950 	status_rate.rate_idx = rate;
951 	status_rate.try_count = 1;
952 
953 	status.rates = &status_rate;
954 	status.n_rates = 1;
955 	ieee80211_tx_status_ext(ath12k_ar_to_hw(ar), &status);
956 
957 exit:
958 	rcu_read_unlock();
959 }
960 
961 static void ath12k_dp_tx_status_parse(struct ath12k_base *ab,
962 				      struct hal_wbm_completion_ring_tx *desc,
963 				      struct hal_tx_status *ts)
964 {
965 	u32 info0 = le32_to_cpu(desc->rate_stats.info0);
966 
967 	ts->buf_rel_source =
968 		le32_get_bits(desc->info0, HAL_WBM_COMPL_TX_INFO0_REL_SRC_MODULE);
969 	if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
970 	    ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)
971 		return;
972 
973 	if (ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)
974 		return;
975 
976 	ts->status = le32_get_bits(desc->info0,
977 				   HAL_WBM_COMPL_TX_INFO0_TQM_RELEASE_REASON);
978 
979 	ts->ppdu_id = le32_get_bits(desc->info1,
980 				    HAL_WBM_COMPL_TX_INFO1_TQM_STATUS_NUMBER);
981 
982 	ts->peer_id = le32_get_bits(desc->info3, HAL_WBM_COMPL_TX_INFO3_PEER_ID);
983 
984 	ts->ack_rssi = le32_get_bits(desc->info2,
985 				     HAL_WBM_COMPL_TX_INFO2_ACK_FRAME_RSSI);
986 
987 	if (info0 & HAL_TX_RATE_STATS_INFO0_VALID) {
988 		ts->pkt_type = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_PKT_TYPE);
989 		ts->mcs = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_MCS);
990 		ts->sgi = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_SGI);
991 		ts->bw = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_BW);
992 		ts->tones = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_TONES_IN_RU);
993 		ts->ofdma = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_OFDMA_TX);
994 	}
995 }
996 
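/* TX completion handler: completions are drained from the WBM status ring
 * into the local tx_status[] buffer under the srng lock, then parsed
 * outside that lock. The owning tx descriptor is recovered either from the
 * hardware-converted virtual address (cookie conversion done in hw) or via
 * a software cookie lookup.
 */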
997 void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id)
998 {
999 	struct ath12k *ar;
1000 	struct ath12k_dp *dp = &ab->dp;
1001 	int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
1002 	struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
1003 	struct ath12k_tx_desc_info *tx_desc = NULL;
1004 	struct hal_tx_status ts = {};
1005 	struct ath12k_tx_desc_params desc_params;
1006 	struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
1007 	struct hal_wbm_release_ring *desc;
1008 	u8 pdev_id;
1009 	u64 desc_va;
1010 	enum hal_wbm_rel_src_module buf_rel_source;
1011 	enum hal_wbm_tqm_rel_reason rel_status;
1012 
1013 	spin_lock_bh(&status_ring->lock);
1014 
1015 	ath12k_hal_srng_access_begin(ab, status_ring);
1016 
1017 	while (ATH12K_TX_COMPL_NEXT(ab, tx_ring->tx_status_head) !=
1018 	       tx_ring->tx_status_tail) {
1019 		desc = ath12k_hal_srng_dst_get_next_entry(ab, status_ring);
1020 		if (!desc)
1021 			break;
1022 
1023 		memcpy(&tx_ring->tx_status[tx_ring->tx_status_head],
1024 		       desc, sizeof(*desc));
1025 		tx_ring->tx_status_head =
1026 			ATH12K_TX_COMPL_NEXT(ab, tx_ring->tx_status_head);
1027 	}
1028 
1029 	if (ath12k_hal_srng_dst_peek(ab, status_ring) &&
1030 	    (ATH12K_TX_COMPL_NEXT(ab, tx_ring->tx_status_head) ==
1031 	     tx_ring->tx_status_tail)) {
1032 		/* TODO: Process pending tx_status messages when kfifo_is_full() */
1033 		ath12k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
1034 	}
1035 
1036 	ath12k_hal_srng_access_end(ab, status_ring);
1037 
1038 	spin_unlock_bh(&status_ring->lock);
1039 
1040 	while (ATH12K_TX_COMPL_NEXT(ab, tx_ring->tx_status_tail) !=
1041 	       tx_ring->tx_status_head) {
1042 		struct hal_wbm_completion_ring_tx *tx_status;
1043 		u32 desc_id;
1044 
1045 		tx_ring->tx_status_tail =
1046 			ATH12K_TX_COMPL_NEXT(ab, tx_ring->tx_status_tail);
1047 		tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
1048 		ath12k_dp_tx_status_parse(ab, tx_status, &ts);
1049 
1050 		if (le32_get_bits(tx_status->info0, HAL_WBM_COMPL_TX_INFO0_CC_DONE)) {
1051 			/* HW done cookie conversion */
1052 			desc_va = ((u64)le32_to_cpu(tx_status->buf_va_hi) << 32 |
1053 				   le32_to_cpu(tx_status->buf_va_lo));
1054 			tx_desc = (struct ath12k_tx_desc_info *)((unsigned long)desc_va);
1055 		} else {
1056 			/* SW does cookie conversion to VA */
1057 			desc_id = le32_get_bits(tx_status->buf_va_hi,
1058 						BUFFER_ADDR_INFO1_SW_COOKIE);
1059 
1060 			tx_desc = ath12k_dp_get_tx_desc(ab, desc_id);
1061 		}
1062 		if (!tx_desc) {
1063 			ath12k_warn(ab, "unable to retrieve tx_desc!");
1064 			continue;
1065 		}
1066 
1067 		desc_params.mac_id = tx_desc->mac_id;
1068 		desc_params.skb = tx_desc->skb;
1069 		desc_params.skb_ext_desc = tx_desc->skb_ext_desc;
1070 
1071 		/* Find the HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE value */
1072 		buf_rel_source = le32_get_bits(tx_status->info0,
1073 					       HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE);
1074 		ab->device_stats.tx_wbm_rel_source[buf_rel_source]++;
1075 
1076 		rel_status = le32_get_bits(tx_status->info0,
1077 					   HAL_WBM_COMPL_TX_INFO0_TQM_RELEASE_REASON);
1078 		ab->device_stats.tqm_rel_reason[rel_status]++;
1079 
1080 		/* Release the descriptor as soon as the necessary info has
1081 		 * been extracted, to reduce pool lock contention.
1082 		 */
1083 		ath12k_dp_tx_release_txbuf(dp, tx_desc, tx_desc->pool_id);
1084 		if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) {
1085 			ath12k_dp_tx_process_htt_tx_complete(ab, (void *)tx_status,
1086 							     tx_ring, &desc_params);
1087 			continue;
1088 		}
1089 
1090 		pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, desc_params.mac_id);
1091 		ar = ab->pdevs[pdev_id].ar;
1092 
1093 		if (atomic_dec_and_test(&ar->dp.num_tx_pending))
1094 			wake_up(&ar->dp.tx_empty_waitq);
1095 
1096 		ath12k_dp_tx_complete_msdu(ar, &desc_params, &ts,
1097 					   tx_ring->tcl_data_ring_id);
1098 	}
1099 }
1100 
1101 static int
1102 ath12k_dp_tx_get_ring_id_type(struct ath12k_base *ab,
1103 			      int mac_id, u32 ring_id,
1104 			      enum hal_ring_type ring_type,
1105 			      enum htt_srng_ring_type *htt_ring_type,
1106 			      enum htt_srng_ring_id *htt_ring_id)
1107 {
1108 	int ret = 0;
1109 
1110 	switch (ring_type) {
1111 	case HAL_RXDMA_BUF:
1112 		/* for some targets, the host hands rx buffers to fw and fw
1113 		 * fills the rxbuf ring for each rxdma
1114 		 */
1115 		if (!ab->hw_params->rx_mac_buf_ring) {
1116 			if (!(ring_id == HAL_SRNG_SW2RXDMA_BUF0 ||
1117 			      ring_id == HAL_SRNG_SW2RXDMA_BUF1)) {
1118 				ret = -EINVAL;
1119 			}
1120 			*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
1121 			*htt_ring_type = HTT_SW_TO_HW_RING;
1122 		} else {
1123 			if (ring_id == HAL_SRNG_SW2RXDMA_BUF0) {
1124 				*htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
1125 				*htt_ring_type = HTT_SW_TO_SW_RING;
1126 			} else {
1127 				*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
1128 				*htt_ring_type = HTT_SW_TO_HW_RING;
1129 			}
1130 		}
1131 		break;
1132 	case HAL_RXDMA_DST:
1133 		*htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
1134 		*htt_ring_type = HTT_HW_TO_SW_RING;
1135 		break;
1136 	case HAL_RXDMA_MONITOR_BUF:
1137 		*htt_ring_id = HTT_RX_MON_HOST2MON_BUF_RING;
1138 		*htt_ring_type = HTT_SW_TO_HW_RING;
1139 		break;
1140 	case HAL_RXDMA_MONITOR_STATUS:
1141 		*htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
1142 		*htt_ring_type = HTT_SW_TO_HW_RING;
1143 		break;
1144 	case HAL_RXDMA_MONITOR_DST:
1145 		*htt_ring_id = HTT_RX_MON_MON2HOST_DEST_RING;
1146 		*htt_ring_type = HTT_HW_TO_SW_RING;
1147 		break;
1148 	case HAL_RXDMA_MONITOR_DESC:
1149 		*htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
1150 		*htt_ring_type = HTT_SW_TO_HW_RING;
1151 		break;
1152 	default:
1153 		ath12k_warn(ab, "Unsupported ring type in DP :%d\n", ring_type);
1154 		ret = -EINVAL;
1155 	}
1156 	return ret;
1157 }
1158 
1159 int ath12k_dp_tx_htt_srng_setup(struct ath12k_base *ab, u32 ring_id,
1160 				int mac_id, enum hal_ring_type ring_type)
1161 {
1162 	struct htt_srng_setup_cmd *cmd;
1163 	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
1164 	struct hal_srng_params params;
1165 	struct sk_buff *skb;
1166 	u32 ring_entry_sz;
1167 	int len = sizeof(*cmd);
1168 	dma_addr_t hp_addr, tp_addr;
1169 	enum htt_srng_ring_type htt_ring_type;
1170 	enum htt_srng_ring_id htt_ring_id;
1171 	int ret;
1172 
1173 	skb = ath12k_htc_alloc_skb(ab, len);
1174 	if (!skb)
1175 		return -ENOMEM;
1176 
1177 	memset(&params, 0, sizeof(params));
1178 	ath12k_hal_srng_get_params(ab, srng, &params);
1179 
1180 	hp_addr = ath12k_hal_srng_get_hp_addr(ab, srng);
1181 	tp_addr = ath12k_hal_srng_get_tp_addr(ab, srng);
1182 
1183 	ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
1184 					    ring_type, &htt_ring_type,
1185 					    &htt_ring_id);
1186 	if (ret)
1187 		goto err_free;
1188 
1189 	skb_put(skb, len);
1190 	cmd = (struct htt_srng_setup_cmd *)skb->data;
1191 	cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_SRING_SETUP,
1192 				      HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE);
1193 	if (htt_ring_type == HTT_SW_TO_HW_RING ||
1194 	    htt_ring_type == HTT_HW_TO_SW_RING)
1195 		cmd->info0 |= le32_encode_bits(DP_SW2HW_MACID(mac_id),
1196 					       HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID);
1197 	else
1198 		cmd->info0 |= le32_encode_bits(mac_id,
1199 					       HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID);
1200 	cmd->info0 |= le32_encode_bits(htt_ring_type,
1201 				       HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE);
1202 	cmd->info0 |= le32_encode_bits(htt_ring_id,
1203 				       HTT_SRNG_SETUP_CMD_INFO0_RING_ID);
1204 
1205 	cmd->ring_base_addr_lo = cpu_to_le32(params.ring_base_paddr &
1206 					     HAL_ADDR_LSB_REG_MASK);
1207 
1208 	cmd->ring_base_addr_hi = cpu_to_le32((u64)params.ring_base_paddr >>
1209 					     HAL_ADDR_MSB_REG_SHIFT);
1210 
1211 	ret = ath12k_hal_srng_get_entrysize(ab, ring_type);
1212 	if (ret < 0)
1213 		goto err_free;
1214 
1215 	ring_entry_sz = ret;
1216 
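	/* HTT expects the entry and ring sizes in units of 32-bit words,
	 * hence the byte-to-word conversion below (inferred from the shift).
	 */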
1217 	ring_entry_sz >>= 2;
1218 	cmd->info1 = le32_encode_bits(ring_entry_sz,
1219 				      HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE);
1220 	cmd->info1 |= le32_encode_bits(params.num_entries * ring_entry_sz,
1221 				       HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE);
1222 	cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
1223 				       HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP);
1224 	cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
1225 				       HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP);
1226 	cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP),
1227 				       HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP);
1228 	if (htt_ring_type == HTT_SW_TO_HW_RING)
1229 		cmd->info1 |= cpu_to_le32(HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS);
1230 
1231 	cmd->ring_head_off32_remote_addr_lo = cpu_to_le32(lower_32_bits(hp_addr));
1232 	cmd->ring_head_off32_remote_addr_hi = cpu_to_le32(upper_32_bits(hp_addr));
1233 
1234 	cmd->ring_tail_off32_remote_addr_lo = cpu_to_le32(lower_32_bits(tp_addr));
1235 	cmd->ring_tail_off32_remote_addr_hi = cpu_to_le32(upper_32_bits(tp_addr));
1236 
1237 	cmd->ring_msi_addr_lo = cpu_to_le32(lower_32_bits(params.msi_addr));
1238 	cmd->ring_msi_addr_hi = cpu_to_le32(upper_32_bits(params.msi_addr));
1239 	cmd->msi_data = cpu_to_le32(params.msi_data);
1240 
1241 	cmd->intr_info =
1242 		le32_encode_bits(params.intr_batch_cntr_thres_entries * ring_entry_sz,
1243 				 HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH);
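	/* The interrupt timer threshold appears to be programmed in units
	 * of 8 us, hence the >> 3 (an inference from the shift).
	 */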
1244 	cmd->intr_info |=
1245 		le32_encode_bits(params.intr_timer_thres_us >> 3,
1246 				 HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH);
1247 
1248 	cmd->info2 = 0;
1249 	if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
1250 		cmd->info2 = le32_encode_bits(params.low_threshold,
1251 					      HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH);
1252 	}
1253 
1254 	ath12k_dbg(ab, ATH12K_DBG_HAL,
1255 		   "%s msi_addr_lo:0x%x, msi_addr_hi:0x%x, msi_data:0x%x\n",
1256 		   __func__, cmd->ring_msi_addr_lo, cmd->ring_msi_addr_hi,
1257 		   cmd->msi_data);
1258 
1259 	ath12k_dbg(ab, ATH12K_DBG_HAL,
1260 		   "ring_id:%d, ring_type:%d, intr_info:0x%x, flags:0x%x\n",
1261 		   ring_id, ring_type, cmd->intr_info, cmd->info2);
1262 
1263 	ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
1264 	if (ret)
1265 		goto err_free;
1266 
1267 	return 0;
1268 
1269 err_free:
1270 	dev_kfree_skb_any(skb);
1271 
1272 	return ret;
1273 }
1274 
1275 #define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ)
1276 
1277 int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab)
1278 {
1279 	struct ath12k_dp *dp = &ab->dp;
1280 	struct sk_buff *skb;
1281 	struct htt_ver_req_cmd *cmd;
1282 	int len = sizeof(*cmd);
1283 	u32 metadata_version;
1284 	int ret;
1285 
1286 	init_completion(&dp->htt_tgt_version_received);
1287 
1288 	skb = ath12k_htc_alloc_skb(ab, len);
1289 	if (!skb)
1290 		return -ENOMEM;
1291 
1292 	skb_put(skb, len);
1293 	cmd = (struct htt_ver_req_cmd *)skb->data;
1294 	cmd->ver_reg_info = le32_encode_bits(HTT_H2T_MSG_TYPE_VERSION_REQ,
1295 					     HTT_OPTION_TAG);
1296 	metadata_version = ath12k_ftm_mode ? HTT_OPTION_TCL_METADATA_VER_V1 :
1297 			   HTT_OPTION_TCL_METADATA_VER_V2;
1298 
1299 	cmd->tcl_metadata_version = le32_encode_bits(HTT_TAG_TCL_METADATA_VERSION,
1300 						     HTT_OPTION_TAG) |
1301 				    le32_encode_bits(HTT_TCL_METADATA_VER_SZ,
1302 						     HTT_OPTION_LEN) |
1303 				    le32_encode_bits(metadata_version,
1304 						     HTT_OPTION_VALUE);
1305 
1306 	ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
1307 	if (ret) {
1308 		dev_kfree_skb_any(skb);
1309 		return ret;
1310 	}
1311 
1312 	ret = wait_for_completion_timeout(&dp->htt_tgt_version_received,
1313 					  HTT_TARGET_VERSION_TIMEOUT_HZ);
1314 	if (ret == 0) {
1315 		ath12k_warn(ab, "htt target version request timed out\n");
1316 		return -ETIMEDOUT;
1317 	}
1318 
1319 	if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
1320 		ath12k_err(ab, "unsupported htt major version %d supported version is %d\n",
1321 			   dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
1322 		return -EOPNOTSUPP;
1323 	}
1324 
1325 	return 0;
1326 }
1327 
1328 int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask)
1329 {
1330 	struct ath12k_base *ab = ar->ab;
1331 	struct ath12k_dp *dp = &ab->dp;
1332 	struct sk_buff *skb;
1333 	struct htt_ppdu_stats_cfg_cmd *cmd;
1334 	int len = sizeof(*cmd);
1335 	u8 pdev_mask;
1336 	int ret;
1337 	int i;
1338 
1339 	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
1340 		skb = ath12k_htc_alloc_skb(ab, len);
1341 		if (!skb)
1342 			return -ENOMEM;
1343 
1344 		skb_put(skb, len);
1345 		cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
1346 		cmd->msg = le32_encode_bits(HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
1347 					    HTT_PPDU_STATS_CFG_MSG_TYPE);
1348 
1349 		pdev_mask = 1 << (i + ar->pdev_idx);
1350 		cmd->msg |= le32_encode_bits(pdev_mask, HTT_PPDU_STATS_CFG_PDEV_ID);
1351 		cmd->msg |= le32_encode_bits(mask, HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK);
1352 
1353 		ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
1354 		if (ret) {
1355 			dev_kfree_skb_any(skb);
1356 			return ret;
1357 		}
1358 	}
1359 
1360 	return 0;
1361 }
1362 
1363 int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id,
1364 				     int mac_id, enum hal_ring_type ring_type,
1365 				     int rx_buf_size,
1366 				     struct htt_rx_ring_tlv_filter *tlv_filter)
1367 {
1368 	struct htt_rx_ring_selection_cfg_cmd *cmd;
1369 	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
1370 	struct hal_srng_params params;
1371 	struct sk_buff *skb;
1372 	int len = sizeof(*cmd);
1373 	enum htt_srng_ring_type htt_ring_type;
1374 	enum htt_srng_ring_id htt_ring_id;
1375 	int ret;
1376 
1377 	skb = ath12k_htc_alloc_skb(ab, len);
1378 	if (!skb)
1379 		return -ENOMEM;
1380 
1381 	memset(&params, 0, sizeof(params));
1382 	ath12k_hal_srng_get_params(ab, srng, &params);
1383 
1384 	ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
1385 					    ring_type, &htt_ring_type,
1386 					    &htt_ring_id);
1387 	if (ret)
1388 		goto err_free;
1389 
1390 	skb_put(skb, len);
1391 	cmd = (struct htt_rx_ring_selection_cfg_cmd *)skb->data;
1392 	cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
1393 				      HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE);
1394 	if (htt_ring_type == HTT_SW_TO_HW_RING ||
1395 	    htt_ring_type == HTT_HW_TO_SW_RING)
1396 		cmd->info0 |=
1397 			le32_encode_bits(DP_SW2HW_MACID(mac_id),
1398 					 HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
1399 	else
1400 		cmd->info0 |=
1401 			le32_encode_bits(mac_id,
1402 					 HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
1403 	cmd->info0 |= le32_encode_bits(htt_ring_id,
1404 				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID);
1405 	cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
1406 				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS);
1407 	cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
1408 				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS);
1409 	cmd->info0 |= le32_encode_bits(tlv_filter->offset_valid,
1410 				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_OFFSET_VALID);
1411 	cmd->info0 |=
1412 		le32_encode_bits(tlv_filter->drop_threshold_valid,
1413 				 HTT_RX_RING_SELECTION_CFG_CMD_INFO0_DROP_THRES_VAL);
1414 	cmd->info0 |= le32_encode_bits(!tlv_filter->rxmon_disable,
1415 				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_EN_RXMON);
1416 
1417 	cmd->info1 = le32_encode_bits(rx_buf_size,
1418 				      HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE);
1419 	cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_mgmt,
1420 				       HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT);
1421 	cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_ctrl,
1422 				       HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL);
1423 	cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_data,
1424 				       HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA);
1425 	cmd->pkt_type_en_flags0 = cpu_to_le32(tlv_filter->pkt_filter_flags0);
1426 	cmd->pkt_type_en_flags1 = cpu_to_le32(tlv_filter->pkt_filter_flags1);
1427 	cmd->pkt_type_en_flags2 = cpu_to_le32(tlv_filter->pkt_filter_flags2);
1428 	cmd->pkt_type_en_flags3 = cpu_to_le32(tlv_filter->pkt_filter_flags3);
1429 	cmd->rx_filter_tlv = cpu_to_le32(tlv_filter->rx_filter);
1430 
1431 	cmd->info2 = le32_encode_bits(tlv_filter->rx_drop_threshold,
1432 				      HTT_RX_RING_SELECTION_CFG_CMD_INFO2_DROP_THRESHOLD);
1433 	cmd->info2 |=
1434 		le32_encode_bits(tlv_filter->enable_log_mgmt_type,
1435 				 HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_MGMT_TYPE);
1436 	cmd->info2 |=
1437 		le32_encode_bits(tlv_filter->enable_log_ctrl_type,
1438 				 HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_CTRL_TYPE);
1439 	cmd->info2 |=
1440 		le32_encode_bits(tlv_filter->enable_log_data_type,
1441 				 HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_DATA_TYPE);
1442 
1443 	cmd->info3 =
1444 		le32_encode_bits(tlv_filter->enable_rx_tlv_offset,
1445 				 HTT_RX_RING_SELECTION_CFG_CMD_INFO3_EN_TLV_PKT_OFFSET);
1446 	cmd->info3 |=
1447 		le32_encode_bits(tlv_filter->rx_tlv_offset,
1448 				 HTT_RX_RING_SELECTION_CFG_CMD_INFO3_PKT_TLV_OFFSET);
1449 
1450 	if (tlv_filter->offset_valid) {
1451 		cmd->rx_packet_offset =
1452 			le32_encode_bits(tlv_filter->rx_packet_offset,
1453 					 HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET);
1454 
1455 		cmd->rx_packet_offset |=
1456 			le32_encode_bits(tlv_filter->rx_header_offset,
1457 					 HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET);
1458 
1459 		cmd->rx_mpdu_offset =
1460 			le32_encode_bits(tlv_filter->rx_mpdu_end_offset,
1461 					 HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET);
1462 
1463 		cmd->rx_mpdu_offset |=
1464 			le32_encode_bits(tlv_filter->rx_mpdu_start_offset,
1465 					 HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET);
1466 
1467 		cmd->rx_msdu_offset =
1468 			le32_encode_bits(tlv_filter->rx_msdu_end_offset,
1469 					 HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET);
1470 
1471 		cmd->rx_msdu_offset |=
1472 			le32_encode_bits(tlv_filter->rx_msdu_start_offset,
1473 					 HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET);
1474 
1475 		cmd->rx_attn_offset =
1476 			le32_encode_bits(tlv_filter->rx_attn_offset,
1477 					 HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET);
1478 	}
1479 
1480 	if (tlv_filter->rx_mpdu_start_wmask > 0 &&
1481 	    tlv_filter->rx_msdu_end_wmask > 0) {
1482 		cmd->info2 |=
1483 			le32_encode_bits(true,
1484 					 HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACT_SET);
1485 		cmd->rx_mpdu_start_end_mask =
1486 			le32_encode_bits(tlv_filter->rx_mpdu_start_wmask,
1487 					 HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_MASK);
1488 		/* mpdu_end is not used by any hardware so far;
1489 		 * please assign it via hal ops in the future if
1490 		 * any chip starts using it.
1491 		 */
1492 		cmd->rx_mpdu_start_end_mask |=
1493 			le32_encode_bits(tlv_filter->rx_mpdu_end_wmask,
1494 					 HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_MASK);
1495 		cmd->rx_msdu_end_word_mask =
1496 			le32_encode_bits(tlv_filter->rx_msdu_end_wmask,
1497 					 HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_MASK);
1498 	}
1499 
1500 	ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
1501 	if (ret)
1502 		goto err_free;
1503 
1504 	return 0;
1505 
1506 err_free:
1507 	dev_kfree_skb_any(skb);
1508 
1509 	return ret;
1510 }
1511 
1512 int
1513 ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type,
1514 				   struct htt_ext_stats_cfg_params *cfg_params,
1515 				   u64 cookie)
1516 {
1517 	struct ath12k_base *ab = ar->ab;
1518 	struct ath12k_dp *dp = &ab->dp;
1519 	struct sk_buff *skb;
1520 	struct htt_ext_stats_cfg_cmd *cmd;
1521 	int len = sizeof(*cmd);
1522 	int ret;
1523 	u32 pdev_id;
1524 
1525 	skb = ath12k_htc_alloc_skb(ab, len);
1526 	if (!skb)
1527 		return -ENOMEM;
1528 
1529 	skb_put(skb, len);
1530 
1531 	cmd = (struct htt_ext_stats_cfg_cmd *)skb->data;
1532 	memset(cmd, 0, sizeof(*cmd));
1533 	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;
1534 
1535 	pdev_id = ath12k_mac_get_target_pdev_id(ar);
1536 	cmd->hdr.pdev_mask = 1 << pdev_id;
1537 
1538 	cmd->hdr.stats_type = type;
1539 	cmd->cfg_param0 = cpu_to_le32(cfg_params->cfg0);
1540 	cmd->cfg_param1 = cpu_to_le32(cfg_params->cfg1);
1541 	cmd->cfg_param2 = cpu_to_le32(cfg_params->cfg2);
1542 	cmd->cfg_param3 = cpu_to_le32(cfg_params->cfg3);
1543 	cmd->cookie_lsb = cpu_to_le32(lower_32_bits(cookie));
1544 	cmd->cookie_msb = cpu_to_le32(upper_32_bits(cookie));
1545 
1546 	ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
1547 	if (ret) {
1548 		ath12k_warn(ab, "failed to send htt type stats request: %d",
1549 			    ret);
1550 		dev_kfree_skb_any(skb);
1551 		return ret;
1552 	}
1553 
1554 	return 0;
1555 }
1556 
1557 int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset)
1558 {
1559 	struct ath12k_base *ab = ar->ab;
1560 	int ret;
1561 
1562 	ret = ath12k_dp_tx_htt_rx_monitor_mode_ring_config(ar, reset);
1563 	if (ret) {
1564 		ath12k_err(ab, "failed to setup rx monitor filter %d\n", ret);
1565 		return ret;
1566 	}
1567 
1568 	return 0;
1569 }
1570 
1571 int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
1572 {
1573 	struct ath12k_base *ab = ar->ab;
1574 	struct htt_rx_ring_tlv_filter tlv_filter = {};
1575 	int ret, ring_id, i;
1576 
1577 	tlv_filter.offset_valid = false;
1578 
1579 	if (!reset) {
1580 		tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_DEST_RING;
1581 
1582 		tlv_filter.drop_threshold_valid = true;
1583 		tlv_filter.rx_drop_threshold = HTT_RX_RING_TLV_DROP_THRESHOLD_VALUE;
1584 
1585 		tlv_filter.enable_log_mgmt_type = true;
1586 		tlv_filter.enable_log_ctrl_type = true;
1587 		tlv_filter.enable_log_data_type = true;
1588 
1589 		tlv_filter.conf_len_ctrl = HTT_RX_RING_DEFAULT_DMA_LENGTH;
1590 		tlv_filter.conf_len_mgmt = HTT_RX_RING_DEFAULT_DMA_LENGTH;
1591 		tlv_filter.conf_len_data = HTT_RX_RING_DEFAULT_DMA_LENGTH;
1592 
1593 		tlv_filter.enable_rx_tlv_offset = true;
1594 		tlv_filter.rx_tlv_offset = HTT_RX_RING_PKT_TLV_OFFSET;
1595 
1596 		tlv_filter.pkt_filter_flags0 =
1597 					HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
1598 					HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
1599 		tlv_filter.pkt_filter_flags1 =
1600 					HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 |
1601 					HTT_RX_MON_MO_MGMT_FILTER_FLAGS1;
1602 		tlv_filter.pkt_filter_flags2 =
1603 					HTT_RX_MON_FP_CTRL_FILTER_FLASG2 |
1604 					HTT_RX_MON_MO_CTRL_FILTER_FLASG2;
1605 		tlv_filter.pkt_filter_flags3 =
1606 					HTT_RX_MON_FP_CTRL_FILTER_FLASG3 |
1607 					HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
1608 					HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
1609 					HTT_RX_MON_MO_DATA_FILTER_FLASG3;
1610 	} else {
1611 		tlv_filter = ath12k_mac_mon_status_filter_default;
1612 
1613 		if (ath12k_debugfs_is_extd_rx_stats_enabled(ar))
1614 			tlv_filter.rx_filter = ath12k_debugfs_rx_filter(ar);
1615 	}
1616 
1617 	if (ab->hw_params->rxdma1_enable) {
1618 		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
1619 			ring_id = ar->dp.rxdma_mon_dst_ring[i].ring_id;
1620 			ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
1621 							       ar->dp.mac_id + i,
1622 							       HAL_RXDMA_MONITOR_DST,
1623 							       DP_RXDMA_REFILL_RING_SIZE,
1624 							       &tlv_filter);
1625 			if (ret) {
1626 				ath12k_err(ab,
1627 					   "failed to setup filter for monitor buf %d\n",
1628 					   ret);
1629 				return ret;
1630 			}
1631 		}
1632 		return 0;
1633 	}
1634 
1635 	if (!reset) {
1636 		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
1637 			ring_id = ab->dp.rx_mac_buf_ring[i].ring_id;
1638 			ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
1639 							       i,
1640 							       HAL_RXDMA_BUF,
1641 							       DP_RXDMA_REFILL_RING_SIZE,
1642 							       &tlv_filter);
1643 			if (ret) {
1644 				ath12k_err(ab,
1645 					   "failed to setup filter for mon rx buf %d\n",
1646 					   ret);
1647 				return ret;
1648 			}
1649 		}
1650 	}
1651 
1652 	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
1653 		ring_id = ab->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
1654 		if (!reset) {
1655 			tlv_filter.rx_filter =
1656 				HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
1657 		}
1658 
1659 		ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id,
1660 						       i,
1661 						       HAL_RXDMA_MONITOR_STATUS,
1662 						       RX_MON_STATUS_BUF_SIZE,
1663 						       &tlv_filter);
1664 		if (ret) {
1665 			ath12k_err(ab,
1666 				   "failed to setup filter for mon status buf %d\n",
1667 				   ret);
1668 			return ret;
1669 		}
1670 	}
1671 
1672 	return 0;
1673 }
1674 
1675 int ath12k_dp_tx_htt_tx_filter_setup(struct ath12k_base *ab, u32 ring_id,
1676 				     int mac_id, enum hal_ring_type ring_type,
1677 				     int tx_buf_size,
1678 				     struct htt_tx_ring_tlv_filter *htt_tlv_filter)
1679 {
1680 	struct htt_tx_ring_selection_cfg_cmd *cmd;
1681 	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
1682 	struct hal_srng_params params;
1683 	struct sk_buff *skb;
1684 	int len = sizeof(*cmd);
1685 	enum htt_srng_ring_type htt_ring_type;
1686 	enum htt_srng_ring_id htt_ring_id;
1687 	int ret;
1688 
1689 	skb = ath12k_htc_alloc_skb(ab, len);
1690 	if (!skb)
1691 		return -ENOMEM;
1692 
1693 	memset(&params, 0, sizeof(params));
1694 	ath12k_hal_srng_get_params(ab, srng, &params);
1695 
1696 	ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
1697 					    ring_type, &htt_ring_type,
1698 					    &htt_ring_id);
1699 
1700 	if (ret)
1701 		goto err_free;
1702 
1703 	skb_put(skb, len);
1704 	cmd = (struct htt_tx_ring_selection_cfg_cmd *)skb->data;
1705 	cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_TX_MONITOR_CFG,
1706 				      HTT_TX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE);
1707 	if (htt_ring_type == HTT_SW_TO_HW_RING ||
1708 	    htt_ring_type == HTT_HW_TO_SW_RING)
1709 		cmd->info0 |=
1710 			le32_encode_bits(DP_SW2HW_MACID(mac_id),
1711 					 HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
1712 	else
1713 		cmd->info0 |=
1714 			le32_encode_bits(mac_id,
1715 					 HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
1716 	cmd->info0 |= le32_encode_bits(htt_ring_id,
1717 				       HTT_TX_RING_SELECTION_CFG_CMD_INFO0_RING_ID);
1718 	cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
1719 				       HTT_TX_RING_SELECTION_CFG_CMD_INFO0_SS);
1720 	cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
1721 				       HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PS);
1722 
1723 	cmd->info1 |=
1724 		le32_encode_bits(tx_buf_size,
1725 				 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_RING_BUFF_SIZE);
1726 
1727 	if (htt_tlv_filter->tx_mon_mgmt_filter) {
1728 		cmd->info1 |=
1729 			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_MGMT,
1730 					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
1731 		cmd->info1 |=
1732 		le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
1733 				 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT);
1734 		cmd->info2 |=
1735 		le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_MGMT,
1736 				 HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
1737 	}
1738 
1739 	if (htt_tlv_filter->tx_mon_data_filter) {
1740 		cmd->info1 |=
1741 			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_CTRL,
1742 					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
1743 		cmd->info1 |=
1744 		le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
1745 				 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL);
1746 		cmd->info2 |=
1747 		le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_CTRL,
1748 				 HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
1749 	}
1750 
1751 	if (htt_tlv_filter->tx_mon_ctrl_filter) {
1752 		cmd->info1 |=
1753 			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_DATA,
1754 					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
1755 		cmd->info1 |=
1756 		le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
1757 				 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA);
1758 		cmd->info2 |=
1759 		le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_DATA,
1760 				 HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
1761 	}
1762 
1763 	cmd->tlv_filter_mask_in0 =
1764 		cpu_to_le32(htt_tlv_filter->tx_mon_downstream_tlv_flags);
1765 	cmd->tlv_filter_mask_in1 =
1766 		cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags0);
1767 	cmd->tlv_filter_mask_in2 =
1768 		cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags1);
1769 	cmd->tlv_filter_mask_in3 =
1770 		cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags2);
1771 
1772 	ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
1773 	if (ret)
1774 		goto err_free;
1775 
1776 	return 0;
1777 
1778 err_free:
1779 	dev_kfree_skb_any(skb);
1780 	return ret;
1781 }
1782