// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */

#include "core.h"
#include "dp_tx.h"
#include "debug.h"
#include "debugfs.h"
#include "hw.h"
#include "peer.h"
#include "mac.h"

15 enum hal_tcl_encap_type
ath12k_dp_tx_get_encap_type(struct ath12k_base * ab,struct sk_buff * skb)16 ath12k_dp_tx_get_encap_type(struct ath12k_base *ab, struct sk_buff *skb)
17 {
18 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
19
20 if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
21 return HAL_TCL_ENCAP_TYPE_RAW;
22
23 if (tx_info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
24 return HAL_TCL_ENCAP_TYPE_ETHERNET;
25
26 return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
27 }
28 EXPORT_SYMBOL(ath12k_dp_tx_get_encap_type);
29
ath12k_dp_tx_encap_nwifi(struct sk_buff * skb)30 void ath12k_dp_tx_encap_nwifi(struct sk_buff *skb)
31 {
32 struct ieee80211_hdr *hdr = (void *)skb->data;
33 u8 *qos_ctl;
34
35 if (!ieee80211_is_data_qos(hdr->frame_control))
36 return;
37
38 qos_ctl = ieee80211_get_qos_ctl(hdr);
39 memmove(skb->data + IEEE80211_QOS_CTL_LEN,
40 skb->data, (void *)qos_ctl - (void *)skb->data);
41 skb_pull(skb, IEEE80211_QOS_CTL_LEN);
42
43 hdr = (void *)skb->data;
44 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
45 }
46 EXPORT_SYMBOL(ath12k_dp_tx_encap_nwifi);
47
ath12k_dp_tx_get_tid(struct sk_buff * skb)48 u8 ath12k_dp_tx_get_tid(struct sk_buff *skb)
49 {
50 struct ieee80211_hdr *hdr = (void *)skb->data;
51 struct ath12k_skb_cb *cb = ATH12K_SKB_CB(skb);
52
53 if (cb->flags & ATH12K_SKB_HW_80211_ENCAP)
54 return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
55 else if (!ieee80211_is_data_qos(hdr->frame_control))
56 return HAL_DESC_REO_NON_QOS_TID;
57 else
58 return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
59 }
60 EXPORT_SYMBOL(ath12k_dp_tx_get_tid);
61
/* Map a cfg80211 cipher suite to the corresponding HAL encrypt type.
 * Unknown ciphers fall back to open (no encryption).
 */
enum hal_encrypt_type ath12k_dp_tx_get_encrypt_type(u32 cipher)
{
	enum hal_encrypt_type enctype;

	switch (cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		enctype = HAL_ENCRYPT_TYPE_WEP_40;
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		enctype = HAL_ENCRYPT_TYPE_WEP_104;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		enctype = HAL_ENCRYPT_TYPE_TKIP_MIC;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		enctype = HAL_ENCRYPT_TYPE_CCMP_128;
		break;
	case WLAN_CIPHER_SUITE_CCMP_256:
		enctype = HAL_ENCRYPT_TYPE_CCMP_256;
		break;
	case WLAN_CIPHER_SUITE_GCMP:
		enctype = HAL_ENCRYPT_TYPE_GCMP_128;
		break;
	case WLAN_CIPHER_SUITE_GCMP_256:
		enctype = HAL_ENCRYPT_TYPE_AES_GCMP_256;
		break;
	default:
		enctype = HAL_ENCRYPT_TYPE_OPEN;
		break;
	}

	return enctype;
}
EXPORT_SYMBOL(ath12k_dp_tx_get_encrypt_type);
84
/* Return a TX descriptor to its pool's free list.
 *
 * Clears the extension-descriptor reference and moves the entry from
 * wherever it currently sits onto the free list, under the pool lock.
 */
void ath12k_dp_tx_release_txbuf(struct ath12k_dp *dp,
				struct ath12k_tx_desc_info *tx_desc,
				u8 pool_id)
{
	spinlock_t *pool_lock = &dp->tx_desc_lock[pool_id];
	struct list_head *free_list = &dp->tx_desc_free_list[pool_id];

	spin_lock_bh(pool_lock);
	tx_desc->skb_ext_desc = NULL;
	list_move_tail(&tx_desc->list, free_list);
	spin_unlock_bh(pool_lock);
}
EXPORT_SYMBOL(ath12k_dp_tx_release_txbuf);
95
/* Take a TX descriptor from the pool's free list and move it to the
 * used list. Returns NULL (after warning) when the pool is exhausted.
 */
struct ath12k_tx_desc_info *ath12k_dp_tx_assign_buffer(struct ath12k_dp *dp,
						       u8 pool_id)
{
	struct ath12k_tx_desc_info *desc;

	spin_lock_bh(&dp->tx_desc_lock[pool_id]);
	desc = list_first_entry_or_null(&dp->tx_desc_free_list[pool_id],
					struct ath12k_tx_desc_info,
					list);
	if (desc)
		list_move_tail(&desc->list, &dp->tx_desc_used_list[pool_id]);
	spin_unlock_bh(&dp->tx_desc_lock[pool_id]);

	/* Warn outside the lock, matching the failure path's ordering */
	if (!desc)
		ath12k_warn(dp->ab, "failed to allocate data Tx buffer\n");

	return desc;
}
EXPORT_SYMBOL(ath12k_dp_tx_assign_buffer);
117
/* Append tail_len zeroed bytes of metadata space to the skb.
 *
 * skb_cow_data() guarantees writable, linear tailroom first; the new
 * region is zero-filled. Returns a pointer to the metadata area, or
 * NULL when tailroom could not be made available.
 */
void *ath12k_dp_metadata_align_skb(struct sk_buff *skb, u8 tail_len)
{
	struct sk_buff *last_frag;
	void *meta;

	if (unlikely(skb_cow_data(skb, tail_len, &last_frag) < 0))
		return NULL;

	meta = pskb_put(skb, last_frag, tail_len);
	memset(meta, 0, tail_len);

	return meta;
}
EXPORT_SYMBOL(ath12k_dp_metadata_align_skb);
131
/* Slide the skb payload by @delta bytes without changing its length.
 *
 * @head true  - payload moves toward the head (consumes headroom)
 * @head false - payload moves toward the tail (consumes tailroom)
 */
static void ath12k_dp_tx_move_payload(struct sk_buff *skb,
				      unsigned long delta,
				      bool head)
{
	unsigned long payload_len = skb->len;

	if (!head) {
		/* Grow at the tail, copy payload up, then drop the stale
		 * bytes from the front; length ends up unchanged.
		 */
		skb_put(skb, delta);
		memmove(skb->data + delta, skb->data, payload_len);
		skb_pull(skb, delta);
		return;
	}

	/* Grow at the head, copy payload down, then trim back to the
	 * original length.
	 */
	skb_push(skb, delta);
	memmove(skb->data, skb->data + delta, payload_len);
	skb_trim(skb, payload_len);
}
148
ath12k_dp_tx_align_payload(struct ath12k_dp * dp,struct sk_buff ** pskb)149 int ath12k_dp_tx_align_payload(struct ath12k_dp *dp, struct sk_buff **pskb)
150 {
151 u32 iova_mask = dp->hw_params->iova_mask;
152 unsigned long offset, delta1, delta2;
153 struct sk_buff *skb2, *skb = *pskb;
154 unsigned int headroom = skb_headroom(skb);
155 int tailroom = skb_tailroom(skb);
156 int ret = 0;
157
158 offset = (unsigned long)skb->data & iova_mask;
159 delta1 = offset;
160 delta2 = iova_mask - offset + 1;
161
162 if (headroom >= delta1) {
163 ath12k_dp_tx_move_payload(skb, delta1, true);
164 } else if (tailroom >= delta2) {
165 ath12k_dp_tx_move_payload(skb, delta2, false);
166 } else {
167 skb2 = skb_realloc_headroom(skb, iova_mask);
168 if (!skb2) {
169 ret = -ENOMEM;
170 goto out;
171 }
172
173 dev_kfree_skb_any(skb);
174
175 offset = (unsigned long)skb2->data & iova_mask;
176 if (offset)
177 ath12k_dp_tx_move_payload(skb2, offset, true);
178 *pskb = skb2;
179 }
180
181 out:
182 return ret;
183 }
184 EXPORT_SYMBOL(ath12k_dp_tx_align_payload);
185
/* Tear down a completed/failed TX buffer: unmap its DMA mappings, free
 * any extension descriptor, hand the frame back to mac80211, and update
 * the per-pdev pending-TX accounting.
 */
void ath12k_dp_tx_free_txbuf(struct ath12k_dp *dp,
			     struct dp_tx_ring *tx_ring,
			     struct ath12k_tx_desc_params *desc_params)
{
	struct ath12k_pdev_dp *dp_pdev;
	struct sk_buff *msdu = desc_params->skb;
	struct ath12k_skb_cb *skb_cb;
	/* mac_id comes from the descriptor; translate to the pdev index */
	u8 pdev_idx = ath12k_hw_mac_id_to_pdev_id(dp->hw_params, desc_params->mac_id);

	skb_cb = ATH12K_SKB_CB(msdu);

	dma_unmap_single(dp->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
	/* The extension descriptor is optional; unmap and free it only when
	 * a mapping was recorded for it.
	 */
	if (skb_cb->paddr_ext_desc) {
		dma_unmap_single(dp->dev, skb_cb->paddr_ext_desc,
				 desc_params->skb_ext_desc->len, DMA_TO_DEVICE);
		dev_kfree_skb_any(desc_params->skb_ext_desc);
	}

	/* Hold the RCU read lock for the rest of the function while we look
	 * up and use the pdev data path structures.
	 */
	guard(rcu)();

	dp_pdev = ath12k_dp_to_pdev_dp(dp, pdev_idx);

	ieee80211_free_txskb(ath12k_pdev_dp_to_hw(dp_pdev), msdu);

	/* Wake anyone waiting for the TX queue to drain once the last
	 * pending frame is gone.
	 */
	if (atomic_dec_and_test(&dp_pdev->num_tx_pending))
		wake_up(&dp_pdev->tx_empty_waitq);
}
EXPORT_SYMBOL(ath12k_dp_tx_free_txbuf);
214