1 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
2 /*
3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
5 */
6
7 #ifndef ATH12K_DP_H
8 #define ATH12K_DP_H
9
10 #include "hw.h"
11 #include "dp_htt.h"
12 #include "dp_cmn.h"
13 #include <linux/rhashtable.h>
14
/* Max number of RXDMA (per-MAC) rings attached to a single pdev */
#define MAX_RXDMA_PER_PDEV 2

/* Forward declarations; a pointer suffices here, so the full core
 * headers need not be pulled in.
 */
struct ath12k_base;
struct ath12k_dp_link_peer;
struct ath12k_dp;
struct ath12k_vif;
struct ath12k_link_vif;
struct ath12k_ext_irq_grp;
struct ath12k_dp_rx_tid;
struct ath12k_dp_rx_tid_rxq;

/* Timeout for purging pending monitor SKBs */
#define DP_MON_PURGE_TIMEOUT_MS 100
/* Per-invocation processing budget of the monitor service loop */
#define DP_MON_SERVICE_BUDGET 128

/* Array bounds for the per-link TX classification counters
 * in struct ath12k_link_stats below.
 */
#define DP_ENCAP_TYPE_MAX 4
#define DP_ENCRYPT_TYPE_MAX 12
#define DP_DESC_TYPE_MAX 2
32
/* SW state of one DP SRNG (source/destination ring).
 * @vaddr_unaligned/@paddr_unaligned: addresses as allocated (must be
 *	used when freeing); @vaddr/@paddr are the aligned addresses used
 *	for ring accesses.
 * @size: allocation size in bytes
 * @ring_id: HAL ring id of this srng
 */
struct dp_srng {
	u32 *vaddr_unaligned;
	u32 *vaddr;
	dma_addr_t paddr_unaligned;
	dma_addr_t paddr;
	int size;
	u32 ring_id;
};
41
/* Monitor-mode RXDMA buffer ring; posted buffers are tracked in an IDR
 * so they can be looked up by cookie and reclaimed.
 */
struct dp_rxdma_mon_ring {
	struct dp_srng refill_buf_ring;
	struct idr bufs_idr;
	/* Protects bufs_idr */
	spinlock_t idr_lock;
	int bufs_max;	/* max buffers that can be posted to the ring */
};
49
/* Plain RXDMA refill buffer ring (no per-buffer IDR tracking) */
struct dp_rxdma_ring {
	struct dp_srng refill_buf_ring;
	int bufs_max;	/* max buffers that can be posted to the ring */
};
54
/* Advance a TX completion ring index by one with wrap-around */
#define ATH12K_TX_COMPL_NEXT(ab, x) (((x) + 1) % DP_TX_COMP_RING_SIZE(ab))

/* Per-TCL-ring TX state: the data ring, its completion ring, and a
 * tx_status[] array managed as a circular buffer through
 * tx_status_head/tx_status_tail (advanced via ATH12K_TX_COMPL_NEXT()).
 */
struct dp_tx_ring {
	u8 tcl_data_ring_id;
	struct dp_srng tcl_data_ring;
	struct dp_srng tcl_comp_ring;
	struct hal_wbm_completion_ring_tx *tx_status;
	int tx_status_head;
	int tx_status_tail;
};
65
/* Monitor-path event counters accumulated per pdev; status_* count
 * status-ring PPDU parsing events, dest_* count destination-ring
 * MPDU/PPDU reap events, dup_* count duplicate link-desc/buffer hits.
 */
struct ath12k_pdev_mon_stats {
	u32 status_ppdu_state;
	u32 status_ppdu_start;
	u32 status_ppdu_end;
	u32 status_ppdu_compl;
	u32 status_ppdu_start_mis;	/* PPDU start missed */
	u32 status_ppdu_end_mis;	/* PPDU end missed */
	u32 status_ppdu_done;
	u32 dest_ppdu_done;
	u32 dest_mpdu_done;
	u32 dest_mpdu_drop;
	u32 dup_mon_linkdesc_cnt;
	u32 dup_mon_buf_cnt;
	u32 dest_mon_stuck;
	u32 dest_mon_not_reaped;
};
82
/* Relationship of the monitor status ring to the destination ring */
enum dp_mon_status_buf_state {
	/* PPDU id in the status buffer matches the destination ring */
	DP_MON_STATUS_MATCH,
	/* status buffer DMA is not yet done */
	DP_MON_STATUS_NO_DMA,
	/* status ring is lagging behind the destination ring */
	DP_MON_STATUS_LAG,
	/* status ring is leading the destination ring */
	DP_MON_STATUS_LEAD,
	/* status buffer needs to be replenished */
	DP_MON_STATUS_REPLINISH,
};
90
/* One DMA-coherent bank holding HW link descriptors.
 * The *_unaligned address/paddr are as allocated (used for freeing);
 * vaddr/paddr are the aligned addresses handed to HW.
 */
struct dp_link_desc_bank {
	void *vaddr_unaligned;
	void *vaddr;
	dma_addr_t paddr_unaligned;
	dma_addr_t paddr;
	u32 size;	/* bank size in bytes */
};
98
/* Size to enforce scatter idle list mode */
#define DP_LINK_DESC_ALLOC_SIZE_THRESH 0x200000
#define DP_LINK_DESC_BANKS_MAX 8

/* Link descriptor cookie layout: ((id + START) << SHIFT) | bank */
#define DP_LINK_DESC_START 0x4000
#define DP_LINK_DESC_SHIFT 3

/* Build a link descriptor cookie from a descriptor id and bank page */
#define DP_LINK_DESC_COOKIE_SET(id, page) \
	((((id) + DP_LINK_DESC_START) << DP_LINK_DESC_SHIFT) | (page))

/* Low bits of the cookie carry the bank index */
#define DP_LINK_DESC_BANK_MASK GENMASK(2, 0)

/* RX descriptor cookie: buffer index in the low bits, pool id above */
#define DP_RX_DESC_COOKIE_INDEX_MAX 0x3ffff
#define DP_RX_DESC_COOKIE_POOL_ID_MAX 0x1c0000
#define DP_RX_DESC_COOKIE_MAX \
	(DP_RX_DESC_COOKIE_INDEX_MAX | DP_RX_DESC_COOKIE_POOL_ID_MAX)
/* PPDU id delta below this is treated as normal advance, not wrap */
#define DP_NOT_PPDU_ID_WRAP_AROUND 20000
116
/* Monitor PPDU status parsing state */
enum ath12k_dp_ppdu_state {
	DP_PPDU_STATUS_START,	/* PPDU start seen, end pending */
	DP_PPDU_STATUS_DONE,	/* complete PPDU parsed */
};
121
/* One reassembled monitor-mode MPDU: an skb chain from head to tail,
 * linked into ath12k_mon_data::dp_rx_mon_mpdu_list via @list.
 */
struct dp_mon_mpdu {
	struct list_head list;
	struct sk_buff *head;
	struct sk_buff *tail;
	u32 err_bitmap;		/* per-MPDU RX error bitmap */
	u8 decap_format;	/* HW decap format of the payload */
};
129
/* Max status buffers processed per monitor service pass */
#define DP_MON_MAX_STATUS_BUF 32

/* Per-pdev monitor mode state: link descriptor banks, the PPDU
 * currently being parsed, reap bookkeeping and the in-flight
 * MPDU/PPDU lists for RX and TX monitor paths.
 */
struct ath12k_mon_data {
	struct dp_link_desc_bank link_desc_banks[DP_LINK_DESC_BANKS_MAX];
	struct hal_rx_mon_ppdu_info mon_ppdu_info;

	u32 mon_ppdu_status;		/* enum ath12k_dp_ppdu_state */
	u32 mon_last_buf_cookie;
	u64 mon_last_linkdesc_paddr;
	u16 chan_noise_floor;
	u32 err_bitmap;
	u8 decap_format;

	struct ath12k_pdev_mon_stats rx_mon_stats;
	enum dp_mon_status_buf_state buf_state;
	/* lock for monitor data */
	spinlock_t mon_lock;
	struct sk_buff_head rx_status_q;
	struct dp_mon_mpdu *mon_mpdu;	/* MPDU currently being built */
	struct list_head dp_rx_mon_mpdu_list;
	struct dp_mon_tx_ppdu_info *tx_prot_ppdu_info;
	struct dp_mon_tx_ppdu_info *tx_data_ppdu_info;
};
153
/* Per-pdev (per-radio) data path state */
struct ath12k_pdev_dp {
	u32 mac_id;
	atomic_t num_tx_pending;	/* outstanding TX frames on this pdev */
	wait_queue_head_t tx_empty_waitq;	/* woken when num_tx_pending drains */

	struct ath12k_dp *dp;		/* back-pointer to device DP */
	struct ieee80211_hw *hw;
	u8 hw_link_id;
	struct ath12k_dp_hw *dp_hw;

	/* Protects ppdu stats */
	spinlock_t ppdu_list_lock;
	struct ath12k_per_peer_tx_stats peer_tx_stats;
	struct list_head ppdu_stats_info;
	u32 ppdu_stat_list_depth;

	struct dp_srng rxdma_mon_dst_ring[MAX_RXDMA_PER_PDEV];
	struct dp_srng tx_mon_dst_ring[MAX_RXDMA_PER_PDEV];

	struct ieee80211_rx_status rx_status;
	struct ath12k_mon_data mon_data;
};
176
/* Dimensioning assumptions used to size REO/TID related resources */
#define DP_NUM_CLIENTS_MAX 64
#define DP_AVG_TIDS_PER_CLIENT 2
#define DP_NUM_TIDS_MAX (DP_NUM_CLIENTS_MAX * DP_AVG_TIDS_PER_CLIENT)
#define DP_AVG_MSDUS_PER_FLOW 128
#define DP_AVG_FLOWS_PER_TID 2
#define DP_AVG_MPDUS_PER_TID_MAX 128
#define DP_AVG_MSDUS_PER_MPDU 4

#define DP_RX_HASH_ENABLE 1 /* Enable hash based Rx steering */

/* Maximum supported block-ack window size */
#define DP_BA_WIN_SZ_MAX 1024

#define DP_TCL_NUM_RING_MAX 4

#define DP_IDLE_SCATTER_BUFS_MAX 16

/* Ring sizes in entries; the (ab)-parameterized ones come from the
 * active profile's dp_params and may differ per device profile.
 */
#define DP_WBM_RELEASE_RING_SIZE 64
#define DP_TCL_DATA_RING_SIZE 512
#define DP_TX_COMP_RING_SIZE(ab) \
	((ab)->profile_param->dp_params.tx_comp_ring_size)
#define DP_TX_IDR_SIZE(ab) DP_TX_COMP_RING_SIZE(ab)
#define DP_TCL_CMD_RING_SIZE 32
#define DP_TCL_STATUS_RING_SIZE 32
#define DP_REO_DST_RING_MAX 8
#define DP_REO_DST_RING_SIZE 2048
#define DP_REO_REINJECT_RING_SIZE 32
#define DP_RX_RELEASE_RING_SIZE 1024
#define DP_REO_EXCEPTION_RING_SIZE 128
#define DP_REO_CMD_RING_SIZE 256
#define DP_REO_STATUS_RING_SIZE 2048
#define DP_RXDMA_BUF_RING_SIZE 4096
#define DP_RX_MAC_BUF_RING_SIZE 2048
#define DP_RXDMA_REFILL_RING_SIZE 2048
#define DP_RXDMA_ERR_DST_RING_SIZE 1024
#define DP_RXDMA_MON_STATUS_RING_SIZE 1024
#define DP_RXDMA_MONITOR_BUF_RING_SIZE(ab) \
	((ab)->profile_param->dp_params.rxdma_monitor_buf_ring_size)
#define DP_RXDMA_MONITOR_DST_RING_SIZE(ab) \
	((ab)->profile_param->dp_params.rxdma_monitor_dst_ring_size)
#define DP_RXDMA_MONITOR_DESC_RING_SIZE 4096
#define DP_TX_MONITOR_BUF_RING_SIZE 4096
#define DP_TX_MONITOR_DEST_RING_SIZE 2048

/* TX monitor buffer sizes in bytes */
#define DP_TX_MONITOR_BUF_SIZE 2048
#define DP_TX_MONITOR_BUF_SIZE_MIN 48
#define DP_TX_MONITOR_BUF_SIZE_MAX 8192

/* RX buffer sizes in bytes */
#define DP_RX_BUFFER_SIZE 2048
#define DP_RX_BUFFER_SIZE_LITE 1024
#define DP_RX_BUFFER_ALIGN_SIZE 128

/* Usable RX monitor status buffer size: base size minus reservation,
 * alignment slack and skb_shared_info tailroom.
 */
#define RX_MON_STATUS_BASE_BUF_SIZE 2048
#define RX_MON_STATUS_BUF_ALIGN 128
#define RX_MON_STATUS_BUF_RESERVATION 128
#define RX_MON_STATUS_BUF_SIZE (RX_MON_STATUS_BASE_BUF_SIZE - \
				(RX_MON_STATUS_BUF_RESERVATION + \
				 RX_MON_STATUS_BUF_ALIGN + \
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))))

/* RXDMA buffer cookie layout: buffer id plus pdev id */
#define DP_RXDMA_BUF_COOKIE_BUF_ID	GENMASK(17, 0)
#define DP_RXDMA_BUF_COOKIE_PDEV_ID	GENMASK(19, 18)

/* HW mac ids are 1-based, SW mac ids are 0-based */
#define DP_HW2SW_MACID(mac_id) ({ typeof(mac_id) x = (mac_id); x ? x - 1 : 0; })
#define DP_SW2HW_MACID(mac_id) ((mac_id) + 1)

/* TX descriptor id (cookie) layout */
#define DP_TX_DESC_ID_MAC_ID  GENMASK(1, 0)
#define DP_TX_DESC_ID_MSDU_ID GENMASK(18, 2)
#define DP_TX_DESC_ID_POOL_ID GENMASK(20, 19)

/* Shadow-register head-pointer update timer intervals in ms */
#define ATH12K_SHADOW_DP_TIMER_INTERVAL 20
#define ATH12K_SHADOW_CTRL_TIMER_INTERVAL 10

/* Per-pool TX descriptor count, taken from the active profile */
#define ATH12K_NUM_POOL_TX_DESC(ab) \
	((ab)->profile_param->dp_params.num_pool_tx_desc)
/* TODO: revisit this count during testing */
#define ATH12K_RX_DESC_COUNT(ab) \
	((ab)->profile_param->dp_params.rx_desc_count)
254
#define ATH12K_PAGE_SIZE	PAGE_SIZE

/* Total 1024 entries in PPT, i.e 4K/4 considering 4K aligned
 * SPT pages which makes lower 12bits 0
 */
#define ATH12K_MAX_PPT_ENTRIES	1024

/* Total 512 entries in a SPT, i.e 4K Page/8 */
#define ATH12K_MAX_SPT_ENTRIES	512

/* Number of SPT pages needed to cover all RX descriptors */
#define ATH12K_NUM_RX_SPT_PAGES(ab)	((ATH12K_RX_DESC_COUNT(ab)) / \
					 ATH12K_MAX_SPT_ENTRIES)

/* SPT pages per TX pool, and total over all HW queues */
#define ATH12K_TX_SPT_PAGES_PER_POOL(ab) (ATH12K_NUM_POOL_TX_DESC(ab) / \
					  ATH12K_MAX_SPT_ENTRIES)
#define ATH12K_NUM_TX_SPT_PAGES(ab)	(ATH12K_TX_SPT_PAGES_PER_POOL(ab) * \
					 ATH12K_HW_MAX_QUEUES)

/* TX SPT pages come first, RX pages follow them */
#define ATH12K_TX_SPT_PAGE_OFFSET 0
#define ATH12K_RX_SPT_PAGE_OFFSET(ab) ATH12K_NUM_TX_SPT_PAGES(ab)

/* The SPT pages are divided for RX and TX, first block for RX
 * and remaining for TX
 */
#define ATH12K_NUM_TX_SPT_PAGE_START(ab) ATH12K_NUM_RX_SPT_PAGES(ab)

/* Sanity marker stored in each RX descriptor (see ath12k_rx_desc_info) */
#define ATH12K_DP_RX_DESC_MAGIC	0xBABABABA

/* 4K aligned address have last 12 bits set to 0, this check is done
 * so that two spt pages address can be stored per 8bytes
 * of CMEM (PPT)
 */
#define ATH12K_SPT_4K_ALIGN_CHECK 0xFFF
#define ATH12K_SPT_4K_ALIGN_OFFSET 12
#define ATH12K_PPT_ADDR_OFFSET(ppt_index) (4 * (ppt_index))

/* To indicate HW of CMEM address, b0-31 are cmem base received via QMI */
#define ATH12K_CMEM_ADDR_MSB 0x10

/* Of 20 bits cookie, b0-b8 is to indicate SPT offset and b9-19 for PPT */
#define ATH12K_CC_SPT_MSB 8
#define ATH12K_CC_PPT_MSB 19
#define ATH12K_CC_PPT_SHIFT 9
#define ATH12K_DP_CC_COOKIE_SPT  GENMASK(8, 0)
#define ATH12K_DP_CC_COOKIE_PPT  GENMASK(19, 9)

#define DP_REO_QREF_NUM		GENMASK(31, 16)
#define DP_MAX_PEER_ID		2047

/* Total size of the LUT is based on 2K peers, each having reference
 * for 17tids, note each entry is of type ath12k_reo_queue_ref
 * hence total size is 2048 * 17 * 8 = 278528
 */
#define DP_REOQ_LUT_SIZE	278528

/* Invalid TX Bank ID value */
#define DP_INVALID_BANK_ID -1

/* Array bounds for TQM release reason / FW TX status counters */
#define MAX_TQM_RELEASE_REASON 15
#define MAX_FW_TX_STATUS 7
315
/* One TX bank register profile; shared by @num_users vdevs that need
 * the same @bank_config value.
 */
struct ath12k_dp_tx_bank_profile {
	u8 is_configured;
	u32 num_users;
	u32 bank_config;
};
321
/* Deferred head-pointer update timer for shadow-register rings */
struct ath12k_hp_update_timer {
	struct timer_list timer;
	bool started;		/* timer is currently armed */
	bool init;		/* timer has been initialized */
	u32 tx_num;		/* current enqueue count */
	u32 timer_tx_num;	/* enqueue count at last timer run */
	u32 ring_id;
	u32 interval;		/* rearm interval in ms */
	struct ath12k_base *ab;
};
332
/* SW RX descriptor tracked through the cookie-conversion tables */
struct ath12k_rx_desc_info {
	struct list_head list;	/* linkage on the free list */
	struct sk_buff *skb;	/* buffer currently mapped to this desc */
	u32 cookie;
	u32 magic;		/* ATH12K_DP_RX_DESC_MAGIC sanity marker */
	u8 in_use : 1,
	   device_id : 3,
	   reserved : 4;
};
342
/* SW TX descriptor tracked through the cookie-conversion tables */
struct ath12k_tx_desc_info {
	struct list_head list;	/* linkage on the free/used lists */
	struct sk_buff *skb;
	struct sk_buff *skb_ext_desc;	/* extension descriptor buffer, if any */
	u32 desc_id; /* Cookie */
	u8 mac_id;
	u8 pool_id;
};
351
/* Parameter bundle passed around on the TX descriptor path */
struct ath12k_tx_desc_params {
	struct sk_buff *skb;
	struct sk_buff *skb_ext_desc;
	u8 mac_id;
};
357
/* One secondary page table (SPT) page used for cookie conversion */
struct ath12k_spt_info {
	dma_addr_t paddr;
	u64 *vaddr;
};
362
/* One 8-byte REO queue reference entry in the REOQ LUT */
struct ath12k_reo_queue_ref {
	u32 info0;
	u32 info1;
} __packed;
367
/* DMA-coherent REO queue address LUT (see DP_REOQ_LUT_SIZE).
 * The *_unaligned address/paddr are as allocated; vaddr/paddr are the
 * aligned addresses used for LUT accesses.
 */
struct ath12k_reo_q_addr_lut {
	u32 *vaddr_unaligned;
	u32 *vaddr;
	dma_addr_t paddr_unaligned;
	dma_addr_t paddr;
	u32 size;	/* allocation size in bytes */
};
375
/* Per-link TX counters, bucketed by encap/encrypt/descriptor type */
struct ath12k_link_stats {
	u32 tx_enqueued;
	u32 tx_completed;
	u32 tx_bcast_mcast;
	u32 tx_dropped;
	u32 tx_encap_type[DP_ENCAP_TYPE_MAX];
	u32 tx_encrypt_type[DP_ENCRYPT_TYPE_MAX];
	u32 tx_desc_type[DP_DESC_TYPE_MAX];
};
385
386 /* DP arch ops to communicate from common module
387 * to arch specific module
388 */
/* DP arch ops to communicate from common module
 * to arch specific module. Called through the ath12k_dp_arch_*()
 * inline wrappers below; see those for the dispatch convention.
 */
struct ath12k_dp_arch_ops {
	/* service DP rings for one interrupt group within @budget */
	int (*service_srng)(struct ath12k_dp *dp,
			    struct ath12k_ext_irq_grp *irq_grp,
			    int budget);
	/* compute the TX bank config value for a link vif */
	u32 (*tx_get_vdev_bank_config)(struct ath12k_base *ab,
				       struct ath12k_link_vif *arvif);
	/* send a REO command; @cb is invoked on command completion */
	int (*reo_cmd_send)(struct ath12k_base *ab,
			    struct ath12k_dp_rx_tid_rxq *rx_tid,
			    enum hal_reo_cmd_type type,
			    struct ath12k_hal_reo_cmd *cmd,
			    void (*cb)(struct ath12k_dp *dp, void *ctx,
				       enum hal_reo_cmd_status status));
	/* fill a REO command for PN check setup on key install/remove */
	void (*setup_pn_check_reo_cmd)(struct ath12k_hal_reo_cmd *cmd,
				       struct ath12k_dp_rx_tid *rx_tid,
				       u32 cipher, enum set_key_cmd key_cmd);
	/* tear down a peer's RX TID */
	void (*rx_peer_tid_delete)(struct ath12k_base *ab,
				   struct ath12k_dp_link_peer *peer, u8 tid);
	/* flush a TID's REO queue from the HW cache */
	int (*reo_cache_flush)(struct ath12k_base *ab,
			       struct ath12k_dp_rx_tid_rxq *rx_tid);
	/* return an RX link descriptor to WBM with the given action */
	int (*rx_link_desc_return)(struct ath12k_dp *dp,
				   struct ath12k_buffer_addr *buf_addr_info,
				   enum hal_wbm_rel_bm_act action);
	/* free pending defrag state of a TID */
	void (*rx_frags_cleanup)(struct ath12k_dp_rx_tid *rx_tid,
				 bool rel_link_desc);
	/* update REO queue params (BA window, optionally SSN) of a TID */
	int (*peer_rx_tid_reo_update)(struct ath12k_dp *dp,
				      struct ath12k_dp_link_peer *peer,
				      struct ath12k_dp_rx_tid *rx_tid,
				      u32 ba_win_sz, u16 ssn,
				      bool update_ssn);
	/* allocate/assign a REO queue for a peer TID */
	int (*rx_assign_reoq)(struct ath12k_base *ab, struct ath12k_dp_peer *dp_peer,
			      struct ath12k_dp_rx_tid *rx_tid,
			      u16 ssn, enum hal_pn_type pn_type);
	/* program/clear a peer TID's queue reference in the REOQ LUT */
	void (*peer_rx_tid_qref_setup)(struct ath12k_base *ab, u16 peer_id, u16 tid,
				       dma_addr_t paddr);
	void (*peer_rx_tid_qref_reset)(struct ath12k_base *ab, u16 peer_id, u16 tid);
	/* handle deferred deletion of a TID's REO queue */
	int (*rx_tid_delete_handler)(struct ath12k_base *ab,
				     struct ath12k_dp_rx_tid_rxq *rx_tid);
};
427
/* TX error counters accumulated per device */
struct ath12k_device_dp_tx_err_stats {
	/* TCL Ring Descriptor unavailable */
	u32 desc_na[DP_TCL_NUM_RING_MAX];
	/* Other failures during dp_tx due to mem allocation failure
	 * idr unavailable etc.
	 */
	atomic_t misc_fail;
};
436
/* Device-wide data path counters (RX/TX error and completion stats) */
struct ath12k_device_dp_stats {
	u32 err_ring_pkts;
	u32 invalid_rbm;	/* frames with invalid return buffer manager */
	u32 rxdma_error[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX];
	u32 reo_error[HAL_REO_DEST_RING_ERROR_CODE_MAX];
	u32 hal_reo_error[DP_REO_DST_RING_MAX];
	struct ath12k_device_dp_tx_err_stats tx_err;
	/* RX counts per REO destination ring, per device */
	u32 reo_rx[DP_REO_DST_RING_MAX][ATH12K_MAX_DEVICES];
	u32 rx_wbm_rel_source[HAL_WBM_REL_SRC_MODULE_MAX][ATH12K_MAX_DEVICES];
	u32 tqm_rel_reason[MAX_TQM_RELEASE_REASON];
	u32 fw_tx_status[MAX_FW_TX_STATUS];
	u32 tx_wbm_rel_source[HAL_WBM_REL_SRC_MODULE_MAX];
	u32 tx_enqueued[DP_TCL_NUM_RING_MAX];
	u32 tx_completed[DP_TCL_NUM_RING_MAX];
	u32 reo_excep_msdu_buf_type;
};
453
/* Top-level per-device data path state */
struct ath12k_dp {
	struct ath12k_base *ab;
	u32 mon_dest_ring_stuck_cnt;
	u8 num_bank_profiles;
	/* protects the access and update of bank_profiles */
	spinlock_t tx_bank_lock;
	struct ath12k_dp_tx_bank_profile *bank_profiles;
	enum ath12k_htc_ep_id eid;	/* HTC endpoint used for HTT */
	struct completion htt_tgt_version_received;
	u8 htt_tgt_ver_major;
	u8 htt_tgt_ver_minor;
	struct dp_link_desc_bank link_desc_banks[DP_LINK_DESC_BANKS_MAX];
	enum hal_rx_buf_return_buf_manager idle_link_rbm;
	struct dp_srng wbm_idle_ring;
	struct dp_srng wbm_desc_rel_ring;
	struct dp_srng reo_reinject_ring;
	struct dp_srng rx_rel_ring;
	struct dp_srng reo_except_ring;
	struct dp_srng reo_cmd_ring;
	struct dp_srng reo_status_ring;
	enum ath12k_peer_metadata_version peer_metadata_ver;
	struct dp_srng reo_dst_ring[DP_REO_DST_RING_MAX];
	struct dp_tx_ring tx_ring[DP_TCL_NUM_RING_MAX];
	struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX];
	struct list_head reo_cmd_update_rx_queue_list;
	struct list_head reo_cmd_cache_flush_list;
	u32 reo_cmd_cache_flush_count;
	/* protects access to below fields,
	 * - reo_cmd_update_rx_queue_list
	 * - reo_cmd_cache_flush_list
	 * - reo_cmd_cache_flush_count
	 */
	spinlock_t reo_rxq_flush_lock;
	struct list_head reo_cmd_list;
	/* protects access to below fields,
	 * - reo_cmd_list
	 */
	spinlock_t reo_cmd_lock;
	struct ath12k_hp_update_timer reo_cmd_timer;
	struct ath12k_hp_update_timer tx_ring_timer[DP_TCL_NUM_RING_MAX];
	/* cookie conversion: SPT pages and per-pool desc base tables */
	struct ath12k_spt_info *spt_info;
	u32 num_spt_pages;
	u32 rx_ppt_base;
	struct ath12k_rx_desc_info **rxbaddr;
	struct ath12k_tx_desc_info **txbaddr;
	struct list_head rx_desc_free_list;
	/* protects the free desc list */
	spinlock_t rx_desc_lock;

	struct list_head tx_desc_free_list[ATH12K_HW_MAX_QUEUES];
	struct list_head tx_desc_used_list[ATH12K_HW_MAX_QUEUES];
	/* protects the free and used desc lists */
	spinlock_t tx_desc_lock[ATH12K_HW_MAX_QUEUES];

	struct dp_rxdma_ring rx_refill_buf_ring;
	struct dp_srng rx_mac_buf_ring[MAX_RXDMA_PER_PDEV];
	struct dp_srng rxdma_err_dst_ring[MAX_RXDMA_PER_PDEV];
	struct dp_rxdma_mon_ring rxdma_mon_buf_ring;
	struct dp_rxdma_mon_ring tx_mon_buf_ring;
	struct dp_rxdma_mon_ring rx_mon_status_refill_ring[MAX_RXDMA_PER_PDEV];
	struct ath12k_reo_q_addr_lut reoq_lut;
	struct ath12k_reo_q_addr_lut ml_reoq_lut;	/* MLO REO queue LUT */
	const struct ath12k_hw_params *hw_params;
	struct device *dev;
	struct ath12k_hal *hal;

	/* RCU on dp_pdevs[] provides a teardown synchronization mechanism,
	 * ensuring in-flight data path readers complete before reclaim. Writers
	 * update internal fields under their own synchronization, while readers of
	 * internal fields may perform lockless read if occasional inconsistency
	 * is acceptable or use additional synchronization for a coherent view.
	 *
	 * RCU is used for dp_pdevs[] at this stage to align with
	 * ab->pdevs_active[]. However, if the teardown paths ensure quiescence,
	 * both dp_pdevs[] and pdevs_active[] can be converted to plain pointers,
	 * removing RCU synchronize overhead.
	 *
	 * TODO: evaluate removal of RCU from dp_pdevs in the future
	 */
	struct ath12k_pdev_dp __rcu *dp_pdevs[MAX_RADIOS];

	struct ath12k_hw_group *ag;
	u8 device_id;

	/* Lock for protection of peers and rhead_peer_addr */
	spinlock_t dp_lock;

	/* arch-specific dispatch table; see struct ath12k_dp_arch_ops */
	struct ath12k_dp_arch_ops *ops;

	/* Linked list of struct ath12k_dp_link_peer */
	struct list_head peers;

	/* For rhash table init and deinit protection */
	struct mutex link_peer_rhash_tbl_lock;

	/* The rhashtable containing struct ath12k_link_peer keyed by mac addr */
	struct rhashtable *rhead_peer_addr;
	struct rhashtable_params rhash_peer_addr_param;
	struct ath12k_device_dp_stats device_stats;
};
554
/* Dispatch to the arch op computing the TX bank config for @arvif */
static inline u32 ath12k_dp_arch_tx_get_vdev_bank_config(struct ath12k_dp *dp,
							 struct ath12k_link_vif *arvif)
{
	return dp->ops->tx_get_vdev_bank_config(dp->ab, arvif);
}
560
/* Dispatch a REO command through the arch ops; @cb runs on completion */
static inline int ath12k_dp_arch_reo_cmd_send(struct ath12k_dp *dp,
					      struct ath12k_dp_rx_tid_rxq *rx_tid,
					      enum hal_reo_cmd_type type,
					      struct ath12k_hal_reo_cmd *cmd,
					      void (*cb)(struct ath12k_dp *dp, void *ctx,
							 enum hal_reo_cmd_status status))
{
	return dp->ops->reo_cmd_send(dp->ab, rx_tid, type, cmd, cb);
}
570
/* Dispatch: fill @cmd for PN check setup on key install/remove */
static inline
void ath12k_dp_arch_setup_pn_check_reo_cmd(struct ath12k_dp *dp,
					   struct ath12k_hal_reo_cmd *cmd,
					   struct ath12k_dp_rx_tid *rx_tid,
					   u32 cipher,
					   enum set_key_cmd key_cmd)
{
	dp->ops->setup_pn_check_reo_cmd(cmd, rx_tid, cipher, key_cmd);
}
580
/* Dispatch: tear down @peer's RX TID @tid */
static inline void ath12k_dp_arch_rx_peer_tid_delete(struct ath12k_dp *dp,
						     struct ath12k_dp_link_peer *peer,
						     u8 tid)
{
	dp->ops->rx_peer_tid_delete(dp->ab, peer, tid);
}
587
/* Dispatch: flush @rx_tid's REO queue from the HW cache */
static inline int ath12k_dp_arch_reo_cache_flush(struct ath12k_dp *dp,
						 struct ath12k_dp_rx_tid_rxq *rx_tid)
{
	return dp->ops->reo_cache_flush(dp->ab, rx_tid);
}
593
/* Dispatch: return an RX link descriptor to WBM with @action */
static inline
int ath12k_dp_arch_rx_link_desc_return(struct ath12k_dp *dp,
				       struct ath12k_buffer_addr *buf_addr_info,
				       enum hal_wbm_rel_bm_act action)
{
	return dp->ops->rx_link_desc_return(dp, buf_addr_info, action);
}
601
/* Dispatch: free pending defrag state of @rx_tid */
static inline
void ath12k_dp_arch_rx_frags_cleanup(struct ath12k_dp *dp,
				     struct ath12k_dp_rx_tid *rx_tid,
				     bool rel_link_desc)
{
	dp->ops->rx_frags_cleanup(rx_tid, rel_link_desc);
}
609
/* Dispatch: update REO queue params (BA window, optionally SSN) */
static inline int ath12k_dp_arch_peer_rx_tid_reo_update(struct ath12k_dp *dp,
							struct ath12k_dp_link_peer *peer,
							struct ath12k_dp_rx_tid *rx_tid,
							u32 ba_win_sz, u16 ssn,
							bool update_ssn)
{
	return dp->ops->peer_rx_tid_reo_update(dp, peer, rx_tid,
					       ba_win_sz, ssn, update_ssn);
}
619
/* Dispatch: allocate/assign a REO queue for @dp_peer's @rx_tid */
static inline int ath12k_dp_arch_rx_assign_reoq(struct ath12k_dp *dp,
						struct ath12k_dp_peer *dp_peer,
						struct ath12k_dp_rx_tid *rx_tid,
						u16 ssn, enum hal_pn_type pn_type)
{
	return dp->ops->rx_assign_reoq(dp->ab, dp_peer, rx_tid, ssn, pn_type);
}
627
/* Dispatch: program a peer TID queue reference into the REOQ LUT */
static inline void ath12k_dp_arch_peer_rx_tid_qref_setup(struct ath12k_dp *dp,
							 u16 peer_id, u16 tid,
							 dma_addr_t paddr)
{
	dp->ops->peer_rx_tid_qref_setup(dp->ab, peer_id, tid, paddr);
}
634
/* Dispatch: clear a peer TID queue reference from the REOQ LUT */
static inline void ath12k_dp_arch_peer_rx_tid_qref_reset(struct ath12k_dp *dp,
							 u16 peer_id, u16 tid)
{
	dp->ops->peer_rx_tid_qref_reset(dp->ab, peer_id, tid);
}
640
/* Dispatch: handle deferred deletion of @rx_tid's REO queue */
static inline
int ath12k_dp_arch_rx_tid_delete_handler(struct ath12k_dp *dp,
					 struct ath12k_dp_rx_tid_rxq *rx_tid)
{
	return dp->ops->rx_tid_delete_handler(dp->ab, rx_tid);
}
647
/* Reassemble a 6-byte MAC address from the 32-bit low part and the
 * 16-bit high part as carried in HW/FW descriptor words. Copies raw
 * bytes in host memory order (descriptor words are expected to hold
 * the address bytes in order — matches the existing usage; NOTE:
 * byte order is assumed, not shown here).
 */
static inline void ath12k_dp_get_mac_addr(u32 addr_l32, u16 addr_h16, u8 *addr)
{
	memcpy(addr, &addr_l32, 4);
	memcpy(addr + 4, &addr_h16, ETH_ALEN - 4);
}
653
/* Look up the per-device DP context within a DP hw group */
static inline struct ath12k_dp *
ath12k_dp_hw_grp_to_dp(struct ath12k_dp_hw_group *dp_hw_grp, u8 device_id)
{
	return dp_hw_grp->dp[device_id];
}
659
/* Dispatch: service DP rings for @irq_grp within @budget */
static inline int
ath12k_dp_service_srng(struct ath12k_dp *dp, struct ath12k_ext_irq_grp *irq_grp,
		       int budget)
{
	return dp->ops->service_srng(dp, irq_grp, budget);
}
666
/* Get the mac80211 hw associated with a DP pdev */
static inline struct ieee80211_hw *
ath12k_pdev_dp_to_hw(struct ath12k_pdev_dp *pdev)
{
	return pdev->hw;
}
672
/* RCU-dereference the DP pdev at @pdev_idx; the caller must hold
 * rcu_read_lock() (warned via RCU_LOCKDEP_WARN otherwise). May return
 * NULL if the pdev has been torn down.
 */
static inline struct ath12k_pdev_dp *
ath12k_dp_to_pdev_dp(struct ath12k_dp *dp, u8 pdev_idx)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "ath12k dp to dp pdev called without rcu lock");

	return rcu_dereference(dp->dp_pdevs[pdev_idx]);
}
681
/* Core DP API, implemented in the common DP module */
void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_link_vif *arvif);
void ath12k_dp_partner_cc_init(struct ath12k_base *ab);
int ath12k_dp_pdev_alloc(struct ath12k_base *ab);
void ath12k_dp_pdev_pre_alloc(struct ath12k *ar);
void ath12k_dp_pdev_free(struct ath12k_base *ab);
int ath12k_dp_peer_setup(struct ath12k *ar, int vdev_id, const u8 *addr);
void ath12k_dp_peer_cleanup(struct ath12k *ar, int vdev_id, const u8 *addr);
void ath12k_dp_srng_cleanup(struct ath12k_base *ab, struct dp_srng *ring);
int ath12k_dp_srng_setup(struct ath12k_base *ab, struct dp_srng *ring,
			 enum hal_ring_type type, int ring_num,
			 int mac_id, int num_entries);
void ath12k_dp_link_desc_cleanup(struct ath12k_base *ab,
				 struct dp_link_desc_bank *desc_bank,
				 u32 ring_type, struct dp_srng *ring);
int ath12k_dp_link_desc_setup(struct ath12k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks,
			      u32 ring_type, struct hal_srng *srng,
			      u32 n_link_desc);
/* Cookie-conversion lookups of SW descriptors by HW cookie/desc id */
struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_dp *dp,
						  u32 cookie);
struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_dp *dp,
						  u32 desc_id);
704 #endif
705