1 // SPDX-License-Identifier: ISC 2 /* 3 * Copyright (c) 2005-2011 Atheros Communications Inc. 4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. 5 * Copyright (c) 2018, The Linux Foundation. All rights reserved. 6 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 7 */ 8 9 #include "core.h" 10 #include "htc.h" 11 #include "htt.h" 12 #include "txrx.h" 13 #include "debug.h" 14 #include "trace.h" 15 #include "mac.h" 16 17 #include <linux/log2.h> 18 #include <linux/bitfield.h> 19 20 /* when under memory pressure rx ring refill may fail and needs a retry */ 21 #define HTT_RX_RING_REFILL_RETRY_MS 50 22 23 #define HTT_RX_RING_REFILL_RESCHED_MS 5 24 25 /* shortcut to interpret a raw memory buffer as a rx descriptor */ 26 #define HTT_RX_BUF_TO_RX_DESC(hw, buf) ath10k_htt_rx_desc_from_raw_buffer(hw, buf) 27 28 static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb); 29 30 static struct sk_buff * 31 ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr) 32 { 33 struct ath10k_skb_rxcb *rxcb; 34 35 hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr) 36 if (rxcb->paddr == paddr) 37 return ATH10K_RXCB_SKB(rxcb); 38 39 WARN_ON_ONCE(1); 40 return NULL; 41 } 42 43 static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt) 44 { 45 struct sk_buff *skb; 46 struct ath10k_skb_rxcb *rxcb; 47 struct hlist_node *n; 48 int i; 49 50 if (htt->rx_ring.in_ord_rx) { 51 hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) { 52 skb = ATH10K_RXCB_SKB(rxcb); 53 dma_unmap_single(htt->ar->dev, rxcb->paddr, 54 skb->len + skb_tailroom(skb), 55 DMA_FROM_DEVICE); 56 hash_del(&rxcb->hlist); 57 dev_kfree_skb_any(skb); 58 } 59 } else { 60 for (i = 0; i < htt->rx_ring.size; i++) { 61 skb = htt->rx_ring.netbufs_ring[i]; 62 if (!skb) 63 continue; 64 65 rxcb = ATH10K_SKB_RXCB(skb); 66 dma_unmap_single(htt->ar->dev, rxcb->paddr, 67 skb->len + skb_tailroom(skb), 68 DMA_FROM_DEVICE); 69 dev_kfree_skb_any(skb); 70 } 71 } 72 73 htt->rx_ring.fill_cnt = 0; 74 hash_init(htt->rx_ring.skb_table); 75 memset(htt->rx_ring.netbufs_ring, 0, 76 htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0])); 77 } 78 79 static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt) 80 { 81 return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32); 82 } 83 84 static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt) 85 { 86 return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64); 87 } 88 89 static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt, 90 void *vaddr) 91 { 92 htt->rx_ring.paddrs_ring_32 = vaddr; 93 } 94 95 static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt, 96 void *vaddr) 97 { 98 htt->rx_ring.paddrs_ring_64 = vaddr; 99 } 100 101 static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt, 102 dma_addr_t paddr, int idx) 103 { 104 htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr); 105 } 106 107 static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt, 108 dma_addr_t paddr, int idx) 109 { 110 htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr); 111 } 112 113 static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx) 114 { 115 htt->rx_ring.paddrs_ring_32[idx] = 0; 116 } 117 118 static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx) 119 { 120 htt->rx_ring.paddrs_ring_64[idx] = 0; 121 } 122 123 static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt) 124 { 125 return (void *)htt->rx_ring.paddrs_ring_32; 126 } 127 128 static void 
*ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_64;
}

static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct ath10k_hw_params *hw = &htt->ar->hw_params;
	struct htt_rx_desc *rx_desc;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	/* The Full Rx Reorder firmware has no way of telling the host
	 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
	 * To keep things simple make sure the ring is always half empty. This
	 * guarantees there'll be no replenishment overruns possible.
	 */
	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	if (idx < 0 || idx >= htt->rx_ring.size) {
		ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n");
		idx &= htt->rx_ring.size_mask;
		ret = -ENOMEM;
		goto fail;
	}

	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, skb->data);
		ath10k_htt_rx_desc_get_attention(hw, rx_desc)->flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		rxcb = ATH10K_SKB_RXCB(skb);
		rxcb->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		ath10k_htt_set_paddrs_ring(htt, paddr, idx);
		htt->rx_ring.fill_cnt++;

		if (htt->rx_ring.in_ord_rx) {
			hash_add(htt->rx_ring.skb_table,
				 &ATH10K_SKB_RXCB(skb)->hlist,
				 paddr);
		}

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	/*
	 * Make sure the rx buffer is updated before available buffer
	 * index to avoid any potential rx ring corruption.
	 */
	mb();
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
	return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up a significant amount of CPU cycles and
	 * starve other tasks, e.g. TX on an ethernet device while acting as
	 * a bridge with an ath10k wlan interface. This ended up with very
	 * poor performance once the host CPU was overwhelmed with RX on
	 * ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact that tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there aren't enough buffers on the RX ring the FW
	 * will not report RX until it is refilled with enough buffers. This
	 * automatically balances load with respect to CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability.
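	 *
	 * As an illustration (numbers only, not a guarantee): with a deficit
	 * of, say, a few hundred buffers and at most ATH10K_HTT_MAX_NUM_REFILL
	 * buffers posted per pass, the deficit is worked off over several
	 * passes, each rescheduled after HTT_RX_RING_REFILL_RESCHED_MS,
	 * instead of stalling the CPU in one long refill loop.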
236 */ 237 spin_lock_bh(&htt->rx_ring.lock); 238 num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt; 239 num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit); 240 num_deficit -= num_to_fill; 241 ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill); 242 if (ret == -ENOMEM) { 243 /* 244 * Failed to fill it to the desired level - 245 * we'll start a timer and try again next time. 246 * As long as enough buffers are left in the ring for 247 * another A-MPDU rx, no special recovery is needed. 248 */ 249 mod_timer(&htt->rx_ring.refill_retry_timer, jiffies + 250 msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS)); 251 } else if (num_deficit > 0) { 252 mod_timer(&htt->rx_ring.refill_retry_timer, jiffies + 253 msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS)); 254 } 255 spin_unlock_bh(&htt->rx_ring.lock); 256 } 257 258 static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t) 259 { 260 struct ath10k_htt *htt = timer_container_of(htt, t, 261 rx_ring.refill_retry_timer); 262 263 ath10k_htt_rx_msdu_buff_replenish(htt); 264 } 265 266 int ath10k_htt_rx_ring_refill(struct ath10k *ar) 267 { 268 struct ath10k_htt *htt = &ar->htt; 269 int ret; 270 271 if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) 272 return 0; 273 274 spin_lock_bh(&htt->rx_ring.lock); 275 ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level - 276 htt->rx_ring.fill_cnt)); 277 278 if (ret) 279 ath10k_htt_rx_ring_free(htt); 280 281 spin_unlock_bh(&htt->rx_ring.lock); 282 283 return ret; 284 } 285 286 void ath10k_htt_rx_free(struct ath10k_htt *htt) 287 { 288 if (htt->ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) 289 return; 290 291 timer_delete_sync(&htt->rx_ring.refill_retry_timer); 292 293 skb_queue_purge(&htt->rx_msdus_q); 294 skb_queue_purge(&htt->rx_in_ord_compl_q); 295 skb_queue_purge(&htt->tx_fetch_ind_q); 296 297 spin_lock_bh(&htt->rx_ring.lock); 298 ath10k_htt_rx_ring_free(htt); 299 spin_unlock_bh(&htt->rx_ring.lock); 300 301 dma_free_coherent(htt->ar->dev, 302 ath10k_htt_get_rx_ring_size(htt), 303 ath10k_htt_get_vaddr_ring(htt), 304 htt->rx_ring.base_paddr); 305 306 ath10k_htt_config_paddrs_ring(htt, NULL); 307 308 dma_free_coherent(htt->ar->dev, 309 sizeof(*htt->rx_ring.alloc_idx.vaddr), 310 htt->rx_ring.alloc_idx.vaddr, 311 htt->rx_ring.alloc_idx.paddr); 312 htt->rx_ring.alloc_idx.vaddr = NULL; 313 314 kfree(htt->rx_ring.netbufs_ring); 315 htt->rx_ring.netbufs_ring = NULL; 316 } 317 318 static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt) 319 { 320 struct ath10k *ar = htt->ar; 321 int idx; 322 struct sk_buff *msdu; 323 324 lockdep_assert_held(&htt->rx_ring.lock); 325 326 if (htt->rx_ring.fill_cnt == 0) { 327 ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n"); 328 return NULL; 329 } 330 331 idx = htt->rx_ring.sw_rd_idx.msdu_payld; 332 msdu = htt->rx_ring.netbufs_ring[idx]; 333 htt->rx_ring.netbufs_ring[idx] = NULL; 334 ath10k_htt_reset_paddrs_ring(htt, idx); 335 336 idx++; 337 idx &= htt->rx_ring.size_mask; 338 htt->rx_ring.sw_rd_idx.msdu_payld = idx; 339 htt->rx_ring.fill_cnt--; 340 341 dma_unmap_single(htt->ar->dev, 342 ATH10K_SKB_RXCB(msdu)->paddr, 343 msdu->len + skb_tailroom(msdu), 344 DMA_FROM_DEVICE); 345 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ", 346 msdu->data, msdu->len + skb_tailroom(msdu)); 347 348 return msdu; 349 } 350 351 /* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */ 352 static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, 353 struct sk_buff_head *amsdu) 354 { 355 struct ath10k *ar = htt->ar; 356 
struct ath10k_hw_params *hw = &ar->hw_params; 357 int msdu_len, msdu_chaining = 0; 358 struct sk_buff *msdu; 359 struct htt_rx_desc *rx_desc; 360 struct rx_attention *rx_desc_attention; 361 struct rx_frag_info_common *rx_desc_frag_info_common; 362 struct rx_msdu_start_common *rx_desc_msdu_start_common; 363 struct rx_msdu_end_common *rx_desc_msdu_end_common; 364 365 lockdep_assert_held(&htt->rx_ring.lock); 366 367 for (;;) { 368 int last_msdu, msdu_len_invalid, msdu_chained; 369 370 msdu = ath10k_htt_rx_netbuf_pop(htt); 371 if (!msdu) { 372 __skb_queue_purge(amsdu); 373 return -ENOENT; 374 } 375 376 __skb_queue_tail(amsdu, msdu); 377 378 rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data); 379 rx_desc_attention = ath10k_htt_rx_desc_get_attention(hw, rx_desc); 380 rx_desc_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, 381 rx_desc); 382 rx_desc_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rx_desc); 383 rx_desc_frag_info_common = ath10k_htt_rx_desc_get_frag_info(hw, rx_desc); 384 385 /* FIXME: we must report msdu payload since this is what caller 386 * expects now 387 */ 388 skb_put(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset); 389 skb_pull(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset); 390 391 /* 392 * Sanity check - confirm the HW is finished filling in the 393 * rx data. 394 * If the HW and SW are working correctly, then it's guaranteed 395 * that the HW's MAC DMA is done before this point in the SW. 396 * To prevent the case that we handle a stale Rx descriptor, 397 * just assert for now until we have a way to recover. 398 */ 399 if (!(__le32_to_cpu(rx_desc_attention->flags) 400 & RX_ATTENTION_FLAGS_MSDU_DONE)) { 401 __skb_queue_purge(amsdu); 402 return -EIO; 403 } 404 405 msdu_len_invalid = !!(__le32_to_cpu(rx_desc_attention->flags) 406 & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR | 407 RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR)); 408 msdu_len = MS(__le32_to_cpu(rx_desc_msdu_start_common->info0), 409 RX_MSDU_START_INFO0_MSDU_LENGTH); 410 msdu_chained = rx_desc_frag_info_common->ring2_more_count; 411 412 if (msdu_len_invalid) 413 msdu_len = 0; 414 415 skb_trim(msdu, 0); 416 skb_put(msdu, min(msdu_len, ath10k_htt_rx_msdu_size(hw))); 417 msdu_len -= msdu->len; 418 419 /* Note: Chained buffers do not contain rx descriptor */ 420 while (msdu_chained--) { 421 msdu = ath10k_htt_rx_netbuf_pop(htt); 422 if (!msdu) { 423 __skb_queue_purge(amsdu); 424 return -ENOENT; 425 } 426 427 __skb_queue_tail(amsdu, msdu); 428 skb_trim(msdu, 0); 429 skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE)); 430 msdu_len -= msdu->len; 431 msdu_chaining = 1; 432 } 433 434 last_msdu = __le32_to_cpu(rx_desc_msdu_end_common->info0) & 435 RX_MSDU_END_INFO0_LAST_MSDU; 436 437 /* FIXME: why are we skipping the first part of the rx_desc? */ 438 trace_ath10k_htt_rx_desc(ar, (void *)rx_desc + sizeof(u32), 439 hw->rx_desc_ops->rx_desc_size - sizeof(u32)); 440 441 if (last_msdu) 442 break; 443 } 444 445 if (skb_queue_empty(amsdu)) 446 msdu_chaining = -1; 447 448 /* 449 * Don't refill the ring yet. 450 * 451 * First, the elements popped here are still in use - it is not 452 * safe to overwrite them until the matching call to 453 * mpdu_desc_list_next. Second, for efficiency it is preferable to 454 * refill the rx ring with 1 PPDU's worth of rx buffers (something 455 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers 456 * (something like 3 buffers). 
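	 * (The "32 x 3" figure is only illustrative: on the order of a few
	 * tens of MPDUs per PPDU, each of which may span more than one rx
	 * buffer when the MSDU is chained, as handled above.)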
Consequently, we'll rely on the txrx 457 * SW to tell us when it is done pulling all the PPDU's rx buffers 458 * out of the rx ring, and then refill it just once. 459 */ 460 461 return msdu_chaining; 462 } 463 464 static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt, 465 u64 paddr) 466 { 467 struct ath10k *ar = htt->ar; 468 struct ath10k_skb_rxcb *rxcb; 469 struct sk_buff *msdu; 470 471 lockdep_assert_held(&htt->rx_ring.lock); 472 473 msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr); 474 if (!msdu) 475 return NULL; 476 477 rxcb = ATH10K_SKB_RXCB(msdu); 478 hash_del(&rxcb->hlist); 479 htt->rx_ring.fill_cnt--; 480 481 dma_unmap_single(htt->ar->dev, rxcb->paddr, 482 msdu->len + skb_tailroom(msdu), 483 DMA_FROM_DEVICE); 484 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ", 485 msdu->data, msdu->len + skb_tailroom(msdu)); 486 487 return msdu; 488 } 489 490 static inline void ath10k_htt_append_frag_list(struct sk_buff *skb_head, 491 struct sk_buff *frag_list, 492 unsigned int frag_len) 493 { 494 skb_shinfo(skb_head)->frag_list = frag_list; 495 skb_head->data_len = frag_len; 496 skb_head->len += skb_head->data_len; 497 } 498 499 static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt *htt, 500 struct sk_buff *msdu, 501 struct htt_rx_in_ord_msdu_desc **msdu_desc) 502 { 503 struct ath10k *ar = htt->ar; 504 struct ath10k_hw_params *hw = &ar->hw_params; 505 u32 paddr; 506 struct sk_buff *frag_buf; 507 struct sk_buff *prev_frag_buf; 508 u8 last_frag; 509 struct htt_rx_in_ord_msdu_desc *ind_desc = *msdu_desc; 510 struct htt_rx_desc *rxd; 511 int amsdu_len = __le16_to_cpu(ind_desc->msdu_len); 512 513 rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data); 514 trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size); 515 516 skb_put(msdu, hw->rx_desc_ops->rx_desc_size); 517 skb_pull(msdu, hw->rx_desc_ops->rx_desc_size); 518 skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw))); 519 amsdu_len -= msdu->len; 520 521 last_frag = ind_desc->reserved; 522 if (last_frag) { 523 if (amsdu_len) { 524 ath10k_warn(ar, "invalid amsdu len %u, left %d", 525 __le16_to_cpu(ind_desc->msdu_len), 526 amsdu_len); 527 } 528 return 0; 529 } 530 531 ind_desc++; 532 paddr = __le32_to_cpu(ind_desc->msdu_paddr); 533 frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr); 534 if (!frag_buf) { 535 ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%x", paddr); 536 return -ENOENT; 537 } 538 539 skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE)); 540 ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len); 541 542 amsdu_len -= frag_buf->len; 543 prev_frag_buf = frag_buf; 544 last_frag = ind_desc->reserved; 545 while (!last_frag) { 546 ind_desc++; 547 paddr = __le32_to_cpu(ind_desc->msdu_paddr); 548 frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr); 549 if (!frag_buf) { 550 ath10k_warn(ar, "failed to pop frag-n paddr: 0x%x", 551 paddr); 552 prev_frag_buf->next = NULL; 553 return -ENOENT; 554 } 555 556 skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE)); 557 last_frag = ind_desc->reserved; 558 amsdu_len -= frag_buf->len; 559 560 prev_frag_buf->next = frag_buf; 561 prev_frag_buf = frag_buf; 562 } 563 564 if (amsdu_len) { 565 ath10k_warn(ar, "invalid amsdu len %u, left %d", 566 __le16_to_cpu(ind_desc->msdu_len), amsdu_len); 567 } 568 569 *msdu_desc = ind_desc; 570 571 prev_frag_buf->next = NULL; 572 return 0; 573 } 574 575 static int 576 ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt *htt, 577 struct sk_buff *msdu, 578 struct htt_rx_in_ord_msdu_desc_ext **msdu_desc) 579 { 580 struct ath10k *ar = htt->ar; 581 
struct ath10k_hw_params *hw = &ar->hw_params; 582 u64 paddr; 583 struct sk_buff *frag_buf; 584 struct sk_buff *prev_frag_buf; 585 u8 last_frag; 586 struct htt_rx_in_ord_msdu_desc_ext *ind_desc = *msdu_desc; 587 struct htt_rx_desc *rxd; 588 int amsdu_len = __le16_to_cpu(ind_desc->msdu_len); 589 590 rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data); 591 trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size); 592 593 skb_put(msdu, hw->rx_desc_ops->rx_desc_size); 594 skb_pull(msdu, hw->rx_desc_ops->rx_desc_size); 595 skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw))); 596 amsdu_len -= msdu->len; 597 598 last_frag = ind_desc->reserved; 599 if (last_frag) { 600 if (amsdu_len) { 601 ath10k_warn(ar, "invalid amsdu len %u, left %d", 602 __le16_to_cpu(ind_desc->msdu_len), 603 amsdu_len); 604 } 605 return 0; 606 } 607 608 ind_desc++; 609 paddr = __le64_to_cpu(ind_desc->msdu_paddr); 610 frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr); 611 if (!frag_buf) { 612 ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%llx", paddr); 613 return -ENOENT; 614 } 615 616 skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE)); 617 ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len); 618 619 amsdu_len -= frag_buf->len; 620 prev_frag_buf = frag_buf; 621 last_frag = ind_desc->reserved; 622 while (!last_frag) { 623 ind_desc++; 624 paddr = __le64_to_cpu(ind_desc->msdu_paddr); 625 frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr); 626 if (!frag_buf) { 627 ath10k_warn(ar, "failed to pop frag-n paddr: 0x%llx", 628 paddr); 629 prev_frag_buf->next = NULL; 630 return -ENOENT; 631 } 632 633 skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE)); 634 last_frag = ind_desc->reserved; 635 amsdu_len -= frag_buf->len; 636 637 prev_frag_buf->next = frag_buf; 638 prev_frag_buf = frag_buf; 639 } 640 641 if (amsdu_len) { 642 ath10k_warn(ar, "invalid amsdu len %u, left %d", 643 __le16_to_cpu(ind_desc->msdu_len), amsdu_len); 644 } 645 646 *msdu_desc = ind_desc; 647 648 prev_frag_buf->next = NULL; 649 return 0; 650 } 651 652 static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt, 653 struct htt_rx_in_ord_ind *ev, 654 struct sk_buff_head *list) 655 { 656 struct ath10k *ar = htt->ar; 657 struct ath10k_hw_params *hw = &ar->hw_params; 658 struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32; 659 struct htt_rx_desc *rxd; 660 struct rx_attention *rxd_attention; 661 struct sk_buff *msdu; 662 int msdu_count, ret; 663 bool is_offload; 664 u32 paddr; 665 666 lockdep_assert_held(&htt->rx_ring.lock); 667 668 msdu_count = __le16_to_cpu(ev->msdu_count); 669 is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK); 670 671 while (msdu_count--) { 672 paddr = __le32_to_cpu(msdu_desc->msdu_paddr); 673 674 msdu = ath10k_htt_rx_pop_paddr(htt, paddr); 675 if (!msdu) { 676 __skb_queue_purge(list); 677 return -ENOENT; 678 } 679 680 if (!is_offload && ar->monitor_arvif) { 681 ret = ath10k_htt_rx_handle_amsdu_mon_32(htt, msdu, 682 &msdu_desc); 683 if (ret) { 684 __skb_queue_purge(list); 685 return ret; 686 } 687 __skb_queue_tail(list, msdu); 688 msdu_desc++; 689 continue; 690 } 691 692 __skb_queue_tail(list, msdu); 693 694 if (!is_offload) { 695 rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data); 696 rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); 697 698 trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size); 699 700 skb_put(msdu, hw->rx_desc_ops->rx_desc_size); 701 skb_pull(msdu, hw->rx_desc_ops->rx_desc_size); 702 skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len)); 703 704 if (!(__le32_to_cpu(rxd_attention->flags) & 705 
RX_ATTENTION_FLAGS_MSDU_DONE)) { 706 ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n"); 707 return -EIO; 708 } 709 } 710 711 msdu_desc++; 712 } 713 714 return 0; 715 } 716 717 static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt, 718 struct htt_rx_in_ord_ind *ev, 719 struct sk_buff_head *list) 720 { 721 struct ath10k *ar = htt->ar; 722 struct ath10k_hw_params *hw = &ar->hw_params; 723 struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64; 724 struct htt_rx_desc *rxd; 725 struct rx_attention *rxd_attention; 726 struct sk_buff *msdu; 727 int msdu_count, ret; 728 bool is_offload; 729 u64 paddr; 730 731 lockdep_assert_held(&htt->rx_ring.lock); 732 733 msdu_count = __le16_to_cpu(ev->msdu_count); 734 is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK); 735 736 while (msdu_count--) { 737 paddr = __le64_to_cpu(msdu_desc->msdu_paddr); 738 msdu = ath10k_htt_rx_pop_paddr(htt, paddr); 739 if (!msdu) { 740 __skb_queue_purge(list); 741 return -ENOENT; 742 } 743 744 if (!is_offload && ar->monitor_arvif) { 745 ret = ath10k_htt_rx_handle_amsdu_mon_64(htt, msdu, 746 &msdu_desc); 747 if (ret) { 748 __skb_queue_purge(list); 749 return ret; 750 } 751 __skb_queue_tail(list, msdu); 752 msdu_desc++; 753 continue; 754 } 755 756 __skb_queue_tail(list, msdu); 757 758 if (!is_offload) { 759 rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data); 760 rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); 761 762 trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size); 763 764 skb_put(msdu, hw->rx_desc_ops->rx_desc_size); 765 skb_pull(msdu, hw->rx_desc_ops->rx_desc_size); 766 skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len)); 767 768 if (!(__le32_to_cpu(rxd_attention->flags) & 769 RX_ATTENTION_FLAGS_MSDU_DONE)) { 770 ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n"); 771 return -EIO; 772 } 773 } 774 775 msdu_desc++; 776 } 777 778 return 0; 779 } 780 781 int ath10k_htt_rx_alloc(struct ath10k_htt *htt) 782 { 783 struct ath10k *ar = htt->ar; 784 dma_addr_t paddr; 785 void *vaddr, *vaddr_ring; 786 size_t size; 787 struct timer_list *timer = &htt->rx_ring.refill_retry_timer; 788 789 if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) 790 return 0; 791 792 htt->rx_confused = false; 793 794 /* XXX: The fill level could be changed during runtime in response to 795 * the host processing latency. Is this really worth it? 
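	 *
	 * Note that whatever fill level is chosen still has to satisfy the
	 * BUILD_BUG_ON() in __ath10k_htt_rx_ring_fill_n(): for the full rx
	 * reorder firmware the ring is kept no more than half full, so a
	 * runtime-tunable level would be bounded by HTT_RX_RING_SIZE / 2.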
796 */ 797 htt->rx_ring.size = HTT_RX_RING_SIZE; 798 htt->rx_ring.size_mask = htt->rx_ring.size - 1; 799 htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level; 800 801 if (!is_power_of_2(htt->rx_ring.size)) { 802 ath10k_warn(ar, "htt rx ring size is not power of 2\n"); 803 return -EINVAL; 804 } 805 806 htt->rx_ring.netbufs_ring = 807 kcalloc(htt->rx_ring.size, sizeof(struct sk_buff *), 808 GFP_KERNEL); 809 if (!htt->rx_ring.netbufs_ring) 810 goto err_netbuf; 811 812 size = ath10k_htt_get_rx_ring_size(htt); 813 814 vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL); 815 if (!vaddr_ring) 816 goto err_dma_ring; 817 818 ath10k_htt_config_paddrs_ring(htt, vaddr_ring); 819 htt->rx_ring.base_paddr = paddr; 820 821 vaddr = dma_alloc_coherent(htt->ar->dev, 822 sizeof(*htt->rx_ring.alloc_idx.vaddr), 823 &paddr, GFP_KERNEL); 824 if (!vaddr) 825 goto err_dma_idx; 826 827 htt->rx_ring.alloc_idx.vaddr = vaddr; 828 htt->rx_ring.alloc_idx.paddr = paddr; 829 htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask; 830 *htt->rx_ring.alloc_idx.vaddr = 0; 831 832 /* Initialize the Rx refill retry timer */ 833 timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0); 834 835 spin_lock_init(&htt->rx_ring.lock); 836 837 htt->rx_ring.fill_cnt = 0; 838 htt->rx_ring.sw_rd_idx.msdu_payld = 0; 839 hash_init(htt->rx_ring.skb_table); 840 841 skb_queue_head_init(&htt->rx_msdus_q); 842 skb_queue_head_init(&htt->rx_in_ord_compl_q); 843 skb_queue_head_init(&htt->tx_fetch_ind_q); 844 atomic_set(&htt->num_mpdus_ready, 0); 845 846 ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n", 847 htt->rx_ring.size, htt->rx_ring.fill_level); 848 return 0; 849 850 err_dma_idx: 851 dma_free_coherent(htt->ar->dev, 852 ath10k_htt_get_rx_ring_size(htt), 853 vaddr_ring, 854 htt->rx_ring.base_paddr); 855 ath10k_htt_config_paddrs_ring(htt, NULL); 856 err_dma_ring: 857 kfree(htt->rx_ring.netbufs_ring); 858 htt->rx_ring.netbufs_ring = NULL; 859 err_netbuf: 860 return -ENOMEM; 861 } 862 863 static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar, 864 enum htt_rx_mpdu_encrypt_type type) 865 { 866 switch (type) { 867 case HTT_RX_MPDU_ENCRYPT_NONE: 868 return 0; 869 case HTT_RX_MPDU_ENCRYPT_WEP40: 870 case HTT_RX_MPDU_ENCRYPT_WEP104: 871 return IEEE80211_WEP_IV_LEN; 872 case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC: 873 case HTT_RX_MPDU_ENCRYPT_TKIP_WPA: 874 return IEEE80211_TKIP_IV_LEN; 875 case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: 876 return IEEE80211_CCMP_HDR_LEN; 877 case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2: 878 return IEEE80211_CCMP_256_HDR_LEN; 879 case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2: 880 case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2: 881 return IEEE80211_GCMP_HDR_LEN; 882 case HTT_RX_MPDU_ENCRYPT_WEP128: 883 case HTT_RX_MPDU_ENCRYPT_WAPI: 884 break; 885 } 886 887 ath10k_warn(ar, "unsupported encryption type %d\n", type); 888 return 0; 889 } 890 891 #define MICHAEL_MIC_LEN 8 892 893 static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar, 894 enum htt_rx_mpdu_encrypt_type type) 895 { 896 switch (type) { 897 case HTT_RX_MPDU_ENCRYPT_NONE: 898 case HTT_RX_MPDU_ENCRYPT_WEP40: 899 case HTT_RX_MPDU_ENCRYPT_WEP104: 900 case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC: 901 case HTT_RX_MPDU_ENCRYPT_TKIP_WPA: 902 return 0; 903 case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: 904 return IEEE80211_CCMP_MIC_LEN; 905 case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2: 906 return IEEE80211_CCMP_256_MIC_LEN; 907 case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2: 908 case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2: 909 return IEEE80211_GCMP_MIC_LEN; 910 case 
HTT_RX_MPDU_ENCRYPT_WEP128: 911 case HTT_RX_MPDU_ENCRYPT_WAPI: 912 break; 913 } 914 915 ath10k_warn(ar, "unsupported encryption type %d\n", type); 916 return 0; 917 } 918 919 static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar, 920 enum htt_rx_mpdu_encrypt_type type) 921 { 922 switch (type) { 923 case HTT_RX_MPDU_ENCRYPT_NONE: 924 case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: 925 case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2: 926 case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2: 927 case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2: 928 return 0; 929 case HTT_RX_MPDU_ENCRYPT_WEP40: 930 case HTT_RX_MPDU_ENCRYPT_WEP104: 931 return IEEE80211_WEP_ICV_LEN; 932 case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC: 933 case HTT_RX_MPDU_ENCRYPT_TKIP_WPA: 934 return IEEE80211_TKIP_ICV_LEN; 935 case HTT_RX_MPDU_ENCRYPT_WEP128: 936 case HTT_RX_MPDU_ENCRYPT_WAPI: 937 break; 938 } 939 940 ath10k_warn(ar, "unsupported encryption type %d\n", type); 941 return 0; 942 } 943 944 struct amsdu_subframe_hdr { 945 u8 dst[ETH_ALEN]; 946 u8 src[ETH_ALEN]; 947 __be16 len; 948 } __packed; 949 950 #define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63) 951 952 static inline u8 ath10k_bw_to_mac80211_bw(u8 bw) 953 { 954 u8 ret = 0; 955 956 switch (bw) { 957 case 0: 958 ret = RATE_INFO_BW_20; 959 break; 960 case 1: 961 ret = RATE_INFO_BW_40; 962 break; 963 case 2: 964 ret = RATE_INFO_BW_80; 965 break; 966 case 3: 967 ret = RATE_INFO_BW_160; 968 break; 969 } 970 971 return ret; 972 } 973 974 static void ath10k_htt_rx_h_rates(struct ath10k *ar, 975 struct ieee80211_rx_status *status, 976 struct htt_rx_desc *rxd) 977 { 978 struct ath10k_hw_params *hw = &ar->hw_params; 979 struct rx_attention *rxd_attention; 980 struct rx_mpdu_start *rxd_mpdu_start; 981 struct rx_mpdu_end *rxd_mpdu_end; 982 struct rx_msdu_start_common *rxd_msdu_start_common; 983 struct rx_msdu_end_common *rxd_msdu_end_common; 984 struct rx_ppdu_start *rxd_ppdu_start; 985 struct ieee80211_supported_band *sband; 986 u8 cck, rate, bw, sgi, mcs, nss; 987 u8 *rxd_msdu_payload; 988 u8 preamble = 0; 989 u8 group_id; 990 u32 info1, info2, info3; 991 u32 stbc, nsts_su; 992 993 rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); 994 rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd); 995 rxd_mpdu_end = ath10k_htt_rx_desc_get_mpdu_end(hw, rxd); 996 rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd); 997 rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd); 998 rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd); 999 rxd_msdu_payload = ath10k_htt_rx_desc_get_msdu_payload(hw, rxd); 1000 1001 info1 = __le32_to_cpu(rxd_ppdu_start->info1); 1002 info2 = __le32_to_cpu(rxd_ppdu_start->info2); 1003 info3 = __le32_to_cpu(rxd_ppdu_start->info3); 1004 1005 preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE); 1006 1007 switch (preamble) { 1008 case HTT_RX_LEGACY: 1009 /* To get legacy rate index band is required. Since band can't 1010 * be undefined check if freq is non-zero. 
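		 * The L-SIG rate field below is only meaningful together with
		 * the CCK/OFDM selector bit; ath10k_mac_hw_rate_to_idx() then
		 * maps the hardware rate code to the corresponding index in
		 * the band's rate table.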
1011 */ 1012 if (!status->freq) 1013 return; 1014 1015 cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT; 1016 rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE); 1017 rate &= ~RX_PPDU_START_RATE_FLAG; 1018 1019 sband = &ar->mac.sbands[status->band]; 1020 status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck); 1021 break; 1022 case HTT_RX_HT: 1023 case HTT_RX_HT_WITH_TXBF: 1024 /* HT-SIG - Table 20-11 in info2 and info3 */ 1025 mcs = info2 & 0x1F; 1026 nss = mcs >> 3; 1027 bw = (info2 >> 7) & 1; 1028 sgi = (info3 >> 7) & 1; 1029 1030 status->rate_idx = mcs; 1031 status->encoding = RX_ENC_HT; 1032 if (sgi) 1033 status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 1034 if (bw) 1035 status->bw = RATE_INFO_BW_40; 1036 break; 1037 case HTT_RX_VHT: 1038 case HTT_RX_VHT_WITH_TXBF: 1039 /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3 1040 * TODO check this 1041 */ 1042 bw = info2 & 3; 1043 sgi = info3 & 1; 1044 stbc = (info2 >> 3) & 1; 1045 group_id = (info2 >> 4) & 0x3F; 1046 1047 if (GROUP_ID_IS_SU_MIMO(group_id)) { 1048 mcs = (info3 >> 4) & 0x0F; 1049 nsts_su = ((info2 >> 10) & 0x07); 1050 if (stbc) 1051 nss = (nsts_su >> 2) + 1; 1052 else 1053 nss = (nsts_su + 1); 1054 } else { 1055 /* Hardware doesn't decode VHT-SIG-B into Rx descriptor 1056 * so it's impossible to decode MCS. Also since 1057 * firmware consumes Group Id Management frames host 1058 * has no knowledge regarding group/user position 1059 * mapping so it's impossible to pick the correct Nsts 1060 * from VHT-SIG-A1. 1061 * 1062 * Bandwidth and SGI are valid so report the rateinfo 1063 * on best-effort basis. 1064 */ 1065 mcs = 0; 1066 nss = 1; 1067 } 1068 1069 if (mcs > 0x09) { 1070 ath10k_warn(ar, "invalid MCS received %u\n", mcs); 1071 ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n", 1072 __le32_to_cpu(rxd_attention->flags), 1073 __le32_to_cpu(rxd_mpdu_start->info0), 1074 __le32_to_cpu(rxd_mpdu_start->info1), 1075 __le32_to_cpu(rxd_msdu_start_common->info0), 1076 __le32_to_cpu(rxd_msdu_start_common->info1), 1077 rxd_ppdu_start->info0, 1078 __le32_to_cpu(rxd_ppdu_start->info1), 1079 __le32_to_cpu(rxd_ppdu_start->info2), 1080 __le32_to_cpu(rxd_ppdu_start->info3), 1081 __le32_to_cpu(rxd_ppdu_start->info4)); 1082 1083 ath10k_warn(ar, "msdu end %08x mpdu end %08x\n", 1084 __le32_to_cpu(rxd_msdu_end_common->info0), 1085 __le32_to_cpu(rxd_mpdu_end->info0)); 1086 1087 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, 1088 "rx desc msdu payload: ", 1089 rxd_msdu_payload, 50); 1090 } 1091 1092 status->rate_idx = mcs; 1093 status->nss = nss; 1094 1095 if (sgi) 1096 status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 1097 1098 status->bw = ath10k_bw_to_mac80211_bw(bw); 1099 status->encoding = RX_ENC_VHT; 1100 break; 1101 default: 1102 break; 1103 } 1104 } 1105 1106 static struct ieee80211_channel * 1107 ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd) 1108 { 1109 struct ath10k_hw_params *hw = &ar->hw_params; 1110 struct rx_attention *rxd_attention; 1111 struct rx_msdu_end_common *rxd_msdu_end_common; 1112 struct rx_mpdu_start *rxd_mpdu_start; 1113 struct ath10k_peer *peer; 1114 struct ath10k_vif *arvif; 1115 struct cfg80211_chan_def def; 1116 u16 peer_id; 1117 1118 lockdep_assert_held(&ar->data_lock); 1119 1120 if (!rxd) 1121 return NULL; 1122 1123 rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); 1124 rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd); 1125 rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd); 1126 1127 if (rxd_attention->flags & 
1128 __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID)) 1129 return NULL; 1130 1131 if (!(rxd_msdu_end_common->info0 & 1132 __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU))) 1133 return NULL; 1134 1135 peer_id = MS(__le32_to_cpu(rxd_mpdu_start->info0), 1136 RX_MPDU_START_INFO0_PEER_IDX); 1137 1138 peer = ath10k_peer_find_by_id(ar, peer_id); 1139 if (!peer) 1140 return NULL; 1141 1142 arvif = ath10k_get_arvif(ar, peer->vdev_id); 1143 if (WARN_ON_ONCE(!arvif)) 1144 return NULL; 1145 1146 if (ath10k_mac_vif_chan(arvif->vif, &def)) 1147 return NULL; 1148 1149 return def.chan; 1150 } 1151 1152 static struct ieee80211_channel * 1153 ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id) 1154 { 1155 struct ath10k_vif *arvif; 1156 struct cfg80211_chan_def def; 1157 1158 lockdep_assert_held(&ar->data_lock); 1159 1160 list_for_each_entry(arvif, &ar->arvifs, list) { 1161 if (arvif->vdev_id == vdev_id && 1162 ath10k_mac_vif_chan(arvif->vif, &def) == 0) 1163 return def.chan; 1164 } 1165 1166 return NULL; 1167 } 1168 1169 static void 1170 ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw, 1171 struct ieee80211_chanctx_conf *conf, 1172 void *data) 1173 { 1174 struct cfg80211_chan_def *def = data; 1175 1176 *def = conf->def; 1177 } 1178 1179 static struct ieee80211_channel * 1180 ath10k_htt_rx_h_any_channel(struct ath10k *ar) 1181 { 1182 struct cfg80211_chan_def def = {}; 1183 1184 ieee80211_iter_chan_contexts_atomic(ar->hw, 1185 ath10k_htt_rx_h_any_chan_iter, 1186 &def); 1187 1188 return def.chan; 1189 } 1190 1191 static bool ath10k_htt_rx_h_channel(struct ath10k *ar, 1192 struct ieee80211_rx_status *status, 1193 struct htt_rx_desc *rxd, 1194 u32 vdev_id) 1195 { 1196 struct ieee80211_channel *ch; 1197 1198 spin_lock_bh(&ar->data_lock); 1199 ch = ar->scan_channel; 1200 if (!ch) 1201 ch = ar->rx_channel; 1202 if (!ch) 1203 ch = ath10k_htt_rx_h_peer_channel(ar, rxd); 1204 if (!ch) 1205 ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id); 1206 if (!ch) 1207 ch = ath10k_htt_rx_h_any_channel(ar); 1208 if (!ch) 1209 ch = ar->tgt_oper_chan; 1210 spin_unlock_bh(&ar->data_lock); 1211 1212 if (!ch) 1213 return false; 1214 1215 status->band = ch->band; 1216 status->freq = ch->center_freq; 1217 1218 return true; 1219 } 1220 1221 static void ath10k_htt_rx_h_signal(struct ath10k *ar, 1222 struct ieee80211_rx_status *status, 1223 struct htt_rx_desc *rxd) 1224 { 1225 struct ath10k_hw_params *hw = &ar->hw_params; 1226 struct rx_ppdu_start *rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd); 1227 int i; 1228 1229 for (i = 0; i < IEEE80211_MAX_CHAINS ; i++) { 1230 status->chains &= ~BIT(i); 1231 1232 if (rxd_ppdu_start->rssi_chains[i].pri20_mhz != 0x80) { 1233 status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR + 1234 rxd_ppdu_start->rssi_chains[i].pri20_mhz; 1235 1236 status->chains |= BIT(i); 1237 } 1238 } 1239 1240 /* FIXME: Get real NF */ 1241 status->signal = ATH10K_DEFAULT_NOISE_FLOOR + 1242 rxd_ppdu_start->rssi_comb; 1243 status->flag &= ~RX_FLAG_NO_SIGNAL_VAL; 1244 } 1245 1246 static void ath10k_htt_rx_h_mactime(struct ath10k *ar, 1247 struct ieee80211_rx_status *status, 1248 struct htt_rx_desc *rxd) 1249 { 1250 struct ath10k_hw_params *hw = &ar->hw_params; 1251 struct rx_ppdu_end_common *rxd_ppdu_end_common; 1252 1253 rxd_ppdu_end_common = ath10k_htt_rx_desc_get_ppdu_end(hw, rxd); 1254 1255 /* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This 1256 * means all prior MSDUs in a PPDU are reported to mac80211 without the 1257 * TSF. Is it worth holding frames until end of PPDU is known? 
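	 * For now the 32-bit TSF from the last MPDU's ppdu_end is reported
	 * as-is and mac80211 is told it marks the end of the frame via
	 * RX_FLAG_MACTIME_END.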
1258 * 1259 * FIXME: Can we get/compute 64bit TSF? 1260 */ 1261 status->mactime = __le32_to_cpu(rxd_ppdu_end_common->tsf_timestamp); 1262 status->flag |= RX_FLAG_MACTIME_END; 1263 } 1264 1265 static void ath10k_htt_rx_h_ppdu(struct ath10k *ar, 1266 struct sk_buff_head *amsdu, 1267 struct ieee80211_rx_status *status, 1268 u32 vdev_id) 1269 { 1270 struct sk_buff *first; 1271 struct ath10k_hw_params *hw = &ar->hw_params; 1272 struct htt_rx_desc *rxd; 1273 struct rx_attention *rxd_attention; 1274 bool is_first_ppdu; 1275 bool is_last_ppdu; 1276 1277 if (skb_queue_empty(amsdu)) 1278 return; 1279 1280 first = skb_peek(amsdu); 1281 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 1282 (void *)first->data - hw->rx_desc_ops->rx_desc_size); 1283 1284 rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); 1285 1286 is_first_ppdu = !!(rxd_attention->flags & 1287 __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU)); 1288 is_last_ppdu = !!(rxd_attention->flags & 1289 __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU)); 1290 1291 if (is_first_ppdu) { 1292 /* New PPDU starts so clear out the old per-PPDU status. */ 1293 status->freq = 0; 1294 status->rate_idx = 0; 1295 status->nss = 0; 1296 status->encoding = RX_ENC_LEGACY; 1297 status->bw = RATE_INFO_BW_20; 1298 1299 status->flag &= ~RX_FLAG_MACTIME; 1300 status->flag |= RX_FLAG_NO_SIGNAL_VAL; 1301 1302 status->flag &= ~(RX_FLAG_AMPDU_IS_LAST); 1303 status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN; 1304 status->ampdu_reference = ar->ampdu_reference; 1305 1306 ath10k_htt_rx_h_signal(ar, status, rxd); 1307 ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id); 1308 ath10k_htt_rx_h_rates(ar, status, rxd); 1309 } 1310 1311 if (is_last_ppdu) { 1312 ath10k_htt_rx_h_mactime(ar, status, rxd); 1313 1314 /* set ampdu last segment flag */ 1315 status->flag |= RX_FLAG_AMPDU_IS_LAST; 1316 ar->ampdu_reference++; 1317 } 1318 } 1319 1320 static const char * const tid_to_ac[] = { 1321 "BE", 1322 "BK", 1323 "BK", 1324 "BE", 1325 "VI", 1326 "VI", 1327 "VO", 1328 "VO", 1329 }; 1330 1331 static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size) 1332 { 1333 u8 *qc; 1334 int tid; 1335 1336 if (!ieee80211_is_data_qos(hdr->frame_control)) 1337 return ""; 1338 1339 qc = ieee80211_get_qos_ctl(hdr); 1340 tid = *qc & IEEE80211_QOS_CTL_TID_MASK; 1341 if (tid < 8) 1342 snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]); 1343 else 1344 snprintf(out, size, "tid %d", tid); 1345 1346 return out; 1347 } 1348 1349 static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar, 1350 struct ieee80211_rx_status *rx_status, 1351 struct sk_buff *skb) 1352 { 1353 struct ieee80211_rx_status *status; 1354 1355 status = IEEE80211_SKB_RXCB(skb); 1356 *status = *rx_status; 1357 1358 skb_queue_tail(&ar->htt.rx_msdus_q, skb); 1359 } 1360 1361 static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb) 1362 { 1363 struct ieee80211_rx_status *status; 1364 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1365 char tid[32]; 1366 1367 status = IEEE80211_SKB_RXCB(skb); 1368 1369 if (!(ar->filter_flags & FIF_FCSFAIL) && 1370 status->flag & RX_FLAG_FAILED_FCS_CRC) { 1371 ar->stats.rx_crc_err_drop++; 1372 dev_kfree_skb_any(skb); 1373 return; 1374 } 1375 1376 ath10k_dbg(ar, ATH10K_DBG_DATA, 1377 "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", 1378 skb, 1379 skb->len, 1380 ieee80211_get_SA(hdr), 1381 ath10k_get_tid(hdr, tid, sizeof(tid)), 1382 is_multicast_ether_addr(ieee80211_get_DA(hdr)) ? 
1383 "mcast" : "ucast", 1384 IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)), 1385 (status->encoding == RX_ENC_LEGACY) ? "legacy" : "", 1386 (status->encoding == RX_ENC_HT) ? "ht" : "", 1387 (status->encoding == RX_ENC_VHT) ? "vht" : "", 1388 (status->bw == RATE_INFO_BW_40) ? "40" : "", 1389 (status->bw == RATE_INFO_BW_80) ? "80" : "", 1390 (status->bw == RATE_INFO_BW_160) ? "160" : "", 1391 status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "", 1392 status->rate_idx, 1393 status->nss, 1394 status->freq, 1395 status->band, status->flag, 1396 !!(status->flag & RX_FLAG_FAILED_FCS_CRC), 1397 !!(status->flag & RX_FLAG_MMIC_ERROR), 1398 !!(status->flag & RX_FLAG_AMSDU_MORE)); 1399 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ", 1400 skb->data, skb->len); 1401 trace_ath10k_rx_hdr(ar, skb->data, skb->len); 1402 trace_ath10k_rx_payload(ar, skb->data, skb->len); 1403 1404 ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi); 1405 } 1406 1407 static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar, 1408 struct ieee80211_hdr *hdr) 1409 { 1410 int len = ieee80211_hdrlen(hdr->frame_control); 1411 1412 if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING, 1413 ar->running_fw->fw_file.fw_features)) 1414 len = round_up(len, 4); 1415 1416 return len; 1417 } 1418 1419 static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar, 1420 struct sk_buff *msdu, 1421 struct ieee80211_rx_status *status, 1422 enum htt_rx_mpdu_encrypt_type enctype, 1423 bool is_decrypted, 1424 const u8 first_hdr[64]) 1425 { 1426 struct ieee80211_hdr *hdr; 1427 struct ath10k_hw_params *hw = &ar->hw_params; 1428 struct htt_rx_desc *rxd; 1429 struct rx_msdu_end_common *rxd_msdu_end_common; 1430 size_t hdr_len; 1431 size_t crypto_len; 1432 bool is_first; 1433 bool is_last; 1434 bool msdu_limit_err; 1435 int bytes_aligned = ar->hw_params.decap_align_bytes; 1436 u8 *qos; 1437 1438 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 1439 (void *)msdu->data - hw->rx_desc_ops->rx_desc_size); 1440 1441 rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd); 1442 is_first = !!(rxd_msdu_end_common->info0 & 1443 __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)); 1444 is_last = !!(rxd_msdu_end_common->info0 & 1445 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)); 1446 1447 /* Delivered decapped frame: 1448 * [802.11 header] 1449 * [crypto param] <-- can be trimmed if !fcs_err && 1450 * !decrypt_err && !peer_idx_invalid 1451 * [amsdu header] <-- only if A-MSDU 1452 * [rfc1042/llc] 1453 * [payload] 1454 * [FCS] <-- at end, needs to be trimmed 1455 */ 1456 1457 /* Some hardwares(QCA99x0 variants) limit number of msdus in a-msdu when 1458 * deaggregate, so that unwanted MSDU-deaggregation is avoided for 1459 * error packets. If limit exceeds, hw sends all remaining MSDUs as 1460 * a single last MSDU with this msdu limit error set. 1461 */ 1462 msdu_limit_err = ath10k_htt_rx_desc_msdu_limit_error(hw, rxd); 1463 1464 /* If MSDU limit error happens, then don't warn on, the partial raw MSDU 1465 * without first MSDU is expected in that case, and handled later here. 
1466 */ 1467 /* This probably shouldn't happen but warn just in case */ 1468 if (WARN_ON_ONCE(!is_first && !msdu_limit_err)) 1469 return; 1470 1471 /* This probably shouldn't happen but warn just in case */ 1472 if (WARN_ON_ONCE(!(is_first && is_last) && !msdu_limit_err)) 1473 return; 1474 1475 skb_trim(msdu, msdu->len - FCS_LEN); 1476 1477 /* Push original 80211 header */ 1478 if (unlikely(msdu_limit_err)) { 1479 hdr = (struct ieee80211_hdr *)first_hdr; 1480 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1481 crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); 1482 1483 if (ieee80211_is_data_qos(hdr->frame_control)) { 1484 qos = ieee80211_get_qos_ctl(hdr); 1485 qos[0] |= IEEE80211_QOS_CTL_A_MSDU_PRESENT; 1486 } 1487 1488 if (crypto_len) 1489 memcpy(skb_push(msdu, crypto_len), 1490 (void *)hdr + round_up(hdr_len, bytes_aligned), 1491 crypto_len); 1492 1493 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1494 } 1495 1496 /* In most cases this will be true for sniffed frames. It makes sense 1497 * to deliver them as-is without stripping the crypto param. This is 1498 * necessary for software based decryption. 1499 * 1500 * If there's no error then the frame is decrypted. At least that is 1501 * the case for frames that come in via fragmented rx indication. 1502 */ 1503 if (!is_decrypted) 1504 return; 1505 1506 /* The payload is decrypted so strip crypto params. Start from tail 1507 * since hdr is used to compute some stuff. 1508 */ 1509 1510 hdr = (void *)msdu->data; 1511 1512 /* Tail */ 1513 if (status->flag & RX_FLAG_IV_STRIPPED) { 1514 skb_trim(msdu, msdu->len - 1515 ath10k_htt_rx_crypto_mic_len(ar, enctype)); 1516 1517 skb_trim(msdu, msdu->len - 1518 ath10k_htt_rx_crypto_icv_len(ar, enctype)); 1519 } else { 1520 /* MIC */ 1521 if (status->flag & RX_FLAG_MIC_STRIPPED) 1522 skb_trim(msdu, msdu->len - 1523 ath10k_htt_rx_crypto_mic_len(ar, enctype)); 1524 1525 /* ICV */ 1526 if (status->flag & RX_FLAG_ICV_STRIPPED) 1527 skb_trim(msdu, msdu->len - 1528 ath10k_htt_rx_crypto_icv_len(ar, enctype)); 1529 } 1530 1531 /* MMIC */ 1532 if ((status->flag & RX_FLAG_MMIC_STRIPPED) && 1533 !ieee80211_has_morefrags(hdr->frame_control) && 1534 enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA) 1535 skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN); 1536 1537 /* Head */ 1538 if (status->flag & RX_FLAG_IV_STRIPPED) { 1539 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1540 crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); 1541 1542 memmove((void *)msdu->data + crypto_len, 1543 (void *)msdu->data, hdr_len); 1544 skb_pull(msdu, crypto_len); 1545 } 1546 } 1547 1548 static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar, 1549 struct sk_buff *msdu, 1550 struct ieee80211_rx_status *status, 1551 const u8 first_hdr[64], 1552 enum htt_rx_mpdu_encrypt_type enctype) 1553 { 1554 struct ath10k_hw_params *hw = &ar->hw_params; 1555 struct ieee80211_hdr *hdr; 1556 struct htt_rx_desc *rxd; 1557 size_t hdr_len; 1558 u8 da[ETH_ALEN]; 1559 u8 sa[ETH_ALEN]; 1560 int l3_pad_bytes; 1561 int bytes_aligned = ar->hw_params.decap_align_bytes; 1562 1563 /* Delivered decapped frame: 1564 * [nwifi 802.11 header] <-- replaced with 802.11 hdr 1565 * [rfc1042/llc] 1566 * 1567 * Note: The nwifi header doesn't have QoS Control and is 1568 * (always?) a 3addr frame. 1569 * 1570 * Note2: There's no A-MSDU subframe header. Even if it's part 1571 * of an A-MSDU. 
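	 *
	 * The undecap below therefore: strips the nwifi header (saving its
	 * DA/SA), pushes the original 802.11 header taken from first_hdr
	 * (plus the crypto params when the IV was not stripped), and finally
	 * restores the saved DA/SA, which may differ in the 4-address case.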
1572 */ 1573 1574 /* pull decapped header and copy SA & DA */ 1575 rxd = HTT_RX_BUF_TO_RX_DESC(hw, (void *)msdu->data - 1576 hw->rx_desc_ops->rx_desc_size); 1577 1578 l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd); 1579 skb_put(msdu, l3_pad_bytes); 1580 1581 hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes); 1582 1583 hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr); 1584 ether_addr_copy(da, ieee80211_get_DA(hdr)); 1585 ether_addr_copy(sa, ieee80211_get_SA(hdr)); 1586 skb_pull(msdu, hdr_len); 1587 1588 /* push original 802.11 header */ 1589 hdr = (struct ieee80211_hdr *)first_hdr; 1590 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1591 1592 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 1593 memcpy(skb_push(msdu, 1594 ath10k_htt_rx_crypto_param_len(ar, enctype)), 1595 (void *)hdr + round_up(hdr_len, bytes_aligned), 1596 ath10k_htt_rx_crypto_param_len(ar, enctype)); 1597 } 1598 1599 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1600 1601 /* original 802.11 header has a different DA and in 1602 * case of 4addr it may also have different SA 1603 */ 1604 hdr = (struct ieee80211_hdr *)msdu->data; 1605 ether_addr_copy(ieee80211_get_DA(hdr), da); 1606 ether_addr_copy(ieee80211_get_SA(hdr), sa); 1607 } 1608 1609 static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar, 1610 struct sk_buff *msdu, 1611 enum htt_rx_mpdu_encrypt_type enctype) 1612 { 1613 struct ieee80211_hdr *hdr; 1614 struct ath10k_hw_params *hw = &ar->hw_params; 1615 struct htt_rx_desc *rxd; 1616 struct rx_msdu_end_common *rxd_msdu_end_common; 1617 u8 *rxd_rx_hdr_status; 1618 size_t hdr_len, crypto_len; 1619 void *rfc1042; 1620 bool is_first, is_last, is_amsdu; 1621 int bytes_aligned = ar->hw_params.decap_align_bytes; 1622 1623 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 1624 (void *)msdu->data - hw->rx_desc_ops->rx_desc_size); 1625 1626 rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd); 1627 rxd_rx_hdr_status = ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd); 1628 hdr = (void *)rxd_rx_hdr_status; 1629 1630 is_first = !!(rxd_msdu_end_common->info0 & 1631 __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)); 1632 is_last = !!(rxd_msdu_end_common->info0 & 1633 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)); 1634 is_amsdu = !(is_first && is_last); 1635 1636 rfc1042 = hdr; 1637 1638 if (is_first) { 1639 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1640 crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); 1641 1642 rfc1042 += round_up(hdr_len, bytes_aligned) + 1643 round_up(crypto_len, bytes_aligned); 1644 } 1645 1646 if (is_amsdu) 1647 rfc1042 += sizeof(struct amsdu_subframe_hdr); 1648 1649 return rfc1042; 1650 } 1651 1652 static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar, 1653 struct sk_buff *msdu, 1654 struct ieee80211_rx_status *status, 1655 const u8 first_hdr[64], 1656 enum htt_rx_mpdu_encrypt_type enctype) 1657 { 1658 struct ath10k_hw_params *hw = &ar->hw_params; 1659 struct ieee80211_hdr *hdr; 1660 struct ethhdr *eth; 1661 size_t hdr_len; 1662 void *rfc1042; 1663 u8 da[ETH_ALEN]; 1664 u8 sa[ETH_ALEN]; 1665 int l3_pad_bytes; 1666 struct htt_rx_desc *rxd; 1667 int bytes_aligned = ar->hw_params.decap_align_bytes; 1668 1669 /* Delivered decapped frame: 1670 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc 1671 * [payload] 1672 */ 1673 1674 rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype); 1675 if (WARN_ON_ONCE(!rfc1042)) 1676 return; 1677 1678 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 1679 (void *)msdu->data - hw->rx_desc_ops->rx_desc_size); 1680 1681 l3_pad_bytes = 
ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd); 1682 skb_put(msdu, l3_pad_bytes); 1683 skb_pull(msdu, l3_pad_bytes); 1684 1685 /* pull decapped header and copy SA & DA */ 1686 eth = (struct ethhdr *)msdu->data; 1687 ether_addr_copy(da, eth->h_dest); 1688 ether_addr_copy(sa, eth->h_source); 1689 skb_pull(msdu, sizeof(struct ethhdr)); 1690 1691 /* push rfc1042/llc/snap */ 1692 memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042, 1693 sizeof(struct rfc1042_hdr)); 1694 1695 /* push original 802.11 header */ 1696 hdr = (struct ieee80211_hdr *)first_hdr; 1697 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1698 1699 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 1700 memcpy(skb_push(msdu, 1701 ath10k_htt_rx_crypto_param_len(ar, enctype)), 1702 (void *)hdr + round_up(hdr_len, bytes_aligned), 1703 ath10k_htt_rx_crypto_param_len(ar, enctype)); 1704 } 1705 1706 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1707 1708 /* original 802.11 header has a different DA and in 1709 * case of 4addr it may also have different SA 1710 */ 1711 hdr = (struct ieee80211_hdr *)msdu->data; 1712 ether_addr_copy(ieee80211_get_DA(hdr), da); 1713 ether_addr_copy(ieee80211_get_SA(hdr), sa); 1714 } 1715 1716 static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar, 1717 struct sk_buff *msdu, 1718 struct ieee80211_rx_status *status, 1719 const u8 first_hdr[64], 1720 enum htt_rx_mpdu_encrypt_type enctype) 1721 { 1722 struct ath10k_hw_params *hw = &ar->hw_params; 1723 struct ieee80211_hdr *hdr; 1724 size_t hdr_len; 1725 int l3_pad_bytes; 1726 struct htt_rx_desc *rxd; 1727 int bytes_aligned = ar->hw_params.decap_align_bytes; 1728 1729 /* Delivered decapped frame: 1730 * [amsdu header] <-- replaced with 802.11 hdr 1731 * [rfc1042/llc] 1732 * [payload] 1733 */ 1734 1735 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 1736 (void *)msdu->data - hw->rx_desc_ops->rx_desc_size); 1737 1738 l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd); 1739 1740 skb_put(msdu, l3_pad_bytes); 1741 skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes); 1742 1743 hdr = (struct ieee80211_hdr *)first_hdr; 1744 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1745 1746 if (!(status->flag & RX_FLAG_IV_STRIPPED)) { 1747 memcpy(skb_push(msdu, 1748 ath10k_htt_rx_crypto_param_len(ar, enctype)), 1749 (void *)hdr + round_up(hdr_len, bytes_aligned), 1750 ath10k_htt_rx_crypto_param_len(ar, enctype)); 1751 } 1752 1753 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1754 } 1755 1756 static void ath10k_htt_rx_h_undecap(struct ath10k *ar, 1757 struct sk_buff *msdu, 1758 struct ieee80211_rx_status *status, 1759 u8 first_hdr[64], 1760 enum htt_rx_mpdu_encrypt_type enctype, 1761 bool is_decrypted) 1762 { 1763 struct ath10k_hw_params *hw = &ar->hw_params; 1764 struct htt_rx_desc *rxd; 1765 struct rx_msdu_start_common *rxd_msdu_start_common; 1766 enum rx_msdu_decap_format decap; 1767 1768 /* First msdu's decapped header: 1769 * [802.11 header] <-- padded to 4 bytes long 1770 * [crypto param] <-- padded to 4 bytes long 1771 * [amsdu header] <-- only if A-MSDU 1772 * [rfc1042/llc] 1773 * 1774 * Other (2nd, 3rd, ..) 
msdu's decapped header: 1775 * [amsdu header] <-- only if A-MSDU 1776 * [rfc1042/llc] 1777 */ 1778 1779 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 1780 (void *)msdu->data - hw->rx_desc_ops->rx_desc_size); 1781 1782 rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd); 1783 decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1), 1784 RX_MSDU_START_INFO1_DECAP_FORMAT); 1785 1786 switch (decap) { 1787 case RX_MSDU_DECAP_RAW: 1788 ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype, 1789 is_decrypted, first_hdr); 1790 break; 1791 case RX_MSDU_DECAP_NATIVE_WIFI: 1792 ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr, 1793 enctype); 1794 break; 1795 case RX_MSDU_DECAP_ETHERNET2_DIX: 1796 ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype); 1797 break; 1798 case RX_MSDU_DECAP_8023_SNAP_LLC: 1799 ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr, 1800 enctype); 1801 break; 1802 } 1803 } 1804 1805 static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb) 1806 { 1807 struct htt_rx_desc *rxd; 1808 struct rx_attention *rxd_attention; 1809 struct rx_msdu_start_common *rxd_msdu_start_common; 1810 u32 flags, info; 1811 bool is_ip4, is_ip6; 1812 bool is_tcp, is_udp; 1813 bool ip_csum_ok, tcpudp_csum_ok; 1814 1815 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 1816 (void *)skb->data - hw->rx_desc_ops->rx_desc_size); 1817 1818 rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); 1819 rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd); 1820 flags = __le32_to_cpu(rxd_attention->flags); 1821 info = __le32_to_cpu(rxd_msdu_start_common->info1); 1822 1823 is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO); 1824 is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO); 1825 is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO); 1826 is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO); 1827 ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL); 1828 tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL); 1829 1830 if (!is_ip4 && !is_ip6) 1831 return CHECKSUM_NONE; 1832 if (!is_tcp && !is_udp) 1833 return CHECKSUM_NONE; 1834 if (!ip_csum_ok) 1835 return CHECKSUM_NONE; 1836 if (!tcpudp_csum_ok) 1837 return CHECKSUM_NONE; 1838 1839 return CHECKSUM_UNNECESSARY; 1840 } 1841 1842 static void ath10k_htt_rx_h_csum_offload(struct ath10k_hw_params *hw, 1843 struct sk_buff *msdu) 1844 { 1845 msdu->ip_summed = ath10k_htt_rx_get_csum_state(hw, msdu); 1846 } 1847 1848 static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb, 1849 enum htt_rx_mpdu_encrypt_type enctype) 1850 { 1851 struct ieee80211_hdr *hdr; 1852 u64 pn = 0; 1853 u8 *ehdr; 1854 1855 hdr = (struct ieee80211_hdr *)skb->data; 1856 ehdr = skb->data + ieee80211_hdrlen(hdr->frame_control); 1857 1858 if (enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) { 1859 pn = ehdr[0]; 1860 pn |= (u64)ehdr[1] << 8; 1861 pn |= (u64)ehdr[4] << 16; 1862 pn |= (u64)ehdr[5] << 24; 1863 pn |= (u64)ehdr[6] << 32; 1864 pn |= (u64)ehdr[7] << 40; 1865 } 1866 return pn; 1867 } 1868 1869 static bool ath10k_htt_rx_h_frag_multicast_check(struct ath10k *ar, 1870 struct sk_buff *skb) 1871 { 1872 struct ieee80211_hdr *hdr; 1873 1874 hdr = (struct ieee80211_hdr *)skb->data; 1875 return !is_multicast_ether_addr(hdr->addr1); 1876 } 1877 1878 static bool ath10k_htt_rx_h_frag_pn_check(struct ath10k *ar, 1879 struct sk_buff *skb, 1880 u16 peer_id, 1881 enum htt_rx_mpdu_encrypt_type enctype) 1882 { 1883 struct ath10k_peer *peer; 1884 union htt_rx_pn_t *last_pn, new_pn = {0}; 1885 struct ieee80211_hdr *hdr; 1886 u8 tid, 
frag_number; 1887 u32 seq; 1888 1889 peer = ath10k_peer_find_by_id(ar, peer_id); 1890 if (!peer) { 1891 ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer for frag pn check\n"); 1892 return false; 1893 } 1894 1895 hdr = (struct ieee80211_hdr *)skb->data; 1896 if (ieee80211_is_data_qos(hdr->frame_control)) 1897 tid = ieee80211_get_tid(hdr); 1898 else 1899 tid = ATH10K_TXRX_NON_QOS_TID; 1900 1901 last_pn = &peer->frag_tids_last_pn[tid]; 1902 new_pn.pn48 = ath10k_htt_rx_h_get_pn(ar, skb, enctype); 1903 frag_number = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; 1904 seq = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)); 1905 1906 if (frag_number == 0) { 1907 last_pn->pn48 = new_pn.pn48; 1908 peer->frag_tids_seq[tid] = seq; 1909 } else { 1910 if (seq != peer->frag_tids_seq[tid]) 1911 return false; 1912 1913 if (new_pn.pn48 != last_pn->pn48 + 1) 1914 return false; 1915 1916 last_pn->pn48 = new_pn.pn48; 1917 } 1918 1919 return true; 1920 } 1921 1922 static void ath10k_htt_rx_h_mpdu(struct ath10k *ar, 1923 struct sk_buff_head *amsdu, 1924 struct ieee80211_rx_status *status, 1925 bool fill_crypt_header, 1926 u8 *rx_hdr, 1927 enum ath10k_pkt_rx_err *err, 1928 u16 peer_id, 1929 bool frag) 1930 { 1931 struct sk_buff *first; 1932 struct sk_buff *last; 1933 struct sk_buff *msdu, *temp; 1934 struct ath10k_hw_params *hw = &ar->hw_params; 1935 struct htt_rx_desc *rxd; 1936 struct rx_attention *rxd_attention; 1937 struct rx_mpdu_start *rxd_mpdu_start; 1938 1939 struct ieee80211_hdr *hdr; 1940 enum htt_rx_mpdu_encrypt_type enctype; 1941 u8 first_hdr[64]; 1942 u8 *qos; 1943 bool has_fcs_err; 1944 bool has_crypto_err; 1945 bool has_tkip_err; 1946 bool has_peer_idx_invalid; 1947 bool is_decrypted; 1948 bool is_mgmt; 1949 u32 attention; 1950 bool frag_pn_check = true, multicast_check = true; 1951 1952 if (skb_queue_empty(amsdu)) 1953 return; 1954 1955 first = skb_peek(amsdu); 1956 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 1957 (void *)first->data - hw->rx_desc_ops->rx_desc_size); 1958 1959 rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); 1960 rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd); 1961 1962 is_mgmt = !!(rxd_attention->flags & 1963 __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE)); 1964 1965 enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0), 1966 RX_MPDU_START_INFO0_ENCRYPT_TYPE); 1967 1968 /* First MSDU's Rx descriptor in an A-MSDU contains full 802.11 1969 * decapped header. It'll be used for undecapping of each MSDU. 1970 */ 1971 hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd); 1972 memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN); 1973 1974 if (rx_hdr) 1975 memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN); 1976 1977 /* Each A-MSDU subframe will use the original header as the base and be 1978 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl. 1979 */ 1980 hdr = (void *)first_hdr; 1981 1982 if (ieee80211_is_data_qos(hdr->frame_control)) { 1983 qos = ieee80211_get_qos_ctl(hdr); 1984 qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; 1985 } 1986 1987 /* Some attention flags are valid only in the last MSDU. 
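 * The FCS, decryption, TKIP MIC and peer index error bits read below are
 * therefore taken from the tail MSDU's rx descriptor.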
*/ 1988 last = skb_peek_tail(amsdu); 1989 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 1990 (void *)last->data - hw->rx_desc_ops->rx_desc_size); 1991 1992 rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd); 1993 attention = __le32_to_cpu(rxd_attention->flags); 1994 1995 has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR); 1996 has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR); 1997 has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR); 1998 has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID); 1999 2000 /* Note: If hardware captures an encrypted frame that it can't decrypt, 2001 * e.g. due to fcs error, missing peer or invalid key data it will 2002 * report the frame as raw. 2003 */ 2004 is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE && 2005 !has_fcs_err && 2006 !has_crypto_err && 2007 !has_peer_idx_invalid); 2008 2009 /* Clear per-MPDU flags while leaving per-PPDU flags intact. */ 2010 status->flag &= ~(RX_FLAG_FAILED_FCS_CRC | 2011 RX_FLAG_MMIC_ERROR | 2012 RX_FLAG_DECRYPTED | 2013 RX_FLAG_IV_STRIPPED | 2014 RX_FLAG_ONLY_MONITOR | 2015 RX_FLAG_MMIC_STRIPPED); 2016 2017 if (has_fcs_err) 2018 status->flag |= RX_FLAG_FAILED_FCS_CRC; 2019 2020 if (has_tkip_err) 2021 status->flag |= RX_FLAG_MMIC_ERROR; 2022 2023 if (err) { 2024 if (has_fcs_err) 2025 *err = ATH10K_PKT_RX_ERR_FCS; 2026 else if (has_tkip_err) 2027 *err = ATH10K_PKT_RX_ERR_TKIP; 2028 else if (has_crypto_err) 2029 *err = ATH10K_PKT_RX_ERR_CRYPT; 2030 else if (has_peer_idx_invalid) 2031 *err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL; 2032 } 2033 2034 /* Firmware reports all necessary management frames via WMI already. 2035 * They are not reported to monitor interfaces at all so pass the ones 2036 * coming via HTT to monitor interfaces instead. This simplifies 2037 * matters a lot. 2038 */ 2039 if (is_mgmt) 2040 status->flag |= RX_FLAG_ONLY_MONITOR; 2041 2042 if (is_decrypted) { 2043 status->flag |= RX_FLAG_DECRYPTED; 2044 2045 if (likely(!is_mgmt)) 2046 status->flag |= RX_FLAG_MMIC_STRIPPED; 2047 2048 if (fill_crypt_header) 2049 status->flag |= RX_FLAG_MIC_STRIPPED | 2050 RX_FLAG_ICV_STRIPPED; 2051 else 2052 status->flag |= RX_FLAG_IV_STRIPPED; 2053 } 2054 2055 skb_queue_walk(amsdu, msdu) { 2056 if (frag && !fill_crypt_header && is_decrypted && 2057 enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) 2058 frag_pn_check = ath10k_htt_rx_h_frag_pn_check(ar, 2059 msdu, 2060 peer_id, 2061 enctype); 2062 2063 if (frag) 2064 multicast_check = ath10k_htt_rx_h_frag_multicast_check(ar, 2065 msdu); 2066 2067 if (!frag_pn_check || !multicast_check) { 2068 /* Discard the fragment with invalid PN or multicast DA 2069 */ 2070 temp = msdu->prev; 2071 __skb_unlink(msdu, amsdu); 2072 dev_kfree_skb_any(msdu); 2073 msdu = temp; 2074 frag_pn_check = true; 2075 multicast_check = true; 2076 continue; 2077 } 2078 2079 ath10k_htt_rx_h_csum_offload(&ar->hw_params, msdu); 2080 2081 if (frag && !fill_crypt_header && 2082 enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA) 2083 status->flag &= ~RX_FLAG_MMIC_STRIPPED; 2084 2085 ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype, 2086 is_decrypted); 2087 2088 /* Undecapping involves copying the original 802.11 header back 2089 * to sk_buff. If frame is protected and hardware has decrypted 2090 * it then remove the protected bit. 
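 * This is only done for frames the hardware actually decrypted and only
 * when the crypto header is not being reconstructed for mac80211 (see the
 * is_decrypted/is_mgmt/fill_crypt_header checks below).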
2091 */ 2092 if (!is_decrypted) 2093 continue; 2094 if (is_mgmt) 2095 continue; 2096 2097 if (fill_crypt_header) 2098 continue; 2099 2100 hdr = (void *)msdu->data; 2101 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 2102 2103 if (frag && !fill_crypt_header && 2104 enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA) 2105 status->flag &= ~RX_FLAG_IV_STRIPPED & 2106 ~RX_FLAG_MMIC_STRIPPED; 2107 } 2108 } 2109 2110 static void ath10k_htt_rx_h_enqueue(struct ath10k *ar, 2111 struct sk_buff_head *amsdu, 2112 struct ieee80211_rx_status *status) 2113 { 2114 struct sk_buff *msdu; 2115 struct sk_buff *first_subframe; 2116 2117 first_subframe = skb_peek(amsdu); 2118 2119 while ((msdu = __skb_dequeue(amsdu))) { 2120 /* Setup per-MSDU flags */ 2121 if (skb_queue_empty(amsdu)) 2122 status->flag &= ~RX_FLAG_AMSDU_MORE; 2123 else 2124 status->flag |= RX_FLAG_AMSDU_MORE; 2125 2126 if (msdu == first_subframe) { 2127 first_subframe = NULL; 2128 status->flag &= ~RX_FLAG_ALLOW_SAME_PN; 2129 } else { 2130 status->flag |= RX_FLAG_ALLOW_SAME_PN; 2131 } 2132 2133 ath10k_htt_rx_h_queue_msdu(ar, status, msdu); 2134 } 2135 } 2136 2137 static int ath10k_unchain_msdu(struct sk_buff_head *amsdu, 2138 unsigned long *unchain_cnt) 2139 { 2140 struct sk_buff *skb, *first; 2141 int space; 2142 int total_len = 0; 2143 int amsdu_len = skb_queue_len(amsdu); 2144 2145 /* TODO: Might could optimize this by using 2146 * skb_try_coalesce or similar method to 2147 * decrease copying, or maybe get mac80211 to 2148 * provide a way to just receive a list of 2149 * skb? 2150 */ 2151 2152 first = __skb_dequeue(amsdu); 2153 2154 /* Allocate total length all at once. */ 2155 skb_queue_walk(amsdu, skb) 2156 total_len += skb->len; 2157 2158 space = total_len - skb_tailroom(first); 2159 if ((space > 0) && 2160 (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) { 2161 /* TODO: bump some rx-oom error stat */ 2162 /* put it back together so we can free the 2163 * whole list at once. 2164 */ 2165 __skb_queue_head(amsdu, first); 2166 return -1; 2167 } 2168 2169 /* Walk list again, copying contents into 2170 * msdu_head 2171 */ 2172 while ((skb = __skb_dequeue(amsdu))) { 2173 skb_copy_from_linear_data(skb, skb_put(first, skb->len), 2174 skb->len); 2175 dev_kfree_skb_any(skb); 2176 } 2177 2178 __skb_queue_head(amsdu, first); 2179 2180 *unchain_cnt += amsdu_len - 1; 2181 2182 return 0; 2183 } 2184 2185 static void ath10k_htt_rx_h_unchain(struct ath10k *ar, 2186 struct sk_buff_head *amsdu, 2187 unsigned long *drop_cnt, 2188 unsigned long *unchain_cnt) 2189 { 2190 struct sk_buff *first; 2191 struct ath10k_hw_params *hw = &ar->hw_params; 2192 struct htt_rx_desc *rxd; 2193 struct rx_msdu_start_common *rxd_msdu_start_common; 2194 struct rx_frag_info_common *rxd_frag_info; 2195 enum rx_msdu_decap_format decap; 2196 2197 first = skb_peek(amsdu); 2198 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 2199 (void *)first->data - hw->rx_desc_ops->rx_desc_size); 2200 2201 rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd); 2202 rxd_frag_info = ath10k_htt_rx_desc_get_frag_info(hw, rxd); 2203 decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1), 2204 RX_MSDU_START_INFO1_DECAP_FORMAT); 2205 2206 /* FIXME: Current unchaining logic can only handle simple case of raw 2207 * msdu chaining. If decapping is other than raw the chaining may be 2208 * more complex and this isn't handled by the current code. Don't even 2209 * try re-constructing such frames - it'll be pretty much garbage. 
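 * A chained MSDU here is a single large MSDU whose payload spilled over
 * into ring2_more_count additional rx buffers, which is what the queue
 * length check below verifies.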
2210 */ 2211 if (decap != RX_MSDU_DECAP_RAW || 2212 skb_queue_len(amsdu) != 1 + rxd_frag_info->ring2_more_count) { 2213 *drop_cnt += skb_queue_len(amsdu); 2214 __skb_queue_purge(amsdu); 2215 return; 2216 } 2217 2218 ath10k_unchain_msdu(amsdu, unchain_cnt); 2219 } 2220 2221 static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar, 2222 struct sk_buff_head *amsdu) 2223 { 2224 u8 *subframe_hdr; 2225 struct sk_buff *first; 2226 bool is_first, is_last; 2227 struct ath10k_hw_params *hw = &ar->hw_params; 2228 struct htt_rx_desc *rxd; 2229 struct rx_msdu_end_common *rxd_msdu_end_common; 2230 struct rx_mpdu_start *rxd_mpdu_start; 2231 struct ieee80211_hdr *hdr; 2232 size_t hdr_len, crypto_len; 2233 enum htt_rx_mpdu_encrypt_type enctype; 2234 int bytes_aligned = ar->hw_params.decap_align_bytes; 2235 2236 first = skb_peek(amsdu); 2237 2238 rxd = HTT_RX_BUF_TO_RX_DESC(hw, 2239 (void *)first->data - hw->rx_desc_ops->rx_desc_size); 2240 2241 rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd); 2242 rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd); 2243 hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd); 2244 2245 is_first = !!(rxd_msdu_end_common->info0 & 2246 __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)); 2247 is_last = !!(rxd_msdu_end_common->info0 & 2248 __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)); 2249 2250 /* Return in case of non-aggregated msdu */ 2251 if (is_first && is_last) 2252 return true; 2253 2254 /* First msdu flag is not set for the first msdu of the list */ 2255 if (!is_first) 2256 return false; 2257 2258 enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0), 2259 RX_MPDU_START_INFO0_ENCRYPT_TYPE); 2260 2261 hdr_len = ieee80211_hdrlen(hdr->frame_control); 2262 crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); 2263 2264 subframe_hdr = (u8 *)hdr + round_up(hdr_len, bytes_aligned) + 2265 crypto_len; 2266 2267 /* Validate if the amsdu has a proper first subframe. 2268 * There are chances a single msdu can be received as amsdu when 2269 * the unauthenticated amsdu flag of a QoS header 2270 * gets flipped in non-SPP AMSDU's, in such cases the first 2271 * subframe has llc/snap header in place of a valid da. 
2272 * return false if the da matches rfc1042 pattern 2273 */ 2274 if (ether_addr_equal(subframe_hdr, rfc1042_header)) 2275 return false; 2276 2277 return true; 2278 } 2279 2280 static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar, 2281 struct sk_buff_head *amsdu, 2282 struct ieee80211_rx_status *rx_status) 2283 { 2284 if (!rx_status->freq) { 2285 ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n"); 2286 return false; 2287 } 2288 2289 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) { 2290 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n"); 2291 return false; 2292 } 2293 2294 if (!ath10k_htt_rx_validate_amsdu(ar, amsdu)) { 2295 ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid amsdu received\n"); 2296 return false; 2297 } 2298 2299 return true; 2300 } 2301 2302 static void ath10k_htt_rx_h_filter(struct ath10k *ar, 2303 struct sk_buff_head *amsdu, 2304 struct ieee80211_rx_status *rx_status, 2305 unsigned long *drop_cnt) 2306 { 2307 if (skb_queue_empty(amsdu)) 2308 return; 2309 2310 if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status)) 2311 return; 2312 2313 if (drop_cnt) 2314 *drop_cnt += skb_queue_len(amsdu); 2315 2316 __skb_queue_purge(amsdu); 2317 } 2318 2319 static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt) 2320 { 2321 struct ath10k *ar = htt->ar; 2322 struct ieee80211_rx_status *rx_status = &htt->rx_status; 2323 struct sk_buff_head amsdu; 2324 int ret; 2325 unsigned long drop_cnt = 0; 2326 unsigned long unchain_cnt = 0; 2327 unsigned long drop_cnt_filter = 0; 2328 unsigned long msdus_to_queue, num_msdus; 2329 enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX; 2330 u8 first_hdr[RX_HTT_HDR_STATUS_LEN]; 2331 2332 __skb_queue_head_init(&amsdu); 2333 2334 spin_lock_bh(&htt->rx_ring.lock); 2335 if (htt->rx_confused) { 2336 spin_unlock_bh(&htt->rx_ring.lock); 2337 return -EIO; 2338 } 2339 ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu); 2340 spin_unlock_bh(&htt->rx_ring.lock); 2341 2342 if (ret < 0) { 2343 ath10k_warn(ar, "rx ring became corrupted: %d\n", ret); 2344 __skb_queue_purge(&amsdu); 2345 /* FIXME: It's probably a good idea to reboot the 2346 * device instead of leaving it inoperable. 
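		 * Once rx_confused is set below, the HTT rx handlers keep
		 * bailing out with -EIO until the HTT layer is torn down and
		 * set up again.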
2347 */ 2348 htt->rx_confused = true; 2349 return ret; 2350 } 2351 2352 num_msdus = skb_queue_len(&amsdu); 2353 2354 ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff); 2355 2356 /* only for ret = 1 indicates chained msdus */ 2357 if (ret > 0) 2358 ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt); 2359 2360 ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter); 2361 ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err, 0, 2362 false); 2363 msdus_to_queue = skb_queue_len(&amsdu); 2364 ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status); 2365 2366 ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err, 2367 unchain_cnt, drop_cnt, drop_cnt_filter, 2368 msdus_to_queue); 2369 2370 return 0; 2371 } 2372 2373 static void ath10k_htt_rx_mpdu_desc_pn_hl(struct htt_hl_rx_desc *rx_desc, 2374 union htt_rx_pn_t *pn, 2375 int pn_len_bits) 2376 { 2377 switch (pn_len_bits) { 2378 case 48: 2379 pn->pn48 = __le32_to_cpu(rx_desc->pn_31_0) + 2380 ((u64)(__le32_to_cpu(rx_desc->u0.pn_63_32) & 0xFFFF) << 32); 2381 break; 2382 case 24: 2383 pn->pn24 = __le32_to_cpu(rx_desc->pn_31_0); 2384 break; 2385 } 2386 } 2387 2388 static bool ath10k_htt_rx_pn_cmp48(union htt_rx_pn_t *new_pn, 2389 union htt_rx_pn_t *old_pn) 2390 { 2391 return ((new_pn->pn48 & 0xffffffffffffULL) <= 2392 (old_pn->pn48 & 0xffffffffffffULL)); 2393 } 2394 2395 static bool ath10k_htt_rx_pn_check_replay_hl(struct ath10k *ar, 2396 struct ath10k_peer *peer, 2397 struct htt_rx_indication_hl *rx) 2398 { 2399 bool last_pn_valid, pn_invalid = false; 2400 enum htt_txrx_sec_cast_type sec_index; 2401 enum htt_security_types sec_type; 2402 union htt_rx_pn_t new_pn = {0}; 2403 struct htt_hl_rx_desc *rx_desc; 2404 union htt_rx_pn_t *last_pn; 2405 u32 rx_desc_info, tid; 2406 int num_mpdu_ranges; 2407 2408 lockdep_assert_held(&ar->data_lock); 2409 2410 if (!peer) 2411 return false; 2412 2413 if (!(rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU)) 2414 return false; 2415 2416 num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), 2417 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); 2418 2419 rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges]; 2420 rx_desc_info = __le32_to_cpu(rx_desc->info); 2421 2422 if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED)) 2423 return false; 2424 2425 tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID); 2426 last_pn_valid = peer->tids_last_pn_valid[tid]; 2427 last_pn = &peer->tids_last_pn[tid]; 2428 2429 if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST)) 2430 sec_index = HTT_TXRX_SEC_MCAST; 2431 else 2432 sec_index = HTT_TXRX_SEC_UCAST; 2433 2434 sec_type = peer->rx_pn[sec_index].sec_type; 2435 ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len); 2436 2437 if (sec_type != HTT_SECURITY_AES_CCMP && 2438 sec_type != HTT_SECURITY_TKIP && 2439 sec_type != HTT_SECURITY_TKIP_NOMIC) 2440 return false; 2441 2442 if (last_pn_valid) 2443 pn_invalid = ath10k_htt_rx_pn_cmp48(&new_pn, last_pn); 2444 else 2445 peer->tids_last_pn_valid[tid] = true; 2446 2447 if (!pn_invalid) 2448 last_pn->pn48 = new_pn.pn48; 2449 2450 return pn_invalid; 2451 } 2452 2453 static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt, 2454 struct htt_rx_indication_hl *rx, 2455 struct sk_buff *skb, 2456 enum htt_rx_pn_check_type check_pn_type, 2457 enum htt_rx_tkip_demic_type tkip_mic_type) 2458 { 2459 struct ath10k *ar = htt->ar; 2460 struct ath10k_peer *peer; 2461 struct htt_rx_indication_mpdu_range *mpdu_ranges; 2462 struct fw_rx_desc_hl *fw_desc; 2463 enum htt_txrx_sec_cast_type 
sec_index; 2464 enum htt_security_types sec_type; 2465 union htt_rx_pn_t new_pn = {0}; 2466 struct htt_hl_rx_desc *rx_desc; 2467 struct ieee80211_hdr *hdr; 2468 struct ieee80211_rx_status *rx_status; 2469 u16 peer_id; 2470 u8 rx_desc_len; 2471 int num_mpdu_ranges; 2472 size_t tot_hdr_len; 2473 struct ieee80211_channel *ch; 2474 bool pn_invalid, qos, first_msdu; 2475 u32 tid, rx_desc_info; 2476 2477 peer_id = __le16_to_cpu(rx->hdr.peer_id); 2478 tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID); 2479 2480 spin_lock_bh(&ar->data_lock); 2481 peer = ath10k_peer_find_by_id(ar, peer_id); 2482 spin_unlock_bh(&ar->data_lock); 2483 if (!peer && peer_id != HTT_INVALID_PEERID) 2484 ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id); 2485 2486 if (!peer) 2487 return true; 2488 2489 num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), 2490 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); 2491 mpdu_ranges = htt_rx_ind_get_mpdu_ranges_hl(rx); 2492 fw_desc = &rx->fw_desc; 2493 rx_desc_len = fw_desc->len; 2494 2495 if (fw_desc->u.bits.discard) { 2496 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt discard mpdu\n"); 2497 goto err; 2498 } 2499 2500 /* I have not yet seen any case where num_mpdu_ranges > 1. 2501 * qcacld does not seem handle that case either, so we introduce the 2502 * same limitation here as well. 2503 */ 2504 if (num_mpdu_ranges > 1) 2505 ath10k_warn(ar, 2506 "Unsupported number of MPDU ranges: %d, ignoring all but the first\n", 2507 num_mpdu_ranges); 2508 2509 if (mpdu_ranges->mpdu_range_status != 2510 HTT_RX_IND_MPDU_STATUS_OK && 2511 mpdu_ranges->mpdu_range_status != 2512 HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) { 2513 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt mpdu_range_status %d\n", 2514 mpdu_ranges->mpdu_range_status); 2515 goto err; 2516 } 2517 2518 rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges]; 2519 rx_desc_info = __le32_to_cpu(rx_desc->info); 2520 2521 if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST)) 2522 sec_index = HTT_TXRX_SEC_MCAST; 2523 else 2524 sec_index = HTT_TXRX_SEC_UCAST; 2525 2526 sec_type = peer->rx_pn[sec_index].sec_type; 2527 first_msdu = rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU; 2528 2529 ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len); 2530 2531 if (check_pn_type == HTT_RX_PN_CHECK && tid >= IEEE80211_NUM_TIDS) { 2532 spin_lock_bh(&ar->data_lock); 2533 pn_invalid = ath10k_htt_rx_pn_check_replay_hl(ar, peer, rx); 2534 spin_unlock_bh(&ar->data_lock); 2535 2536 if (pn_invalid) 2537 goto err; 2538 } 2539 2540 /* Strip off all headers before the MAC header before delivery to 2541 * mac80211 2542 */ 2543 tot_hdr_len = sizeof(struct htt_resp_hdr) + sizeof(rx->hdr) + 2544 sizeof(rx->ppdu) + sizeof(rx->prefix) + 2545 sizeof(rx->fw_desc) + 2546 sizeof(*mpdu_ranges) * num_mpdu_ranges + rx_desc_len; 2547 2548 skb_pull(skb, tot_hdr_len); 2549 2550 hdr = (struct ieee80211_hdr *)skb->data; 2551 qos = ieee80211_is_data_qos(hdr->frame_control); 2552 2553 rx_status = IEEE80211_SKB_RXCB(skb); 2554 memset(rx_status, 0, sizeof(*rx_status)); 2555 2556 if (rx->ppdu.combined_rssi == 0) { 2557 /* SDIO firmware does not provide signal */ 2558 rx_status->signal = 0; 2559 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; 2560 } else { 2561 rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR + 2562 rx->ppdu.combined_rssi; 2563 rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL; 2564 } 2565 2566 spin_lock_bh(&ar->data_lock); 2567 ch = ar->scan_channel; 2568 if (!ch) 2569 ch = ar->rx_channel; 2570 if (!ch) 2571 ch = ath10k_htt_rx_h_any_channel(ar); 2572 if (!ch) 
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

	if (ch) {
		rx_status->band = ch->band;
		rx_status->freq = ch->center_freq;
	}
	if (rx->fw_desc.flags & FW_RX_DESC_FLAGS_LAST_MSDU)
		rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
	else
		rx_status->flag |= RX_FLAG_AMSDU_MORE;

	/* Not entirely sure about this, but all frames from the chipset have
	 * the protected flag set even though they have already been decrypted.
	 * Clearing this flag is necessary in order for mac80211 not to drop
	 * the frame.
	 * TODO: Verify this is always the case or find out a way to check
	 * if there has been hw decryption.
	 */
	if (ieee80211_has_protected(hdr->frame_control)) {
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
		rx_status->flag |= RX_FLAG_DECRYPTED |
				   RX_FLAG_IV_STRIPPED |
				   RX_FLAG_MMIC_STRIPPED;

		if (tid < IEEE80211_NUM_TIDS &&
		    first_msdu &&
		    check_pn_type == HTT_RX_PN_CHECK &&
		   (sec_type == HTT_SECURITY_AES_CCMP ||
		    sec_type == HTT_SECURITY_TKIP ||
		    sec_type == HTT_SECURITY_TKIP_NOMIC)) {
			u8 offset, *ivp, i;
			s8 keyidx = 0;
			__le64 pn48 = cpu_to_le64(new_pn.pn48);

			hdr = (struct ieee80211_hdr *)skb->data;
			offset = ieee80211_hdrlen(hdr->frame_control);
			hdr->frame_control |= __cpu_to_le16(IEEE80211_FCTL_PROTECTED);
			rx_status->flag &= ~RX_FLAG_IV_STRIPPED;

			memmove(skb->data - IEEE80211_CCMP_HDR_LEN,
				skb->data, offset);
			skb_push(skb, IEEE80211_CCMP_HDR_LEN);
			ivp = skb->data + offset;
			memset(skb->data + offset, 0, IEEE80211_CCMP_HDR_LEN);
			/* Ext IV */
			ivp[IEEE80211_WEP_IV_LEN - 1] |= ATH10K_IEEE80211_EXTIV;

			for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
				if (peer->keys[i] &&
				    peer->keys[i]->flags & IEEE80211_KEY_FLAG_PAIRWISE)
					keyidx = peer->keys[i]->keyidx;
			}

			/* Key ID */
			ivp[IEEE80211_WEP_IV_LEN - 1] |= keyidx << 6;

			if (sec_type == HTT_SECURITY_AES_CCMP) {
				rx_status->flag |= RX_FLAG_MIC_STRIPPED;
				/* pn 0, pn 1 */
				memcpy(skb->data + offset, &pn48, 2);
				/* pn 2, pn 3, pn 4, pn 5 */
				memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
			} else {
				rx_status->flag |= RX_FLAG_ICV_STRIPPED;
				/* TSC 0 */
				memcpy(skb->data + offset + 2, &pn48, 1);
				/* TSC 1 */
				memcpy(skb->data + offset, ((u8 *)&pn48) + 1, 1);
				/* TSC 2, TSC 3, TSC 4, TSC 5 */
				memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
			}
		}
	}

	if (tkip_mic_type == HTT_RX_TKIP_MIC)
		rx_status->flag &= ~RX_FLAG_IV_STRIPPED &
				   ~RX_FLAG_MMIC_STRIPPED;

	if (mpdu_ranges->mpdu_range_status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR)
		rx_status->flag |= RX_FLAG_MMIC_ERROR;

	if (!qos && tid < IEEE80211_NUM_TIDS) {
		u8 offset;
		__le16 qos_ctrl = 0;

		hdr = (struct ieee80211_hdr *)skb->data;
		offset = ieee80211_hdrlen(hdr->frame_control);

		hdr->frame_control |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
		memmove(skb->data - IEEE80211_QOS_CTL_LEN, skb->data, offset);
		skb_push(skb, IEEE80211_QOS_CTL_LEN);
		qos_ctrl = cpu_to_le16(tid);
		memcpy(skb->data + offset, &qos_ctrl, IEEE80211_QOS_CTL_LEN);
	}

	if (ar->napi.dev)
		ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
	else
		ieee80211_rx_ni(ar->hw, skb);

	/* We have delivered the skb to the upper layers (mac80211) so we
	 * must not free it.
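	 * The HTT t2h handler frees the indication buffer only when this
	 * function returns true.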
2676 */ 2677 return false; 2678 err: 2679 /* Tell the caller that it must free the skb since we have not 2680 * consumed it 2681 */ 2682 return true; 2683 } 2684 2685 static int ath10k_htt_rx_frag_tkip_decap_nomic(struct sk_buff *skb, 2686 u16 head_len, 2687 u16 hdr_len) 2688 { 2689 u8 *ivp, *orig_hdr; 2690 2691 orig_hdr = skb->data; 2692 ivp = orig_hdr + hdr_len + head_len; 2693 2694 /* the ExtIV bit is always set to 1 for TKIP */ 2695 if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV)) 2696 return -EINVAL; 2697 2698 memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len); 2699 skb_pull(skb, IEEE80211_TKIP_IV_LEN); 2700 skb_trim(skb, skb->len - ATH10K_IEEE80211_TKIP_MICLEN); 2701 return 0; 2702 } 2703 2704 static int ath10k_htt_rx_frag_tkip_decap_withmic(struct sk_buff *skb, 2705 u16 head_len, 2706 u16 hdr_len) 2707 { 2708 u8 *ivp, *orig_hdr; 2709 2710 orig_hdr = skb->data; 2711 ivp = orig_hdr + hdr_len + head_len; 2712 2713 /* the ExtIV bit is always set to 1 for TKIP */ 2714 if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV)) 2715 return -EINVAL; 2716 2717 memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len); 2718 skb_pull(skb, IEEE80211_TKIP_IV_LEN); 2719 skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN); 2720 return 0; 2721 } 2722 2723 static int ath10k_htt_rx_frag_ccmp_decap(struct sk_buff *skb, 2724 u16 head_len, 2725 u16 hdr_len) 2726 { 2727 u8 *ivp, *orig_hdr; 2728 2729 orig_hdr = skb->data; 2730 ivp = orig_hdr + hdr_len + head_len; 2731 2732 /* the ExtIV bit is always set to 1 for CCMP */ 2733 if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV)) 2734 return -EINVAL; 2735 2736 skb_trim(skb, skb->len - IEEE80211_CCMP_MIC_LEN); 2737 memmove(orig_hdr + IEEE80211_CCMP_HDR_LEN, orig_hdr, head_len + hdr_len); 2738 skb_pull(skb, IEEE80211_CCMP_HDR_LEN); 2739 return 0; 2740 } 2741 2742 static int ath10k_htt_rx_frag_wep_decap(struct sk_buff *skb, 2743 u16 head_len, 2744 u16 hdr_len) 2745 { 2746 u8 *orig_hdr; 2747 2748 orig_hdr = skb->data; 2749 2750 memmove(orig_hdr + IEEE80211_WEP_IV_LEN, 2751 orig_hdr, head_len + hdr_len); 2752 skb_pull(skb, IEEE80211_WEP_IV_LEN); 2753 skb_trim(skb, skb->len - IEEE80211_WEP_ICV_LEN); 2754 return 0; 2755 } 2756 2757 static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt, 2758 struct htt_rx_fragment_indication *rx, 2759 struct sk_buff *skb) 2760 { 2761 struct ath10k *ar = htt->ar; 2762 enum htt_rx_tkip_demic_type tkip_mic = HTT_RX_NON_TKIP_MIC; 2763 enum htt_txrx_sec_cast_type sec_index; 2764 struct htt_rx_indication_hl *rx_hl; 2765 enum htt_security_types sec_type; 2766 u32 tid, frag, seq, rx_desc_info; 2767 union htt_rx_pn_t new_pn = {0}; 2768 struct htt_hl_rx_desc *rx_desc; 2769 u16 peer_id, sc, hdr_space; 2770 union htt_rx_pn_t *last_pn; 2771 struct ieee80211_hdr *hdr; 2772 int ret, num_mpdu_ranges; 2773 struct ath10k_peer *peer; 2774 struct htt_resp *resp; 2775 size_t tot_hdr_len; 2776 2777 resp = (struct htt_resp *)(skb->data + HTT_RX_FRAG_IND_INFO0_HEADER_LEN); 2778 skb_pull(skb, HTT_RX_FRAG_IND_INFO0_HEADER_LEN); 2779 skb_trim(skb, skb->len - FCS_LEN); 2780 2781 peer_id = __le16_to_cpu(rx->peer_id); 2782 rx_hl = (struct htt_rx_indication_hl *)(&resp->rx_ind_hl); 2783 2784 spin_lock_bh(&ar->data_lock); 2785 peer = ath10k_peer_find_by_id(ar, peer_id); 2786 if (!peer) { 2787 ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer: %u\n", peer_id); 2788 goto err; 2789 } 2790 2791 num_mpdu_ranges = MS(__le32_to_cpu(rx_hl->hdr.info1), 2792 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); 2793 
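	/* tot_hdr_len is the offset of the HL rx descriptor within the
	 * response: the HTT response header plus the rx indication header,
	 * ppdu, prefix, fw_desc and the MPDU range array all precede it.
	 */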
2794 tot_hdr_len = sizeof(struct htt_resp_hdr) + 2795 sizeof(rx_hl->hdr) + 2796 sizeof(rx_hl->ppdu) + 2797 sizeof(rx_hl->prefix) + 2798 sizeof(rx_hl->fw_desc) + 2799 sizeof(struct htt_rx_indication_mpdu_range) * num_mpdu_ranges; 2800 2801 tid = MS(rx_hl->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID); 2802 rx_desc = (struct htt_hl_rx_desc *)(skb->data + tot_hdr_len); 2803 rx_desc_info = __le32_to_cpu(rx_desc->info); 2804 2805 hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len); 2806 2807 if (is_multicast_ether_addr(hdr->addr1)) { 2808 /* Discard the fragment with multicast DA */ 2809 goto err; 2810 } 2811 2812 if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED)) { 2813 spin_unlock_bh(&ar->data_lock); 2814 return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb, 2815 HTT_RX_NON_PN_CHECK, 2816 HTT_RX_NON_TKIP_MIC); 2817 } 2818 2819 if (ieee80211_has_retry(hdr->frame_control)) 2820 goto err; 2821 2822 hdr_space = ieee80211_hdrlen(hdr->frame_control); 2823 sc = __le16_to_cpu(hdr->seq_ctrl); 2824 seq = IEEE80211_SEQ_TO_SN(sc); 2825 frag = sc & IEEE80211_SCTL_FRAG; 2826 2827 sec_index = MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST) ? 2828 HTT_TXRX_SEC_MCAST : HTT_TXRX_SEC_UCAST; 2829 sec_type = peer->rx_pn[sec_index].sec_type; 2830 ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len); 2831 2832 switch (sec_type) { 2833 case HTT_SECURITY_TKIP: 2834 tkip_mic = HTT_RX_TKIP_MIC; 2835 ret = ath10k_htt_rx_frag_tkip_decap_withmic(skb, 2836 tot_hdr_len + 2837 rx_hl->fw_desc.len, 2838 hdr_space); 2839 if (ret) 2840 goto err; 2841 break; 2842 case HTT_SECURITY_TKIP_NOMIC: 2843 ret = ath10k_htt_rx_frag_tkip_decap_nomic(skb, 2844 tot_hdr_len + 2845 rx_hl->fw_desc.len, 2846 hdr_space); 2847 if (ret) 2848 goto err; 2849 break; 2850 case HTT_SECURITY_AES_CCMP: 2851 ret = ath10k_htt_rx_frag_ccmp_decap(skb, 2852 tot_hdr_len + rx_hl->fw_desc.len, 2853 hdr_space); 2854 if (ret) 2855 goto err; 2856 break; 2857 case HTT_SECURITY_WEP128: 2858 case HTT_SECURITY_WEP104: 2859 case HTT_SECURITY_WEP40: 2860 ret = ath10k_htt_rx_frag_wep_decap(skb, 2861 tot_hdr_len + rx_hl->fw_desc.len, 2862 hdr_space); 2863 if (ret) 2864 goto err; 2865 break; 2866 default: 2867 break; 2868 } 2869 2870 resp = (struct htt_resp *)(skb->data); 2871 2872 if (sec_type != HTT_SECURITY_AES_CCMP && 2873 sec_type != HTT_SECURITY_TKIP && 2874 sec_type != HTT_SECURITY_TKIP_NOMIC) { 2875 spin_unlock_bh(&ar->data_lock); 2876 return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb, 2877 HTT_RX_NON_PN_CHECK, 2878 HTT_RX_NON_TKIP_MIC); 2879 } 2880 2881 last_pn = &peer->frag_tids_last_pn[tid]; 2882 2883 if (frag == 0) { 2884 if (ath10k_htt_rx_pn_check_replay_hl(ar, peer, &resp->rx_ind_hl)) 2885 goto err; 2886 2887 last_pn->pn48 = new_pn.pn48; 2888 peer->frag_tids_seq[tid] = seq; 2889 } else if (sec_type == HTT_SECURITY_AES_CCMP) { 2890 if (seq != peer->frag_tids_seq[tid]) 2891 goto err; 2892 2893 if (new_pn.pn48 != last_pn->pn48 + 1) 2894 goto err; 2895 2896 last_pn->pn48 = new_pn.pn48; 2897 last_pn = &peer->tids_last_pn[tid]; 2898 last_pn->pn48 = new_pn.pn48; 2899 } 2900 2901 spin_unlock_bh(&ar->data_lock); 2902 2903 return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb, 2904 HTT_RX_NON_PN_CHECK, tkip_mic); 2905 2906 err: 2907 spin_unlock_bh(&ar->data_lock); 2908 2909 /* Tell the caller that it must free the skb since we have not 2910 * consumed it 2911 */ 2912 return true; 2913 } 2914 2915 static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt, 2916 struct htt_rx_indication *rx) 
2917 { 2918 struct ath10k *ar = htt->ar; 2919 struct htt_rx_indication_mpdu_range *mpdu_ranges; 2920 int num_mpdu_ranges; 2921 int i, mpdu_count = 0; 2922 u16 peer_id; 2923 u8 tid; 2924 2925 num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1), 2926 HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES); 2927 peer_id = __le16_to_cpu(rx->hdr.peer_id); 2928 tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID); 2929 2930 mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx); 2931 2932 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ", 2933 rx, struct_size(rx, mpdu_ranges, num_mpdu_ranges)); 2934 2935 for (i = 0; i < num_mpdu_ranges; i++) 2936 mpdu_count += mpdu_ranges[i].mpdu_count; 2937 2938 atomic_add(mpdu_count, &htt->num_mpdus_ready); 2939 2940 ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, mpdu_ranges, 2941 num_mpdu_ranges); 2942 } 2943 2944 static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar, 2945 struct sk_buff *skb) 2946 { 2947 struct ath10k_htt *htt = &ar->htt; 2948 struct htt_resp *resp = (struct htt_resp *)skb->data; 2949 struct htt_tx_done tx_done = {}; 2950 int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS); 2951 __le16 msdu_id, *msdus; 2952 bool rssi_enabled = false; 2953 u8 msdu_count = 0, num_airtime_records, tid; 2954 int i, htt_pad = 0; 2955 struct htt_data_tx_compl_ppdu_dur *ppdu_info; 2956 struct ath10k_peer *peer; 2957 u16 ppdu_info_offset = 0, peer_id; 2958 u32 tx_duration; 2959 2960 switch (status) { 2961 case HTT_DATA_TX_STATUS_NO_ACK: 2962 tx_done.status = HTT_TX_COMPL_STATE_NOACK; 2963 break; 2964 case HTT_DATA_TX_STATUS_OK: 2965 tx_done.status = HTT_TX_COMPL_STATE_ACK; 2966 break; 2967 case HTT_DATA_TX_STATUS_DISCARD: 2968 case HTT_DATA_TX_STATUS_POSTPONE: 2969 tx_done.status = HTT_TX_COMPL_STATE_DISCARD; 2970 break; 2971 default: 2972 ath10k_warn(ar, "unhandled tx completion status %d\n", status); 2973 tx_done.status = HTT_TX_COMPL_STATE_DISCARD; 2974 break; 2975 } 2976 2977 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n", 2978 resp->data_tx_completion.num_msdus); 2979 2980 msdu_count = resp->data_tx_completion.num_msdus; 2981 msdus = resp->data_tx_completion.msdus; 2982 rssi_enabled = ath10k_is_rssi_enable(&ar->hw_params, resp); 2983 2984 if (rssi_enabled) 2985 htt_pad = ath10k_tx_data_rssi_get_pad_bytes(&ar->hw_params, 2986 resp); 2987 2988 for (i = 0; i < msdu_count; i++) { 2989 msdu_id = msdus[i]; 2990 tx_done.msdu_id = __le16_to_cpu(msdu_id); 2991 2992 if (rssi_enabled) { 2993 /* Total no of MSDUs should be even, 2994 * if odd MSDUs are sent firmware fills 2995 * last msdu id with 0xffff 2996 */ 2997 if (msdu_count & 0x01) { 2998 msdu_id = msdus[msdu_count + i + 1 + htt_pad]; 2999 tx_done.ack_rssi = __le16_to_cpu(msdu_id); 3000 } else { 3001 msdu_id = msdus[msdu_count + i + htt_pad]; 3002 tx_done.ack_rssi = __le16_to_cpu(msdu_id); 3003 } 3004 } 3005 3006 /* kfifo_put: In practice firmware shouldn't fire off per-CE 3007 * interrupt and main interrupt (MSI/-X range case) for the same 3008 * HTC service so it should be safe to use kfifo_put w/o lock. 3009 * 3010 * From kfifo_put() documentation: 3011 * Note that with only one concurrent reader and one concurrent 3012 * writer, you don't need extra locking to use these macro. 
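		 * High latency (HL) devices do not queue to the txdone fifo
		 * here; their completions are processed inline right below.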
3013 */ 3014 if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) { 3015 ath10k_txrx_tx_unref(htt, &tx_done); 3016 } else if (!kfifo_put(&htt->txdone_fifo, tx_done)) { 3017 ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n", 3018 tx_done.msdu_id, tx_done.status); 3019 ath10k_txrx_tx_unref(htt, &tx_done); 3020 } 3021 } 3022 3023 if (!(resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT)) 3024 return; 3025 3026 ppdu_info_offset = (msdu_count & 0x01) ? msdu_count + 1 : msdu_count; 3027 3028 if (rssi_enabled) 3029 ppdu_info_offset += ppdu_info_offset; 3030 3031 if (resp->data_tx_completion.flags2 & 3032 (HTT_TX_CMPL_FLAG_PPID_PRESENT | HTT_TX_CMPL_FLAG_PA_PRESENT)) 3033 ppdu_info_offset += 2; 3034 3035 ppdu_info = (struct htt_data_tx_compl_ppdu_dur *)&msdus[ppdu_info_offset]; 3036 num_airtime_records = FIELD_GET(HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK, 3037 __le32_to_cpu(ppdu_info->info0)); 3038 3039 for (i = 0; i < num_airtime_records; i++) { 3040 struct htt_data_tx_ppdu_dur *ppdu_dur; 3041 u32 info0; 3042 3043 ppdu_dur = &ppdu_info->ppdu_dur[i]; 3044 info0 = __le32_to_cpu(ppdu_dur->info0); 3045 3046 peer_id = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK, 3047 info0); 3048 rcu_read_lock(); 3049 spin_lock_bh(&ar->data_lock); 3050 3051 peer = ath10k_peer_find_by_id(ar, peer_id); 3052 if (!peer || !peer->sta) { 3053 spin_unlock_bh(&ar->data_lock); 3054 rcu_read_unlock(); 3055 continue; 3056 } 3057 3058 tid = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_TID_MASK, info0) & 3059 IEEE80211_QOS_CTL_TID_MASK; 3060 tx_duration = __le32_to_cpu(ppdu_dur->tx_duration); 3061 3062 ieee80211_sta_register_airtime(peer->sta, tid, tx_duration, 0); 3063 3064 spin_unlock_bh(&ar->data_lock); 3065 rcu_read_unlock(); 3066 } 3067 } 3068 3069 static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp) 3070 { 3071 struct htt_rx_addba *ev = &resp->rx_addba; 3072 struct ath10k_peer *peer; 3073 struct ath10k_vif *arvif; 3074 u16 info0, tid, peer_id; 3075 3076 info0 = __le16_to_cpu(ev->info0); 3077 tid = MS(info0, HTT_RX_BA_INFO0_TID); 3078 peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID); 3079 3080 ath10k_dbg(ar, ATH10K_DBG_HTT, 3081 "htt rx addba tid %u peer_id %u size %u\n", 3082 tid, peer_id, ev->window_size); 3083 3084 spin_lock_bh(&ar->data_lock); 3085 peer = ath10k_peer_find_by_id(ar, peer_id); 3086 if (!peer) { 3087 ath10k_warn(ar, "received addba event for invalid peer_id: %u\n", 3088 peer_id); 3089 spin_unlock_bh(&ar->data_lock); 3090 return; 3091 } 3092 3093 arvif = ath10k_get_arvif(ar, peer->vdev_id); 3094 if (!arvif) { 3095 ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n", 3096 peer->vdev_id); 3097 spin_unlock_bh(&ar->data_lock); 3098 return; 3099 } 3100 3101 ath10k_dbg(ar, ATH10K_DBG_HTT, 3102 "htt rx start rx ba session sta %pM tid %u size %u\n", 3103 peer->addr, tid, ev->window_size); 3104 3105 ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid); 3106 spin_unlock_bh(&ar->data_lock); 3107 } 3108 3109 static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp) 3110 { 3111 struct htt_rx_delba *ev = &resp->rx_delba; 3112 struct ath10k_peer *peer; 3113 struct ath10k_vif *arvif; 3114 u16 info0, tid, peer_id; 3115 3116 info0 = __le16_to_cpu(ev->info0); 3117 tid = MS(info0, HTT_RX_BA_INFO0_TID); 3118 peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID); 3119 3120 ath10k_dbg(ar, ATH10K_DBG_HTT, 3121 "htt rx delba tid %u peer_id %u\n", 3122 tid, peer_id); 3123 3124 spin_lock_bh(&ar->data_lock); 3125 peer = ath10k_peer_find_by_id(ar, peer_id); 3126 if 
(!peer) {
		ath10k_warn(ar, "received delba event for invalid peer_id: %u\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx stop rx ba session sta %pM tid %u\n",
		   peer->addr, tid);

	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

static int ath10k_htt_rx_extract_amsdu(struct ath10k_hw_params *hw,
					struct sk_buff_head *list,
					struct sk_buff_head *amsdu)
{
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;
	struct rx_msdu_end_common *rxd_msdu_end_common;

	if (skb_queue_empty(list))
		return -ENOBUFS;

	if (WARN_ON(!skb_queue_empty(amsdu)))
		return -EINVAL;

	while ((msdu = __skb_dequeue(list))) {
		__skb_queue_tail(amsdu, msdu);

		rxd = HTT_RX_BUF_TO_RX_DESC(hw,
					    (void *)msdu->data -
					    hw->rx_desc_ops->rx_desc_size);

		rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
		if (rxd_msdu_end_common->info0 &
		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
			break;
	}

	msdu = skb_peek_tail(amsdu);
	rxd = HTT_RX_BUF_TO_RX_DESC(hw,
				    (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);

	rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
	if (!(rxd_msdu_end_common->info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
		skb_queue_splice_init(amsdu, list);
		return -EAGAIN;
	}

	return 0;
}

static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
					    struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!ieee80211_has_protected(hdr->frame_control))
		return;

	/* Offloaded frames are already decrypted but firmware insists they are
	 * protected in the 802.11 header. Strip the flag. Otherwise mac80211
	 * will drop the frame.
	 */

	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	status->flag |= RX_FLAG_DECRYPTED |
			RX_FLAG_IV_STRIPPED |
			RX_FLAG_MMIC_STRIPPED;
}

static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
				       struct sk_buff_head *list)
{
	struct ath10k_htt *htt = &ar->htt;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct htt_rx_offload_msdu *rx;
	struct sk_buff *msdu;
	size_t offset;

	while ((msdu = __skb_dequeue(list))) {
		/* Offloaded frames don't have an Rx descriptor. Instead they
		 * have a short meta information header.
		 */

		rx = (void *)msdu->data;

		skb_put(msdu, sizeof(*rx));
		skb_pull(msdu, sizeof(*rx));

		if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
			ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
			dev_kfree_skb_any(msdu);
			continue;
		}

		skb_put(msdu, __le16_to_cpu(rx->msdu_len));

		/* Offloaded rx header length isn't a multiple of 2 or 4 so the
		 * actual payload is unaligned. Align the frame. Otherwise
		 * mac80211 complains. This shouldn't reduce performance much
		 * because these offloaded frames are rare.
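		 * The payload is simply shifted forward to the next 4 byte
		 * boundary (see the memmove()/skb_pull() pair below).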
3240 */ 3241 offset = 4 - ((unsigned long)msdu->data & 3); 3242 skb_put(msdu, offset); 3243 memmove(msdu->data + offset, msdu->data, msdu->len); 3244 skb_pull(msdu, offset); 3245 3246 /* FIXME: The frame is NWifi. Re-construct QoS Control 3247 * if possible later. 3248 */ 3249 3250 memset(status, 0, sizeof(*status)); 3251 status->flag |= RX_FLAG_NO_SIGNAL_VAL; 3252 3253 ath10k_htt_rx_h_rx_offload_prot(status, msdu); 3254 ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id); 3255 ath10k_htt_rx_h_queue_msdu(ar, status, msdu); 3256 } 3257 } 3258 3259 static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb) 3260 { 3261 struct ath10k_htt *htt = &ar->htt; 3262 struct htt_resp *resp = (void *)skb->data; 3263 struct ieee80211_rx_status *status = &htt->rx_status; 3264 struct sk_buff_head list; 3265 struct sk_buff_head amsdu; 3266 u16 peer_id; 3267 u16 msdu_count; 3268 u8 vdev_id; 3269 u8 tid; 3270 bool offload; 3271 bool frag; 3272 int ret; 3273 3274 lockdep_assert_held(&htt->rx_ring.lock); 3275 3276 if (htt->rx_confused) 3277 return -EIO; 3278 3279 skb_pull(skb, sizeof(resp->hdr)); 3280 skb_pull(skb, sizeof(resp->rx_in_ord_ind)); 3281 3282 peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id); 3283 msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count); 3284 vdev_id = resp->rx_in_ord_ind.vdev_id; 3285 tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID); 3286 offload = !!(resp->rx_in_ord_ind.info & 3287 HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK); 3288 frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK); 3289 3290 ath10k_dbg(ar, ATH10K_DBG_HTT, 3291 "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n", 3292 vdev_id, peer_id, tid, offload, frag, msdu_count); 3293 3294 if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) { 3295 ath10k_warn(ar, "dropping invalid in order rx indication\n"); 3296 return -EINVAL; 3297 } 3298 3299 /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later 3300 * extracted and processed. 3301 */ 3302 __skb_queue_head_init(&list); 3303 if (ar->hw_params.target_64bit) 3304 ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind, 3305 &list); 3306 else 3307 ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind, 3308 &list); 3309 3310 if (ret < 0) { 3311 ath10k_warn(ar, "failed to pop paddr list: %d\n", ret); 3312 htt->rx_confused = true; 3313 return -EIO; 3314 } 3315 3316 /* Offloaded frames are very different and need to be handled 3317 * separately. 3318 */ 3319 if (offload) 3320 ath10k_htt_rx_h_rx_offload(ar, &list); 3321 3322 while (!skb_queue_empty(&list)) { 3323 __skb_queue_head_init(&amsdu); 3324 ret = ath10k_htt_rx_extract_amsdu(&ar->hw_params, &list, &amsdu); 3325 switch (ret) { 3326 case 0: 3327 /* Note: The in-order indication may report interleaved 3328 * frames from different PPDUs meaning reported rx rate 3329 * to mac80211 isn't accurate/reliable. It's still 3330 * better to report something than nothing though. This 3331 * should still give an idea about rx rate to the user. 3332 */ 3333 ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id); 3334 ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL); 3335 ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL, 3336 NULL, peer_id, frag); 3337 ath10k_htt_rx_h_enqueue(ar, &amsdu, status); 3338 break; 3339 case -EAGAIN: 3340 fallthrough; 3341 default: 3342 /* Should not happen. 
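			 * -EAGAIN from ath10k_htt_rx_extract_amsdu() means the
			 * popped list ended without a LAST_MSDU marker, i.e. a
			 * truncated A-MSDU.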
*/ 3343 ath10k_warn(ar, "failed to extract amsdu: %d\n", ret); 3344 htt->rx_confused = true; 3345 __skb_queue_purge(&list); 3346 return -EIO; 3347 } 3348 } 3349 return ret; 3350 } 3351 3352 static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar, 3353 const __le32 *resp_ids, 3354 int num_resp_ids) 3355 { 3356 int i; 3357 u32 resp_id; 3358 3359 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n", 3360 num_resp_ids); 3361 3362 for (i = 0; i < num_resp_ids; i++) { 3363 resp_id = le32_to_cpu(resp_ids[i]); 3364 3365 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n", 3366 resp_id); 3367 3368 /* TODO: free resp_id */ 3369 } 3370 } 3371 3372 static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb) 3373 { 3374 struct ieee80211_hw *hw = ar->hw; 3375 struct ieee80211_txq *txq; 3376 struct htt_resp *resp = (struct htt_resp *)skb->data; 3377 struct htt_tx_fetch_record *record; 3378 size_t len; 3379 size_t max_num_bytes; 3380 size_t max_num_msdus; 3381 size_t num_bytes; 3382 size_t num_msdus; 3383 const __le32 *resp_ids; 3384 u16 num_records; 3385 u16 num_resp_ids; 3386 u16 peer_id; 3387 u8 tid; 3388 int ret; 3389 int i; 3390 bool may_tx; 3391 3392 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n"); 3393 3394 len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind); 3395 if (unlikely(skb->len < len)) { 3396 ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n"); 3397 return; 3398 } 3399 3400 num_records = le16_to_cpu(resp->tx_fetch_ind.num_records); 3401 num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids); 3402 3403 len += sizeof(resp->tx_fetch_ind.records[0]) * num_records; 3404 len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids; 3405 3406 if (unlikely(skb->len < len)) { 3407 ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n"); 3408 return; 3409 } 3410 3411 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %u num resps %u seq %u\n", 3412 num_records, num_resp_ids, 3413 le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num)); 3414 3415 if (!ar->htt.tx_q_state.enabled) { 3416 ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n"); 3417 return; 3418 } 3419 3420 if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) { 3421 ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n"); 3422 return; 3423 } 3424 3425 rcu_read_lock(); 3426 3427 for (i = 0; i < num_records; i++) { 3428 record = &resp->tx_fetch_ind.records[i]; 3429 peer_id = MS(le16_to_cpu(record->info), 3430 HTT_TX_FETCH_RECORD_INFO_PEER_ID); 3431 tid = MS(le16_to_cpu(record->info), 3432 HTT_TX_FETCH_RECORD_INFO_TID); 3433 max_num_msdus = le16_to_cpu(record->num_msdus); 3434 max_num_bytes = le32_to_cpu(record->num_bytes); 3435 3436 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %u tid %u msdus %zu bytes %zu\n", 3437 i, peer_id, tid, max_num_msdus, max_num_bytes); 3438 3439 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) || 3440 unlikely(tid >= ar->htt.tx_q_state.num_tids)) { 3441 ath10k_warn(ar, "received out of range peer_id %u tid %u\n", 3442 peer_id, tid); 3443 continue; 3444 } 3445 3446 spin_lock_bh(&ar->data_lock); 3447 txq = ath10k_mac_txq_lookup(ar, peer_id, tid); 3448 spin_unlock_bh(&ar->data_lock); 3449 3450 /* It is okay to release the lock and use txq because RCU read 3451 * lock is held. 
	 */

		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %u tid %u\n",
				    peer_id, tid);
			continue;
		}

		num_msdus = 0;
		num_bytes = 0;

		ieee80211_txq_schedule_start(hw, txq->ac);
		may_tx = ieee80211_txq_may_transmit(hw, txq);
		while (num_msdus < max_num_msdus &&
		       num_bytes < max_num_bytes) {
			if (!may_tx)
				break;

			ret = ath10k_mac_tx_push_txq(hw, txq);
			if (ret < 0)
				break;

			num_msdus++;
			num_bytes += ret;
		}
		ieee80211_return_txq(hw, txq, false);
		ieee80211_txq_schedule_end(hw, txq->ac);

		record->num_msdus = cpu_to_le16(num_msdus);
		record->num_bytes = cpu_to_le32(num_bytes);

		ath10k_htt_tx_txq_recalc(hw, txq);
	}

	rcu_read_unlock();

	resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);

	ret = ath10k_htt_tx_fetch_resp(ar,
				       resp->tx_fetch_ind.token,
				       resp->tx_fetch_ind.fetch_seq_num,
				       resp->tx_fetch_ind.records,
				       num_records);
	if (unlikely(ret)) {
		ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
			    le32_to_cpu(resp->tx_fetch_ind.token), ret);
		/* FIXME: request fw restart */
	}

	ath10k_htt_tx_txq_sync(ar);
}

static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
					   struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	size_t len;
	int num_resp_ids;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
		return;
	}

	num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
	len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
		return;
	}

	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
					       resp->tx_fetch_confirm.resp_ids,
					       num_resp_ids);
}

static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
					     struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	const struct htt_tx_mode_switch_record *record;
	struct ieee80211_txq *txq;
	struct ath10k_txq *artxq;
	size_t len;
	size_t num_records;
	enum htt_tx_mode_switch_mode mode;
	bool enable;
	u16 info0;
	u16 info1;
	u16 threshold;
	u16 peer_id;
	u8 tid;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
		return;
	}

	info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
	info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);

	enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
	num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
	mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
	threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx tx mode switch ind info0 0x%04x info1 0x%04x enable %d num records %zd mode %d
threshold %u\n", 3569 info0, info1, enable, num_records, mode, threshold); 3570 3571 len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records; 3572 3573 if (unlikely(skb->len < len)) { 3574 ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n"); 3575 return; 3576 } 3577 3578 switch (mode) { 3579 case HTT_TX_MODE_SWITCH_PUSH: 3580 case HTT_TX_MODE_SWITCH_PUSH_PULL: 3581 break; 3582 default: 3583 ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n", 3584 mode); 3585 return; 3586 } 3587 3588 if (!enable) 3589 return; 3590 3591 ar->htt.tx_q_state.enabled = enable; 3592 ar->htt.tx_q_state.mode = mode; 3593 ar->htt.tx_q_state.num_push_allowed = threshold; 3594 3595 rcu_read_lock(); 3596 3597 for (i = 0; i < num_records; i++) { 3598 record = &resp->tx_mode_switch_ind.records[i]; 3599 info0 = le16_to_cpu(record->info0); 3600 peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID); 3601 tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID); 3602 3603 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) || 3604 unlikely(tid >= ar->htt.tx_q_state.num_tids)) { 3605 ath10k_warn(ar, "received out of range peer_id %u tid %u\n", 3606 peer_id, tid); 3607 continue; 3608 } 3609 3610 spin_lock_bh(&ar->data_lock); 3611 txq = ath10k_mac_txq_lookup(ar, peer_id, tid); 3612 spin_unlock_bh(&ar->data_lock); 3613 3614 /* It is okay to release the lock and use txq because RCU read 3615 * lock is held. 3616 */ 3617 3618 if (unlikely(!txq)) { 3619 ath10k_warn(ar, "failed to lookup txq for peer_id %u tid %u\n", 3620 peer_id, tid); 3621 continue; 3622 } 3623 3624 spin_lock_bh(&ar->htt.tx_lock); 3625 artxq = (void *)txq->drv_priv; 3626 artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus); 3627 spin_unlock_bh(&ar->htt.tx_lock); 3628 } 3629 3630 rcu_read_unlock(); 3631 3632 ath10k_mac_tx_push_pending(ar); 3633 } 3634 3635 void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) 3636 { 3637 bool release; 3638 3639 release = ath10k_htt_t2h_msg_handler(ar, skb); 3640 3641 /* Free the indication buffer */ 3642 if (release) 3643 dev_kfree_skb_any(skb); 3644 } 3645 3646 static inline s8 ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate) 3647 { 3648 static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12, 3649 18, 24, 36, 48, 54}; 3650 int i; 3651 3652 for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) { 3653 if (rate == legacy_rates[i]) 3654 return i; 3655 } 3656 3657 ath10k_warn(ar, "Invalid legacy rate %d peer stats", rate); 3658 return -EINVAL; 3659 } 3660 3661 static void 3662 ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar, 3663 struct ath10k_sta *arsta, 3664 struct ath10k_per_peer_tx_stats *pstats, 3665 s8 legacy_rate_idx) 3666 { 3667 struct rate_info *txrate = &arsta->txrate; 3668 struct ath10k_htt_tx_stats *tx_stats; 3669 int idx, ht_idx, gi, mcs, bw, nss; 3670 unsigned long flags; 3671 3672 if (!arsta->tx_stats) 3673 return; 3674 3675 tx_stats = arsta->tx_stats; 3676 flags = txrate->flags; 3677 gi = test_bit(ATH10K_RATE_INFO_FLAGS_SGI_BIT, &flags); 3678 mcs = ATH10K_HW_MCS_RATE(pstats->ratecode); 3679 bw = txrate->bw; 3680 nss = txrate->nss; 3681 ht_idx = mcs + (nss - 1) * 8; 3682 idx = mcs * 8 + 8 * 10 * (nss - 1); 3683 idx += bw * 2 + gi; 3684 3685 #define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name] 3686 3687 if (txrate->flags & RATE_INFO_FLAGS_VHT_MCS) { 3688 STATS_OP_FMT(SUCC).vht[0][mcs] += pstats->succ_bytes; 3689 STATS_OP_FMT(SUCC).vht[1][mcs] += pstats->succ_pkts; 3690 
STATS_OP_FMT(FAIL).vht[0][mcs] += pstats->failed_bytes; 3691 STATS_OP_FMT(FAIL).vht[1][mcs] += pstats->failed_pkts; 3692 STATS_OP_FMT(RETRY).vht[0][mcs] += pstats->retry_bytes; 3693 STATS_OP_FMT(RETRY).vht[1][mcs] += pstats->retry_pkts; 3694 } else if (txrate->flags & RATE_INFO_FLAGS_MCS) { 3695 STATS_OP_FMT(SUCC).ht[0][ht_idx] += pstats->succ_bytes; 3696 STATS_OP_FMT(SUCC).ht[1][ht_idx] += pstats->succ_pkts; 3697 STATS_OP_FMT(FAIL).ht[0][ht_idx] += pstats->failed_bytes; 3698 STATS_OP_FMT(FAIL).ht[1][ht_idx] += pstats->failed_pkts; 3699 STATS_OP_FMT(RETRY).ht[0][ht_idx] += pstats->retry_bytes; 3700 STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts; 3701 } else { 3702 mcs = legacy_rate_idx; 3703 3704 STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes; 3705 STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts; 3706 STATS_OP_FMT(FAIL).legacy[0][mcs] += pstats->failed_bytes; 3707 STATS_OP_FMT(FAIL).legacy[1][mcs] += pstats->failed_pkts; 3708 STATS_OP_FMT(RETRY).legacy[0][mcs] += pstats->retry_bytes; 3709 STATS_OP_FMT(RETRY).legacy[1][mcs] += pstats->retry_pkts; 3710 } 3711 3712 if (ATH10K_HW_AMPDU(pstats->flags)) { 3713 tx_stats->ba_fails += ATH10K_HW_BA_FAIL(pstats->flags); 3714 3715 if (txrate->flags & RATE_INFO_FLAGS_MCS) { 3716 STATS_OP_FMT(AMPDU).ht[0][ht_idx] += 3717 pstats->succ_bytes + pstats->retry_bytes; 3718 STATS_OP_FMT(AMPDU).ht[1][ht_idx] += 3719 pstats->succ_pkts + pstats->retry_pkts; 3720 } else { 3721 STATS_OP_FMT(AMPDU).vht[0][mcs] += 3722 pstats->succ_bytes + pstats->retry_bytes; 3723 STATS_OP_FMT(AMPDU).vht[1][mcs] += 3724 pstats->succ_pkts + pstats->retry_pkts; 3725 } 3726 STATS_OP_FMT(AMPDU).bw[0][bw] += 3727 pstats->succ_bytes + pstats->retry_bytes; 3728 STATS_OP_FMT(AMPDU).nss[0][nss - 1] += 3729 pstats->succ_bytes + pstats->retry_bytes; 3730 STATS_OP_FMT(AMPDU).gi[0][gi] += 3731 pstats->succ_bytes + pstats->retry_bytes; 3732 STATS_OP_FMT(AMPDU).rate_table[0][idx] += 3733 pstats->succ_bytes + pstats->retry_bytes; 3734 STATS_OP_FMT(AMPDU).bw[1][bw] += 3735 pstats->succ_pkts + pstats->retry_pkts; 3736 STATS_OP_FMT(AMPDU).nss[1][nss - 1] += 3737 pstats->succ_pkts + pstats->retry_pkts; 3738 STATS_OP_FMT(AMPDU).gi[1][gi] += 3739 pstats->succ_pkts + pstats->retry_pkts; 3740 STATS_OP_FMT(AMPDU).rate_table[1][idx] += 3741 pstats->succ_pkts + pstats->retry_pkts; 3742 } else { 3743 tx_stats->ack_fails += 3744 ATH10K_HW_BA_FAIL(pstats->flags); 3745 } 3746 3747 STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes; 3748 STATS_OP_FMT(SUCC).nss[0][nss - 1] += pstats->succ_bytes; 3749 STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes; 3750 3751 STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts; 3752 STATS_OP_FMT(SUCC).nss[1][nss - 1] += pstats->succ_pkts; 3753 STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts; 3754 3755 STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes; 3756 STATS_OP_FMT(FAIL).nss[0][nss - 1] += pstats->failed_bytes; 3757 STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes; 3758 3759 STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts; 3760 STATS_OP_FMT(FAIL).nss[1][nss - 1] += pstats->failed_pkts; 3761 STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts; 3762 3763 STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes; 3764 STATS_OP_FMT(RETRY).nss[0][nss - 1] += pstats->retry_bytes; 3765 STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes; 3766 3767 STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts; 3768 STATS_OP_FMT(RETRY).nss[1][nss - 1] += pstats->retry_pkts; 3769 STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts; 3770 3771 if (txrate->flags >= 
RATE_INFO_FLAGS_MCS) { 3772 STATS_OP_FMT(SUCC).rate_table[0][idx] += pstats->succ_bytes; 3773 STATS_OP_FMT(SUCC).rate_table[1][idx] += pstats->succ_pkts; 3774 STATS_OP_FMT(FAIL).rate_table[0][idx] += pstats->failed_bytes; 3775 STATS_OP_FMT(FAIL).rate_table[1][idx] += pstats->failed_pkts; 3776 STATS_OP_FMT(RETRY).rate_table[0][idx] += pstats->retry_bytes; 3777 STATS_OP_FMT(RETRY).rate_table[1][idx] += pstats->retry_pkts; 3778 } 3779 3780 tx_stats->tx_duration += pstats->duration; 3781 } 3782 3783 static void 3784 ath10k_update_per_peer_tx_stats(struct ath10k *ar, 3785 struct ieee80211_sta *sta, 3786 struct ath10k_per_peer_tx_stats *peer_stats) 3787 { 3788 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 3789 struct ieee80211_chanctx_conf *conf = NULL; 3790 u8 rate = 0, sgi; 3791 s8 rate_idx = 0; 3792 bool skip_auto_rate; 3793 struct rate_info txrate; 3794 3795 lockdep_assert_held(&ar->data_lock); 3796 3797 txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode); 3798 txrate.bw = ATH10K_HW_BW(peer_stats->flags); 3799 txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode); 3800 txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode); 3801 sgi = ATH10K_HW_GI(peer_stats->flags); 3802 skip_auto_rate = ATH10K_FW_SKIPPED_RATE_CTRL(peer_stats->flags); 3803 3804 /* Firmware's rate control skips broadcast/management frames, 3805 * if the host has configured fixed rates and in some other special cases. 3806 */ 3807 if (skip_auto_rate) 3808 return; 3809 3810 if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) { 3811 ath10k_warn(ar, "Invalid VHT mcs %d peer stats", txrate.mcs); 3812 return; 3813 } 3814 3815 if (txrate.flags == WMI_RATE_PREAMBLE_HT && 3816 (txrate.mcs > 7 || txrate.nss < 1)) { 3817 ath10k_warn(ar, "Invalid HT mcs %d nss %d peer stats", 3818 txrate.mcs, txrate.nss); 3819 return; 3820 } 3821 3822 memset(&arsta->txrate, 0, sizeof(arsta->txrate)); 3823 memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status)); 3824 if (txrate.flags == WMI_RATE_PREAMBLE_CCK || 3825 txrate.flags == WMI_RATE_PREAMBLE_OFDM) { 3826 rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode); 3827 /* This is hacky, FW sends CCK rate 5.5Mbps as 6 */ 3828 if (rate == 6 && txrate.flags == WMI_RATE_PREAMBLE_CCK) 3829 rate = 5; 3830 rate_idx = ath10k_get_legacy_rate_idx(ar, rate); 3831 if (rate_idx < 0) 3832 return; 3833 arsta->txrate.legacy = rate; 3834 } else if (txrate.flags == WMI_RATE_PREAMBLE_HT) { 3835 arsta->txrate.flags = RATE_INFO_FLAGS_MCS; 3836 arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1); 3837 } else { 3838 arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS; 3839 arsta->txrate.mcs = txrate.mcs; 3840 } 3841 3842 switch (txrate.flags) { 3843 case WMI_RATE_PREAMBLE_OFDM: 3844 if (arsta->arvif && arsta->arvif->vif) 3845 conf = rcu_dereference(arsta->arvif->vif->bss_conf.chanctx_conf); 3846 if (conf && conf->def.chan->band == NL80211_BAND_5GHZ) 3847 arsta->tx_info.status.rates[0].idx = rate_idx - 4; 3848 break; 3849 case WMI_RATE_PREAMBLE_CCK: 3850 arsta->tx_info.status.rates[0].idx = rate_idx; 3851 if (sgi) 3852 arsta->tx_info.status.rates[0].flags |= 3853 (IEEE80211_TX_RC_USE_SHORT_PREAMBLE | 3854 IEEE80211_TX_RC_SHORT_GI); 3855 break; 3856 case WMI_RATE_PREAMBLE_HT: 3857 arsta->tx_info.status.rates[0].idx = 3858 txrate.mcs + ((txrate.nss - 1) * 8); 3859 if (sgi) 3860 arsta->tx_info.status.rates[0].flags |= 3861 IEEE80211_TX_RC_SHORT_GI; 3862 arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS; 3863 break; 3864 case WMI_RATE_PREAMBLE_VHT: 3865
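/* For VHT, ieee80211_rate_set_vht() packs both MCS and NSS into rates[0].idx, so no hand-computed index is needed here, unlike the HT case above. */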
ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0], 3866 txrate.mcs, txrate.nss); 3867 if (sgi) 3868 arsta->tx_info.status.rates[0].flags |= 3869 IEEE80211_TX_RC_SHORT_GI; 3870 arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS; 3871 break; 3872 } 3873 3874 arsta->txrate.nss = txrate.nss; 3875 arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw); 3876 arsta->last_tx_bitrate = cfg80211_calculate_bitrate(&arsta->txrate); 3877 if (sgi) 3878 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 3879 3880 switch (arsta->txrate.bw) { 3881 case RATE_INFO_BW_40: 3882 arsta->tx_info.status.rates[0].flags |= 3883 IEEE80211_TX_RC_40_MHZ_WIDTH; 3884 break; 3885 case RATE_INFO_BW_80: 3886 arsta->tx_info.status.rates[0].flags |= 3887 IEEE80211_TX_RC_80_MHZ_WIDTH; 3888 break; 3889 case RATE_INFO_BW_160: 3890 arsta->tx_info.status.rates[0].flags |= 3891 IEEE80211_TX_RC_160_MHZ_WIDTH; 3892 break; 3893 } 3894 3895 if (peer_stats->succ_pkts) { 3896 arsta->tx_info.flags = IEEE80211_TX_STAT_ACK; 3897 arsta->tx_info.status.rates[0].count = 1; 3898 ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info); 3899 } 3900 3901 if (ar->htt.disable_tx_comp) { 3902 arsta->tx_failed += peer_stats->failed_pkts; 3903 ath10k_dbg(ar, ATH10K_DBG_HTT, "tx failed %d\n", 3904 arsta->tx_failed); 3905 } 3906 3907 arsta->tx_retries += peer_stats->retry_pkts; 3908 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx retries %d", arsta->tx_retries); 3909 3910 if (ath10k_debug_is_extd_tx_stats_enabled(ar)) 3911 ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats, 3912 rate_idx); 3913 } 3914 3915 static void ath10k_htt_fetch_peer_stats(struct ath10k *ar, 3916 struct sk_buff *skb) 3917 { 3918 struct htt_resp *resp = (struct htt_resp *)skb->data; 3919 struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats; 3920 struct htt_per_peer_tx_stats_ind *tx_stats; 3921 struct ieee80211_sta *sta; 3922 struct ath10k_peer *peer; 3923 int peer_id, i; 3924 u8 ppdu_len, num_ppdu; 3925 3926 num_ppdu = resp->peer_tx_stats.num_ppdu; 3927 ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32); 3928 3929 if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) { 3930 ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len); 3931 return; 3932 } 3933 3934 tx_stats = (struct htt_per_peer_tx_stats_ind *) 3935 (resp->peer_tx_stats.payload); 3936 peer_id = __le16_to_cpu(tx_stats->peer_id); 3937 3938 rcu_read_lock(); 3939 spin_lock_bh(&ar->data_lock); 3940 peer = ath10k_peer_find_by_id(ar, peer_id); 3941 if (!peer || !peer->sta) { 3942 ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n", 3943 peer_id); 3944 goto out; 3945 } 3946 3947 sta = peer->sta; 3948 for (i = 0; i < num_ppdu; i++) { 3949 tx_stats = (struct htt_per_peer_tx_stats_ind *) 3950 (resp->peer_tx_stats.payload + i * ppdu_len); 3951 3952 p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes); 3953 p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes); 3954 p_tx_stats->failed_bytes = 3955 __le32_to_cpu(tx_stats->failed_bytes); 3956 p_tx_stats->ratecode = tx_stats->ratecode; 3957 p_tx_stats->flags = tx_stats->flags; 3958 p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts); 3959 p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts); 3960 p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts); 3961 p_tx_stats->duration = __le16_to_cpu(tx_stats->tx_duration); 3962 3963 ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats); 3964 } 3965 3966 out: 3967 spin_unlock_bh(&ar->data_lock); 3968 rcu_read_unlock(); 3969 } 3970 3971 static void 
ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data) 3972 { 3973 struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data; 3974 struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats; 3975 struct ath10k_10_2_peer_tx_stats *tx_stats; 3976 struct ieee80211_sta *sta; 3977 struct ath10k_peer *peer; 3978 u16 log_type = __le16_to_cpu(hdr->log_type); 3979 u32 peer_id = 0, i; 3980 3981 if (log_type != ATH_PKTLOG_TYPE_TX_STAT) 3982 return; 3983 3984 tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) + 3985 ATH10K_10_2_TX_STATS_OFFSET); 3986 3987 if (!tx_stats->tx_ppdu_cnt) 3988 return; 3989 3990 peer_id = tx_stats->peer_id; 3991 3992 rcu_read_lock(); 3993 spin_lock_bh(&ar->data_lock); 3994 peer = ath10k_peer_find_by_id(ar, peer_id); 3995 if (!peer || !peer->sta) { 3996 ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n", 3997 peer_id); 3998 goto out; 3999 } 4000 4001 sta = peer->sta; 4002 for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) { 4003 p_tx_stats->succ_bytes = 4004 __le16_to_cpu(tx_stats->success_bytes[i]); 4005 p_tx_stats->retry_bytes = 4006 __le16_to_cpu(tx_stats->retry_bytes[i]); 4007 p_tx_stats->failed_bytes = 4008 __le16_to_cpu(tx_stats->failed_bytes[i]); 4009 p_tx_stats->ratecode = tx_stats->ratecode[i]; 4010 p_tx_stats->flags = tx_stats->flags[i]; 4011 p_tx_stats->succ_pkts = tx_stats->success_pkts[i]; 4012 p_tx_stats->retry_pkts = tx_stats->retry_pkts[i]; 4013 p_tx_stats->failed_pkts = tx_stats->failed_pkts[i]; 4014 4015 ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats); 4016 } 4017 spin_unlock_bh(&ar->data_lock); 4018 rcu_read_unlock(); 4019 4020 return; 4021 4022 out: 4023 spin_unlock_bh(&ar->data_lock); 4024 rcu_read_unlock(); 4025 } 4026 4027 static int ath10k_htt_rx_pn_len(enum htt_security_types sec_type) 4028 { 4029 switch (sec_type) { 4030 case HTT_SECURITY_TKIP: 4031 case HTT_SECURITY_TKIP_NOMIC: 4032 case HTT_SECURITY_AES_CCMP: 4033 return 48; 4034 default: 4035 return 0; 4036 } 4037 } 4038 4039 static void ath10k_htt_rx_sec_ind_handler(struct ath10k *ar, 4040 struct htt_security_indication *ev) 4041 { 4042 enum htt_txrx_sec_cast_type sec_index; 4043 enum htt_security_types sec_type; 4044 struct ath10k_peer *peer; 4045 4046 spin_lock_bh(&ar->data_lock); 4047 4048 peer = ath10k_peer_find_by_id(ar, __le16_to_cpu(ev->peer_id)); 4049 if (!peer) { 4050 ath10k_warn(ar, "failed to find peer id %d for security indication", 4051 __le16_to_cpu(ev->peer_id)); 4052 goto out; 4053 } 4054 4055 sec_type = MS(ev->flags, HTT_SECURITY_TYPE); 4056 4057 if (ev->flags & HTT_SECURITY_IS_UNICAST) 4058 sec_index = HTT_TXRX_SEC_UCAST; 4059 else 4060 sec_index = HTT_TXRX_SEC_MCAST; 4061 4062 peer->rx_pn[sec_index].sec_type = sec_type; 4063 peer->rx_pn[sec_index].pn_len = ath10k_htt_rx_pn_len(sec_type); 4064 4065 memset(peer->tids_last_pn_valid, 0, sizeof(peer->tids_last_pn_valid)); 4066 memset(peer->tids_last_pn, 0, sizeof(peer->tids_last_pn)); 4067 4068 out: 4069 spin_unlock_bh(&ar->data_lock); 4070 } 4071 4072 bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) 4073 { 4074 struct ath10k_htt *htt = &ar->htt; 4075 struct htt_resp *resp = (struct htt_resp *)skb->data; 4076 enum htt_t2h_msg_type type; 4077 4078 /* confirm alignment */ 4079 if (!IS_ALIGNED((unsigned long)skb->data, 4)) 4080 ath10k_warn(ar, "unaligned htt message, expect trouble\n"); 4081 4082 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n", 4083 resp->hdr.msg_type); 4084 4085 if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) { 4086 ath10k_dbg(ar, ATH10K_DBG_HTT, 
"htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X", 4087 resp->hdr.msg_type, ar->htt.t2h_msg_types_max); 4088 return true; 4089 } 4090 type = ar->htt.t2h_msg_types[resp->hdr.msg_type]; 4091 4092 switch (type) { 4093 case HTT_T2H_MSG_TYPE_VERSION_CONF: { 4094 htt->target_version_major = resp->ver_resp.major; 4095 htt->target_version_minor = resp->ver_resp.minor; 4096 complete(&htt->target_version_received); 4097 break; 4098 } 4099 case HTT_T2H_MSG_TYPE_RX_IND: 4100 if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) { 4101 ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind); 4102 } else { 4103 skb_queue_tail(&htt->rx_indication_head, skb); 4104 return false; 4105 } 4106 break; 4107 case HTT_T2H_MSG_TYPE_PEER_MAP: { 4108 struct htt_peer_map_event ev = { 4109 .vdev_id = resp->peer_map.vdev_id, 4110 .peer_id = __le16_to_cpu(resp->peer_map.peer_id), 4111 }; 4112 memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr)); 4113 ath10k_peer_map_event(htt, &ev); 4114 break; 4115 } 4116 case HTT_T2H_MSG_TYPE_PEER_UNMAP: { 4117 struct htt_peer_unmap_event ev = { 4118 .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id), 4119 }; 4120 ath10k_peer_unmap_event(htt, &ev); 4121 break; 4122 } 4123 case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: { 4124 struct htt_tx_done tx_done = {}; 4125 struct ath10k_htt *htt = &ar->htt; 4126 struct ath10k_htc *htc = &ar->htc; 4127 struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid]; 4128 int status = __le32_to_cpu(resp->mgmt_tx_completion.status); 4129 int info = __le32_to_cpu(resp->mgmt_tx_completion.info); 4130 4131 tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id); 4132 4133 switch (status) { 4134 case HTT_MGMT_TX_STATUS_OK: 4135 tx_done.status = HTT_TX_COMPL_STATE_ACK; 4136 if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, 4137 ar->wmi.svc_map) && 4138 (resp->mgmt_tx_completion.flags & 4139 HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) { 4140 tx_done.ack_rssi = 4141 FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK, 4142 info); 4143 } 4144 break; 4145 case HTT_MGMT_TX_STATUS_RETRY: 4146 tx_done.status = HTT_TX_COMPL_STATE_NOACK; 4147 break; 4148 case HTT_MGMT_TX_STATUS_DROP: 4149 tx_done.status = HTT_TX_COMPL_STATE_DISCARD; 4150 break; 4151 } 4152 4153 if (htt->disable_tx_comp) { 4154 spin_lock_bh(&htc->tx_lock); 4155 ep->tx_credits++; 4156 spin_unlock_bh(&htc->tx_lock); 4157 } 4158 4159 status = ath10k_txrx_tx_unref(htt, &tx_done); 4160 if (!status) { 4161 spin_lock_bh(&htt->tx_lock); 4162 ath10k_htt_tx_mgmt_dec_pending(htt); 4163 spin_unlock_bh(&htt->tx_lock); 4164 } 4165 break; 4166 } 4167 case HTT_T2H_MSG_TYPE_TX_COMPL_IND: 4168 ath10k_htt_rx_tx_compl_ind(htt->ar, skb); 4169 break; 4170 case HTT_T2H_MSG_TYPE_SEC_IND: { 4171 struct ath10k *ar = htt->ar; 4172 struct htt_security_indication *ev = &resp->security_indication; 4173 4174 ath10k_htt_rx_sec_ind_handler(ar, ev); 4175 ath10k_dbg(ar, ATH10K_DBG_HTT, 4176 "sec ind peer_id %d unicast %d type %d\n", 4177 __le16_to_cpu(ev->peer_id), 4178 !!(ev->flags & HTT_SECURITY_IS_UNICAST), 4179 MS(ev->flags, HTT_SECURITY_TYPE)); 4180 complete(&ar->install_key_done); 4181 break; 4182 } 4183 case HTT_T2H_MSG_TYPE_RX_FRAG_IND: { 4184 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", 4185 skb->data, skb->len); 4186 atomic_inc(&htt->num_mpdus_ready); 4187 4188 return ath10k_htt_rx_proc_rx_frag_ind(htt, 4189 &resp->rx_frag_ind, 4190 skb); 4191 } 4192 case HTT_T2H_MSG_TYPE_TEST: 4193 break; 4194 case HTT_T2H_MSG_TYPE_STATS_CONF: 4195 trace_ath10k_htt_stats(ar, skb->data, skb->len); 4196 break; 4197 case 
HTT_T2H_MSG_TYPE_TX_INSPECT_IND: 4198 /* Firmware can return tx frames if it's unable to fully 4199 * process them and suspects host may be able to fix it. ath10k 4200 * sends all tx frames as already inspected so this shouldn't 4201 * happen unless fw has a bug. 4202 */ 4203 ath10k_warn(ar, "received an unexpected htt tx inspect event\n"); 4204 break; 4205 case HTT_T2H_MSG_TYPE_RX_ADDBA: 4206 ath10k_htt_rx_addba(ar, resp); 4207 break; 4208 case HTT_T2H_MSG_TYPE_RX_DELBA: 4209 ath10k_htt_rx_delba(ar, resp); 4210 break; 4211 case HTT_T2H_MSG_TYPE_PKTLOG: { 4212 trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload, 4213 skb->len - 4214 offsetof(struct htt_resp, 4215 pktlog_msg.payload)); 4216 4217 if (ath10k_peer_stats_enabled(ar)) 4218 ath10k_fetch_10_2_tx_stats(ar, 4219 resp->pktlog_msg.payload); 4220 break; 4221 } 4222 case HTT_T2H_MSG_TYPE_RX_FLUSH: { 4223 /* Ignore this event because mac80211 takes care of Rx 4224 * aggregation reordering. 4225 */ 4226 break; 4227 } 4228 case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: { 4229 skb_queue_tail(&htt->rx_in_ord_compl_q, skb); 4230 return false; 4231 } 4232 case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND: { 4233 struct ath10k_htt *htt = &ar->htt; 4234 struct ath10k_htc *htc = &ar->htc; 4235 struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid]; 4236 u32 msg_word = __le32_to_cpu(*(__le32 *)resp); 4237 int htt_credit_delta; 4238 4239 htt_credit_delta = HTT_TX_CREDIT_DELTA_ABS_GET(msg_word); 4240 if (HTT_TX_CREDIT_SIGN_BIT_GET(msg_word)) 4241 htt_credit_delta = -htt_credit_delta; 4242 4243 ath10k_dbg(ar, ATH10K_DBG_HTT, 4244 "htt credit update delta %d\n", 4245 htt_credit_delta); 4246 4247 if (htt->disable_tx_comp) { 4248 spin_lock_bh(&htc->tx_lock); 4249 ep->tx_credits += htt_credit_delta; 4250 spin_unlock_bh(&htc->tx_lock); 4251 ath10k_dbg(ar, ATH10K_DBG_HTT, 4252 "htt credit total %d\n", 4253 ep->tx_credits); 4254 ep->ep_ops.ep_tx_credits(htc->ar); 4255 } 4256 break; 4257 } 4258 case HTT_T2H_MSG_TYPE_CHAN_CHANGE: { 4259 u32 phymode = __le32_to_cpu(resp->chan_change.phymode); 4260 u32 freq = __le32_to_cpu(resp->chan_change.freq); 4261 4262 ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq); 4263 ath10k_dbg(ar, ATH10K_DBG_HTT, 4264 "htt chan change freq %u phymode %s\n", 4265 freq, ath10k_wmi_phymode_str(phymode)); 4266 break; 4267 } 4268 case HTT_T2H_MSG_TYPE_AGGR_CONF: 4269 break; 4270 case HTT_T2H_MSG_TYPE_TX_FETCH_IND: { 4271 struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC); 4272 4273 if (!tx_fetch_ind) { 4274 ath10k_warn(ar, "failed to copy htt tx fetch ind\n"); 4275 break; 4276 } 4277 skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind); 4278 break; 4279 } 4280 case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM: 4281 ath10k_htt_rx_tx_fetch_confirm(ar, skb); 4282 break; 4283 case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND: 4284 ath10k_htt_rx_tx_mode_switch_ind(ar, skb); 4285 break; 4286 case HTT_T2H_MSG_TYPE_PEER_STATS: 4287 ath10k_htt_fetch_peer_stats(ar, skb); 4288 break; 4289 case HTT_T2H_MSG_TYPE_EN_STATS: 4290 default: 4291 ath10k_warn(ar, "htt event (%d) not handled\n", 4292 resp->hdr.msg_type); 4293 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", 4294 skb->data, skb->len); 4295 break; 4296 } 4297 return true; 4298 } 4299 EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler); 4300 4301 void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar, 4302 struct sk_buff *skb) 4303 { 4304 trace_ath10k_htt_pktlog(ar, skb->data, skb->len); 4305 dev_kfree_skb_any(skb); 4306 } 4307 EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler); 4308 4309 
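/* Drain up to (budget - quota) MSDUs from htt.rx_msdus_q into mac80211 via ath10k_process_rx() and return the updated quota, so the NAPI poll loop below can count the delivered frames against its budget. */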
static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget) 4310 { 4311 struct sk_buff *skb; 4312 4313 while (quota < budget) { 4314 if (skb_queue_empty(&ar->htt.rx_msdus_q)) 4315 break; 4316 4317 skb = skb_dequeue(&ar->htt.rx_msdus_q); 4318 if (!skb) 4319 break; 4320 ath10k_process_rx(ar, skb); 4321 quota++; 4322 } 4323 4324 return quota; 4325 } 4326 4327 int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget) 4328 { 4329 struct htt_resp *resp; 4330 struct ath10k_htt *htt = &ar->htt; 4331 struct sk_buff *skb; 4332 bool release; 4333 int quota; 4334 4335 for (quota = 0; quota < budget; quota++) { 4336 skb = skb_dequeue(&htt->rx_indication_head); 4337 if (!skb) 4338 break; 4339 4340 resp = (struct htt_resp *)skb->data; 4341 4342 release = ath10k_htt_rx_proc_rx_ind_hl(htt, 4343 &resp->rx_ind_hl, 4344 skb, 4345 HTT_RX_PN_CHECK, 4346 HTT_RX_NON_TKIP_MIC); 4347 4348 if (release) 4349 dev_kfree_skb_any(skb); 4350 4351 ath10k_dbg(ar, ATH10K_DBG_HTT, "rx indication poll pending count:%d\n", 4352 skb_queue_len(&htt->rx_indication_head)); 4353 } 4354 return quota; 4355 } 4356 EXPORT_SYMBOL(ath10k_htt_rx_hl_indication); 4357 4358 int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget) 4359 { 4360 struct ath10k_htt *htt = &ar->htt; 4361 struct htt_tx_done tx_done = {}; 4362 struct sk_buff_head tx_ind_q; 4363 struct sk_buff *skb; 4364 unsigned long flags; 4365 int quota = 0, done, ret; 4366 bool resched_napi = false; 4367 4368 __skb_queue_head_init(&tx_ind_q); 4369 4370 /* Process pending frames before dequeuing more data 4371 * from hardware. 4372 */ 4373 quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget); 4374 if (quota == budget) { 4375 resched_napi = true; 4376 goto exit; 4377 } 4378 4379 while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) { 4380 spin_lock_bh(&htt->rx_ring.lock); 4381 ret = ath10k_htt_rx_in_ord_ind(ar, skb); 4382 spin_unlock_bh(&htt->rx_ring.lock); 4383 4384 dev_kfree_skb_any(skb); 4385 if (ret == -EIO) { 4386 resched_napi = true; 4387 goto exit; 4388 } 4389 } 4390 4391 while (atomic_read(&htt->num_mpdus_ready)) { 4392 ret = ath10k_htt_rx_handle_amsdu(htt); 4393 if (ret == -EIO) { 4394 resched_napi = true; 4395 goto exit; 4396 } 4397 atomic_dec(&htt->num_mpdus_ready); 4398 } 4399 4400 /* Deliver received data after processing data from hardware */ 4401 quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget); 4402 4403 /* From NAPI documentation: 4404 * The napi poll() function may also process TX completions, in which 4405 * case if it processes the entire TX ring then it should count that 4406 * work as the rest of the budget. 4407 */ 4408 if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo)) 4409 quota = budget; 4410 4411 /* kfifo_get: called only within txrx_tasklet so it's neatly serialized. 4412 * From kfifo_get() documentation: 4413 * Note that with only one concurrent reader and one concurrent writer, 4414 * you don't need extra locking to use these macro. 
4415 */ 4416 while (kfifo_get(&htt->txdone_fifo, &tx_done)) 4417 ath10k_txrx_tx_unref(htt, &tx_done); 4418 4419 ath10k_mac_tx_push_pending(ar); 4420 4421 spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags); 4422 skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q); 4423 spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags); 4424 4425 while ((skb = __skb_dequeue(&tx_ind_q))) { 4426 ath10k_htt_rx_tx_fetch_ind(ar, skb); 4427 dev_kfree_skb_any(skb); 4428 } 4429 4430 exit: 4431 ath10k_htt_rx_msdu_buff_replenish(htt); 4432 /* In case of rx failure or more data to read, report budget 4433 * to reschedule NAPI poll 4434 */ 4435 done = resched_napi ? budget : quota; 4436 4437 return done; 4438 } 4439 EXPORT_SYMBOL(ath10k_htt_txrx_compl_task); 4440 4441 static const struct ath10k_htt_rx_ops htt_rx_ops_32 = { 4442 .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32, 4443 .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32, 4444 .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32, 4445 .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32, 4446 .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32, 4447 }; 4448 4449 static const struct ath10k_htt_rx_ops htt_rx_ops_64 = { 4450 .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64, 4451 .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64, 4452 .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64, 4453 .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64, 4454 .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64, 4455 }; 4456 4457 static const struct ath10k_htt_rx_ops htt_rx_ops_hl = { 4458 .htt_rx_proc_rx_frag_ind = ath10k_htt_rx_proc_rx_frag_ind_hl, 4459 }; 4460 4461 void ath10k_htt_set_rx_ops(struct ath10k_htt *htt) 4462 { 4463 struct ath10k *ar = htt->ar; 4464 4465 if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) 4466 htt->rx_ops = &htt_rx_ops_hl; 4467 else if (ar->hw_params.target_64bit) 4468 htt->rx_ops = &htt_rx_ops_64; 4469 else 4470 htt->rx_ops = &htt_rx_ops_32; 4471 } 4472
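/* Illustrative sketch, not part of the driver: with the low-latency ops tables
 * above, rx ring setup is expected to indirect through htt->rx_ops instead of
 * branching on the target type, e.g. (size, vaddr and paddr are assumed
 * locals):
 *
 *	size = htt->rx_ops->htt_get_rx_ring_size(htt);
 *	vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
 *	htt->rx_ops->htt_config_paddrs_ring(htt, vaddr);
 *
 * This lets the 32-bit and 64-bit paddr ring layouts share one allocation
 * path, while the high-latency table only provides the fragment-indication
 * hook.
 */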