Lines matching "queue", "pkt", "rx"

1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright (C) 2024-2025 Intel Corporation
11 #include "iwl-trans.h"
15 #include "fw/api/mac-cfg.h"
16 #include "session-protect.h"
17 #include "fw/api/time-event.h"
28 #include "rx.h"
37 #include "ftm-initiator.h"
54 /* Use this for Rx handlers that do not need notification validation */
70 struct iwl_rx_packet *pkt, \
73 const struct notif_struct *notif = (const void *)pkt->data; \
75 return obj_id == _Generic((notif)->id_member, \
76 __le32: le32_to_cpu((notif)->id_member), \
77 __le16: le16_to_cpu((notif)->id_member), \
78 u8: (notif)->id_member); \
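The _Generic selection in the macro above picks the right endian conversion for the notification's ID field at compile time, so one macro can generate cancel predicates for __le32, __le16 and u8 ID members alike. A minimal userspace sketch of the same pattern, not driver code; the demo_* names are invented and __le32/__le16 are plain typedef stand-ins for the kernel's annotated types:

#include <endian.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t __le32;	/* stand-ins for the kernel's endian types */
typedef uint16_t __le16;

struct demo_notif {
	__le32 link_id;
};

/* Generate a predicate that converts id_member to CPU order based on its
 * declared type, then compares it against the object ID being cancelled.
 */
#define DEMO_CANCEL_NOTIF(name, notif_struct, id_member)		\
static bool demo_cancel_##name(const struct notif_struct *notif,	\
			       uint32_t obj_id)				\
{									\
	return obj_id == _Generic((notif)->id_member,			\
				  __le32: le32toh((notif)->id_member),	\
				  __le16: le16toh((notif)->id_member),	\
				  uint8_t: (notif)->id_member);		\
}

DEMO_CANCEL_NOTIF(link, demo_notif, link_id)

int main(void)
{
	struct demo_notif notif = { .link_id = htole32(5) };

	printf("matches link 5: %d\n", demo_cancel_link(&notif, 5));
	printf("matches link 9: %d\n", demo_cancel_link(&notif, 9));
	return 0;
}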
82 struct iwl_rx_packet *pkt, in iwl_mld_always_cancel() argument
122 struct iwl_rx_packet *pkt) in iwl_mld_handle_mfuart_notif() argument
124 struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data; in iwl_mld_handle_mfuart_notif()
128 le32_to_cpu(mfuart_notif->installed_ver), in iwl_mld_handle_mfuart_notif()
129 le32_to_cpu(mfuart_notif->external_ver)); in iwl_mld_handle_mfuart_notif()
132 le32_to_cpu(mfuart_notif->status), in iwl_mld_handle_mfuart_notif()
133 le32_to_cpu(mfuart_notif->duration), in iwl_mld_handle_mfuart_notif()
134 le32_to_cpu(mfuart_notif->image_size)); in iwl_mld_handle_mfuart_notif()
140 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; in iwl_mld_mu_mimo_iface_iterator()
143 if (WARN(hweight16(vif->active_links) > 1, in iwl_mld_mu_mimo_iface_iterator()
145 vif->active_links)) in iwl_mld_mu_mimo_iface_iterator()
149 link_id = __ffs(vif->active_links); in iwl_mld_mu_mimo_iface_iterator()
153 if (!WARN_ON(!bss_conf) && bss_conf->mu_mimo_owner) { in iwl_mld_mu_mimo_iface_iterator()
156 BUILD_BUG_ON(sizeof(notif->membership_status) != in iwl_mld_mu_mimo_iface_iterator()
158 BUILD_BUG_ON(sizeof(notif->user_position) != in iwl_mld_mu_mimo_iface_iterator()
161 /* MU-MIMO Group Id action frame is little endian. We treat in iwl_mld_mu_mimo_iface_iterator()
166 (u8 *)&notif->membership_status, in iwl_mld_mu_mimo_iface_iterator()
167 (u8 *)&notif->user_position); in iwl_mld_mu_mimo_iface_iterator()
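The BUILD_BUG_ON pair above pins the firmware notification fields to the sizes mac80211 expects for the over-the-air MU-MIMO group frame. A sketch of that compile-time technique in portable C11, using static_assert; both struct layouts below are invented for the demo:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fw_mu_group_notif {		/* invented firmware-side layout */
	uint32_t membership_status[2];
	uint32_t user_position[4];
};

struct airframe_mu_group {		/* invented stack-side layout */
	uint8_t membership[8];
	uint8_t position[16];
};

/* Fail the build, not the runtime, if the two layouts ever diverge. */
static_assert(sizeof(((struct fw_mu_group_notif *)0)->membership_status) ==
	      sizeof(((struct airframe_mu_group *)0)->membership),
	      "membership layouts must match");
static_assert(sizeof(((struct fw_mu_group_notif *)0)->user_position) ==
	      sizeof(((struct airframe_mu_group *)0)->position),
	      "user position layouts must match");

int main(void)
{
	struct fw_mu_group_notif notif = { .membership_status = { 0x1, 0 } };
	struct airframe_mu_group frame;

	/* Safe only because the asserts above guarantee equal sizes. */
	memcpy(frame.membership, notif.membership_status,
	       sizeof(frame.membership));
	memcpy(frame.position, notif.user_position, sizeof(frame.position));
	printf("first membership byte: 0x%02x\n", frame.membership[0]);
	return 0;
}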
172 * Rx as specified in ieee80211_update_mu_groups()'s documentation.
175 struct iwl_rx_packet *pkt) in iwl_mld_handle_mu_mimo_grp_notif() argument
177 struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data; in iwl_mld_handle_mu_mimo_grp_notif()
179 ieee80211_iterate_active_interfaces_atomic(mld->hw, in iwl_mld_handle_mu_mimo_grp_notif()
187 struct iwl_rx_packet *pkt) in iwl_mld_handle_stored_beacon_notif() argument
189 unsigned int pkt_len = iwl_rx_packet_payload_len(pkt); in iwl_mld_handle_stored_beacon_notif()
190 struct iwl_stored_beacon_notif *sb = (void *)pkt->data; in iwl_mld_handle_stored_beacon_notif()
193 u32 size = le32_to_cpu(sb->common.byte_count); in iwl_mld_handle_stored_beacon_notif()
208 rx_status.mactime = le64_to_cpu(sb->common.tsf); in iwl_mld_handle_stored_beacon_notif()
211 rx_status.device_timestamp = le32_to_cpu(sb->common.system_time); in iwl_mld_handle_stored_beacon_notif()
213 iwl_mld_phy_band_to_nl80211(le16_to_cpu(sb->common.band)); in iwl_mld_handle_stored_beacon_notif()
215 ieee80211_channel_to_frequency(le16_to_cpu(sb->common.channel), in iwl_mld_handle_stored_beacon_notif()
219 skb_put_data(skb, sb->data, size); in iwl_mld_handle_stored_beacon_notif()
222 /* pass it as regular rx to mac80211 */ in iwl_mld_handle_stored_beacon_notif()
223 ieee80211_rx_napi(mld->hw, NULL, skb, NULL); in iwl_mld_handle_stored_beacon_notif()
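The handler above captures both the true payload length (pkt_len) and a byte count the device reports inside the payload (size); any copy driven by the reported count has to be bounded by the real length first. A standalone sketch of that validation, with invented names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_stored_beacon {
	uint32_t byte_count;	/* device-reported, untrusted */
	uint8_t data[];		/* beacon frame follows */
};

static bool demo_copy_beacon(const void *payload, size_t payload_len,
			     uint8_t *out, size_t out_len)
{
	const struct demo_stored_beacon *sb = payload;
	size_t size;

	/* The header must fit before byte_count may be read at all. */
	if (payload_len < sizeof(*sb))
		return false;

	size = sb->byte_count;

	/* The reported frame must fit in both the payload and the output. */
	if (size > payload_len - sizeof(*sb) || size > out_len)
		return false;

	memcpy(out, sb->data, size);
	return true;
}

int main(void)
{
	size_t frame_len = 4;
	struct demo_stored_beacon *sb = malloc(sizeof(*sb) + frame_len);
	uint8_t out[16];
	bool ok;

	if (!sb)
		return 1;
	sb->byte_count = (uint32_t)frame_len;
	memset(sb->data, 0xab, frame_len);

	ok = demo_copy_beacon(sb, sizeof(*sb) + frame_len, out, sizeof(out));
	printf("copy ok: %d\n", ok);
	free(sb);
	return ok ? 0 : 1;
}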
228 struct iwl_rx_packet *pkt) in iwl_mld_handle_channel_switch_start_notif() argument
230 struct iwl_channel_switch_start_notif *notif = (void *)pkt->data; in iwl_mld_handle_channel_switch_start_notif()
231 u32 link_id = le32_to_cpu(notif->link_id); in iwl_mld_handle_channel_switch_start_notif()
239 vif = link_conf->vif; in iwl_mld_handle_channel_switch_start_notif()
243 vif->type, in iwl_mld_handle_channel_switch_start_notif()
244 link_conf->link_id); in iwl_mld_handle_channel_switch_start_notif()
246 switch (vif->type) { in iwl_mld_handle_channel_switch_start_notif()
251 if (!link_conf->csa_active) in iwl_mld_handle_channel_switch_start_notif()
254 ieee80211_csa_finish(vif, link_conf->link_id); in iwl_mld_handle_channel_switch_start_notif()
257 if (!link_conf->csa_active) { in iwl_mld_handle_channel_switch_start_notif()
274 ieee80211_chswitch_done(vif, true, link_conf->link_id); in iwl_mld_handle_channel_switch_start_notif()
278 WARN(1, "CSA on invalid vif type: %d", vif->type); in iwl_mld_handle_channel_switch_start_notif()
284 struct iwl_rx_packet *pkt) in iwl_mld_handle_channel_switch_error_notif() argument
286 struct iwl_channel_switch_error_notif *notif = (void *)pkt->data; in iwl_mld_handle_channel_switch_error_notif()
289 u32 link_id = le32_to_cpu(notif->link_id); in iwl_mld_handle_channel_switch_error_notif()
290 u32 csa_err_mask = le32_to_cpu(notif->csa_err_mask); in iwl_mld_handle_channel_switch_error_notif()
296 vif = link_conf->vif; in iwl_mld_handle_channel_switch_error_notif()
308 struct iwl_rx_packet *pkt) in iwl_mld_handle_beacon_notification() argument
310 struct iwl_extended_beacon_notif *beacon = (void *)pkt->data; in iwl_mld_handle_beacon_notification()
312 mld->ibss_manager = !!beacon->ibss_mgr_status; in iwl_mld_handle_beacon_notification()
417 * - RX_HANDLER_SYNC: will be called as part of the Rx path
418 * - RX_HANDLER_ASYNC: will be handled in a worker with the wiphy_lock held
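A rough userspace sketch of the two contexts described above, with invented demo_* names: SYNC handlers run inline on the Rx path, while ASYNC handlers are deferred to a worker (stubbed out here) that would run with the wiphy lock held:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum demo_ctx { DEMO_HANDLER_SYNC, DEMO_HANDLER_ASYNC };

struct demo_handler {
	uint16_t cmd_id;
	enum demo_ctx context;
	void (*fn)(const void *pkt);
};

static void demo_handle_beacon(const void *pkt)
{
	(void)pkt;
	puts("beacon handled inline");
}

static void demo_handle_csa(const void *pkt)
{
	(void)pkt;
	puts("csa handled from worker");
}

static const struct demo_handler demo_handlers[] = {
	{ 0x90, DEMO_HANDLER_SYNC, demo_handle_beacon },
	{ 0xaa, DEMO_HANDLER_ASYNC, demo_handle_csa },
};

static void demo_defer_to_worker(const struct demo_handler *h, const void *pkt)
{
	/* Real code would queue pkt and run h->fn from a worker that
	 * holds the wiphy lock; here the deferral is only reported.
	 */
	(void)pkt;
	printf("deferring cmd 0x%x to worker\n", h->cmd_id);
}

static void demo_rx_notif(uint16_t cmd_id, const void *pkt)
{
	for (size_t i = 0; i < sizeof(demo_handlers) / sizeof(demo_handlers[0]); i++) {
		const struct demo_handler *h = &demo_handlers[i];

		if (h->cmd_id != cmd_id)
			continue;

		if (h->context == DEMO_HANDLER_SYNC)
			h->fn(pkt);	/* safe without the wiphy lock */
		else
			demo_defer_to_worker(h, pkt);
	}
}

int main(void)
{
	demo_rx_notif(0x90, NULL);
	demo_rx_notif(0xaa, NULL);
	return 0;
}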
515 iwl_mld_notif_is_valid(struct iwl_mld *mld, struct iwl_rx_packet *pkt, in iwl_mld_notif_is_valid() argument
518 unsigned int size = iwl_rx_packet_payload_len(pkt); in iwl_mld_notif_is_valid()
524 if (!handler->n_sizes) { in iwl_mld_notif_is_valid()
525 if (handler->val_fn) in iwl_mld_notif_is_valid()
526 return handler->val_fn(mld, pkt); in iwl_mld_notif_is_valid()
530 notif_ver = iwl_fw_lookup_notif_ver(mld->fw, in iwl_mld_notif_is_valid()
531 iwl_cmd_groupid(handler->cmd_id), in iwl_mld_notif_is_valid()
532 iwl_cmd_opcode(handler->cmd_id), in iwl_mld_notif_is_valid()
535 for (int i = 0; i < handler->n_sizes; i++) { in iwl_mld_notif_is_valid()
536 if (handler->sizes[i].ver != notif_ver) in iwl_mld_notif_is_valid()
539 if (IWL_FW_CHECK(mld, size < handler->sizes[i].size, in iwl_mld_notif_is_valid()
541 handler->cmd_id, size, handler->sizes[i].size)) in iwl_mld_notif_is_valid()
548 handler->cmd_id, notif_ver, in iwl_mld_notif_is_valid()
549 handler->sizes[handler->n_sizes - 1].ver); in iwl_mld_notif_is_valid()
551 return size >= handler->sizes[handler->n_sizes - 1].size; in iwl_mld_notif_is_valid()
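A minimal sketch of the versioned size check above, with invented names and sizes: each known notification version maps to the smallest payload that version may carry, and an unknown firmware version falls back to the last table entry:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_notif_size {
	uint8_t ver;
	size_t size;
};

/* Newest first; the last entry doubles as the unknown-version fallback. */
static const struct demo_notif_size demo_sizes[] = {
	{ .ver = 2, .size = 24 },
	{ .ver = 1, .size = 16 },
};

static bool demo_notif_is_valid(uint8_t fw_ver, size_t payload_len)
{
	size_t n = sizeof(demo_sizes) / sizeof(demo_sizes[0]);

	for (size_t i = 0; i < n; i++) {
		if (demo_sizes[i].ver != fw_ver)
			continue;
		return payload_len >= demo_sizes[i].size;
	}

	/* Version not in the table: use the last known size. */
	return payload_len >= demo_sizes[n - 1].size;
}

int main(void)
{
	printf("v2, 24 bytes: %d\n", demo_notif_is_valid(2, 24));	/* 1 */
	printf("v2, 20 bytes: %d\n", demo_notif_is_valid(2, 20));	/* 0 */
	printf("v9, 16 bytes: %d\n", demo_notif_is_valid(9, 16));	/* 1 */
	return 0;
}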
564 struct iwl_rx_packet *pkt = rxb_addr(rxb); in iwl_mld_log_async_handler_op() local
568 op, iwl_get_cmd_string(mld->trans, in iwl_mld_log_async_handler_op()
569 WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)), in iwl_mld_log_async_handler_op()
570 pkt->hdr.group_id, pkt->hdr.cmd, in iwl_mld_log_async_handler_op()
571 le16_to_cpu(pkt->hdr.sequence)); in iwl_mld_log_async_handler_op()
576 struct iwl_rx_packet *pkt) in iwl_mld_rx_notif() argument
582 if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) in iwl_mld_rx_notif()
585 if (!iwl_mld_notif_is_valid(mld, pkt, rx_h)) in iwl_mld_rx_notif()
588 if (rx_h->context == RX_HANDLER_SYNC) { in iwl_mld_rx_notif()
589 rx_h->fn(mld, pkt); in iwl_mld_rx_notif()
599 entry->rxb._page = rxb_steal_page(rxb); in iwl_mld_rx_notif()
600 entry->rxb._offset = rxb->_offset; in iwl_mld_rx_notif()
601 entry->rxb._rx_page_order = rxb->_rx_page_order; in iwl_mld_rx_notif()
603 entry->rx_h = rx_h; in iwl_mld_rx_notif()
605 /* Add it to the list and queue the work */ in iwl_mld_rx_notif()
606 spin_lock(&mld->async_handlers_lock); in iwl_mld_rx_notif()
607 list_add_tail(&entry->list, &mld->async_handlers_list); in iwl_mld_rx_notif()
608 spin_unlock(&mld->async_handlers_lock); in iwl_mld_rx_notif()
610 wiphy_work_queue(mld->hw->wiphy, in iwl_mld_rx_notif()
611 &mld->async_handlers_wk); in iwl_mld_rx_notif()
617 iwl_notification_wait_notify(&mld->notif_wait, pkt); in iwl_mld_rx_notif()
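The enqueue path above appends the stolen buffer under a spinlock and kicks a wiphy worker; iwl_mld_async_handlers_wk, further down, splices the whole list out under the same lock and handles the entries lock-free. A userspace sketch of that defer-then-drain pattern, using a pthread mutex in place of the spinlock and invented names:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_entry {
	struct demo_entry *next;
	int notif_id;
};

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_entry *demo_head;
static struct demo_entry **demo_tail = &demo_head;

/* Rx path: append under the lock, then (in real code) kick the worker. */
static void demo_enqueue(int notif_id)
{
	struct demo_entry *e = calloc(1, sizeof(*e));

	if (!e)
		return;
	e->notif_id = notif_id;

	pthread_mutex_lock(&demo_lock);
	*demo_tail = e;
	demo_tail = &e->next;
	pthread_mutex_unlock(&demo_lock);
}

/* Worker: detach the whole list under the lock, then handle it lock-free. */
static void demo_worker(void)
{
	struct demo_entry *list;

	pthread_mutex_lock(&demo_lock);
	list = demo_head;
	demo_head = NULL;
	demo_tail = &demo_head;
	pthread_mutex_unlock(&demo_lock);

	while (list) {
		struct demo_entry *e = list;

		list = e->next;
		printf("handling notif %d\n", e->notif_id);
		free(e);
	}
}

int main(void)
{
	demo_enqueue(1);
	demo_enqueue(2);
	demo_worker();
	return 0;
}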
623 struct iwl_rx_packet *pkt = rxb_addr(rxb); in iwl_mld_rx() local
625 u16 cmd_id = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); in iwl_mld_rx()
630 iwl_mld_handle_frame_release_notif(mld, napi, pkt, 0); in iwl_mld_rx()
632 iwl_mld_handle_bar_frame_release_notif(mld, napi, pkt, 0); in iwl_mld_rx()
635 iwl_mld_handle_rx_queues_sync_notif(mld, napi, pkt, 0); in iwl_mld_rx()
637 iwl_mld_rx_monitor_no_data(mld, napi, pkt, 0); in iwl_mld_rx()
639 iwl_mld_rx_notif(mld, rxb, pkt); in iwl_mld_rx()
643 struct iwl_rx_cmd_buffer *rxb, unsigned int queue) in iwl_mld_rx_rss() argument
645 struct iwl_rx_packet *pkt = rxb_addr(rxb); in iwl_mld_rx_rss() local
647 u16 cmd_id = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); in iwl_mld_rx_rss()
649 if (unlikely(queue >= mld->trans->num_rx_queues)) in iwl_mld_rx_rss()
653 iwl_mld_rx_mpdu(mld, napi, rxb, queue); in iwl_mld_rx_rss()
656 iwl_mld_handle_rx_queues_sync_notif(mld, napi, pkt, queue); in iwl_mld_rx_rss()
658 iwl_mld_handle_frame_release_notif(mld, napi, pkt, queue); in iwl_mld_rx_rss()
665 spin_lock_bh(&mld->async_handlers_lock); in iwl_mld_delete_handlers()
666 list_for_each_entry_safe(entry, tmp, &mld->async_handlers_list, list) { in iwl_mld_delete_handlers()
670 if (entry->rx_h->cmd_id == cmds[i]) { in iwl_mld_delete_handlers()
679 iwl_mld_log_async_handler_op(mld, "Delete", &entry->rxb); in iwl_mld_delete_handlers()
680 iwl_free_rxb(&entry->rxb); in iwl_mld_delete_handlers()
681 list_del(&entry->list); in iwl_mld_delete_handlers()
684 spin_unlock_bh(&mld->async_handlers_lock); in iwl_mld_delete_handlers()
694 /* Sync with Rx path with a lock. Remove all the entries from this in iwl_mld_async_handlers_wk()
697 spin_lock_bh(&mld->async_handlers_lock); in iwl_mld_async_handlers_wk()
698 list_splice_init(&mld->async_handlers_list, &local_list); in iwl_mld_async_handlers_wk()
699 spin_unlock_bh(&mld->async_handlers_lock); in iwl_mld_async_handlers_wk()
702 iwl_mld_log_async_handler_op(mld, "Handle", &entry->rxb); in iwl_mld_async_handlers_wk()
703 entry->rx_h->fn(mld, rxb_addr(&entry->rxb)); in iwl_mld_async_handlers_wk()
704 iwl_free_rxb(&entry->rxb); in iwl_mld_async_handlers_wk()
705 list_del(&entry->list); in iwl_mld_async_handlers_wk()
714 spin_lock_bh(&mld->async_handlers_lock); in iwl_mld_purge_async_handlers_list()
715 list_for_each_entry_safe(entry, tmp, &mld->async_handlers_list, list) { in iwl_mld_purge_async_handlers_list()
716 iwl_mld_log_async_handler_op(mld, "Purged", &entry->rxb); in iwl_mld_purge_async_handlers_list()
717 iwl_free_rxb(&entry->rxb); in iwl_mld_purge_async_handlers_list()
718 list_del(&entry->list); in iwl_mld_purge_async_handlers_list()
721 spin_unlock_bh(&mld->async_handlers_lock); in iwl_mld_purge_async_handlers_list()
731 lockdep_assert_wiphy(mld->wiphy); in iwl_mld_cancel_notifications_of_object()
736 /* Sync with RX path and remove matching entries from the async list */ in iwl_mld_cancel_notifications_of_object()
737 spin_lock_bh(&mld->async_handlers_lock); in iwl_mld_cancel_notifications_of_object()
738 list_for_each_entry_safe(entry, tmp, &mld->async_handlers_list, list) { in iwl_mld_cancel_notifications_of_object()
739 const struct iwl_rx_handler *rx_h = entry->rx_h; in iwl_mld_cancel_notifications_of_object()
741 if (rx_h->obj_type != obj_type || WARN_ON(!rx_h->cancel)) in iwl_mld_cancel_notifications_of_object()
744 if (rx_h->cancel(mld, rxb_addr(&entry->rxb), obj_id)) { in iwl_mld_cancel_notifications_of_object()
745 iwl_mld_log_async_handler_op(mld, "Cancel", &entry->rxb); in iwl_mld_cancel_notifications_of_object()
746 list_del(&entry->list); in iwl_mld_cancel_notifications_of_object()
747 list_add_tail(&entry->list, &cancel_list); in iwl_mld_cancel_notifications_of_object()
751 spin_unlock_bh(&mld->async_handlers_lock); in iwl_mld_cancel_notifications_of_object()
755 iwl_free_rxb(&entry->rxb); in iwl_mld_cancel_notifications_of_object()
756 list_del(&entry->list); in iwl_mld_cancel_notifications_of_object()
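The cancel path above is two-phase: matching entries are unlinked into a local cancel list while the lock is held, and only freed after it is dropped, keeping the critical section short. A standalone sketch of that pattern, with invented names:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_pending {
	struct demo_pending *next;
	int obj_id;
};

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_pending *demo_pending_head;

static void demo_cancel_for_object(int obj_id)
{
	struct demo_pending *cancel_list = NULL, *e, **pp;

	/* Phase 1: unlink matching entries while holding the lock. */
	pthread_mutex_lock(&demo_lock);
	pp = &demo_pending_head;
	while ((e = *pp)) {
		if (e->obj_id == obj_id) {
			*pp = e->next;		/* unlink */
			e->next = cancel_list;	/* collect locally */
			cancel_list = e;
		} else {
			pp = &e->next;
		}
	}
	pthread_mutex_unlock(&demo_lock);

	/* Phase 2: free the collected entries outside the lock. */
	while ((e = cancel_list)) {
		cancel_list = e->next;
		printf("cancelled notif for object %d\n", e->obj_id);
		free(e);
	}
}

int main(void)
{
	for (int id = 1; id <= 3; id++) {
		struct demo_pending *e = calloc(1, sizeof(*e));

		if (!e)
			return 1;
		e->obj_id = id % 2;	/* objects 0 and 1 interleaved */
		e->next = demo_pending_head;
		demo_pending_head = e;
	}

	demo_cancel_for_object(1);
	return 0;
}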