// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2025 Intel Corporation
 */
#include <linux/etherdevice.h>
#include <linux/math64.h>
#include <net/cfg80211.h>
#include "mld.h"
#include "iface.h"
#include "phy.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "constants.h"
#include "fw/api/location.h"
#include "ftm-initiator.h"

static void iwl_mld_ftm_cmd_common(struct iwl_mld *mld,
				   struct ieee80211_vif *vif,
				   struct iwl_tof_range_req_cmd *cmd,
				   struct cfg80211_pmsr_request *req)
{
	int i;

	cmd->initiator_flags =
		cpu_to_le32(IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM |
			    IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT);
	cmd->request_id = req->cookie;
	cmd->num_of_ap = req->n_peers;

	/* Use a large value for "no timeout". Don't use the maximum value
	 * because of fw limitations.
	 */
	if (req->timeout)
		cmd->req_timeout_ms = cpu_to_le32(min(req->timeout, 0xfffff));
	else
		cmd->req_timeout_ms = cpu_to_le32(0xfffff);

	memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
	for (i = 0; i < ETH_ALEN; i++)
		cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];

	if (vif->cfg.assoc) {
		memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);

		/* AP's TSF is only relevant if associated */
		for (i = 0; i < req->n_peers; i++) {
			if (req->peers[i].report_ap_tsf) {
				struct iwl_mld_vif *mld_vif =
					iwl_mld_vif_from_mac80211(vif);

				cmd->tsf_mac_id = cpu_to_le32(mld_vif->fw_id);
				return;
			}
		}
	} else {
		eth_broadcast_addr(cmd->range_req_bssid);
	}

	/* Don't report AP's TSF */
	cmd->tsf_mac_id = cpu_to_le32(0xff);
}

static int
iwl_mld_ftm_set_target_chandef(struct iwl_mld *mld,
			       struct cfg80211_pmsr_request_peer *peer,
			       struct iwl_tof_range_req_ap_entry *target)
{
	u32 freq = peer->chandef.chan->center_freq;

	target->channel_num = ieee80211_frequency_to_channel(freq);

	switch (peer->chandef.width) {
	case NL80211_CHAN_WIDTH_20_NOHT:
		target->format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY;
		target->format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_20:
		target->format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
		target->format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_40:
		target->format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
		target->format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_80:
		target->format_bw = IWL_LOCATION_FRAME_FORMAT_VHT;
		target->format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_160:
		target->format_bw = IWL_LOCATION_FRAME_FORMAT_HE;
		target->format_bw |= IWL_LOCATION_BW_160MHZ << LOCATION_BW_POS;
		break;
	default:
		IWL_ERR(mld, "Unsupported BW in FTM request (%d)\n",
			peer->chandef.width);
		return -EINVAL;
	}

	/* non EDCA based measurement must use HE preamble */
	if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
		target->format_bw |= IWL_LOCATION_FRAME_FORMAT_HE;

	target->ctrl_ch_position =
		(peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
		iwl_mld_get_fw_ctrl_pos(&peer->chandef) : 0;

	target->band = iwl_mld_nl80211_band_to_fw(peer->chandef.chan->band);
	return 0;
}

#define FTM_SET_FLAG(flag) (target->initiator_ap_flags |= \
			    cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag))

static void
iwl_mld_ftm_set_target_flags(struct iwl_mld *mld,
			     struct cfg80211_pmsr_request_peer *peer,
			     struct iwl_tof_range_req_ap_entry *target)
{
	target->initiator_ap_flags = cpu_to_le32(0);

	if (peer->ftm.asap)
		FTM_SET_FLAG(ASAP);

	if (peer->ftm.request_lci)
		FTM_SET_FLAG(LCI_REQUEST);

	if (peer->ftm.request_civicloc)
		FTM_SET_FLAG(CIVIC_REQUEST);

	if (IWL_MLD_FTM_INITIATOR_DYNACK)
		FTM_SET_FLAG(DYN_ACK);

	if (IWL_MLD_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_LINEAR_REG)
		FTM_SET_FLAG(ALGO_LR);
	else if (IWL_MLD_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT)
		FTM_SET_FLAG(ALGO_FFT);

	if (peer->ftm.trigger_based)
		FTM_SET_FLAG(TB);
	else if (peer->ftm.non_trigger_based)
		FTM_SET_FLAG(NON_TB);

	if ((peer->ftm.trigger_based || peer->ftm.non_trigger_based) &&
	    peer->ftm.lmr_feedback)
		FTM_SET_FLAG(LMR_FEEDBACK);
}

static void iwl_mld_ftm_set_sta(struct iwl_mld *mld, struct ieee80211_vif *vif,
				struct cfg80211_pmsr_request_peer *peer,
				struct iwl_tof_range_req_ap_entry *target)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	u32 sta_id_mask;

	target->sta_id = IWL_INVALID_STA;

	/* TODO: add ftm_unprotected debugfs support */

	if (!vif->cfg.assoc || !mld_vif->ap_sta)
		return;

	sta_id_mask = iwl_mld_fw_sta_id_mask(mld, mld_vif->ap_sta);
	if (WARN_ON(hweight32(sta_id_mask) != 1))
		return;

	target->sta_id = __ffs(sta_id_mask);

	if (mld_vif->ap_sta->mfp &&
	    (peer->ftm.trigger_based || peer->ftm.non_trigger_based))
		FTM_SET_FLAG(PMF);
}

static int
iwl_mld_ftm_set_target(struct iwl_mld *mld, struct ieee80211_vif *vif,
		       struct cfg80211_pmsr_request_peer *peer,
		       struct iwl_tof_range_req_ap_entry *target)
{
	u32 i2r_max_sts;
	int ret;

	ret = iwl_mld_ftm_set_target_chandef(mld, peer, target);
	if (ret)
		return ret;

	memcpy(target->bssid, peer->addr, ETH_ALEN);
	target->burst_period = cpu_to_le16(peer->ftm.burst_period);
	target->samples_per_burst = peer->ftm.ftms_per_burst;
	target->num_of_bursts = peer->ftm.num_bursts_exp;
	iwl_mld_ftm_set_target_flags(mld, peer, target);
	iwl_mld_ftm_set_sta(mld, vif, peer, target);

	/* TODO: add secured ranging support */

	i2r_max_sts = IWL_MLD_FTM_I2R_MAX_STS > 1 ? 1 :
		IWL_MLD_FTM_I2R_MAX_STS;

	target->r2i_ndp_params = IWL_MLD_FTM_R2I_MAX_REP |
		(IWL_MLD_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS) |
		(IWL_MLD_FTM_R2I_MAX_TOTAL_LTF << IWL_LOCATION_TOTAL_LTF_POS);
	target->i2r_ndp_params = IWL_MLD_FTM_I2R_MAX_REP |
		(i2r_max_sts << IWL_LOCATION_MAX_STS_POS) |
		(IWL_MLD_FTM_I2R_MAX_TOTAL_LTF << IWL_LOCATION_TOTAL_LTF_POS);

	if (peer->ftm.non_trigger_based) {
		target->min_time_between_msr =
			cpu_to_le16(IWL_MLD_FTM_NON_TB_MIN_TIME_BETWEEN_MSR);
		target->burst_period =
			cpu_to_le16(IWL_MLD_FTM_NON_TB_MAX_TIME_BETWEEN_MSR);
	} else {
		target->min_time_between_msr = cpu_to_le16(0);
	}

	/* TODO: Beacon interval is currently unknown, so use the common value
	 * of 100 TUs.
	 */
	target->beacon_interval = cpu_to_le16(100);

	return 0;
}

/* Build a TOF_RANGE_REQ_CMD from the cfg80211 measurement request and send
 * it to the firmware. Only one FTM request can be in flight at a time.
 */
int iwl_mld_ftm_start(struct iwl_mld *mld, struct ieee80211_vif *vif,
		      struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	if (mld->ftm_initiator.req)
		return -EBUSY;

	if (req->n_peers > ARRAY_SIZE(cmd.ap))
		return -EINVAL;

	memset(&cmd, 0, sizeof(cmd));

	iwl_mld_ftm_cmd_common(mld, vif, (void *)&cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
		struct iwl_tof_range_req_ap_entry *target = &cmd.ap[i];

		ret = iwl_mld_ftm_set_target(mld, vif, peer, target);
		if (ret)
			return ret;
	}

	/* TODO: get the status from the response */
	ret = iwl_mld_send_cmd(mld, &hcmd);
	if (!ret) {
		mld->ftm_initiator.req = req;
		mld->ftm_initiator.req_wdev = ieee80211_vif_to_wdev(vif);
	}

	return ret;
}

static void iwl_mld_ftm_reset(struct iwl_mld *mld)
{
	lockdep_assert_wiphy(mld->wiphy);

	mld->ftm_initiator.req = NULL;
	mld->ftm_initiator.req_wdev = NULL;
	memset(mld->ftm_initiator.responses, 0,
	       sizeof(mld->ftm_initiator.responses));
}

static int iwl_mld_ftm_range_resp_valid(struct iwl_mld *mld, u8 request_id,
					u8 num_of_aps)
{
	if (IWL_FW_CHECK(mld, request_id != (u8)mld->ftm_initiator.req->cookie,
			 "Request ID mismatch, got %u, active %u\n",
			 request_id, (u8)mld->ftm_initiator.req->cookie))
		return -EINVAL;

	if (IWL_FW_CHECK(mld, num_of_aps > mld->ftm_initiator.req->n_peers ||
			 num_of_aps > IWL_TOF_MAX_APS,
			 "FTM range response: invalid num of APs (%u)\n",
			 num_of_aps))
		return -EINVAL;

	return 0;
}

static int iwl_mld_ftm_find_peer(struct cfg80211_pmsr_request *req,
				 const u8 *addr)
{
	for (int i = 0; i < req->n_peers; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];

		if (ether_addr_equal_unaligned(peer->addr, addr))
			return i;
	}

	return -ENOENT;
}

static void iwl_mld_debug_range_resp(struct iwl_mld *mld, u8 index,
				     struct cfg80211_pmsr_result *res)
{
	s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666);

	IWL_DEBUG_INFO(mld, "entry %d\n", index);
	IWL_DEBUG_INFO(mld, "\tstatus: %d\n", res->status);
	IWL_DEBUG_INFO(mld, "\tBSSID: %pM\n", res->addr);
	IWL_DEBUG_INFO(mld, "\thost time: %llu\n", res->host_time);
	IWL_DEBUG_INFO(mld, "\tburst index: %d\n", res->ftm.burst_index);
	IWL_DEBUG_INFO(mld, "\tsuccess num: %u\n", res->ftm.num_ftmr_successes);
	IWL_DEBUG_INFO(mld, "\trssi: %d\n", res->ftm.rssi_avg);
	IWL_DEBUG_INFO(mld, "\trssi spread: %d\n", res->ftm.rssi_spread);
	IWL_DEBUG_INFO(mld, "\trtt: %lld\n", res->ftm.rtt_avg);
	IWL_DEBUG_INFO(mld, "\trtt var: %llu\n", res->ftm.rtt_variance);
	IWL_DEBUG_INFO(mld, "\trtt spread: %llu\n", res->ftm.rtt_spread);
	IWL_DEBUG_INFO(mld, "\tdistance: %lld\n", rtt_avg);
}

/* Handle a range response notification from the firmware and report each
 * per-AP result to cfg80211.
 */
void iwl_mld_handle_ftm_resp_notif(struct iwl_mld *mld,
				   struct iwl_rx_packet *pkt)
{
	struct iwl_tof_range_rsp_ntfy *fw_resp = (void *)pkt->data;
	u8 num_of_aps, last_in_batch;

	if (IWL_FW_CHECK(mld, !mld->ftm_initiator.req,
			 "FTM response without a pending request\n"))
		return;

	if (iwl_mld_ftm_range_resp_valid(mld, fw_resp->request_id,
					 fw_resp->num_of_aps))
		return;

	num_of_aps = fw_resp->num_of_aps;
	last_in_batch = fw_resp->last_report;

	IWL_DEBUG_INFO(mld, "Range response received\n");
	IWL_DEBUG_INFO(mld, "request id: %llu, num of entries: %u\n",
		       mld->ftm_initiator.req->cookie, num_of_aps);

	for (int i = 0; i < num_of_aps; i++) {
		struct cfg80211_pmsr_result result = {};
		struct iwl_tof_range_rsp_ap_entry_ntfy *fw_ap;
		int peer_idx;

		fw_ap = &fw_resp->ap[i];
		result.final = fw_ap->last_burst;
		result.ap_tsf = le32_to_cpu(fw_ap->start_tsf);
		result.ap_tsf_valid = 1;

		peer_idx = iwl_mld_ftm_find_peer(mld->ftm_initiator.req,
						 fw_ap->bssid);
		if (peer_idx < 0) {
			IWL_WARN(mld,
				 "Unknown address (%pM, target #%d) in FTM response\n",
				 fw_ap->bssid, i);
			continue;
		}

		switch (fw_ap->measure_status) {
		case IWL_TOF_ENTRY_SUCCESS:
			result.status = NL80211_PMSR_STATUS_SUCCESS;
			break;
		case IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT:
			result.status = NL80211_PMSR_STATUS_TIMEOUT;
			break;
		case IWL_TOF_ENTRY_NO_RESPONSE:
			result.status = NL80211_PMSR_STATUS_FAILURE;
			result.ftm.failure_reason =
				NL80211_PMSR_FTM_FAILURE_NO_RESPONSE;
			break;
		case IWL_TOF_ENTRY_REQUEST_REJECTED:
			result.status = NL80211_PMSR_STATUS_FAILURE;
			result.ftm.failure_reason =
				NL80211_PMSR_FTM_FAILURE_PEER_BUSY;
			result.ftm.busy_retry_time = fw_ap->refusal_period;
			break;
		default:
			result.status = NL80211_PMSR_STATUS_FAILURE;
			result.ftm.failure_reason =
				NL80211_PMSR_FTM_FAILURE_UNSPECIFIED;
			break;
		}
		memcpy(result.addr, fw_ap->bssid, ETH_ALEN);

		/* TODO: convert the timestamp from the result to systime */
		result.host_time = ktime_get_boottime_ns();

		result.type = NL80211_PMSR_TYPE_FTM;
		result.ftm.burst_index = mld->ftm_initiator.responses[peer_idx];
		mld->ftm_initiator.responses[peer_idx]++;
		result.ftm.rssi_avg = fw_ap->rssi;
		result.ftm.rssi_avg_valid = 1;
		result.ftm.rssi_spread = fw_ap->rssi_spread;
		result.ftm.rssi_spread_valid = 1;
		result.ftm.rtt_avg = (s32)le32_to_cpu(fw_ap->rtt);
		result.ftm.rtt_avg_valid = 1;
		result.ftm.rtt_variance = le32_to_cpu(fw_ap->rtt_variance);
		result.ftm.rtt_variance_valid = 1;
		result.ftm.rtt_spread = le32_to_cpu(fw_ap->rtt_spread);
		result.ftm.rtt_spread_valid = 1;

		cfg80211_pmsr_report(mld->ftm_initiator.req_wdev,
				     mld->ftm_initiator.req,
				     &result, GFP_KERNEL);

		if (fw_has_api(&mld->fw->ucode_capa,
			       IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
			IWL_DEBUG_INFO(mld, "RTT confidence: %u\n",
				       fw_ap->rttConfidence);

		iwl_mld_debug_range_resp(mld, i, &result);
	}

	if (last_in_batch) {
		cfg80211_pmsr_complete(mld->ftm_initiator.req_wdev,
				       mld->ftm_initiator.req,
				       GFP_KERNEL);
		iwl_mld_ftm_reset(mld);
	}
}

/* If a measurement request was pending when the firmware restarted, report
 * a failure for each of its peers and complete the request.
 */
void iwl_mld_ftm_restart_cleanup(struct iwl_mld *mld)
{
	struct cfg80211_pmsr_result result = {
		.status = NL80211_PMSR_STATUS_FAILURE,
		.final = 1,
		.host_time = ktime_get_boottime_ns(),
		.type = NL80211_PMSR_TYPE_FTM,
	};

	if (!mld->ftm_initiator.req)
		return;

	for (int i = 0; i < mld->ftm_initiator.req->n_peers; i++) {
		memcpy(result.addr, mld->ftm_initiator.req->peers[i].addr,
		       ETH_ALEN);

		cfg80211_pmsr_report(mld->ftm_initiator.req_wdev,
				     mld->ftm_initiator.req,
				     &result, GFP_KERNEL);
	}

	cfg80211_pmsr_complete(mld->ftm_initiator.req_wdev,
			       mld->ftm_initiator.req, GFP_KERNEL);
	iwl_mld_ftm_reset(mld);
}