// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2015-2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2025 Intel Corporation
 */
#include <linux/etherdevice.h>
#include <linux/math64.h>
#include <net/cfg80211.h>
#include "mvm.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "constants.h"

struct iwl_mvm_loc_entry {
	struct list_head list;
	u8 addr[ETH_ALEN];
	u8 lci_len, civic_len;
	u8 buf[];
};

struct iwl_mvm_smooth_entry {
	struct list_head list;
	u8 addr[ETH_ALEN];
	s64 rtt_avg;
	u64 host_time;
};

enum iwl_mvm_pasn_flags {
	IWL_MVM_PASN_FLAG_HAS_HLTK = BIT(0),
};

struct iwl_mvm_ftm_pasn_entry {
	struct list_head list;
	u8 addr[ETH_ALEN];
	u8 hltk[HLTK_11AZ_LEN];
	u8 tk[TK_11AZ_LEN];
	u8 cipher;
	u8 tx_pn[IEEE80211_CCMP_PN_LEN];
	u8 rx_pn[IEEE80211_CCMP_PN_LEN];
	u32 flags;
};

struct iwl_mvm_ftm_iter_data {
	u8 *cipher;
	u8 *bssid;
	u8 *tk;
};

static void iwl_mvm_ftm_reset(struct iwl_mvm *mvm)
{
	struct iwl_mvm_loc_entry *e, *t;

	mvm->ftm_initiator.req = NULL;
	mvm->ftm_initiator.req_wdev = NULL;
	memset(mvm->ftm_initiator.responses, 0,
	       sizeof(mvm->ftm_initiator.responses));

	list_for_each_entry_safe(e, t, &mvm->ftm_initiator.loc_list, list) {
		list_del(&e->list);
		kfree(e);
	}
}

void iwl_mvm_ftm_restart(struct iwl_mvm *mvm)
{
	struct cfg80211_pmsr_result result = {
		.status = NL80211_PMSR_STATUS_FAILURE,
		.final = 1,
		.host_time = ktime_get_boottime_ns(),
		.type = NL80211_PMSR_TYPE_FTM,
	};
	int i;

	lockdep_assert_held(&mvm->mutex);

	if (!mvm->ftm_initiator.req)
		return;

	for (i = 0; i < mvm->ftm_initiator.req->n_peers; i++) {
		memcpy(result.addr, mvm->ftm_initiator.req->peers[i].addr,
		       ETH_ALEN);
		result.ftm.burst_index = mvm->ftm_initiator.responses[i];

		cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev,
				     mvm->ftm_initiator.req,
				     &result, GFP_KERNEL);
	}

	cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev,
			       mvm->ftm_initiator.req, GFP_KERNEL);
	iwl_mvm_ftm_reset(mvm);
}

void iwl_mvm_ftm_initiator_smooth_config(struct iwl_mvm *mvm)
{
	INIT_LIST_HEAD(&mvm->ftm_initiator.smooth.resp);

	IWL_DEBUG_INFO(mvm,
		       "enable=%u, alpha=%u, age_jiffies=%u, thresh=(%u:%u)\n",
		       IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH,
		       IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA,
		       IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * HZ,
		       IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT,
		       IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT);
}

void iwl_mvm_ftm_initiator_smooth_stop(struct iwl_mvm *mvm)
{
	struct iwl_mvm_smooth_entry *se, *st;

	list_for_each_entry_safe(se, st, &mvm->ftm_initiator.smooth.resp,
				 list) {
		list_del(&se->list);
		kfree(se);
	}
}

static int
iwl_ftm_range_request_status_to_err(enum iwl_tof_range_request_status s)
{
	switch (s) {
	case IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS:
		return 0;
	case IWL_TOF_RANGE_REQUEST_STATUS_BUSY:
		return -EBUSY;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}

static void iwl_mvm_ftm_cmd_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       struct iwl_tof_range_req_cmd_v5 *cmd,
			       struct cfg80211_pmsr_request *req)
{
	int i;

	cmd->request_id = req->cookie;
	cmd->num_of_ap = req->n_peers;

	/* use maximum for "no timeout" or bigger than what we can do */
	if (!req->timeout || req->timeout > 255 * 100)
		cmd->req_timeout = 255;
	else
		cmd->req_timeout = DIV_ROUND_UP(req->timeout, 100);

	/*
	 * We treat it always as random, since if not we'll
	 * have filled our local address there instead.
	 */
	cmd->macaddr_random = 1;
	memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
	for (i = 0; i < ETH_ALEN; i++)
		cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];

	if (vif->cfg.assoc)
		memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);
	else
		eth_broadcast_addr(cmd->range_req_bssid);
}

static void iwl_mvm_ftm_cmd_common(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif,
				   struct iwl_tof_range_req_cmd_v9 *cmd,
				   struct cfg80211_pmsr_request *req)
{
	int i;

	cmd->initiator_flags =
		cpu_to_le32(IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM |
			    IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT);
	cmd->request_id = req->cookie;
	cmd->num_of_ap = req->n_peers;

	/*
	 * Use a large value for "no timeout". Don't use the maximum value
	 * because of fw limitations.
	 */
	if (req->timeout)
		cmd->req_timeout_ms = cpu_to_le32(req->timeout);
	else
		cmd->req_timeout_ms = cpu_to_le32(0xfffff);

	memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
	for (i = 0; i < ETH_ALEN; i++)
		cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];

	if (vif->cfg.assoc) {
		memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);

		/* AP's TSF is only relevant if associated */
		for (i = 0; i < req->n_peers; i++) {
			if (req->peers[i].report_ap_tsf) {
				struct iwl_mvm_vif *mvmvif =
					iwl_mvm_vif_from_mac80211(vif);

				cmd->tsf_mac_id = cpu_to_le32(mvmvif->id);
				return;
			}
		}
	} else {
		eth_broadcast_addr(cmd->range_req_bssid);
	}

	/* Don't report AP's TSF */
	cmd->tsf_mac_id = cpu_to_le32(0xff);
}

static void iwl_mvm_ftm_cmd_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       struct iwl_tof_range_req_cmd_v8 *cmd,
			       struct cfg80211_pmsr_request *req)
{
	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)cmd, req);
}

static int
iwl_mvm_ftm_target_chandef_v1(struct iwl_mvm *mvm,
			      struct cfg80211_pmsr_request_peer *peer,
			      u8 *channel, u8 *bandwidth,
			      u8 *ctrl_ch_position)
{
	u32 freq = peer->chandef.chan->center_freq;

	*channel = ieee80211_frequency_to_channel(freq);

	switch (peer->chandef.width) {
	case NL80211_CHAN_WIDTH_20_NOHT:
		*bandwidth = IWL_TOF_BW_20_LEGACY;
		break;
	case NL80211_CHAN_WIDTH_20:
		*bandwidth = IWL_TOF_BW_20_HT;
		break;
	case NL80211_CHAN_WIDTH_40:
		*bandwidth = IWL_TOF_BW_40;
		break;
	case NL80211_CHAN_WIDTH_80:
		*bandwidth = IWL_TOF_BW_80;
		break;
	default:
		IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n",
			peer->chandef.width);
		return -EINVAL;
	}

	*ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
		iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;

	return 0;
}

static int
iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm *mvm,
			      struct cfg80211_pmsr_request_peer *peer,
			      u8 *channel, u8 *format_bw,
			      u8 *ctrl_ch_position)
{
	u32 freq = peer->chandef.chan->center_freq;
	u8 cmd_ver;

	*channel = ieee80211_frequency_to_channel(freq);

	switch (peer->chandef.width) {
	case NL80211_CHAN_WIDTH_20_NOHT:
		*format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY;
		*format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_20:
		*format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
		*format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_40:
		*format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
		*format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_80:
		*format_bw = IWL_LOCATION_FRAME_FORMAT_VHT;
		*format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
		break;
	case NL80211_CHAN_WIDTH_160:
		cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
						WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
						IWL_FW_CMD_VER_UNKNOWN);

		if (cmd_ver >= 13) {
			*format_bw = IWL_LOCATION_FRAME_FORMAT_HE;
			*format_bw |= IWL_LOCATION_BW_160MHZ << LOCATION_BW_POS;
			break;
		}
		fallthrough;
	default:
		IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n",
			peer->chandef.width);
		return -EINVAL;
	}

	/* non EDCA based measurement must use HE preamble */
	if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
		*format_bw |= IWL_LOCATION_FRAME_FORMAT_HE;

	*ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
		iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;

	return 0;
}

static int
iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v2 *target)
{
	int ret;

	ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
					    &target->bandwidth,
					    &target->ctrl_ch_position);
	if (ret)
		return ret;

	memcpy(target->bssid, peer->addr, ETH_ALEN);
	target->burst_period =
		cpu_to_le16(peer->ftm.burst_period);
	target->samples_per_burst = peer->ftm.ftms_per_burst;
	target->num_of_bursts = peer->ftm.num_bursts_exp;
	target->measure_type = 0; /* regular two-sided FTM */
	target->retries_per_sample = peer->ftm.ftmr_retries;
	target->asap_mode = peer->ftm.asap;
	target->enable_dyn_ack = IWL_MVM_FTM_INITIATOR_DYNACK;

	if (peer->ftm.request_lci)
		target->location_req |= IWL_TOF_LOC_LCI;
	if (peer->ftm.request_civicloc)
		target->location_req |= IWL_TOF_LOC_CIVIC;

	target->algo_type = IWL_MVM_FTM_INITIATOR_ALGO;

	return 0;
}

#define FTM_SET_FLAG(flag) (*flags |= \
			    cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag))

static void
iwl_mvm_ftm_set_target_flags(struct iwl_mvm *mvm,
			     struct cfg80211_pmsr_request_peer *peer,
			     __le32 *flags)
{
	*flags = cpu_to_le32(0);

	if (peer->ftm.asap)
		FTM_SET_FLAG(ASAP);

	if (peer->ftm.request_lci)
		FTM_SET_FLAG(LCI_REQUEST);

	if (peer->ftm.request_civicloc)
		FTM_SET_FLAG(CIVIC_REQUEST);

	if (IWL_MVM_FTM_INITIATOR_DYNACK)
		FTM_SET_FLAG(DYN_ACK);

	if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_LINEAR_REG)
		FTM_SET_FLAG(ALGO_LR);
	else if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT)
		FTM_SET_FLAG(ALGO_FFT);

	if (peer->ftm.trigger_based)
		FTM_SET_FLAG(TB);
	else if (peer->ftm.non_trigger_based)
		FTM_SET_FLAG(NON_TB);

	if ((peer->ftm.trigger_based || peer->ftm.non_trigger_based) &&
	    peer->ftm.lmr_feedback)
		FTM_SET_FLAG(LMR_FEEDBACK);
}

static void
iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
			      struct cfg80211_pmsr_request_peer *peer,
			      struct iwl_tof_range_req_ap_entry_v6 *target)
{
	memcpy(target->bssid, peer->addr, ETH_ALEN);
	target->burst_period =
		cpu_to_le16(peer->ftm.burst_period);
	target->samples_per_burst = peer->ftm.ftms_per_burst;
	target->num_of_bursts = peer->ftm.num_bursts_exp;
	target->ftmr_max_retries = peer->ftm.ftmr_retries;
	iwl_mvm_ftm_set_target_flags(mvm, peer, &target->initiator_ap_flags);
}

static int
iwl_mvm_ftm_put_target_v3(struct iwl_mvm *mvm,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v3 *target)
{
	int ret;

	ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
					    &target->bandwidth,
					    &target->ctrl_ch_position);
	if (ret)
		return ret;

	/*
	 * Versions 3 and 4 has some common fields, so
	 * iwl_mvm_ftm_put_target_common() can be used for version 7 too.
	 */
	iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);

	return 0;
}

static int
iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v4 *target)
{
	int ret;

	ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
					    &target->format_bw,
					    &target->ctrl_ch_position);
	if (ret)
		return ret;

	iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);

	return 0;
}

static int iwl_mvm_ftm_set_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			       struct cfg80211_pmsr_request_peer *peer,
			       u8 *sta_id, __le32 *flags)
{
	if (vif->cfg.assoc) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
		struct ieee80211_sta *sta;
		struct ieee80211_bss_conf *link_conf;
		unsigned int link_id;

		rcu_read_lock();
		for_each_vif_active_link(vif, link_conf, link_id) {
			if (memcmp(peer->addr, link_conf->bssid, ETH_ALEN))
				continue;

			*sta_id = mvmvif->link[link_id]->ap_sta_id;
			sta = rcu_dereference(mvm->fw_id_to_mac_id[*sta_id]);
			if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
				rcu_read_unlock();
				return PTR_ERR_OR_ZERO(sta);
			}

			if (sta->mfp && (peer->ftm.trigger_based ||
					 peer->ftm.non_trigger_based))
				FTM_SET_FLAG(PMF);
			break;
		}
		rcu_read_unlock();

#ifdef CONFIG_IWLWIFI_DEBUGFS
		if (mvmvif->ftm_unprotected) {
			*sta_id = IWL_INVALID_STA;
			*flags &= ~cpu_to_le32(IWL_INITIATOR_AP_FLAGS_PMF);
		}
#endif
	} else {
		*sta_id = IWL_INVALID_STA;
	}

	return 0;
}

static int
iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		       struct cfg80211_pmsr_request_peer *peer,
		       struct iwl_tof_range_req_ap_entry_v6 *target)
{
	int ret;

	ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
					    &target->format_bw,
					    &target->ctrl_ch_position);
	if (ret)
		return ret;

	iwl_mvm_ftm_put_target_common(mvm, peer, target);

	iwl_mvm_ftm_set_sta(mvm, vif, peer, &target->sta_id,
			    &target->initiator_ap_flags);

	/*
	 * TODO: Beacon interval is currently unknown, so use the common value
	 * of 100 TUs.
	 */
	target->beacon_interval = cpu_to_le16(100);
	return 0;
}

static int iwl_mvm_ftm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *hcmd)
{
	u32 status;
	int err = iwl_mvm_send_cmd_status(mvm, hcmd, &status);

	if (!err && status) {
		IWL_ERR(mvm, "FTM range request command failure, status: %u\n",
			status);
		err = iwl_ftm_range_request_status_to_err(status);
	}

	return err;
}

static int iwl_mvm_ftm_start_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v5 cmd_v5;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd_v5,
		.len[0] = sizeof(cmd_v5),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_v5(mvm, vif, &cmd_v5, req);

	for (i = 0; i < cmd_v5.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];

		err = iwl_mvm_ftm_put_target_v2(mvm, peer, &cmd_v5.ap[i]);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v7 cmd_v7;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd_v7,
		.len[0] = sizeof(cmd_v7),
	};
	u8 i;
	int err;

	/*
	 * Versions 7 and 8 has the same structure except from the responders
	 * list, so iwl_mvm_ftm_cmd() can be used for version 7 too.
	 */
	iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd_v7, req);

	for (i = 0; i < cmd_v7.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];

		err = iwl_mvm_ftm_put_target_v3(mvm, peer, &cmd_v7.ap[i]);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v8 cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];

		err = iwl_mvm_ftm_put_target_v4(mvm, peer, &cmd.ap[i]);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v9 cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_common(mvm, vif, &cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
		struct iwl_tof_range_req_ap_entry_v6 *target = &cmd.ap[i];

		err = iwl_mvm_ftm_put_target(mvm, vif, peer, target);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

static void iter(struct ieee80211_hw *hw,
		 struct ieee80211_vif *vif,
		 struct ieee80211_sta *sta,
		 struct ieee80211_key_conf *key,
		 void *data)
{
	struct iwl_mvm_ftm_iter_data *target = data;

	if (!sta || memcmp(sta->addr, target->bssid, ETH_ALEN))
		return;

	WARN_ON(!sta->mfp);

	target->tk = key->key;
	*target->cipher = iwl_mvm_cipher_to_location_cipher(key->cipher);
	WARN_ON(*target->cipher == IWL_LOCATION_CIPHER_INVALID);
}

static void
iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				u8 *bssid, u8 *cipher, u8 *hltk, u8 *tk,
				u8 *rx_pn, u8 *tx_pn, __le32 *flags)
{
	struct iwl_mvm_ftm_pasn_entry *entry;
#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (mvmvif->ftm_unprotected)
		return;
#endif

	if (!(le32_to_cpu(*flags) & (IWL_INITIATOR_AP_FLAGS_NON_TB |
				     IWL_INITIATOR_AP_FLAGS_TB)))
		return;

	lockdep_assert_held(&mvm->mutex);

	list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) {
		if (memcmp(entry->addr, bssid, sizeof(entry->addr)))
			continue;

		*cipher = entry->cipher;

		if (entry->flags & IWL_MVM_PASN_FLAG_HAS_HLTK)
			memcpy(hltk, entry->hltk, sizeof(entry->hltk));
		else
			memset(hltk, 0, sizeof(entry->hltk));

		if (vif->cfg.assoc &&
		    !memcmp(vif->bss_conf.bssid, bssid, ETH_ALEN)) {
			struct iwl_mvm_ftm_iter_data target;

			target.bssid = bssid;
			target.cipher = cipher;
			target.tk = NULL;
			ieee80211_iter_keys(mvm->hw, vif, iter, &target);

			if (!WARN_ON(!target.tk))
				memcpy(tk, target.tk, TK_11AZ_LEN);
		} else {
			memcpy(tk, entry->tk, sizeof(entry->tk));
		}

		memcpy(rx_pn, entry->rx_pn, sizeof(entry->rx_pn));
		memcpy(tx_pn, entry->tx_pn, sizeof(entry->tx_pn));

		FTM_SET_FLAG(SECURED);
		return;
	}
}

static int
iwl_mvm_ftm_put_target_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v7 *target)
{
	int err = iwl_mvm_ftm_put_target(mvm, vif, peer, (void *)target);
	if (err)
		return err;

	iwl_mvm_ftm_set_secured_ranging(mvm, vif, target->bssid,
					&target->cipher, target->hltk,
					target->tk, target->rx_pn,
					target->tx_pn,
					&target->initiator_ap_flags);
	return err;
}

static int iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v11 cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
		struct iwl_tof_range_req_ap_entry_v7 *target = &cmd.ap[i];

		err = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, target);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

static void
iwl_mvm_ftm_set_ndp_params(struct iwl_mvm *mvm,
			   struct iwl_tof_range_req_ap_entry_v8 *target)
{
	/* Only 2 STS are supported on Tx */
	u32 i2r_max_sts = IWL_MVM_FTM_I2R_MAX_STS > 1 ? 1 :
		IWL_MVM_FTM_I2R_MAX_STS;

	target->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP |
		(IWL_MVM_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS);
	target->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP |
		(i2r_max_sts << IWL_LOCATION_MAX_STS_POS);
	target->r2i_max_total_ltf = IWL_MVM_FTM_R2I_MAX_TOTAL_LTF;
	target->i2r_max_total_ltf = IWL_MVM_FTM_I2R_MAX_TOTAL_LTF;
}

static int
iwl_mvm_ftm_put_target_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			  struct cfg80211_pmsr_request_peer *peer,
			  struct iwl_tof_range_req_ap_entry_v8 *target)
{
	u32 flags;
	int ret = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, (void *)target);

	if (ret)
		return ret;

	iwl_mvm_ftm_set_ndp_params(mvm, target);

	/*
	 * If secure LTF is turned off, replace the flag with PMF only
	 */
	flags = le32_to_cpu(target->initiator_ap_flags);
	if (flags & IWL_INITIATOR_AP_FLAGS_SECURED) {
		if (!IWL_MVM_FTM_INITIATOR_SECURE_LTF)
			flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;

		flags |= IWL_INITIATOR_AP_FLAGS_PMF;
		target->initiator_ap_flags = cpu_to_le32(flags);
	}

	return 0;
}

static int iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v12 cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
		struct iwl_tof_range_req_ap_entry_v8 *target = &cmd.ap[i];

		err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, target);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd_v13 cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
		struct iwl_tof_range_req_ap_entry_v9 *target = &cmd.ap[i];

		err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, (void *)target);
		if (err)
			return err;

		if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
			target->bss_color = peer->ftm.bss_color;

		if (peer->ftm.non_trigger_based) {
			target->min_time_between_msr =
				cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR);
			target->burst_period =
				cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR);
		} else {
			target->min_time_between_msr = cpu_to_le16(0);
		}

		target->band =
			iwl_mvm_phy_band_from_nl80211(peer->chandef.chan->band);
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

static int
iwl_mvm_ftm_put_target_v10(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			   struct cfg80211_pmsr_request_peer *peer,
			   struct iwl_tof_range_req_ap_entry *target)
{
	u32 i2r_max_sts, flags;
	int ret;

	ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
					    &target->format_bw,
					    &target->ctrl_ch_position);
	if (ret)
		return ret;

	memcpy(target->bssid, peer->addr, ETH_ALEN);
	target->burst_period =
		cpu_to_le16(peer->ftm.burst_period);
	target->samples_per_burst = peer->ftm.ftms_per_burst;
	target->num_of_bursts = peer->ftm.num_bursts_exp;
	iwl_mvm_ftm_set_target_flags(mvm, peer, &target->initiator_ap_flags);
	iwl_mvm_ftm_set_sta(mvm, vif, peer, &target->sta_id,
			    &target->initiator_ap_flags);
	iwl_mvm_ftm_set_secured_ranging(mvm, vif, target->bssid,
					&target->cipher, target->hltk,
					target->tk, target->rx_pn,
					target->tx_pn,
					&target->initiator_ap_flags);

	i2r_max_sts = IWL_MVM_FTM_I2R_MAX_STS > 1 ? 1 :
		IWL_MVM_FTM_I2R_MAX_STS;

	target->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP |
		(IWL_MVM_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS) |
		(IWL_MVM_FTM_R2I_MAX_TOTAL_LTF << IWL_LOCATION_TOTAL_LTF_POS);
	target->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP |
		(i2r_max_sts << IWL_LOCATION_MAX_STS_POS) |
		(IWL_MVM_FTM_I2R_MAX_TOTAL_LTF << IWL_LOCATION_TOTAL_LTF_POS);

	if (peer->ftm.non_trigger_based) {
		target->min_time_between_msr =
			cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR);
		target->burst_period =
			cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR);
	} else {
		target->min_time_between_msr = cpu_to_le16(0);
	}

	target->band =
		iwl_mvm_phy_band_from_nl80211(peer->chandef.chan->band);

	/*
	 * TODO: Beacon interval is currently unknown, so use the common value
	 * of 100 TUs.
	 */
	target->beacon_interval = cpu_to_le16(100);

	/*
	 * If secure LTF is turned off, replace the flag with PMF only
	 */
	flags = le32_to_cpu(target->initiator_ap_flags);
	if (flags & IWL_INITIATOR_AP_FLAGS_SECURED) {
		if (!IWL_MVM_FTM_INITIATOR_SECURE_LTF)
			flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;

		flags |= IWL_INITIATOR_AP_FLAGS_PMF;
		target->initiator_ap_flags = cpu_to_le32(flags);
	}

	return 0;
}

static int iwl_mvm_ftm_start_v14(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_req_cmd cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
		.dataflags[0] = IWL_HCMD_DFL_DUP,
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
	u8 i;
	int err;

	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);

	for (i = 0; i < cmd.num_of_ap; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
		struct iwl_tof_range_req_ap_entry *target = &cmd.ap[i];

		err = iwl_mvm_ftm_put_target_v10(mvm, vif, peer, target);
		if (err)
			return err;
	}

	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}

int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		      struct cfg80211_pmsr_request *req)
{
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
	int err;

	lockdep_assert_held(&mvm->mutex);

	if (mvm->ftm_initiator.req)
		return -EBUSY;

	if (new_api) {
		u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
						   WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
						   IWL_FW_CMD_VER_UNKNOWN);

		switch (cmd_ver) {
		case 15:
			/* Version 15 has the same struct as 14 */
		case 14:
			err = iwl_mvm_ftm_start_v14(mvm, vif, req);
			break;
		case 13:
			err = iwl_mvm_ftm_start_v13(mvm, vif, req);
			break;
		case 12:
			err = iwl_mvm_ftm_start_v12(mvm, vif, req);
			break;
		case 11:
			err = iwl_mvm_ftm_start_v11(mvm, vif, req);
			break;
		case 9:
		case 10:
			err = iwl_mvm_ftm_start_v9(mvm, vif, req);
			break;
		case 8:
			err = iwl_mvm_ftm_start_v8(mvm, vif, req);
			break;
		default:
			err = iwl_mvm_ftm_start_v7(mvm, vif, req);
			break;
		}
	} else {
		err = iwl_mvm_ftm_start_v5(mvm, vif, req);
	}

	if (!err) {
		mvm->ftm_initiator.req = req;
		mvm->ftm_initiator.req_wdev = ieee80211_vif_to_wdev(vif);
	}

	return err;
}

void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req)
{
	struct iwl_tof_range_abort_cmd cmd = {
		.request_id = req->cookie,
	};

	lockdep_assert_held(&mvm->mutex);

	if (req != mvm->ftm_initiator.req)
		return;

	iwl_mvm_ftm_reset(mvm);

	if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(LOCATION_GROUP, TOF_RANGE_ABORT_CMD),
				 0, sizeof(cmd), &cmd))
		IWL_ERR(mvm, "failed to abort FTM process\n");
}

static int iwl_mvm_ftm_find_peer(struct cfg80211_pmsr_request *req,
				 const u8 *addr)
{
	int i;

	for (i = 0; i < req->n_peers; i++) {
		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];

		if (ether_addr_equal_unaligned(peer->addr, addr))
			return i;
	}

	return -ENOENT;
}

static u64 iwl_mvm_ftm_get_host_time(struct iwl_mvm *mvm, __le32 fw_gp2_ts)
{
	u32 gp2_ts = le32_to_cpu(fw_gp2_ts);
	u32 curr_gp2, diff;
	u64 now_from_boot_ns;

	iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &curr_gp2,
			      &now_from_boot_ns, NULL);

	if (curr_gp2 >= gp2_ts)
		diff = curr_gp2 - gp2_ts;
	else
		diff = curr_gp2 + (U32_MAX - gp2_ts + 1);

	return now_from_boot_ns - (u64)diff * 1000;
}

static void iwl_mvm_ftm_get_lci_civic(struct iwl_mvm *mvm,
				      struct cfg80211_pmsr_result *res)
{
	struct iwl_mvm_loc_entry *entry;

	list_for_each_entry(entry, &mvm->ftm_initiator.loc_list, list) {
		if (!ether_addr_equal_unaligned(res->addr, entry->addr))
			continue;

		if (entry->lci_len) {
			res->ftm.lci_len = entry->lci_len;
			res->ftm.lci = entry->buf;
		}

		if (entry->civic_len) {
			res->ftm.civicloc_len = entry->civic_len;
			res->ftm.civicloc = entry->buf + entry->lci_len;
		}

		/* we found the entry we needed */
		break;
	}
}

static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id,
					u8 num_of_aps)
{
	lockdep_assert_held(&mvm->mutex);

	if (request_id != (u8)mvm->ftm_initiator.req->cookie) {
		IWL_ERR(mvm, "Request ID mismatch, got %u, active %u\n",
			request_id, (u8)mvm->ftm_initiator.req->cookie);
		return -EINVAL;
	}

	if (num_of_aps > mvm->ftm_initiator.req->n_peers) {
		IWL_ERR(mvm, "FTM range response invalid\n");
		return -EINVAL;
	}

	return 0;
}

static void iwl_mvm_ftm_rtt_smoothing(struct iwl_mvm *mvm,
				      struct cfg80211_pmsr_result *res)
{
	struct iwl_mvm_smooth_entry *resp = NULL, *iter;
	s64 rtt_avg, rtt = res->ftm.rtt_avg;
	u32 undershoot, overshoot;
	u8 alpha;

	if (!IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH)
		return;

	WARN_ON(rtt < 0);

	if (res->status != NL80211_PMSR_STATUS_SUCCESS) {
		IWL_DEBUG_INFO(mvm,
			       ": %pM: ignore failed measurement. Status=%u\n",
			       res->addr, res->status);
		return;
	}

	list_for_each_entry(iter, &mvm->ftm_initiator.smooth.resp, list) {
		if (!memcmp(res->addr, iter->addr, ETH_ALEN)) {
			resp = iter;
			break;
		}
	}

	if (!resp) {
		resp = kzalloc(sizeof(*resp), GFP_KERNEL);
		if (!resp)
			return;

		memcpy(resp->addr, res->addr, ETH_ALEN);
		list_add_tail(&resp->list, &mvm->ftm_initiator.smooth.resp);

		resp->rtt_avg = rtt;

		IWL_DEBUG_INFO(mvm, "new: %pM: rtt_avg=%lld\n",
			       resp->addr, resp->rtt_avg);
		goto update_time;
	}

	if (res->host_time - resp->host_time >
	    IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * 1000000000) {
		resp->rtt_avg = rtt;

		IWL_DEBUG_INFO(mvm, "expired: %pM: rtt_avg=%lld\n",
			       resp->addr, resp->rtt_avg);
		goto update_time;
	}

	/* Smooth the results based on the tracked RTT average */
	undershoot = IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT;
	overshoot = IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT;
	alpha = IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA;

	rtt_avg = div_s64(alpha * rtt + (100 - alpha) * resp->rtt_avg, 100);

	IWL_DEBUG_INFO(mvm,
		       "%pM: prev rtt_avg=%lld, new rtt_avg=%lld, rtt=%lld\n",
		       resp->addr, resp->rtt_avg, rtt_avg, rtt);

	/*
	 * update the responder's average RTT results regardless of
	 * the under/over shoot logic below
	 */
	resp->rtt_avg = rtt_avg;

	/* smooth the results */
	if (rtt_avg > rtt && (rtt_avg - rtt) > undershoot) {
		res->ftm.rtt_avg = rtt_avg;

		IWL_DEBUG_INFO(mvm,
			       "undershoot: val=%lld\n",
			       (rtt_avg - rtt));
	} else if (rtt_avg < rtt && (rtt - rtt_avg) >
		   overshoot) {
		res->ftm.rtt_avg = rtt_avg;
		IWL_DEBUG_INFO(mvm,
			       "overshoot: val=%lld\n",
			       (rtt - rtt_avg));
	}

update_time:
	resp->host_time = res->host_time;
}

static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index,
				     struct cfg80211_pmsr_result *res)
{
	s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666);

	IWL_DEBUG_INFO(mvm, "entry %d\n", index);
	IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status);
	IWL_DEBUG_INFO(mvm, "\tBSSID: %pM\n", res->addr);
	IWL_DEBUG_INFO(mvm, "\thost time: %llu\n", res->host_time);
	IWL_DEBUG_INFO(mvm, "\tburst index: %d\n", res->ftm.burst_index);
	IWL_DEBUG_INFO(mvm, "\tsuccess num: %u\n", res->ftm.num_ftmr_successes);
	IWL_DEBUG_INFO(mvm, "\trssi: %d\n", res->ftm.rssi_avg);
	IWL_DEBUG_INFO(mvm, "\trssi spread: %d\n", res->ftm.rssi_spread);
	IWL_DEBUG_INFO(mvm, "\trtt: %lld\n", res->ftm.rtt_avg);
	IWL_DEBUG_INFO(mvm, "\trtt var: %llu\n", res->ftm.rtt_variance);
	IWL_DEBUG_INFO(mvm, "\trtt spread: %llu\n", res->ftm.rtt_spread);
	IWL_DEBUG_INFO(mvm, "\tdistance: %lld\n", rtt_avg);
}

static void
iwl_mvm_ftm_pasn_update_pn(struct iwl_mvm *mvm,
			   struct iwl_tof_range_rsp_ap_entry_ntfy *fw_ap)
{
	struct iwl_mvm_ftm_pasn_entry *entry;

	lockdep_assert_held(&mvm->mutex);

	list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) {
		if (memcmp(fw_ap->bssid, entry->addr, sizeof(entry->addr)))
			continue;

		memcpy(entry->rx_pn, fw_ap->rx_pn, sizeof(entry->rx_pn));
		memcpy(entry->tx_pn, fw_ap->tx_pn, sizeof(entry->tx_pn));
		return;
	}
}

static u8 iwl_mvm_ftm_get_range_resp_ver(struct iwl_mvm *mvm)
{
	if (!fw_has_api(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ))
		return 5;

	/* Starting from version 8, the FW advertises the version */
	if (mvm->cmd_ver.range_resp >= 8)
		return mvm->cmd_ver.range_resp;
	else if (fw_has_api(&mvm->fw->ucode_capa,
			    IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
		return 7;

	/* The first version of the new range request API */
	return 6;
}

static bool iwl_mvm_ftm_resp_size_validation(u8 ver, unsigned int pkt_len)
{
	switch (ver) {
	case 9:
	case 8:
		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy);
	case 7:
		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v7);
	case 6:
		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v6);
	case 5:
		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v5);
	default:
		WARN_ONCE(1, "FTM: unsupported range response version %u", ver);
		return false;
	}
}

void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
	struct iwl_tof_range_rsp_ntfy_v5 *fw_resp_v5 = (void *)pkt->data;
	struct iwl_tof_range_rsp_ntfy_v6 *fw_resp_v6 = (void *)pkt->data;
	struct iwl_tof_range_rsp_ntfy_v7 *fw_resp_v7 = (void *)pkt->data;
	struct iwl_tof_range_rsp_ntfy *fw_resp_v8 = (void *)pkt->data;
	int i;
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
	u8 num_of_aps, last_in_batch;
	u8 notif_ver = iwl_mvm_ftm_get_range_resp_ver(mvm);

	lockdep_assert_held(&mvm->mutex);

	if (!mvm->ftm_initiator.req) {
		return;
	}

	if (unlikely(!iwl_mvm_ftm_resp_size_validation(notif_ver, pkt_len)))
		return;

	if (new_api) {
		if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v8->request_id,
						 fw_resp_v8->num_of_aps))
			return;

		num_of_aps = fw_resp_v8->num_of_aps;
		last_in_batch = fw_resp_v8->last_report;
	} else {
		if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v5->request_id,
						 fw_resp_v5->num_of_aps))
			return;

		num_of_aps = fw_resp_v5->num_of_aps;
		last_in_batch = fw_resp_v5->last_in_batch;
	}

	IWL_DEBUG_INFO(mvm, "Range response received\n");
	IWL_DEBUG_INFO(mvm, "request id: %lld, num of entries: %u\n",
		       mvm->ftm_initiator.req->cookie, num_of_aps);

	for (i = 0; i < num_of_aps && i < IWL_TOF_MAX_APS; i++) {
		struct cfg80211_pmsr_result result = {};
		struct iwl_tof_range_rsp_ap_entry_ntfy *fw_ap;
		int peer_idx;

		if (new_api) {
			if (notif_ver >= 8) {
				fw_ap = &fw_resp_v8->ap[i];
				iwl_mvm_ftm_pasn_update_pn(mvm, fw_ap);
			} else if (notif_ver == 7) {
				fw_ap = (void *)&fw_resp_v7->ap[i];
			} else {
				fw_ap = (void *)&fw_resp_v6->ap[i];
			}

			result.final = fw_ap->last_burst;
			result.ap_tsf = le32_to_cpu(fw_ap->start_tsf);
			result.ap_tsf_valid = 1;
		} else {
			/* the first part is the same for old and new APIs */
			fw_ap = (void *)&fw_resp_v5->ap[i];
			/*
			 * FIXME: the firmware needs to report this, we don't
			 * even know the number of bursts the responder picked
			 * (if we asked it to)
			 */
			result.final = 0;
		}

		peer_idx = iwl_mvm_ftm_find_peer(mvm->ftm_initiator.req,
						 fw_ap->bssid);
		if (peer_idx < 0) {
			IWL_WARN(mvm,
				 "Unknown address (%pM, target #%d) in FTM response\n",
				 fw_ap->bssid, i);
			continue;
		}

		switch (fw_ap->measure_status) {
		case IWL_TOF_ENTRY_SUCCESS:
			result.status = NL80211_PMSR_STATUS_SUCCESS;
			break;
		case IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT:
			result.status = NL80211_PMSR_STATUS_TIMEOUT;
			break;
		case IWL_TOF_ENTRY_NO_RESPONSE:
			result.status = NL80211_PMSR_STATUS_FAILURE;
			result.ftm.failure_reason =
				NL80211_PMSR_FTM_FAILURE_NO_RESPONSE;
			break;
		case IWL_TOF_ENTRY_REQUEST_REJECTED:
			result.status = NL80211_PMSR_STATUS_FAILURE;
			result.ftm.failure_reason =
				NL80211_PMSR_FTM_FAILURE_PEER_BUSY;
			result.ftm.busy_retry_time = fw_ap->refusal_period;
			break;
		default:
			result.status = NL80211_PMSR_STATUS_FAILURE;
			result.ftm.failure_reason =
				NL80211_PMSR_FTM_FAILURE_UNSPECIFIED;
			break;
		}
		memcpy(result.addr, fw_ap->bssid, ETH_ALEN);
		result.host_time = iwl_mvm_ftm_get_host_time(mvm,
							     fw_ap->timestamp);
		result.type = NL80211_PMSR_TYPE_FTM;
		result.ftm.burst_index = mvm->ftm_initiator.responses[peer_idx];
		mvm->ftm_initiator.responses[peer_idx]++;
		result.ftm.rssi_avg = fw_ap->rssi;
		result.ftm.rssi_avg_valid = 1;
		result.ftm.rssi_spread = fw_ap->rssi_spread;
		result.ftm.rssi_spread_valid = 1;
		result.ftm.rtt_avg = (s32)le32_to_cpu(fw_ap->rtt);
		result.ftm.rtt_avg_valid = 1;
		result.ftm.rtt_variance = le32_to_cpu(fw_ap->rtt_variance);
		result.ftm.rtt_variance_valid = 1;
		result.ftm.rtt_spread = le32_to_cpu(fw_ap->rtt_spread);
		result.ftm.rtt_spread_valid = 1;

		iwl_mvm_ftm_get_lci_civic(mvm, &result);

		iwl_mvm_ftm_rtt_smoothing(mvm, &result);

		cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev,
				     mvm->ftm_initiator.req,
				     &result, GFP_KERNEL);

		if (fw_has_api(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
			IWL_DEBUG_INFO(mvm, "RTT confidence: %u\n",
				       fw_ap->rttConfidence);

		iwl_mvm_debug_range_resp(mvm, i, &result);
	}

	if (last_in_batch) {
		cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev,
				       mvm->ftm_initiator.req,
				       GFP_KERNEL);
		iwl_mvm_ftm_reset(mvm);
	}
}

void iwl_mvm_ftm_lc_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	const struct ieee80211_mgmt *mgmt = (void *)pkt->data;
	size_t len = iwl_rx_packet_payload_len(pkt);
	struct iwl_mvm_loc_entry *entry;
	const u8 *ies, *lci, *civic, *msr_ie;
	size_t ies_len, lci_len = 0, civic_len = 0;
	size_t baselen = IEEE80211_MIN_ACTION_SIZE +
			 sizeof(mgmt->u.action.u.ftm);
	static const u8 rprt_type_lci = IEEE80211_SPCT_MSR_RPRT_TYPE_LCI;
	static const u8 rprt_type_civic = IEEE80211_SPCT_MSR_RPRT_TYPE_CIVIC;

	if (len <= baselen)
		return;

	lockdep_assert_held(&mvm->mutex);

	ies = mgmt->u.action.u.ftm.variable;
	ies_len = len - baselen;

	msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len,
					&rprt_type_lci, 1, 4);
	if (msr_ie) {
		lci = msr_ie + 2;
		lci_len = msr_ie[1];
	}

	msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len,
					&rprt_type_civic, 1, 4);
	if (msr_ie) {
		civic = msr_ie + 2;
		civic_len = msr_ie[1];
	}

	entry = kmalloc(sizeof(*entry) + lci_len + civic_len, GFP_KERNEL);
	if (!entry)
		return;

	memcpy(entry->addr, mgmt->bssid, ETH_ALEN);

	entry->lci_len = lci_len;
	if (lci_len)
		memcpy(entry->buf, lci, lci_len);

	entry->civic_len = civic_len;
	if (civic_len)
		memcpy(entry->buf + lci_len, civic, civic_len);

	list_add_tail(&entry->list, &mvm->ftm_initiator.loc_list);
}