// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020 Realtek Corporation
 */

#include "cam.h"
#include "chan.h"
#include "debug.h"
#include "efuse.h"
#include "fw.h"
#include "mac.h"
#include "pci.h"
#include "ps.h"
#include "reg.h"
#include "util.h"

static const u32 rtw89_mac_mem_base_addrs_ax[RTW89_MAC_MEM_NUM] = {
	[RTW89_MAC_MEM_AXIDMA] = AXIDMA_BASE_ADDR,
	[RTW89_MAC_MEM_SHARED_BUF] = SHARED_BUF_BASE_ADDR,
	[RTW89_MAC_MEM_DMAC_TBL] = DMAC_TBL_BASE_ADDR,
	[RTW89_MAC_MEM_SHCUT_MACHDR] = SHCUT_MACHDR_BASE_ADDR,
	[RTW89_MAC_MEM_STA_SCHED] = STA_SCHED_BASE_ADDR,
	[RTW89_MAC_MEM_RXPLD_FLTR_CAM] = RXPLD_FLTR_CAM_BASE_ADDR,
	[RTW89_MAC_MEM_SECURITY_CAM] = SECURITY_CAM_BASE_ADDR,
	[RTW89_MAC_MEM_WOW_CAM] = WOW_CAM_BASE_ADDR,
	[RTW89_MAC_MEM_CMAC_TBL] = CMAC_TBL_BASE_ADDR,
	[RTW89_MAC_MEM_ADDR_CAM] = ADDR_CAM_BASE_ADDR,
	[RTW89_MAC_MEM_BA_CAM] = BA_CAM_BASE_ADDR,
	[RTW89_MAC_MEM_BCN_IE_CAM0] = BCN_IE_CAM0_BASE_ADDR,
	[RTW89_MAC_MEM_BCN_IE_CAM1] = BCN_IE_CAM1_BASE_ADDR,
	[RTW89_MAC_MEM_TXD_FIFO_0] = TXD_FIFO_0_BASE_ADDR,
	[RTW89_MAC_MEM_TXD_FIFO_1] = TXD_FIFO_1_BASE_ADDR,
	[RTW89_MAC_MEM_TXDATA_FIFO_0] = TXDATA_FIFO_0_BASE_ADDR,
	[RTW89_MAC_MEM_TXDATA_FIFO_1] = TXDATA_FIFO_1_BASE_ADDR,
	[RTW89_MAC_MEM_CPU_LOCAL] = CPU_LOCAL_BASE_ADDR,
	[RTW89_MAC_MEM_BSSID_CAM] = BSSID_CAM_BASE_ADDR,
	[RTW89_MAC_MEM_TXD_FIFO_0_V1] = TXD_FIFO_0_BASE_ADDR_V1,
	[RTW89_MAC_MEM_TXD_FIFO_1_V1] = TXD_FIFO_1_BASE_ADDR_V1,
};

static void rtw89_mac_mem_write(struct rtw89_dev *rtwdev, u32 offset,
				u32 val, enum rtw89_mac_mem_sel sel)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	u32 addr = mac->mem_base_addrs[sel] + offset;

	rtw89_write32(rtwdev, mac->filter_model_addr, addr);
	rtw89_write32(rtwdev, mac->indir_access_addr, val);
}

static u32 rtw89_mac_mem_read(struct rtw89_dev *rtwdev, u32 offset,
			      enum rtw89_mac_mem_sel sel)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	u32 addr = mac->mem_base_addrs[sel] + offset;

	rtw89_write32(rtwdev, mac->filter_model_addr, addr);
	return rtw89_read32(rtwdev, mac->indir_access_addr);
}

static int rtw89_mac_check_mac_en_ax(struct rtw89_dev *rtwdev, u8 mac_idx,
				     enum rtw89_mac_hwmod_sel sel)
{
	u32 val, r_val;

	if (sel == RTW89_DMAC_SEL) {
		r_val = rtw89_read32(rtwdev, R_AX_DMAC_FUNC_EN);
		val = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN);
	} else if (sel == RTW89_CMAC_SEL && mac_idx == 0) {
		r_val = rtw89_read32(rtwdev, R_AX_CMAC_FUNC_EN);
		val = B_AX_CMAC_EN;
	} else if (sel == RTW89_CMAC_SEL && mac_idx == 1) {
		r_val = rtw89_read32(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND);
		val = B_AX_CMAC1_FEN;
	} else {
		return -EINVAL;
	}
	if (r_val == RTW89_R32_EA || r_val == RTW89_R32_DEAD ||
	    (val & r_val) != val)
		return -EFAULT;

	return 0;
}

int rtw89_mac_write_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 val)
{
	u8 lte_ctrl;
	int ret;

	ret = read_poll_timeout(rtw89_read8, lte_ctrl, (lte_ctrl & BIT(5)) != 0,
				50, 50000, false, rtwdev, R_AX_LTE_CTRL + 3);
	if (ret)
		rtw89_err(rtwdev, "[ERR]lte not ready(W)\n");

	rtw89_write32(rtwdev, R_AX_LTE_WDATA, val);
	rtw89_write32(rtwdev, R_AX_LTE_CTRL, 0xC00F0000 | offset);

	return ret;
}

int rtw89_mac_read_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 *val)
{
	u8 lte_ctrl;
	int ret;

	ret = read_poll_timeout(rtw89_read8, lte_ctrl, (lte_ctrl & BIT(5)) != 0,
				50, 50000, false, rtwdev, R_AX_LTE_CTRL + 3);
	if (ret)
		rtw89_err(rtwdev, "[ERR]lte not ready(W)\n");

	rtw89_write32(rtwdev, R_AX_LTE_CTRL, 0x800F0000 | offset);
	*val = rtw89_read32(rtwdev, R_AX_LTE_RDATA);

	return ret;
}

int rtw89_mac_dle_dfi_cfg(struct rtw89_dev *rtwdev, struct rtw89_mac_dle_dfi_ctrl *ctrl)
{
	u32 ctrl_reg, data_reg, ctrl_data;
	u32 val;
	int ret;

	switch (ctrl->type) {
	case DLE_CTRL_TYPE_WDE:
		ctrl_reg = R_AX_WDE_DBG_FUN_INTF_CTL;
		data_reg = R_AX_WDE_DBG_FUN_INTF_DATA;
		ctrl_data = FIELD_PREP(B_AX_WDE_DFI_TRGSEL_MASK, ctrl->target) |
			    FIELD_PREP(B_AX_WDE_DFI_ADDR_MASK, ctrl->addr) |
			    B_AX_WDE_DFI_ACTIVE;
		break;
	case DLE_CTRL_TYPE_PLE:
		ctrl_reg = R_AX_PLE_DBG_FUN_INTF_CTL;
		data_reg = R_AX_PLE_DBG_FUN_INTF_DATA;
		ctrl_data = FIELD_PREP(B_AX_PLE_DFI_TRGSEL_MASK, ctrl->target) |
			    FIELD_PREP(B_AX_PLE_DFI_ADDR_MASK, ctrl->addr) |
			    B_AX_PLE_DFI_ACTIVE;
		break;
	default:
		rtw89_warn(rtwdev, "[ERR] dfi ctrl type %d\n", ctrl->type);
		return -EINVAL;
	}

	rtw89_write32(rtwdev, ctrl_reg, ctrl_data);

	ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_WDE_DFI_ACTIVE),
				       1, 1000, false, rtwdev, ctrl_reg);
	if (ret) {
		rtw89_warn(rtwdev, "[ERR] dle dfi ctrl 0x%X set 0x%X timeout\n",
			   ctrl_reg, ctrl_data);
		return ret;
	}

	ctrl->out_data = rtw89_read32(rtwdev, data_reg);
	return 0;
}

int rtw89_mac_dle_dfi_quota_cfg(struct rtw89_dev *rtwdev,
				struct rtw89_mac_dle_dfi_quota *quota)
{
	struct rtw89_mac_dle_dfi_ctrl ctrl;
	int ret;

	ctrl.type = quota->dle_type;
	ctrl.target = DLE_DFI_TYPE_QUOTA;
	ctrl.addr = quota->qtaid;
	ret = rtw89_mac_dle_dfi_cfg(rtwdev, &ctrl);
	if (ret) {
		rtw89_warn(rtwdev, "[ERR] dle dfi quota %d\n", ret);
		return ret;
	}

	quota->rsv_pgnum = FIELD_GET(B_AX_DLE_RSV_PGNUM, ctrl.out_data);
	quota->use_pgnum = FIELD_GET(B_AX_DLE_USE_PGNUM, ctrl.out_data);
	return 0;
}

int rtw89_mac_dle_dfi_qempty_cfg(struct rtw89_dev *rtwdev,
				 struct rtw89_mac_dle_dfi_qempty *qempty)
{
	struct rtw89_mac_dle_dfi_ctrl ctrl;
	u32 ret;

	ctrl.type = qempty->dle_type;
	ctrl.target = DLE_DFI_TYPE_QEMPTY;
	ctrl.addr = qempty->grpsel;
	ret = rtw89_mac_dle_dfi_cfg(rtwdev, &ctrl);
	if (ret) {
		rtw89_warn(rtwdev, "[ERR] dle dfi qempty %d\n", ret);
		return ret;
	}

	qempty->qempty = FIELD_GET(B_AX_DLE_QEMPTY_GRP, ctrl.out_data);
	return 0;
}

static void dump_err_status_dispatcher_ax(struct rtw89_dev *rtwdev)
{
	rtw89_info(rtwdev, "R_AX_HOST_DISPATCHER_ALWAYS_IMR=0x%08x ",
		   rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR));
	rtw89_info(rtwdev, "R_AX_HOST_DISPATCHER_ALWAYS_ISR=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_ISR));
	rtw89_info(rtwdev, "R_AX_CPU_DISPATCHER_ALWAYS_IMR=0x%08x ",
		   rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR));
	rtw89_info(rtwdev, "R_AX_CPU_DISPATCHER_ALWAYS_ISR=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_ISR));
	rtw89_info(rtwdev, "R_AX_OTHER_DISPATCHER_ALWAYS_IMR=0x%08x ",
		   rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_IMR));
	rtw89_info(rtwdev, "R_AX_OTHER_DISPATCHER_ALWAYS_ISR=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_ISR));
}

static void rtw89_mac_dump_qta_lost_ax(struct rtw89_dev *rtwdev)
{
	struct rtw89_mac_dle_dfi_qempty qempty;
	struct rtw89_mac_dle_dfi_quota quota;
	struct rtw89_mac_dle_dfi_ctrl ctrl;
	u32 val, not_empty, i;
	int ret;

	qempty.dle_type = DLE_CTRL_TYPE_PLE;
	qempty.grpsel = 0;
	qempty.qempty = ~(u32)0;
	ret = rtw89_mac_dle_dfi_qempty_cfg(rtwdev, &qempty);
	if (ret)
		rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__);
	else
		rtw89_info(rtwdev, "DLE group0 empty: 0x%x\n", qempty.qempty);

	for (not_empty = ~qempty.qempty, i = 0; not_empty != 0; not_empty >>= 1, i++) {
		if (!(not_empty & BIT(0)))
			continue;
		ctrl.type = DLE_CTRL_TYPE_PLE;
		ctrl.target = DLE_DFI_TYPE_QLNKTBL;
		ctrl.addr = (QLNKTBL_ADDR_INFO_SEL_0 ? QLNKTBL_ADDR_INFO_SEL : 0) |
			    u32_encode_bits(i, QLNKTBL_ADDR_TBL_IDX_MASK);
		ret = rtw89_mac_dle_dfi_cfg(rtwdev, &ctrl);
		if (ret)
			rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__);
		else
			rtw89_info(rtwdev, "qidx%d pktcnt = %d\n", i,
				   u32_get_bits(ctrl.out_data,
						QLNKTBL_DATA_SEL1_PKT_CNT_MASK));
	}

	quota.dle_type = DLE_CTRL_TYPE_PLE;
	quota.qtaid = 6;
	ret = rtw89_mac_dle_dfi_quota_cfg(rtwdev, &quota);
	if (ret)
		rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__);
	else
		rtw89_info(rtwdev, "quota6 rsv/use: 0x%x/0x%x\n",
			   quota.rsv_pgnum, quota.use_pgnum);

	val = rtw89_read32(rtwdev, R_AX_PLE_QTA6_CFG);
	rtw89_info(rtwdev, "[PLE][CMAC0_RX]min_pgnum=0x%x\n",
		   u32_get_bits(val, B_AX_PLE_Q6_MIN_SIZE_MASK));
	rtw89_info(rtwdev, "[PLE][CMAC0_RX]max_pgnum=0x%x\n",
		   u32_get_bits(val, B_AX_PLE_Q6_MAX_SIZE_MASK));
	val = rtw89_read32(rtwdev, R_AX_RX_FLTR_OPT);
	rtw89_info(rtwdev, "[PLE][CMAC0_RX]B_AX_RX_MPDU_MAX_LEN=0x%x\n",
		   u32_get_bits(val, B_AX_RX_MPDU_MAX_LEN_MASK));
	rtw89_info(rtwdev, "R_AX_RSP_CHK_SIG=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_RSP_CHK_SIG));
	rtw89_info(rtwdev, "R_AX_TRXPTCL_RESP_0=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_TRXPTCL_RESP_0));
	rtw89_info(rtwdev, "R_AX_CCA_CONTROL=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_CCA_CONTROL));

	if (!rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_1, RTW89_CMAC_SEL)) {
		quota.dle_type = DLE_CTRL_TYPE_PLE;
		quota.qtaid = 7;
		ret = rtw89_mac_dle_dfi_quota_cfg(rtwdev, &quota);
		if (ret)
			rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__);
		else
			rtw89_info(rtwdev, "quota7 rsv/use: 0x%x/0x%x\n",
				   quota.rsv_pgnum, quota.use_pgnum);

		val = rtw89_read32(rtwdev, R_AX_PLE_QTA7_CFG);
		rtw89_info(rtwdev, "[PLE][CMAC1_RX]min_pgnum=0x%x\n",
			   u32_get_bits(val, B_AX_PLE_Q7_MIN_SIZE_MASK));
		rtw89_info(rtwdev, "[PLE][CMAC1_RX]max_pgnum=0x%x\n",
			   u32_get_bits(val, B_AX_PLE_Q7_MAX_SIZE_MASK));
		val = rtw89_read32(rtwdev, R_AX_RX_FLTR_OPT_C1);
		rtw89_info(rtwdev, "[PLE][CMAC1_RX]B_AX_RX_MPDU_MAX_LEN=0x%x\n",
			   u32_get_bits(val, B_AX_RX_MPDU_MAX_LEN_MASK));
		rtw89_info(rtwdev, "R_AX_RSP_CHK_SIG_C1=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_RSP_CHK_SIG_C1));
		rtw89_info(rtwdev, "R_AX_TRXPTCL_RESP_0_C1=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_TRXPTCL_RESP_0_C1));
		rtw89_info(rtwdev, "R_AX_CCA_CONTROL_C1=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_CCA_CONTROL_C1));
	}

	rtw89_info(rtwdev, "R_AX_DLE_EMPTY0=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_DLE_EMPTY0));
	rtw89_info(rtwdev, "R_AX_DLE_EMPTY1=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_DLE_EMPTY1));

	dump_err_status_dispatcher_ax(rtwdev);
}

void rtw89_mac_dump_l0_to_l1(struct rtw89_dev *rtwdev,
			     enum mac_ax_err_info err)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	u32 dbg, event;

	dbg = rtw89_read32(rtwdev, R_AX_SER_DBG_INFO);
	event = u32_get_bits(dbg, B_AX_L0_TO_L1_EVENT_MASK);

	switch (event) {
	case MAC_AX_L0_TO_L1_RX_QTA_LOST:
		rtw89_info(rtwdev, "quota lost!\n");
		mac->dump_qta_lost(rtwdev);
		break;
	default:
		break;
	}
}

void rtw89_mac_dump_dmac_err_status(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 dmac_err;
	int i, ret;

	ret = rtw89_mac_check_mac_en(rtwdev, 0, RTW89_DMAC_SEL);
	if (ret) {
		rtw89_warn(rtwdev, "[DMAC] : DMAC not enabled\n");
		return;
	}

	dmac_err = rtw89_read32(rtwdev, R_AX_DMAC_ERR_ISR);
	rtw89_info(rtwdev, "R_AX_DMAC_ERR_ISR=0x%08x\n", dmac_err);
	rtw89_info(rtwdev, "R_AX_DMAC_ERR_IMR=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_DMAC_ERR_IMR));

	if (dmac_err) {
		rtw89_info(rtwdev, "R_AX_WDE_ERR_FLAG_CFG=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_WDE_ERR_FLAG_CFG_NUM1));
		rtw89_info(rtwdev, "R_AX_PLE_ERR_FLAG_CFG=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_CFG_NUM1));
		if (chip->chip_id == RTL8852C) {
			rtw89_info(rtwdev, "R_AX_PLE_ERRFLAG_MSG=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_PLE_ERRFLAG_MSG));
			rtw89_info(rtwdev, "R_AX_WDE_ERRFLAG_MSG=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_WDE_ERRFLAG_MSG));
			rtw89_info(rtwdev, "R_AX_PLE_DBGERR_LOCKEN=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_PLE_DBGERR_LOCKEN));
			rtw89_info(rtwdev, "R_AX_PLE_DBGERR_STS=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_PLE_DBGERR_STS));
		}
	}

	if (dmac_err & B_AX_WDRLS_ERR_FLAG) {
		rtw89_info(rtwdev, "R_AX_WDRLS_ERR_IMR=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_WDRLS_ERR_IMR));
		rtw89_info(rtwdev, "R_AX_WDRLS_ERR_ISR=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_WDRLS_ERR_ISR));
		if (chip->chip_id == RTL8852C)
			rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX_V1));
		else
			rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX));
	}

	if (dmac_err & B_AX_WSEC_ERR_FLAG) {
		if (chip->chip_id == RTL8852C) {
			rtw89_info(rtwdev, "R_AX_SEC_ERR_IMR=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_SEC_ERROR_FLAG_IMR));
			rtw89_info(rtwdev, "R_AX_SEC_ERR_ISR=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_SEC_ERROR_FLAG));
			rtw89_info(rtwdev, "R_AX_SEC_ENG_CTRL=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL));
			rtw89_info(rtwdev, "R_AX_SEC_MPDU_PROC=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC));
			rtw89_info(rtwdev, "R_AX_SEC_CAM_ACCESS=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_SEC_CAM_ACCESS));
			rtw89_info(rtwdev, "R_AX_SEC_CAM_RDATA=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_SEC_CAM_RDATA));
			rtw89_info(rtwdev, "R_AX_SEC_DEBUG1=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_SEC_DEBUG1));
			rtw89_info(rtwdev, "R_AX_SEC_TX_DEBUG=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_SEC_TX_DEBUG));
			rtw89_info(rtwdev, "R_AX_SEC_RX_DEBUG=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_SEC_RX_DEBUG));

			rtw89_write32_mask(rtwdev, R_AX_DBG_CTRL,
					   B_AX_DBG_SEL0, 0x8B);
			rtw89_write32_mask(rtwdev, R_AX_DBG_CTRL,
					   B_AX_DBG_SEL1, 0x8B);
			rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1,
					   B_AX_SEL_0XC0_MASK, 1);
			for (i = 0; i < 0x10; i++) {
				rtw89_write32_mask(rtwdev, R_AX_SEC_ENG_CTRL,
						   B_AX_SEC_DBG_PORT_FIELD_MASK, i);
				rtw89_info(rtwdev, "sel=%x,R_AX_SEC_DEBUG2=0x%08x\n",
					   i, rtw89_read32(rtwdev, R_AX_SEC_DEBUG2));
			}
		} else if (chip->chip_id == RTL8922A) {
			rtw89_info(rtwdev, "R_BE_SEC_ERROR_FLAG=0x%08x\n",
				   rtw89_read32(rtwdev, R_BE_SEC_ERROR_FLAG));
			rtw89_info(rtwdev, "R_BE_SEC_ERROR_IMR=0x%08x\n",
				   rtw89_read32(rtwdev, R_BE_SEC_ERROR_IMR));
			rtw89_info(rtwdev, "R_BE_SEC_ENG_CTRL=0x%08x\n",
				   rtw89_read32(rtwdev, R_BE_SEC_ENG_CTRL));
			rtw89_info(rtwdev, "R_BE_SEC_MPDU_PROC=0x%08x\n",
				   rtw89_read32(rtwdev, R_BE_SEC_MPDU_PROC));
			rtw89_info(rtwdev, "R_BE_SEC_CAM_ACCESS=0x%08x\n",
				   rtw89_read32(rtwdev, R_BE_SEC_CAM_ACCESS));
			rtw89_info(rtwdev, "R_BE_SEC_CAM_RDATA=0x%08x\n",
				   rtw89_read32(rtwdev, R_BE_SEC_CAM_RDATA));
			rtw89_info(rtwdev, "R_BE_SEC_DEBUG2=0x%08x\n",
				   rtw89_read32(rtwdev, R_BE_SEC_DEBUG2));
		} else {
			rtw89_info(rtwdev, "R_AX_SEC_ERR_IMR_ISR=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_SEC_DEBUG));
			rtw89_info(rtwdev, "R_AX_SEC_ENG_CTRL=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL));
			rtw89_info(rtwdev, "R_AX_SEC_MPDU_PROC=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC));
			rtw89_info(rtwdev, "R_AX_SEC_CAM_ACCESS=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_SEC_CAM_ACCESS));
			rtw89_info(rtwdev, "R_AX_SEC_CAM_RDATA=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_SEC_CAM_RDATA));
			rtw89_info(rtwdev, "R_AX_SEC_CAM_WDATA=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_SEC_CAM_WDATA));
			rtw89_info(rtwdev, "R_AX_SEC_TX_DEBUG=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_SEC_TX_DEBUG));
			rtw89_info(rtwdev, "R_AX_SEC_RX_DEBUG=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_SEC_RX_DEBUG));
			rtw89_info(rtwdev, "R_AX_SEC_TRX_PKT_CNT=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_SEC_TRX_PKT_CNT));
			rtw89_info(rtwdev, "R_AX_SEC_TRX_BLK_CNT=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_SEC_TRX_BLK_CNT));
		}
	}

	if (dmac_err & B_AX_MPDU_ERR_FLAG) {
		rtw89_info(rtwdev, "R_AX_MPDU_TX_ERR_IMR=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_IMR));
		rtw89_info(rtwdev, "R_AX_MPDU_TX_ERR_ISR=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_ISR));
		rtw89_info(rtwdev, "R_AX_MPDU_RX_ERR_IMR=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_IMR));
		rtw89_info(rtwdev, "R_AX_MPDU_RX_ERR_ISR=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_ISR));
	}

	if (dmac_err & B_AX_STA_SCHEDULER_ERR_FLAG) {
		if (chip->chip_id == RTL8922A) {
			rtw89_info(rtwdev, "R_BE_INTERRUPT_MASK_REG=0x%08x\n",
				   rtw89_read32(rtwdev, R_BE_INTERRUPT_MASK_REG));
			rtw89_info(rtwdev, "R_BE_INTERRUPT_STS_REG=0x%08x\n",
				   rtw89_read32(rtwdev, R_BE_INTERRUPT_STS_REG));
		} else {
			rtw89_info(rtwdev, "R_AX_STA_SCHEDULER_ERR_IMR=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_IMR));
			rtw89_info(rtwdev, "R_AX_STA_SCHEDULER_ERR_ISR=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_ISR));
		}
	}

	if (dmac_err & B_AX_WDE_DLE_ERR_FLAG) {
		rtw89_info(rtwdev, "R_AX_WDE_ERR_IMR=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_WDE_ERR_IMR));
		rtw89_info(rtwdev, "R_AX_WDE_ERR_ISR=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR));
		rtw89_info(rtwdev, "R_AX_PLE_ERR_IMR=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_PLE_ERR_IMR));
		rtw89_info(rtwdev, "R_AX_PLE_ERR_FLAG_ISR=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR));
	}

	if (dmac_err & B_AX_TXPKTCTRL_ERR_FLAG) {
		if (chip->chip_id == RTL8852C || chip->chip_id == RTL8922A) {
			rtw89_info(rtwdev, "R_AX_TXPKTCTL_B0_ERRFLAG_IMR=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_TXPKTCTL_B0_ERRFLAG_IMR));
			rtw89_info(rtwdev, "R_AX_TXPKTCTL_B0_ERRFLAG_ISR=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_TXPKTCTL_B0_ERRFLAG_ISR));
			rtw89_info(rtwdev, "R_AX_TXPKTCTL_B1_ERRFLAG_IMR=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_TXPKTCTL_B1_ERRFLAG_IMR));
			rtw89_info(rtwdev, "R_AX_TXPKTCTL_B1_ERRFLAG_ISR=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_TXPKTCTL_B1_ERRFLAG_ISR));
		} else {
			rtw89_info(rtwdev, "R_AX_TXPKTCTL_ERR_IMR_ISR=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR));
			rtw89_info(rtwdev, "R_AX_TXPKTCTL_ERR_IMR_ISR_B1=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR_B1));
		}
	}

	if (dmac_err & B_AX_PLE_DLE_ERR_FLAG) {
		rtw89_info(rtwdev, "R_AX_WDE_ERR_IMR=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_WDE_ERR_IMR));
		rtw89_info(rtwdev, "R_AX_WDE_ERR_ISR=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR));
		rtw89_info(rtwdev, "R_AX_PLE_ERR_IMR=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_PLE_ERR_IMR));
		rtw89_info(rtwdev, "R_AX_PLE_ERR_FLAG_ISR=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR));
		rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_0=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_0));
		rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_1=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_1));
		rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_2=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_2));
		rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_0=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_0));
		rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_1=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_1));
		rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_2=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_2));
		if (chip->chip_id == RTL8922A) {
			rtw89_info(rtwdev, "R_BE_WD_CPUQ_OP_3=0x%08x\n",
				   rtw89_read32(rtwdev, R_BE_WD_CPUQ_OP_3));
			rtw89_info(rtwdev, "R_BE_WD_CPUQ_OP_STATUS=0x%08x\n",
				   rtw89_read32(rtwdev, R_BE_WD_CPUQ_OP_STATUS));
			rtw89_info(rtwdev, "R_BE_PLE_CPUQ_OP_3=0x%08x\n",
				   rtw89_read32(rtwdev, R_BE_PL_CPUQ_OP_3));
			rtw89_info(rtwdev, "R_BE_PL_CPUQ_OP_STATUS=0x%08x\n",
				   rtw89_read32(rtwdev, R_BE_PL_CPUQ_OP_STATUS));
		} else {
			rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_STATUS=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_STATUS));
			rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_STATUS=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_STATUS));
			if (chip->chip_id == RTL8852C) {
				rtw89_info(rtwdev, "R_AX_RX_CTRL0=0x%08x\n",
					   rtw89_read32(rtwdev, R_AX_RX_CTRL0));
				rtw89_info(rtwdev, "R_AX_RX_CTRL1=0x%08x\n",
					   rtw89_read32(rtwdev, R_AX_RX_CTRL1));
				rtw89_info(rtwdev, "R_AX_RX_CTRL2=0x%08x\n",
					   rtw89_read32(rtwdev, R_AX_RX_CTRL2));
			} else {
				rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_0=0x%08x\n",
					   rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_0));
				rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_1=0x%08x\n",
					   rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_1));
				rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_2=0x%08x\n",
					   rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_2));
			}
		}
	}

	if (dmac_err & B_AX_PKTIN_ERR_FLAG) {
		rtw89_info(rtwdev, "R_AX_PKTIN_ERR_IMR=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_PKTIN_ERR_IMR));
		rtw89_info(rtwdev, "R_AX_PKTIN_ERR_ISR=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_PKTIN_ERR_ISR));
	}

	if (dmac_err & B_AX_DISPATCH_ERR_FLAG) {
		if (chip->chip_id == RTL8922A) {
			rtw89_info(rtwdev, "R_BE_DISP_HOST_IMR=0x%08x\n",
				   rtw89_read32(rtwdev, R_BE_DISP_HOST_IMR));
			rtw89_info(rtwdev, "R_BE_DISP_ERROR_ISR1=0x%08x\n",
				   rtw89_read32(rtwdev, R_BE_DISP_ERROR_ISR1));
			rtw89_info(rtwdev, "R_BE_DISP_CPU_IMR=0x%08x\n",
				   rtw89_read32(rtwdev, R_BE_DISP_CPU_IMR));
			rtw89_info(rtwdev, "R_BE_DISP_ERROR_ISR2=0x%08x\n",
				   rtw89_read32(rtwdev, R_BE_DISP_ERROR_ISR2));
			rtw89_info(rtwdev, "R_BE_DISP_OTHER_IMR=0x%08x\n",
				   rtw89_read32(rtwdev, R_BE_DISP_OTHER_IMR));
			rtw89_info(rtwdev, "R_BE_DISP_ERROR_ISR0=0x%08x\n",
				   rtw89_read32(rtwdev, R_BE_DISP_ERROR_ISR0));
		} else {
			rtw89_info(rtwdev, "R_AX_HOST_DISPATCHER_ERR_IMR=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR));
			rtw89_info(rtwdev, "R_AX_HOST_DISPATCHER_ERR_ISR=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_ISR));
			rtw89_info(rtwdev, "R_AX_CPU_DISPATCHER_ERR_IMR=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR));
			rtw89_info(rtwdev, "R_AX_CPU_DISPATCHER_ERR_ISR=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_ISR));
			rtw89_info(rtwdev, "R_AX_OTHER_DISPATCHER_ERR_IMR=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_IMR));
			rtw89_info(rtwdev, "R_AX_OTHER_DISPATCHER_ERR_ISR=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_ISR));
		}
	}

	if (dmac_err & B_AX_BBRPT_ERR_FLAG) {
		if (chip->chip_id == RTL8852C || chip->chip_id == RTL8922A) {
			rtw89_info(rtwdev, "R_AX_BBRPT_COM_ERR_IMR=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_IMR));
			rtw89_info(rtwdev, "R_AX_BBRPT_COM_ERR_ISR=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_ISR));
			rtw89_info(rtwdev, "R_AX_BBRPT_CHINFO_ERR_ISR=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_ISR));
			rtw89_info(rtwdev, "R_AX_BBRPT_CHINFO_ERR_IMR=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_IMR));
			rtw89_info(rtwdev, "R_AX_BBRPT_DFS_ERR_IMR=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_IMR));
			rtw89_info(rtwdev, "R_AX_BBRPT_DFS_ERR_ISR=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_ISR));
		} else {
			rtw89_info(rtwdev, "R_AX_BBRPT_COM_ERR_IMR_ISR=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_IMR_ISR));
			rtw89_info(rtwdev, "R_AX_BBRPT_CHINFO_ERR_ISR=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_ISR));
			rtw89_info(rtwdev, "R_AX_BBRPT_CHINFO_ERR_IMR=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_IMR));
			rtw89_info(rtwdev, "R_AX_BBRPT_DFS_ERR_IMR=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_IMR));
			rtw89_info(rtwdev, "R_AX_BBRPT_DFS_ERR_ISR=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_ISR));
		}
		if (chip->chip_id == RTL8922A) {
			rtw89_info(rtwdev, "R_BE_LA_ERRFLAG_IMR=0x%08x\n",
				   rtw89_read32(rtwdev, R_BE_LA_ERRFLAG_IMR));
			rtw89_info(rtwdev, "R_BE_LA_ERRFLAG_ISR=0x%08x\n",
				   rtw89_read32(rtwdev, R_BE_LA_ERRFLAG_ISR));
		}
	}

	if (dmac_err & B_AX_HAXIDMA_ERR_FLAG) {
		if (chip->chip_id == RTL8922A) {
			rtw89_info(rtwdev, "R_BE_HAXI_IDCT_MSK=0x%08x\n",
				   rtw89_read32(rtwdev, R_BE_HAXI_IDCT_MSK));
			rtw89_info(rtwdev, "R_BE_HAXI_IDCT=0x%08x\n",
				   rtw89_read32(rtwdev, R_BE_HAXI_IDCT));
		} else if (chip->chip_id == RTL8852C) {
			rtw89_info(rtwdev, "R_AX_HAXIDMA_ERR_IMR=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_HAXI_IDCT_MSK));
			rtw89_info(rtwdev, "R_AX_HAXIDMA_ERR_ISR=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_HAXI_IDCT));
		}
	}

	if (dmac_err & B_BE_P_AXIDMA_ERR_INT) {
		rtw89_info(rtwdev, "R_BE_PL_AXIDMA_IDCT_MSK=0x%08x\n",
			   rtw89_mac_mem_read(rtwdev, R_BE_PL_AXIDMA_IDCT_MSK,
					      RTW89_MAC_MEM_AXIDMA));
		rtw89_info(rtwdev, "R_BE_PL_AXIDMA_IDCT=0x%08x\n",
			   rtw89_mac_mem_read(rtwdev, R_BE_PL_AXIDMA_IDCT,
					      RTW89_MAC_MEM_AXIDMA));
	}

	if (dmac_err & B_BE_MLO_ERR_INT) {
		rtw89_info(rtwdev, "R_BE_MLO_ERR_IDCT_IMR=0x%08x\n",
			   rtw89_read32(rtwdev, R_BE_MLO_ERR_IDCT_IMR));
		rtw89_info(rtwdev, "R_BE_PKTIN_ERR_ISR=0x%08x\n",
			   rtw89_read32(rtwdev, R_BE_MLO_ERR_IDCT_ISR));
	}

	if (dmac_err & B_BE_PLRLS_ERR_INT) {
		rtw89_info(rtwdev, "R_BE_PLRLS_ERR_IMR=0x%08x\n",
			   rtw89_read32(rtwdev, R_BE_PLRLS_ERR_IMR));
		rtw89_info(rtwdev, "R_BE_PLRLS_ERR_ISR=0x%08x\n",
			   rtw89_read32(rtwdev, R_BE_PLRLS_ERR_ISR));
	}
}

static void rtw89_mac_dump_cmac_err_status_ax(struct rtw89_dev *rtwdev,
					      u8 band)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 offset = 0;
	u32 cmac_err;
	int ret;

	ret = rtw89_mac_check_mac_en(rtwdev, band, RTW89_CMAC_SEL);
	if (ret) {
		if (band)
			rtw89_warn(rtwdev, "[CMAC] : CMAC1 not enabled\n");
		else
			rtw89_warn(rtwdev, "[CMAC] : CMAC0 not enabled\n");
		return;
	}

	if (band)
		offset = RTW89_MAC_AX_BAND_REG_OFFSET;

	cmac_err = rtw89_read32(rtwdev, R_AX_CMAC_ERR_ISR + offset);
	rtw89_info(rtwdev, "R_AX_CMAC_ERR_ISR [%d]=0x%08x\n", band,
		   rtw89_read32(rtwdev, R_AX_CMAC_ERR_ISR + offset));
	rtw89_info(rtwdev, "R_AX_CMAC_FUNC_EN [%d]=0x%08x\n", band,
		   rtw89_read32(rtwdev, R_AX_CMAC_FUNC_EN + offset));
	rtw89_info(rtwdev, "R_AX_CK_EN [%d]=0x%08x\n", band,
		   rtw89_read32(rtwdev, R_AX_CK_EN + offset));

	if (cmac_err & B_AX_SCHEDULE_TOP_ERR_IND) {
		rtw89_info(rtwdev, "R_AX_SCHEDULE_ERR_IMR [%d]=0x%08x\n", band,
			   rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_IMR + offset));
		rtw89_info(rtwdev, "R_AX_SCHEDULE_ERR_ISR [%d]=0x%08x\n", band,
			   rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_ISR + offset));
	}

	if (cmac_err & B_AX_PTCL_TOP_ERR_IND) {
		rtw89_info(rtwdev, "R_AX_PTCL_IMR0 [%d]=0x%08x\n", band,
			   rtw89_read32(rtwdev, R_AX_PTCL_IMR0 + offset));
		rtw89_info(rtwdev, "R_AX_PTCL_ISR0 [%d]=0x%08x\n", band,
			   rtw89_read32(rtwdev, R_AX_PTCL_ISR0 + offset));
	}

	if (cmac_err & B_AX_DMA_TOP_ERR_IND) {
		if (chip->chip_id == RTL8852C) {
			rtw89_info(rtwdev, "R_AX_RX_ERR_FLAG [%d]=0x%08x\n", band,
				   rtw89_read32(rtwdev, R_AX_RX_ERR_FLAG + offset));
			rtw89_info(rtwdev, "R_AX_RX_ERR_FLAG_IMR [%d]=0x%08x\n", band,
				   rtw89_read32(rtwdev, R_AX_RX_ERR_FLAG_IMR + offset));
		} else {
			rtw89_info(rtwdev, "R_AX_DLE_CTRL [%d]=0x%08x\n", band,
				   rtw89_read32(rtwdev, R_AX_DLE_CTRL + offset));
		}
	}

	if (cmac_err & B_AX_DMA_TOP_ERR_IND || cmac_err & B_AX_WMAC_RX_ERR_IND) {
		if (chip->chip_id == RTL8852C) {
			rtw89_info(rtwdev, "R_AX_PHYINFO_ERR_ISR [%d]=0x%08x\n", band,
				   rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_ISR + offset));
			rtw89_info(rtwdev, "R_AX_PHYINFO_ERR_IMR [%d]=0x%08x\n", band,
				   rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_IMR + offset));
		} else {
			rtw89_info(rtwdev, "R_AX_PHYINFO_ERR_IMR [%d]=0x%08x\n", band,
				   rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_IMR + offset));
		}
	}

	if (cmac_err & B_AX_TXPWR_CTRL_ERR_IND) {
		rtw89_info(rtwdev, "R_AX_TXPWR_IMR [%d]=0x%08x\n", band,
			   rtw89_read32(rtwdev, R_AX_TXPWR_IMR + offset));
		rtw89_info(rtwdev, "R_AX_TXPWR_ISR [%d]=0x%08x\n", band,
			   rtw89_read32(rtwdev, R_AX_TXPWR_ISR + offset));
	}

	if (cmac_err & B_AX_WMAC_TX_ERR_IND) {
		if (chip->chip_id == RTL8852C) {
			rtw89_info(rtwdev, "R_AX_TRXPTCL_ERROR_INDICA [%d]=0x%08x\n", band,
				   rtw89_read32(rtwdev, R_AX_TRXPTCL_ERROR_INDICA + offset));
			rtw89_info(rtwdev, "R_AX_TRXPTCL_ERROR_INDICA_MASK [%d]=0x%08x\n", band,
				   rtw89_read32(rtwdev, R_AX_TRXPTCL_ERROR_INDICA_MASK + offset));
		} else {
			rtw89_info(rtwdev, "R_AX_TMAC_ERR_IMR_ISR [%d]=0x%08x\n", band,
				   rtw89_read32(rtwdev, R_AX_TMAC_ERR_IMR_ISR + offset));
		}
		rtw89_info(rtwdev, "R_AX_DBGSEL_TRXPTCL [%d]=0x%08x\n", band,
			   rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL + offset));
	}

	rtw89_info(rtwdev, "R_AX_CMAC_ERR_IMR [%d]=0x%08x\n", band,
		   rtw89_read32(rtwdev, R_AX_CMAC_ERR_IMR + offset));
}

static void rtw89_mac_dump_err_status_ax(struct rtw89_dev *rtwdev,
					 enum mac_ax_err_info err)
{
	if (err != MAC_AX_ERR_L1_ERR_DMAC &&
	    err != MAC_AX_ERR_L0_PROMOTE_TO_L1 &&
	    err != MAC_AX_ERR_L0_ERR_CMAC0 &&
	    err != MAC_AX_ERR_L0_ERR_CMAC1 &&
	    err != MAC_AX_ERR_RXI300)
		return;

	rtw89_info(rtwdev, "--->\nerr=0x%x\n", err);
	rtw89_info(rtwdev, "R_AX_SER_DBG_INFO =0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_SER_DBG_INFO));
	rtw89_info(rtwdev, "R_AX_SER_DBG_INFO =0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_SER_DBG_INFO));
	rtw89_info(rtwdev, "DBG Counter 1 (R_AX_DRV_FW_HSK_4)=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_DRV_FW_HSK_4));
	rtw89_info(rtwdev, "DBG Counter 2 (R_AX_DRV_FW_HSK_5)=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_DRV_FW_HSK_5));

	rtw89_mac_dump_dmac_err_status(rtwdev);
	rtw89_mac_dump_cmac_err_status_ax(rtwdev, RTW89_MAC_0);
	rtw89_mac_dump_cmac_err_status_ax(rtwdev, RTW89_MAC_1);

	rtwdev->hci.ops->dump_err_status(rtwdev);

	if (err == MAC_AX_ERR_L0_PROMOTE_TO_L1)
		rtw89_mac_dump_l0_to_l1(rtwdev, err);

	rtw89_info(rtwdev, "<---\n");
}

static bool rtw89_mac_suppress_log(struct rtw89_dev *rtwdev, u32 err)
{
	struct rtw89_ser *ser = &rtwdev->ser;
	u32 dmac_err, imr, isr;
	int ret;

	if (rtwdev->chip->chip_id == RTL8852C) {
		ret = rtw89_mac_check_mac_en(rtwdev, 0, RTW89_DMAC_SEL);
		if (ret)
			return true;

		if (err == MAC_AX_ERR_L1_ERR_DMAC) {
			dmac_err = rtw89_read32(rtwdev, R_AX_DMAC_ERR_ISR);
			imr = rtw89_read32(rtwdev, R_AX_TXPKTCTL_B0_ERRFLAG_IMR);
			isr = rtw89_read32(rtwdev, R_AX_TXPKTCTL_B0_ERRFLAG_ISR);

			if ((dmac_err & B_AX_TXPKTCTRL_ERR_FLAG) &&
			    ((isr & imr) & B_AX_B0_ISR_ERR_CMDPSR_FRZTO)) {
				set_bit(RTW89_SER_SUPPRESS_LOG, ser->flags);
				return true;
			}
		} else if (err == MAC_AX_ERR_L1_RESET_DISABLE_DMAC_DONE) {
			if (test_bit(RTW89_SER_SUPPRESS_LOG, ser->flags))
				return true;
		} else if (err == MAC_AX_ERR_L1_RESET_RECOVERY_DONE) {
			if (test_and_clear_bit(RTW89_SER_SUPPRESS_LOG, ser->flags))
				return true;
		}
	}

	return false;
}

u32 rtw89_mac_get_err_status(struct rtw89_dev *rtwdev)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	u32 err, err_scnr;
	int ret;

	ret = read_poll_timeout(rtw89_read32, err, (err != 0), 1000, 100000,
				false, rtwdev, R_AX_HALT_C2H_CTRL);
	if (ret) {
		rtw89_warn(rtwdev, "Polling FW err status fail\n");
		return ret;
	}

	err = rtw89_read32(rtwdev, R_AX_HALT_C2H);
	rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);

	err_scnr = RTW89_ERROR_SCENARIO(err);
	if (err_scnr == RTW89_WCPU_CPU_EXCEPTION)
		err = MAC_AX_ERR_CPU_EXCEPTION;
	else if (err_scnr == RTW89_WCPU_ASSERTION)
		err = MAC_AX_ERR_ASSERTION;
	else if (err_scnr == RTW89_RXI300_ERROR)
		err = MAC_AX_ERR_RXI300;

	if (rtw89_mac_suppress_log(rtwdev, err))
		return err;

	rtw89_fw_st_dbg_dump(rtwdev);
	mac->dump_err_status(rtwdev, err);

	return err;
}
EXPORT_SYMBOL(rtw89_mac_get_err_status);

int rtw89_mac_set_err_status(struct rtw89_dev *rtwdev, u32 err)
{
	struct rtw89_ser *ser = &rtwdev->ser;
	u32 halt;
	int ret = 0;

	if (err > MAC_AX_SET_ERR_MAX) {
		rtw89_err(rtwdev, "Bad set-err-status value 0x%08x\n", err);
		return -EINVAL;
	}

	ret = read_poll_timeout(rtw89_read32, halt, (halt == 0x0), 1000,
				100000, false, rtwdev, R_AX_HALT_H2C_CTRL);
	if (ret) {
		rtw89_err(rtwdev, "FW doesn't receive previous msg\n");
		return -EFAULT;
	}

	rtw89_write32(rtwdev, R_AX_HALT_H2C, err);

	if (ser->prehandle_l1 &&
	    (err == MAC_AX_ERR_L1_DISABLE_EN || err == MAC_AX_ERR_L1_RCVY_EN))
		return 0;

	rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, B_AX_HALT_H2C_TRIGGER);

	return 0;
}
EXPORT_SYMBOL(rtw89_mac_set_err_status);

static int hfc_reset_param(struct rtw89_dev *rtwdev)
{
	struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
	struct rtw89_hfc_param_ini param_ini = {NULL};
	u8 qta_mode = rtwdev->mac.dle_info.qta_mode;

	switch (rtwdev->hci.type) {
	case RTW89_HCI_TYPE_PCIE:
		param_ini = rtwdev->chip->hfc_param_ini[qta_mode];
		param->en = 0;
		break;
	default:
		return -EINVAL;
	}

	if (param_ini.pub_cfg)
		param->pub_cfg = *param_ini.pub_cfg;

	if (param_ini.prec_cfg)
		param->prec_cfg = *param_ini.prec_cfg;

	if (param_ini.ch_cfg)
		param->ch_cfg = param_ini.ch_cfg;

	memset(&param->ch_info, 0, sizeof(param->ch_info));
	memset(&param->pub_info, 0, sizeof(param->pub_info));
	param->mode = param_ini.mode;

	return 0;
}

static int hfc_ch_cfg_chk(struct rtw89_dev *rtwdev, u8 ch)
{
	struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
	const struct rtw89_hfc_ch_cfg *ch_cfg = param->ch_cfg;
	const struct rtw89_hfc_pub_cfg *pub_cfg = &param->pub_cfg;
	const struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;

	if (ch >= RTW89_DMA_CH_NUM)
		return -EINVAL;

	if ((ch_cfg[ch].min && ch_cfg[ch].min < prec_cfg->ch011_prec) ||
	    ch_cfg[ch].max > pub_cfg->pub_max)
		return -EINVAL;
	if (ch_cfg[ch].grp >= grp_num)
		return -EINVAL;

	return 0;
}

static int hfc_pub_info_chk(struct rtw89_dev *rtwdev)
{
	struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
	const struct rtw89_hfc_pub_cfg *cfg = &param->pub_cfg;
	struct rtw89_hfc_pub_info *info = &param->pub_info;

	if (info->g0_used + info->g1_used + info->pub_aval != cfg->pub_max) {
		if (rtwdev->chip->chip_id == RTL8852A)
			return 0;
		else
			return -EFAULT;
	}

	return 0;
}

static int hfc_pub_cfg_chk(struct rtw89_dev *rtwdev)
{
	struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
	const struct rtw89_hfc_pub_cfg *pub_cfg = &param->pub_cfg;

	if (pub_cfg->grp0 + pub_cfg->grp1 != pub_cfg->pub_max)
		return -EFAULT;

	return 0;
}

static int hfc_ch_ctrl(struct rtw89_dev *rtwdev, u8 ch)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_page_regs *regs = chip->page_regs;
	struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
	const struct rtw89_hfc_ch_cfg *cfg = param->ch_cfg;
	int ret = 0;
	u32 val = 0;

	ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
	if (ret)
		return ret;

	ret = hfc_ch_cfg_chk(rtwdev, ch);
	if (ret)
		return ret;

	if (ch > RTW89_DMA_B1HI)
		return -EINVAL;

	val = u32_encode_bits(cfg[ch].min, B_AX_MIN_PG_MASK) |
	      u32_encode_bits(cfg[ch].max, B_AX_MAX_PG_MASK) |
	      (cfg[ch].grp ? B_AX_GRP : 0);
	rtw89_write32(rtwdev, regs->ach_page_ctrl + ch * 4, val);

	return 0;
}

static int hfc_upd_ch_info(struct rtw89_dev *rtwdev, u8 ch)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_page_regs *regs = chip->page_regs;
	struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
	struct rtw89_hfc_ch_info *info = param->ch_info;
	const struct rtw89_hfc_ch_cfg *cfg = param->ch_cfg;
	u32 val;
	u32 ret;

	ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
	if (ret)
		return ret;

	if (ch > RTW89_DMA_H2C)
		return -EINVAL;

	val = rtw89_read32(rtwdev, regs->ach_page_info + ch * 4);
	info[ch].aval = u32_get_bits(val, B_AX_AVAL_PG_MASK);
	if (ch < RTW89_DMA_H2C)
		info[ch].used = u32_get_bits(val, B_AX_USE_PG_MASK);
	else
		info[ch].used = cfg[ch].min - info[ch].aval;

	return 0;
}

static int hfc_pub_ctrl(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_page_regs *regs = chip->page_regs;
	const struct rtw89_hfc_pub_cfg *cfg = &rtwdev->mac.hfc_param.pub_cfg;
	u32 val;
	int ret;

	ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
	if (ret)
		return ret;

	ret = hfc_pub_cfg_chk(rtwdev);
	if (ret)
		return ret;

	val = u32_encode_bits(cfg->grp0, B_AX_PUBPG_G0_MASK) |
	      u32_encode_bits(cfg->grp1, B_AX_PUBPG_G1_MASK);
	rtw89_write32(rtwdev, regs->pub_page_ctrl1, val);

	val = u32_encode_bits(cfg->wp_thrd, B_AX_WP_THRD_MASK);
	rtw89_write32(rtwdev, regs->wp_page_ctrl2, val);

	return 0;
}

static void hfc_get_mix_info_ax(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_page_regs *regs = chip->page_regs;
	struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
	struct rtw89_hfc_pub_cfg *pub_cfg = &param->pub_cfg;
	struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;
	struct rtw89_hfc_pub_info *info = &param->pub_info;
	u32 val;

	val = rtw89_read32(rtwdev, regs->pub_page_info1);
	info->g0_used = u32_get_bits(val, B_AX_G0_USE_PG_MASK);
	info->g1_used = u32_get_bits(val, B_AX_G1_USE_PG_MASK);
	val = rtw89_read32(rtwdev, regs->pub_page_info3);
	info->g0_aval = u32_get_bits(val, B_AX_G0_AVAL_PG_MASK);
	info->g1_aval = u32_get_bits(val, B_AX_G1_AVAL_PG_MASK);
	info->pub_aval =
		u32_get_bits(rtw89_read32(rtwdev, regs->pub_page_info2),
			     B_AX_PUB_AVAL_PG_MASK);
	info->wp_aval =
		u32_get_bits(rtw89_read32(rtwdev, regs->wp_page_info1),
			     B_AX_WP_AVAL_PG_MASK);

	val = rtw89_read32(rtwdev, regs->hci_fc_ctrl);
	param->en = val & B_AX_HCI_FC_EN ? 1 : 0;
	param->h2c_en = val & B_AX_HCI_FC_CH12_EN ? 1 : 0;
	param->mode = u32_get_bits(val, B_AX_HCI_FC_MODE_MASK);
	prec_cfg->ch011_full_cond =
		u32_get_bits(val, B_AX_HCI_FC_WD_FULL_COND_MASK);
	prec_cfg->h2c_full_cond =
		u32_get_bits(val, B_AX_HCI_FC_CH12_FULL_COND_MASK);
	prec_cfg->wp_ch07_full_cond =
		u32_get_bits(val, B_AX_HCI_FC_WP_CH07_FULL_COND_MASK);
	prec_cfg->wp_ch811_full_cond =
		u32_get_bits(val, B_AX_HCI_FC_WP_CH811_FULL_COND_MASK);

	val = rtw89_read32(rtwdev, regs->ch_page_ctrl);
	prec_cfg->ch011_prec = u32_get_bits(val, B_AX_PREC_PAGE_CH011_MASK);
	prec_cfg->h2c_prec = u32_get_bits(val, B_AX_PREC_PAGE_CH12_MASK);

	val = rtw89_read32(rtwdev, regs->pub_page_ctrl2);
	pub_cfg->pub_max = u32_get_bits(val, B_AX_PUBPG_ALL_MASK);

	val = rtw89_read32(rtwdev, regs->wp_page_ctrl1);
	prec_cfg->wp_ch07_prec = u32_get_bits(val, B_AX_PREC_PAGE_WP_CH07_MASK);
	prec_cfg->wp_ch811_prec = u32_get_bits(val, B_AX_PREC_PAGE_WP_CH811_MASK);

	val = rtw89_read32(rtwdev, regs->wp_page_ctrl2);
	pub_cfg->wp_thrd = u32_get_bits(val, B_AX_WP_THRD_MASK);

	val = rtw89_read32(rtwdev, regs->pub_page_ctrl1);
	pub_cfg->grp0 = u32_get_bits(val, B_AX_PUBPG_G0_MASK);
	pub_cfg->grp1 = u32_get_bits(val, B_AX_PUBPG_G1_MASK);
}

static int hfc_upd_mix_info(struct rtw89_dev *rtwdev)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
	int ret;

	ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
	if (ret)
		return ret;

	mac->hfc_get_mix_info(rtwdev);

	ret = hfc_pub_info_chk(rtwdev);
	if (param->en && ret)
		return ret;

	return 0;
}

static void hfc_h2c_cfg_ax(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_page_regs *regs = chip->page_regs;
	struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
	const struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;
	u32 val;

	val = u32_encode_bits(prec_cfg->h2c_prec, B_AX_PREC_PAGE_CH12_MASK);
	rtw89_write32(rtwdev, regs->ch_page_ctrl, val);

	rtw89_write32_mask(rtwdev, regs->hci_fc_ctrl,
			   B_AX_HCI_FC_CH12_FULL_COND_MASK,
			   prec_cfg->h2c_full_cond);
}

static void hfc_mix_cfg_ax(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_page_regs *regs = chip->page_regs;
	struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
	const struct rtw89_hfc_pub_cfg *pub_cfg = &param->pub_cfg;
	const struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;
	u32 val;

	val = u32_encode_bits(prec_cfg->ch011_prec, B_AX_PREC_PAGE_CH011_MASK) |
	      u32_encode_bits(prec_cfg->h2c_prec, B_AX_PREC_PAGE_CH12_MASK);
	rtw89_write32(rtwdev, regs->ch_page_ctrl, val);

	val = u32_encode_bits(pub_cfg->pub_max, B_AX_PUBPG_ALL_MASK);
	rtw89_write32(rtwdev, regs->pub_page_ctrl2, val);

	val = u32_encode_bits(prec_cfg->wp_ch07_prec,
			      B_AX_PREC_PAGE_WP_CH07_MASK) |
	      u32_encode_bits(prec_cfg->wp_ch811_prec,
			      B_AX_PREC_PAGE_WP_CH811_MASK);
	rtw89_write32(rtwdev, regs->wp_page_ctrl1, val);

	val = u32_replace_bits(rtw89_read32(rtwdev, regs->hci_fc_ctrl),
			       param->mode, B_AX_HCI_FC_MODE_MASK);
	val = u32_replace_bits(val, prec_cfg->ch011_full_cond,
			       B_AX_HCI_FC_WD_FULL_COND_MASK);
	val = u32_replace_bits(val, prec_cfg->h2c_full_cond,
			       B_AX_HCI_FC_CH12_FULL_COND_MASK);
	val = u32_replace_bits(val, prec_cfg->wp_ch07_full_cond,
			       B_AX_HCI_FC_WP_CH07_FULL_COND_MASK);
	val = u32_replace_bits(val, prec_cfg->wp_ch811_full_cond,
			       B_AX_HCI_FC_WP_CH811_FULL_COND_MASK);
	rtw89_write32(rtwdev, regs->hci_fc_ctrl, val);
}

static void hfc_func_en_ax(struct rtw89_dev *rtwdev, bool en, bool h2c_en)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_page_regs *regs = chip->page_regs;
	struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
	u32 val;

	val = rtw89_read32(rtwdev, regs->hci_fc_ctrl);
	param->en = en;
	param->h2c_en = h2c_en;
	val = en ? (val | B_AX_HCI_FC_EN) : (val & ~B_AX_HCI_FC_EN);
	val = h2c_en ? (val | B_AX_HCI_FC_CH12_EN) :
		       (val & ~B_AX_HCI_FC_CH12_EN);
	rtw89_write32(rtwdev, regs->hci_fc_ctrl, val);
}

int rtw89_mac_hfc_init(struct rtw89_dev *rtwdev, bool reset, bool en, bool h2c_en)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 dma_ch_mask = chip->dma_ch_mask;
	u8 ch;
	u32 ret = 0;

	if (reset)
		ret = hfc_reset_param(rtwdev);
	if (ret)
		return ret;

	ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
	if (ret)
		return ret;

	mac->hfc_func_en(rtwdev, false, false);

	if (!en && h2c_en) {
		mac->hfc_h2c_cfg(rtwdev);
		mac->hfc_func_en(rtwdev, en, h2c_en);
		return ret;
	}

	for (ch = RTW89_DMA_ACH0; ch < RTW89_DMA_H2C; ch++) {
		if (dma_ch_mask & BIT(ch))
			continue;
		ret = hfc_ch_ctrl(rtwdev, ch);
		if (ret)
			return ret;
	}

	ret = hfc_pub_ctrl(rtwdev);
	if (ret)
		return ret;

	mac->hfc_mix_cfg(rtwdev);
	if (en || h2c_en) {
		mac->hfc_func_en(rtwdev, en, h2c_en);
		udelay(10);
	}
	for (ch = RTW89_DMA_ACH0; ch < RTW89_DMA_H2C; ch++) {
		if (dma_ch_mask & BIT(ch))
			continue;
		ret = hfc_upd_ch_info(rtwdev, ch);
		if (ret)
			return ret;
	}
	ret = hfc_upd_mix_info(rtwdev);

	return ret;
}

#define PWR_POLL_CNT 2000
static int pwr_cmd_poll(struct rtw89_dev *rtwdev,
			const struct rtw89_pwr_cfg *cfg)
{
	u8 val = 0;
	int ret;
	u32 addr = cfg->base == PWR_INTF_MSK_SDIO ?
		   cfg->addr | SDIO_LOCAL_BASE_ADDR : cfg->addr;

	ret = read_poll_timeout(rtw89_read8, val, !((val ^ cfg->val) & cfg->msk),
				1000, 1000 * PWR_POLL_CNT, false, rtwdev, addr);

	if (!ret)
		return 0;

	rtw89_warn(rtwdev, "[ERR] Polling timeout\n");
	rtw89_warn(rtwdev, "[ERR] addr: %X, %X\n", addr, cfg->addr);
	rtw89_warn(rtwdev, "[ERR] val: %X, %X\n", val, cfg->val);

	return -EBUSY;
}

static int rtw89_mac_sub_pwr_seq(struct rtw89_dev *rtwdev, u8 cv_msk,
				 u8 intf_msk, const struct rtw89_pwr_cfg *cfg)
{
	const struct rtw89_pwr_cfg *cur_cfg;
	u32 addr;
	u8 val;

	for (cur_cfg = cfg; cur_cfg->cmd != PWR_CMD_END; cur_cfg++) {
		if (!(cur_cfg->intf_msk & intf_msk) ||
		    !(cur_cfg->cv_msk & cv_msk))
			continue;

		switch (cur_cfg->cmd) {
		case PWR_CMD_WRITE:
			addr = cur_cfg->addr;

			if (cur_cfg->base == PWR_BASE_SDIO)
				addr |= SDIO_LOCAL_BASE_ADDR;

			val = rtw89_read8(rtwdev, addr);
			val &= ~(cur_cfg->msk);
			val |= (cur_cfg->val & cur_cfg->msk);

			rtw89_write8(rtwdev, addr, val);
			break;
		case PWR_CMD_POLL:
			if (pwr_cmd_poll(rtwdev, cur_cfg))
				return -EBUSY;
			break;
		case PWR_CMD_DELAY:
			if (cur_cfg->val == PWR_DELAY_US)
				udelay(cur_cfg->addr);
			else
				fsleep(cur_cfg->addr * 1000);
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

static int rtw89_mac_pwr_seq(struct rtw89_dev *rtwdev,
			     const struct rtw89_pwr_cfg * const *cfg_seq)
{
	int ret;

	for (; *cfg_seq; cfg_seq++) {
		ret = rtw89_mac_sub_pwr_seq(rtwdev, BIT(rtwdev->hal.cv),
					    PWR_INTF_MSK_PCIE, *cfg_seq);
		if (ret)
			return -EBUSY;
	}

	return 0;
}

static enum rtw89_rpwm_req_pwr_state
rtw89_mac_get_req_pwr_state(struct rtw89_dev *rtwdev)
{
	enum rtw89_rpwm_req_pwr_state state;

	switch (rtwdev->ps_mode) {
	case RTW89_PS_MODE_RFOFF:
		state = RTW89_MAC_RPWM_REQ_PWR_STATE_BAND0_RFOFF;
		break;
	case RTW89_PS_MODE_CLK_GATED:
		state = RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED;
		break;
	case RTW89_PS_MODE_PWR_GATED:
		state = RTW89_MAC_RPWM_REQ_PWR_STATE_PWR_GATED;
		break;
	default:
		state = RTW89_MAC_RPWM_REQ_PWR_STATE_ACTIVE;
		break;
	}
	return state;
}

static void rtw89_mac_send_rpwm(struct rtw89_dev *rtwdev,
				enum rtw89_rpwm_req_pwr_state req_pwr_state,
				bool notify_wake)
{
	u16 request;

	spin_lock_bh(&rtwdev->rpwm_lock);

	request = rtw89_read16(rtwdev, R_AX_RPWM);
	request ^= request | PS_RPWM_TOGGLE;
	request |= req_pwr_state;

	if (notify_wake) {
		request |= PS_RPWM_NOTIFY_WAKE;
	} else {
		rtwdev->mac.rpwm_seq_num = (rtwdev->mac.rpwm_seq_num + 1) &
					   RPWM_SEQ_NUM_MAX;
		request |= FIELD_PREP(PS_RPWM_SEQ_NUM,
				      rtwdev->mac.rpwm_seq_num);

		if (req_pwr_state < RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED)
			request |= PS_RPWM_ACK;
	}
	rtw89_write16(rtwdev, rtwdev->hci.rpwm_addr, request);

	spin_unlock_bh(&rtwdev->rpwm_lock);
}

static int rtw89_mac_check_cpwm_state(struct rtw89_dev *rtwdev,
				      enum rtw89_rpwm_req_pwr_state req_pwr_state)
{
	bool request_deep_mode;
	bool in_deep_mode;
	u8 rpwm_req_num;
	u8 cpwm_rsp_seq;
	u8 cpwm_seq;
	u8 cpwm_status;

	if (req_pwr_state >= RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED)
		request_deep_mode = true;
	else
		request_deep_mode = false;

	if (rtw89_read32_mask(rtwdev, R_AX_LDM, B_AX_EN_32K))
		in_deep_mode = true;
	else
		in_deep_mode = false;

	if (request_deep_mode != in_deep_mode)
		return -EPERM;

	if (request_deep_mode)
		return 0;

	rpwm_req_num = rtwdev->mac.rpwm_seq_num;
	cpwm_rsp_seq = rtw89_read16_mask(rtwdev, rtwdev->hci.cpwm_addr,
					 PS_CPWM_RSP_SEQ_NUM);

	if (rpwm_req_num != cpwm_rsp_seq)
		return -EPERM;

	rtwdev->mac.cpwm_seq_num = (rtwdev->mac.cpwm_seq_num + 1) &
				   CPWM_SEQ_NUM_MAX;

	cpwm_seq = rtw89_read16_mask(rtwdev, rtwdev->hci.cpwm_addr, PS_CPWM_SEQ_NUM);
	if (cpwm_seq != rtwdev->mac.cpwm_seq_num)
		return -EPERM;

	cpwm_status = rtw89_read16_mask(rtwdev, rtwdev->hci.cpwm_addr, PS_CPWM_STATE);
	if (cpwm_status != req_pwr_state)
		return -EPERM;

	return 0;
}

void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter)
{
	enum rtw89_rpwm_req_pwr_state state;
	unsigned long delay = enter ? 10 : 150;
	int ret;
	int i;

	if (enter)
		state = rtw89_mac_get_req_pwr_state(rtwdev);
	else
		state = RTW89_MAC_RPWM_REQ_PWR_STATE_ACTIVE;

	for (i = 0; i < RPWM_TRY_CNT; i++) {
		rtw89_mac_send_rpwm(rtwdev, state, false);
		ret = read_poll_timeout_atomic(rtw89_mac_check_cpwm_state, ret,
					       !ret, delay, 15000, false,
					       rtwdev, state);
		if (!ret)
			break;

		if (i == RPWM_TRY_CNT - 1)
			rtw89_err(rtwdev, "firmware failed to ack for %s ps mode\n",
				  enter ? "entering" : "leaving");
		else
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
				    "%d time firmware failed to ack for %s ps mode\n",
				    i + 1, enter ? "entering" : "leaving");
	}
}

void rtw89_mac_notify_wake(struct rtw89_dev *rtwdev)
{
	enum rtw89_rpwm_req_pwr_state state;

	state = rtw89_mac_get_req_pwr_state(rtwdev);
	rtw89_mac_send_rpwm(rtwdev, state, true);
}

static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on)
{
#define PWR_ACT 1
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_pwr_cfg * const *cfg_seq;
	int (*cfg_func)(struct rtw89_dev *rtwdev);
	int ret;
	u8 val;

	if (on) {
		cfg_seq = chip->pwr_on_seq;
		cfg_func = chip->ops->pwr_on_func;
	} else {
		cfg_seq = chip->pwr_off_seq;
		cfg_func = chip->ops->pwr_off_func;
	}

	if (test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags))
		__rtw89_leave_ps_mode(rtwdev);

	val = rtw89_read32_mask(rtwdev, R_AX_IC_PWR_STATE, B_AX_WLMAC_PWR_STE_MASK);
	if (on && val == PWR_ACT) {
		rtw89_err(rtwdev, "MAC has already powered on\n");
		return -EBUSY;
	}

	ret = cfg_func ? cfg_func(rtwdev) : rtw89_mac_pwr_seq(rtwdev, cfg_seq);
	if (ret)
		return ret;

	if (on) {
		if (!test_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags))
			mac->efuse_read_fw_secure(rtwdev);

		set_bit(RTW89_FLAG_POWERON, rtwdev->flags);
		set_bit(RTW89_FLAG_DMAC_FUNC, rtwdev->flags);
		set_bit(RTW89_FLAG_CMAC0_FUNC, rtwdev->flags);
		rtw89_write8(rtwdev, R_AX_SCOREBOARD + 3, MAC_AX_NOTIFY_TP_MAJOR);
	} else {
		clear_bit(RTW89_FLAG_POWERON, rtwdev->flags);
		clear_bit(RTW89_FLAG_DMAC_FUNC, rtwdev->flags);
		clear_bit(RTW89_FLAG_CMAC0_FUNC, rtwdev->flags);
		clear_bit(RTW89_FLAG_CMAC1_FUNC, rtwdev->flags);
		clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);
		rtw89_write8(rtwdev, R_AX_SCOREBOARD + 3, MAC_AX_NOTIFY_PWR_MAJOR);
		rtw89_set_entity_state(rtwdev, RTW89_PHY_0, false);
		rtw89_set_entity_state(rtwdev, RTW89_PHY_1, false);
	}

	return 0;
#undef PWR_ACT
}

int rtw89_mac_pwr_on(struct rtw89_dev *rtwdev)
{
	int ret;

	ret = rtw89_mac_power_switch(rtwdev, true);
	if (ret) {
		rtw89_mac_power_switch(rtwdev, false);
		ret = rtw89_mac_power_switch(rtwdev, true);
		if (ret)
			return ret;
	}

	return 0;
}

void rtw89_mac_pwr_off(struct rtw89_dev *rtwdev)
{
	rtw89_mac_power_switch(rtwdev, false);
}

static int cmac_func_en_ax(struct rtw89_dev *rtwdev, u8 mac_idx, bool en)
{
	u32 func_en = 0;
	u32 ck_en = 0;
	u32 c1pc_en = 0;
	u32 addrl_func_en[] = {R_AX_CMAC_FUNC_EN, R_AX_CMAC_FUNC_EN_C1};
	u32 addrl_ck_en[] = {R_AX_CK_EN, R_AX_CK_EN_C1};

	func_en = B_AX_CMAC_EN | B_AX_CMAC_TXEN | B_AX_CMAC_RXEN |
		  B_AX_PHYINTF_EN | B_AX_CMAC_DMA_EN | B_AX_PTCLTOP_EN |
		  B_AX_SCHEDULER_EN | B_AX_TMAC_EN | B_AX_RMAC_EN |
		  B_AX_CMAC_CRPRT;
	ck_en = B_AX_CMAC_CKEN | B_AX_PHYINTF_CKEN | B_AX_CMAC_DMA_CKEN |
		B_AX_PTCLTOP_CKEN | B_AX_SCHEDULER_CKEN | B_AX_TMAC_CKEN |
		B_AX_RMAC_CKEN;
	c1pc_en = B_AX_R_SYM_WLCMAC1_PC_EN |
		  B_AX_R_SYM_WLCMAC1_P1_PC_EN |
		  B_AX_R_SYM_WLCMAC1_P2_PC_EN |
		  B_AX_R_SYM_WLCMAC1_P3_PC_EN |
		  B_AX_R_SYM_WLCMAC1_P4_PC_EN;

	if (en) {
		if (mac_idx == RTW89_MAC_1) {
			rtw89_write32_set(rtwdev, R_AX_AFE_CTRL1, c1pc_en);
			rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND,
					  B_AX_R_SYM_ISO_CMAC12PP);
			rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND,
					  B_AX_CMAC1_FEN);
		}
		rtw89_write32_set(rtwdev, addrl_ck_en[mac_idx], ck_en);
		rtw89_write32_set(rtwdev, addrl_func_en[mac_idx], func_en);
	} else {
		rtw89_write32_clr(rtwdev, addrl_func_en[mac_idx], func_en);
		rtw89_write32_clr(rtwdev, addrl_ck_en[mac_idx], ck_en);
		if (mac_idx == RTW89_MAC_1) {
			rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND,
					  B_AX_CMAC1_FEN);
			rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND,
					  B_AX_R_SYM_ISO_CMAC12PP);
			rtw89_write32_clr(rtwdev, R_AX_AFE_CTRL1, c1pc_en);
		}
	}

	return 0;
}

static int dmac_func_en_ax(struct rtw89_dev *rtwdev)
{
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
	u32 val32;

	if (chip_id == RTL8852C)
		val32 = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN |
			 B_AX_MAC_SEC_EN | B_AX_DISPATCHER_EN |
			 B_AX_DLE_CPUIO_EN | B_AX_PKT_IN_EN |
			 B_AX_DMAC_TBL_EN | B_AX_PKT_BUF_EN |
			 B_AX_STA_SCH_EN | B_AX_TXPKT_CTRL_EN |
			 B_AX_WD_RLS_EN | B_AX_MPDU_PROC_EN |
			 B_AX_DMAC_CRPRT | B_AX_H_AXIDMA_EN);
	else
		val32 = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN |
			 B_AX_MAC_SEC_EN | B_AX_DISPATCHER_EN |
			 B_AX_DLE_CPUIO_EN | B_AX_PKT_IN_EN |
			 B_AX_DMAC_TBL_EN | B_AX_PKT_BUF_EN |
			 B_AX_STA_SCH_EN | B_AX_TXPKT_CTRL_EN |
			 B_AX_WD_RLS_EN | B_AX_MPDU_PROC_EN |
			 B_AX_DMAC_CRPRT);
	rtw89_write32(rtwdev, R_AX_DMAC_FUNC_EN, val32);

	val32 = (B_AX_MAC_SEC_CLK_EN | B_AX_DISPATCHER_CLK_EN |
		 B_AX_DLE_CPUIO_CLK_EN | B_AX_PKT_IN_CLK_EN |
		 B_AX_STA_SCH_CLK_EN | B_AX_TXPKT_CTRL_CLK_EN |
		 B_AX_WD_RLS_CLK_EN | B_AX_BBRPT_CLK_EN);
	if (chip_id == RTL8852BT)
		val32 |= B_AX_AXIDMA_CLK_EN;
	rtw89_write32(rtwdev, R_AX_DMAC_CLK_EN, val32);

	return 0;
}

static int chip_func_en_ax(struct rtw89_dev *rtwdev)
{
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;

	if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev))
		rtw89_write32_set(rtwdev, R_AX_SPS_DIG_ON_CTRL0,
				  B_AX_OCP_L1_MASK);

	return 0;
}

static int sys_init_ax(struct rtw89_dev *rtwdev)
{
	int ret;

	ret = dmac_func_en_ax(rtwdev);
	if (ret)
		return ret;

	ret = cmac_func_en_ax(rtwdev, 0, true);
	if (ret)
		return ret;

	ret = chip_func_en_ax(rtwdev);
	if (ret)
		return ret;

	return ret;
}

const struct rtw89_mac_size_set rtw89_mac_size = {
	.hfc_preccfg_pcie = {2, 40, 0, 0, 1, 0, 0, 0},
	.hfc_prec_cfg_c0 = {2, 32, 0, 0, 0, 0, 0, 0},
	.hfc_prec_cfg_c2 = {0, 256, 0, 0, 0, 0, 0, 0},
	/* PCIE 64 */
	.wde_size0 = {RTW89_WDE_PG_64, 4095, 1,},
	.wde_size0_v1 = {RTW89_WDE_PG_64, 3328, 0, 0,},
	/* DLFW */
	.wde_size4 = {RTW89_WDE_PG_64, 0, 4096,},
	.wde_size4_v1 = {RTW89_WDE_PG_64, 0, 3328, 0,},
	/* PCIE 64 */
	.wde_size6 = {RTW89_WDE_PG_64, 512, 0,},
	/* 8852B PCIE SCC */
	.wde_size7 = {RTW89_WDE_PG_64, 510, 2,},
	/* DLFW */
	.wde_size9 = {RTW89_WDE_PG_64, 0, 1024,},
	/* 8852C DLFW */
	.wde_size18 = {RTW89_WDE_PG_64, 0, 2048,},
	/* 8852C PCIE SCC */
	.wde_size19 = {RTW89_WDE_PG_64, 3328, 0,},
	.wde_size23 = {RTW89_WDE_PG_64, 1022, 2,},
	/* PCIE */
	.ple_size0 = {RTW89_PLE_PG_128, 1520, 16,},
	.ple_size0_v1 = {RTW89_PLE_PG_128, 2688, 240, 212992,},
	.ple_size3_v1 = {RTW89_PLE_PG_128, 2928, 0, 212992,},
	/* DLFW */
	.ple_size4 = {RTW89_PLE_PG_128, 64, 1472,},
	/* PCIE 64 */
	.ple_size6 = {RTW89_PLE_PG_128, 496, 16,},
	/* DLFW */
	.ple_size8 = {RTW89_PLE_PG_128, 64, 960,},
	.ple_size9 = {RTW89_PLE_PG_128, 2288, 16,},
	/* 8852C DLFW */
	.ple_size18 = {RTW89_PLE_PG_128, 2544, 16,},
	/* 8852C PCIE SCC */
	.ple_size19 = {RTW89_PLE_PG_128, 1904, 16,},
	/* PCIE 64 */
	.wde_qt0 = {3792, 196, 0, 107,},
	.wde_qt0_v1 = {3302, 6, 0, 20,},
	/* DLFW */
	.wde_qt4 = {0, 0, 0, 0,},
	/* PCIE 64 */
	.wde_qt6 = {448, 48, 0, 16,},
	/* 8852B PCIE SCC */
	.wde_qt7 = {446, 48, 0, 16,},
	/* 8852C DLFW */
	.wde_qt17 = {0, 0, 0, 0,},
	/* 8852C PCIE SCC */
	.wde_qt18 = {3228, 60, 0, 40,},
	.wde_qt23 = {958, 48, 0, 16,},
	.ple_qt0 = {320, 320, 32, 16, 13, 13, 292, 292, 64, 18, 1, 4, 0,},
	.ple_qt1 = {320, 320, 32, 16, 1316, 1316, 1595, 1595, 1367, 1321, 1, 1307, 0,},
	/* PCIE SCC */
	.ple_qt4 = {264, 0, 16, 20, 26, 13, 356, 0, 32, 40, 8,},
	/* PCIE SCC */
	.ple_qt5 = {264, 0, 32, 20, 64, 13, 1101, 0, 64, 128, 120,},
	.ple_qt9 = {0, 0, 32, 256, 0, 0, 0, 0, 0, 0, 1, 0, 0,},
/* DLFW */ 1686 .ple_qt13 = {0, 0, 16, 48, 0, 0, 0, 0, 0, 0, 0,}, 1687 /* PCIE 64 */ 1688 .ple_qt18 = {147, 0, 16, 20, 17, 13, 89, 0, 32, 14, 8, 0,}, 1689 /* DLFW 52C */ 1690 .ple_qt44 = {0, 0, 16, 256, 0, 0, 0, 0, 0, 0, 0, 0,}, 1691 /* DLFW 52C */ 1692 .ple_qt45 = {0, 0, 32, 256, 0, 0, 0, 0, 0, 0, 0, 0,}, 1693 /* 8852C PCIE SCC */ 1694 .ple_qt46 = {525, 0, 16, 20, 13, 13, 178, 0, 32, 62, 8, 16,}, 1695 /* 8852C PCIE SCC */ 1696 .ple_qt47 = {525, 0, 32, 20, 1034, 13, 1199, 0, 1053, 62, 160, 1037,}, 1697 .ple_qt57 = {147, 0, 16, 20, 13, 13, 178, 0, 32, 14, 8, 0,}, 1698 /* PCIE 64 */ 1699 .ple_qt58 = {147, 0, 16, 20, 157, 13, 229, 0, 172, 14, 24, 0,}, 1700 .ple_qt59 = {147, 0, 32, 20, 1860, 13, 2025, 0, 1879, 14, 24, 0,}, 1701 /* 8852A PCIE WOW */ 1702 .ple_qt_52a_wow = {264, 0, 32, 20, 64, 13, 1005, 0, 64, 128, 120,}, 1703 /* 8852B PCIE WOW */ 1704 .ple_qt_52b_wow = {147, 0, 16, 20, 157, 13, 133, 0, 172, 14, 24, 0,}, 1705 /* 8852BT PCIE WOW */ 1706 .ple_qt_52bt_wow = {147, 0, 32, 20, 1860, 13, 1929, 0, 1879, 14, 24, 0,}, 1707 /* 8851B PCIE WOW */ 1708 .ple_qt_51b_wow = {147, 0, 16, 20, 157, 13, 133, 0, 172, 14, 24, 0,}, 1709 .ple_rsvd_qt0 = {2, 107, 107, 6, 6, 6, 6, 0, 0, 0,}, 1710 .ple_rsvd_qt1 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0,}, 1711 .rsvd0_size0 = {212992, 0,}, 1712 .rsvd1_size0 = {587776, 2048,}, 1713 }; 1714 EXPORT_SYMBOL(rtw89_mac_size); 1715 1716 static const struct rtw89_dle_mem *get_dle_mem_cfg(struct rtw89_dev *rtwdev, 1717 enum rtw89_qta_mode mode) 1718 { 1719 struct rtw89_mac_info *mac = &rtwdev->mac; 1720 const struct rtw89_dle_mem *cfg; 1721 1722 cfg = &rtwdev->chip->dle_mem[mode]; 1723 if (!cfg) 1724 return NULL; 1725 1726 if (cfg->mode != mode) { 1727 rtw89_warn(rtwdev, "qta mode unmatch!\n"); 1728 return NULL; 1729 } 1730 1731 mac->dle_info.rsvd_qt = cfg->rsvd_qt; 1732 mac->dle_info.ple_pg_size = cfg->ple_size->pge_size; 1733 mac->dle_info.ple_free_pg = cfg->ple_size->lnk_pge_num; 1734 mac->dle_info.qta_mode = mode; 1735 mac->dle_info.c0_rx_qta = cfg->ple_min_qt->cma0_dma; 1736 mac->dle_info.c1_rx_qta = cfg->ple_min_qt->cma1_dma; 1737 1738 return cfg; 1739 } 1740 1741 int rtw89_mac_get_dle_rsvd_qt_cfg(struct rtw89_dev *rtwdev, 1742 enum rtw89_mac_dle_rsvd_qt_type type, 1743 struct rtw89_mac_dle_rsvd_qt_cfg *cfg) 1744 { 1745 struct rtw89_dle_info *dle_info = &rtwdev->mac.dle_info; 1746 const struct rtw89_rsvd_quota *rsvd_qt = dle_info->rsvd_qt; 1747 1748 switch (type) { 1749 case DLE_RSVD_QT_MPDU_INFO: 1750 cfg->pktid = dle_info->ple_free_pg; 1751 cfg->pg_num = rsvd_qt->mpdu_info_tbl; 1752 break; 1753 case DLE_RSVD_QT_B0_CSI: 1754 cfg->pktid = dle_info->ple_free_pg + rsvd_qt->mpdu_info_tbl; 1755 cfg->pg_num = rsvd_qt->b0_csi; 1756 break; 1757 case DLE_RSVD_QT_B1_CSI: 1758 cfg->pktid = dle_info->ple_free_pg + 1759 rsvd_qt->mpdu_info_tbl + rsvd_qt->b0_csi; 1760 cfg->pg_num = rsvd_qt->b1_csi; 1761 break; 1762 case DLE_RSVD_QT_B0_LMR: 1763 cfg->pktid = dle_info->ple_free_pg + 1764 rsvd_qt->mpdu_info_tbl + rsvd_qt->b0_csi + rsvd_qt->b1_csi; 1765 cfg->pg_num = rsvd_qt->b0_lmr; 1766 break; 1767 case DLE_RSVD_QT_B1_LMR: 1768 cfg->pktid = dle_info->ple_free_pg + 1769 rsvd_qt->mpdu_info_tbl + rsvd_qt->b0_csi + rsvd_qt->b1_csi + 1770 rsvd_qt->b0_lmr; 1771 cfg->pg_num = rsvd_qt->b1_lmr; 1772 break; 1773 case DLE_RSVD_QT_B0_FTM: 1774 cfg->pktid = dle_info->ple_free_pg + 1775 rsvd_qt->mpdu_info_tbl + rsvd_qt->b0_csi + rsvd_qt->b1_csi + 1776 rsvd_qt->b0_lmr + rsvd_qt->b1_lmr; 1777 cfg->pg_num = rsvd_qt->b0_ftm; 1778 break; 1779 case DLE_RSVD_QT_B1_FTM: 1780 cfg->pktid = dle_info->ple_free_pg 
+ 1781 rsvd_qt->mpdu_info_tbl + rsvd_qt->b0_csi + rsvd_qt->b1_csi + 1782 rsvd_qt->b0_lmr + rsvd_qt->b1_lmr + rsvd_qt->b0_ftm; 1783 cfg->pg_num = rsvd_qt->b1_ftm; 1784 break; 1785 default: 1786 return -EINVAL; 1787 } 1788 1789 cfg->size = (u32)cfg->pg_num * dle_info->ple_pg_size; 1790 1791 return 0; 1792 } 1793 1794 static bool mac_is_txq_empty_ax(struct rtw89_dev *rtwdev) 1795 { 1796 struct rtw89_mac_dle_dfi_qempty qempty; 1797 u32 grpnum, qtmp, val32, msk32; 1798 int i, j, ret; 1799 1800 grpnum = rtwdev->chip->wde_qempty_acq_grpnum; 1801 qempty.dle_type = DLE_CTRL_TYPE_WDE; 1802 1803 for (i = 0; i < grpnum; i++) { 1804 qempty.grpsel = i; 1805 ret = rtw89_mac_dle_dfi_qempty_cfg(rtwdev, &qempty); 1806 if (ret) { 1807 rtw89_warn(rtwdev, "dle dfi acq empty %d\n", ret); 1808 return false; 1809 } 1810 qtmp = qempty.qempty; 1811 for (j = 0 ; j < QEMP_ACQ_GRP_MACID_NUM; j++) { 1812 val32 = u32_get_bits(qtmp, QEMP_ACQ_GRP_QSEL_MASK); 1813 if (val32 != QEMP_ACQ_GRP_QSEL_MASK) 1814 return false; 1815 qtmp >>= QEMP_ACQ_GRP_QSEL_SH; 1816 } 1817 } 1818 1819 qempty.grpsel = rtwdev->chip->wde_qempty_mgq_grpsel; 1820 ret = rtw89_mac_dle_dfi_qempty_cfg(rtwdev, &qempty); 1821 if (ret) { 1822 rtw89_warn(rtwdev, "dle dfi mgq empty %d\n", ret); 1823 return false; 1824 } 1825 msk32 = B_CMAC0_MGQ_NORMAL | B_CMAC0_MGQ_NO_PWRSAV | B_CMAC0_CPUMGQ; 1826 if ((qempty.qempty & msk32) != msk32) 1827 return false; 1828 1829 if (rtwdev->dbcc_en) { 1830 msk32 |= B_CMAC1_MGQ_NORMAL | B_CMAC1_MGQ_NO_PWRSAV | B_CMAC1_CPUMGQ; 1831 if ((qempty.qempty & msk32) != msk32) 1832 return false; 1833 } 1834 1835 msk32 = B_AX_WDE_EMPTY_QTA_DMAC_WLAN_CPU | B_AX_WDE_EMPTY_QTA_DMAC_DATA_CPU | 1836 B_AX_PLE_EMPTY_QTA_DMAC_WLAN_CPU | B_AX_PLE_EMPTY_QTA_DMAC_H2C | 1837 B_AX_WDE_EMPTY_QUE_OTHERS | B_AX_PLE_EMPTY_QUE_DMAC_MPDU_TX | 1838 B_AX_WDE_EMPTY_QTA_DMAC_CPUIO | B_AX_PLE_EMPTY_QTA_DMAC_CPUIO | 1839 B_AX_WDE_EMPTY_QUE_DMAC_PKTIN | B_AX_WDE_EMPTY_QTA_DMAC_HIF | 1840 B_AX_PLE_EMPTY_QUE_DMAC_SEC_TX | B_AX_WDE_EMPTY_QTA_DMAC_PKTIN | 1841 B_AX_PLE_EMPTY_QTA_DMAC_B0_TXPL | B_AX_PLE_EMPTY_QTA_DMAC_B1_TXPL | 1842 B_AX_PLE_EMPTY_QTA_DMAC_MPDU_TX; 1843 val32 = rtw89_read32(rtwdev, R_AX_DLE_EMPTY0); 1844 1845 return (val32 & msk32) == msk32; 1846 } 1847 1848 static inline u32 dle_used_size(const struct rtw89_dle_mem *cfg) 1849 { 1850 const struct rtw89_dle_size *wde = cfg->wde_size; 1851 const struct rtw89_dle_size *ple = cfg->ple_size; 1852 u32 used; 1853 1854 used = wde->pge_size * (wde->lnk_pge_num + wde->unlnk_pge_num) + 1855 ple->pge_size * (ple->lnk_pge_num + ple->unlnk_pge_num); 1856 1857 if (cfg->rsvd0_size && cfg->rsvd1_size) { 1858 used += cfg->rsvd0_size->size; 1859 used += cfg->rsvd1_size->size; 1860 } 1861 1862 return used; 1863 } 1864 1865 static u32 dle_expected_used_size(struct rtw89_dev *rtwdev, 1866 enum rtw89_qta_mode mode) 1867 { 1868 u32 size = rtwdev->chip->fifo_size; 1869 1870 if (mode == RTW89_QTA_SCC) 1871 size -= rtwdev->chip->dle_scc_rsvd_size; 1872 1873 return size; 1874 } 1875 1876 static void dle_func_en_ax(struct rtw89_dev *rtwdev, bool enable) 1877 { 1878 if (enable) 1879 rtw89_write32_set(rtwdev, R_AX_DMAC_FUNC_EN, 1880 B_AX_DLE_WDE_EN | B_AX_DLE_PLE_EN); 1881 else 1882 rtw89_write32_clr(rtwdev, R_AX_DMAC_FUNC_EN, 1883 B_AX_DLE_WDE_EN | B_AX_DLE_PLE_EN); 1884 } 1885 1886 static void dle_clk_en_ax(struct rtw89_dev *rtwdev, bool enable) 1887 { 1888 u32 val = B_AX_DLE_WDE_CLK_EN | B_AX_DLE_PLE_CLK_EN; 1889 1890 if (enable) { 1891 if (rtwdev->chip->chip_id == RTL8851B) 1892 val |= B_AX_AXIDMA_CLK_EN; 1893 
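		/* Mirrors the sequence in rtw89_mac_dle_init(): the DLE function
		 * blocks are disabled, these clocks (plus the AXIDMA clock on
		 * RTL8851B) are ungated, dle_mix_cfg_ax() and the quota setup
		 * run, and only then is the DLE function re-enabled.
		 */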
rtw89_write32_set(rtwdev, R_AX_DMAC_CLK_EN, val); 1894 } else { 1895 rtw89_write32_clr(rtwdev, R_AX_DMAC_CLK_EN, val); 1896 } 1897 } 1898 1899 static int dle_mix_cfg_ax(struct rtw89_dev *rtwdev, const struct rtw89_dle_mem *cfg) 1900 { 1901 const struct rtw89_dle_size *size_cfg; 1902 u32 val; 1903 u8 bound = 0; 1904 1905 val = rtw89_read32(rtwdev, R_AX_WDE_PKTBUF_CFG); 1906 size_cfg = cfg->wde_size; 1907 1908 switch (size_cfg->pge_size) { 1909 default: 1910 case RTW89_WDE_PG_64: 1911 val = u32_replace_bits(val, S_AX_WDE_PAGE_SEL_64, 1912 B_AX_WDE_PAGE_SEL_MASK); 1913 break; 1914 case RTW89_WDE_PG_128: 1915 val = u32_replace_bits(val, S_AX_WDE_PAGE_SEL_128, 1916 B_AX_WDE_PAGE_SEL_MASK); 1917 break; 1918 case RTW89_WDE_PG_256: 1919 rtw89_err(rtwdev, "[ERR]WDE DLE doesn't support 256 byte!\n"); 1920 return -EINVAL; 1921 } 1922 1923 val = u32_replace_bits(val, bound, B_AX_WDE_START_BOUND_MASK); 1924 val = u32_replace_bits(val, size_cfg->lnk_pge_num, 1925 B_AX_WDE_FREE_PAGE_NUM_MASK); 1926 rtw89_write32(rtwdev, R_AX_WDE_PKTBUF_CFG, val); 1927 1928 val = rtw89_read32(rtwdev, R_AX_PLE_PKTBUF_CFG); 1929 bound = (size_cfg->lnk_pge_num + size_cfg->unlnk_pge_num) 1930 * size_cfg->pge_size / DLE_BOUND_UNIT; 1931 size_cfg = cfg->ple_size; 1932 1933 switch (size_cfg->pge_size) { 1934 default: 1935 case RTW89_PLE_PG_64: 1936 rtw89_err(rtwdev, "[ERR]PLE DLE doesn't support 64 byte!\n"); 1937 return -EINVAL; 1938 case RTW89_PLE_PG_128: 1939 val = u32_replace_bits(val, S_AX_PLE_PAGE_SEL_128, 1940 B_AX_PLE_PAGE_SEL_MASK); 1941 break; 1942 case RTW89_PLE_PG_256: 1943 val = u32_replace_bits(val, S_AX_PLE_PAGE_SEL_256, 1944 B_AX_PLE_PAGE_SEL_MASK); 1945 break; 1946 } 1947 1948 val = u32_replace_bits(val, bound, B_AX_PLE_START_BOUND_MASK); 1949 val = u32_replace_bits(val, size_cfg->lnk_pge_num, 1950 B_AX_PLE_FREE_PAGE_NUM_MASK); 1951 rtw89_write32(rtwdev, R_AX_PLE_PKTBUF_CFG, val); 1952 1953 return 0; 1954 } 1955 1956 static int chk_dle_rdy_ax(struct rtw89_dev *rtwdev, bool wde_or_ple) 1957 { 1958 u32 reg, mask; 1959 u32 ini; 1960 1961 if (wde_or_ple) { 1962 reg = R_AX_WDE_INI_STATUS; 1963 mask = WDE_MGN_INI_RDY; 1964 } else { 1965 reg = R_AX_PLE_INI_STATUS; 1966 mask = PLE_MGN_INI_RDY; 1967 } 1968 1969 return read_poll_timeout(rtw89_read32, ini, (ini & mask) == mask, 1, 1970 2000, false, rtwdev, reg); 1971 } 1972 1973 #define INVALID_QT_WCPU U16_MAX 1974 #define SET_QUOTA_VAL(_min_x, _max_x, _module, _idx) \ 1975 do { \ 1976 val = u32_encode_bits(_min_x, B_AX_ ## _module ## _MIN_SIZE_MASK) | \ 1977 u32_encode_bits(_max_x, B_AX_ ## _module ## _MAX_SIZE_MASK); \ 1978 rtw89_write32(rtwdev, \ 1979 R_AX_ ## _module ## _QTA ## _idx ## _CFG, \ 1980 val); \ 1981 } while (0) 1982 #define SET_QUOTA(_x, _module, _idx) \ 1983 SET_QUOTA_VAL(min_cfg->_x, max_cfg->_x, _module, _idx) 1984 1985 static void wde_quota_cfg_ax(struct rtw89_dev *rtwdev, 1986 const struct rtw89_wde_quota *min_cfg, 1987 const struct rtw89_wde_quota *max_cfg, 1988 u16 ext_wde_min_qt_wcpu) 1989 { 1990 u16 min_qt_wcpu = ext_wde_min_qt_wcpu != INVALID_QT_WCPU ? 
1991 ext_wde_min_qt_wcpu : min_cfg->wcpu; 1992 u32 val; 1993 1994 SET_QUOTA(hif, WDE, 0); 1995 SET_QUOTA_VAL(min_qt_wcpu, max_cfg->wcpu, WDE, 1); 1996 SET_QUOTA(pkt_in, WDE, 3); 1997 SET_QUOTA(cpu_io, WDE, 4); 1998 } 1999 2000 static void ple_quota_cfg_ax(struct rtw89_dev *rtwdev, 2001 const struct rtw89_ple_quota *min_cfg, 2002 const struct rtw89_ple_quota *max_cfg) 2003 { 2004 u32 val; 2005 2006 SET_QUOTA(cma0_tx, PLE, 0); 2007 SET_QUOTA(cma1_tx, PLE, 1); 2008 SET_QUOTA(c2h, PLE, 2); 2009 SET_QUOTA(h2c, PLE, 3); 2010 SET_QUOTA(wcpu, PLE, 4); 2011 SET_QUOTA(mpdu_proc, PLE, 5); 2012 SET_QUOTA(cma0_dma, PLE, 6); 2013 SET_QUOTA(cma1_dma, PLE, 7); 2014 SET_QUOTA(bb_rpt, PLE, 8); 2015 SET_QUOTA(wd_rel, PLE, 9); 2016 SET_QUOTA(cpu_io, PLE, 10); 2017 if (rtwdev->chip->chip_id == RTL8852C) 2018 SET_QUOTA(tx_rpt, PLE, 11); 2019 } 2020 2021 int rtw89_mac_resize_ple_rx_quota(struct rtw89_dev *rtwdev, bool wow) 2022 { 2023 const struct rtw89_ple_quota *min_cfg, *max_cfg; 2024 const struct rtw89_dle_mem *cfg; 2025 u32 val; 2026 2027 if (rtwdev->chip->chip_id == RTL8852C) 2028 return 0; 2029 2030 if (rtwdev->mac.qta_mode != RTW89_QTA_SCC) { 2031 rtw89_err(rtwdev, "[ERR]support SCC mode only\n"); 2032 return -EINVAL; 2033 } 2034 2035 if (wow) 2036 cfg = get_dle_mem_cfg(rtwdev, RTW89_QTA_WOW); 2037 else 2038 cfg = get_dle_mem_cfg(rtwdev, RTW89_QTA_SCC); 2039 if (!cfg) { 2040 rtw89_err(rtwdev, "[ERR]get_dle_mem_cfg\n"); 2041 return -EINVAL; 2042 } 2043 2044 min_cfg = cfg->ple_min_qt; 2045 max_cfg = cfg->ple_max_qt; 2046 SET_QUOTA(cma0_dma, PLE, 6); 2047 SET_QUOTA(cma1_dma, PLE, 7); 2048 2049 return 0; 2050 } 2051 #undef SET_QUOTA 2052 2053 void rtw89_mac_hw_mgnt_sec(struct rtw89_dev *rtwdev, bool enable) 2054 { 2055 const struct rtw89_chip_info *chip = rtwdev->chip; 2056 u32 msk32 = B_AX_UC_MGNT_DEC | B_AX_BMC_MGNT_DEC; 2057 2058 if (rtwdev->chip->chip_gen != RTW89_CHIP_AX) 2059 return; 2060 2061 /* 8852C enable B_AX_UC_MGNT_DEC by default */ 2062 if (chip->chip_id == RTL8852C) 2063 msk32 = B_AX_BMC_MGNT_DEC; 2064 2065 if (enable) 2066 rtw89_write32_set(rtwdev, R_AX_SEC_ENG_CTRL, msk32); 2067 else 2068 rtw89_write32_clr(rtwdev, R_AX_SEC_ENG_CTRL, msk32); 2069 } 2070 2071 static void dle_quota_cfg(struct rtw89_dev *rtwdev, 2072 const struct rtw89_dle_mem *cfg, 2073 u16 ext_wde_min_qt_wcpu) 2074 { 2075 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 2076 2077 mac->wde_quota_cfg(rtwdev, cfg->wde_min_qt, cfg->wde_max_qt, ext_wde_min_qt_wcpu); 2078 mac->ple_quota_cfg(rtwdev, cfg->ple_min_qt, cfg->ple_max_qt); 2079 } 2080 2081 int rtw89_mac_dle_init(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode, 2082 enum rtw89_qta_mode ext_mode) 2083 { 2084 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 2085 const struct rtw89_dle_mem *cfg, *ext_cfg; 2086 u16 ext_wde_min_qt_wcpu = INVALID_QT_WCPU; 2087 int ret; 2088 2089 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 2090 if (ret) 2091 return ret; 2092 2093 cfg = get_dle_mem_cfg(rtwdev, mode); 2094 if (!cfg) { 2095 rtw89_err(rtwdev, "[ERR]get_dle_mem_cfg\n"); 2096 ret = -EINVAL; 2097 goto error; 2098 } 2099 2100 if (mode == RTW89_QTA_DLFW) { 2101 ext_cfg = get_dle_mem_cfg(rtwdev, ext_mode); 2102 if (!ext_cfg) { 2103 rtw89_err(rtwdev, "[ERR]get_dle_ext_mem_cfg %d\n", 2104 ext_mode); 2105 ret = -EINVAL; 2106 goto error; 2107 } 2108 ext_wde_min_qt_wcpu = ext_cfg->wde_min_qt->wcpu; 2109 } 2110 2111 if (dle_used_size(cfg) != dle_expected_used_size(rtwdev, mode)) { 2112 rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n"); 2113 ret = 
-EINVAL; 2114 goto error; 2115 } 2116 2117 mac->dle_func_en(rtwdev, false); 2118 mac->dle_clk_en(rtwdev, true); 2119 2120 ret = mac->dle_mix_cfg(rtwdev, cfg); 2121 if (ret) { 2122 rtw89_err(rtwdev, "[ERR] dle mix cfg\n"); 2123 goto error; 2124 } 2125 dle_quota_cfg(rtwdev, cfg, ext_wde_min_qt_wcpu); 2126 2127 mac->dle_func_en(rtwdev, true); 2128 2129 ret = mac->chk_dle_rdy(rtwdev, true); 2130 if (ret) { 2131 rtw89_err(rtwdev, "[ERR]WDE cfg ready\n"); 2132 return ret; 2133 } 2134 2135 ret = mac->chk_dle_rdy(rtwdev, false); 2136 if (ret) { 2137 rtw89_err(rtwdev, "[ERR]PLE cfg ready\n"); 2138 return ret; 2139 } 2140 2141 return 0; 2142 error: 2143 mac->dle_func_en(rtwdev, false); 2144 rtw89_err(rtwdev, "[ERR]trxcfg wde 0x8900 = %x\n", 2145 rtw89_read32(rtwdev, R_AX_WDE_INI_STATUS)); 2146 rtw89_err(rtwdev, "[ERR]trxcfg ple 0x8D00 = %x\n", 2147 rtw89_read32(rtwdev, R_AX_PLE_INI_STATUS)); 2148 2149 return ret; 2150 } 2151 2152 static int preload_init_set(struct rtw89_dev *rtwdev, enum rtw89_mac_idx mac_idx, 2153 enum rtw89_qta_mode mode) 2154 { 2155 u32 reg, max_preld_size, min_rsvd_size; 2156 2157 max_preld_size = (mac_idx == RTW89_MAC_0 ? 2158 PRELD_B0_ENT_NUM : PRELD_B1_ENT_NUM) * PRELD_AMSDU_SIZE; 2159 reg = mac_idx == RTW89_MAC_0 ? 2160 R_AX_TXPKTCTL_B0_PRELD_CFG0 : R_AX_TXPKTCTL_B1_PRELD_CFG0; 2161 rtw89_write32_mask(rtwdev, reg, B_AX_B0_PRELD_USEMAXSZ_MASK, max_preld_size); 2162 rtw89_write32_set(rtwdev, reg, B_AX_B0_PRELD_FEN); 2163 2164 min_rsvd_size = PRELD_AMSDU_SIZE; 2165 reg = mac_idx == RTW89_MAC_0 ? 2166 R_AX_TXPKTCTL_B0_PRELD_CFG1 : R_AX_TXPKTCTL_B1_PRELD_CFG1; 2167 rtw89_write32_mask(rtwdev, reg, B_AX_B0_PRELD_NXT_TXENDWIN_MASK, PRELD_NEXT_WND); 2168 rtw89_write32_mask(rtwdev, reg, B_AX_B0_PRELD_NXT_RSVMINSZ_MASK, min_rsvd_size); 2169 2170 return 0; 2171 } 2172 2173 static bool is_qta_poh(struct rtw89_dev *rtwdev) 2174 { 2175 return rtwdev->hci.type == RTW89_HCI_TYPE_PCIE; 2176 } 2177 2178 int rtw89_mac_preload_init(struct rtw89_dev *rtwdev, enum rtw89_mac_idx mac_idx, 2179 enum rtw89_qta_mode mode) 2180 { 2181 const struct rtw89_chip_info *chip = rtwdev->chip; 2182 2183 if (chip->chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev) || 2184 !is_qta_poh(rtwdev)) 2185 return 0; 2186 2187 return preload_init_set(rtwdev, mac_idx, mode); 2188 } 2189 2190 static bool dle_is_txq_empty(struct rtw89_dev *rtwdev) 2191 { 2192 u32 msk32; 2193 u32 val32; 2194 2195 msk32 = B_AX_WDE_EMPTY_QUE_CMAC0_ALL_AC | B_AX_WDE_EMPTY_QUE_CMAC0_MBH | 2196 B_AX_WDE_EMPTY_QUE_CMAC1_MBH | B_AX_WDE_EMPTY_QUE_CMAC0_WMM0 | 2197 B_AX_WDE_EMPTY_QUE_CMAC0_WMM1 | B_AX_WDE_EMPTY_QUE_OTHERS | 2198 B_AX_PLE_EMPTY_QUE_DMAC_MPDU_TX | B_AX_PLE_EMPTY_QTA_DMAC_H2C | 2199 B_AX_PLE_EMPTY_QUE_DMAC_SEC_TX | B_AX_WDE_EMPTY_QUE_DMAC_PKTIN | 2200 B_AX_WDE_EMPTY_QTA_DMAC_HIF | B_AX_WDE_EMPTY_QTA_DMAC_WLAN_CPU | 2201 B_AX_WDE_EMPTY_QTA_DMAC_PKTIN | B_AX_WDE_EMPTY_QTA_DMAC_CPUIO | 2202 B_AX_PLE_EMPTY_QTA_DMAC_B0_TXPL | 2203 B_AX_PLE_EMPTY_QTA_DMAC_B1_TXPL | 2204 B_AX_PLE_EMPTY_QTA_DMAC_MPDU_TX | 2205 B_AX_PLE_EMPTY_QTA_DMAC_CPUIO | 2206 B_AX_WDE_EMPTY_QTA_DMAC_DATA_CPU | 2207 B_AX_PLE_EMPTY_QTA_DMAC_WLAN_CPU; 2208 val32 = rtw89_read32(rtwdev, R_AX_DLE_EMPTY0); 2209 2210 if ((val32 & msk32) == msk32) 2211 return true; 2212 2213 return false; 2214 } 2215 2216 static void _patch_ss2f_path(struct rtw89_dev *rtwdev) 2217 { 2218 const struct rtw89_chip_info *chip = rtwdev->chip; 2219 2220 if (chip->chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) 2221 return; 2222 2223 rtw89_write32_mask(rtwdev, R_AX_SS2FINFO_PATH, B_AX_SS_DEST_QUEUE_MASK, 
2224 SS2F_PATH_WLCPU); 2225 } 2226 2227 static int sta_sch_init_ax(struct rtw89_dev *rtwdev) 2228 { 2229 u32 p_val; 2230 u8 val; 2231 int ret; 2232 2233 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 2234 if (ret) 2235 return ret; 2236 2237 val = rtw89_read8(rtwdev, R_AX_SS_CTRL); 2238 val |= B_AX_SS_EN; 2239 rtw89_write8(rtwdev, R_AX_SS_CTRL, val); 2240 2241 ret = read_poll_timeout(rtw89_read32, p_val, p_val & B_AX_SS_INIT_DONE_1, 2242 1, TRXCFG_WAIT_CNT, false, rtwdev, R_AX_SS_CTRL); 2243 if (ret) { 2244 rtw89_err(rtwdev, "[ERR]STA scheduler init\n"); 2245 return ret; 2246 } 2247 2248 rtw89_write32_set(rtwdev, R_AX_SS_CTRL, B_AX_SS_WARM_INIT_FLG); 2249 rtw89_write32_clr(rtwdev, R_AX_SS_CTRL, B_AX_SS_NONEMPTY_SS2FINFO_EN); 2250 2251 _patch_ss2f_path(rtwdev); 2252 2253 return 0; 2254 } 2255 2256 static int mpdu_proc_init_ax(struct rtw89_dev *rtwdev) 2257 { 2258 int ret; 2259 2260 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 2261 if (ret) 2262 return ret; 2263 2264 rtw89_write32(rtwdev, R_AX_ACTION_FWD0, TRXCFG_MPDU_PROC_ACT_FRWD); 2265 rtw89_write32(rtwdev, R_AX_TF_FWD, TRXCFG_MPDU_PROC_TF_FRWD); 2266 rtw89_write32_set(rtwdev, R_AX_MPDU_PROC, 2267 B_AX_APPEND_FCS | B_AX_A_ICV_ERR); 2268 rtw89_write32(rtwdev, R_AX_CUT_AMSDU_CTRL, TRXCFG_MPDU_PROC_CUT_CTRL); 2269 2270 return 0; 2271 } 2272 2273 static int sec_eng_init_ax(struct rtw89_dev *rtwdev) 2274 { 2275 const struct rtw89_chip_info *chip = rtwdev->chip; 2276 u32 val = 0; 2277 int ret; 2278 2279 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 2280 if (ret) 2281 return ret; 2282 2283 val = rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL); 2284 /* init clock */ 2285 val |= (B_AX_CLK_EN_CGCMP | B_AX_CLK_EN_WAPI | B_AX_CLK_EN_WEP_TKIP); 2286 /* init TX encryption */ 2287 val |= (B_AX_SEC_TX_ENC | B_AX_SEC_RX_DEC); 2288 val |= (B_AX_MC_DEC | B_AX_BC_DEC); 2289 if (chip->chip_id == RTL8852C) 2290 val |= B_AX_UC_MGNT_DEC; 2291 if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B || 2292 chip->chip_id == RTL8851B) 2293 val &= ~B_AX_TX_PARTIAL_MODE; 2294 rtw89_write32(rtwdev, R_AX_SEC_ENG_CTRL, val); 2295 2296 /* init MIC ICV append */ 2297 val = rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC); 2298 val |= (B_AX_APPEND_ICV | B_AX_APPEND_MIC); 2299 2300 /* option init */ 2301 rtw89_write32(rtwdev, R_AX_SEC_MPDU_PROC, val); 2302 2303 if (chip->chip_id == RTL8852C) 2304 rtw89_write32_mask(rtwdev, R_AX_SEC_DEBUG1, 2305 B_AX_TX_TIMEOUT_SEL_MASK, AX_TX_TO_VAL); 2306 2307 return 0; 2308 } 2309 2310 static int dmac_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx) 2311 { 2312 int ret; 2313 2314 ret = rtw89_mac_dle_init(rtwdev, rtwdev->mac.qta_mode, RTW89_QTA_INVALID); 2315 if (ret) { 2316 rtw89_err(rtwdev, "[ERR]DLE init %d\n", ret); 2317 return ret; 2318 } 2319 2320 ret = rtw89_mac_preload_init(rtwdev, RTW89_MAC_0, rtwdev->mac.qta_mode); 2321 if (ret) { 2322 rtw89_err(rtwdev, "[ERR]preload init %d\n", ret); 2323 return ret; 2324 } 2325 2326 ret = rtw89_mac_hfc_init(rtwdev, true, true, true); 2327 if (ret) { 2328 rtw89_err(rtwdev, "[ERR]HCI FC init %d\n", ret); 2329 return ret; 2330 } 2331 2332 ret = sta_sch_init_ax(rtwdev); 2333 if (ret) { 2334 rtw89_err(rtwdev, "[ERR]STA SCH init %d\n", ret); 2335 return ret; 2336 } 2337 2338 ret = mpdu_proc_init_ax(rtwdev); 2339 if (ret) { 2340 rtw89_err(rtwdev, "[ERR]MPDU Proc init %d\n", ret); 2341 return ret; 2342 } 2343 2344 ret = sec_eng_init_ax(rtwdev); 2345 if (ret) { 2346 rtw89_err(rtwdev, "[ERR]Security Engine init %d\n", ret); 2347 return ret; 2348 } 2349 2350 
return ret; 2351 } 2352 2353 static int addr_cam_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx) 2354 { 2355 u32 val, reg; 2356 u16 p_val; 2357 int ret; 2358 2359 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 2360 if (ret) 2361 return ret; 2362 2363 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_ADDR_CAM_CTRL, mac_idx); 2364 2365 val = rtw89_read32(rtwdev, reg); 2366 val |= u32_encode_bits(0x7f, B_AX_ADDR_CAM_RANGE_MASK) | 2367 B_AX_ADDR_CAM_CLR | B_AX_ADDR_CAM_EN; 2368 rtw89_write32(rtwdev, reg, val); 2369 2370 ret = read_poll_timeout(rtw89_read16, p_val, !(p_val & B_AX_ADDR_CAM_CLR), 2371 1, TRXCFG_WAIT_CNT, false, rtwdev, reg); 2372 if (ret) { 2373 rtw89_err(rtwdev, "[ERR]ADDR_CAM reset\n"); 2374 return ret; 2375 } 2376 2377 return 0; 2378 } 2379 2380 static int scheduler_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx) 2381 { 2382 u32 ret; 2383 u32 reg; 2384 u32 val; 2385 2386 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 2387 if (ret) 2388 return ret; 2389 2390 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PREBKF_CFG_1, mac_idx); 2391 if (rtwdev->chip->chip_id == RTL8852C) 2392 rtw89_write32_mask(rtwdev, reg, B_AX_SIFS_MACTXEN_T1_MASK, 2393 SIFS_MACTXEN_T1_V1); 2394 else 2395 rtw89_write32_mask(rtwdev, reg, B_AX_SIFS_MACTXEN_T1_MASK, 2396 SIFS_MACTXEN_T1); 2397 2398 if (rtw89_is_rtl885xb(rtwdev)) { 2399 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_SCH_EXT_CTRL, mac_idx); 2400 rtw89_write32_set(rtwdev, reg, B_AX_PORT_RST_TSF_ADV); 2401 } 2402 2403 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_CCA_CFG_0, mac_idx); 2404 rtw89_write32_clr(rtwdev, reg, B_AX_BTCCA_EN); 2405 2406 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PREBKF_CFG_0, mac_idx); 2407 if (rtwdev->chip->chip_id == RTL8852C) { 2408 val = rtw89_read32_mask(rtwdev, R_AX_SEC_ENG_CTRL, 2409 B_AX_TX_PARTIAL_MODE); 2410 if (!val) 2411 rtw89_write32_mask(rtwdev, reg, B_AX_PREBKF_TIME_MASK, 2412 SCH_PREBKF_24US); 2413 } else { 2414 rtw89_write32_mask(rtwdev, reg, B_AX_PREBKF_TIME_MASK, 2415 SCH_PREBKF_24US); 2416 } 2417 2418 return 0; 2419 } 2420 2421 static int rtw89_mac_typ_fltr_opt_ax(struct rtw89_dev *rtwdev, 2422 enum rtw89_machdr_frame_type type, 2423 enum rtw89_mac_fwd_target fwd_target, 2424 u8 mac_idx) 2425 { 2426 u32 reg; 2427 u32 val; 2428 2429 switch (fwd_target) { 2430 case RTW89_FWD_DONT_CARE: 2431 val = RX_FLTR_FRAME_DROP; 2432 break; 2433 case RTW89_FWD_TO_HOST: 2434 val = RX_FLTR_FRAME_TO_HOST; 2435 break; 2436 case RTW89_FWD_TO_WLAN_CPU: 2437 val = RX_FLTR_FRAME_TO_WLCPU; 2438 break; 2439 default: 2440 rtw89_err(rtwdev, "[ERR]set rx filter fwd target err\n"); 2441 return -EINVAL; 2442 } 2443 2444 switch (type) { 2445 case RTW89_MGNT: 2446 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_MGNT_FLTR, mac_idx); 2447 break; 2448 case RTW89_CTRL: 2449 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_CTRL_FLTR, mac_idx); 2450 break; 2451 case RTW89_DATA: 2452 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_DATA_FLTR, mac_idx); 2453 break; 2454 default: 2455 rtw89_err(rtwdev, "[ERR]set rx filter type err\n"); 2456 return -EINVAL; 2457 } 2458 rtw89_write32(rtwdev, reg, val); 2459 2460 return 0; 2461 } 2462 2463 static int rx_fltr_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx) 2464 { 2465 int ret, i; 2466 u32 mac_ftlr, plcp_ftlr; 2467 2468 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 2469 if (ret) 2470 return ret; 2471 2472 for (i = RTW89_MGNT; i <= RTW89_DATA; i++) { 2473 ret = rtw89_mac_typ_fltr_opt_ax(rtwdev, i, RTW89_FWD_TO_HOST, 2474 mac_idx); 2475 if (ret) 2476 return ret; 2477 } 2478 mac_ftlr = rtwdev->hal.rx_fltr; 2479 
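	/* Three stages of RX filtering are set up here: the per-frame-type
	 * forwarding above (management, control and data all routed to the
	 * host), the frame filter taken from hal.rx_fltr, and the PLCP
	 * header checks assembled just below.
	 */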
plcp_ftlr = B_AX_CCK_CRC_CHK | B_AX_CCK_SIG_CHK | 2480 B_AX_LSIG_PARITY_CHK_EN | B_AX_SIGA_CRC_CHK | 2481 B_AX_VHT_SU_SIGB_CRC_CHK | B_AX_VHT_MU_SIGB_CRC_CHK | 2482 B_AX_HE_SIGB_CRC_CHK; 2483 rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_RX_FLTR_OPT, mac_idx), 2484 mac_ftlr); 2485 rtw89_write16(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_PLCP_HDR_FLTR, mac_idx), 2486 plcp_ftlr); 2487 2488 return 0; 2489 } 2490 2491 static void _patch_dis_resp_chk(struct rtw89_dev *rtwdev, u8 mac_idx) 2492 { 2493 u32 reg, val32; 2494 u32 b_rsp_chk_nav, b_rsp_chk_cca; 2495 2496 b_rsp_chk_nav = B_AX_RSP_CHK_TXNAV | B_AX_RSP_CHK_INTRA_NAV | 2497 B_AX_RSP_CHK_BASIC_NAV; 2498 b_rsp_chk_cca = B_AX_RSP_CHK_SEC_CCA_80 | B_AX_RSP_CHK_SEC_CCA_40 | 2499 B_AX_RSP_CHK_SEC_CCA_20 | B_AX_RSP_CHK_BTCCA | 2500 B_AX_RSP_CHK_EDCCA | B_AX_RSP_CHK_CCA; 2501 2502 switch (rtwdev->chip->chip_id) { 2503 case RTL8852A: 2504 case RTL8852B: 2505 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RSP_CHK_SIG, mac_idx); 2506 val32 = rtw89_read32(rtwdev, reg) & ~b_rsp_chk_nav; 2507 rtw89_write32(rtwdev, reg, val32); 2508 2509 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_0, mac_idx); 2510 val32 = rtw89_read32(rtwdev, reg) & ~b_rsp_chk_cca; 2511 rtw89_write32(rtwdev, reg, val32); 2512 break; 2513 default: 2514 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RSP_CHK_SIG, mac_idx); 2515 val32 = rtw89_read32(rtwdev, reg) | b_rsp_chk_nav; 2516 rtw89_write32(rtwdev, reg, val32); 2517 2518 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_0, mac_idx); 2519 val32 = rtw89_read32(rtwdev, reg) | b_rsp_chk_cca; 2520 rtw89_write32(rtwdev, reg, val32); 2521 break; 2522 } 2523 } 2524 2525 static int cca_ctrl_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx) 2526 { 2527 u32 val, reg; 2528 int ret; 2529 2530 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 2531 if (ret) 2532 return ret; 2533 2534 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_CCA_CONTROL, mac_idx); 2535 val = rtw89_read32(rtwdev, reg); 2536 val |= (B_AX_TB_CHK_BASIC_NAV | B_AX_TB_CHK_BTCCA | 2537 B_AX_TB_CHK_EDCCA | B_AX_TB_CHK_CCA_P20 | 2538 B_AX_SIFS_CHK_BTCCA | B_AX_SIFS_CHK_CCA_P20 | 2539 B_AX_CTN_CHK_INTRA_NAV | 2540 B_AX_CTN_CHK_BASIC_NAV | B_AX_CTN_CHK_BTCCA | 2541 B_AX_CTN_CHK_EDCCA | B_AX_CTN_CHK_CCA_S80 | 2542 B_AX_CTN_CHK_CCA_S40 | B_AX_CTN_CHK_CCA_S20 | 2543 B_AX_CTN_CHK_CCA_P20); 2544 val &= ~(B_AX_TB_CHK_TX_NAV | B_AX_TB_CHK_CCA_S80 | 2545 B_AX_TB_CHK_CCA_S40 | B_AX_TB_CHK_CCA_S20 | 2546 B_AX_SIFS_CHK_CCA_S80 | B_AX_SIFS_CHK_CCA_S40 | 2547 B_AX_SIFS_CHK_CCA_S20 | B_AX_CTN_CHK_TXNAV | 2548 B_AX_SIFS_CHK_EDCCA); 2549 2550 rtw89_write32(rtwdev, reg, val); 2551 2552 _patch_dis_resp_chk(rtwdev, mac_idx); 2553 2554 return 0; 2555 } 2556 2557 static int nav_ctrl_init_ax(struct rtw89_dev *rtwdev) 2558 { 2559 rtw89_write32_set(rtwdev, R_AX_WMAC_NAV_CTL, B_AX_WMAC_PLCP_UP_NAV_EN | 2560 B_AX_WMAC_TF_UP_NAV_EN | 2561 B_AX_WMAC_NAV_UPPER_EN); 2562 rtw89_write32_mask(rtwdev, R_AX_WMAC_NAV_CTL, B_AX_WMAC_NAV_UPPER_MASK, NAV_25MS); 2563 2564 return 0; 2565 } 2566 2567 static int spatial_reuse_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx) 2568 { 2569 u32 reg; 2570 int ret; 2571 2572 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 2573 if (ret) 2574 return ret; 2575 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RX_SR_CTRL, mac_idx); 2576 rtw89_write8_clr(rtwdev, reg, B_AX_SR_EN); 2577 2578 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BSSID_SRC_CTRL, mac_idx); 2579 rtw89_write8_set(rtwdev, reg, B_AX_PLCP_SRC_EN); 2580 2581 return 0; 2582 } 2583 2584 static int tmac_init_ax(struct 
rtw89_dev *rtwdev, u8 mac_idx) 2585 { 2586 u32 reg; 2587 int ret; 2588 2589 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 2590 if (ret) 2591 return ret; 2592 2593 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_MAC_LOOPBACK, mac_idx); 2594 rtw89_write32_clr(rtwdev, reg, B_AX_MACLBK_EN); 2595 2596 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TCR0, mac_idx); 2597 rtw89_write32_mask(rtwdev, reg, B_AX_TCR_UDF_THSD_MASK, TCR_UDF_THSD); 2598 2599 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TXD_FIFO_CTRL, mac_idx); 2600 rtw89_write32_mask(rtwdev, reg, B_AX_TXDFIFO_HIGH_MCS_THRE_MASK, TXDFIFO_HIGH_MCS_THRE); 2601 rtw89_write32_mask(rtwdev, reg, B_AX_TXDFIFO_LOW_MCS_THRE_MASK, TXDFIFO_LOW_MCS_THRE); 2602 2603 return 0; 2604 } 2605 2606 static int trxptcl_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx) 2607 { 2608 const struct rtw89_chip_info *chip = rtwdev->chip; 2609 const struct rtw89_rrsr_cfgs *rrsr = chip->rrsr_cfgs; 2610 u32 reg, val, sifs; 2611 int ret; 2612 2613 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 2614 if (ret) 2615 return ret; 2616 2617 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_0, mac_idx); 2618 val = rtw89_read32(rtwdev, reg); 2619 val &= ~B_AX_WMAC_SPEC_SIFS_CCK_MASK; 2620 val |= FIELD_PREP(B_AX_WMAC_SPEC_SIFS_CCK_MASK, WMAC_SPEC_SIFS_CCK); 2621 2622 switch (rtwdev->chip->chip_id) { 2623 case RTL8852A: 2624 sifs = WMAC_SPEC_SIFS_OFDM_52A; 2625 break; 2626 case RTL8851B: 2627 case RTL8852B: 2628 case RTL8852BT: 2629 sifs = WMAC_SPEC_SIFS_OFDM_52B; 2630 break; 2631 default: 2632 sifs = WMAC_SPEC_SIFS_OFDM_52C; 2633 break; 2634 } 2635 val &= ~B_AX_WMAC_SPEC_SIFS_OFDM_MASK; 2636 val |= FIELD_PREP(B_AX_WMAC_SPEC_SIFS_OFDM_MASK, sifs); 2637 rtw89_write32(rtwdev, reg, val); 2638 2639 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RXTRIG_TEST_USER_2, mac_idx); 2640 rtw89_write32_set(rtwdev, reg, B_AX_RXTRIG_FCSCHK_EN); 2641 2642 reg = rtw89_mac_reg_by_idx(rtwdev, rrsr->ref_rate.addr, mac_idx); 2643 rtw89_write32_mask(rtwdev, reg, rrsr->ref_rate.mask, rrsr->ref_rate.data); 2644 reg = rtw89_mac_reg_by_idx(rtwdev, rrsr->rsc.addr, mac_idx); 2645 rtw89_write32_mask(rtwdev, reg, rrsr->rsc.mask, rrsr->rsc.data); 2646 2647 return 0; 2648 } 2649 2650 static void rst_bacam(struct rtw89_dev *rtwdev) 2651 { 2652 u32 val32; 2653 int ret; 2654 2655 rtw89_write32_mask(rtwdev, R_AX_RESPBA_CAM_CTRL, B_AX_BACAM_RST_MASK, 2656 S_AX_BACAM_RST_ALL); 2657 2658 ret = read_poll_timeout_atomic(rtw89_read32_mask, val32, val32 == 0, 2659 1, 1000, false, 2660 rtwdev, R_AX_RESPBA_CAM_CTRL, B_AX_BACAM_RST_MASK); 2661 if (ret) 2662 rtw89_warn(rtwdev, "failed to reset BA CAM\n"); 2663 } 2664 2665 static int rmac_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx) 2666 { 2667 #define TRXCFG_RMAC_CCA_TO 32 2668 #define TRXCFG_RMAC_DATA_TO 15 2669 #define RX_MAX_LEN_UNIT 512 2670 #define PLD_RLS_MAX_PG 127 2671 #define RX_SPEC_MAX_LEN (11454 + RX_MAX_LEN_UNIT) 2672 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2673 int ret; 2674 u32 reg, rx_max_len, rx_qta; 2675 u16 val; 2676 2677 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 2678 if (ret) 2679 return ret; 2680 2681 if (mac_idx == RTW89_MAC_0) 2682 rst_bacam(rtwdev); 2683 2684 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RESPBA_CAM_CTRL, mac_idx); 2685 rtw89_write8_set(rtwdev, reg, B_AX_SSN_SEL); 2686 2687 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_DLK_PROTECT_CTL, mac_idx); 2688 val = rtw89_read16(rtwdev, reg); 2689 val = u16_replace_bits(val, TRXCFG_RMAC_DATA_TO, 2690 B_AX_RX_DLK_DATA_TIME_MASK); 2691 val = u16_replace_bits(val, 
TRXCFG_RMAC_CCA_TO, 2692 B_AX_RX_DLK_CCA_TIME_MASK); 2693 if (chip_id == RTL8852BT) 2694 val |= B_AX_RX_DLK_RST_EN; 2695 rtw89_write16(rtwdev, reg, val); 2696 2697 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RCR, mac_idx); 2698 rtw89_write8_mask(rtwdev, reg, B_AX_CH_EN_MASK, 0x1); 2699 2700 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RX_FLTR_OPT, mac_idx); 2701 if (mac_idx == RTW89_MAC_0) 2702 rx_qta = rtwdev->mac.dle_info.c0_rx_qta; 2703 else 2704 rx_qta = rtwdev->mac.dle_info.c1_rx_qta; 2705 rx_qta = min_t(u32, rx_qta, PLD_RLS_MAX_PG); 2706 rx_max_len = rx_qta * rtwdev->mac.dle_info.ple_pg_size; 2707 rx_max_len = min_t(u32, rx_max_len, RX_SPEC_MAX_LEN); 2708 rx_max_len /= RX_MAX_LEN_UNIT; 2709 rtw89_write32_mask(rtwdev, reg, B_AX_RX_MPDU_MAX_LEN_MASK, rx_max_len); 2710 2711 if (chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV) { 2712 rtw89_write16_mask(rtwdev, 2713 rtw89_mac_reg_by_idx(rtwdev, R_AX_DLK_PROTECT_CTL, mac_idx), 2714 B_AX_RX_DLK_CCA_TIME_MASK, 0); 2715 rtw89_write16_set(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_RCR, mac_idx), 2716 BIT(12)); 2717 } 2718 2719 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PLCP_HDR_FLTR, mac_idx); 2720 rtw89_write8_clr(rtwdev, reg, B_AX_VHT_SU_SIGB_CRC_CHK); 2721 2722 return ret; 2723 } 2724 2725 static int cmac_com_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx) 2726 { 2727 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2728 u32 val, reg; 2729 int ret; 2730 2731 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 2732 if (ret) 2733 return ret; 2734 2735 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TX_SUB_CARRIER_VALUE, mac_idx); 2736 val = rtw89_read32(rtwdev, reg); 2737 val = u32_replace_bits(val, 0, B_AX_TXSC_20M_MASK); 2738 val = u32_replace_bits(val, 0, B_AX_TXSC_40M_MASK); 2739 val = u32_replace_bits(val, 0, B_AX_TXSC_80M_MASK); 2740 rtw89_write32(rtwdev, reg, val); 2741 2742 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 2743 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PTCL_RRSR1, mac_idx); 2744 rtw89_write32_mask(rtwdev, reg, B_AX_RRSR_RATE_EN_MASK, RRSR_OFDM_CCK_EN); 2745 } 2746 2747 return 0; 2748 } 2749 2750 bool rtw89_mac_is_qta_dbcc(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode) 2751 { 2752 const struct rtw89_dle_mem *cfg; 2753 2754 cfg = get_dle_mem_cfg(rtwdev, mode); 2755 if (!cfg) { 2756 rtw89_err(rtwdev, "[ERR]get_dle_mem_cfg\n"); 2757 return false; 2758 } 2759 2760 return (cfg->ple_min_qt->cma1_dma && cfg->ple_max_qt->cma1_dma); 2761 } 2762 2763 static int ptcl_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx) 2764 { 2765 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2766 u32 val, reg; 2767 int ret; 2768 2769 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 2770 if (ret) 2771 return ret; 2772 2773 if (rtwdev->hci.type == RTW89_HCI_TYPE_PCIE) { 2774 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_SIFS_SETTING, mac_idx); 2775 val = rtw89_read32(rtwdev, reg); 2776 val = u32_replace_bits(val, S_AX_CTS2S_TH_1K, 2777 B_AX_HW_CTS2SELF_PKT_LEN_TH_MASK); 2778 val = u32_replace_bits(val, S_AX_CTS2S_TH_SEC_256B, 2779 B_AX_HW_CTS2SELF_PKT_LEN_TH_TWW_MASK); 2780 val |= B_AX_HW_CTS2SELF_EN; 2781 rtw89_write32(rtwdev, reg, val); 2782 2783 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PTCL_FSM_MON, mac_idx); 2784 val = rtw89_read32(rtwdev, reg); 2785 val = u32_replace_bits(val, S_AX_PTCL_TO_2MS, B_AX_PTCL_TX_ARB_TO_THR_MASK); 2786 val &= ~B_AX_PTCL_TX_ARB_TO_MODE; 2787 rtw89_write32(rtwdev, reg, val); 2788 } 2789 2790 if (mac_idx == RTW89_MAC_0) { 2791 rtw89_write8_set(rtwdev, R_AX_PTCL_COMMON_SETTING_0, 2792 
B_AX_CMAC_TX_MODE_0 | B_AX_CMAC_TX_MODE_1); 2793 rtw89_write8_clr(rtwdev, R_AX_PTCL_COMMON_SETTING_0, 2794 B_AX_PTCL_TRIGGER_SS_EN_0 | 2795 B_AX_PTCL_TRIGGER_SS_EN_1 | 2796 B_AX_PTCL_TRIGGER_SS_EN_UL); 2797 rtw89_write8_mask(rtwdev, R_AX_PTCLRPT_FULL_HDL, 2798 B_AX_SPE_RPT_PATH_MASK, FWD_TO_WLCPU); 2799 } else if (mac_idx == RTW89_MAC_1) { 2800 rtw89_write8_mask(rtwdev, R_AX_PTCLRPT_FULL_HDL_C1, 2801 B_AX_SPE_RPT_PATH_MASK, FWD_TO_WLCPU); 2802 } 2803 2804 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 2805 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_AGG_LEN_VHT_0, mac_idx); 2806 rtw89_write32_mask(rtwdev, reg, 2807 B_AX_AMPDU_MAX_LEN_VHT_MASK, 0x3FF80); 2808 } 2809 2810 return 0; 2811 } 2812 2813 static int cmac_dma_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx) 2814 { 2815 u32 reg; 2816 int ret; 2817 2818 if (!rtw89_is_rtl885xb(rtwdev)) 2819 return 0; 2820 2821 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 2822 if (ret) 2823 return ret; 2824 2825 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RXDMA_CTRL_0, mac_idx); 2826 rtw89_write8_clr(rtwdev, reg, RX_FULL_MODE); 2827 2828 return 0; 2829 } 2830 2831 static int cmac_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx) 2832 { 2833 int ret; 2834 2835 ret = scheduler_init_ax(rtwdev, mac_idx); 2836 if (ret) { 2837 rtw89_err(rtwdev, "[ERR]CMAC%d SCH init %d\n", mac_idx, ret); 2838 return ret; 2839 } 2840 2841 ret = addr_cam_init_ax(rtwdev, mac_idx); 2842 if (ret) { 2843 rtw89_err(rtwdev, "[ERR]CMAC%d ADDR_CAM reset %d\n", mac_idx, 2844 ret); 2845 return ret; 2846 } 2847 2848 ret = rx_fltr_init_ax(rtwdev, mac_idx); 2849 if (ret) { 2850 rtw89_err(rtwdev, "[ERR]CMAC%d RX filter init %d\n", mac_idx, 2851 ret); 2852 return ret; 2853 } 2854 2855 ret = cca_ctrl_init_ax(rtwdev, mac_idx); 2856 if (ret) { 2857 rtw89_err(rtwdev, "[ERR]CMAC%d CCA CTRL init %d\n", mac_idx, 2858 ret); 2859 return ret; 2860 } 2861 2862 ret = nav_ctrl_init_ax(rtwdev); 2863 if (ret) { 2864 rtw89_err(rtwdev, "[ERR]CMAC%d NAV CTRL init %d\n", mac_idx, 2865 ret); 2866 return ret; 2867 } 2868 2869 ret = spatial_reuse_init_ax(rtwdev, mac_idx); 2870 if (ret) { 2871 rtw89_err(rtwdev, "[ERR]CMAC%d Spatial Reuse init %d\n", 2872 mac_idx, ret); 2873 return ret; 2874 } 2875 2876 ret = tmac_init_ax(rtwdev, mac_idx); 2877 if (ret) { 2878 rtw89_err(rtwdev, "[ERR]CMAC%d TMAC init %d\n", mac_idx, ret); 2879 return ret; 2880 } 2881 2882 ret = trxptcl_init_ax(rtwdev, mac_idx); 2883 if (ret) { 2884 rtw89_err(rtwdev, "[ERR]CMAC%d TRXPTCL init %d\n", mac_idx, ret); 2885 return ret; 2886 } 2887 2888 ret = rmac_init_ax(rtwdev, mac_idx); 2889 if (ret) { 2890 rtw89_err(rtwdev, "[ERR]CMAC%d RMAC init %d\n", mac_idx, ret); 2891 return ret; 2892 } 2893 2894 ret = cmac_com_init_ax(rtwdev, mac_idx); 2895 if (ret) { 2896 rtw89_err(rtwdev, "[ERR]CMAC%d Com init %d\n", mac_idx, ret); 2897 return ret; 2898 } 2899 2900 ret = ptcl_init_ax(rtwdev, mac_idx); 2901 if (ret) { 2902 rtw89_err(rtwdev, "[ERR]CMAC%d PTCL init %d\n", mac_idx, ret); 2903 return ret; 2904 } 2905 2906 ret = cmac_dma_init_ax(rtwdev, mac_idx); 2907 if (ret) { 2908 rtw89_err(rtwdev, "[ERR]CMAC%d DMA init %d\n", mac_idx, ret); 2909 return ret; 2910 } 2911 2912 return ret; 2913 } 2914 2915 static int rtw89_mac_read_phycap(struct rtw89_dev *rtwdev, 2916 struct rtw89_mac_c2h_info *c2h_info, u8 part_num) 2917 { 2918 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 2919 const struct rtw89_chip_info *chip = rtwdev->chip; 2920 struct rtw89_mac_h2c_info h2c_info = {}; 2921 enum rtw89_mac_c2h_type c2h_type; 2922 u8 content_len; 2923 
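	/* PHY capabilities are read over the register-based H2C/C2H mailbox:
	 * a GET_FEATURE request carrying the part number is sent and
	 * rtw89_fw_msg_reg() waits for the matching PHY_CAP (part 0) or
	 * PHY_CAP_PART1 reply, with the efuse state toggled around the
	 * exchange.
	 */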
u32 ret; 2924 2925 if (chip->chip_gen == RTW89_CHIP_AX) 2926 content_len = 0; 2927 else 2928 content_len = 2; 2929 2930 switch (part_num) { 2931 case 0: 2932 c2h_type = RTW89_FWCMD_C2HREG_FUNC_PHY_CAP; 2933 break; 2934 case 1: 2935 c2h_type = RTW89_FWCMD_C2HREG_FUNC_PHY_CAP_PART1; 2936 break; 2937 default: 2938 return -EINVAL; 2939 } 2940 2941 mac->cnv_efuse_state(rtwdev, false); 2942 2943 h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE; 2944 h2c_info.content_len = content_len; 2945 h2c_info.u.hdr.w0 = u32_encode_bits(part_num, RTW89_H2CREG_GET_FEATURE_PART_NUM); 2946 2947 ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, c2h_info); 2948 if (ret) 2949 goto out; 2950 2951 if (c2h_info->id != c2h_type) 2952 ret = -EINVAL; 2953 2954 out: 2955 mac->cnv_efuse_state(rtwdev, true); 2956 2957 return ret; 2958 } 2959 2960 static int rtw89_mac_setup_phycap_part0(struct rtw89_dev *rtwdev) 2961 { 2962 const struct rtw89_chip_info *chip = rtwdev->chip; 2963 const struct rtw89_c2hreg_phycap *phycap; 2964 struct rtw89_efuse *efuse = &rtwdev->efuse; 2965 struct rtw89_mac_c2h_info c2h_info = {}; 2966 struct rtw89_hal *hal = &rtwdev->hal; 2967 u8 tx_nss; 2968 u8 rx_nss; 2969 u8 tx_ant; 2970 u8 rx_ant; 2971 int ret; 2972 2973 ret = rtw89_mac_read_phycap(rtwdev, &c2h_info, 0); 2974 if (ret) 2975 return ret; 2976 2977 phycap = &c2h_info.u.phycap; 2978 2979 tx_nss = u32_get_bits(phycap->w1, RTW89_C2HREG_PHYCAP_W1_TX_NSS); 2980 rx_nss = u32_get_bits(phycap->w0, RTW89_C2HREG_PHYCAP_W0_RX_NSS); 2981 tx_ant = u32_get_bits(phycap->w3, RTW89_C2HREG_PHYCAP_W3_ANT_TX_NUM); 2982 rx_ant = u32_get_bits(phycap->w3, RTW89_C2HREG_PHYCAP_W3_ANT_RX_NUM); 2983 2984 hal->tx_nss = tx_nss ? min_t(u8, tx_nss, chip->tx_nss) : chip->tx_nss; 2985 hal->rx_nss = rx_nss ? min_t(u8, rx_nss, chip->rx_nss) : chip->rx_nss; 2986 2987 if (tx_ant == 1) 2988 hal->antenna_tx = RF_B; 2989 if (rx_ant == 1) 2990 hal->antenna_rx = RF_B; 2991 2992 if (tx_nss == 1 && tx_ant == 2 && rx_ant == 2) { 2993 hal->antenna_tx = RF_B; 2994 hal->tx_path_diversity = true; 2995 } 2996 2997 if (chip->rf_path_num == 1) { 2998 hal->antenna_tx = RF_A; 2999 hal->antenna_rx = RF_A; 3000 if ((efuse->rfe_type % 3) == 2) 3001 hal->ant_diversity = true; 3002 } 3003 3004 rtw89_debug(rtwdev, RTW89_DBG_FW, 3005 "phycap hal/phy/chip: tx_nss=0x%x/0x%x/0x%x rx_nss=0x%x/0x%x/0x%x\n", 3006 hal->tx_nss, tx_nss, chip->tx_nss, 3007 hal->rx_nss, rx_nss, chip->rx_nss); 3008 rtw89_debug(rtwdev, RTW89_DBG_FW, 3009 "ant num/bitmap: tx=%d/0x%x rx=%d/0x%x\n", 3010 tx_ant, hal->antenna_tx, rx_ant, hal->antenna_rx); 3011 rtw89_debug(rtwdev, RTW89_DBG_FW, "TX path diversity=%d\n", hal->tx_path_diversity); 3012 rtw89_debug(rtwdev, RTW89_DBG_FW, "Antenna diversity=%d\n", hal->ant_diversity); 3013 3014 return 0; 3015 } 3016 3017 static int rtw89_mac_setup_phycap_part1(struct rtw89_dev *rtwdev) 3018 { 3019 const struct rtw89_chip_variant *variant = rtwdev->variant; 3020 const struct rtw89_c2hreg_phycap *phycap; 3021 struct rtw89_mac_c2h_info c2h_info = {}; 3022 struct rtw89_hal *hal = &rtwdev->hal; 3023 u8 qam_raw, qam; 3024 int ret; 3025 3026 ret = rtw89_mac_read_phycap(rtwdev, &c2h_info, 1); 3027 if (ret) 3028 return ret; 3029 3030 phycap = &c2h_info.u.phycap; 3031 3032 qam_raw = u32_get_bits(phycap->w2, RTW89_C2HREG_PHYCAP_P1_W2_QAM); 3033 3034 switch (qam_raw) { 3035 case RTW89_C2HREG_PHYCAP_P1_W2_QAM_256: 3036 case RTW89_C2HREG_PHYCAP_P1_W2_QAM_1024: 3037 case RTW89_C2HREG_PHYCAP_P1_W2_QAM_4096: 3038 qam = qam_raw; 3039 break; 3040 default: 3041 qam = RTW89_C2HREG_PHYCAP_P1_W2_QAM_4096; 3042 break; 
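		/* Unrecognized capability values fall back to 4096-QAM here, so
		 * MCS 12/13 are only disabled below when the firmware reports
		 * 1024-QAM or less, or the chip variant forbids them.
		 */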
3043 } 3044 3045 if ((variant && variant->no_mcs_12_13) || 3046 qam <= RTW89_C2HREG_PHYCAP_P1_W2_QAM_1024) 3047 hal->no_mcs_12_13 = true; 3048 3049 rtw89_debug(rtwdev, RTW89_DBG_FW, "phycap qam=%d/%d no_mcs_12_13=%d\n", 3050 qam_raw, qam, hal->no_mcs_12_13); 3051 3052 return 0; 3053 } 3054 3055 int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev) 3056 { 3057 const struct rtw89_chip_info *chip = rtwdev->chip; 3058 int ret; 3059 3060 ret = rtw89_mac_setup_phycap_part0(rtwdev); 3061 if (ret) 3062 return ret; 3063 3064 if (chip->chip_gen == RTW89_CHIP_AX || 3065 RTW89_CHK_FW_FEATURE(NO_PHYCAP_P1, &rtwdev->fw)) 3066 return 0; 3067 3068 return rtw89_mac_setup_phycap_part1(rtwdev); 3069 } 3070 3071 static int rtw89_hw_sch_tx_en_h2c(struct rtw89_dev *rtwdev, u8 band, 3072 u16 tx_en_u16, u16 mask_u16) 3073 { 3074 u32 ret; 3075 struct rtw89_mac_c2h_info c2h_info = {0}; 3076 struct rtw89_mac_h2c_info h2c_info = {0}; 3077 struct rtw89_h2creg_sch_tx_en *sch_tx_en = &h2c_info.u.sch_tx_en; 3078 3079 h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_SCH_TX_EN; 3080 h2c_info.content_len = sizeof(*sch_tx_en) - RTW89_H2CREG_HDR_LEN; 3081 3082 u32p_replace_bits(&sch_tx_en->w0, tx_en_u16, RTW89_H2CREG_SCH_TX_EN_W0_EN); 3083 u32p_replace_bits(&sch_tx_en->w1, mask_u16, RTW89_H2CREG_SCH_TX_EN_W1_MASK); 3084 u32p_replace_bits(&sch_tx_en->w1, band, RTW89_H2CREG_SCH_TX_EN_W1_BAND); 3085 3086 ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, &c2h_info); 3087 if (ret) 3088 return ret; 3089 3090 if (c2h_info.id != RTW89_FWCMD_C2HREG_FUNC_TX_PAUSE_RPT) 3091 return -EINVAL; 3092 3093 return 0; 3094 } 3095 3096 static int rtw89_set_hw_sch_tx_en(struct rtw89_dev *rtwdev, u8 mac_idx, 3097 u16 tx_en, u16 tx_en_mask) 3098 { 3099 u32 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_CTN_TXEN, mac_idx); 3100 u16 val; 3101 int ret; 3102 3103 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 3104 if (ret) 3105 return ret; 3106 3107 if (test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags)) 3108 return rtw89_hw_sch_tx_en_h2c(rtwdev, mac_idx, 3109 tx_en, tx_en_mask); 3110 3111 val = rtw89_read16(rtwdev, reg); 3112 val = (val & ~tx_en_mask) | (tx_en & tx_en_mask); 3113 rtw89_write16(rtwdev, reg, val); 3114 3115 return 0; 3116 } 3117 3118 static int rtw89_set_hw_sch_tx_en_v1(struct rtw89_dev *rtwdev, u8 mac_idx, 3119 u32 tx_en, u32 tx_en_mask) 3120 { 3121 u32 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_CTN_DRV_TXEN, mac_idx); 3122 u32 val; 3123 int ret; 3124 3125 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 3126 if (ret) 3127 return ret; 3128 3129 val = rtw89_read32(rtwdev, reg); 3130 val = (val & ~tx_en_mask) | (tx_en & tx_en_mask); 3131 rtw89_write32(rtwdev, reg, val); 3132 3133 return 0; 3134 } 3135 3136 int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, 3137 u32 *tx_en, enum rtw89_sch_tx_sel sel) 3138 { 3139 int ret; 3140 3141 *tx_en = rtw89_read16(rtwdev, 3142 rtw89_mac_reg_by_idx(rtwdev, R_AX_CTN_TXEN, mac_idx)); 3143 3144 switch (sel) { 3145 case RTW89_SCH_TX_SEL_ALL: 3146 ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 0, 3147 B_AX_CTN_TXEN_ALL_MASK); 3148 if (ret) 3149 return ret; 3150 break; 3151 case RTW89_SCH_TX_SEL_HIQ: 3152 ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 3153 0, B_AX_CTN_TXEN_HGQ); 3154 if (ret) 3155 return ret; 3156 break; 3157 case RTW89_SCH_TX_SEL_MG0: 3158 ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 3159 0, B_AX_CTN_TXEN_MGQ); 3160 if (ret) 3161 return ret; 3162 break; 3163 case RTW89_SCH_TX_SEL_MACID: 3164 ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 0, 3165 B_AX_CTN_TXEN_ALL_MASK); 3166 if (ret) 
3167 return ret; 3168 break; 3169 default: 3170 return 0; 3171 } 3172 3173 return 0; 3174 } 3175 EXPORT_SYMBOL(rtw89_mac_stop_sch_tx); 3176 3177 int rtw89_mac_stop_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx, 3178 u32 *tx_en, enum rtw89_sch_tx_sel sel) 3179 { 3180 int ret; 3181 3182 *tx_en = rtw89_read32(rtwdev, 3183 rtw89_mac_reg_by_idx(rtwdev, R_AX_CTN_DRV_TXEN, mac_idx)); 3184 3185 switch (sel) { 3186 case RTW89_SCH_TX_SEL_ALL: 3187 ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx, 0, 3188 B_AX_CTN_TXEN_ALL_MASK_V1); 3189 if (ret) 3190 return ret; 3191 break; 3192 case RTW89_SCH_TX_SEL_HIQ: 3193 ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx, 3194 0, B_AX_CTN_TXEN_HGQ); 3195 if (ret) 3196 return ret; 3197 break; 3198 case RTW89_SCH_TX_SEL_MG0: 3199 ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx, 3200 0, B_AX_CTN_TXEN_MGQ); 3201 if (ret) 3202 return ret; 3203 break; 3204 case RTW89_SCH_TX_SEL_MACID: 3205 ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx, 0, 3206 B_AX_CTN_TXEN_ALL_MASK_V1); 3207 if (ret) 3208 return ret; 3209 break; 3210 default: 3211 return 0; 3212 } 3213 3214 return 0; 3215 } 3216 EXPORT_SYMBOL(rtw89_mac_stop_sch_tx_v1); 3217 3218 int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en) 3219 { 3220 int ret; 3221 3222 ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, tx_en, B_AX_CTN_TXEN_ALL_MASK); 3223 if (ret) 3224 return ret; 3225 3226 return 0; 3227 } 3228 EXPORT_SYMBOL(rtw89_mac_resume_sch_tx); 3229 3230 int rtw89_mac_resume_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en) 3231 { 3232 int ret; 3233 3234 ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx, tx_en, 3235 B_AX_CTN_TXEN_ALL_MASK_V1); 3236 if (ret) 3237 return ret; 3238 3239 return 0; 3240 } 3241 EXPORT_SYMBOL(rtw89_mac_resume_sch_tx_v1); 3242 3243 static int dle_buf_req_ax(struct rtw89_dev *rtwdev, u16 buf_len, bool wd, u16 *pkt_id) 3244 { 3245 u32 val, reg; 3246 int ret; 3247 3248 reg = wd ? R_AX_WD_BUF_REQ : R_AX_PL_BUF_REQ; 3249 val = buf_len; 3250 val |= B_AX_WD_BUF_REQ_EXEC; 3251 rtw89_write32(rtwdev, reg, val); 3252 3253 reg = wd ? R_AX_WD_BUF_STATUS : R_AX_PL_BUF_STATUS; 3254 3255 ret = read_poll_timeout(rtw89_read32, val, val & B_AX_WD_BUF_STAT_DONE, 3256 1, 2000, false, rtwdev, reg); 3257 if (ret) 3258 return ret; 3259 3260 *pkt_id = FIELD_GET(B_AX_WD_BUF_STAT_PKTID_MASK, val); 3261 if (*pkt_id == S_WD_BUF_STAT_PKTID_INVALID) 3262 return -ENOENT; 3263 3264 return 0; 3265 } 3266 3267 static int set_cpuio_ax(struct rtw89_dev *rtwdev, 3268 struct rtw89_cpuio_ctrl *ctrl_para, bool wd) 3269 { 3270 u32 val, cmd_type, reg; 3271 int ret; 3272 3273 cmd_type = ctrl_para->cmd_type; 3274 3275 reg = wd ? R_AX_WD_CPUQ_OP_2 : R_AX_PL_CPUQ_OP_2; 3276 val = 0; 3277 val = u32_replace_bits(val, ctrl_para->start_pktid, 3278 B_AX_WD_CPUQ_OP_STRT_PKTID_MASK); 3279 val = u32_replace_bits(val, ctrl_para->end_pktid, 3280 B_AX_WD_CPUQ_OP_END_PKTID_MASK); 3281 rtw89_write32(rtwdev, reg, val); 3282 3283 reg = wd ? R_AX_WD_CPUQ_OP_1 : R_AX_PL_CPUQ_OP_1; 3284 val = 0; 3285 val = u32_replace_bits(val, ctrl_para->src_pid, 3286 B_AX_CPUQ_OP_SRC_PID_MASK); 3287 val = u32_replace_bits(val, ctrl_para->src_qid, 3288 B_AX_CPUQ_OP_SRC_QID_MASK); 3289 val = u32_replace_bits(val, ctrl_para->dst_pid, 3290 B_AX_CPUQ_OP_DST_PID_MASK); 3291 val = u32_replace_bits(val, ctrl_para->dst_qid, 3292 B_AX_CPUQ_OP_DST_QID_MASK); 3293 rtw89_write32(rtwdev, reg, val); 3294 3295 reg = wd ? 
R_AX_WD_CPUQ_OP_0 : R_AX_PL_CPUQ_OP_0; 3296 val = 0; 3297 val = u32_replace_bits(val, cmd_type, 3298 B_AX_CPUQ_OP_CMD_TYPE_MASK); 3299 val = u32_replace_bits(val, ctrl_para->macid, 3300 B_AX_CPUQ_OP_MACID_MASK); 3301 val = u32_replace_bits(val, ctrl_para->pkt_num, 3302 B_AX_CPUQ_OP_PKTNUM_MASK); 3303 val |= B_AX_WD_CPUQ_OP_EXEC; 3304 rtw89_write32(rtwdev, reg, val); 3305 3306 reg = wd ? R_AX_WD_CPUQ_OP_STATUS : R_AX_PL_CPUQ_OP_STATUS; 3307 3308 ret = read_poll_timeout(rtw89_read32, val, val & B_AX_WD_CPUQ_OP_STAT_DONE, 3309 1, 2000, false, rtwdev, reg); 3310 if (ret) 3311 return ret; 3312 3313 if (cmd_type == CPUIO_OP_CMD_GET_1ST_PID || 3314 cmd_type == CPUIO_OP_CMD_GET_NEXT_PID) 3315 ctrl_para->pktid = FIELD_GET(B_AX_WD_CPUQ_OP_PKTID_MASK, val); 3316 3317 return 0; 3318 } 3319 3320 int rtw89_mac_dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode, 3321 bool band1_en) 3322 { 3323 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 3324 const struct rtw89_dle_mem *cfg; 3325 3326 cfg = get_dle_mem_cfg(rtwdev, mode); 3327 if (!cfg) { 3328 rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n"); 3329 return -EINVAL; 3330 } 3331 3332 if (dle_used_size(cfg) != dle_expected_used_size(rtwdev, mode)) { 3333 rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n"); 3334 return -EINVAL; 3335 } 3336 3337 dle_quota_cfg(rtwdev, cfg, INVALID_QT_WCPU); 3338 3339 return mac->dle_quota_change(rtwdev, band1_en); 3340 } 3341 3342 static int dle_quota_change_ax(struct rtw89_dev *rtwdev, bool band1_en) 3343 { 3344 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 3345 struct rtw89_cpuio_ctrl ctrl_para = {0}; 3346 u16 pkt_id; 3347 int ret; 3348 3349 ret = mac->dle_buf_req(rtwdev, 0x20, true, &pkt_id); 3350 if (ret) { 3351 rtw89_err(rtwdev, "[ERR]WDE DLE buf req\n"); 3352 return ret; 3353 } 3354 3355 ctrl_para.cmd_type = CPUIO_OP_CMD_ENQ_TO_HEAD; 3356 ctrl_para.start_pktid = pkt_id; 3357 ctrl_para.end_pktid = pkt_id; 3358 ctrl_para.pkt_num = 0; 3359 ctrl_para.dst_pid = WDE_DLE_PORT_ID_WDRLS; 3360 ctrl_para.dst_qid = WDE_DLE_QUEID_NO_REPORT; 3361 ret = mac->set_cpuio(rtwdev, &ctrl_para, true); 3362 if (ret) { 3363 rtw89_err(rtwdev, "[ERR]WDE DLE enqueue to head\n"); 3364 return -EFAULT; 3365 } 3366 3367 ret = mac->dle_buf_req(rtwdev, 0x20, false, &pkt_id); 3368 if (ret) { 3369 rtw89_err(rtwdev, "[ERR]PLE DLE buf req\n"); 3370 return ret; 3371 } 3372 3373 ctrl_para.cmd_type = CPUIO_OP_CMD_ENQ_TO_HEAD; 3374 ctrl_para.start_pktid = pkt_id; 3375 ctrl_para.end_pktid = pkt_id; 3376 ctrl_para.pkt_num = 0; 3377 ctrl_para.dst_pid = PLE_DLE_PORT_ID_PLRLS; 3378 ctrl_para.dst_qid = PLE_DLE_QUEID_NO_REPORT; 3379 ret = mac->set_cpuio(rtwdev, &ctrl_para, false); 3380 if (ret) { 3381 rtw89_err(rtwdev, "[ERR]PLE DLE enqueue to head\n"); 3382 return -EFAULT; 3383 } 3384 3385 return 0; 3386 } 3387 3388 static int band_idle_ck_b(struct rtw89_dev *rtwdev, u8 mac_idx) 3389 { 3390 int ret; 3391 u32 reg; 3392 u8 val; 3393 3394 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 3395 if (ret) 3396 return ret; 3397 3398 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PTCL_TX_CTN_SEL, mac_idx); 3399 3400 ret = read_poll_timeout(rtw89_read8, val, 3401 (val & B_AX_PTCL_TX_ON_STAT) == 0, 3402 SW_CVR_DUR_US, 3403 SW_CVR_DUR_US * PTCL_IDLE_POLL_CNT, 3404 false, rtwdev, reg); 3405 if (ret) 3406 return ret; 3407 3408 return 0; 3409 } 3410 3411 static int band1_enable_ax(struct rtw89_dev *rtwdev) 3412 { 3413 int ret, i; 3414 u32 sleep_bak[4] = {0}; 3415 u32 pause_bak[4] = {0}; 3416 u32 tx_en; 3417 3418 ret = 
rtw89_chip_stop_sch_tx(rtwdev, 0, &tx_en, RTW89_SCH_TX_SEL_ALL); 3419 if (ret) { 3420 rtw89_err(rtwdev, "[ERR]stop sch tx %d\n", ret); 3421 return ret; 3422 } 3423 3424 for (i = 0; i < 4; i++) { 3425 sleep_bak[i] = rtw89_read32(rtwdev, R_AX_MACID_SLEEP_0 + i * 4); 3426 pause_bak[i] = rtw89_read32(rtwdev, R_AX_SS_MACID_PAUSE_0 + i * 4); 3427 rtw89_write32(rtwdev, R_AX_MACID_SLEEP_0 + i * 4, U32_MAX); 3428 rtw89_write32(rtwdev, R_AX_SS_MACID_PAUSE_0 + i * 4, U32_MAX); 3429 } 3430 3431 ret = band_idle_ck_b(rtwdev, 0); 3432 if (ret) { 3433 rtw89_err(rtwdev, "[ERR]tx idle poll %d\n", ret); 3434 return ret; 3435 } 3436 3437 ret = rtw89_mac_dle_quota_change(rtwdev, rtwdev->mac.qta_mode, true); 3438 if (ret) { 3439 rtw89_err(rtwdev, "[ERR]DLE quota change %d\n", ret); 3440 return ret; 3441 } 3442 3443 for (i = 0; i < 4; i++) { 3444 rtw89_write32(rtwdev, R_AX_MACID_SLEEP_0 + i * 4, sleep_bak[i]); 3445 rtw89_write32(rtwdev, R_AX_SS_MACID_PAUSE_0 + i * 4, pause_bak[i]); 3446 } 3447 3448 ret = rtw89_chip_resume_sch_tx(rtwdev, 0, tx_en); 3449 if (ret) { 3450 rtw89_err(rtwdev, "[ERR]CMAC1 resume sch tx %d\n", ret); 3451 return ret; 3452 } 3453 3454 ret = cmac_func_en_ax(rtwdev, 1, true); 3455 if (ret) { 3456 rtw89_err(rtwdev, "[ERR]CMAC1 func en %d\n", ret); 3457 return ret; 3458 } 3459 3460 ret = cmac_init_ax(rtwdev, 1); 3461 if (ret) { 3462 rtw89_err(rtwdev, "[ERR]CMAC1 init %d\n", ret); 3463 return ret; 3464 } 3465 3466 rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND, 3467 B_AX_R_SYM_FEN_WLBBFUN_1 | B_AX_R_SYM_FEN_WLBBGLB_1); 3468 3469 return 0; 3470 } 3471 3472 static void rtw89_wdrls_imr_enable(struct rtw89_dev *rtwdev) 3473 { 3474 const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; 3475 3476 rtw89_write32_clr(rtwdev, R_AX_WDRLS_ERR_IMR, B_AX_WDRLS_IMR_EN_CLR); 3477 rtw89_write32_set(rtwdev, R_AX_WDRLS_ERR_IMR, imr->wdrls_imr_set); 3478 } 3479 3480 static void rtw89_wsec_imr_enable(struct rtw89_dev *rtwdev) 3481 { 3482 const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; 3483 3484 rtw89_write32_set(rtwdev, imr->wsec_imr_reg, imr->wsec_imr_set); 3485 } 3486 3487 static void rtw89_mpdu_trx_imr_enable(struct rtw89_dev *rtwdev) 3488 { 3489 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3490 const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; 3491 3492 rtw89_write32_clr(rtwdev, R_AX_MPDU_TX_ERR_IMR, 3493 B_AX_TX_GET_ERRPKTID_INT_EN | 3494 B_AX_TX_NXT_ERRPKTID_INT_EN | 3495 B_AX_TX_MPDU_SIZE_ZERO_INT_EN | 3496 B_AX_TX_OFFSET_ERR_INT_EN | 3497 B_AX_TX_HDR3_SIZE_ERR_INT_EN); 3498 if (chip_id == RTL8852C) 3499 rtw89_write32_clr(rtwdev, R_AX_MPDU_TX_ERR_IMR, 3500 B_AX_TX_ETH_TYPE_ERR_EN | 3501 B_AX_TX_LLC_PRE_ERR_EN | 3502 B_AX_TX_NW_TYPE_ERR_EN | 3503 B_AX_TX_KSRCH_ERR_EN); 3504 rtw89_write32_set(rtwdev, R_AX_MPDU_TX_ERR_IMR, 3505 imr->mpdu_tx_imr_set); 3506 3507 rtw89_write32_clr(rtwdev, R_AX_MPDU_RX_ERR_IMR, 3508 B_AX_GETPKTID_ERR_INT_EN | 3509 B_AX_MHDRLEN_ERR_INT_EN | 3510 B_AX_RPT_ERR_INT_EN); 3511 rtw89_write32_set(rtwdev, R_AX_MPDU_RX_ERR_IMR, 3512 imr->mpdu_rx_imr_set); 3513 } 3514 3515 static void rtw89_sta_sch_imr_enable(struct rtw89_dev *rtwdev) 3516 { 3517 const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; 3518 3519 rtw89_write32_clr(rtwdev, R_AX_STA_SCHEDULER_ERR_IMR, 3520 B_AX_SEARCH_HANG_TIMEOUT_INT_EN | 3521 B_AX_RPT_HANG_TIMEOUT_INT_EN | 3522 B_AX_PLE_B_PKTID_ERR_INT_EN); 3523 rtw89_write32_set(rtwdev, R_AX_STA_SCHEDULER_ERR_IMR, 3524 imr->sta_sch_imr_set); 3525 } 3526 3527 static void rtw89_txpktctl_imr_enable(struct rtw89_dev *rtwdev) 3528 { 3529 
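	/* Same pattern as the other DMAC IMR helpers: chip-dependent
	 * interrupt sources are masked off first, then the enable set from
	 * the chip's rtw89_imr_info is applied.
	 */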
const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; 3530 3531 rtw89_write32_clr(rtwdev, imr->txpktctl_imr_b0_reg, 3532 imr->txpktctl_imr_b0_clr); 3533 rtw89_write32_set(rtwdev, imr->txpktctl_imr_b0_reg, 3534 imr->txpktctl_imr_b0_set); 3535 rtw89_write32_clr(rtwdev, imr->txpktctl_imr_b1_reg, 3536 imr->txpktctl_imr_b1_clr); 3537 rtw89_write32_set(rtwdev, imr->txpktctl_imr_b1_reg, 3538 imr->txpktctl_imr_b1_set); 3539 } 3540 3541 static void rtw89_wde_imr_enable(struct rtw89_dev *rtwdev) 3542 { 3543 const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; 3544 3545 rtw89_write32_clr(rtwdev, R_AX_WDE_ERR_IMR, imr->wde_imr_clr); 3546 rtw89_write32_set(rtwdev, R_AX_WDE_ERR_IMR, imr->wde_imr_set); 3547 } 3548 3549 static void rtw89_ple_imr_enable(struct rtw89_dev *rtwdev) 3550 { 3551 const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; 3552 3553 rtw89_write32_clr(rtwdev, R_AX_PLE_ERR_IMR, imr->ple_imr_clr); 3554 rtw89_write32_set(rtwdev, R_AX_PLE_ERR_IMR, imr->ple_imr_set); 3555 } 3556 3557 static void rtw89_pktin_imr_enable(struct rtw89_dev *rtwdev) 3558 { 3559 rtw89_write32_set(rtwdev, R_AX_PKTIN_ERR_IMR, 3560 B_AX_PKTIN_GETPKTID_ERR_INT_EN); 3561 } 3562 3563 static void rtw89_dispatcher_imr_enable(struct rtw89_dev *rtwdev) 3564 { 3565 const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; 3566 3567 rtw89_write32_clr(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR, 3568 imr->host_disp_imr_clr); 3569 rtw89_write32_set(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR, 3570 imr->host_disp_imr_set); 3571 rtw89_write32_clr(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR, 3572 imr->cpu_disp_imr_clr); 3573 rtw89_write32_set(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR, 3574 imr->cpu_disp_imr_set); 3575 rtw89_write32_clr(rtwdev, R_AX_OTHER_DISPATCHER_ERR_IMR, 3576 imr->other_disp_imr_clr); 3577 rtw89_write32_set(rtwdev, R_AX_OTHER_DISPATCHER_ERR_IMR, 3578 imr->other_disp_imr_set); 3579 } 3580 3581 static void rtw89_cpuio_imr_enable(struct rtw89_dev *rtwdev) 3582 { 3583 rtw89_write32_clr(rtwdev, R_AX_CPUIO_ERR_IMR, B_AX_CPUIO_IMR_CLR); 3584 rtw89_write32_set(rtwdev, R_AX_CPUIO_ERR_IMR, B_AX_CPUIO_IMR_SET); 3585 } 3586 3587 static void rtw89_bbrpt_imr_enable(struct rtw89_dev *rtwdev) 3588 { 3589 const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; 3590 3591 rtw89_write32_set(rtwdev, imr->bbrpt_com_err_imr_reg, 3592 B_AX_BBRPT_COM_NULL_PLPKTID_ERR_INT_EN); 3593 rtw89_write32_clr(rtwdev, imr->bbrpt_chinfo_err_imr_reg, 3594 B_AX_BBRPT_CHINFO_IMR_CLR); 3595 rtw89_write32_set(rtwdev, imr->bbrpt_chinfo_err_imr_reg, 3596 imr->bbrpt_err_imr_set); 3597 rtw89_write32_set(rtwdev, imr->bbrpt_dfs_err_imr_reg, 3598 B_AX_BBRPT_DFS_TO_ERR_INT_EN); 3599 rtw89_write32_set(rtwdev, R_AX_LA_ERRFLAG, B_AX_LA_IMR_DATA_LOSS_ERR); 3600 } 3601 3602 static void rtw89_scheduler_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx) 3603 { 3604 u32 reg; 3605 3606 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_SCHEDULE_ERR_IMR, mac_idx); 3607 rtw89_write32_clr(rtwdev, reg, B_AX_SORT_NON_IDLE_ERR_INT_EN | 3608 B_AX_FSM_TIMEOUT_ERR_INT_EN); 3609 rtw89_write32_set(rtwdev, reg, B_AX_FSM_TIMEOUT_ERR_INT_EN); 3610 } 3611 3612 static void rtw89_ptcl_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx) 3613 { 3614 const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; 3615 u32 reg; 3616 3617 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PTCL_IMR0, mac_idx); 3618 rtw89_write32_clr(rtwdev, reg, imr->ptcl_imr_clr); 3619 rtw89_write32_set(rtwdev, reg, imr->ptcl_imr_set); 3620 } 3621 3622 static void rtw89_cdma_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx) 3623 { 3624 const struct 
rtw89_imr_info *imr = rtwdev->chip->imr_info; 3625 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3626 u32 reg; 3627 3628 reg = rtw89_mac_reg_by_idx(rtwdev, imr->cdma_imr_0_reg, mac_idx); 3629 rtw89_write32_clr(rtwdev, reg, imr->cdma_imr_0_clr); 3630 rtw89_write32_set(rtwdev, reg, imr->cdma_imr_0_set); 3631 3632 if (chip_id == RTL8852C) { 3633 reg = rtw89_mac_reg_by_idx(rtwdev, imr->cdma_imr_1_reg, mac_idx); 3634 rtw89_write32_clr(rtwdev, reg, imr->cdma_imr_1_clr); 3635 rtw89_write32_set(rtwdev, reg, imr->cdma_imr_1_set); 3636 } 3637 } 3638 3639 static void rtw89_phy_intf_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx) 3640 { 3641 const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; 3642 u32 reg; 3643 3644 reg = rtw89_mac_reg_by_idx(rtwdev, imr->phy_intf_imr_reg, mac_idx); 3645 rtw89_write32_clr(rtwdev, reg, imr->phy_intf_imr_clr); 3646 rtw89_write32_set(rtwdev, reg, imr->phy_intf_imr_set); 3647 } 3648 3649 static void rtw89_rmac_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx) 3650 { 3651 const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; 3652 u32 reg; 3653 3654 reg = rtw89_mac_reg_by_idx(rtwdev, imr->rmac_imr_reg, mac_idx); 3655 rtw89_write32_clr(rtwdev, reg, imr->rmac_imr_clr); 3656 rtw89_write32_set(rtwdev, reg, imr->rmac_imr_set); 3657 } 3658 3659 static void rtw89_tmac_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx) 3660 { 3661 const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; 3662 u32 reg; 3663 3664 reg = rtw89_mac_reg_by_idx(rtwdev, imr->tmac_imr_reg, mac_idx); 3665 rtw89_write32_clr(rtwdev, reg, imr->tmac_imr_clr); 3666 rtw89_write32_set(rtwdev, reg, imr->tmac_imr_set); 3667 } 3668 3669 static int enable_imr_ax(struct rtw89_dev *rtwdev, u8 mac_idx, 3670 enum rtw89_mac_hwmod_sel sel) 3671 { 3672 int ret; 3673 3674 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, sel); 3675 if (ret) { 3676 rtw89_err(rtwdev, "MAC%d mac_idx%d is not ready\n", 3677 sel, mac_idx); 3678 return ret; 3679 } 3680 3681 if (sel == RTW89_DMAC_SEL) { 3682 rtw89_wdrls_imr_enable(rtwdev); 3683 rtw89_wsec_imr_enable(rtwdev); 3684 rtw89_mpdu_trx_imr_enable(rtwdev); 3685 rtw89_sta_sch_imr_enable(rtwdev); 3686 rtw89_txpktctl_imr_enable(rtwdev); 3687 rtw89_wde_imr_enable(rtwdev); 3688 rtw89_ple_imr_enable(rtwdev); 3689 rtw89_pktin_imr_enable(rtwdev); 3690 rtw89_dispatcher_imr_enable(rtwdev); 3691 rtw89_cpuio_imr_enable(rtwdev); 3692 rtw89_bbrpt_imr_enable(rtwdev); 3693 } else if (sel == RTW89_CMAC_SEL) { 3694 rtw89_scheduler_imr_enable(rtwdev, mac_idx); 3695 rtw89_ptcl_imr_enable(rtwdev, mac_idx); 3696 rtw89_cdma_imr_enable(rtwdev, mac_idx); 3697 rtw89_phy_intf_imr_enable(rtwdev, mac_idx); 3698 rtw89_rmac_imr_enable(rtwdev, mac_idx); 3699 rtw89_tmac_imr_enable(rtwdev, mac_idx); 3700 } else { 3701 return -EINVAL; 3702 } 3703 3704 return 0; 3705 } 3706 3707 static void err_imr_ctrl_ax(struct rtw89_dev *rtwdev, bool en) 3708 { 3709 rtw89_write32(rtwdev, R_AX_DMAC_ERR_IMR, 3710 en ? DMAC_ERR_IMR_EN : DMAC_ERR_IMR_DIS); 3711 rtw89_write32(rtwdev, R_AX_CMAC_ERR_IMR, 3712 en ? CMAC0_ERR_IMR_EN : CMAC0_ERR_IMR_DIS); 3713 if (!rtw89_is_rtl885xb(rtwdev) && rtwdev->mac.dle_info.c1_rx_qta) 3714 rtw89_write32(rtwdev, R_AX_CMAC_ERR_IMR_C1, 3715 en ? 
			      CMAC1_ERR_IMR_EN : CMAC1_ERR_IMR_DIS);
}

static int dbcc_enable_ax(struct rtw89_dev *rtwdev, bool enable)
{
	int ret = 0;

	if (enable) {
		ret = band1_enable_ax(rtwdev);
		if (ret) {
			rtw89_err(rtwdev, "[ERR] band1_enable %d\n", ret);
			return ret;
		}

		ret = enable_imr_ax(rtwdev, RTW89_MAC_1, RTW89_CMAC_SEL);
		if (ret) {
			rtw89_err(rtwdev, "[ERR] enable CMAC1 IMR %d\n", ret);
			return ret;
		}
	} else {
		rtw89_err(rtwdev, "[ERR] disable dbcc is not implemented yet\n");
		return -EINVAL;
	}

	return 0;
}

static int set_host_rpr_ax(struct rtw89_dev *rtwdev)
{
	if (rtwdev->hci.type == RTW89_HCI_TYPE_PCIE) {
		rtw89_write32_mask(rtwdev, R_AX_WDRLS_CFG,
				   B_AX_WDRLS_MODE_MASK, RTW89_RPR_MODE_POH);
		rtw89_write32_set(rtwdev, R_AX_RLSRPT0_CFG0,
				  B_AX_RLSRPT0_FLTR_MAP_MASK);
	} else {
		rtw89_write32_mask(rtwdev, R_AX_WDRLS_CFG,
				   B_AX_WDRLS_MODE_MASK, RTW89_RPR_MODE_STF);
		rtw89_write32_clr(rtwdev, R_AX_RLSRPT0_CFG0,
				  B_AX_RLSRPT0_FLTR_MAP_MASK);
	}

	rtw89_write32_mask(rtwdev, R_AX_RLSRPT0_CFG1, B_AX_RLSRPT0_AGGNUM_MASK, 30);
	rtw89_write32_mask(rtwdev, R_AX_RLSRPT0_CFG1, B_AX_RLSRPT0_TO_MASK, 255);

	return 0;
}

static int trx_init_ax(struct rtw89_dev *rtwdev)
{
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
	enum rtw89_qta_mode qta_mode = rtwdev->mac.qta_mode;
	int ret;

	ret = dmac_init_ax(rtwdev, 0);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]DMAC init %d\n", ret);
		return ret;
	}

	ret = cmac_init_ax(rtwdev, 0);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]CMAC%d init %d\n", 0, ret);
		return ret;
	}

	if (rtw89_mac_is_qta_dbcc(rtwdev, qta_mode)) {
		ret = dbcc_enable_ax(rtwdev, true);
		if (ret) {
			rtw89_err(rtwdev, "[ERR]dbcc_enable init %d\n", ret);
			return ret;
		}
	}

	ret = enable_imr_ax(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
	if (ret) {
		rtw89_err(rtwdev, "[ERR] enable DMAC IMR %d\n", ret);
		return ret;
	}

	ret = enable_imr_ax(rtwdev, RTW89_MAC_0, RTW89_CMAC_SEL);
	if (ret) {
		rtw89_err(rtwdev, "[ERR] enable CMAC0 IMR %d\n", ret);
		return ret;
	}

	err_imr_ctrl_ax(rtwdev, true);

	ret = set_host_rpr_ax(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "[ERR] set host rpr %d\n", ret);
		return ret;
	}

	if (chip_id == RTL8852C)
		rtw89_write32_clr(rtwdev, R_AX_RSP_CHK_SIG,
				  B_AX_RSP_STATIC_RTS_CHK_SERV_BW_EN);

	return 0;
}

static int rtw89_mac_feat_init(struct rtw89_dev *rtwdev)
{
#define BACAM_1024BMP_OCC_ENTRY 4
#define BACAM_MAX_RU_SUPPORT_B0_STA 1
#define BACAM_MAX_RU_SUPPORT_B1_STA 1
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u8 users, offset;

	if (chip->bacam_ver != RTW89_BACAM_V1)
		return 0;

	offset = 0;
	users = BACAM_MAX_RU_SUPPORT_B0_STA;
	rtw89_fw_h2c_init_ba_cam_users(rtwdev, users, offset, RTW89_MAC_0);

	offset += users * BACAM_1024BMP_OCC_ENTRY;
	users = BACAM_MAX_RU_SUPPORT_B1_STA;
	rtw89_fw_h2c_init_ba_cam_users(rtwdev, users, offset, RTW89_MAC_1);

	return 0;
}

static void rtw89_disable_fw_watchdog(struct rtw89_dev *rtwdev)
{
	u32 val32;

	if (rtw89_is_rtl885xb(rtwdev)) {
		rtw89_write32_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_APB_WRAP_EN);
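		/* RTL885xB path: the firmware watchdog is quieted by pulsing
		 * the APB wrapper enable (cleared above, set again just below),
		 * rather than by programming the WDT registers through the
		 * CPU_LOCAL indirect-access path used for other chips further
		 * down in this function.
		 */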
		rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_APB_WRAP_EN);
		return;
	}

	rtw89_mac_mem_write(rtwdev, R_AX_WDT_CTRL,
			    WDT_CTRL_ALL_DIS, RTW89_MAC_MEM_CPU_LOCAL);

	val32 = rtw89_mac_mem_read(rtwdev, R_AX_WDT_STATUS, RTW89_MAC_MEM_CPU_LOCAL);
	val32 |= B_AX_FS_WDT_INT;
	val32 &= ~B_AX_FS_WDT_INT_MSK;
	rtw89_mac_mem_write(rtwdev, R_AX_WDT_STATUS, val32, RTW89_MAC_MEM_CPU_LOCAL);
}

static void rtw89_mac_disable_cpu_ax(struct rtw89_dev *rtwdev)
{
	clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);

	rtw89_write32_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_WCPU_EN);
	rtw89_write32_clr(rtwdev, R_AX_WCPU_FW_CTRL, B_AX_WCPU_FWDL_EN |
			  B_AX_H2C_PATH_RDY | B_AX_FWDL_PATH_RDY);
	rtw89_write32_clr(rtwdev, R_AX_SYS_CLK_CTRL, B_AX_CPU_CLK_EN);

	rtw89_disable_fw_watchdog(rtwdev);

	rtw89_write32_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
	rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN);
}

static int rtw89_mac_enable_cpu_ax(struct rtw89_dev *rtwdev, u8 boot_reason,
				   bool dlfw, bool include_bb)
{
	u32 val;
	int ret;

	if (rtw89_read32(rtwdev, R_AX_PLATFORM_ENABLE) & B_AX_WCPU_EN)
		return -EFAULT;

	rtw89_write32(rtwdev, R_AX_UDM1, 0);
	rtw89_write32(rtwdev, R_AX_UDM2, 0);
	rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
	rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);
	rtw89_write32(rtwdev, R_AX_HALT_H2C, 0);
	rtw89_write32(rtwdev, R_AX_HALT_C2H, 0);

	rtw89_write32_set(rtwdev, R_AX_SYS_CLK_CTRL, B_AX_CPU_CLK_EN);

	val = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL);
	val &= ~(B_AX_WCPU_FWDL_EN | B_AX_H2C_PATH_RDY | B_AX_FWDL_PATH_RDY);
	val = u32_replace_bits(val, RTW89_FWDL_INITIAL_STATE,
			       B_AX_WCPU_FWDL_STS_MASK);

	if (dlfw)
		val |= B_AX_WCPU_FWDL_EN;

	rtw89_write32(rtwdev, R_AX_WCPU_FW_CTRL, val);

	if (rtw89_is_rtl885xb(rtwdev))
		rtw89_write32_mask(rtwdev, R_AX_SEC_CTRL,
				   B_AX_SEC_IDMEM_SIZE_CONFIG_MASK, 0x2);

	rtw89_write16_mask(rtwdev, R_AX_BOOT_REASON, B_AX_BOOT_REASON_MASK,
			   boot_reason);
	rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_WCPU_EN);

	if (!dlfw) {
		mdelay(5);

		ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE);
		if (ret)
			return ret;
	}

	return 0;
}

static void rtw89_mac_hci_func_en_ax(struct rtw89_dev *rtwdev)
{
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
	u32 val;

	if (chip_id == RTL8852C)
		val = B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_DISPATCHER_EN |
		      B_AX_PKT_BUF_EN | B_AX_H_AXIDMA_EN;
	else
		val = B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_DISPATCHER_EN |
		      B_AX_PKT_BUF_EN;
	rtw89_write32(rtwdev, R_AX_DMAC_FUNC_EN, val);
}

static void rtw89_mac_dmac_func_pre_en_ax(struct rtw89_dev *rtwdev)
{
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
	u32 val;

	if (chip_id == RTL8851B || chip_id == RTL8852BT)
		val = B_AX_DISPATCHER_CLK_EN | B_AX_AXIDMA_CLK_EN;
	else
		val = B_AX_DISPATCHER_CLK_EN;
	rtw89_write32(rtwdev, R_AX_DMAC_CLK_EN, val);

	if (chip_id != RTL8852C)
		return;

	val = rtw89_read32(rtwdev, R_AX_HAXI_INIT_CFG1);
	val &= ~(B_AX_DMA_MODE_MASK | B_AX_STOP_AXI_MST);
	val |= FIELD_PREP(B_AX_DMA_MODE_MASK, DMA_MOD_PCIE_1B) |
	       B_AX_TXHCI_EN_V1 | B_AX_RXHCI_EN_V1;
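	/* RTL8852C only (other chips returned above): commit the HAXI DMA
	 * mode assembled in 'val' (DMA_MOD_PCIE_1B plus the V1 TX/RX HCI
	 * enables), release the per-channel DMA stop bits, and turn on the
	 * AXIDMA block in R_AX_PLATFORM_ENABLE.
	 */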
	rtw89_write32(rtwdev, R_AX_HAXI_INIT_CFG1, val);

	rtw89_write32_clr(rtwdev, R_AX_HAXI_DMA_STOP1,
			  B_AX_STOP_ACH0 | B_AX_STOP_ACH1 | B_AX_STOP_ACH3 |
			  B_AX_STOP_ACH4 | B_AX_STOP_ACH5 | B_AX_STOP_ACH6 |
			  B_AX_STOP_ACH7 | B_AX_STOP_CH8 | B_AX_STOP_CH9 |
			  B_AX_STOP_CH12 | B_AX_STOP_ACH2);
	rtw89_write32_clr(rtwdev, R_AX_HAXI_DMA_STOP2, B_AX_STOP_CH10 | B_AX_STOP_CH11);
	rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_AXIDMA_EN);
}

static int rtw89_mac_dmac_pre_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	int ret;

	mac->hci_func_en(rtwdev);
	mac->dmac_func_pre_en(rtwdev);

	ret = rtw89_mac_dle_init(rtwdev, RTW89_QTA_DLFW, rtwdev->mac.qta_mode);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]DLE pre init %d\n", ret);
		return ret;
	}

	ret = rtw89_mac_hfc_init(rtwdev, true, false, true);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]HCI FC pre init %d\n", ret);
		return ret;
	}

	return ret;
}

int rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
{
	rtw89_write8_set(rtwdev, R_AX_SYS_FUNC_EN,
			 B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
	rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL,
			  B_AX_WLRF1_CTRL_7 | B_AX_WLRF1_CTRL_1 |
			  B_AX_WLRF_CTRL_7 | B_AX_WLRF_CTRL_1);
	rtw89_write8_set(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_ALL_CYCLE);

	return 0;
}
EXPORT_SYMBOL(rtw89_mac_enable_bb_rf);

int rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
{
	rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN,
			 B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
	rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL,
			  B_AX_WLRF1_CTRL_7 | B_AX_WLRF1_CTRL_1 |
			  B_AX_WLRF_CTRL_7 | B_AX_WLRF_CTRL_1);
	rtw89_write8_clr(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_ALL_CYCLE);

	return 0;
}
EXPORT_SYMBOL(rtw89_mac_disable_bb_rf);

int rtw89_mac_partial_init(struct rtw89_dev *rtwdev, bool include_bb)
{
	int ret;

	rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);

	if (include_bb) {
		rtw89_chip_bb_preinit(rtwdev, RTW89_PHY_0);
		if (rtwdev->dbcc_en)
			rtw89_chip_bb_preinit(rtwdev, RTW89_PHY_1);
	}

	ret = rtw89_mac_dmac_pre_init(rtwdev);
	if (ret)
		return ret;

	if (rtwdev->hci.ops->mac_pre_init) {
		ret = rtwdev->hci.ops->mac_pre_init(rtwdev);
		if (ret)
			return ret;
	}

	ret = rtw89_fw_download(rtwdev, RTW89_FW_NORMAL, include_bb);
	if (ret)
		return ret;

	return 0;
}

int rtw89_mac_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	bool include_bb = !!chip->bbmcu_nr;
	int ret;

	ret = rtw89_mac_pwr_on(rtwdev);
	if (ret)
		return ret;

	ret = rtw89_mac_partial_init(rtwdev, include_bb);
	if (ret)
		goto fail;

	ret = rtw89_chip_enable_bb_rf(rtwdev);
	if (ret)
		goto fail;

	ret = mac->sys_init(rtwdev);
	if (ret)
		goto fail;

	ret = mac->trx_init(rtwdev);
	if (ret)
		goto fail;

	ret = rtw89_mac_feat_init(rtwdev);
	if (ret)
		goto fail;

	if (rtwdev->hci.ops->mac_post_init) {
		ret = rtwdev->hci.ops->mac_post_init(rtwdev);
		if (ret)
			goto fail;
	}

	rtw89_fw_send_all_early_h2c(rtwdev);
	rtw89_fw_h2c_set_ofld_cfg(rtwdev);

	return ret;
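/* Any failure after rtw89_mac_pwr_on() unwinds through the common power-off
 * path below, leaving the MAC powered down on error.
 */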
fail: 4081 rtw89_mac_pwr_off(rtwdev); 4082 4083 return ret; 4084 } 4085 4086 static void rtw89_mac_dmac_tbl_init(struct rtw89_dev *rtwdev, u8 macid) 4087 { 4088 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 4089 u8 i; 4090 4091 if (rtwdev->chip->chip_gen != RTW89_CHIP_AX || sec->secure_boot) 4092 return; 4093 4094 for (i = 0; i < 4; i++) { 4095 rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, 4096 DMAC_TBL_BASE_ADDR + (macid << 4) + (i << 2)); 4097 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY, 0); 4098 } 4099 } 4100 4101 static void rtw89_mac_cmac_tbl_init(struct rtw89_dev *rtwdev, u8 macid) 4102 { 4103 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 4104 4105 if (rtwdev->chip->chip_gen != RTW89_CHIP_AX || sec->secure_boot) 4106 return; 4107 4108 rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, 4109 CMAC_TBL_BASE_ADDR + macid * CCTL_INFO_SIZE); 4110 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY, 0x4); 4111 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 4, 0x400A0004); 4112 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 8, 0); 4113 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 12, 0); 4114 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 16, 0); 4115 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 20, 0xE43000B); 4116 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 24, 0); 4117 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 28, 0xB8109); 4118 } 4119 4120 int rtw89_mac_set_macid_pause(struct rtw89_dev *rtwdev, u8 macid, bool pause) 4121 { 4122 u8 sh = FIELD_GET(GENMASK(4, 0), macid); 4123 u8 grp = macid >> 5; 4124 int ret; 4125 4126 /* If this is called by change_interface() in the case of P2P, it could 4127 * be power-off, so ignore this operation. 4128 */ 4129 if (test_bit(RTW89_FLAG_CHANGING_INTERFACE, rtwdev->flags) && 4130 !test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) 4131 return 0; 4132 4133 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_CMAC_SEL); 4134 if (ret) 4135 return ret; 4136 4137 rtw89_fw_h2c_macid_pause(rtwdev, sh, grp, pause); 4138 4139 return 0; 4140 } 4141 4142 static const struct rtw89_port_reg rtw89_port_base_ax = { 4143 .port_cfg = R_AX_PORT_CFG_P0, 4144 .tbtt_prohib = R_AX_TBTT_PROHIB_P0, 4145 .bcn_area = R_AX_BCN_AREA_P0, 4146 .bcn_early = R_AX_BCNERLYINT_CFG_P0, 4147 .tbtt_early = R_AX_TBTTERLYINT_CFG_P0, 4148 .tbtt_agg = R_AX_TBTT_AGG_P0, 4149 .bcn_space = R_AX_BCN_SPACE_CFG_P0, 4150 .bcn_forcetx = R_AX_BCN_FORCETX_P0, 4151 .bcn_err_cnt = R_AX_BCN_ERR_CNT_P0, 4152 .bcn_err_flag = R_AX_BCN_ERR_FLAG_P0, 4153 .dtim_ctrl = R_AX_DTIM_CTRL_P0, 4154 .tbtt_shift = R_AX_TBTT_SHIFT_P0, 4155 .bcn_cnt_tmr = R_AX_BCN_CNT_TMR_P0, 4156 .tsftr_l = R_AX_TSFTR_LOW_P0, 4157 .tsftr_h = R_AX_TSFTR_HIGH_P0, 4158 .md_tsft = R_AX_MD_TSFT_STMP_CTL, 4159 .bss_color = R_AX_PTCL_BSS_COLOR_0, 4160 .mbssid = R_AX_MBSSID_CTRL, 4161 .mbssid_drop = R_AX_MBSSID_DROP_0, 4162 .tsf_sync = R_AX_PORT0_TSF_SYNC, 4163 .ptcl_dbg = R_AX_PTCL_DBG, 4164 .ptcl_dbg_info = R_AX_PTCL_DBG_INFO, 4165 .bcn_drop_all = R_AX_BCN_DROP_ALL0, 4166 .hiq_win = {R_AX_P0MB_HGQ_WINDOW_CFG_0, R_AX_PORT_HGQ_WINDOW_CFG, 4167 R_AX_PORT_HGQ_WINDOW_CFG + 1, R_AX_PORT_HGQ_WINDOW_CFG + 2, 4168 R_AX_PORT_HGQ_WINDOW_CFG + 3}, 4169 }; 4170 4171 static void rtw89_mac_check_packet_ctrl(struct rtw89_dev *rtwdev, 4172 struct rtw89_vif_link *rtwvif_link, u8 type) 4173 { 4174 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4175 const struct rtw89_port_reg *p = mac->port_base; 4176 u8 mask = B_AX_PTCL_DBG_INFO_MASK_BY_PORT(rtwvif_link->port); 4177 u32 reg_info, reg_ctrl; 4178 u32 val; 4179 int ret; 4180 4181 reg_info = 
rtw89_mac_reg_by_idx(rtwdev, p->ptcl_dbg_info, rtwvif_link->mac_idx); 4182 reg_ctrl = rtw89_mac_reg_by_idx(rtwdev, p->ptcl_dbg, rtwvif_link->mac_idx); 4183 4184 rtw89_write32_mask(rtwdev, reg_ctrl, B_AX_PTCL_DBG_SEL_MASK, type); 4185 rtw89_write32_set(rtwdev, reg_ctrl, B_AX_PTCL_DBG_EN); 4186 fsleep(100); 4187 4188 ret = read_poll_timeout(rtw89_read32_mask, val, val == 0, 1000, 100000, 4189 true, rtwdev, reg_info, mask); 4190 if (ret) 4191 rtw89_warn(rtwdev, "Polling beacon packet empty fail\n"); 4192 } 4193 4194 static void rtw89_mac_bcn_drop(struct rtw89_dev *rtwdev, 4195 struct rtw89_vif_link *rtwvif_link) 4196 { 4197 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4198 const struct rtw89_port_reg *p = mac->port_base; 4199 4200 rtw89_write32_set(rtwdev, p->bcn_drop_all, BIT(rtwvif_link->port)); 4201 rtw89_write32_port_mask(rtwdev, rtwvif_link, p->tbtt_prohib, B_AX_TBTT_SETUP_MASK, 4202 1); 4203 rtw89_write32_port_mask(rtwdev, rtwvif_link, p->bcn_area, B_AX_BCN_MSK_AREA_MASK, 4204 0); 4205 rtw89_write32_port_mask(rtwdev, rtwvif_link, p->tbtt_prohib, B_AX_TBTT_HOLD_MASK, 4206 0); 4207 rtw89_write32_port_mask(rtwdev, rtwvif_link, p->bcn_early, B_AX_BCNERLY_MASK, 2); 4208 rtw89_write16_port_mask(rtwdev, rtwvif_link, p->tbtt_early, 4209 B_AX_TBTTERLY_MASK, 1); 4210 rtw89_write32_port_mask(rtwdev, rtwvif_link, p->bcn_space, 4211 B_AX_BCN_SPACE_MASK, 1); 4212 rtw89_write32_port_set(rtwdev, rtwvif_link, p->port_cfg, B_AX_BCNTX_EN); 4213 4214 rtw89_mac_check_packet_ctrl(rtwdev, rtwvif_link, AX_PTCL_DBG_BCNQ_NUM0); 4215 if (rtwvif_link->port == RTW89_PORT_0) 4216 rtw89_mac_check_packet_ctrl(rtwdev, rtwvif_link, AX_PTCL_DBG_BCNQ_NUM1); 4217 4218 rtw89_write32_clr(rtwdev, p->bcn_drop_all, BIT(rtwvif_link->port)); 4219 rtw89_write32_port_clr(rtwdev, rtwvif_link, p->port_cfg, B_AX_TBTT_PROHIB_EN); 4220 fsleep(2000); 4221 } 4222 4223 #define BCN_INTERVAL 100 4224 #define BCN_ERLY_DEF 160 4225 #define BCN_SETUP_DEF 2 4226 #define BCN_HOLD_DEF 200 4227 #define BCN_MASK_DEF 0 4228 #define TBTT_ERLY_DEF 5 4229 #define BCN_SET_UNIT 32 4230 #define BCN_ERLY_SET_DLY (10 * 2) 4231 4232 static void rtw89_mac_port_cfg_func_sw(struct rtw89_dev *rtwdev, 4233 struct rtw89_vif_link *rtwvif_link) 4234 { 4235 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4236 const struct rtw89_port_reg *p = mac->port_base; 4237 const struct rtw89_chip_info *chip = rtwdev->chip; 4238 struct ieee80211_bss_conf *bss_conf; 4239 bool need_backup = false; 4240 u32 backup_val; 4241 u16 beacon_int; 4242 4243 if (!rtw89_read32_port_mask(rtwdev, rtwvif_link, p->port_cfg, B_AX_PORT_FUNC_EN)) 4244 return; 4245 4246 if (chip->chip_id == RTL8852A && rtwvif_link->port != RTW89_PORT_0) { 4247 need_backup = true; 4248 backup_val = rtw89_read32_port(rtwdev, rtwvif_link, p->tbtt_prohib); 4249 } 4250 4251 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) 4252 rtw89_mac_bcn_drop(rtwdev, rtwvif_link); 4253 4254 if (chip->chip_id == RTL8852A) { 4255 rtw89_write32_port_clr(rtwdev, rtwvif_link, p->tbtt_prohib, 4256 B_AX_TBTT_SETUP_MASK); 4257 rtw89_write32_port_mask(rtwdev, rtwvif_link, p->tbtt_prohib, 4258 B_AX_TBTT_HOLD_MASK, 1); 4259 rtw89_write16_port_clr(rtwdev, rtwvif_link, p->tbtt_early, 4260 B_AX_TBTTERLY_MASK); 4261 rtw89_write16_port_clr(rtwdev, rtwvif_link, p->bcn_early, 4262 B_AX_BCNERLY_MASK); 4263 } 4264 4265 rcu_read_lock(); 4266 4267 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 4268 beacon_int = bss_conf->beacon_int; 4269 4270 rcu_read_unlock(); 4271 4272 msleep(beacon_int + 1); 4273 
rtw89_write32_port_clr(rtwdev, rtwvif_link, p->port_cfg, B_AX_PORT_FUNC_EN | 4274 B_AX_BRK_SETUP); 4275 rtw89_write32_port_set(rtwdev, rtwvif_link, p->port_cfg, B_AX_TSFTR_RST); 4276 rtw89_write32_port(rtwdev, rtwvif_link, p->bcn_cnt_tmr, 0); 4277 4278 if (need_backup) 4279 rtw89_write32_port(rtwdev, rtwvif_link, p->tbtt_prohib, backup_val); 4280 } 4281 4282 static void rtw89_mac_port_cfg_tx_rpt(struct rtw89_dev *rtwdev, 4283 struct rtw89_vif_link *rtwvif_link, bool en) 4284 { 4285 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4286 const struct rtw89_port_reg *p = mac->port_base; 4287 4288 if (en) 4289 rtw89_write32_port_set(rtwdev, rtwvif_link, p->port_cfg, 4290 B_AX_TXBCN_RPT_EN); 4291 else 4292 rtw89_write32_port_clr(rtwdev, rtwvif_link, p->port_cfg, 4293 B_AX_TXBCN_RPT_EN); 4294 } 4295 4296 static void rtw89_mac_port_cfg_rx_rpt(struct rtw89_dev *rtwdev, 4297 struct rtw89_vif_link *rtwvif_link, bool en) 4298 { 4299 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4300 const struct rtw89_port_reg *p = mac->port_base; 4301 4302 if (en) 4303 rtw89_write32_port_set(rtwdev, rtwvif_link, p->port_cfg, 4304 B_AX_RXBCN_RPT_EN); 4305 else 4306 rtw89_write32_port_clr(rtwdev, rtwvif_link, p->port_cfg, 4307 B_AX_RXBCN_RPT_EN); 4308 } 4309 4310 static void rtw89_mac_port_cfg_net_type(struct rtw89_dev *rtwdev, 4311 struct rtw89_vif_link *rtwvif_link) 4312 { 4313 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4314 const struct rtw89_port_reg *p = mac->port_base; 4315 4316 rtw89_write32_port_mask(rtwdev, rtwvif_link, p->port_cfg, B_AX_NET_TYPE_MASK, 4317 rtwvif_link->net_type); 4318 } 4319 4320 static void rtw89_mac_port_cfg_bcn_prct(struct rtw89_dev *rtwdev, 4321 struct rtw89_vif_link *rtwvif_link) 4322 { 4323 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4324 const struct rtw89_port_reg *p = mac->port_base; 4325 bool en = rtwvif_link->net_type != RTW89_NET_TYPE_NO_LINK; 4326 u32 bits = B_AX_TBTT_PROHIB_EN | B_AX_BRK_SETUP; 4327 4328 if (en) 4329 rtw89_write32_port_set(rtwdev, rtwvif_link, p->port_cfg, bits); 4330 else 4331 rtw89_write32_port_clr(rtwdev, rtwvif_link, p->port_cfg, bits); 4332 } 4333 4334 static void rtw89_mac_port_cfg_rx_sw(struct rtw89_dev *rtwdev, 4335 struct rtw89_vif_link *rtwvif_link) 4336 { 4337 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4338 const struct rtw89_port_reg *p = mac->port_base; 4339 bool en = rtwvif_link->net_type == RTW89_NET_TYPE_INFRA || 4340 rtwvif_link->net_type == RTW89_NET_TYPE_AD_HOC; 4341 u32 bit = B_AX_RX_BSSID_FIT_EN; 4342 4343 if (en) 4344 rtw89_write32_port_set(rtwdev, rtwvif_link, p->port_cfg, bit); 4345 else 4346 rtw89_write32_port_clr(rtwdev, rtwvif_link, p->port_cfg, bit); 4347 } 4348 4349 void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev, 4350 struct rtw89_vif_link *rtwvif_link, bool en) 4351 { 4352 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4353 const struct rtw89_port_reg *p = mac->port_base; 4354 4355 if (en) 4356 rtw89_write32_port_set(rtwdev, rtwvif_link, p->port_cfg, B_AX_TSF_UDT_EN); 4357 else 4358 rtw89_write32_port_clr(rtwdev, rtwvif_link, p->port_cfg, B_AX_TSF_UDT_EN); 4359 } 4360 4361 static void rtw89_mac_port_cfg_rx_sync_by_nettype(struct rtw89_dev *rtwdev, 4362 struct rtw89_vif_link *rtwvif_link) 4363 { 4364 bool en = rtwvif_link->net_type == RTW89_NET_TYPE_INFRA || 4365 rtwvif_link->net_type == RTW89_NET_TYPE_AD_HOC; 4366 4367 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, en); 4368 } 4369 4370 static void rtw89_mac_port_cfg_tx_sw(struct 
rtw89_dev *rtwdev, 4371 struct rtw89_vif_link *rtwvif_link, bool en) 4372 { 4373 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4374 const struct rtw89_port_reg *p = mac->port_base; 4375 4376 if (en) 4377 rtw89_write32_port_set(rtwdev, rtwvif_link, p->port_cfg, B_AX_BCNTX_EN); 4378 else 4379 rtw89_write32_port_clr(rtwdev, rtwvif_link, p->port_cfg, B_AX_BCNTX_EN); 4380 } 4381 4382 static void rtw89_mac_port_cfg_tx_sw_by_nettype(struct rtw89_dev *rtwdev, 4383 struct rtw89_vif_link *rtwvif_link) 4384 { 4385 bool en = rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE || 4386 rtwvif_link->net_type == RTW89_NET_TYPE_AD_HOC; 4387 4388 rtw89_mac_port_cfg_tx_sw(rtwdev, rtwvif_link, en); 4389 } 4390 4391 void rtw89_mac_enable_beacon_for_ap_vifs(struct rtw89_dev *rtwdev, bool en) 4392 { 4393 struct rtw89_vif_link *rtwvif_link; 4394 struct rtw89_vif *rtwvif; 4395 unsigned int link_id; 4396 4397 rtw89_for_each_rtwvif(rtwdev, rtwvif) 4398 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) 4399 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) 4400 rtw89_mac_port_cfg_tx_sw(rtwdev, rtwvif_link, en); 4401 } 4402 4403 static void rtw89_mac_port_cfg_bcn_intv(struct rtw89_dev *rtwdev, 4404 struct rtw89_vif_link *rtwvif_link) 4405 { 4406 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4407 const struct rtw89_port_reg *p = mac->port_base; 4408 struct ieee80211_bss_conf *bss_conf; 4409 u16 bcn_int; 4410 4411 rcu_read_lock(); 4412 4413 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 4414 if (bss_conf->beacon_int) 4415 bcn_int = bss_conf->beacon_int; 4416 else 4417 bcn_int = BCN_INTERVAL; 4418 4419 rcu_read_unlock(); 4420 4421 rtw89_write32_port_mask(rtwdev, rtwvif_link, p->bcn_space, B_AX_BCN_SPACE_MASK, 4422 bcn_int); 4423 } 4424 4425 static void rtw89_mac_port_cfg_hiq_win(struct rtw89_dev *rtwdev, 4426 struct rtw89_vif_link *rtwvif_link) 4427 { 4428 u8 win = rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE ? 
16 : 0; 4429 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4430 const struct rtw89_port_reg *p = mac->port_base; 4431 u8 port = rtwvif_link->port; 4432 u32 reg; 4433 4434 reg = rtw89_mac_reg_by_idx(rtwdev, p->hiq_win[port], rtwvif_link->mac_idx); 4435 rtw89_write8(rtwdev, reg, win); 4436 } 4437 4438 static void rtw89_mac_port_cfg_hiq_dtim(struct rtw89_dev *rtwdev, 4439 struct rtw89_vif_link *rtwvif_link) 4440 { 4441 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4442 const struct rtw89_port_reg *p = mac->port_base; 4443 struct ieee80211_bss_conf *bss_conf; 4444 u8 dtim_period; 4445 u32 addr; 4446 4447 rcu_read_lock(); 4448 4449 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 4450 dtim_period = bss_conf->dtim_period; 4451 4452 rcu_read_unlock(); 4453 4454 addr = rtw89_mac_reg_by_idx(rtwdev, p->md_tsft, rtwvif_link->mac_idx); 4455 rtw89_write8_set(rtwdev, addr, B_AX_UPD_HGQMD | B_AX_UPD_TIMIE); 4456 4457 rtw89_write16_port_mask(rtwdev, rtwvif_link, p->dtim_ctrl, B_AX_DTIM_NUM_MASK, 4458 dtim_period); 4459 } 4460 4461 static void rtw89_mac_port_cfg_bcn_setup_time(struct rtw89_dev *rtwdev, 4462 struct rtw89_vif_link *rtwvif_link) 4463 { 4464 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4465 const struct rtw89_port_reg *p = mac->port_base; 4466 4467 rtw89_write32_port_mask(rtwdev, rtwvif_link, p->tbtt_prohib, 4468 B_AX_TBTT_SETUP_MASK, BCN_SETUP_DEF); 4469 } 4470 4471 static void rtw89_mac_port_cfg_bcn_hold_time(struct rtw89_dev *rtwdev, 4472 struct rtw89_vif_link *rtwvif_link) 4473 { 4474 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4475 const struct rtw89_port_reg *p = mac->port_base; 4476 4477 rtw89_write32_port_mask(rtwdev, rtwvif_link, p->tbtt_prohib, 4478 B_AX_TBTT_HOLD_MASK, BCN_HOLD_DEF); 4479 } 4480 4481 static void rtw89_mac_port_cfg_bcn_mask_area(struct rtw89_dev *rtwdev, 4482 struct rtw89_vif_link *rtwvif_link) 4483 { 4484 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4485 const struct rtw89_port_reg *p = mac->port_base; 4486 4487 rtw89_write32_port_mask(rtwdev, rtwvif_link, p->bcn_area, 4488 B_AX_BCN_MSK_AREA_MASK, BCN_MASK_DEF); 4489 } 4490 4491 static void rtw89_mac_port_cfg_tbtt_early(struct rtw89_dev *rtwdev, 4492 struct rtw89_vif_link *rtwvif_link) 4493 { 4494 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4495 const struct rtw89_port_reg *p = mac->port_base; 4496 4497 rtw89_write16_port_mask(rtwdev, rtwvif_link, p->tbtt_early, 4498 B_AX_TBTTERLY_MASK, TBTT_ERLY_DEF); 4499 } 4500 4501 static void rtw89_mac_port_cfg_bss_color(struct rtw89_dev *rtwdev, 4502 struct rtw89_vif_link *rtwvif_link) 4503 { 4504 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4505 const struct rtw89_port_reg *p = mac->port_base; 4506 static const u32 masks[RTW89_PORT_NUM] = { 4507 B_AX_BSS_COLOB_AX_PORT_0_MASK, B_AX_BSS_COLOB_AX_PORT_1_MASK, 4508 B_AX_BSS_COLOB_AX_PORT_2_MASK, B_AX_BSS_COLOB_AX_PORT_3_MASK, 4509 B_AX_BSS_COLOB_AX_PORT_4_MASK, 4510 }; 4511 struct ieee80211_bss_conf *bss_conf; 4512 u8 port = rtwvif_link->port; 4513 u32 reg_base; 4514 u32 reg; 4515 u8 bss_color; 4516 4517 rcu_read_lock(); 4518 4519 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 4520 bss_color = bss_conf->he_bss_color.color; 4521 4522 rcu_read_unlock(); 4523 4524 reg_base = port >= 4 ? 
p->bss_color + 4 : p->bss_color; 4525 reg = rtw89_mac_reg_by_idx(rtwdev, reg_base, rtwvif_link->mac_idx); 4526 rtw89_write32_mask(rtwdev, reg, masks[port], bss_color); 4527 } 4528 4529 static void rtw89_mac_port_cfg_mbssid(struct rtw89_dev *rtwdev, 4530 struct rtw89_vif_link *rtwvif_link) 4531 { 4532 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4533 const struct rtw89_port_reg *p = mac->port_base; 4534 u8 port = rtwvif_link->port; 4535 u32 reg; 4536 4537 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) 4538 return; 4539 4540 if (port == 0) { 4541 reg = rtw89_mac_reg_by_idx(rtwdev, p->mbssid, rtwvif_link->mac_idx); 4542 rtw89_write32_clr(rtwdev, reg, B_AX_P0MB_ALL_MASK); 4543 } 4544 } 4545 4546 static void rtw89_mac_port_cfg_hiq_drop(struct rtw89_dev *rtwdev, 4547 struct rtw89_vif_link *rtwvif_link) 4548 { 4549 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4550 const struct rtw89_port_reg *p = mac->port_base; 4551 u8 port = rtwvif_link->port; 4552 u32 reg; 4553 u32 val; 4554 4555 reg = rtw89_mac_reg_by_idx(rtwdev, p->mbssid_drop, rtwvif_link->mac_idx); 4556 val = rtw89_read32(rtwdev, reg); 4557 val &= ~FIELD_PREP(B_AX_PORT_DROP_4_0_MASK, BIT(port)); 4558 if (port == 0) 4559 val &= ~BIT(0); 4560 rtw89_write32(rtwdev, reg, val); 4561 } 4562 4563 static void rtw89_mac_port_cfg_func_en(struct rtw89_dev *rtwdev, 4564 struct rtw89_vif_link *rtwvif_link, bool enable) 4565 { 4566 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4567 const struct rtw89_port_reg *p = mac->port_base; 4568 4569 if (enable) 4570 rtw89_write32_port_set(rtwdev, rtwvif_link, p->port_cfg, 4571 B_AX_PORT_FUNC_EN); 4572 else 4573 rtw89_write32_port_clr(rtwdev, rtwvif_link, p->port_cfg, 4574 B_AX_PORT_FUNC_EN); 4575 } 4576 4577 static void rtw89_mac_port_cfg_bcn_early(struct rtw89_dev *rtwdev, 4578 struct rtw89_vif_link *rtwvif_link) 4579 { 4580 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4581 const struct rtw89_port_reg *p = mac->port_base; 4582 4583 rtw89_write32_port_mask(rtwdev, rtwvif_link, p->bcn_early, B_AX_BCNERLY_MASK, 4584 BCN_ERLY_DEF); 4585 } 4586 4587 static void rtw89_mac_port_cfg_tbtt_shift(struct rtw89_dev *rtwdev, 4588 struct rtw89_vif_link *rtwvif_link) 4589 { 4590 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4591 const struct rtw89_port_reg *p = mac->port_base; 4592 u16 val; 4593 4594 if (rtwdev->chip->chip_id != RTL8852C) 4595 return; 4596 4597 if (rtwvif_link->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT && 4598 rtwvif_link->wifi_role != RTW89_WIFI_ROLE_STATION) 4599 return; 4600 4601 val = FIELD_PREP(B_AX_TBTT_SHIFT_OFST_MAG, 1) | 4602 B_AX_TBTT_SHIFT_OFST_SIGN; 4603 4604 rtw89_write16_port_mask(rtwdev, rtwvif_link, p->tbtt_shift, 4605 B_AX_TBTT_SHIFT_OFST_MASK, val); 4606 } 4607 4608 void rtw89_mac_port_tsf_sync(struct rtw89_dev *rtwdev, 4609 struct rtw89_vif_link *rtwvif_link, 4610 struct rtw89_vif_link *rtwvif_src, 4611 u16 offset_tu) 4612 { 4613 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4614 const struct rtw89_port_reg *p = mac->port_base; 4615 u32 val, reg; 4616 4617 val = RTW89_PORT_OFFSET_TU_TO_32US(offset_tu); 4618 reg = rtw89_mac_reg_by_idx(rtwdev, p->tsf_sync + rtwvif_link->port * 4, 4619 rtwvif_link->mac_idx); 4620 4621 rtw89_write32_mask(rtwdev, reg, B_AX_SYNC_PORT_SRC, rtwvif_src->port); 4622 rtw89_write32_mask(rtwdev, reg, B_AX_SYNC_PORT_OFFSET_VAL, val); 4623 rtw89_write32_set(rtwdev, reg, B_AX_SYNC_NOW); 4624 } 4625 4626 static void rtw89_mac_port_tsf_sync_rand(struct rtw89_dev *rtwdev, 4627 
struct rtw89_vif_link *rtwvif_link, 4628 struct rtw89_vif_link *rtwvif_src, 4629 u8 offset, int *n_offset) 4630 { 4631 if (rtwvif_link->net_type != RTW89_NET_TYPE_AP_MODE || rtwvif_link == rtwvif_src) 4632 return; 4633 4634 if (rtwvif_link->rand_tsf_done) 4635 goto out; 4636 4637 /* adjust offset randomly to avoid beacon conflict */ 4638 offset = offset - offset / 4 + get_random_u32() % (offset / 2); 4639 rtw89_mac_port_tsf_sync(rtwdev, rtwvif_link, rtwvif_src, 4640 (*n_offset) * offset); 4641 4642 rtwvif_link->rand_tsf_done = true; 4643 4644 out: 4645 (*n_offset)++; 4646 } 4647 4648 static void rtw89_mac_port_tsf_resync_all(struct rtw89_dev *rtwdev) 4649 { 4650 struct rtw89_vif_link *src = NULL, *tmp; 4651 u8 offset = 100, vif_aps = 0; 4652 struct rtw89_vif *rtwvif; 4653 unsigned int link_id; 4654 int n_offset = 1; 4655 4656 rtw89_for_each_rtwvif(rtwdev, rtwvif) { 4657 rtw89_vif_for_each_link(rtwvif, tmp, link_id) { 4658 if (!src || tmp->net_type == RTW89_NET_TYPE_INFRA) 4659 src = tmp; 4660 if (tmp->net_type == RTW89_NET_TYPE_AP_MODE) 4661 vif_aps++; 4662 } 4663 } 4664 4665 if (vif_aps == 0) 4666 return; 4667 4668 offset /= (vif_aps + 1); 4669 4670 rtw89_for_each_rtwvif(rtwdev, rtwvif) 4671 rtw89_vif_for_each_link(rtwvif, tmp, link_id) 4672 rtw89_mac_port_tsf_sync_rand(rtwdev, tmp, src, offset, 4673 &n_offset); 4674 } 4675 4676 int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link) 4677 { 4678 int ret; 4679 4680 ret = rtw89_mac_port_update(rtwdev, rtwvif_link); 4681 if (ret) 4682 return ret; 4683 4684 rtw89_mac_dmac_tbl_init(rtwdev, rtwvif_link->mac_id); 4685 rtw89_mac_cmac_tbl_init(rtwdev, rtwvif_link->mac_id); 4686 4687 ret = rtw89_mac_set_macid_pause(rtwdev, rtwvif_link->mac_id, false); 4688 if (ret) 4689 return ret; 4690 4691 ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif_link, NULL, RTW89_ROLE_CREATE); 4692 if (ret) 4693 return ret; 4694 4695 ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif_link, NULL, true); 4696 if (ret) 4697 return ret; 4698 4699 ret = rtw89_cam_init(rtwdev, rtwvif_link); 4700 if (ret) 4701 return ret; 4702 4703 ret = rtw89_fw_h2c_cam(rtwdev, rtwvif_link, NULL, NULL); 4704 if (ret) 4705 return ret; 4706 4707 ret = rtw89_chip_h2c_default_cmac_tbl(rtwdev, rtwvif_link, NULL); 4708 if (ret) 4709 return ret; 4710 4711 ret = rtw89_chip_h2c_default_dmac_tbl(rtwdev, rtwvif_link, NULL); 4712 if (ret) 4713 return ret; 4714 4715 return 0; 4716 } 4717 4718 int rtw89_mac_vif_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link) 4719 { 4720 int ret; 4721 4722 ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif_link, NULL, RTW89_ROLE_REMOVE); 4723 if (ret) 4724 return ret; 4725 4726 rtw89_cam_deinit(rtwdev, rtwvif_link); 4727 4728 ret = rtw89_fw_h2c_cam(rtwdev, rtwvif_link, NULL, NULL); 4729 if (ret) 4730 return ret; 4731 4732 return 0; 4733 } 4734 4735 int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link) 4736 { 4737 u8 port = rtwvif_link->port; 4738 4739 if (port >= RTW89_PORT_NUM) 4740 return -EINVAL; 4741 4742 rtw89_mac_port_cfg_func_sw(rtwdev, rtwvif_link); 4743 rtw89_mac_port_cfg_tx_rpt(rtwdev, rtwvif_link, false); 4744 rtw89_mac_port_cfg_rx_rpt(rtwdev, rtwvif_link, false); 4745 rtw89_mac_port_cfg_net_type(rtwdev, rtwvif_link); 4746 rtw89_mac_port_cfg_bcn_prct(rtwdev, rtwvif_link); 4747 rtw89_mac_port_cfg_rx_sw(rtwdev, rtwvif_link); 4748 rtw89_mac_port_cfg_rx_sync_by_nettype(rtwdev, rtwvif_link); 4749 rtw89_mac_port_cfg_tx_sw_by_nettype(rtwdev, rtwvif_link); 4750 rtw89_mac_port_cfg_bcn_intv(rtwdev, 
rtwvif_link); 4751 rtw89_mac_port_cfg_hiq_win(rtwdev, rtwvif_link); 4752 rtw89_mac_port_cfg_hiq_dtim(rtwdev, rtwvif_link); 4753 rtw89_mac_port_cfg_hiq_drop(rtwdev, rtwvif_link); 4754 rtw89_mac_port_cfg_bcn_setup_time(rtwdev, rtwvif_link); 4755 rtw89_mac_port_cfg_bcn_hold_time(rtwdev, rtwvif_link); 4756 rtw89_mac_port_cfg_bcn_mask_area(rtwdev, rtwvif_link); 4757 rtw89_mac_port_cfg_tbtt_early(rtwdev, rtwvif_link); 4758 rtw89_mac_port_cfg_tbtt_shift(rtwdev, rtwvif_link); 4759 rtw89_mac_port_cfg_bss_color(rtwdev, rtwvif_link); 4760 rtw89_mac_port_cfg_mbssid(rtwdev, rtwvif_link); 4761 rtw89_mac_port_cfg_func_en(rtwdev, rtwvif_link, true); 4762 rtw89_mac_port_tsf_resync_all(rtwdev); 4763 fsleep(BCN_ERLY_SET_DLY); 4764 rtw89_mac_port_cfg_bcn_early(rtwdev, rtwvif_link); 4765 4766 return 0; 4767 } 4768 4769 int rtw89_mac_port_get_tsf(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 4770 u64 *tsf) 4771 { 4772 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4773 const struct rtw89_port_reg *p = mac->port_base; 4774 u32 tsf_low, tsf_high; 4775 int ret; 4776 4777 ret = rtw89_mac_check_mac_en(rtwdev, rtwvif_link->mac_idx, RTW89_CMAC_SEL); 4778 if (ret) 4779 return ret; 4780 4781 tsf_low = rtw89_read32_port(rtwdev, rtwvif_link, p->tsftr_l); 4782 tsf_high = rtw89_read32_port(rtwdev, rtwvif_link, p->tsftr_h); 4783 *tsf = (u64)tsf_high << 32 | tsf_low; 4784 4785 return 0; 4786 } 4787 4788 static void rtw89_mac_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy, 4789 struct cfg80211_bss *bss, 4790 void *data) 4791 { 4792 const struct cfg80211_bss_ies *ies; 4793 const struct element *elem; 4794 bool *tolerated = data; 4795 4796 rcu_read_lock(); 4797 ies = rcu_dereference(bss->ies); 4798 elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, ies->data, 4799 ies->len); 4800 4801 if (!elem || elem->datalen < 10 || 4802 !(elem->data[10] & WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT)) 4803 *tolerated = false; 4804 rcu_read_unlock(); 4805 } 4806 4807 void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev, 4808 struct rtw89_vif_link *rtwvif_link) 4809 { 4810 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 4811 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4812 struct ieee80211_hw *hw = rtwdev->hw; 4813 struct ieee80211_bss_conf *bss_conf; 4814 struct cfg80211_chan_def oper; 4815 bool tolerated = true; 4816 u32 reg; 4817 4818 rcu_read_lock(); 4819 4820 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 4821 if (!bss_conf->he_support || vif->type != NL80211_IFTYPE_STATION) { 4822 rcu_read_unlock(); 4823 return; 4824 } 4825 4826 oper = bss_conf->chanreq.oper; 4827 if (!(oper.chan->flags & IEEE80211_CHAN_RADAR)) { 4828 rcu_read_unlock(); 4829 return; 4830 } 4831 4832 rcu_read_unlock(); 4833 4834 cfg80211_bss_iter(hw->wiphy, &oper, 4835 rtw89_mac_check_he_obss_narrow_bw_ru_iter, 4836 &tolerated); 4837 4838 reg = rtw89_mac_reg_by_idx(rtwdev, mac->narrow_bw_ru_dis.addr, 4839 rtwvif_link->mac_idx); 4840 if (tolerated) 4841 rtw89_write32_clr(rtwdev, reg, mac->narrow_bw_ru_dis.mask); 4842 else 4843 rtw89_write32_set(rtwdev, reg, mac->narrow_bw_ru_dis.mask); 4844 } 4845 4846 void rtw89_mac_set_he_tb(struct rtw89_dev *rtwdev, 4847 struct rtw89_vif_link *rtwvif_link) 4848 { 4849 struct ieee80211_bss_conf *bss_conf; 4850 bool set; 4851 u32 reg; 4852 4853 if (rtwdev->chip->chip_gen != RTW89_CHIP_BE) 4854 return; 4855 4856 rcu_read_lock(); 4857 4858 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 4859 set = bss_conf->he_support && 
!bss_conf->eht_support; 4860 4861 rcu_read_unlock(); 4862 4863 reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_CLIENT_OM_CTRL, 4864 rtwvif_link->mac_idx); 4865 4866 if (set) 4867 rtw89_write32_set(rtwdev, reg, B_BE_TRIG_DIS_EHTTB); 4868 else 4869 rtw89_write32_clr(rtwdev, reg, B_BE_TRIG_DIS_EHTTB); 4870 } 4871 4872 void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link) 4873 { 4874 rtw89_mac_port_cfg_func_sw(rtwdev, rtwvif_link); 4875 4876 rtwvif_link->rand_tsf_done = false; 4877 } 4878 4879 int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link) 4880 { 4881 return rtw89_mac_vif_init(rtwdev, rtwvif_link); 4882 } 4883 4884 int rtw89_mac_remove_vif(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link) 4885 { 4886 return rtw89_mac_vif_deinit(rtwdev, rtwvif_link); 4887 } 4888 4889 static void 4890 rtw89_mac_c2h_macid_pause(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 4891 { 4892 } 4893 4894 static bool rtw89_is_op_chan(struct rtw89_dev *rtwdev, u8 band, u8 channel) 4895 { 4896 const struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 4897 4898 return band == op->band_type && channel == op->primary_channel; 4899 } 4900 4901 static void 4902 rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb, 4903 u32 len) 4904 { 4905 const struct rtw89_c2h_scanofld *c2h = 4906 (const struct rtw89_c2h_scanofld *)skb->data; 4907 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; 4908 struct rtw89_vif *rtwvif; 4909 struct rtw89_chan new; 4910 u16 actual_period, expect_period; 4911 u8 reason, status, tx_fail, band; 4912 u8 mac_idx, sw_def, fw_def; 4913 u8 ver = U8_MAX; 4914 u32 report_tsf; 4915 u16 chan; 4916 int ret; 4917 4918 if (!rtwvif_link) 4919 return; 4920 4921 rtwvif = rtwvif_link->rtwvif; 4922 4923 if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw)) 4924 ver = 0; 4925 4926 tx_fail = le32_get_bits(c2h->w5, RTW89_C2H_SCANOFLD_W5_TX_FAIL); 4927 status = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_STATUS); 4928 chan = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_PRI_CH); 4929 reason = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_RSN); 4930 band = le32_get_bits(c2h->w5, RTW89_C2H_SCANOFLD_W5_BAND); 4931 actual_period = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_PERIOD); 4932 mac_idx = le32_get_bits(c2h->w5, RTW89_C2H_SCANOFLD_W5_MAC_IDX); 4933 4934 4935 if (!(rtwdev->chip->support_bands & BIT(NL80211_BAND_6GHZ))) 4936 band = chan > 14 ? 
RTW89_BAND_5G : RTW89_BAND_2G; 4937 4938 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) { 4939 sw_def = le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_SW_DEF); 4940 fw_def = le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_FW_DEF); 4941 report_tsf = le32_get_bits(c2h->w7, RTW89_C2H_SCANOFLD_W7_REPORT_TSF); 4942 if (ver == 0) { 4943 expect_period = 4944 le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_EXPECT_PERIOD); 4945 } else { 4946 actual_period = le32_get_bits(c2h->w8, RTW89_C2H_SCANOFLD_W8_PERIOD_V1); 4947 expect_period = 4948 le32_get_bits(c2h->w8, RTW89_C2H_SCANOFLD_W8_EXPECT_PERIOD_V1); 4949 } 4950 4951 rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN, 4952 "sw_def: %d, fw_def: %d, tsf: %x, expect: %d\n", 4953 sw_def, fw_def, report_tsf, expect_period); 4954 } 4955 4956 rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN, 4957 "mac_idx[%d] band: %d, chan: %d, reason: %d, status: %d, tx_fail: %d, actual: %d\n", 4958 mac_idx, band, chan, reason, status, tx_fail, actual_period); 4959 4960 switch (reason) { 4961 case RTW89_SCAN_LEAVE_OP_NOTIFY: 4962 case RTW89_SCAN_LEAVE_CH_NOTIFY: 4963 if (rtw89_is_op_chan(rtwdev, band, chan)) { 4964 rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, false); 4965 ieee80211_stop_queues(rtwdev->hw); 4966 } 4967 return; 4968 case RTW89_SCAN_END_SCAN_NOTIFY: 4969 if (rtwdev->scan_info.abort) 4970 return; 4971 4972 if (rtwvif_link && rtwvif->scan_req && 4973 !list_empty(&rtwdev->scan_info.chan_list)) { 4974 ret = rtw89_hw_scan_offload(rtwdev, rtwvif_link, true); 4975 if (ret) { 4976 rtw89_hw_scan_abort(rtwdev, rtwvif_link); 4977 rtw89_warn(rtwdev, "HW scan failed: %d\n", ret); 4978 } 4979 } else { 4980 rtw89_hw_scan_complete(rtwdev, rtwvif_link, false); 4981 } 4982 break; 4983 case RTW89_SCAN_ENTER_OP_NOTIFY: 4984 case RTW89_SCAN_ENTER_CH_NOTIFY: 4985 if (rtw89_is_op_chan(rtwdev, band, chan)) { 4986 rtw89_assign_entity_chan(rtwdev, rtwvif_link->chanctx_idx, 4987 &rtwdev->scan_info.op_chan); 4988 rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true); 4989 ieee80211_wake_queues(rtwdev->hw); 4990 } else { 4991 rtw89_chan_create(&new, chan, chan, band, 4992 RTW89_CHANNEL_WIDTH_20); 4993 rtw89_assign_entity_chan(rtwdev, rtwvif_link->chanctx_idx, 4994 &new); 4995 } 4996 break; 4997 default: 4998 return; 4999 } 5000 } 5001 5002 static void 5003 rtw89_mac_bcn_fltr_rpt(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 5004 struct sk_buff *skb) 5005 { 5006 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 5007 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 5008 enum nl80211_cqm_rssi_threshold_event nl_event; 5009 const struct rtw89_c2h_mac_bcnfltr_rpt *c2h = 5010 (const struct rtw89_c2h_mac_bcnfltr_rpt *)skb->data; 5011 u8 type, event, mac_id; 5012 s8 sig; 5013 5014 type = le32_get_bits(c2h->w2, RTW89_C2H_MAC_BCNFLTR_RPT_W2_TYPE); 5015 sig = le32_get_bits(c2h->w2, RTW89_C2H_MAC_BCNFLTR_RPT_W2_MA) - MAX_RSSI; 5016 event = le32_get_bits(c2h->w2, RTW89_C2H_MAC_BCNFLTR_RPT_W2_EVENT); 5017 mac_id = le32_get_bits(c2h->w2, RTW89_C2H_MAC_BCNFLTR_RPT_W2_MACID); 5018 5019 if (mac_id != rtwvif_link->mac_id) 5020 return; 5021 5022 rtw89_debug(rtwdev, RTW89_DBG_FW, 5023 "C2H bcnfltr rpt macid: %d, type: %d, ma: %d, event: %d\n", 5024 mac_id, type, sig, event); 5025 5026 switch (type) { 5027 case RTW89_BCN_FLTR_BEACON_LOSS: 5028 if (!rtwdev->scanning && !rtwvif->offchan && 5029 !rtwvif_link->noa_once.in_duration) 5030 ieee80211_connection_loss(vif); 5031 else 5032 rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, rtwvif_link, true); 5033 return; 5034 case RTW89_BCN_FLTR_NOTIFY: 5035 nl_event = 
NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH; 5036 break; 5037 case RTW89_BCN_FLTR_RSSI: 5038 if (event == RTW89_BCN_FLTR_RSSI_LOW) 5039 nl_event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW; 5040 else if (event == RTW89_BCN_FLTR_RSSI_HIGH) 5041 nl_event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH; 5042 else 5043 return; 5044 break; 5045 default: 5046 return; 5047 } 5048 5049 ieee80211_cqm_rssi_notify(vif, nl_event, sig, GFP_KERNEL); 5050 } 5051 5052 static void 5053 rtw89_mac_c2h_bcn_fltr_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, 5054 u32 len) 5055 { 5056 struct rtw89_vif_link *rtwvif_link; 5057 struct rtw89_vif *rtwvif; 5058 unsigned int link_id; 5059 5060 rtw89_for_each_rtwvif(rtwdev, rtwvif) 5061 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) 5062 rtw89_mac_bcn_fltr_rpt(rtwdev, rtwvif_link, c2h); 5063 } 5064 5065 static void 5066 rtw89_mac_c2h_rec_ack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 5067 { 5068 /* N.B. This will run in interrupt context. */ 5069 5070 rtw89_debug(rtwdev, RTW89_DBG_FW, 5071 "C2H rev ack recv, cat: %d, class: %d, func: %d, seq : %d\n", 5072 RTW89_GET_MAC_C2H_REV_ACK_CAT(c2h->data), 5073 RTW89_GET_MAC_C2H_REV_ACK_CLASS(c2h->data), 5074 RTW89_GET_MAC_C2H_REV_ACK_FUNC(c2h->data), 5075 RTW89_GET_MAC_C2H_REV_ACK_H2C_SEQ(c2h->data)); 5076 } 5077 5078 static void 5079 rtw89_mac_c2h_done_ack(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h, u32 len) 5080 { 5081 /* N.B. This will run in interrupt context. */ 5082 struct rtw89_wait_info *fw_ofld_wait = &rtwdev->mac.fw_ofld_wait; 5083 struct rtw89_wait_info *ps_wait = &rtwdev->mac.ps_wait; 5084 const struct rtw89_c2h_done_ack *c2h = 5085 (const struct rtw89_c2h_done_ack *)skb_c2h->data; 5086 u8 h2c_cat = le32_get_bits(c2h->w2, RTW89_C2H_DONE_ACK_W2_CAT); 5087 u8 h2c_class = le32_get_bits(c2h->w2, RTW89_C2H_DONE_ACK_W2_CLASS); 5088 u8 h2c_func = le32_get_bits(c2h->w2, RTW89_C2H_DONE_ACK_W2_FUNC); 5089 u8 h2c_return = le32_get_bits(c2h->w2, RTW89_C2H_DONE_ACK_W2_H2C_RETURN); 5090 u8 h2c_seq = le32_get_bits(c2h->w2, RTW89_C2H_DONE_ACK_W2_H2C_SEQ); 5091 struct rtw89_completion_data data = {}; 5092 unsigned int cond; 5093 5094 rtw89_debug(rtwdev, RTW89_DBG_FW, 5095 "C2H done ack recv, cat: %d, class: %d, func: %d, ret: %d, seq : %d\n", 5096 h2c_cat, h2c_class, h2c_func, h2c_return, h2c_seq); 5097 5098 if (h2c_cat != H2C_CAT_MAC) 5099 return; 5100 5101 switch (h2c_class) { 5102 default: 5103 return; 5104 case H2C_CL_MAC_PS: 5105 switch (h2c_func) { 5106 default: 5107 return; 5108 case H2C_FUNC_IPS_CFG: 5109 cond = RTW89_PS_WAIT_COND_IPS_CFG; 5110 break; 5111 } 5112 5113 data.err = !!h2c_return; 5114 rtw89_complete_cond(ps_wait, cond, &data); 5115 return; 5116 case H2C_CL_MAC_FW_OFLD: 5117 switch (h2c_func) { 5118 default: 5119 return; 5120 case H2C_FUNC_ADD_SCANOFLD_CH: 5121 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH; 5122 h2c_return &= RTW89_C2H_SCAN_DONE_ACK_RETURN; 5123 break; 5124 case H2C_FUNC_SCANOFLD: 5125 cond = RTW89_SCANOFLD_WAIT_COND_START; 5126 break; 5127 case H2C_FUNC_SCANOFLD_BE: 5128 cond = RTW89_SCANOFLD_BE_WAIT_COND_START; 5129 h2c_return &= RTW89_C2H_SCAN_DONE_ACK_RETURN; 5130 break; 5131 } 5132 5133 data.err = !!h2c_return; 5134 rtw89_complete_cond(fw_ofld_wait, cond, &data); 5135 return; 5136 } 5137 } 5138 5139 static void 5140 rtw89_mac_c2h_log(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 5141 { 5142 rtw89_fw_log_dump(rtwdev, c2h->data, len); 5143 } 5144 5145 static void 5146 rtw89_mac_c2h_bcn_cnt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 5147 { 5148 } 5149 5150 static 
void 5151 rtw89_mac_c2h_pkt_ofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h, 5152 u32 len) 5153 { 5154 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5155 const struct rtw89_c2h_pkt_ofld_rsp *c2h = 5156 (const struct rtw89_c2h_pkt_ofld_rsp *)skb_c2h->data; 5157 u16 pkt_len = le32_get_bits(c2h->w2, RTW89_C2H_PKT_OFLD_RSP_W2_PTK_LEN); 5158 u8 pkt_id = le32_get_bits(c2h->w2, RTW89_C2H_PKT_OFLD_RSP_W2_PTK_ID); 5159 u8 pkt_op = le32_get_bits(c2h->w2, RTW89_C2H_PKT_OFLD_RSP_W2_PTK_OP); 5160 struct rtw89_completion_data data = {}; 5161 unsigned int cond; 5162 5163 rtw89_debug(rtwdev, RTW89_DBG_FW, "pkt ofld rsp: id %d op %d len %d\n", 5164 pkt_id, pkt_op, pkt_len); 5165 5166 data.err = !pkt_len; 5167 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(pkt_id, pkt_op); 5168 5169 rtw89_complete_cond(wait, cond, &data); 5170 } 5171 5172 static void 5173 rtw89_mac_c2h_tx_duty_rpt(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h, u32 len) 5174 { 5175 struct rtw89_c2h_tx_duty_rpt *c2h = 5176 (struct rtw89_c2h_tx_duty_rpt *)skb_c2h->data; 5177 u8 err; 5178 5179 err = le32_get_bits(c2h->w2, RTW89_C2H_TX_DUTY_RPT_W2_TIMER_ERR); 5180 5181 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, "C2H TX duty rpt with err=%d\n", err); 5182 } 5183 5184 static void 5185 rtw89_mac_c2h_tsf32_toggle_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, 5186 u32 len) 5187 { 5188 rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_TSF32_TOGGLE_CHANGE); 5189 } 5190 5191 static void 5192 rtw89_mac_c2h_mcc_rcv_ack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 5193 { 5194 u8 group = RTW89_GET_MAC_C2H_MCC_RCV_ACK_GROUP(c2h->data); 5195 u8 func = RTW89_GET_MAC_C2H_MCC_RCV_ACK_H2C_FUNC(c2h->data); 5196 5197 switch (func) { 5198 case H2C_FUNC_ADD_MCC: 5199 case H2C_FUNC_START_MCC: 5200 case H2C_FUNC_STOP_MCC: 5201 case H2C_FUNC_DEL_MCC_GROUP: 5202 case H2C_FUNC_RESET_MCC_GROUP: 5203 case H2C_FUNC_MCC_REQ_TSF: 5204 case H2C_FUNC_MCC_MACID_BITMAP: 5205 case H2C_FUNC_MCC_SYNC: 5206 case H2C_FUNC_MCC_SET_DURATION: 5207 break; 5208 default: 5209 rtw89_debug(rtwdev, RTW89_DBG_CHAN, 5210 "invalid MCC C2H RCV ACK: func %d\n", func); 5211 return; 5212 } 5213 5214 rtw89_debug(rtwdev, RTW89_DBG_CHAN, 5215 "MCC C2H RCV ACK: group %d, func %d\n", group, func); 5216 } 5217 5218 static void 5219 rtw89_mac_c2h_mcc_req_ack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 5220 { 5221 u8 group = RTW89_GET_MAC_C2H_MCC_REQ_ACK_GROUP(c2h->data); 5222 u8 func = RTW89_GET_MAC_C2H_MCC_REQ_ACK_H2C_FUNC(c2h->data); 5223 u8 retcode = RTW89_GET_MAC_C2H_MCC_REQ_ACK_H2C_RETURN(c2h->data); 5224 struct rtw89_completion_data data = {}; 5225 unsigned int cond; 5226 bool next = false; 5227 5228 switch (func) { 5229 case H2C_FUNC_MCC_REQ_TSF: 5230 next = true; 5231 break; 5232 case H2C_FUNC_MCC_MACID_BITMAP: 5233 case H2C_FUNC_MCC_SYNC: 5234 case H2C_FUNC_MCC_SET_DURATION: 5235 break; 5236 case H2C_FUNC_ADD_MCC: 5237 case H2C_FUNC_START_MCC: 5238 case H2C_FUNC_STOP_MCC: 5239 case H2C_FUNC_DEL_MCC_GROUP: 5240 case H2C_FUNC_RESET_MCC_GROUP: 5241 default: 5242 rtw89_debug(rtwdev, RTW89_DBG_CHAN, 5243 "invalid MCC C2H REQ ACK: func %d\n", func); 5244 return; 5245 } 5246 5247 rtw89_debug(rtwdev, RTW89_DBG_CHAN, 5248 "MCC C2H REQ ACK: group %d, func %d, return code %d\n", 5249 group, func, retcode); 5250 5251 if (!retcode && next) 5252 return; 5253 5254 data.err = !!retcode; 5255 cond = RTW89_MCC_WAIT_COND(group, func); 5256 rtw89_complete_cond(&rtwdev->mcc.wait, cond, &data); 5257 } 5258 5259 static void 5260 rtw89_mac_c2h_mcc_tsf_rpt(struct rtw89_dev 
*rtwdev, struct sk_buff *c2h, u32 len) 5261 { 5262 u8 group = RTW89_GET_MAC_C2H_MCC_TSF_RPT_GROUP(c2h->data); 5263 struct rtw89_completion_data data = {}; 5264 struct rtw89_mac_mcc_tsf_rpt *rpt; 5265 unsigned int cond; 5266 5267 rpt = (struct rtw89_mac_mcc_tsf_rpt *)data.buf; 5268 rpt->macid_x = RTW89_GET_MAC_C2H_MCC_TSF_RPT_MACID_X(c2h->data); 5269 rpt->macid_y = RTW89_GET_MAC_C2H_MCC_TSF_RPT_MACID_Y(c2h->data); 5270 rpt->tsf_x_low = RTW89_GET_MAC_C2H_MCC_TSF_RPT_TSF_LOW_X(c2h->data); 5271 rpt->tsf_x_high = RTW89_GET_MAC_C2H_MCC_TSF_RPT_TSF_HIGH_X(c2h->data); 5272 rpt->tsf_y_low = RTW89_GET_MAC_C2H_MCC_TSF_RPT_TSF_LOW_Y(c2h->data); 5273 rpt->tsf_y_high = RTW89_GET_MAC_C2H_MCC_TSF_RPT_TSF_HIGH_Y(c2h->data); 5274 5275 rtw89_debug(rtwdev, RTW89_DBG_CHAN, 5276 "MCC C2H TSF RPT: macid %d> %llu, macid %d> %llu\n", 5277 rpt->macid_x, (u64)rpt->tsf_x_high << 32 | rpt->tsf_x_low, 5278 rpt->macid_y, (u64)rpt->tsf_y_high << 32 | rpt->tsf_y_low); 5279 5280 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_REQ_TSF); 5281 rtw89_complete_cond(&rtwdev->mcc.wait, cond, &data); 5282 } 5283 5284 static void 5285 rtw89_mac_c2h_mcc_status_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 5286 { 5287 u8 group = RTW89_GET_MAC_C2H_MCC_STATUS_RPT_GROUP(c2h->data); 5288 u8 macid = RTW89_GET_MAC_C2H_MCC_STATUS_RPT_MACID(c2h->data); 5289 u8 status = RTW89_GET_MAC_C2H_MCC_STATUS_RPT_STATUS(c2h->data); 5290 u32 tsf_low = RTW89_GET_MAC_C2H_MCC_STATUS_RPT_TSF_LOW(c2h->data); 5291 u32 tsf_high = RTW89_GET_MAC_C2H_MCC_STATUS_RPT_TSF_HIGH(c2h->data); 5292 struct rtw89_completion_data data = {}; 5293 unsigned int cond; 5294 bool rsp = true; 5295 bool err; 5296 u8 func; 5297 5298 switch (status) { 5299 case RTW89_MAC_MCC_ADD_ROLE_OK: 5300 case RTW89_MAC_MCC_ADD_ROLE_FAIL: 5301 func = H2C_FUNC_ADD_MCC; 5302 err = status == RTW89_MAC_MCC_ADD_ROLE_FAIL; 5303 break; 5304 case RTW89_MAC_MCC_START_GROUP_OK: 5305 case RTW89_MAC_MCC_START_GROUP_FAIL: 5306 func = H2C_FUNC_START_MCC; 5307 err = status == RTW89_MAC_MCC_START_GROUP_FAIL; 5308 break; 5309 case RTW89_MAC_MCC_STOP_GROUP_OK: 5310 case RTW89_MAC_MCC_STOP_GROUP_FAIL: 5311 func = H2C_FUNC_STOP_MCC; 5312 err = status == RTW89_MAC_MCC_STOP_GROUP_FAIL; 5313 break; 5314 case RTW89_MAC_MCC_DEL_GROUP_OK: 5315 case RTW89_MAC_MCC_DEL_GROUP_FAIL: 5316 func = H2C_FUNC_DEL_MCC_GROUP; 5317 err = status == RTW89_MAC_MCC_DEL_GROUP_FAIL; 5318 break; 5319 case RTW89_MAC_MCC_RESET_GROUP_OK: 5320 case RTW89_MAC_MCC_RESET_GROUP_FAIL: 5321 func = H2C_FUNC_RESET_MCC_GROUP; 5322 err = status == RTW89_MAC_MCC_RESET_GROUP_FAIL; 5323 break; 5324 case RTW89_MAC_MCC_SWITCH_CH_OK: 5325 case RTW89_MAC_MCC_SWITCH_CH_FAIL: 5326 case RTW89_MAC_MCC_TXNULL0_OK: 5327 case RTW89_MAC_MCC_TXNULL0_FAIL: 5328 case RTW89_MAC_MCC_TXNULL1_OK: 5329 case RTW89_MAC_MCC_TXNULL1_FAIL: 5330 case RTW89_MAC_MCC_SWITCH_EARLY: 5331 case RTW89_MAC_MCC_TBTT: 5332 case RTW89_MAC_MCC_DURATION_START: 5333 case RTW89_MAC_MCC_DURATION_END: 5334 rsp = false; 5335 break; 5336 default: 5337 rtw89_debug(rtwdev, RTW89_DBG_CHAN, 5338 "invalid MCC C2H STS RPT: status %d\n", status); 5339 return; 5340 } 5341 5342 rtw89_debug(rtwdev, RTW89_DBG_CHAN, 5343 "MCC C2H STS RPT: group %d, macid %d, status %d, tsf %llu\n", 5344 group, macid, status, (u64)tsf_high << 32 | tsf_low); 5345 5346 if (!rsp) 5347 return; 5348 5349 data.err = err; 5350 cond = RTW89_MCC_WAIT_COND(group, func); 5351 rtw89_complete_cond(&rtwdev->mcc.wait, cond, &data); 5352 } 5353 5354 static void 5355 rtw89_mac_c2h_mrc_tsf_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, 
u32 len) 5356 { 5357 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 5358 const struct rtw89_c2h_mrc_tsf_rpt *c2h_rpt; 5359 struct rtw89_completion_data data = {}; 5360 struct rtw89_mac_mrc_tsf_rpt *rpt; 5361 unsigned int i; 5362 5363 c2h_rpt = (const struct rtw89_c2h_mrc_tsf_rpt *)c2h->data; 5364 rpt = (struct rtw89_mac_mrc_tsf_rpt *)data.buf; 5365 rpt->num = min_t(u8, RTW89_MAC_MRC_MAX_REQ_TSF_NUM, 5366 le32_get_bits(c2h_rpt->w2, 5367 RTW89_C2H_MRC_TSF_RPT_W2_REQ_TSF_NUM)); 5368 5369 for (i = 0; i < rpt->num; i++) { 5370 u32 tsf_high = le32_to_cpu(c2h_rpt->infos[i].tsf_high); 5371 u32 tsf_low = le32_to_cpu(c2h_rpt->infos[i].tsf_low); 5372 5373 rpt->tsfs[i] = (u64)tsf_high << 32 | tsf_low; 5374 5375 rtw89_debug(rtwdev, RTW89_DBG_CHAN, 5376 "MRC C2H TSF RPT: index %u> %llu\n", 5377 i, rpt->tsfs[i]); 5378 } 5379 5380 rtw89_complete_cond(wait, RTW89_MRC_WAIT_COND_REQ_TSF, &data); 5381 } 5382 5383 static void 5384 rtw89_mac_c2h_wow_aoac_rpt(struct rtw89_dev *rtwdev, struct sk_buff *skb, u32 len) 5385 { 5386 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 5387 struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt; 5388 struct rtw89_wait_info *wait = &rtw_wow->wait; 5389 const struct rtw89_c2h_wow_aoac_report *c2h = 5390 (const struct rtw89_c2h_wow_aoac_report *)skb->data; 5391 struct rtw89_completion_data data = {}; 5392 5393 aoac_rpt->rpt_ver = c2h->rpt_ver; 5394 aoac_rpt->sec_type = c2h->sec_type; 5395 aoac_rpt->key_idx = c2h->key_idx; 5396 aoac_rpt->pattern_idx = c2h->pattern_idx; 5397 aoac_rpt->rekey_ok = u8_get_bits(c2h->rekey_ok, 5398 RTW89_C2H_WOW_AOAC_RPT_REKEY_IDX); 5399 memcpy(aoac_rpt->ptk_tx_iv, c2h->ptk_tx_iv, sizeof(aoac_rpt->ptk_tx_iv)); 5400 memcpy(aoac_rpt->eapol_key_replay_count, c2h->eapol_key_replay_count, 5401 sizeof(aoac_rpt->eapol_key_replay_count)); 5402 memcpy(aoac_rpt->gtk, c2h->gtk, sizeof(aoac_rpt->gtk)); 5403 memcpy(aoac_rpt->ptk_rx_iv, c2h->ptk_rx_iv, sizeof(aoac_rpt->ptk_rx_iv)); 5404 memcpy(aoac_rpt->gtk_rx_iv, c2h->gtk_rx_iv, sizeof(aoac_rpt->gtk_rx_iv)); 5405 aoac_rpt->igtk_key_id = le64_to_cpu(c2h->igtk_key_id); 5406 aoac_rpt->igtk_ipn = le64_to_cpu(c2h->igtk_ipn); 5407 memcpy(aoac_rpt->igtk, c2h->igtk, sizeof(aoac_rpt->igtk)); 5408 5409 rtw89_complete_cond(wait, RTW89_WOW_WAIT_COND_AOAC, &data); 5410 } 5411 5412 static void 5413 rtw89_mac_c2h_mlo_link_cfg_stat(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 5414 { 5415 const struct rtw89_c2h_mlo_link_cfg_rpt *c2h_rpt; 5416 struct rtw89_wait_info *wait = &rtwdev->mlo.wait; 5417 struct rtw89_completion_data data = {}; 5418 unsigned int cond; 5419 u16 mac_id; 5420 u8 status; 5421 5422 c2h_rpt = (const struct rtw89_c2h_mlo_link_cfg_rpt *)c2h->data; 5423 5424 mac_id = le32_get_bits(c2h_rpt->w2, RTW89_C2H_MLO_LINK_CFG_RPT_W2_MACID); 5425 status = le32_get_bits(c2h_rpt->w2, RTW89_C2H_MLO_LINK_CFG_RPT_W2_STATUS); 5426 5427 data.err = status == RTW89_C2H_MLO_LINK_CFG_ROLE_NOT_EXIST || 5428 status == RTW89_C2H_MLO_LINK_CFG_RUNNING; 5429 cond = RTW89_MLO_WAIT_COND(mac_id, H2C_FUNC_MLO_LINK_CFG); 5430 rtw89_complete_cond(wait, cond, &data); 5431 } 5432 5433 static void 5434 rtw89_mac_c2h_mrc_status_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 5435 { 5436 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 5437 const struct rtw89_c2h_mrc_status_rpt *c2h_rpt; 5438 struct rtw89_completion_data data = {}; 5439 enum rtw89_mac_mrc_status status; 5440 unsigned int cond; 5441 bool next = false; 5442 u32 tsf_high; 5443 u32 tsf_low; 5444 u8 sch_idx; 5445 u8 func; 5446 5447 c2h_rpt = (const struct 
rtw89_c2h_mrc_status_rpt *)c2h->data; 5448 sch_idx = le32_get_bits(c2h_rpt->w2, RTW89_C2H_MRC_STATUS_RPT_W2_SCH_IDX); 5449 status = le32_get_bits(c2h_rpt->w2, RTW89_C2H_MRC_STATUS_RPT_W2_STATUS); 5450 tsf_high = le32_to_cpu(c2h_rpt->tsf_high); 5451 tsf_low = le32_to_cpu(c2h_rpt->tsf_low); 5452 5453 switch (status) { 5454 case RTW89_MAC_MRC_START_SCH_OK: 5455 func = H2C_FUNC_START_MRC; 5456 break; 5457 case RTW89_MAC_MRC_STOP_SCH_OK: 5458 /* H2C_FUNC_DEL_MRC without STOP_ONLY, so wait for DEL_SCH_OK */ 5459 func = H2C_FUNC_DEL_MRC; 5460 next = true; 5461 break; 5462 case RTW89_MAC_MRC_DEL_SCH_OK: 5463 func = H2C_FUNC_DEL_MRC; 5464 break; 5465 case RTW89_MAC_MRC_EMPTY_SCH_FAIL: 5466 rtw89_debug(rtwdev, RTW89_DBG_CHAN, 5467 "MRC C2H STS RPT: empty sch fail\n"); 5468 return; 5469 case RTW89_MAC_MRC_ROLE_NOT_EXIST_FAIL: 5470 rtw89_debug(rtwdev, RTW89_DBG_CHAN, 5471 "MRC C2H STS RPT: role not exist fail\n"); 5472 return; 5473 case RTW89_MAC_MRC_DATA_NOT_FOUND_FAIL: 5474 rtw89_debug(rtwdev, RTW89_DBG_CHAN, 5475 "MRC C2H STS RPT: data not found fail\n"); 5476 return; 5477 case RTW89_MAC_MRC_GET_NEXT_SLOT_FAIL: 5478 rtw89_debug(rtwdev, RTW89_DBG_CHAN, 5479 "MRC C2H STS RPT: get next slot fail\n"); 5480 return; 5481 case RTW89_MAC_MRC_ALT_ROLE_FAIL: 5482 rtw89_debug(rtwdev, RTW89_DBG_CHAN, 5483 "MRC C2H STS RPT: alt role fail\n"); 5484 return; 5485 case RTW89_MAC_MRC_ADD_PSTIMER_FAIL: 5486 rtw89_debug(rtwdev, RTW89_DBG_CHAN, 5487 "MRC C2H STS RPT: add ps timer fail\n"); 5488 return; 5489 case RTW89_MAC_MRC_MALLOC_FAIL: 5490 rtw89_debug(rtwdev, RTW89_DBG_CHAN, 5491 "MRC C2H STS RPT: malloc fail\n"); 5492 return; 5493 case RTW89_MAC_MRC_SWITCH_CH_FAIL: 5494 rtw89_debug(rtwdev, RTW89_DBG_CHAN, 5495 "MRC C2H STS RPT: switch ch fail\n"); 5496 return; 5497 case RTW89_MAC_MRC_TXNULL0_FAIL: 5498 rtw89_debug(rtwdev, RTW89_DBG_CHAN, 5499 "MRC C2H STS RPT: tx null-0 fail\n"); 5500 return; 5501 case RTW89_MAC_MRC_PORT_FUNC_EN_FAIL: 5502 rtw89_debug(rtwdev, RTW89_DBG_CHAN, 5503 "MRC C2H STS RPT: port func en fail\n"); 5504 return; 5505 default: 5506 rtw89_debug(rtwdev, RTW89_DBG_CHAN, 5507 "invalid MRC C2H STS RPT: status %d\n", status); 5508 return; 5509 } 5510 5511 rtw89_debug(rtwdev, RTW89_DBG_CHAN, 5512 "MRC C2H STS RPT: sch_idx %d, status %d, tsf %llu\n", 5513 sch_idx, status, (u64)tsf_high << 32 | tsf_low); 5514 5515 if (next) 5516 return; 5517 5518 cond = RTW89_MRC_WAIT_COND(sch_idx, func); 5519 rtw89_complete_cond(wait, cond, &data); 5520 } 5521 5522 static void 5523 rtw89_mac_c2h_pwr_int_notify(struct rtw89_dev *rtwdev, struct sk_buff *skb, u32 len) 5524 { 5525 const struct rtw89_c2h_pwr_int_notify *c2h; 5526 struct rtw89_sta_link *rtwsta_link; 5527 struct ieee80211_sta *sta; 5528 struct rtw89_sta *rtwsta; 5529 u16 macid; 5530 bool ps; 5531 5532 c2h = (const struct rtw89_c2h_pwr_int_notify *)skb->data; 5533 macid = le32_get_bits(c2h->w2, RTW89_C2H_PWR_INT_NOTIFY_W2_MACID); 5534 ps = le32_get_bits(c2h->w2, RTW89_C2H_PWR_INT_NOTIFY_W2_PWR_STATUS); 5535 5536 rcu_read_lock(); 5537 5538 rtwsta_link = rtw89_assoc_link_rcu_dereference(rtwdev, macid); 5539 if (unlikely(!rtwsta_link)) 5540 goto out; 5541 5542 rtwsta = rtwsta_link->rtwsta; 5543 if (ps) 5544 set_bit(RTW89_REMOTE_STA_IN_PS, rtwsta->flags); 5545 else 5546 clear_bit(RTW89_REMOTE_STA_IN_PS, rtwsta->flags); 5547 5548 sta = rtwsta_to_sta(rtwsta); 5549 ieee80211_sta_ps_transition(sta, ps); 5550 5551 out: 5552 rcu_read_unlock(); 5553 } 5554 5555 static 5556 void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev, 5557 struct sk_buff *c2h, u32 
len) = { 5558 [RTW89_MAC_C2H_FUNC_EFUSE_DUMP] = NULL, 5559 [RTW89_MAC_C2H_FUNC_READ_RSP] = NULL, 5560 [RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP] = rtw89_mac_c2h_pkt_ofld_rsp, 5561 [RTW89_MAC_C2H_FUNC_BCN_RESEND] = NULL, 5562 [RTW89_MAC_C2H_FUNC_MACID_PAUSE] = rtw89_mac_c2h_macid_pause, 5563 [RTW89_MAC_C2H_FUNC_SCANOFLD_RSP] = rtw89_mac_c2h_scanofld_rsp, 5564 [RTW89_MAC_C2H_FUNC_TX_DUTY_RPT] = rtw89_mac_c2h_tx_duty_rpt, 5565 [RTW89_MAC_C2H_FUNC_TSF32_TOGL_RPT] = rtw89_mac_c2h_tsf32_toggle_rpt, 5566 [RTW89_MAC_C2H_FUNC_BCNFLTR_RPT] = rtw89_mac_c2h_bcn_fltr_rpt, 5567 }; 5568 5569 static 5570 void (* const rtw89_mac_c2h_info_handler[])(struct rtw89_dev *rtwdev, 5571 struct sk_buff *c2h, u32 len) = { 5572 [RTW89_MAC_C2H_FUNC_REC_ACK] = rtw89_mac_c2h_rec_ack, 5573 [RTW89_MAC_C2H_FUNC_DONE_ACK] = rtw89_mac_c2h_done_ack, 5574 [RTW89_MAC_C2H_FUNC_C2H_LOG] = rtw89_mac_c2h_log, 5575 [RTW89_MAC_C2H_FUNC_BCN_CNT] = rtw89_mac_c2h_bcn_cnt, 5576 }; 5577 5578 static 5579 void (* const rtw89_mac_c2h_mcc_handler[])(struct rtw89_dev *rtwdev, 5580 struct sk_buff *c2h, u32 len) = { 5581 [RTW89_MAC_C2H_FUNC_MCC_RCV_ACK] = rtw89_mac_c2h_mcc_rcv_ack, 5582 [RTW89_MAC_C2H_FUNC_MCC_REQ_ACK] = rtw89_mac_c2h_mcc_req_ack, 5583 [RTW89_MAC_C2H_FUNC_MCC_TSF_RPT] = rtw89_mac_c2h_mcc_tsf_rpt, 5584 [RTW89_MAC_C2H_FUNC_MCC_STATUS_RPT] = rtw89_mac_c2h_mcc_status_rpt, 5585 }; 5586 5587 static 5588 void (* const rtw89_mac_c2h_mlo_handler[])(struct rtw89_dev *rtwdev, 5589 struct sk_buff *c2h, u32 len) = { 5590 [RTW89_MAC_C2H_FUNC_MLO_GET_TBL] = NULL, 5591 [RTW89_MAC_C2H_FUNC_MLO_EMLSR_TRANS_DONE] = NULL, 5592 [RTW89_MAC_C2H_FUNC_MLO_EMLSR_STA_CFG_DONE] = NULL, 5593 [RTW89_MAC_C2H_FUNC_MCMLO_RELINK_RPT] = NULL, 5594 [RTW89_MAC_C2H_FUNC_MCMLO_SN_SYNC_RPT] = NULL, 5595 [RTW89_MAC_C2H_FUNC_MLO_LINK_CFG_STAT] = rtw89_mac_c2h_mlo_link_cfg_stat, 5596 [RTW89_MAC_C2H_FUNC_MLO_DM_DBG_DUMP] = NULL, 5597 }; 5598 5599 static 5600 void (* const rtw89_mac_c2h_mrc_handler[])(struct rtw89_dev *rtwdev, 5601 struct sk_buff *c2h, u32 len) = { 5602 [RTW89_MAC_C2H_FUNC_MRC_TSF_RPT] = rtw89_mac_c2h_mrc_tsf_rpt, 5603 [RTW89_MAC_C2H_FUNC_MRC_STATUS_RPT] = rtw89_mac_c2h_mrc_status_rpt, 5604 }; 5605 5606 static 5607 void (* const rtw89_mac_c2h_wow_handler[])(struct rtw89_dev *rtwdev, 5608 struct sk_buff *c2h, u32 len) = { 5609 [RTW89_MAC_C2H_FUNC_AOAC_REPORT] = rtw89_mac_c2h_wow_aoac_rpt, 5610 }; 5611 5612 static 5613 void (* const rtw89_mac_c2h_ap_handler[])(struct rtw89_dev *rtwdev, 5614 struct sk_buff *c2h, u32 len) = { 5615 [RTW89_MAC_C2H_FUNC_PWR_INT_NOTIFY] = rtw89_mac_c2h_pwr_int_notify, 5616 }; 5617 5618 static void rtw89_mac_c2h_scanofld_rsp_atomic(struct rtw89_dev *rtwdev, 5619 struct sk_buff *skb) 5620 { 5621 const struct rtw89_c2h_scanofld *c2h = 5622 (const struct rtw89_c2h_scanofld *)skb->data; 5623 struct rtw89_wait_info *fw_ofld_wait = &rtwdev->mac.fw_ofld_wait; 5624 struct rtw89_completion_data data = {}; 5625 unsigned int cond; 5626 u8 status, reason; 5627 5628 status = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_STATUS); 5629 reason = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_RSN); 5630 data.err = status != RTW89_SCAN_STATUS_SUCCESS; 5631 5632 if (reason == RTW89_SCAN_END_SCAN_NOTIFY) { 5633 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) 5634 cond = RTW89_SCANOFLD_BE_WAIT_COND_STOP; 5635 else 5636 cond = RTW89_SCANOFLD_WAIT_COND_STOP; 5637 5638 rtw89_complete_cond(fw_ofld_wait, cond, &data); 5639 } 5640 } 5641 5642 bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, struct sk_buff *c2h, 5643 u8 class, u8 func) 5644 { 5645 switch (class) { 
5646 default: 5647 return false; 5648 case RTW89_MAC_C2H_CLASS_INFO: 5649 switch (func) { 5650 default: 5651 return false; 5652 case RTW89_MAC_C2H_FUNC_REC_ACK: 5653 case RTW89_MAC_C2H_FUNC_DONE_ACK: 5654 return true; 5655 } 5656 case RTW89_MAC_C2H_CLASS_OFLD: 5657 switch (func) { 5658 default: 5659 return false; 5660 case RTW89_MAC_C2H_FUNC_SCANOFLD_RSP: 5661 rtw89_mac_c2h_scanofld_rsp_atomic(rtwdev, c2h); 5662 return false; 5663 case RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP: 5664 return true; 5665 } 5666 case RTW89_MAC_C2H_CLASS_MCC: 5667 return true; 5668 case RTW89_MAC_C2H_CLASS_MLO: 5669 return true; 5670 case RTW89_MAC_C2H_CLASS_MRC: 5671 return true; 5672 case RTW89_MAC_C2H_CLASS_WOW: 5673 return true; 5674 case RTW89_MAC_C2H_CLASS_AP: 5675 switch (func) { 5676 default: 5677 return false; 5678 case RTW89_MAC_C2H_FUNC_PWR_INT_NOTIFY: 5679 return true; 5680 } 5681 } 5682 } 5683 5684 void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb, 5685 u32 len, u8 class, u8 func) 5686 { 5687 void (*handler)(struct rtw89_dev *rtwdev, 5688 struct sk_buff *c2h, u32 len) = NULL; 5689 5690 switch (class) { 5691 case RTW89_MAC_C2H_CLASS_INFO: 5692 if (func < RTW89_MAC_C2H_FUNC_INFO_MAX) 5693 handler = rtw89_mac_c2h_info_handler[func]; 5694 break; 5695 case RTW89_MAC_C2H_CLASS_OFLD: 5696 if (func < RTW89_MAC_C2H_FUNC_OFLD_MAX) 5697 handler = rtw89_mac_c2h_ofld_handler[func]; 5698 break; 5699 case RTW89_MAC_C2H_CLASS_MCC: 5700 if (func < NUM_OF_RTW89_MAC_C2H_FUNC_MCC) 5701 handler = rtw89_mac_c2h_mcc_handler[func]; 5702 break; 5703 case RTW89_MAC_C2H_CLASS_MLO: 5704 if (func < NUM_OF_RTW89_MAC_C2H_FUNC_MLO) 5705 handler = rtw89_mac_c2h_mlo_handler[func]; 5706 break; 5707 case RTW89_MAC_C2H_CLASS_MRC: 5708 if (func < NUM_OF_RTW89_MAC_C2H_FUNC_MRC) 5709 handler = rtw89_mac_c2h_mrc_handler[func]; 5710 break; 5711 case RTW89_MAC_C2H_CLASS_WOW: 5712 if (func < NUM_OF_RTW89_MAC_C2H_FUNC_WOW) 5713 handler = rtw89_mac_c2h_wow_handler[func]; 5714 break; 5715 case RTW89_MAC_C2H_CLASS_AP: 5716 if (func < NUM_OF_RTW89_MAC_C2H_FUNC_AP) 5717 handler = rtw89_mac_c2h_ap_handler[func]; 5718 break; 5719 case RTW89_MAC_C2H_CLASS_FWDBG: 5720 return; 5721 default: 5722 rtw89_info(rtwdev, "MAC c2h class %d not support\n", class); 5723 return; 5724 } 5725 if (!handler) { 5726 rtw89_info(rtwdev, "MAC c2h class %d func %d not support\n", class, 5727 func); 5728 return; 5729 } 5730 handler(rtwdev, skb, len); 5731 } 5732 5733 static 5734 bool rtw89_mac_get_txpwr_cr_ax(struct rtw89_dev *rtwdev, 5735 enum rtw89_phy_idx phy_idx, 5736 u32 reg_base, u32 *cr) 5737 { 5738 enum rtw89_qta_mode mode = rtwdev->mac.qta_mode; 5739 u32 addr = rtw89_mac_reg_by_idx(rtwdev, reg_base, phy_idx); 5740 5741 if (addr < R_AX_PWR_RATE_CTRL || addr > CMAC1_END_ADDR_AX) { 5742 rtw89_err(rtwdev, "[TXPWR] addr=0x%x exceed txpwr cr\n", 5743 addr); 5744 goto error; 5745 } 5746 5747 if (addr >= CMAC1_START_ADDR_AX && addr <= CMAC1_END_ADDR_AX) 5748 if (mode == RTW89_QTA_SCC) { 5749 rtw89_err(rtwdev, 5750 "[TXPWR] addr=0x%x but hw not enable\n", 5751 addr); 5752 goto error; 5753 } 5754 5755 *cr = addr; 5756 return true; 5757 5758 error: 5759 rtw89_err(rtwdev, "[TXPWR] check txpwr cr 0x%x(phy%d) fail\n", 5760 addr, phy_idx); 5761 5762 return false; 5763 } 5764 5765 static 5766 int rtw89_mac_cfg_ppdu_status_ax(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable) 5767 { 5768 u32 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PPDU_STAT, mac_idx); 5769 int ret; 5770 5771 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 5772 if (ret) 5773 return ret; 
5774 5775 if (!enable) { 5776 rtw89_write32_clr(rtwdev, reg, B_AX_PPDU_STAT_RPT_EN); 5777 return 0; 5778 } 5779 5780 rtw89_write32(rtwdev, reg, B_AX_PPDU_STAT_RPT_EN | 5781 B_AX_APP_MAC_INFO_RPT | 5782 B_AX_APP_RX_CNT_RPT | B_AX_APP_PLCP_HDR_RPT | 5783 B_AX_PPDU_STAT_RPT_CRC32); 5784 rtw89_write32_mask(rtwdev, R_AX_HW_RPT_FWD, B_AX_FWD_PPDU_STAT_MASK, 5785 RTW89_PRPT_DEST_HOST); 5786 5787 return 0; 5788 } 5789 5790 static 5791 void __rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx) 5792 { 5793 #define MAC_AX_TIME_TH_SH 5 5794 #define MAC_AX_LEN_TH_SH 4 5795 #define MAC_AX_TIME_TH_MAX 255 5796 #define MAC_AX_LEN_TH_MAX 255 5797 #define MAC_AX_TIME_TH_DEF 88 5798 #define MAC_AX_LEN_TH_DEF 4080 5799 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 5800 struct ieee80211_hw *hw = rtwdev->hw; 5801 u32 rts_threshold = hw->wiphy->rts_threshold; 5802 u32 time_th, len_th; 5803 u32 reg; 5804 5805 if (rts_threshold == (u32)-1) { 5806 time_th = MAC_AX_TIME_TH_DEF; 5807 len_th = MAC_AX_LEN_TH_DEF; 5808 } else { 5809 time_th = MAC_AX_TIME_TH_MAX << MAC_AX_TIME_TH_SH; 5810 len_th = rts_threshold; 5811 } 5812 5813 time_th = min_t(u32, time_th >> MAC_AX_TIME_TH_SH, MAC_AX_TIME_TH_MAX); 5814 len_th = min_t(u32, len_th >> MAC_AX_LEN_TH_SH, MAC_AX_LEN_TH_MAX); 5815 5816 reg = rtw89_mac_reg_by_idx(rtwdev, mac->agg_len_ht, mac_idx); 5817 rtw89_write16_mask(rtwdev, reg, B_AX_RTS_TXTIME_TH_MASK, time_th); 5818 rtw89_write16_mask(rtwdev, reg, B_AX_RTS_LEN_TH_MASK, len_th); 5819 } 5820 5821 void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev) 5822 { 5823 __rtw89_mac_update_rts_threshold(rtwdev, RTW89_MAC_0); 5824 if (rtwdev->dbcc_en) 5825 __rtw89_mac_update_rts_threshold(rtwdev, RTW89_MAC_1); 5826 } 5827 5828 void rtw89_mac_flush_txq(struct rtw89_dev *rtwdev, u32 queues, bool drop) 5829 { 5830 bool empty; 5831 int ret; 5832 5833 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) 5834 return; 5835 5836 ret = read_poll_timeout(dle_is_txq_empty, empty, empty, 5837 10000, 200000, false, rtwdev); 5838 if (ret && !drop && (rtwdev->total_sta_assoc || rtwdev->scanning)) 5839 rtw89_info(rtwdev, "timed out to flush queues\n"); 5840 } 5841 5842 int rtw89_mac_coex_init(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex *coex) 5843 { 5844 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 5845 u8 val; 5846 u16 val16; 5847 u32 val32; 5848 int ret; 5849 5850 rtw89_write8_set(rtwdev, R_AX_GPIO_MUXCFG, B_AX_ENBT); 5851 if (chip_id != RTL8851B && chip_id != RTL8852BT) 5852 rtw89_write8_set(rtwdev, R_AX_BTC_FUNC_EN, B_AX_PTA_WL_TX_EN); 5853 rtw89_write8_set(rtwdev, R_AX_BT_COEX_CFG_2 + 1, B_AX_GNT_BT_POLARITY >> 8); 5854 rtw89_write8_set(rtwdev, R_AX_CSR_MODE, B_AX_STATIS_BT_EN | B_AX_WL_ACT_MSK); 5855 rtw89_write8_set(rtwdev, R_AX_CSR_MODE + 2, B_AX_BT_CNT_RST >> 16); 5856 if (chip_id != RTL8851B && chip_id != RTL8852BT) 5857 rtw89_write8_clr(rtwdev, R_AX_TRXPTCL_RESP_0 + 3, B_AX_RSP_CHK_BTCCA >> 24); 5858 5859 val16 = rtw89_read16(rtwdev, R_AX_CCA_CFG_0); 5860 val16 = (val16 | B_AX_BTCCA_EN) & ~B_AX_BTCCA_BRK_TXOP_EN; 5861 rtw89_write16(rtwdev, R_AX_CCA_CFG_0, val16); 5862 5863 ret = rtw89_mac_read_lte(rtwdev, R_AX_LTE_SW_CFG_2, &val32); 5864 if (ret) { 5865 rtw89_err(rtwdev, "Read R_AX_LTE_SW_CFG_2 fail!\n"); 5866 return ret; 5867 } 5868 val32 = val32 & B_AX_WL_RX_CTRL; 5869 ret = rtw89_mac_write_lte(rtwdev, R_AX_LTE_SW_CFG_2, val32); 5870 if (ret) { 5871 rtw89_err(rtwdev, "Write R_AX_LTE_SW_CFG_2 fail!\n"); 5872 return ret; 5873 } 5874 5875 switch (coex->pta_mode) { 5876 case 
RTW89_MAC_AX_COEX_RTK_MODE: 5877 val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG); 5878 val &= ~B_AX_BTMODE_MASK; 5879 val |= FIELD_PREP(B_AX_BTMODE_MASK, MAC_AX_BT_MODE_0_3); 5880 rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG, val); 5881 5882 val = rtw89_read8(rtwdev, R_AX_TDMA_MODE); 5883 rtw89_write8(rtwdev, R_AX_TDMA_MODE, val | B_AX_RTK_BT_ENABLE); 5884 5885 val = rtw89_read8(rtwdev, R_AX_BT_COEX_CFG_5); 5886 val &= ~B_AX_BT_RPT_SAMPLE_RATE_MASK; 5887 val |= FIELD_PREP(B_AX_BT_RPT_SAMPLE_RATE_MASK, MAC_AX_RTK_RATE); 5888 rtw89_write8(rtwdev, R_AX_BT_COEX_CFG_5, val); 5889 break; 5890 case RTW89_MAC_AX_COEX_CSR_MODE: 5891 val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG); 5892 val &= ~B_AX_BTMODE_MASK; 5893 val |= FIELD_PREP(B_AX_BTMODE_MASK, MAC_AX_BT_MODE_2); 5894 rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG, val); 5895 5896 val16 = rtw89_read16(rtwdev, R_AX_CSR_MODE); 5897 val16 &= ~B_AX_BT_PRI_DETECT_TO_MASK; 5898 val16 |= FIELD_PREP(B_AX_BT_PRI_DETECT_TO_MASK, MAC_AX_CSR_PRI_TO); 5899 val16 &= ~B_AX_BT_TRX_INIT_DETECT_MASK; 5900 val16 |= FIELD_PREP(B_AX_BT_TRX_INIT_DETECT_MASK, MAC_AX_CSR_TRX_TO); 5901 val16 &= ~B_AX_BT_STAT_DELAY_MASK; 5902 val16 |= FIELD_PREP(B_AX_BT_STAT_DELAY_MASK, MAC_AX_CSR_DELAY); 5903 val16 |= B_AX_ENHANCED_BT; 5904 rtw89_write16(rtwdev, R_AX_CSR_MODE, val16); 5905 5906 rtw89_write8(rtwdev, R_AX_BT_COEX_CFG_2, MAC_AX_CSR_RATE); 5907 break; 5908 default: 5909 return -EINVAL; 5910 } 5911 5912 switch (coex->direction) { 5913 case RTW89_MAC_AX_COEX_INNER: 5914 val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG + 1); 5915 val = (val & ~BIT(2)) | BIT(1); 5916 rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG + 1, val); 5917 break; 5918 case RTW89_MAC_AX_COEX_OUTPUT: 5919 val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG + 1); 5920 val = val | BIT(1) | BIT(0); 5921 rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG + 1, val); 5922 break; 5923 case RTW89_MAC_AX_COEX_INPUT: 5924 val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG + 1); 5925 val = val & ~(BIT(2) | BIT(1)); 5926 rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG + 1, val); 5927 break; 5928 default: 5929 return -EINVAL; 5930 } 5931 5932 return 0; 5933 } 5934 EXPORT_SYMBOL(rtw89_mac_coex_init); 5935 5936 int rtw89_mac_coex_init_v1(struct rtw89_dev *rtwdev, 5937 const struct rtw89_mac_ax_coex *coex) 5938 { 5939 rtw89_write32_set(rtwdev, R_AX_BTC_CFG, 5940 B_AX_BTC_EN | B_AX_BTG_LNA1_GAIN_SEL); 5941 rtw89_write32_set(rtwdev, R_AX_BT_CNT_CFG, B_AX_BT_CNT_EN); 5942 rtw89_write16_set(rtwdev, R_AX_CCA_CFG_0, B_AX_BTCCA_EN); 5943 rtw89_write16_clr(rtwdev, R_AX_CCA_CFG_0, B_AX_BTCCA_BRK_TXOP_EN); 5944 5945 switch (coex->pta_mode) { 5946 case RTW89_MAC_AX_COEX_RTK_MODE: 5947 rtw89_write32_mask(rtwdev, R_AX_BTC_CFG, B_AX_BTC_MODE_MASK, 5948 MAC_AX_RTK_MODE); 5949 rtw89_write32_mask(rtwdev, R_AX_RTK_MODE_CFG_V1, 5950 B_AX_SAMPLE_CLK_MASK, MAC_AX_RTK_RATE); 5951 break; 5952 case RTW89_MAC_AX_COEX_CSR_MODE: 5953 rtw89_write32_mask(rtwdev, R_AX_BTC_CFG, B_AX_BTC_MODE_MASK, 5954 MAC_AX_CSR_MODE); 5955 break; 5956 default: 5957 return -EINVAL; 5958 } 5959 5960 return 0; 5961 } 5962 EXPORT_SYMBOL(rtw89_mac_coex_init_v1); 5963 5964 int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev, 5965 const struct rtw89_mac_ax_coex_gnt *gnt_cfg) 5966 { 5967 u32 val = 0, ret; 5968 5969 if (gnt_cfg->band[0].gnt_bt) 5970 val |= B_AX_GNT_BT_RFC_S0_SW_VAL | B_AX_GNT_BT_BB_S0_SW_VAL; 5971 5972 if (gnt_cfg->band[0].gnt_bt_sw_en) 5973 val |= B_AX_GNT_BT_RFC_S0_SW_CTRL | B_AX_GNT_BT_BB_S0_SW_CTRL; 5974 5975 if (gnt_cfg->band[0].gnt_wl) 5976 val |= B_AX_GNT_WL_RFC_S0_SW_VAL | B_AX_GNT_WL_BB_S0_SW_VAL; 5977 5978 if 
(gnt_cfg->band[0].gnt_wl_sw_en) 5979 val |= B_AX_GNT_WL_RFC_S0_SW_CTRL | B_AX_GNT_WL_BB_S0_SW_CTRL; 5980 5981 if (gnt_cfg->band[1].gnt_bt) 5982 val |= B_AX_GNT_BT_RFC_S1_SW_VAL | B_AX_GNT_BT_BB_S1_SW_VAL; 5983 5984 if (gnt_cfg->band[1].gnt_bt_sw_en) 5985 val |= B_AX_GNT_BT_RFC_S1_SW_CTRL | B_AX_GNT_BT_BB_S1_SW_CTRL; 5986 5987 if (gnt_cfg->band[1].gnt_wl) 5988 val |= B_AX_GNT_WL_RFC_S1_SW_VAL | B_AX_GNT_WL_BB_S1_SW_VAL; 5989 5990 if (gnt_cfg->band[1].gnt_wl_sw_en) 5991 val |= B_AX_GNT_WL_RFC_S1_SW_CTRL | B_AX_GNT_WL_BB_S1_SW_CTRL; 5992 5993 ret = rtw89_mac_write_lte(rtwdev, R_AX_LTE_SW_CFG_1, val); 5994 if (ret) { 5995 rtw89_err(rtwdev, "Write LTE fail!\n"); 5996 return ret; 5997 } 5998 5999 return 0; 6000 } 6001 EXPORT_SYMBOL(rtw89_mac_cfg_gnt); 6002 6003 int rtw89_mac_cfg_gnt_v1(struct rtw89_dev *rtwdev, 6004 const struct rtw89_mac_ax_coex_gnt *gnt_cfg) 6005 { 6006 u32 val = 0; 6007 6008 if (gnt_cfg->band[0].gnt_bt) 6009 val |= B_AX_GNT_BT_RFC_S0_VAL | B_AX_GNT_BT_RX_VAL | 6010 B_AX_GNT_BT_TX_VAL; 6011 else 6012 val |= B_AX_WL_ACT_VAL; 6013 6014 if (gnt_cfg->band[0].gnt_bt_sw_en) 6015 val |= B_AX_GNT_BT_RFC_S0_SWCTRL | B_AX_GNT_BT_RX_SWCTRL | 6016 B_AX_GNT_BT_TX_SWCTRL | B_AX_WL_ACT_SWCTRL; 6017 6018 if (gnt_cfg->band[0].gnt_wl) 6019 val |= B_AX_GNT_WL_RFC_S0_VAL | B_AX_GNT_WL_RX_VAL | 6020 B_AX_GNT_WL_TX_VAL | B_AX_GNT_WL_BB_VAL; 6021 6022 if (gnt_cfg->band[0].gnt_wl_sw_en) 6023 val |= B_AX_GNT_WL_RFC_S0_SWCTRL | B_AX_GNT_WL_RX_SWCTRL | 6024 B_AX_GNT_WL_TX_SWCTRL | B_AX_GNT_WL_BB_SWCTRL; 6025 6026 if (gnt_cfg->band[1].gnt_bt) 6027 val |= B_AX_GNT_BT_RFC_S1_VAL | B_AX_GNT_BT_RX_VAL | 6028 B_AX_GNT_BT_TX_VAL; 6029 else 6030 val |= B_AX_WL_ACT_VAL; 6031 6032 if (gnt_cfg->band[1].gnt_bt_sw_en) 6033 val |= B_AX_GNT_BT_RFC_S1_SWCTRL | B_AX_GNT_BT_RX_SWCTRL | 6034 B_AX_GNT_BT_TX_SWCTRL | B_AX_WL_ACT_SWCTRL; 6035 6036 if (gnt_cfg->band[1].gnt_wl) 6037 val |= B_AX_GNT_WL_RFC_S1_VAL | B_AX_GNT_WL_RX_VAL | 6038 B_AX_GNT_WL_TX_VAL | B_AX_GNT_WL_BB_VAL; 6039 6040 if (gnt_cfg->band[1].gnt_wl_sw_en) 6041 val |= B_AX_GNT_WL_RFC_S1_SWCTRL | B_AX_GNT_WL_RX_SWCTRL | 6042 B_AX_GNT_WL_TX_SWCTRL | B_AX_GNT_WL_BB_SWCTRL; 6043 6044 rtw89_write32(rtwdev, R_AX_GNT_SW_CTRL, val); 6045 6046 return 0; 6047 } 6048 EXPORT_SYMBOL(rtw89_mac_cfg_gnt_v1); 6049 6050 static 6051 int rtw89_mac_cfg_plt_ax(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt) 6052 { 6053 u32 reg; 6054 u16 val; 6055 int ret; 6056 6057 ret = rtw89_mac_check_mac_en(rtwdev, plt->band, RTW89_CMAC_SEL); 6058 if (ret) 6059 return ret; 6060 6061 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BT_PLT, plt->band); 6062 val = (plt->tx & RTW89_MAC_AX_PLT_LTE_RX ? B_AX_TX_PLT_GNT_LTE_RX : 0) | 6063 (plt->tx & RTW89_MAC_AX_PLT_GNT_BT_TX ? B_AX_TX_PLT_GNT_BT_TX : 0) | 6064 (plt->tx & RTW89_MAC_AX_PLT_GNT_BT_RX ? B_AX_TX_PLT_GNT_BT_RX : 0) | 6065 (plt->tx & RTW89_MAC_AX_PLT_GNT_WL ? B_AX_TX_PLT_GNT_WL : 0) | 6066 (plt->rx & RTW89_MAC_AX_PLT_LTE_RX ? B_AX_RX_PLT_GNT_LTE_RX : 0) | 6067 (plt->rx & RTW89_MAC_AX_PLT_GNT_BT_TX ? B_AX_RX_PLT_GNT_BT_TX : 0) | 6068 (plt->rx & RTW89_MAC_AX_PLT_GNT_BT_RX ? B_AX_RX_PLT_GNT_BT_RX : 0) | 6069 (plt->rx & RTW89_MAC_AX_PLT_GNT_WL ? 
B_AX_RX_PLT_GNT_WL : 0) | 6070 B_AX_PLT_EN; 6071 rtw89_write16(rtwdev, reg, val); 6072 6073 return 0; 6074 } 6075 6076 void rtw89_mac_cfg_sb(struct rtw89_dev *rtwdev, u32 val) 6077 { 6078 u32 fw_sb; 6079 6080 fw_sb = rtw89_read32(rtwdev, R_AX_SCOREBOARD); 6081 fw_sb = FIELD_GET(B_MAC_AX_SB_FW_MASK, fw_sb); 6082 fw_sb = fw_sb & ~B_MAC_AX_BTGS1_NOTIFY; 6083 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) 6084 fw_sb = fw_sb | MAC_AX_NOTIFY_PWR_MAJOR; 6085 else 6086 fw_sb = fw_sb | MAC_AX_NOTIFY_TP_MAJOR; 6087 val = FIELD_GET(B_MAC_AX_SB_DRV_MASK, val); 6088 val = B_AX_TOGGLE | 6089 FIELD_PREP(B_MAC_AX_SB_DRV_MASK, val) | 6090 FIELD_PREP(B_MAC_AX_SB_FW_MASK, fw_sb); 6091 rtw89_write32(rtwdev, R_AX_SCOREBOARD, val); 6092 fsleep(1000); /* avoid BT FW loss information */ 6093 } 6094 6095 u32 rtw89_mac_get_sb(struct rtw89_dev *rtwdev) 6096 { 6097 return rtw89_read32(rtwdev, R_AX_SCOREBOARD); 6098 } 6099 6100 int rtw89_mac_cfg_ctrl_path(struct rtw89_dev *rtwdev, bool wl) 6101 { 6102 u8 val = rtw89_read8(rtwdev, R_AX_SYS_SDIO_CTRL + 3); 6103 6104 val = wl ? val | BIT(2) : val & ~BIT(2); 6105 rtw89_write8(rtwdev, R_AX_SYS_SDIO_CTRL + 3, val); 6106 6107 return 0; 6108 } 6109 EXPORT_SYMBOL(rtw89_mac_cfg_ctrl_path); 6110 6111 int rtw89_mac_cfg_ctrl_path_v1(struct rtw89_dev *rtwdev, bool wl) 6112 { 6113 struct rtw89_btc *btc = &rtwdev->btc; 6114 struct rtw89_btc_dm *dm = &btc->dm; 6115 struct rtw89_mac_ax_gnt *g = dm->gnt.band; 6116 int i; 6117 6118 if (wl) 6119 return 0; 6120 6121 for (i = 0; i < RTW89_PHY_NUM; i++) { 6122 g[i].gnt_bt_sw_en = 1; 6123 g[i].gnt_bt = 1; 6124 g[i].gnt_wl_sw_en = 1; 6125 g[i].gnt_wl = 0; 6126 } 6127 6128 return rtw89_mac_cfg_gnt_v1(rtwdev, &dm->gnt); 6129 } 6130 EXPORT_SYMBOL(rtw89_mac_cfg_ctrl_path_v1); 6131 6132 bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev) 6133 { 6134 const struct rtw89_chip_info *chip = rtwdev->chip; 6135 u8 val = 0; 6136 6137 if (chip->chip_id == RTL8852C || chip->chip_id == RTL8922A) 6138 return false; 6139 else if (chip->chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) 6140 val = rtw89_read8_mask(rtwdev, R_AX_SYS_SDIO_CTRL + 3, 6141 B_AX_LTE_MUX_CTRL_PATH >> 24); 6142 6143 return !!val; 6144 } 6145 6146 static u16 rtw89_mac_get_plt_cnt_ax(struct rtw89_dev *rtwdev, u8 band) 6147 { 6148 u32 reg; 6149 u16 cnt; 6150 6151 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BT_PLT, band); 6152 cnt = rtw89_read32_mask(rtwdev, reg, B_AX_BT_PLT_PKT_CNT_MASK); 6153 rtw89_write16_set(rtwdev, reg, B_AX_BT_PLT_RST); 6154 6155 return cnt; 6156 } 6157 6158 static void rtw89_mac_bfee_standby_timer(struct rtw89_dev *rtwdev, u8 mac_idx, 6159 bool keep) 6160 { 6161 u32 reg; 6162 6163 if (rtwdev->chip->chip_gen != RTW89_CHIP_AX) 6164 return; 6165 6166 rtw89_debug(rtwdev, RTW89_DBG_BF, "set bfee standby_timer to %d\n", keep); 6167 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BFMEE_RESP_OPTION, mac_idx); 6168 if (keep) { 6169 set_bit(RTW89_FLAG_BFEE_TIMER_KEEP, rtwdev->flags); 6170 rtw89_write32_mask(rtwdev, reg, B_AX_BFMEE_BFRP_RX_STANDBY_TIMER_MASK, 6171 BFRP_RX_STANDBY_TIMER_KEEP); 6172 } else { 6173 clear_bit(RTW89_FLAG_BFEE_TIMER_KEEP, rtwdev->flags); 6174 rtw89_write32_mask(rtwdev, reg, B_AX_BFMEE_BFRP_RX_STANDBY_TIMER_MASK, 6175 BFRP_RX_STANDBY_TIMER_RELEASE); 6176 } 6177 } 6178 6179 void rtw89_mac_bfee_ctrl(struct rtw89_dev *rtwdev, u8 mac_idx, bool en) 6180 { 6181 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6182 u32 reg; 6183 u32 mask = mac->bfee_ctrl.mask; 6184 6185 rtw89_debug(rtwdev, RTW89_DBG_BF, "set bfee ndpa_en to %d\n", en); 6186 reg = 
rtw89_mac_reg_by_idx(rtwdev, mac->bfee_ctrl.addr, mac_idx); 6187 if (en) { 6188 set_bit(RTW89_FLAG_BFEE_EN, rtwdev->flags); 6189 rtw89_write32_set(rtwdev, reg, mask); 6190 } else { 6191 clear_bit(RTW89_FLAG_BFEE_EN, rtwdev->flags); 6192 rtw89_write32_clr(rtwdev, reg, mask); 6193 } 6194 } 6195 6196 static int rtw89_mac_init_bfee_ax(struct rtw89_dev *rtwdev, u8 mac_idx) 6197 { 6198 u32 reg; 6199 u32 val32; 6200 int ret; 6201 6202 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 6203 if (ret) 6204 return ret; 6205 6206 /* AP mode set tx gid to 63 */ 6207 /* STA mode set tx gid to 0(default) */ 6208 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BFMER_CTRL_0, mac_idx); 6209 rtw89_write32_set(rtwdev, reg, B_AX_BFMER_NDP_BFEN); 6210 6211 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_RRSC, mac_idx); 6212 rtw89_write32(rtwdev, reg, CSI_RRSC_BMAP); 6213 6214 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BFMEE_RESP_OPTION, mac_idx); 6215 val32 = FIELD_PREP(B_AX_BFMEE_NDP_RX_STANDBY_TIMER_MASK, NDP_RX_STANDBY_TIMER); 6216 rtw89_write32(rtwdev, reg, val32); 6217 rtw89_mac_bfee_standby_timer(rtwdev, mac_idx, true); 6218 rtw89_mac_bfee_ctrl(rtwdev, mac_idx, true); 6219 6220 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); 6221 rtw89_write32_set(rtwdev, reg, B_AX_BFMEE_BFPARAM_SEL | 6222 B_AX_BFMEE_USE_NSTS | 6223 B_AX_BFMEE_CSI_GID_SEL | 6224 B_AX_BFMEE_CSI_FORCE_RETE_EN); 6225 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_RATE, mac_idx); 6226 rtw89_write32(rtwdev, reg, 6227 u32_encode_bits(CSI_INIT_RATE_HT, B_AX_BFMEE_HT_CSI_RATE_MASK) | 6228 u32_encode_bits(CSI_INIT_RATE_VHT, B_AX_BFMEE_VHT_CSI_RATE_MASK) | 6229 u32_encode_bits(CSI_INIT_RATE_HE, B_AX_BFMEE_HE_CSI_RATE_MASK)); 6230 6231 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_CSIRPT_OPTION, mac_idx); 6232 rtw89_write32_set(rtwdev, reg, 6233 B_AX_CSIPRT_VHTSU_AID_EN | B_AX_CSIPRT_HESU_AID_EN); 6234 6235 return 0; 6236 } 6237 6238 static int rtw89_mac_set_csi_para_reg_ax(struct rtw89_dev *rtwdev, 6239 struct rtw89_vif_link *rtwvif_link, 6240 struct rtw89_sta_link *rtwsta_link) 6241 { 6242 u8 nc = 1, nr = 3, ng = 0, cb = 1, cs = 1, ldpc_en = 1, stbc_en = 1; 6243 struct ieee80211_link_sta *link_sta; 6244 u8 mac_idx = rtwvif_link->mac_idx; 6245 u8 port_sel = rtwvif_link->port; 6246 u8 sound_dim = 3, t; 6247 u8 *phy_cap; 6248 u32 reg; 6249 u16 val; 6250 int ret; 6251 6252 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 6253 if (ret) 6254 return ret; 6255 6256 rcu_read_lock(); 6257 6258 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 6259 phy_cap = link_sta->he_cap.he_cap_elem.phy_cap_info; 6260 6261 if ((phy_cap[3] & IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER) || 6262 (phy_cap[4] & IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER)) { 6263 ldpc_en &= !!(phy_cap[1] & IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD); 6264 stbc_en &= !!(phy_cap[2] & IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ); 6265 t = FIELD_GET(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK, 6266 phy_cap[5]); 6267 sound_dim = min(sound_dim, t); 6268 } 6269 if ((link_sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) || 6270 (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) { 6271 ldpc_en &= !!(link_sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC); 6272 stbc_en &= !!(link_sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK); 6273 t = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK, 6274 link_sta->vht_cap.cap); 6275 sound_dim = min(sound_dim, t); 6276 } 6277 nc = min(nc, sound_dim); 6278 nr = min(nr, 
sound_dim); 6279 6280 rcu_read_unlock(); 6281 6282 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); 6283 rtw89_write32_set(rtwdev, reg, B_AX_BFMEE_BFPARAM_SEL); 6284 6285 val = FIELD_PREP(B_AX_BFMEE_CSIINFO0_NC_MASK, nc) | 6286 FIELD_PREP(B_AX_BFMEE_CSIINFO0_NR_MASK, nr) | 6287 FIELD_PREP(B_AX_BFMEE_CSIINFO0_NG_MASK, ng) | 6288 FIELD_PREP(B_AX_BFMEE_CSIINFO0_CB_MASK, cb) | 6289 FIELD_PREP(B_AX_BFMEE_CSIINFO0_CS_MASK, cs) | 6290 FIELD_PREP(B_AX_BFMEE_CSIINFO0_LDPC_EN, ldpc_en) | 6291 FIELD_PREP(B_AX_BFMEE_CSIINFO0_STBC_EN, stbc_en); 6292 6293 if (port_sel == 0) 6294 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); 6295 else 6296 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_CTRL_1, mac_idx); 6297 6298 rtw89_write16(rtwdev, reg, val); 6299 6300 return 0; 6301 } 6302 6303 static int rtw89_mac_csi_rrsc_ax(struct rtw89_dev *rtwdev, 6304 struct rtw89_vif_link *rtwvif_link, 6305 struct rtw89_sta_link *rtwsta_link) 6306 { 6307 u32 rrsc = BIT(RTW89_MAC_BF_RRSC_6M) | BIT(RTW89_MAC_BF_RRSC_24M); 6308 struct ieee80211_link_sta *link_sta; 6309 u8 mac_idx = rtwvif_link->mac_idx; 6310 u32 reg; 6311 int ret; 6312 6313 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 6314 if (ret) 6315 return ret; 6316 6317 rcu_read_lock(); 6318 6319 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 6320 6321 if (link_sta->he_cap.has_he) { 6322 rrsc |= (BIT(RTW89_MAC_BF_RRSC_HE_MSC0) | 6323 BIT(RTW89_MAC_BF_RRSC_HE_MSC3) | 6324 BIT(RTW89_MAC_BF_RRSC_HE_MSC5)); 6325 } 6326 if (link_sta->vht_cap.vht_supported) { 6327 rrsc |= (BIT(RTW89_MAC_BF_RRSC_VHT_MSC0) | 6328 BIT(RTW89_MAC_BF_RRSC_VHT_MSC3) | 6329 BIT(RTW89_MAC_BF_RRSC_VHT_MSC5)); 6330 } 6331 if (link_sta->ht_cap.ht_supported) { 6332 rrsc |= (BIT(RTW89_MAC_BF_RRSC_HT_MSC0) | 6333 BIT(RTW89_MAC_BF_RRSC_HT_MSC3) | 6334 BIT(RTW89_MAC_BF_RRSC_HT_MSC5)); 6335 } 6336 6337 rcu_read_unlock(); 6338 6339 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); 6340 rtw89_write32_set(rtwdev, reg, B_AX_BFMEE_BFPARAM_SEL); 6341 rtw89_write32_clr(rtwdev, reg, B_AX_BFMEE_CSI_FORCE_RETE_EN); 6342 rtw89_write32(rtwdev, 6343 rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_RRSC, mac_idx), 6344 rrsc); 6345 6346 return 0; 6347 } 6348 6349 static void rtw89_mac_bf_assoc_ax(struct rtw89_dev *rtwdev, 6350 struct rtw89_vif_link *rtwvif_link, 6351 struct rtw89_sta_link *rtwsta_link) 6352 { 6353 struct ieee80211_link_sta *link_sta; 6354 bool has_beamformer_cap; 6355 6356 rcu_read_lock(); 6357 6358 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 6359 has_beamformer_cap = rtw89_sta_has_beamformer_cap(link_sta); 6360 6361 rcu_read_unlock(); 6362 6363 if (has_beamformer_cap) { 6364 rtw89_debug(rtwdev, RTW89_DBG_BF, 6365 "initialize bfee for new association\n"); 6366 rtw89_mac_init_bfee_ax(rtwdev, rtwvif_link->mac_idx); 6367 rtw89_mac_set_csi_para_reg_ax(rtwdev, rtwvif_link, rtwsta_link); 6368 rtw89_mac_csi_rrsc_ax(rtwdev, rtwvif_link, rtwsta_link); 6369 } 6370 } 6371 6372 void rtw89_mac_bf_disassoc(struct rtw89_dev *rtwdev, 6373 struct rtw89_vif_link *rtwvif_link, 6374 struct rtw89_sta_link *rtwsta_link) 6375 { 6376 rtw89_mac_bfee_ctrl(rtwdev, rtwvif_link->mac_idx, false); 6377 } 6378 6379 void rtw89_mac_bf_set_gid_table(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 6380 struct ieee80211_bss_conf *conf) 6381 { 6382 struct rtw89_vif *rtwvif = vif_to_rtwvif(vif); 6383 struct rtw89_vif_link *rtwvif_link; 6384 u8 mac_idx; 6385 __le32 *p; 6386 6387 rtwvif_link = 
rtwvif->links[conf->link_id]; 6388 if (unlikely(!rtwvif_link)) { 6389 rtw89_err(rtwdev, 6390 "%s: rtwvif link (link_id %u) is not active\n", 6391 __func__, conf->link_id); 6392 return; 6393 } 6394 6395 mac_idx = rtwvif_link->mac_idx; 6396 6397 rtw89_debug(rtwdev, RTW89_DBG_BF, "update bf GID table\n"); 6398 6399 p = (__le32 *)conf->mu_group.membership; 6400 rtw89_write32(rtwdev, 6401 rtw89_mac_reg_by_idx(rtwdev, R_AX_GID_POSITION_EN0, mac_idx), 6402 le32_to_cpu(p[0])); 6403 rtw89_write32(rtwdev, 6404 rtw89_mac_reg_by_idx(rtwdev, R_AX_GID_POSITION_EN1, mac_idx), 6405 le32_to_cpu(p[1])); 6406 6407 p = (__le32 *)conf->mu_group.position; 6408 rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_GID_POSITION0, mac_idx), 6409 le32_to_cpu(p[0])); 6410 rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_GID_POSITION1, mac_idx), 6411 le32_to_cpu(p[1])); 6412 rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_GID_POSITION2, mac_idx), 6413 le32_to_cpu(p[2])); 6414 rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_GID_POSITION3, mac_idx), 6415 le32_to_cpu(p[3])); 6416 } 6417 6418 struct rtw89_mac_bf_monitor_iter_data { 6419 struct rtw89_dev *rtwdev; 6420 struct rtw89_sta_link *down_rtwsta_link; 6421 int count; 6422 }; 6423 6424 static 6425 void rtw89_mac_bf_monitor_calc_iter(void *data, struct ieee80211_sta *sta) 6426 { 6427 struct rtw89_mac_bf_monitor_iter_data *iter_data = 6428 (struct rtw89_mac_bf_monitor_iter_data *)data; 6429 struct rtw89_sta_link *down_rtwsta_link = iter_data->down_rtwsta_link; 6430 struct rtw89_sta *rtwsta = sta_to_rtwsta(sta); 6431 struct ieee80211_link_sta *link_sta; 6432 struct rtw89_sta_link *rtwsta_link; 6433 bool has_beamformer_cap = false; 6434 int *count = &iter_data->count; 6435 unsigned int link_id; 6436 6437 rcu_read_lock(); 6438 6439 rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) { 6440 if (rtwsta_link == down_rtwsta_link) 6441 continue; 6442 6443 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, false); 6444 if (rtw89_sta_has_beamformer_cap(link_sta)) { 6445 has_beamformer_cap = true; 6446 break; 6447 } 6448 } 6449 6450 if (has_beamformer_cap) 6451 (*count)++; 6452 6453 rcu_read_unlock(); 6454 } 6455 6456 void rtw89_mac_bf_monitor_calc(struct rtw89_dev *rtwdev, 6457 struct rtw89_sta_link *rtwsta_link, 6458 bool disconnect) 6459 { 6460 struct rtw89_mac_bf_monitor_iter_data data; 6461 6462 data.rtwdev = rtwdev; 6463 data.down_rtwsta_link = disconnect ? 
rtwsta_link : NULL; 6464 data.count = 0; 6465 ieee80211_iterate_stations_atomic(rtwdev->hw, 6466 rtw89_mac_bf_monitor_calc_iter, 6467 &data); 6468 6469 rtw89_debug(rtwdev, RTW89_DBG_BF, "bfee STA count=%d\n", data.count); 6470 if (data.count) 6471 set_bit(RTW89_FLAG_BFEE_MON, rtwdev->flags); 6472 else 6473 clear_bit(RTW89_FLAG_BFEE_MON, rtwdev->flags); 6474 } 6475 6476 void _rtw89_mac_bf_monitor_track(struct rtw89_dev *rtwdev) 6477 { 6478 struct rtw89_traffic_stats *stats = &rtwdev->stats; 6479 struct rtw89_vif_link *rtwvif_link; 6480 bool en = stats->tx_tfc_lv <= stats->rx_tfc_lv; 6481 bool old = test_bit(RTW89_FLAG_BFEE_EN, rtwdev->flags); 6482 struct rtw89_vif *rtwvif; 6483 bool keep_timer = true; 6484 unsigned int link_id; 6485 bool old_keep_timer; 6486 6487 old_keep_timer = test_bit(RTW89_FLAG_BFEE_TIMER_KEEP, rtwdev->flags); 6488 6489 if (stats->tx_tfc_lv <= RTW89_TFC_LOW && stats->rx_tfc_lv <= RTW89_TFC_LOW) 6490 keep_timer = false; 6491 6492 if (keep_timer != old_keep_timer) { 6493 rtw89_for_each_rtwvif(rtwdev, rtwvif) 6494 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) 6495 rtw89_mac_bfee_standby_timer(rtwdev, rtwvif_link->mac_idx, 6496 keep_timer); 6497 } 6498 6499 if (en == old) 6500 return; 6501 6502 rtw89_for_each_rtwvif(rtwdev, rtwvif) 6503 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) 6504 rtw89_mac_bfee_ctrl(rtwdev, rtwvif_link->mac_idx, en); 6505 } 6506 6507 static int 6508 __rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link, 6509 u32 tx_time) 6510 { 6511 #define MAC_AX_DFLT_TX_TIME 5280 6512 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6513 u8 mac_idx = rtwsta_link->rtwvif_link->mac_idx; 6514 u32 max_tx_time = tx_time == 0 ? MAC_AX_DFLT_TX_TIME : tx_time; 6515 u32 reg; 6516 int ret = 0; 6517 6518 if (rtwsta_link->cctl_tx_time) { 6519 rtwsta_link->ampdu_max_time = (max_tx_time - 512) >> 9; 6520 ret = rtw89_chip_h2c_txtime_cmac_tbl(rtwdev, rtwsta_link); 6521 } else { 6522 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 6523 if (ret) { 6524 rtw89_warn(rtwdev, "failed to check cmac in set txtime\n"); 6525 return ret; 6526 } 6527 6528 reg = rtw89_mac_reg_by_idx(rtwdev, mac->agg_limit.addr, mac_idx); 6529 rtw89_write32_mask(rtwdev, reg, mac->agg_limit.mask, 6530 max_tx_time >> 5); 6531 } 6532 6533 return ret; 6534 } 6535 6536 int rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link, 6537 bool resume, u32 tx_time) 6538 { 6539 int ret = 0; 6540 6541 if (!resume) { 6542 rtwsta_link->cctl_tx_time = true; 6543 ret = __rtw89_mac_set_tx_time(rtwdev, rtwsta_link, tx_time); 6544 } else { 6545 ret = __rtw89_mac_set_tx_time(rtwdev, rtwsta_link, tx_time); 6546 rtwsta_link->cctl_tx_time = false; 6547 } 6548 6549 return ret; 6550 } 6551 6552 int rtw89_mac_get_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link, 6553 u32 *tx_time) 6554 { 6555 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6556 u8 mac_idx = rtwsta_link->rtwvif_link->mac_idx; 6557 u32 reg; 6558 int ret = 0; 6559 6560 if (rtwsta_link->cctl_tx_time) { 6561 *tx_time = (rtwsta_link->ampdu_max_time + 1) << 9; 6562 } else { 6563 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 6564 if (ret) { 6565 rtw89_warn(rtwdev, "failed to check cmac in tx_time\n"); 6566 return ret; 6567 } 6568 6569 reg = rtw89_mac_reg_by_idx(rtwdev, mac->agg_limit.addr, mac_idx); 6570 *tx_time = rtw89_read32_mask(rtwdev, reg, mac->agg_limit.mask) << 5; 6571 } 6572 6573 return ret; 6574 } 6575 6576 int 
rtw89_mac_set_tx_retry_limit(struct rtw89_dev *rtwdev, 6577 struct rtw89_sta_link *rtwsta_link, 6578 bool resume, u8 tx_retry) 6579 { 6580 int ret = 0; 6581 6582 rtwsta_link->data_tx_cnt_lmt = tx_retry; 6583 6584 if (!resume) { 6585 rtwsta_link->cctl_tx_retry_limit = true; 6586 ret = rtw89_chip_h2c_txtime_cmac_tbl(rtwdev, rtwsta_link); 6587 } else { 6588 ret = rtw89_chip_h2c_txtime_cmac_tbl(rtwdev, rtwsta_link); 6589 rtwsta_link->cctl_tx_retry_limit = false; 6590 } 6591 6592 return ret; 6593 } 6594 6595 int rtw89_mac_get_tx_retry_limit(struct rtw89_dev *rtwdev, 6596 struct rtw89_sta_link *rtwsta_link, u8 *tx_retry) 6597 { 6598 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6599 u8 mac_idx = rtwsta_link->rtwvif_link->mac_idx; 6600 u32 reg; 6601 int ret = 0; 6602 6603 if (rtwsta_link->cctl_tx_retry_limit) { 6604 *tx_retry = rtwsta_link->data_tx_cnt_lmt; 6605 } else { 6606 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 6607 if (ret) { 6608 rtw89_warn(rtwdev, "failed to check cmac in rty_lmt\n"); 6609 return ret; 6610 } 6611 6612 reg = rtw89_mac_reg_by_idx(rtwdev, mac->txcnt_limit.addr, mac_idx); 6613 *tx_retry = rtw89_read32_mask(rtwdev, reg, mac->txcnt_limit.mask); 6614 } 6615 6616 return ret; 6617 } 6618 6619 int rtw89_mac_set_hw_muedca_ctrl(struct rtw89_dev *rtwdev, 6620 struct rtw89_vif_link *rtwvif_link, bool en) 6621 { 6622 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6623 u8 mac_idx = rtwvif_link->mac_idx; 6624 u16 set = mac->muedca_ctrl.mask; 6625 u32 reg; 6626 u32 ret; 6627 6628 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 6629 if (ret) 6630 return ret; 6631 6632 reg = rtw89_mac_reg_by_idx(rtwdev, mac->muedca_ctrl.addr, mac_idx); 6633 if (en) 6634 rtw89_write16_set(rtwdev, reg, set); 6635 else 6636 rtw89_write16_clr(rtwdev, reg, set); 6637 6638 return 0; 6639 } 6640 6641 static 6642 int rtw89_mac_write_xtal_si_ax(struct rtw89_dev *rtwdev, u8 offset, u8 val, u8 mask) 6643 { 6644 u32 val32; 6645 int ret; 6646 6647 val32 = FIELD_PREP(B_AX_WL_XTAL_SI_ADDR_MASK, offset) | 6648 FIELD_PREP(B_AX_WL_XTAL_SI_DATA_MASK, val) | 6649 FIELD_PREP(B_AX_WL_XTAL_SI_BITMASK_MASK, mask) | 6650 FIELD_PREP(B_AX_WL_XTAL_SI_MODE_MASK, XTAL_SI_NORMAL_WRITE) | 6651 FIELD_PREP(B_AX_WL_XTAL_SI_CMD_POLL, 1); 6652 rtw89_write32(rtwdev, R_AX_WLAN_XTAL_SI_CTRL, val32); 6653 6654 ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_AX_WL_XTAL_SI_CMD_POLL), 6655 50, 50000, false, rtwdev, R_AX_WLAN_XTAL_SI_CTRL); 6656 if (ret) { 6657 rtw89_warn(rtwdev, "xtal si not ready(W): offset=%x val=%x mask=%x\n", 6658 offset, val, mask); 6659 return ret; 6660 } 6661 6662 return 0; 6663 } 6664 6665 static 6666 int rtw89_mac_read_xtal_si_ax(struct rtw89_dev *rtwdev, u8 offset, u8 *val) 6667 { 6668 u32 val32; 6669 int ret; 6670 6671 val32 = FIELD_PREP(B_AX_WL_XTAL_SI_ADDR_MASK, offset) | 6672 FIELD_PREP(B_AX_WL_XTAL_SI_DATA_MASK, 0x00) | 6673 FIELD_PREP(B_AX_WL_XTAL_SI_BITMASK_MASK, 0x00) | 6674 FIELD_PREP(B_AX_WL_XTAL_SI_MODE_MASK, XTAL_SI_NORMAL_READ) | 6675 FIELD_PREP(B_AX_WL_XTAL_SI_CMD_POLL, 1); 6676 rtw89_write32(rtwdev, R_AX_WLAN_XTAL_SI_CTRL, val32); 6677 6678 ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_AX_WL_XTAL_SI_CMD_POLL), 6679 50, 50000, false, rtwdev, R_AX_WLAN_XTAL_SI_CTRL); 6680 if (ret) { 6681 rtw89_warn(rtwdev, "xtal si not ready(R): offset=%x\n", offset); 6682 return ret; 6683 } 6684 6685 *val = rtw89_read8(rtwdev, R_AX_WLAN_XTAL_SI_CTRL + 1); 6686 6687 return 0; 6688 } 6689 6690 static 6691 void rtw89_mac_pkt_drop_sta(struct 
rtw89_dev *rtwdev, 6692 struct rtw89_vif_link *rtwvif_link, 6693 struct rtw89_sta_link *rtwsta_link) 6694 { 6695 static const enum rtw89_pkt_drop_sel sels[] = { 6696 RTW89_PKT_DROP_SEL_MACID_BE_ONCE, 6697 RTW89_PKT_DROP_SEL_MACID_BK_ONCE, 6698 RTW89_PKT_DROP_SEL_MACID_VI_ONCE, 6699 RTW89_PKT_DROP_SEL_MACID_VO_ONCE, 6700 }; 6701 struct rtw89_pkt_drop_params params = {0}; 6702 int i; 6703 6704 params.mac_band = rtwvif_link->mac_idx; 6705 params.macid = rtwsta_link->mac_id; 6706 params.port = rtwvif_link->port; 6707 params.mbssid = 0; 6708 params.tf_trs = rtwvif_link->trigger; 6709 6710 for (i = 0; i < ARRAY_SIZE(sels); i++) { 6711 params.sel = sels[i]; 6712 rtw89_fw_h2c_pkt_drop(rtwdev, &params); 6713 } 6714 } 6715 6716 static void rtw89_mac_pkt_drop_vif_iter(void *data, struct ieee80211_sta *sta) 6717 { 6718 struct rtw89_sta *rtwsta = sta_to_rtwsta(sta); 6719 struct rtw89_vif *rtwvif = rtwsta->rtwvif; 6720 struct rtw89_dev *rtwdev = rtwsta->rtwdev; 6721 struct rtw89_vif_link *rtwvif_link; 6722 struct rtw89_sta_link *rtwsta_link; 6723 struct rtw89_vif *target = data; 6724 unsigned int link_id; 6725 6726 if (rtwvif != target) 6727 return; 6728 6729 rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) { 6730 rtwvif_link = rtwsta_link->rtwvif_link; 6731 rtw89_mac_pkt_drop_sta(rtwdev, rtwvif_link, rtwsta_link); 6732 } 6733 } 6734 6735 void rtw89_mac_pkt_drop_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 6736 { 6737 ieee80211_iterate_stations_atomic(rtwdev->hw, 6738 rtw89_mac_pkt_drop_vif_iter, 6739 rtwvif); 6740 } 6741 6742 int rtw89_mac_ptk_drop_by_band_and_wait(struct rtw89_dev *rtwdev, 6743 enum rtw89_mac_idx band) 6744 { 6745 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6746 struct rtw89_pkt_drop_params params = {0}; 6747 bool empty; 6748 int i, ret = 0, try_cnt = 3; 6749 6750 params.mac_band = band; 6751 params.sel = RTW89_PKT_DROP_SEL_BAND_ONCE; 6752 6753 for (i = 0; i < try_cnt; i++) { 6754 ret = read_poll_timeout(mac->is_txq_empty, empty, empty, 50, 6755 50000, false, rtwdev); 6756 if (ret && !RTW89_CHK_FW_FEATURE(NO_PACKET_DROP, &rtwdev->fw)) 6757 rtw89_fw_h2c_pkt_drop(rtwdev, &params); 6758 else 6759 return 0; 6760 } 6761 return ret; 6762 } 6763 6764 int rtw89_mac_cpu_io_rx(struct rtw89_dev *rtwdev, bool wow_enable) 6765 { 6766 struct rtw89_mac_h2c_info h2c_info = {}; 6767 struct rtw89_mac_c2h_info c2h_info = {}; 6768 u32 ret; 6769 6770 if (RTW89_CHK_FW_FEATURE(NO_WOW_CPU_IO_RX, &rtwdev->fw)) 6771 return 0; 6772 6773 h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_WOW_CPUIO_RX_CTRL; 6774 h2c_info.content_len = sizeof(h2c_info.u.hdr); 6775 h2c_info.u.hdr.w0 = u32_encode_bits(wow_enable, RTW89_H2CREG_WOW_CPUIO_RX_CTRL_EN); 6776 6777 ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, &c2h_info); 6778 if (ret) 6779 return ret; 6780 6781 if (c2h_info.id != RTW89_FWCMD_C2HREG_FUNC_WOW_CPUIO_RX_ACK) 6782 ret = -EINVAL; 6783 6784 return ret; 6785 } 6786 6787 static int rtw89_wow_config_mac_ax(struct rtw89_dev *rtwdev, bool enable_wow) 6788 { 6789 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6790 const struct rtw89_chip_info *chip = rtwdev->chip; 6791 int ret; 6792 6793 if (enable_wow) { 6794 ret = rtw89_mac_resize_ple_rx_quota(rtwdev, true); 6795 if (ret) { 6796 rtw89_err(rtwdev, "[ERR]patch rx qta %d\n", ret); 6797 return ret; 6798 } 6799 6800 rtw89_write32_set(rtwdev, R_AX_RX_FUNCTION_STOP, B_AX_HDR_RX_STOP); 6801 rtw89_mac_cpu_io_rx(rtwdev, enable_wow); 6802 rtw89_write32_clr(rtwdev, mac->rx_fltr, B_AX_SNIFFER_MODE); 6803 rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0,
false); 6804 rtw89_write32(rtwdev, R_AX_ACTION_FWD0, 0); 6805 rtw89_write32(rtwdev, R_AX_ACTION_FWD1, 0); 6806 rtw89_write32(rtwdev, R_AX_TF_FWD, 0); 6807 rtw89_write32(rtwdev, R_AX_HW_RPT_FWD, 0); 6808 6809 if (RTW89_CHK_FW_FEATURE(NO_WOW_CPU_IO_RX, &rtwdev->fw)) 6810 return 0; 6811 6812 if (chip->chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) 6813 rtw89_write8(rtwdev, R_BE_DBG_WOW_READY, WOWLAN_NOT_READY); 6814 else 6815 rtw89_write32_set(rtwdev, R_AX_DBG_WOW, 6816 B_AX_DBG_WOW_CPU_IO_RX_EN); 6817 } else { 6818 ret = rtw89_mac_resize_ple_rx_quota(rtwdev, false); 6819 if (ret) { 6820 rtw89_err(rtwdev, "[ERR]patch rx qta %d\n", ret); 6821 return ret; 6822 } 6823 6824 rtw89_mac_cpu_io_rx(rtwdev, enable_wow); 6825 rtw89_write32_clr(rtwdev, R_AX_RX_FUNCTION_STOP, B_AX_HDR_RX_STOP); 6826 rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true); 6827 rtw89_write32(rtwdev, R_AX_ACTION_FWD0, TRXCFG_MPDU_PROC_ACT_FRWD); 6828 rtw89_write32(rtwdev, R_AX_TF_FWD, TRXCFG_MPDU_PROC_TF_FRWD); 6829 } 6830 6831 return 0; 6832 } 6833 6834 static u8 rtw89_fw_get_rdy_ax(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type) 6835 { 6836 u8 val = rtw89_read8(rtwdev, R_AX_WCPU_FW_CTRL); 6837 6838 return FIELD_GET(B_AX_WCPU_FWDL_STS_MASK, val); 6839 } 6840 6841 static 6842 int rtw89_fwdl_check_path_ready_ax(struct rtw89_dev *rtwdev, 6843 bool h2c_or_fwdl) 6844 { 6845 u8 check = h2c_or_fwdl ? B_AX_H2C_PATH_RDY : B_AX_FWDL_PATH_RDY; 6846 u8 val; 6847 6848 return read_poll_timeout_atomic(rtw89_read8, val, val & check, 6849 1, FWDL_WAIT_CNT, false, 6850 rtwdev, R_AX_WCPU_FW_CTRL); 6851 } 6852 6853 static 6854 void rtw89_fwdl_secure_idmem_share_mode_ax(struct rtw89_dev *rtwdev, u8 mode) 6855 { 6856 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 6857 6858 if (!sec->secure_boot) 6859 return; 6860 6861 rtw89_write32_mask(rtwdev, R_AX_WCPU_FW_CTRL, 6862 B_AX_IDMEM_SHARE_MODE_RECORD_MASK, mode); 6863 rtw89_write32_set(rtwdev, R_AX_WCPU_FW_CTRL, 6864 B_AX_IDMEM_SHARE_MODE_RECORD_VALID); 6865 } 6866 6867 const struct rtw89_mac_gen_def rtw89_mac_gen_ax = { 6868 .band1_offset = RTW89_MAC_AX_BAND_REG_OFFSET, 6869 .filter_model_addr = R_AX_FILTER_MODEL_ADDR, 6870 .indir_access_addr = R_AX_INDIR_ACCESS_ENTRY, 6871 .mem_base_addrs = rtw89_mac_mem_base_addrs_ax, 6872 .rx_fltr = R_AX_RX_FLTR_OPT, 6873 .port_base = &rtw89_port_base_ax, 6874 .agg_len_ht = R_AX_AGG_LEN_HT_0, 6875 .ps_status = R_AX_PPWRBIT_SETTING, 6876 6877 .muedca_ctrl = { 6878 .addr = R_AX_MUEDCA_EN, 6879 .mask = B_AX_MUEDCA_EN_0 | B_AX_SET_MUEDCATIMER_TF_0, 6880 }, 6881 .bfee_ctrl = { 6882 .addr = R_AX_BFMEE_RESP_OPTION, 6883 .mask = B_AX_BFMEE_HT_NDPA_EN | B_AX_BFMEE_VHT_NDPA_EN | 6884 B_AX_BFMEE_HE_NDPA_EN, 6885 }, 6886 .narrow_bw_ru_dis = { 6887 .addr = R_AX_RXTRIG_TEST_USER_2, 6888 .mask = B_AX_RXTRIG_RU26_DIS, 6889 }, 6890 .wow_ctrl = {.addr = R_AX_WOW_CTRL, .mask = B_AX_WOW_WOWEN,}, 6891 .agg_limit = {.addr = R_AX_AMPDU_AGG_LIMIT, .mask = B_AX_AMPDU_MAX_TIME_MASK,}, 6892 .txcnt_limit = {.addr = R_AX_TXCNT, .mask = B_AX_L_TXCNT_LMT_MASK,}, 6893 6894 .check_mac_en = rtw89_mac_check_mac_en_ax, 6895 .sys_init = sys_init_ax, 6896 .trx_init = trx_init_ax, 6897 .hci_func_en = rtw89_mac_hci_func_en_ax, 6898 .dmac_func_pre_en = rtw89_mac_dmac_func_pre_en_ax, 6899 .dle_func_en = dle_func_en_ax, 6900 .dle_clk_en = dle_clk_en_ax, 6901 .bf_assoc = rtw89_mac_bf_assoc_ax, 6902 6903 .typ_fltr_opt = rtw89_mac_typ_fltr_opt_ax, 6904 .cfg_ppdu_status = rtw89_mac_cfg_ppdu_status_ax, 6905 .cfg_phy_rpt = NULL, 6906 6907 .dle_mix_cfg = dle_mix_cfg_ax, 6908 .chk_dle_rdy = 
chk_dle_rdy_ax, 6909 .dle_buf_req = dle_buf_req_ax, 6910 .hfc_func_en = hfc_func_en_ax, 6911 .hfc_h2c_cfg = hfc_h2c_cfg_ax, 6912 .hfc_mix_cfg = hfc_mix_cfg_ax, 6913 .hfc_get_mix_info = hfc_get_mix_info_ax, 6914 .wde_quota_cfg = wde_quota_cfg_ax, 6915 .ple_quota_cfg = ple_quota_cfg_ax, 6916 .set_cpuio = set_cpuio_ax, 6917 .dle_quota_change = dle_quota_change_ax, 6918 6919 .disable_cpu = rtw89_mac_disable_cpu_ax, 6920 .fwdl_enable_wcpu = rtw89_mac_enable_cpu_ax, 6921 .fwdl_get_status = rtw89_fw_get_rdy_ax, 6922 .fwdl_check_path_ready = rtw89_fwdl_check_path_ready_ax, 6923 .fwdl_secure_idmem_share_mode = rtw89_fwdl_secure_idmem_share_mode_ax, 6924 .parse_efuse_map = rtw89_parse_efuse_map_ax, 6925 .parse_phycap_map = rtw89_parse_phycap_map_ax, 6926 .cnv_efuse_state = rtw89_cnv_efuse_state_ax, 6927 .efuse_read_fw_secure = rtw89_efuse_read_fw_secure_ax, 6928 6929 .cfg_plt = rtw89_mac_cfg_plt_ax, 6930 .get_plt_cnt = rtw89_mac_get_plt_cnt_ax, 6931 6932 .get_txpwr_cr = rtw89_mac_get_txpwr_cr_ax, 6933 6934 .write_xtal_si = rtw89_mac_write_xtal_si_ax, 6935 .read_xtal_si = rtw89_mac_read_xtal_si_ax, 6936 6937 .dump_qta_lost = rtw89_mac_dump_qta_lost_ax, 6938 .dump_err_status = rtw89_mac_dump_err_status_ax, 6939 6940 .is_txq_empty = mac_is_txq_empty_ax, 6941 6942 .prep_chan_list = rtw89_hw_scan_prep_chan_list_ax, 6943 .free_chan_list = rtw89_hw_scan_free_chan_list_ax, 6944 .add_chan_list = rtw89_hw_scan_add_chan_list_ax, 6945 .add_chan_list_pno = rtw89_pno_scan_add_chan_list_ax, 6946 .scan_offload = rtw89_fw_h2c_scan_offload_ax, 6947 6948 .wow_config_mac = rtw89_wow_config_mac_ax, 6949 }; 6950 EXPORT_SYMBOL(rtw89_mac_gen_ax); 6951
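/* Editor's note (illustrative sketch, not part of the driver): the C2H
 * handlers above complete a rtw89_wait_info keyed by a "cond" value, e.g.
 * RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_REQ_TSF) in
 * rtw89_mac_c2h_mcc_tsf_rpt(). A hypothetical requester built on the same
 * pattern would look roughly like the snippet below; the function name
 * mcc_request_tsf_example() and the omitted H2C send step are assumptions
 * for illustration, and the exact wait helper follows the driver's own
 * rtw89_wait_for_cond()/rtw89_complete_cond() pairing.
 *
 *	static int mcc_request_tsf_example(struct rtw89_dev *rtwdev, u8 group,
 *					   struct rtw89_mac_mcc_tsf_rpt *rpt)
 *	{
 *		struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
 *		unsigned int cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_REQ_TSF);
 *		int ret;
 *
 *		// send the MCC TSF request H2C here, then block until the
 *		// firmware's MCC_TSF_RPT C2H completes the same cond value
 *		ret = rtw89_wait_for_cond(wait, cond);
 *		if (ret)
 *			return ret;
 *
 *		// the handler filled wait->data.buf with rtw89_mac_mcc_tsf_rpt
 *		*rpt = *(struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf;
 *		return 0;
 *	}
 */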