1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2019-2020 Realtek Corporation
3 */
4
5 #include "cam.h"
6 #include "chan.h"
7 #include "coex.h"
8 #include "debug.h"
9 #include "fw.h"
10 #include "mac.h"
11 #include "phy.h"
12 #include "ps.h"
13 #include "reg.h"
14 #include "util.h"
15
16 union rtw89_fw_element_arg {
17 size_t offset;
18 enum rtw89_rf_path rf_path;
19 enum rtw89_fw_type fw_type;
20 };
21
22 struct rtw89_fw_element_handler {
23 int (*fn)(struct rtw89_dev *rtwdev,
24 const struct rtw89_fw_element_hdr *elm,
25 const union rtw89_fw_element_arg arg);
26 const union rtw89_fw_element_arg arg;
27 const char *name;
28 };
29
30 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
31 struct sk_buff *skb);
32 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
33 struct rtw89_wait_info *wait, unsigned int cond);
34
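/* Allocate an skb for an H2C command. Space for the chip's H2C descriptor,
 * and optionally the H2C header, is reserved up front so callers only need
 * to fill the payload.
 */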
35 static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len,
36 bool header)
37 {
38 struct sk_buff *skb;
39 u32 header_len = 0;
40 u32 h2c_desc_size = rtwdev->chip->h2c_desc_size;
41
42 if (header)
43 header_len = H2C_HEADER_LEN;
44
45 skb = dev_alloc_skb(len + header_len + h2c_desc_size);
46 if (!skb)
47 return NULL;
48 skb_reserve(skb, header_len + h2c_desc_size);
49 memset(skb->data, 0, len);
50
51 return skb;
52 }
53
54 struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len)
55 {
56 return rtw89_fw_h2c_alloc_skb(rtwdev, len, true);
57 }
58
59 struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len)
60 {
61 return rtw89_fw_h2c_alloc_skb(rtwdev, len, false);
62 }
63
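/* Poll the firmware-download status until the WCPU reports init-ready,
 * translating the known failure codes into errors and setting
 * RTW89_FLAG_FW_RDY on success.
 */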
64 int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type)
65 {
66 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
67 u8 val;
68 int ret;
69
70 ret = read_poll_timeout_atomic(mac->fwdl_get_status, val,
71 val == RTW89_FWDL_WCPU_FW_INIT_RDY,
72 1, FWDL_WAIT_CNT, false, rtwdev, type);
73 if (ret) {
74 switch (val) {
75 case RTW89_FWDL_CHECKSUM_FAIL:
76 rtw89_err(rtwdev, "fw checksum fail\n");
77 return -EINVAL;
78
79 case RTW89_FWDL_SECURITY_FAIL:
80 rtw89_err(rtwdev, "fw security fail\n");
81 return -EINVAL;
82
83 case RTW89_FWDL_CV_NOT_MATCH:
84 rtw89_err(rtwdev, "fw cv not match\n");
85 return -EINVAL;
86
87 default:
88 rtw89_err(rtwdev, "fw unexpected status %d\n", val);
89 return -EBUSY;
90 }
91 }
92
93 set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);
94
95 return 0;
96 }
97
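/* Parse a v0 firmware header: record header and per-section download info,
 * then check that the sections plus any security signatures exactly cover
 * the firmware binary.
 */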
98 static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
99 struct rtw89_fw_bin_info *info)
100 {
101 const struct rtw89_fw_hdr *fw_hdr = (const struct rtw89_fw_hdr *)fw;
102 struct rtw89_fw_hdr_section_info *section_info;
103 const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
104 const struct rtw89_fw_hdr_section *section;
105 const u8 *fw_end = fw + len;
106 const u8 *bin;
107 u32 base_hdr_len;
108 u32 mssc_len = 0;
109 u32 i;
110
111 if (!info)
112 return -EINVAL;
113
114 info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_W6_SEC_NUM);
115 base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
116 info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_W7_DYN_HDR);
117
118 if (info->dynamic_hdr_en) {
119 info->hdr_len = le32_get_bits(fw_hdr->w3, FW_HDR_W3_LEN);
120 info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
121 fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
122 if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
123 rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
124 return -EINVAL;
125 }
126 } else {
127 info->hdr_len = base_hdr_len;
128 info->dynamic_hdr_len = 0;
129 }
130
131 bin = fw + info->hdr_len;
132
133 /* jump to section header */
134 section_info = info->section_info;
135 for (i = 0; i < info->section_num; i++) {
136 section = &fw_hdr->sections[i];
137 section_info->type =
138 le32_get_bits(section->w1, FWSECTION_HDR_W1_SECTIONTYPE);
139 if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
140 section_info->mssc =
141 le32_get_bits(section->w2, FWSECTION_HDR_W2_MSSC);
142 mssc_len += section_info->mssc * FWDL_SECURITY_SIGLEN;
143 } else {
144 section_info->mssc = 0;
145 }
146
147 section_info->len = le32_get_bits(section->w1, FWSECTION_HDR_W1_SEC_SIZE);
148 if (le32_get_bits(section->w1, FWSECTION_HDR_W1_CHECKSUM))
149 section_info->len += FWDL_SECTION_CHKSUM_LEN;
150 section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_W1_REDL);
151 section_info->dladdr =
152 le32_get_bits(section->w0, FWSECTION_HDR_W0_DL_ADDR) & 0x1fffffff;
153 section_info->addr = bin;
154 bin += section_info->len;
155 section_info++;
156 }
157
158 if (fw_end != bin + mssc_len) {
159 rtw89_err(rtwdev, "[ERR]fw bin size\n");
160 return -EINVAL;
161 }
162
163 return 0;
164 }
165
166 static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
167 struct rtw89_fw_bin_info *info)
168 {
169 const struct rtw89_fw_hdr_v1 *fw_hdr = (const struct rtw89_fw_hdr_v1 *)fw;
170 struct rtw89_fw_hdr_section_info *section_info;
171 const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
172 const struct rtw89_fw_hdr_section_v1 *section;
173 const u8 *fw_end = fw + len;
174 const u8 *bin;
175 u32 base_hdr_len;
176 u32 mssc_len = 0;
177 u32 i;
178
179 info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_SEC_NUM);
180 base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
181 info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_DYN_HDR);
182
183 if (info->dynamic_hdr_en) {
184 info->hdr_len = le32_get_bits(fw_hdr->w5, FW_HDR_V1_W5_HDR_SIZE);
185 info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
186 fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
187 if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
188 rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
189 return -EINVAL;
190 }
191 } else {
192 info->hdr_len = base_hdr_len;
193 info->dynamic_hdr_len = 0;
194 }
195
196 bin = fw + info->hdr_len;
197
198 /* jump to section header */
199 section_info = info->section_info;
200 for (i = 0; i < info->section_num; i++) {
201 section = &fw_hdr->sections[i];
202 section_info->type =
203 le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SECTIONTYPE);
204 if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
205 section_info->mssc =
206 le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC);
207 mssc_len += section_info->mssc * FWDL_SECURITY_SIGLEN;
208 } else {
209 section_info->mssc = 0;
210 }
211
212 section_info->len =
213 le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SEC_SIZE);
214 if (le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_CHECKSUM))
215 section_info->len += FWDL_SECTION_CHKSUM_LEN;
216 section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_REDL);
217 section_info->dladdr =
218 le32_get_bits(section->w0, FWSECTION_HDR_V1_W0_DL_ADDR);
219 section_info->addr = bin;
220 bin += section_info->len;
221 section_info++;
222 }
223
224 if (fw_end != bin + mssc_len) {
225 rtw89_err(rtwdev, "[ERR]fw bin size\n");
226 return -EINVAL;
227 }
228
229 return 0;
230 }
231
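/* Dispatch to the parser matching the header version of the firmware suit. */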
232 static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev,
233 const struct rtw89_fw_suit *fw_suit,
234 struct rtw89_fw_bin_info *info)
235 {
236 const u8 *fw = fw_suit->data;
237 u32 len = fw_suit->size;
238
239 if (!fw || !len) {
240 rtw89_err(rtwdev, "fw type %d isn't recognized\n", fw_suit->type);
241 return -ENOENT;
242 }
243
244 switch (fw_suit->hdr_ver) {
245 case 0:
246 return rtw89_fw_hdr_parser_v0(rtwdev, fw, len, info);
247 case 1:
248 return rtw89_fw_hdr_parser_v1(rtwdev, fw, len, info);
249 default:
250 return -ENOENT;
251 }
252 }
253
254 static
255 int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
256 struct rtw89_fw_suit *fw_suit, bool nowarn)
257 {
258 struct rtw89_fw_info *fw_info = &rtwdev->fw;
259 const struct firmware *firmware = fw_info->req.firmware;
260 const u8 *mfw = firmware->data;
261 u32 mfw_len = firmware->size;
262 const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw;
263 const struct rtw89_mfw_info *mfw_info;
264 int i;
265
266 if (mfw_hdr->sig != RTW89_MFW_SIG) {
267 rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n");
268 		/* legacy firmware supports the normal type only */
269 if (type != RTW89_FW_NORMAL)
270 return -EINVAL;
271 fw_suit->data = mfw;
272 fw_suit->size = mfw_len;
273 return 0;
274 }
275
276 for (i = 0; i < mfw_hdr->fw_nr; i++) {
277 mfw_info = &mfw_hdr->info[i];
278 if (mfw_info->type == type) {
279 if (mfw_info->cv == rtwdev->hal.cv && !mfw_info->mp)
280 goto found;
281 if (type == RTW89_FW_LOGFMT)
282 goto found;
283 }
284 }
285
286 if (!nowarn)
287 rtw89_err(rtwdev, "no suitable firmware found\n");
288 return -ENOENT;
289
290 found:
291 fw_suit->data = mfw + le32_to_cpu(mfw_info->shift);
292 fw_suit->size = le32_to_cpu(mfw_info->size);
293 return 0;
294 }
295
296 static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev)
297 {
298 struct rtw89_fw_info *fw_info = &rtwdev->fw;
299 const struct firmware *firmware = fw_info->req.firmware;
300 const struct rtw89_mfw_hdr *mfw_hdr =
301 (const struct rtw89_mfw_hdr *)firmware->data;
302 const struct rtw89_mfw_info *mfw_info;
303 u32 size;
304
305 if (mfw_hdr->sig != RTW89_MFW_SIG) {
306 rtw89_warn(rtwdev, "not mfw format\n");
307 return 0;
308 }
309
310 mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1];
311 size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size);
312
313 return size;
314 }
315
316 static void rtw89_fw_update_ver_v0(struct rtw89_dev *rtwdev,
317 struct rtw89_fw_suit *fw_suit,
318 const struct rtw89_fw_hdr *hdr)
319 {
320 fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MAJOR_VERSION);
321 fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MINOR_VERSION);
322 fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_W1_SUBVERSION);
323 fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_W1_SUBINDEX);
324 fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_W2_COMMITID);
325 fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_W5_YEAR);
326 fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_W4_MONTH);
327 fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_W4_DATE);
328 fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_W4_HOUR);
329 fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_W4_MIN);
330 fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_W7_CMD_VERSERION);
331 }
332
333 static void rtw89_fw_update_ver_v1(struct rtw89_dev *rtwdev,
334 struct rtw89_fw_suit *fw_suit,
335 const struct rtw89_fw_hdr_v1 *hdr)
336 {
337 fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MAJOR_VERSION);
338 fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MINOR_VERSION);
339 fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBVERSION);
340 fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBINDEX);
341 fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_V1_W2_COMMITID);
342 fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_V1_W5_YEAR);
343 fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MONTH);
344 fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_V1_W4_DATE);
345 fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_V1_W4_HOUR);
346 fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MIN);
347 fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_V1_W3_CMD_VERSERION);
348 }
349
350 static int rtw89_fw_update_ver(struct rtw89_dev *rtwdev,
351 enum rtw89_fw_type type,
352 struct rtw89_fw_suit *fw_suit)
353 {
354 const struct rtw89_fw_hdr *v0 = (const struct rtw89_fw_hdr *)fw_suit->data;
355 const struct rtw89_fw_hdr_v1 *v1 = (const struct rtw89_fw_hdr_v1 *)fw_suit->data;
356
357 if (type == RTW89_FW_LOGFMT)
358 return 0;
359
360 fw_suit->type = type;
361 fw_suit->hdr_ver = le32_get_bits(v0->w3, FW_HDR_W3_HDR_VER);
362
363 switch (fw_suit->hdr_ver) {
364 case 0:
365 rtw89_fw_update_ver_v0(rtwdev, fw_suit, v0);
366 break;
367 case 1:
368 rtw89_fw_update_ver_v1(rtwdev, fw_suit, v1);
369 break;
370 default:
371 rtw89_err(rtwdev, "Unknown firmware header version %u\n",
372 fw_suit->hdr_ver);
373 return -ENOENT;
374 }
375
376 rtw89_info(rtwdev,
377 "Firmware version %u.%u.%u.%u (%08x), cmd version %u, type %u\n",
378 fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver,
379 fw_suit->sub_idex, fw_suit->commitid, fw_suit->cmd_ver, type);
380
381 return 0;
382 }
383
384 static
385 int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
386 bool nowarn)
387 {
388 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
389 int ret;
390
391 ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn);
392 if (ret)
393 return ret;
394
395 return rtw89_fw_update_ver(rtwdev, type, fw_suit);
396 }
397
398 static
399 int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev,
400 const struct rtw89_fw_element_hdr *elm,
401 const union rtw89_fw_element_arg arg)
402 {
403 enum rtw89_fw_type type = arg.fw_type;
404 struct rtw89_hal *hal = &rtwdev->hal;
405 struct rtw89_fw_suit *fw_suit;
406
407 if (hal->cv != elm->u.bbmcu.cv)
408 return 1; /* ignore this element */
409
410 fw_suit = rtw89_fw_suit_get(rtwdev, type);
411 fw_suit->data = elm->u.bbmcu.contents;
412 fw_suit->size = le32_to_cpu(elm->size);
413
414 return rtw89_fw_update_ver(rtwdev, type, fw_suit);
415 }
416
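/* Comparators used to match a firmware suit version code against a reference
 * version code; one helper is generated per operator below.
 */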
417 #define __DEF_FW_FEAT_COND(__cond, __op) \
418 static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \
419 { \
420 return suit_ver_code __op comp_ver_code; \
421 }
422
423 __DEF_FW_FEAT_COND(ge, >=); /* greater or equal */
424 __DEF_FW_FEAT_COND(le, <=); /* less or equal */
425 __DEF_FW_FEAT_COND(lt, <); /* less than */
426
427 struct __fw_feat_cfg {
428 enum rtw89_core_chip_id chip_id;
429 enum rtw89_fw_feature feature;
430 u32 ver_code;
431 bool (*cond)(u32 suit_ver_code, u32 comp_ver_code);
432 };
433
434 #define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \
435 { \
436 .chip_id = _chip, \
437 .feature = RTW89_FW_FEATURE_ ## _feat, \
438 .ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \
439 .cond = __fw_feat_cond_ ## _cond, \
440 }
441
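/* Per-chip firmware feature table: an entry applies when its chip matches
 * and its comparator accepts the running firmware version.
 */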
442 static const struct __fw_feat_cfg fw_feat_tbl[] = {
443 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, TX_WAKE),
444 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, SCAN_OFFLOAD),
445 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 41, 0, CRASH_TRIGGER),
446 __CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT),
447 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD),
448 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE),
449 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER),
450 __CFG_FW_FEAT(RTL8852A, lt, 0, 13, 38, 0, NO_PACKET_DROP),
451 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG),
452 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE),
453 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER),
454 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD),
455 __CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
456 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
457 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
458 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
459 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER),
460 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER),
461 };
462
463 static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
464 const struct rtw89_chip_info *chip,
465 u32 ver_code)
466 {
467 int i;
468
469 for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
470 const struct __fw_feat_cfg *ent = &fw_feat_tbl[i];
471
472 if (chip->chip_id != ent->chip_id)
473 continue;
474
475 if (ent->cond(ver_code, ent->ver_code))
476 RTW89_SET_FW_FEATURE(ent->feature, fw);
477 }
478 }
479
480 static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
481 {
482 const struct rtw89_chip_info *chip = rtwdev->chip;
483 const struct rtw89_fw_suit *fw_suit;
484 u32 suit_ver_code;
485
486 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
487 suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);
488
489 rtw89_fw_iterate_feature_cfg(&rtwdev->fw, chip, suit_ver_code);
490 }
491
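/* Request the newest available firmware format for the chip before probe
 * completes so version-dependent features can be recognized early; the
 * caller owns the returned firmware reference.
 */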
492 const struct firmware *
493 rtw89_early_fw_feature_recognize(struct device *device,
494 const struct rtw89_chip_info *chip,
495 struct rtw89_fw_info *early_fw,
496 int *used_fw_format)
497 {
498 const struct firmware *firmware;
499 char fw_name[64];
500 int fw_format;
501 u32 ver_code;
502 int ret;
503
504 for (fw_format = chip->fw_format_max; fw_format >= 0; fw_format--) {
505 rtw89_fw_get_filename(fw_name, sizeof(fw_name),
506 chip->fw_basename, fw_format);
507
508 ret = request_firmware(&firmware, fw_name, device);
509 if (!ret) {
510 dev_info(device, "loaded firmware %s\n", fw_name);
511 *used_fw_format = fw_format;
512 break;
513 }
514 }
515
516 if (ret) {
517 dev_err(device, "failed to early request firmware: %d\n", ret);
518 return NULL;
519 }
520
521 ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data);
522
523 if (!ver_code)
524 goto out;
525
526 rtw89_fw_iterate_feature_cfg(early_fw, chip, ver_code);
527
528 out:
529 return firmware;
530 }
531
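/* Select the firmware suits to use: prefer the CE variant when the chip
 * allows it, fall back to the normal one, opportunistically pick up the
 * wowlan and log-format images, then derive features and the coex version.
 */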
532 int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
533 {
534 const struct rtw89_chip_info *chip = rtwdev->chip;
535 int ret;
536
537 if (chip->try_ce_fw) {
538 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true);
539 if (!ret)
540 goto normal_done;
541 }
542
543 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false);
544 if (ret)
545 return ret;
546
547 normal_done:
548 	/* It still works if the wowlan firmware doesn't exist. */
549 __rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false);
550
551 	/* It still works if the log format file doesn't exist. */
552 __rtw89_fw_recognize(rtwdev, RTW89_FW_LOGFMT, true);
553
554 rtw89_fw_recognize_features(rtwdev);
555
556 rtw89_coex_recognize_ver(rtwdev);
557
558 return 0;
559 }
560
561 static
562 int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev,
563 const struct rtw89_fw_element_hdr *elm,
564 const union rtw89_fw_element_arg arg)
565 {
566 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
567 struct rtw89_phy_table *tbl;
568 struct rtw89_reg2_def *regs;
569 enum rtw89_rf_path rf_path;
570 u32 n_regs, i;
571 u8 idx;
572
573 tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
574 if (!tbl)
575 return -ENOMEM;
576
577 switch (le32_to_cpu(elm->id)) {
578 case RTW89_FW_ELEMENT_ID_BB_REG:
579 elm_info->bb_tbl = tbl;
580 break;
581 case RTW89_FW_ELEMENT_ID_BB_GAIN:
582 elm_info->bb_gain = tbl;
583 break;
584 case RTW89_FW_ELEMENT_ID_RADIO_A:
585 case RTW89_FW_ELEMENT_ID_RADIO_B:
586 case RTW89_FW_ELEMENT_ID_RADIO_C:
587 case RTW89_FW_ELEMENT_ID_RADIO_D:
588 rf_path = arg.rf_path;
589 idx = elm->u.reg2.idx;
590
591 elm_info->rf_radio[idx] = tbl;
592 tbl->rf_path = rf_path;
593 tbl->config = rtw89_phy_config_rf_reg_v1;
594 break;
595 case RTW89_FW_ELEMENT_ID_RF_NCTL:
596 elm_info->rf_nctl = tbl;
597 break;
598 default:
599 kfree(tbl);
600 return -ENOENT;
601 }
602
603 n_regs = le32_to_cpu(elm->size) / sizeof(tbl->regs[0]);
604 regs = kcalloc(n_regs, sizeof(tbl->regs[0]), GFP_KERNEL);
605 if (!regs)
606 goto out;
607
608 for (i = 0; i < n_regs; i++) {
609 regs[i].addr = le32_to_cpu(elm->u.reg2.regs[i].addr);
610 regs[i].data = le32_to_cpu(elm->u.reg2.regs[i].data);
611 }
612
613 tbl->n_regs = n_regs;
614 tbl->regs = regs;
615
616 return 0;
617
618 out:
619 kfree(tbl);
620 return -ENOMEM;
621 }
622
623 static
624 int rtw89_fw_recognize_txpwr_from_elm(struct rtw89_dev *rtwdev,
625 const struct rtw89_fw_element_hdr *elm,
626 const union rtw89_fw_element_arg arg)
627 {
628 const struct __rtw89_fw_txpwr_element *txpwr_elm = &elm->u.txpwr;
629 const unsigned long offset = arg.offset;
630 struct rtw89_efuse *efuse = &rtwdev->efuse;
631 struct rtw89_txpwr_conf *conf;
632
633 if (!rtwdev->rfe_data) {
634 rtwdev->rfe_data = kzalloc(sizeof(*rtwdev->rfe_data), GFP_KERNEL);
635 if (!rtwdev->rfe_data)
636 return -ENOMEM;
637 }
638
639 conf = (void *)rtwdev->rfe_data + offset;
640
641 	/* if multiple entries match, the last one takes effect */
642 if (txpwr_elm->rfe_type == efuse->rfe_type)
643 goto setup;
644
645 	/* if none matched, accept the default */
646 if (txpwr_elm->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE &&
647 (!rtw89_txpwr_conf_valid(conf) ||
648 conf->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE))
649 goto setup;
650
651 rtw89_debug(rtwdev, RTW89_DBG_FW, "skip txpwr element ID %u RFE %u\n",
652 elm->id, txpwr_elm->rfe_type);
653 return 0;
654
655 setup:
656 rtw89_debug(rtwdev, RTW89_DBG_FW, "take txpwr element ID %u RFE %u\n",
657 elm->id, txpwr_elm->rfe_type);
658
659 conf->rfe_type = txpwr_elm->rfe_type;
660 conf->ent_sz = txpwr_elm->ent_sz;
661 conf->num_ents = le32_to_cpu(txpwr_elm->num_ents);
662 conf->data = txpwr_elm->content;
663 return 0;
664 }
665
666 static
667 int rtw89_build_txpwr_trk_tbl_from_elm(struct rtw89_dev *rtwdev,
668 const struct rtw89_fw_element_hdr *elm,
669 const union rtw89_fw_element_arg arg)
670 {
671 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
672 const struct rtw89_chip_info *chip = rtwdev->chip;
673 u32 needed_bitmap = 0;
674 u32 offset = 0;
675 int subband;
676 u32 bitmap;
677 int type;
678
679 if (chip->support_bands & BIT(NL80211_BAND_6GHZ))
680 needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_6GHZ;
681 if (chip->support_bands & BIT(NL80211_BAND_5GHZ))
682 needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_5GHZ;
683 if (chip->support_bands & BIT(NL80211_BAND_2GHZ))
684 needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_2GHZ;
685
686 bitmap = le32_to_cpu(elm->u.txpwr_trk.bitmap);
687
688 if ((bitmap & needed_bitmap) != needed_bitmap) {
689 		rtw89_warn(rtwdev, "needed txpwr trk bitmap %08x but %08x\n",
690 needed_bitmap, bitmap);
691 return -ENOENT;
692 }
693
694 elm_info->txpwr_trk = kzalloc(sizeof(*elm_info->txpwr_trk), GFP_KERNEL);
695 if (!elm_info->txpwr_trk)
696 return -ENOMEM;
697
698 for (type = 0; bitmap; type++, bitmap >>= 1) {
699 if (!(bitmap & BIT(0)))
700 continue;
701
702 if (type >= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_START &&
703 type <= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_MAX)
704 subband = 4;
705 else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_START &&
706 type <= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_MAX)
707 subband = 3;
708 else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_START &&
709 type <= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_MAX)
710 subband = 1;
711 else
712 break;
713
714 elm_info->txpwr_trk->delta[type] = &elm->u.txpwr_trk.contents[offset];
715
716 offset += subband;
717 if (offset * DELTA_SWINGIDX_SIZE > le32_to_cpu(elm->size))
718 goto err;
719 }
720
721 return 0;
722
723 err:
724 rtw89_warn(rtwdev, "unexpected txpwr trk offset %d over size %d\n",
725 offset, le32_to_cpu(elm->size));
726 kfree(elm_info->txpwr_trk);
727 elm_info->txpwr_trk = NULL;
728
729 return -EFAULT;
730 }
731
732 static
733 int rtw89_build_rfk_log_fmt_from_elm(struct rtw89_dev *rtwdev,
734 const struct rtw89_fw_element_hdr *elm,
735 const union rtw89_fw_element_arg arg)
736 {
737 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
738 u8 rfk_id;
739
740 if (elm_info->rfk_log_fmt)
741 goto allocated;
742
743 elm_info->rfk_log_fmt = kzalloc(sizeof(*elm_info->rfk_log_fmt), GFP_KERNEL);
744 if (!elm_info->rfk_log_fmt)
745 return 1; /* this is an optional element, so just ignore this */
746
747 allocated:
748 rfk_id = elm->u.rfk_log_fmt.rfk_id;
749 if (rfk_id >= RTW89_PHY_C2H_RFK_LOG_FUNC_NUM)
750 return 1;
751
752 elm_info->rfk_log_fmt->elm[rfk_id] = elm;
753
754 return 0;
755 }
756
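/* Element-ID-indexed dispatch table: each entry holds the handler, the
 * argument it expects (RF path, firmware type, or an offset into
 * struct rtw89_rfe_data), and an optional label printed on acceptance.
 */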
757 static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
758 [RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm,
759 { .fw_type = RTW89_FW_BBMCU0 }, NULL},
760 [RTW89_FW_ELEMENT_ID_BBMCU1] = {__rtw89_fw_recognize_from_elm,
761 { .fw_type = RTW89_FW_BBMCU1 }, NULL},
762 [RTW89_FW_ELEMENT_ID_BB_REG] = {rtw89_build_phy_tbl_from_elm, {}, "BB"},
763 [RTW89_FW_ELEMENT_ID_BB_GAIN] = {rtw89_build_phy_tbl_from_elm, {}, NULL},
764 [RTW89_FW_ELEMENT_ID_RADIO_A] = {rtw89_build_phy_tbl_from_elm,
765 { .rf_path = RF_PATH_A }, "radio A"},
766 [RTW89_FW_ELEMENT_ID_RADIO_B] = {rtw89_build_phy_tbl_from_elm,
767 { .rf_path = RF_PATH_B }, NULL},
768 [RTW89_FW_ELEMENT_ID_RADIO_C] = {rtw89_build_phy_tbl_from_elm,
769 { .rf_path = RF_PATH_C }, NULL},
770 [RTW89_FW_ELEMENT_ID_RADIO_D] = {rtw89_build_phy_tbl_from_elm,
771 { .rf_path = RF_PATH_D }, NULL},
772 [RTW89_FW_ELEMENT_ID_RF_NCTL] = {rtw89_build_phy_tbl_from_elm, {}, "NCTL"},
773 [RTW89_FW_ELEMENT_ID_TXPWR_BYRATE] = {
774 rtw89_fw_recognize_txpwr_from_elm,
775 { .offset = offsetof(struct rtw89_rfe_data, byrate.conf) }, "TXPWR",
776 },
777 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_2GHZ] = {
778 rtw89_fw_recognize_txpwr_from_elm,
779 { .offset = offsetof(struct rtw89_rfe_data, lmt_2ghz.conf) }, NULL,
780 },
781 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_5GHZ] = {
782 rtw89_fw_recognize_txpwr_from_elm,
783 { .offset = offsetof(struct rtw89_rfe_data, lmt_5ghz.conf) }, NULL,
784 },
785 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ] = {
786 rtw89_fw_recognize_txpwr_from_elm,
787 { .offset = offsetof(struct rtw89_rfe_data, lmt_6ghz.conf) }, NULL,
788 },
789 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ] = {
790 rtw89_fw_recognize_txpwr_from_elm,
791 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_2ghz.conf) }, NULL,
792 },
793 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_5GHZ] = {
794 rtw89_fw_recognize_txpwr_from_elm,
795 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_5ghz.conf) }, NULL,
796 },
797 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ] = {
798 rtw89_fw_recognize_txpwr_from_elm,
799 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_6ghz.conf) }, NULL,
800 },
801 [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT] = {
802 rtw89_fw_recognize_txpwr_from_elm,
803 { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt.conf) }, NULL,
804 },
805 [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU] = {
806 rtw89_fw_recognize_txpwr_from_elm,
807 { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt_ru.conf) }, NULL,
808 },
809 [RTW89_FW_ELEMENT_ID_TXPWR_TRK] = {
810 rtw89_build_txpwr_trk_tbl_from_elm, {}, "PWR_TRK",
811 },
812 [RTW89_FW_ELEMENT_ID_RFKLOG_FMT] = {
813 rtw89_build_rfk_log_fmt_from_elm, {}, NULL,
814 },
815 };
816
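/* Walk the element area appended after the multi-firmware image, dispatch
 * each element to its handler, and fail if any element required by the chip
 * remains unrecognized.
 */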
817 int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev)
818 {
819 struct rtw89_fw_info *fw_info = &rtwdev->fw;
820 const struct firmware *firmware = fw_info->req.firmware;
821 const struct rtw89_chip_info *chip = rtwdev->chip;
822 u32 unrecognized_elements = chip->needed_fw_elms;
823 const struct rtw89_fw_element_handler *handler;
824 const struct rtw89_fw_element_hdr *hdr;
825 u32 elm_size;
826 u32 elem_id;
827 u32 offset;
828 int ret;
829
830 BUILD_BUG_ON(sizeof(chip->needed_fw_elms) * 8 < RTW89_FW_ELEMENT_ID_NUM);
831
832 offset = rtw89_mfw_get_size(rtwdev);
833 offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN);
834 if (offset == 0)
835 return -EINVAL;
836
837 while (offset + sizeof(*hdr) < firmware->size) {
838 hdr = (const struct rtw89_fw_element_hdr *)(firmware->data + offset);
839
840 elm_size = le32_to_cpu(hdr->size);
841 if (offset + elm_size >= firmware->size) {
842 			rtw89_warn(rtwdev, "firmware element size exceeds firmware size\n");
843 break;
844 }
845
846 elem_id = le32_to_cpu(hdr->id);
847 if (elem_id >= ARRAY_SIZE(__fw_element_handlers))
848 goto next;
849
850 handler = &__fw_element_handlers[elem_id];
851 if (!handler->fn)
852 goto next;
853
854 ret = handler->fn(rtwdev, hdr, handler->arg);
855 if (ret == 1) /* ignore this element */
856 goto next;
857 if (ret)
858 return ret;
859
860 if (handler->name)
861 rtw89_info(rtwdev, "Firmware element %s version: %4ph\n",
862 handler->name, hdr->ver);
863
864 unrecognized_elements &= ~BIT(elem_id);
865 next:
866 offset += sizeof(*hdr) + elm_size;
867 offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN);
868 }
869
870 if (unrecognized_elements) {
871 rtw89_err(rtwdev, "Firmware elements 0x%08x are unrecognized\n",
872 unrecognized_elements);
873 return -ENOENT;
874 }
875
876 return 0;
877 }
878
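/* Push the 8-byte H2C command header; every fourth sequence number forces a
 * receive-ack request.
 */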
879 void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
880 u8 type, u8 cat, u8 class, u8 func,
881 bool rack, bool dack, u32 len)
882 {
883 struct fwcmd_hdr *hdr;
884
885 hdr = (struct fwcmd_hdr *)skb_push(skb, 8);
886
887 if (!(rtwdev->fw.h2c_seq % 4))
888 rack = true;
889 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
890 FIELD_PREP(H2C_HDR_CAT, cat) |
891 FIELD_PREP(H2C_HDR_CLASS, class) |
892 FIELD_PREP(H2C_HDR_FUNC, func) |
893 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));
894
895 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
896 len + H2C_HEADER_LEN) |
897 (rack ? H2C_HDR_REC_ACK : 0) |
898 (dack ? H2C_HDR_DONE_ACK : 0));
899
900 rtwdev->fw.h2c_seq++;
901 }
902
903 static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
904 struct sk_buff *skb,
905 u8 type, u8 cat, u8 class, u8 func,
906 u32 len)
907 {
908 struct fwcmd_hdr *hdr;
909
910 hdr = (struct fwcmd_hdr *)skb_push(skb, 8);
911
912 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
913 FIELD_PREP(H2C_HDR_CAT, cat) |
914 FIELD_PREP(H2C_HDR_CLASS, class) |
915 FIELD_PREP(H2C_HDR_FUNC, func) |
916 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));
917
918 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
919 len + H2C_HEADER_LEN));
920 }
921
922 static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
923 {
924 struct sk_buff *skb;
925 	int ret = 0;
926
927 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
928 if (!skb) {
929 rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n");
930 return -ENOMEM;
931 }
932
933 skb_put_data(skb, fw, len);
934 SET_FW_HDR_PART_SIZE(skb->data, FWDL_SECTION_PER_PKT_LEN);
935 rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
936 H2C_CAT_MAC, H2C_CL_MAC_FWDL,
937 H2C_FUNC_MAC_FWHDR_DL, len);
938
939 ret = rtw89_h2c_tx(rtwdev, skb, false);
940 if (ret) {
941 rtw89_err(rtwdev, "failed to send h2c\n");
942 ret = -1;
943 goto fail;
944 }
945
946 return 0;
947 fail:
948 dev_kfree_skb_any(skb);
949
950 return ret;
951 }
952
953 static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
954 {
955 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
956 int ret;
957
958 ret = __rtw89_fw_download_hdr(rtwdev, fw, len);
959 if (ret) {
960 rtw89_err(rtwdev, "[ERR]FW header download\n");
961 return ret;
962 }
963
964 ret = mac->fwdl_check_path_ready(rtwdev, false);
965 if (ret) {
966 rtw89_err(rtwdev, "[ERR]FWDL path ready\n");
967 return ret;
968 }
969
970 rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
971 rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);
972
973 return 0;
974 }
975
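/* Stream one firmware section to the device, split into
 * FWDL_SECTION_PER_PKT_LEN-sized H2C packets.
 */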
976 static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
977 struct rtw89_fw_hdr_section_info *info)
978 {
979 struct sk_buff *skb;
980 const u8 *section = info->addr;
981 u32 residue_len = info->len;
982 u32 pkt_len;
983 int ret;
984
985 while (residue_len) {
986 if (residue_len >= FWDL_SECTION_PER_PKT_LEN)
987 pkt_len = FWDL_SECTION_PER_PKT_LEN;
988 else
989 pkt_len = residue_len;
990
991 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len);
992 if (!skb) {
993 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
994 return -ENOMEM;
995 }
996 skb_put_data(skb, section, pkt_len);
997
998 ret = rtw89_h2c_tx(rtwdev, skb, true);
999 if (ret) {
1000 rtw89_err(rtwdev, "failed to send h2c\n");
1001 ret = -1;
1002 goto fail;
1003 }
1004
1005 section += pkt_len;
1006 residue_len -= pkt_len;
1007 }
1008
1009 return 0;
1010 fail:
1011 dev_kfree_skb_any(skb);
1012
1013 return ret;
1014 }
1015
1016 static enum rtw89_fwdl_check_type
1017 rtw89_fw_get_fwdl_chk_type_from_suit(struct rtw89_dev *rtwdev,
1018 const struct rtw89_fw_suit *fw_suit)
1019 {
1020 switch (fw_suit->type) {
1021 case RTW89_FW_BBMCU0:
1022 return RTW89_FWDL_CHECK_BB0_FWDL_DONE;
1023 case RTW89_FW_BBMCU1:
1024 return RTW89_FWDL_CHECK_BB1_FWDL_DONE;
1025 default:
1026 return RTW89_FWDL_CHECK_WCPU_FWDL_DONE;
1027 }
1028 }
1029
1030 static int rtw89_fw_download_main(struct rtw89_dev *rtwdev,
1031 const struct rtw89_fw_suit *fw_suit,
1032 struct rtw89_fw_bin_info *info)
1033 {
1034 struct rtw89_fw_hdr_section_info *section_info = info->section_info;
1035 const struct rtw89_chip_info *chip = rtwdev->chip;
1036 enum rtw89_fwdl_check_type chk_type;
1037 u8 section_num = info->section_num;
1038 int ret;
1039
1040 while (section_num--) {
1041 ret = __rtw89_fw_download_main(rtwdev, section_info);
1042 if (ret)
1043 return ret;
1044 section_info++;
1045 }
1046
1047 if (chip->chip_gen == RTW89_CHIP_AX)
1048 return 0;
1049
1050 chk_type = rtw89_fw_get_fwdl_chk_type_from_suit(rtwdev, fw_suit);
1051 ret = rtw89_fw_check_rdy(rtwdev, chk_type);
1052 if (ret) {
1053 rtw89_warn(rtwdev, "failed to download firmware type %u\n",
1054 fw_suit->type);
1055 return ret;
1056 }
1057
1058 return 0;
1059 }
1060
1061 static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev)
1062 {
1063 enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
1064 u32 addr = R_AX_DBG_PORT_SEL;
1065 u32 val32;
1066 u16 index;
1067
1068 if (chip_gen == RTW89_CHIP_BE) {
1069 addr = R_BE_WLCPU_PORT_PC;
1070 goto dump;
1071 }
1072
1073 rtw89_write32(rtwdev, R_AX_DBG_CTRL,
1074 FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) |
1075 FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL));
1076 rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL);
1077
1078 dump:
1079 for (index = 0; index < 15; index++) {
1080 val32 = rtw89_read32(rtwdev, addr);
1081 rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32);
1082 fsleep(10);
1083 }
1084 }
1085
1086 static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev)
1087 {
1088 u32 val32;
1089 u16 val16;
1090
1091 val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL);
1092 rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32);
1093
1094 val16 = rtw89_read16(rtwdev, R_AX_BOOT_DBG + 2);
1095 rtw89_err(rtwdev, "[ERR]fwdl 0x83F2 = 0x%x\n", val16);
1096
1097 rtw89_fw_prog_cnt_dump(rtwdev);
1098 }
1099
1100 static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev,
1101 struct rtw89_fw_suit *fw_suit)
1102 {
1103 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
1104 struct rtw89_fw_bin_info info;
1105 int ret;
1106
1107 ret = rtw89_fw_hdr_parser(rtwdev, fw_suit, &info);
1108 if (ret) {
1109 rtw89_err(rtwdev, "parse fw header fail\n");
1110 return ret;
1111 }
1112
1113 if (rtwdev->chip->chip_id == RTL8922A &&
1114 (fw_suit->type == RTW89_FW_NORMAL || fw_suit->type == RTW89_FW_WOWLAN))
1115 rtw89_write32(rtwdev, R_BE_SECURE_BOOT_MALLOC_INFO, 0x20248000);
1116
1117 ret = mac->fwdl_check_path_ready(rtwdev, true);
1118 if (ret) {
1119 rtw89_err(rtwdev, "[ERR]H2C path ready\n");
1120 return ret;
1121 }
1122
1123 ret = rtw89_fw_download_hdr(rtwdev, fw_suit->data, info.hdr_len -
1124 info.dynamic_hdr_len);
1125 if (ret)
1126 return ret;
1127
1128 ret = rtw89_fw_download_main(rtwdev, fw_suit, &info);
1129 if (ret)
1130 return ret;
1131
1132 return 0;
1133 }
1134
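/* Full firmware download: halt the CPU, enable the WCPU for download, push
 * the requested suit (plus BB MCU images when include_bb is set), reset
 * H2C/C2H bookkeeping, and wait for the firmware to report ready.
 */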
1135 int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
1136 bool include_bb)
1137 {
1138 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
1139 struct rtw89_fw_info *fw_info = &rtwdev->fw;
1140 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
1141 u8 bbmcu_nr = rtwdev->chip->bbmcu_nr;
1142 int ret;
1143 int i;
1144
1145 mac->disable_cpu(rtwdev);
1146 ret = mac->fwdl_enable_wcpu(rtwdev, 0, true, include_bb);
1147 if (ret)
1148 return ret;
1149
1150 ret = rtw89_fw_download_suit(rtwdev, fw_suit);
1151 if (ret)
1152 goto fwdl_err;
1153
1154 for (i = 0; i < bbmcu_nr && include_bb; i++) {
1155 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_BBMCU0 + i);
1156
1157 ret = rtw89_fw_download_suit(rtwdev, fw_suit);
1158 if (ret)
1159 goto fwdl_err;
1160 }
1161
1162 fw_info->h2c_seq = 0;
1163 fw_info->rec_seq = 0;
1164 fw_info->h2c_counter = 0;
1165 fw_info->c2h_counter = 0;
1166 rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX;
1167 rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX;
1168
1169 mdelay(5);
1170
1171 ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE);
1172 if (ret) {
1173 rtw89_warn(rtwdev, "download firmware fail\n");
1174 return ret;
1175 }
1176
1177 return ret;
1178
1179 fwdl_err:
1180 rtw89_fw_dl_fail_dump(rtwdev);
1181 return ret;
1182 }
1183
1184 int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev)
1185 {
1186 struct rtw89_fw_info *fw = &rtwdev->fw;
1187
1188 wait_for_completion(&fw->req.completion);
1189 if (!fw->req.firmware)
1190 return -EINVAL;
1191
1192 return 0;
1193 }
1194
1195 static int rtw89_load_firmware_req(struct rtw89_dev *rtwdev,
1196 struct rtw89_fw_req_info *req,
1197 const char *fw_name, bool nowarn)
1198 {
1199 int ret;
1200
1201 if (req->firmware) {
1202 rtw89_debug(rtwdev, RTW89_DBG_FW,
1203 "full firmware has been early requested\n");
1204 complete_all(&req->completion);
1205 return 0;
1206 }
1207
1208 if (nowarn)
1209 ret = firmware_request_nowarn(&req->firmware, fw_name, rtwdev->dev);
1210 else
1211 ret = request_firmware(&req->firmware, fw_name, rtwdev->dev);
1212
1213 complete_all(&req->completion);
1214
1215 return ret;
1216 }
1217
1218 void rtw89_load_firmware_work(struct work_struct *work)
1219 {
1220 struct rtw89_dev *rtwdev =
1221 container_of(work, struct rtw89_dev, load_firmware_work);
1222 const struct rtw89_chip_info *chip = rtwdev->chip;
1223 char fw_name[64];
1224
1225 rtw89_fw_get_filename(fw_name, sizeof(fw_name),
1226 chip->fw_basename, rtwdev->fw.fw_format);
1227
1228 rtw89_load_firmware_req(rtwdev, &rtwdev->fw.req, fw_name, false);
1229 }
1230
1231 static void rtw89_free_phy_tbl_from_elm(struct rtw89_phy_table *tbl)
1232 {
1233 if (!tbl)
1234 return;
1235
1236 kfree(tbl->regs);
1237 kfree(tbl);
1238 }
1239
1240 static void rtw89_unload_firmware_elements(struct rtw89_dev *rtwdev)
1241 {
1242 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1243 int i;
1244
1245 rtw89_free_phy_tbl_from_elm(elm_info->bb_tbl);
1246 rtw89_free_phy_tbl_from_elm(elm_info->bb_gain);
1247 for (i = 0; i < ARRAY_SIZE(elm_info->rf_radio); i++)
1248 rtw89_free_phy_tbl_from_elm(elm_info->rf_radio[i]);
1249 rtw89_free_phy_tbl_from_elm(elm_info->rf_nctl);
1250
1251 kfree(elm_info->txpwr_trk);
1252 kfree(elm_info->rfk_log_fmt);
1253 }
1254
1255 void rtw89_unload_firmware(struct rtw89_dev *rtwdev)
1256 {
1257 struct rtw89_fw_info *fw = &rtwdev->fw;
1258
1259 cancel_work_sync(&rtwdev->load_firmware_work);
1260
1261 if (fw->req.firmware) {
1262 release_firmware(fw->req.firmware);
1263
1264 /* assign NULL back in case rtw89_free_ieee80211_hw()
1265 		 * tries to release the same one again.
1266 */
1267 fw->req.firmware = NULL;
1268 }
1269
1270 kfree(fw->log.fmts);
1271 rtw89_unload_firmware_elements(rtwdev);
1272 }
1273
1274 static u32 rtw89_fw_log_get_fmt_idx(struct rtw89_dev *rtwdev, u32 fmt_id)
1275 {
1276 struct rtw89_fw_log *fw_log = &rtwdev->fw.log;
1277 u32 i;
1278
1279 if (fmt_id > fw_log->last_fmt_id)
1280 return 0;
1281
1282 for (i = 0; i < fw_log->fmt_count; i++) {
1283 if (le32_to_cpu(fw_log->fmt_ids[i]) == fmt_id)
1284 return i;
1285 }
1286 return 0;
1287 }
1288
1289 static int rtw89_fw_log_create_fmts_dict(struct rtw89_dev *rtwdev)
1290 {
1291 struct rtw89_fw_log *log = &rtwdev->fw.log;
1292 const struct rtw89_fw_logsuit_hdr *suit_hdr;
1293 struct rtw89_fw_suit *suit = &log->suit;
1294 const void *fmts_ptr, *fmts_end_ptr;
1295 u32 fmt_count;
1296 int i;
1297
1298 suit_hdr = (const struct rtw89_fw_logsuit_hdr *)suit->data;
1299 fmt_count = le32_to_cpu(suit_hdr->count);
1300 log->fmt_ids = suit_hdr->ids;
1301 fmts_ptr = &suit_hdr->ids[fmt_count];
1302 fmts_end_ptr = suit->data + suit->size;
1303 log->fmts = kcalloc(fmt_count, sizeof(char *), GFP_KERNEL);
1304 if (!log->fmts)
1305 return -ENOMEM;
1306
1307 for (i = 0; i < fmt_count; i++) {
1308 fmts_ptr = memchr_inv(fmts_ptr, 0, fmts_end_ptr - fmts_ptr);
1309 if (!fmts_ptr)
1310 break;
1311
1312 (*log->fmts)[i] = fmts_ptr;
1313 log->last_fmt_id = le32_to_cpu(log->fmt_ids[i]);
1314 log->fmt_count++;
1315 fmts_ptr += strlen(fmts_ptr);
1316 }
1317
1318 return 0;
1319 }
1320
1321 int rtw89_fw_log_prepare(struct rtw89_dev *rtwdev)
1322 {
1323 struct rtw89_fw_log *log = &rtwdev->fw.log;
1324 struct rtw89_fw_suit *suit = &log->suit;
1325
1326 if (!suit || !suit->data) {
1327 rtw89_debug(rtwdev, RTW89_DBG_FW, "no log format file\n");
1328 return -EINVAL;
1329 }
1330 if (log->fmts)
1331 return 0;
1332
1333 return rtw89_fw_log_create_fmts_dict(rtwdev);
1334 }
1335
1336 static void rtw89_fw_log_dump_data(struct rtw89_dev *rtwdev,
1337 const struct rtw89_fw_c2h_log_fmt *log_fmt,
1338 u32 fmt_idx, u8 para_int, bool raw_data)
1339 {
1340 const char *(*fmts)[] = rtwdev->fw.log.fmts;
1341 char str_buf[RTW89_C2H_FW_LOG_STR_BUF_SIZE];
1342 u32 args[RTW89_C2H_FW_LOG_MAX_PARA_NUM] = {0};
1343 int i;
1344
1345 if (log_fmt->argc > RTW89_C2H_FW_LOG_MAX_PARA_NUM) {
1346 rtw89_warn(rtwdev, "C2H log: Arg count is unexpected %d\n",
1347 log_fmt->argc);
1348 return;
1349 }
1350
1351 if (para_int)
1352 for (i = 0 ; i < log_fmt->argc; i++)
1353 args[i] = le32_to_cpu(log_fmt->u.argv[i]);
1354
1355 if (raw_data) {
1356 if (para_int)
1357 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE,
1358 "fw_enc(%d, %d, %d) %*ph", le32_to_cpu(log_fmt->fmt_id),
1359 para_int, log_fmt->argc, (int)sizeof(args), args);
1360 else
1361 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE,
1362 "fw_enc(%d, %d, %d, %s)", le32_to_cpu(log_fmt->fmt_id),
1363 para_int, log_fmt->argc, log_fmt->u.raw);
1364 } else {
1365 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, (*fmts)[fmt_idx],
1366 args[0x0], args[0x1], args[0x2], args[0x3], args[0x4],
1367 args[0x5], args[0x6], args[0x7], args[0x8], args[0x9],
1368 args[0xa], args[0xb], args[0xc], args[0xd], args[0xe],
1369 args[0xf]);
1370 }
1371
1372 rtw89_info(rtwdev, "C2H log: %s", str_buf);
1373 }
1374
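/* Decode a C2H firmware log. Formatted logs are rendered through the format
 * dictionary built from the log-format suit; anything else is printed as
 * plain text or as raw encoded data.
 */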
1375 void rtw89_fw_log_dump(struct rtw89_dev *rtwdev, u8 *buf, u32 len)
1376 {
1377 const struct rtw89_fw_c2h_log_fmt *log_fmt;
1378 u8 para_int;
1379 u32 fmt_idx;
1380
1381 if (len < RTW89_C2H_HEADER_LEN) {
1382 rtw89_err(rtwdev, "c2h log length is wrong!\n");
1383 return;
1384 }
1385
1386 buf += RTW89_C2H_HEADER_LEN;
1387 len -= RTW89_C2H_HEADER_LEN;
1388 log_fmt = (const struct rtw89_fw_c2h_log_fmt *)buf;
1389
1390 if (len < RTW89_C2H_FW_FORMATTED_LOG_MIN_LEN)
1391 goto plain_log;
1392
1393 if (log_fmt->signature != cpu_to_le16(RTW89_C2H_FW_LOG_SIGNATURE))
1394 goto plain_log;
1395
1396 if (!rtwdev->fw.log.fmts)
1397 return;
1398
1399 para_int = u8_get_bits(log_fmt->feature, RTW89_C2H_FW_LOG_FEATURE_PARA_INT);
1400 fmt_idx = rtw89_fw_log_get_fmt_idx(rtwdev, le32_to_cpu(log_fmt->fmt_id));
1401
1402 if (!para_int && log_fmt->argc != 0 && fmt_idx != 0)
1403 rtw89_info(rtwdev, "C2H log: %s%s",
1404 (*rtwdev->fw.log.fmts)[fmt_idx], log_fmt->u.raw);
1405 else if (fmt_idx != 0 && para_int)
1406 rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, false);
1407 else
1408 rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, true);
1409 return;
1410
1411 plain_log:
1412 rtw89_info(rtwdev, "C2H log: %.*s", len, buf);
1413
1414 }
1415
1416 #define H2C_CAM_LEN 60
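/* Build and send the H2C that programs the address CAM and BSSID CAM for
 * this vif/sta, optionally using a scan MAC address.
 */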
1417 int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
1418 struct rtw89_sta *rtwsta, const u8 *scan_mac_addr)
1419 {
1420 struct sk_buff *skb;
1421 int ret;
1422
1423 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN);
1424 if (!skb) {
1425 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
1426 return -ENOMEM;
1427 }
1428 skb_put(skb, H2C_CAM_LEN);
1429 rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, rtwsta, scan_mac_addr, skb->data);
1430 rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, rtwsta, skb->data);
1431
1432 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1433 H2C_CAT_MAC,
1434 H2C_CL_MAC_ADDR_CAM_UPDATE,
1435 H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1,
1436 H2C_CAM_LEN);
1437
1438 ret = rtw89_h2c_tx(rtwdev, skb, false);
1439 if (ret) {
1440 rtw89_err(rtwdev, "failed to send h2c\n");
1441 goto fail;
1442 }
1443
1444 return 0;
1445 fail:
1446 dev_kfree_skb_any(skb);
1447
1448 return ret;
1449 }
1450
1451 #define H2C_DCTL_SEC_CAM_LEN 68
1452 int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
1453 struct rtw89_vif *rtwvif,
1454 struct rtw89_sta *rtwsta)
1455 {
1456 struct sk_buff *skb;
1457 int ret;
1458
1459 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DCTL_SEC_CAM_LEN);
1460 if (!skb) {
1461 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
1462 return -ENOMEM;
1463 }
1464 skb_put(skb, H2C_DCTL_SEC_CAM_LEN);
1465
1466 rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, skb->data);
1467
1468 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1469 H2C_CAT_MAC,
1470 H2C_CL_MAC_FR_EXCHG,
1471 H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0,
1472 H2C_DCTL_SEC_CAM_LEN);
1473
1474 ret = rtw89_h2c_tx(rtwdev, skb, false);
1475 if (ret) {
1476 rtw89_err(rtwdev, "failed to send h2c\n");
1477 goto fail;
1478 }
1479
1480 return 0;
1481 fail:
1482 dev_kfree_skb_any(skb);
1483
1484 return ret;
1485 }
1486 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);
1487
1488 #define H2C_BA_CAM_LEN 8
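/* Program (or release) a BA CAM entry for the given TID. If no static entry
 * can be acquired the request is silently skipped, since hardware can still
 * create dynamic BA CAM entries.
 */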
1489 int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
1490 bool valid, struct ieee80211_ampdu_params *params)
1491 {
1492 const struct rtw89_chip_info *chip = rtwdev->chip;
1493 struct rtw89_vif *rtwvif = rtwsta->rtwvif;
1494 u8 macid = rtwsta->mac_id;
1495 struct sk_buff *skb;
1496 u8 entry_idx;
1497 int ret;
1498
1499 ret = valid ?
1500 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) :
1501 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx);
1502 if (ret) {
1503 /* it still works even if we don't have static BA CAM, because
1504 * hardware can create dynamic BA CAM automatically.
1505 */
1506 rtw89_debug(rtwdev, RTW89_DBG_TXRX,
1507 "failed to %s entry tid=%d for h2c ba cam\n",
1508 valid ? "alloc" : "free", params->tid);
1509 return 0;
1510 }
1511
1512 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
1513 if (!skb) {
1514 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
1515 return -ENOMEM;
1516 }
1517 skb_put(skb, H2C_BA_CAM_LEN);
1518 SET_BA_CAM_MACID(skb->data, macid);
1519 if (chip->bacam_ver == RTW89_BACAM_V0_EXT)
1520 SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
1521 else
1522 SET_BA_CAM_ENTRY_IDX(skb->data, entry_idx);
1523 if (!valid)
1524 goto end;
1525 SET_BA_CAM_VALID(skb->data, valid);
1526 SET_BA_CAM_TID(skb->data, params->tid);
1527 if (params->buf_size > 64)
1528 SET_BA_CAM_BMAP_SIZE(skb->data, 4);
1529 else
1530 SET_BA_CAM_BMAP_SIZE(skb->data, 0);
1531 /* If init req is set, hw will set the ssn */
1532 SET_BA_CAM_INIT_REQ(skb->data, 1);
1533 SET_BA_CAM_SSN(skb->data, params->ssn);
1534
1535 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) {
1536 SET_BA_CAM_STD_EN(skb->data, 1);
1537 SET_BA_CAM_BAND(skb->data, rtwvif->mac_idx);
1538 }
1539
1540 end:
1541 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1542 H2C_CAT_MAC,
1543 H2C_CL_BA_CAM,
1544 H2C_FUNC_MAC_BA_CAM, 0, 1,
1545 H2C_BA_CAM_LEN);
1546
1547 ret = rtw89_h2c_tx(rtwdev, skb, false);
1548 if (ret) {
1549 rtw89_err(rtwdev, "failed to send h2c\n");
1550 goto fail;
1551 }
1552
1553 return 0;
1554 fail:
1555 dev_kfree_skb_any(skb);
1556
1557 return ret;
1558 }
1559
1560 static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev,
1561 u8 entry_idx, u8 uid)
1562 {
1563 struct sk_buff *skb;
1564 int ret;
1565
1566 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
1567 if (!skb) {
1568 rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n");
1569 return -ENOMEM;
1570 }
1571 skb_put(skb, H2C_BA_CAM_LEN);
1572
1573 SET_BA_CAM_VALID(skb->data, 1);
1574 SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
1575 SET_BA_CAM_UID(skb->data, uid);
1576 SET_BA_CAM_BAND(skb->data, 0);
1577 SET_BA_CAM_STD_EN(skb->data, 0);
1578
1579 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1580 H2C_CAT_MAC,
1581 H2C_CL_BA_CAM,
1582 H2C_FUNC_MAC_BA_CAM, 0, 1,
1583 H2C_BA_CAM_LEN);
1584
1585 ret = rtw89_h2c_tx(rtwdev, skb, false);
1586 if (ret) {
1587 rtw89_err(rtwdev, "failed to send h2c\n");
1588 goto fail;
1589 }
1590
1591 return 0;
1592 fail:
1593 dev_kfree_skb_any(skb);
1594
1595 return ret;
1596 }
1597
1598 void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev)
1599 {
1600 const struct rtw89_chip_info *chip = rtwdev->chip;
1601 u8 entry_idx = chip->bacam_num;
1602 u8 uid = 0;
1603 int i;
1604
1605 for (i = 0; i < chip->bacam_dynamic_num; i++) {
1606 rtw89_fw_h2c_init_ba_cam_v0_ext(rtwdev, entry_idx, uid);
1607 entry_idx++;
1608 uid++;
1609 }
1610 }
1611
1612 #define H2C_LOG_CFG_LEN 12
1613 int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
1614 {
1615 struct sk_buff *skb;
1616 u32 comp = enable ? BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
1617 BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) : 0;
1618 int ret;
1619
1620 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN);
1621 if (!skb) {
1622 rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n");
1623 return -ENOMEM;
1624 }
1625
1626 skb_put(skb, H2C_LOG_CFG_LEN);
1627 SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_LOUD);
1628 SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H));
1629 SET_LOG_CFG_COMP(skb->data, comp);
1630 SET_LOG_CFG_COMP_EXT(skb->data, 0);
1631
1632 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1633 H2C_CAT_MAC,
1634 H2C_CL_FW_INFO,
1635 H2C_FUNC_LOG_CFG, 0, 0,
1636 H2C_LOG_CFG_LEN);
1637
1638 ret = rtw89_h2c_tx(rtwdev, skb, false);
1639 if (ret) {
1640 rtw89_err(rtwdev, "failed to send h2c\n");
1641 goto fail;
1642 }
1643
1644 return 0;
1645 fail:
1646 dev_kfree_skb_any(skb);
1647
1648 return ret;
1649 }
1650
1651 static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
1652 struct rtw89_vif *rtwvif,
1653 enum rtw89_fw_pkt_ofld_type type,
1654 u8 *id)
1655 {
1656 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
1657 struct rtw89_pktofld_info *info;
1658 struct sk_buff *skb;
1659 int ret;
1660
1661 info = kzalloc(sizeof(*info), GFP_KERNEL);
1662 if (!info)
1663 return -ENOMEM;
1664
1665 switch (type) {
1666 case RTW89_PKT_OFLD_TYPE_PS_POLL:
1667 skb = ieee80211_pspoll_get(rtwdev->hw, vif);
1668 break;
1669 case RTW89_PKT_OFLD_TYPE_PROBE_RSP:
1670 skb = ieee80211_proberesp_get(rtwdev->hw, vif);
1671 break;
1672 case RTW89_PKT_OFLD_TYPE_NULL_DATA:
1673 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, false);
1674 break;
1675 case RTW89_PKT_OFLD_TYPE_QOS_NULL:
1676 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, true);
1677 break;
1678 default:
1679 goto err;
1680 }
1681
1682 if (!skb)
1683 goto err;
1684
1685 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
1686 kfree_skb(skb);
1687
1688 if (ret)
1689 goto err;
1690
1691 list_add_tail(&info->list, &rtwvif->general_pkt_list);
1692 *id = info->id;
1693 return 0;
1694
1695 err:
1696 kfree(info);
1697 return -ENOMEM;
1698 }
1699
1700 void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev,
1701 struct rtw89_vif *rtwvif, bool notify_fw)
1702 {
1703 struct list_head *pkt_list = &rtwvif->general_pkt_list;
1704 struct rtw89_pktofld_info *info, *tmp;
1705
1706 list_for_each_entry_safe(info, tmp, pkt_list, list) {
1707 if (notify_fw)
1708 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
1709 else
1710 rtw89_core_release_bit_map(rtwdev->pkt_offload, info->id);
1711 list_del(&info->list);
1712 kfree(info);
1713 }
1714 }
1715
1716 void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw)
1717 {
1718 struct rtw89_vif *rtwvif;
1719
1720 rtw89_for_each_rtwvif(rtwdev, rtwvif)
1721 rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif, notify_fw);
1722 }
1723
1724 #define H2C_GENERAL_PKT_LEN 6
1725 #define H2C_GENERAL_PKT_ID_UND 0xff
1726 int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev,
1727 struct rtw89_vif *rtwvif, u8 macid)
1728 {
1729 u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND;
1730 u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND;
1731 u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND;
1732 struct sk_buff *skb;
1733 int ret;
1734
1735 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
1736 RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll);
1737 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
1738 RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null);
1739 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
1740 RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null);
1741
1742 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN);
1743 if (!skb) {
1744 rtw89_err(rtwdev, "failed to alloc skb for h2c general pkt\n");
1745 return -ENOMEM;
1746 }
1747 skb_put(skb, H2C_GENERAL_PKT_LEN);
1748 SET_GENERAL_PKT_MACID(skb->data, macid);
1749 SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
1750 SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll);
1751 SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null);
1752 SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null);
1753 SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
1754
1755 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1756 H2C_CAT_MAC,
1757 H2C_CL_FW_INFO,
1758 H2C_FUNC_MAC_GENERAL_PKT, 0, 1,
1759 H2C_GENERAL_PKT_LEN);
1760
1761 ret = rtw89_h2c_tx(rtwdev, skb, false);
1762 if (ret) {
1763 rtw89_err(rtwdev, "failed to send h2c\n");
1764 goto fail;
1765 }
1766
1767 return 0;
1768 fail:
1769 dev_kfree_skb_any(skb);
1770
1771 return ret;
1772 }
1773
1774 #define H2C_LPS_PARM_LEN 8
1775 int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
1776 struct rtw89_lps_parm *lps_param)
1777 {
1778 struct sk_buff *skb;
1779 int ret;
1780
1781 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN);
1782 if (!skb) {
1783 rtw89_err(rtwdev, "failed to alloc skb for h2c lps parm\n");
1784 return -ENOMEM;
1785 }
1786 skb_put(skb, H2C_LPS_PARM_LEN);
1787
1788 SET_LPS_PARM_MACID(skb->data, lps_param->macid);
1789 SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode);
1790 SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm);
1791 SET_LPS_PARM_RLBM(skb->data, 1);
1792 SET_LPS_PARM_SMARTPS(skb->data, 1);
1793 SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1);
1794 SET_LPS_PARM_VOUAPSD(skb->data, 0);
1795 SET_LPS_PARM_VIUAPSD(skb->data, 0);
1796 SET_LPS_PARM_BEUAPSD(skb->data, 0);
1797 SET_LPS_PARM_BKUAPSD(skb->data, 0);
1798
1799 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1800 H2C_CAT_MAC,
1801 H2C_CL_MAC_PS,
1802 H2C_FUNC_MAC_LPS_PARM, 0, 1,
1803 H2C_LPS_PARM_LEN);
1804
1805 ret = rtw89_h2c_tx(rtwdev, skb, false);
1806 if (ret) {
1807 rtw89_err(rtwdev, "failed to send h2c\n");
1808 goto fail;
1809 }
1810
1811 return 0;
1812 fail:
1813 dev_kfree_skb_any(skb);
1814
1815 return ret;
1816 }
1817
1818 #define H2C_P2P_ACT_LEN 20
1819 int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
1820 struct ieee80211_p2p_noa_desc *desc,
1821 u8 act, u8 noa_id)
1822 {
1823 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
1824 bool p2p_type_gc = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT;
1825 u8 ctwindow_oppps = vif->bss_conf.p2p_noa_attr.oppps_ctwindow;
1826 struct sk_buff *skb;
1827 u8 *cmd;
1828 int ret;
1829
1830 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN);
1831 if (!skb) {
1832 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
1833 return -ENOMEM;
1834 }
1835 skb_put(skb, H2C_P2P_ACT_LEN);
1836 cmd = skb->data;
1837
1838 RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif->mac_id);
1839 RTW89_SET_FWCMD_P2P_P2PID(cmd, 0);
1840 RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id);
1841 RTW89_SET_FWCMD_P2P_ACT(cmd, act);
1842 RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc);
1843 RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0);
1844 if (desc) {
1845 RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time);
1846 RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval);
1847 RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration);
1848 RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count);
1849 RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps);
1850 }
1851
1852 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1853 H2C_CAT_MAC, H2C_CL_MAC_PS,
1854 H2C_FUNC_P2P_ACT, 0, 0,
1855 H2C_P2P_ACT_LEN);
1856
1857 ret = rtw89_h2c_tx(rtwdev, skb, false);
1858 if (ret) {
1859 rtw89_err(rtwdev, "failed to send h2c\n");
1860 goto fail;
1861 }
1862
1863 return 0;
1864 fail:
1865 dev_kfree_skb_any(skb);
1866
1867 return ret;
1868 }
1869
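/* Fill the NTX path and path-map fields of a CMAC table H2C from the
 * configured antenna setup: single-path chips always use path A; otherwise
 * the current antenna_tx selection (defaulting to path B) is used and the
 * path-B map bit is set only for the A+B combination.
 */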
1870 static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev,
1871 struct sk_buff *skb)
1872 {
1873 const struct rtw89_chip_info *chip = rtwdev->chip;
1874 struct rtw89_hal *hal = &rtwdev->hal;
1875 u8 ntx_path;
1876 u8 map_b;
1877
1878 if (chip->rf_path_num == 1) {
1879 ntx_path = RF_A;
1880 map_b = 0;
1881 } else {
1882 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B;
1883 map_b = hal->antenna_tx == RF_AB ? 1 : 0;
1884 }
1885
1886 SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path);
1887 SET_CMC_TBL_PATH_MAP_A(skb->data, 0);
1888 SET_CMC_TBL_PATH_MAP_B(skb->data, map_b);
1889 SET_CMC_TBL_PATH_MAP_C(skb->data, 0);
1890 SET_CMC_TBL_PATH_MAP_D(skb->data, 0);
1891 }
1892
1893 #define H2C_CMC_TBL_LEN 68
1894 int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
1895 struct rtw89_vif *rtwvif)
1896 {
1897 const struct rtw89_chip_info *chip = rtwdev->chip;
1898 struct sk_buff *skb;
1899 u8 macid = rtwvif->mac_id;
1900 int ret;
1901
1902 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
1903 if (!skb) {
1904 rtw89_err(rtwdev, "failed to alloc skb for h2c default cmac tbl\n");
1905 return -ENOMEM;
1906 }
1907 skb_put(skb, H2C_CMC_TBL_LEN);
1908 SET_CTRL_INFO_MACID(skb->data, macid);
1909 SET_CTRL_INFO_OPERATION(skb->data, 1);
1910 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
1911 SET_CMC_TBL_TXPWR_MODE(skb->data, 0);
1912 __rtw89_fw_h2c_set_tx_path(rtwdev, skb);
1913 SET_CMC_TBL_ANTSEL_A(skb->data, 0);
1914 SET_CMC_TBL_ANTSEL_B(skb->data, 0);
1915 SET_CMC_TBL_ANTSEL_C(skb->data, 0);
1916 SET_CMC_TBL_ANTSEL_D(skb->data, 0);
1917 }
1918 SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0);
1919 SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0);
1920 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
1921 SET_CMC_TBL_DATA_DCM(skb->data, 0);
1922
1923 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1924 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
1925 chip->h2c_cctl_func_id, 0, 1,
1926 H2C_CMC_TBL_LEN);
1927
1928 ret = rtw89_h2c_tx(rtwdev, skb, false);
1929 if (ret) {
1930 rtw89_err(rtwdev, "failed to send h2c\n");
1931 goto fail;
1932 }
1933
1934 return 0;
1935 fail:
1936 dev_kfree_skb_any(skb);
1937
1938 return ret;
1939 }
1940
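/* Derive the per-bandwidth nominal packet padding values from the peer's HE
 * capabilities: if no PPE thresholds are advertised, the nominal padding
 * field of PHY capability byte 9 applies to all bandwidths; otherwise the
 * PPE threshold field is parsed per RU allocation.
 */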
1941 static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
1942 struct ieee80211_sta *sta, u8 *pads)
1943 {
1944 bool ppe_th;
1945 u8 ppe16, ppe8;
1946 u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1;
1947 u8 ppe_thres_hdr = sta->deflink.he_cap.ppe_thres[0];
1948 u8 ru_bitmap;
1949 u8 n, idx, sh;
1950 u16 ppe;
1951 int i;
1952
1953 if (!sta->deflink.he_cap.has_he)
1954 return;
1955
1956 ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
1957 sta->deflink.he_cap.he_cap_elem.phy_cap_info[6]);
1958 if (!ppe_th) {
1959 u8 pad;
1960
1961 pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK,
1962 sta->deflink.he_cap.he_cap_elem.phy_cap_info[9]);
1963
1964 for (i = 0; i < RTW89_PPE_BW_NUM; i++)
1965 pads[i] = pad;
1966
1967 return;
1968 }
1969
1970 ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr);
1971 n = hweight8(ru_bitmap);
1972 n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss;
1973
1974 for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
1975 if (!(ru_bitmap & BIT(i))) {
1976 pads[i] = 1;
1977 continue;
1978 }
1979
1980 idx = n >> 3;
1981 sh = n & 7;
1982 n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2;
1983
1984 ppe = le16_to_cpu(*((__le16 *)&sta->deflink.he_cap.ppe_thres[idx]));
1985 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
1986 sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
1987 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
1988
1989 if (ppe16 != 7 && ppe8 == 7)
1990 pads[i] = 2;
1991 else if (ppe8 != 7)
1992 pads[i] = 1;
1993 else
1994 pads[i] = 0;
1995 }
1996 }
1997
1998 int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
1999 struct ieee80211_vif *vif,
2000 struct ieee80211_sta *sta)
2001 {
2002 const struct rtw89_chip_info *chip = rtwdev->chip;
2003 struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
2004 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
2005 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
2006 rtwvif->sub_entity_idx);
2007 struct sk_buff *skb;
2008 u8 pads[RTW89_PPE_BW_NUM];
2009 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
2010 u16 lowest_rate;
2011 int ret;
2012
2013 memset(pads, 0, sizeof(pads));
2014 if (sta)
2015 __get_sta_he_pkt_padding(rtwdev, sta, pads);
2016
2017 if (vif->p2p)
2018 lowest_rate = RTW89_HW_RATE_OFDM6;
2019 else if (chan->band_type == RTW89_BAND_2G)
2020 lowest_rate = RTW89_HW_RATE_CCK1;
2021 else
2022 lowest_rate = RTW89_HW_RATE_OFDM6;
2023
2024 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
2025 if (!skb) {
2026 rtw89_err(rtwdev, "failed to alloc skb for h2c assoc cmac tbl\n");
2027 return -ENOMEM;
2028 }
2029 skb_put(skb, H2C_CMC_TBL_LEN);
2030 SET_CTRL_INFO_MACID(skb->data, mac_id);
2031 SET_CTRL_INFO_OPERATION(skb->data, 1);
2032 SET_CMC_TBL_DISRTSFB(skb->data, 1);
2033 SET_CMC_TBL_DISDATAFB(skb->data, 1);
2034 SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate);
2035 SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0);
2036 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0);
2037 if (vif->type == NL80211_IFTYPE_STATION)
2038 SET_CMC_TBL_ULDL(skb->data, 1);
2039 else
2040 SET_CMC_TBL_ULDL(skb->data, 0);
2041 SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif->port);
2042 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) {
2043 SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
2044 SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
2045 SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
2046 SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
2047 } else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
2048 SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
2049 SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
2050 SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
2051 SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
2052 }
2053 if (sta)
2054 SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data,
2055 sta->deflink.he_cap.has_he);
2056 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
2057 SET_CMC_TBL_DATA_DCM(skb->data, 0);
2058
2059 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2060 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
2061 chip->h2c_cctl_func_id, 0, 1,
2062 H2C_CMC_TBL_LEN);
2063
2064 ret = rtw89_h2c_tx(rtwdev, skb, false);
2065 if (ret) {
2066 rtw89_err(rtwdev, "failed to send h2c\n");
2067 goto fail;
2068 }
2069
2070 return 0;
2071 fail:
2072 dev_kfree_skb_any(skb);
2073
2074 return ret;
2075 }
2076
2077 int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
2078 struct rtw89_sta *rtwsta)
2079 {
2080 const struct rtw89_chip_info *chip = rtwdev->chip;
2081 struct sk_buff *skb;
2082 int ret;
2083
2084 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
2085 if (!skb) {
2086 rtw89_err(rtwdev, "failed to alloc skb for h2c txtime cmac tbl\n");
2087 return -ENOMEM;
2088 }
2089 skb_put(skb, H2C_CMC_TBL_LEN);
2090 SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
2091 SET_CTRL_INFO_OPERATION(skb->data, 1);
2092 if (rtwsta->cctl_tx_time) {
2093 SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1);
2094 SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta->ampdu_max_time);
2095 }
2096 if (rtwsta->cctl_tx_retry_limit) {
2097 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1);
2098 SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta->data_tx_cnt_lmt);
2099 }
2100
2101 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2102 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
2103 chip->h2c_cctl_func_id, 0, 1,
2104 H2C_CMC_TBL_LEN);
2105
2106 ret = rtw89_h2c_tx(rtwdev, skb, false);
2107 if (ret) {
2108 rtw89_err(rtwdev, "failed to send h2c\n");
2109 goto fail;
2110 }
2111
2112 return 0;
2113 fail:
2114 dev_kfree_skb_any(skb);
2115
2116 return ret;
2117 }
2118
2119 int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
2120 struct rtw89_sta *rtwsta)
2121 {
2122 const struct rtw89_chip_info *chip = rtwdev->chip;
2123 struct sk_buff *skb;
2124 int ret;
2125
2126 if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD)
2127 return 0;
2128
2129 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
2130 if (!skb) {
2131 rtw89_err(rtwdev, "failed to alloc skb for h2c txpath cmac tbl\n");
2132 return -ENOMEM;
2133 }
2134 skb_put(skb, H2C_CMC_TBL_LEN);
2135 SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
2136 SET_CTRL_INFO_OPERATION(skb->data, 1);
2137
2138 __rtw89_fw_h2c_set_tx_path(rtwdev, skb);
2139
2140 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2141 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
2142 H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
2143 H2C_CMC_TBL_LEN);
2144
2145 ret = rtw89_h2c_tx(rtwdev, skb, false);
2146 if (ret) {
2147 rtw89_err(rtwdev, "failed to send h2c\n");
2148 goto fail;
2149 }
2150
2151 return 0;
2152 fail:
2153 dev_kfree_skb_any(skb);
2154
2155 return ret;
2156 }
2157
2158 #define H2C_BCN_BASE_LEN 12
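/* Fetch the current beacon template from mac80211, append any P2P NoA
 * attribute, and download it to firmware together with the port, band,
 * TIM offset and beacon rate.
 */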
2159 int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
2160 struct rtw89_vif *rtwvif)
2161 {
2162 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
2163 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
2164 rtwvif->sub_entity_idx);
2165 struct sk_buff *skb;
2166 struct sk_buff *skb_beacon;
2167 u16 tim_offset;
2168 int bcn_total_len;
2169 u16 beacon_rate;
2170 void *noa_data;
2171 u8 noa_len;
2172 int ret;
2173
2174 if (vif->p2p)
2175 beacon_rate = RTW89_HW_RATE_OFDM6;
2176 else if (chan->band_type == RTW89_BAND_2G)
2177 beacon_rate = RTW89_HW_RATE_CCK1;
2178 else
2179 beacon_rate = RTW89_HW_RATE_OFDM6;
2180
2181 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
2182 NULL, 0);
2183 if (!skb_beacon) {
2184 rtw89_err(rtwdev, "failed to get beacon skb\n");
2185 return -ENOMEM;
2186 }
2187
2188 noa_len = rtw89_p2p_noa_fetch(rtwvif, &noa_data);
2189 if (noa_len &&
2190 (noa_len <= skb_tailroom(skb_beacon) ||
2191 pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) {
2192 skb_put_data(skb_beacon, noa_data, noa_len);
2193 }
2194
2195 bcn_total_len = H2C_BCN_BASE_LEN + skb_beacon->len;
2196 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
2197 if (!skb) {
2198 rtw89_err(rtwdev, "failed to alloc skb for h2c beacon update\n");
2199 dev_kfree_skb_any(skb_beacon);
2200 return -ENOMEM;
2201 }
2202 skb_put(skb, H2C_BCN_BASE_LEN);
2203
2204 SET_BCN_UPD_PORT(skb->data, rtwvif->port);
2205 SET_BCN_UPD_MBSSID(skb->data, 0);
2206 SET_BCN_UPD_BAND(skb->data, rtwvif->mac_idx);
2207 SET_BCN_UPD_GRP_IE_OFST(skb->data, tim_offset);
2208 SET_BCN_UPD_MACID(skb->data, rtwvif->mac_id);
2209 SET_BCN_UPD_SSN_SEL(skb->data, RTW89_MGMT_HW_SSN_SEL);
2210 SET_BCN_UPD_SSN_MODE(skb->data, RTW89_MGMT_HW_SEQ_MODE);
2211 SET_BCN_UPD_RATE(skb->data, beacon_rate);
2212
2213 skb_put_data(skb, skb_beacon->data, skb_beacon->len);
2214 dev_kfree_skb_any(skb_beacon);
2215
2216 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2217 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
2218 H2C_FUNC_MAC_BCN_UPD, 0, 1,
2219 bcn_total_len);
2220
2221 ret = rtw89_h2c_tx(rtwdev, skb, false);
2222 if (ret) {
2223 rtw89_err(rtwdev, "failed to send h2c\n");
2224 dev_kfree_skb_any(skb);
2225 return ret;
2226 }
2227
2228 return 0;
2229 }
2230
2231 #define H2C_ROLE_MAINTAIN_LEN 4
2232 int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
2233 struct rtw89_vif *rtwvif,
2234 struct rtw89_sta *rtwsta,
2235 enum rtw89_upd_mode upd_mode)
2236 {
2237 struct sk_buff *skb;
2238 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
2239 u8 self_role;
2240 int ret;
2241
2242 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) {
2243 if (rtwsta)
2244 self_role = RTW89_SELF_ROLE_AP_CLIENT;
2245 else
2246 self_role = rtwvif->self_role;
2247 } else {
2248 self_role = rtwvif->self_role;
2249 }
2250
2251 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN);
2252 if (!skb) {
2253 rtw89_err(rtwdev, "failed to alloc skb for h2c role maintain\n");
2254 return -ENOMEM;
2255 }
2256 skb_put(skb, H2C_ROLE_MAINTAIN_LEN);
2257 SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id);
2258 SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role);
2259 SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode);
2260 SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif->wifi_role);
2261
2262 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2263 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
2264 H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1,
2265 H2C_ROLE_MAINTAIN_LEN);
2266
2267 ret = rtw89_h2c_tx(rtwdev, skb, false);
2268 if (ret) {
2269 rtw89_err(rtwdev, "failed to send h2c\n");
2270 goto fail;
2271 }
2272
2273 return 0;
2274 fail:
2275 dev_kfree_skb_any(skb);
2276
2277 return ret;
2278 }
2279
2280 #define H2C_JOIN_INFO_LEN 4
2281 int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
2282 struct rtw89_sta *rtwsta, bool dis_conn)
2283 {
2284 struct sk_buff *skb;
2285 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
2286 u8 self_role = rtwvif->self_role;
2287 u8 net_type = rtwvif->net_type;
2288 int ret;
2289
2290 if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) {
2291 self_role = RTW89_SELF_ROLE_AP_CLIENT;
2292 net_type = dis_conn ? RTW89_NET_TYPE_NO_LINK : net_type;
2293 }
2294
2295 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
2296 if (!skb) {
2297 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
2298 return -ENOMEM;
2299 }
2300 skb_put(skb, H2C_JOIN_INFO_LEN);
2301 SET_JOININFO_MACID(skb->data, mac_id);
2302 SET_JOININFO_OP(skb->data, dis_conn);
2303 SET_JOININFO_BAND(skb->data, rtwvif->mac_idx);
2304 SET_JOININFO_WMM(skb->data, rtwvif->wmm);
2305 SET_JOININFO_TGR(skb->data, rtwvif->trigger);
2306 SET_JOININFO_ISHESTA(skb->data, 0);
2307 SET_JOININFO_DLBW(skb->data, 0);
2308 SET_JOININFO_TF_MAC_PAD(skb->data, 0);
2309 SET_JOININFO_DL_T_PE(skb->data, 0);
2310 SET_JOININFO_PORT_ID(skb->data, rtwvif->port);
2311 SET_JOININFO_NET_TYPE(skb->data, net_type);
2312 SET_JOININFO_WIFI_ROLE(skb->data, rtwvif->wifi_role);
2313 SET_JOININFO_SELF_ROLE(skb->data, self_role);
2314
2315 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2316 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
2317 H2C_FUNC_MAC_JOININFO, 0, 1,
2318 H2C_JOIN_INFO_LEN);
2319
2320 ret = rtw89_h2c_tx(rtwdev, skb, false);
2321 if (ret) {
2322 rtw89_err(rtwdev, "failed to send h2c\n");
2323 goto fail;
2324 }
2325
2326 return 0;
2327 fail:
2328 dev_kfree_skb_any(skb);
2329
2330 return ret;
2331 }
2332
2333 int rtw89_fw_h2c_notify_dbcc(struct rtw89_dev *rtwdev, bool en)
2334 {
2335 struct rtw89_h2c_notify_dbcc *h2c;
2336 u32 len = sizeof(*h2c);
2337 struct sk_buff *skb;
2338 int ret;
2339
2340 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2341 if (!skb) {
2342 rtw89_err(rtwdev, "failed to alloc skb for h2c notify dbcc\n");
2343 return -ENOMEM;
2344 }
2345 skb_put(skb, len);
2346 h2c = (struct rtw89_h2c_notify_dbcc *)skb->data;
2347
2348 h2c->w0 = le32_encode_bits(en, RTW89_H2C_NOTIFY_DBCC_EN);
2349
2350 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2351 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
2352 H2C_FUNC_NOTIFY_DBCC, 0, 1,
2353 len);
2354
2355 ret = rtw89_h2c_tx(rtwdev, skb, false);
2356 if (ret) {
2357 rtw89_err(rtwdev, "failed to send h2c\n");
2358 goto fail;
2359 }
2360
2361 return 0;
2362 fail:
2363 dev_kfree_skb_any(skb);
2364
2365 return ret;
2366 }
2367
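/* Pause or resume TX for a single macid: @grp selects the 32-bit macid
 * group and @sh the bit within that group; the mask bitmap and, when
 * pausing, the pause bitmap are carried in the MACID_PAUSE H2C.
 */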
2368 int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
2369 bool pause)
2370 {
2371 struct rtw89_fw_macid_pause_grp h2c = {{0}};
2372 u8 len = sizeof(struct rtw89_fw_macid_pause_grp);
2373 struct sk_buff *skb;
2374 int ret;
2375
2376 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2377 if (!skb) {
2378 rtw89_err(rtwdev, "failed to alloc skb for h2c macid pause\n");
2379 return -ENOMEM;
2380 }
2381 h2c.mask_grp[grp] = cpu_to_le32(BIT(sh));
2382 if (pause)
2383 h2c.pause_grp[grp] = cpu_to_le32(BIT(sh));
2384 skb_put_data(skb, &h2c, len);
2385
2386 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2387 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2388 H2C_FUNC_MAC_MACID_PAUSE, 1, 0,
2389 len);
2390
2391 ret = rtw89_h2c_tx(rtwdev, skb, false);
2392 if (ret) {
2393 rtw89_err(rtwdev, "failed to send h2c\n");
2394 goto fail;
2395 }
2396
2397 return 0;
2398 fail:
2399 dev_kfree_skb_any(skb);
2400
2401 return ret;
2402 }
2403
2404 #define H2C_EDCA_LEN 12
2405 int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
2406 u8 ac, u32 val)
2407 {
2408 struct sk_buff *skb;
2409 int ret;
2410
2411 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN);
2412 if (!skb) {
2413 rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n");
2414 return -ENOMEM;
2415 }
2416 skb_put(skb, H2C_EDCA_LEN);
2417 RTW89_SET_EDCA_SEL(skb->data, 0);
2418 RTW89_SET_EDCA_BAND(skb->data, rtwvif->mac_idx);
2419 RTW89_SET_EDCA_WMM(skb->data, 0);
2420 RTW89_SET_EDCA_AC(skb->data, ac);
2421 RTW89_SET_EDCA_PARAM(skb->data, val);
2422
2423 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2424 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2425 H2C_FUNC_USR_EDCA, 0, 1,
2426 H2C_EDCA_LEN);
2427
2428 ret = rtw89_h2c_tx(rtwdev, skb, false);
2429 if (ret) {
2430 rtw89_err(rtwdev, "failed to send h2c\n");
2431 goto fail;
2432 }
2433
2434 return 0;
2435 fail:
2436 dev_kfree_skb_any(skb);
2437
2438 return ret;
2439 }
2440
2441 #define H2C_TSF32_TOGL_LEN 4
2442 int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
2443 bool en)
2444 {
2445 struct sk_buff *skb;
2446 u16 early_us = en ? 2000 : 0;
2447 u8 *cmd;
2448 int ret;
2449
2450 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN);
2451 if (!skb) {
2452 rtw89_err(rtwdev, "failed to alloc skb for h2c tsf32 toggle\n");
2453 return -ENOMEM;
2454 }
2455 skb_put(skb, H2C_TSF32_TOGL_LEN);
2456 cmd = skb->data;
2457
2458 RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif->mac_idx);
2459 RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en);
2460 RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif->port);
2461 RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us);
2462
2463 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2464 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2465 H2C_FUNC_TSF32_TOGL, 0, 0,
2466 H2C_TSF32_TOGL_LEN);
2467
2468 ret = rtw89_h2c_tx(rtwdev, skb, false);
2469 if (ret) {
2470 rtw89_err(rtwdev, "failed to send h2c\n");
2471 goto fail;
2472 }
2473
2474 return 0;
2475 fail:
2476 dev_kfree_skb_any(skb);
2477
2478 return ret;
2479 }
2480
2481 #define H2C_OFLD_CFG_LEN 8
2482 int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev)
2483 {
2484 static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00};
2485 struct sk_buff *skb;
2486 int ret;
2487
2488 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN);
2489 if (!skb) {
2490 rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n");
2491 return -ENOMEM;
2492 }
2493 skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN);
2494
2495 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2496 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2497 H2C_FUNC_OFLD_CFG, 0, 1,
2498 H2C_OFLD_CFG_LEN);
2499
2500 ret = rtw89_h2c_tx(rtwdev, skb, false);
2501 if (ret) {
2502 rtw89_err(rtwdev, "failed to send h2c\n");
2503 goto fail;
2504 }
2505
2506 return 0;
2507 fail:
2508 dev_kfree_skb_any(skb);
2509
2510 return ret;
2511 }
2512
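/* Configure firmware beacon filtering and RSSI/beacon-loss monitoring for a
 * station vif. Only valid when the firmware advertises the BEACON_FILTER
 * feature and the vif operates in infrastructure mode.
 */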
2513 int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev,
2514 struct ieee80211_vif *vif,
2515 bool connect)
2516 {
2517 struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
2518 struct ieee80211_bss_conf *bss_conf = vif ? &vif->bss_conf : NULL;
2519 struct rtw89_h2c_bcnfltr *h2c;
2520 u32 len = sizeof(*h2c);
2521 struct sk_buff *skb;
2522 int ret;
2523
2524 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
2525 return -EINVAL;
2526
2527 if (!rtwvif || !bss_conf || rtwvif->net_type != RTW89_NET_TYPE_INFRA)
2528 return -EINVAL;
2529
2530 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2531 if (!skb) {
2532 rtw89_err(rtwdev, "failed to alloc skb for h2c bcn filter\n");
2533 return -ENOMEM;
2534 }
2535
2536 skb_put(skb, len);
2537 h2c = (struct rtw89_h2c_bcnfltr *)skb->data;
2538
2539 h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) |
2540 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) |
2541 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) |
2542 le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT,
2543 RTW89_H2C_BCNFLTR_W0_MODE) |
2544 le32_encode_bits(RTW89_BCN_LOSS_CNT, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT) |
2545 le32_encode_bits(bss_conf->cqm_rssi_hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) |
2546 le32_encode_bits(bss_conf->cqm_rssi_thold + MAX_RSSI,
2547 RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) |
2548 le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID);
2549
2550 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2551 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2552 H2C_FUNC_CFG_BCNFLTR, 0, 1, len);
2553
2554 ret = rtw89_h2c_tx(rtwdev, skb, false);
2555 if (ret) {
2556 rtw89_err(rtwdev, "failed to send h2c\n");
2557 goto fail;
2558 }
2559
2560 return 0;
2561 fail:
2562 dev_kfree_skb_any(skb);
2563
2564 return ret;
2565 }
2566
2567 int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev,
2568 struct rtw89_rx_phy_ppdu *phy_ppdu)
2569 {
2570 struct rtw89_h2c_ofld_rssi *h2c;
2571 u32 len = sizeof(*h2c);
2572 struct sk_buff *skb;
2573 s8 rssi;
2574 int ret;
2575
2576 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
2577 return -EINVAL;
2578
2579 if (!phy_ppdu)
2580 return -EINVAL;
2581
2582 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2583 if (!skb) {
2584 rtw89_err(rtwdev, "failed to alloc skb for h2c rssi\n");
2585 return -ENOMEM;
2586 }
2587
2588 rssi = phy_ppdu->rssi_avg >> RSSI_FACTOR;
2589 skb_put(skb, len);
2590 h2c = (struct rtw89_h2c_ofld_rssi *)skb->data;
2591
2592 h2c->w0 = le32_encode_bits(phy_ppdu->mac_id, RTW89_H2C_OFLD_RSSI_W0_MACID) |
2593 le32_encode_bits(1, RTW89_H2C_OFLD_RSSI_W0_NUM);
2594 h2c->w1 = le32_encode_bits(rssi, RTW89_H2C_OFLD_RSSI_W1_VAL);
2595
2596 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2597 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2598 H2C_FUNC_OFLD_RSSI, 0, 1, len);
2599
2600 ret = rtw89_h2c_tx(rtwdev, skb, false);
2601 if (ret) {
2602 rtw89_err(rtwdev, "failed to send h2c\n");
2603 goto fail;
2604 }
2605
2606 return 0;
2607 fail:
2608 dev_kfree_skb_any(skb);
2609
2610 return ret;
2611 }
2612
2613 int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
2614 {
2615 struct rtw89_traffic_stats *stats = &rtwvif->stats;
2616 struct rtw89_h2c_ofld *h2c;
2617 u32 len = sizeof(*h2c);
2618 struct sk_buff *skb;
2619 int ret;
2620
2621 if (rtwvif->net_type != RTW89_NET_TYPE_INFRA)
2622 return -EINVAL;
2623
2624 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2625 if (!skb) {
2626 rtw89_err(rtwdev, "failed to alloc skb for h2c tp\n");
2627 return -ENOMEM;
2628 }
2629
2630 skb_put(skb, len);
2631 h2c = (struct rtw89_h2c_ofld *)skb->data;
2632
2633 h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_OFLD_W0_MAC_ID) |
2634 le32_encode_bits(stats->tx_throughput, RTW89_H2C_OFLD_W0_TX_TP) |
2635 le32_encode_bits(stats->rx_throughput, RTW89_H2C_OFLD_W0_RX_TP);
2636
2637 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2638 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2639 H2C_FUNC_OFLD_TP, 0, 1, len);
2640
2641 ret = rtw89_h2c_tx(rtwdev, skb, false);
2642 if (ret) {
2643 rtw89_err(rtwdev, "failed to send h2c\n");
2644 goto fail;
2645 }
2646
2647 return 0;
2648 fail:
2649 dev_kfree_skb_any(skb);
2650
2651 return ret;
2652 }
2653
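/* Send the rate-adaptive (RA) configuration for one macid. BE-generation
 * chips use the longer v1 layout with extra EHT fields; when @csi is set,
 * the BFee CSI rate-control fields are filled in as well.
 */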
2654 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi)
2655 {
2656 const struct rtw89_chip_info *chip = rtwdev->chip;
2657 struct rtw89_h2c_ra_v1 *h2c_v1;
2658 struct rtw89_h2c_ra *h2c;
2659 u32 len = sizeof(*h2c);
2660 bool format_v1 = false;
2661 struct sk_buff *skb;
2662 int ret;
2663
2664 if (chip->chip_gen == RTW89_CHIP_BE) {
2665 len = sizeof(*h2c_v1);
2666 format_v1 = true;
2667 }
2668
2669 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2670 if (!skb) {
2671 rtw89_err(rtwdev, "failed to alloc skb for h2c ra\n");
2672 return -ENOMEM;
2673 }
2674 skb_put(skb, len);
2675 h2c = (struct rtw89_h2c_ra *)skb->data;
2676 rtw89_debug(rtwdev, RTW89_DBG_RA,
2677 "ra cmd msk: %llx ", ra->ra_mask);
2678
2679 h2c->w0 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_W0_MODE) |
2680 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_W0_BW_CAP) |
2681 le32_encode_bits(ra->macid, RTW89_H2C_RA_W0_MACID) |
2682 le32_encode_bits(ra->dcm_cap, RTW89_H2C_RA_W0_DCM) |
2683 le32_encode_bits(ra->er_cap, RTW89_H2C_RA_W0_ER) |
2684 le32_encode_bits(ra->init_rate_lv, RTW89_H2C_RA_W0_INIT_RATE_LV) |
2685 le32_encode_bits(ra->upd_all, RTW89_H2C_RA_W0_UPD_ALL) |
2686 le32_encode_bits(ra->en_sgi, RTW89_H2C_RA_W0_SGI) |
2687 le32_encode_bits(ra->ldpc_cap, RTW89_H2C_RA_W0_LDPC) |
2688 le32_encode_bits(ra->stbc_cap, RTW89_H2C_RA_W0_STBC) |
2689 le32_encode_bits(ra->ss_num, RTW89_H2C_RA_W0_SS_NUM) |
2690 le32_encode_bits(ra->giltf, RTW89_H2C_RA_W0_GILTF) |
2691 le32_encode_bits(ra->upd_bw_nss_mask, RTW89_H2C_RA_W0_UPD_BW_NSS_MASK) |
2692 le32_encode_bits(ra->upd_mask, RTW89_H2C_RA_W0_UPD_MASK);
2693 h2c->w1 = le32_encode_bits(ra->ra_mask, RTW89_H2C_RA_W1_RAMASK_LO32);
2694 h2c->w2 = le32_encode_bits(ra->ra_mask >> 32, RTW89_H2C_RA_W2_RAMASK_HI32);
2695 h2c->w3 = le32_encode_bits(ra->fix_giltf_en, RTW89_H2C_RA_W3_FIX_GILTF_EN) |
2696 le32_encode_bits(ra->fix_giltf, RTW89_H2C_RA_W3_FIX_GILTF);
2697
2698 if (!format_v1)
2699 goto csi;
2700
2701 h2c_v1 = (struct rtw89_h2c_ra_v1 *)h2c;
2702 h2c_v1->w4 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_V1_W4_MODE_EHT) |
2703 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_V1_W4_BW_EHT);
2704
2705 csi:
2706 if (!csi)
2707 goto done;
2708
2709 h2c->w2 |= le32_encode_bits(1, RTW89_H2C_RA_W2_BFEE_CSI_CTL);
2710 h2c->w3 |= le32_encode_bits(ra->band_num, RTW89_H2C_RA_W3_BAND_NUM) |
2711 le32_encode_bits(ra->cr_tbl_sel, RTW89_H2C_RA_W3_CR_TBL_SEL) |
2712 le32_encode_bits(ra->fixed_csi_rate_en, RTW89_H2C_RA_W3_FIXED_CSI_RATE_EN) |
2713 le32_encode_bits(ra->ra_csi_rate_en, RTW89_H2C_RA_W3_RA_CSI_RATE_EN) |
2714 le32_encode_bits(ra->csi_mcs_ss_idx, RTW89_H2C_RA_W3_FIXED_CSI_MCS_SS_IDX) |
2715 le32_encode_bits(ra->csi_mode, RTW89_H2C_RA_W3_FIXED_CSI_MODE) |
2716 le32_encode_bits(ra->csi_gi_ltf, RTW89_H2C_RA_W3_FIXED_CSI_GI_LTF) |
2717 le32_encode_bits(ra->csi_bw, RTW89_H2C_RA_W3_FIXED_CSI_BW);
2718
2719 done:
2720 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2721 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA,
2722 H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0,
2723 len);
2724
2725 ret = rtw89_h2c_tx(rtwdev, skb, false);
2726 if (ret) {
2727 rtw89_err(rtwdev, "failed to send h2c\n");
2728 goto fail;
2729 }
2730
2731 return 0;
2732 fail:
2733 dev_kfree_skb_any(skb);
2734
2735 return ret;
2736 }
2737
2738 int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev)
2739 {
2740 struct rtw89_btc *btc = &rtwdev->btc;
2741 struct rtw89_btc_dm *dm = &btc->dm;
2742 struct rtw89_btc_init_info *init_info = &dm->init_info;
2743 struct rtw89_btc_module *module = &init_info->module;
2744 struct rtw89_btc_ant_info *ant = &module->ant;
2745 struct rtw89_h2c_cxinit *h2c;
2746 u32 len = sizeof(*h2c);
2747 struct sk_buff *skb;
2748 int ret;
2749
2750 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2751 if (!skb) {
2752 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n");
2753 return -ENOMEM;
2754 }
2755 skb_put(skb, len);
2756 h2c = (struct rtw89_h2c_cxinit *)skb->data;
2757
2758 h2c->hdr.type = CXDRVINFO_INIT;
2759 h2c->hdr.len = len - H2C_LEN_CXDRVHDR;
2760
2761 h2c->ant_type = ant->type;
2762 h2c->ant_num = ant->num;
2763 h2c->ant_iso = ant->isolation;
2764 h2c->ant_info =
2765 u8_encode_bits(ant->single_pos, RTW89_H2C_CXINIT_ANT_INFO_POS) |
2766 u8_encode_bits(ant->diversity, RTW89_H2C_CXINIT_ANT_INFO_DIVERSITY) |
2767 u8_encode_bits(ant->btg_pos, RTW89_H2C_CXINIT_ANT_INFO_BTG_POS) |
2768 u8_encode_bits(ant->stream_cnt, RTW89_H2C_CXINIT_ANT_INFO_STREAM_CNT);
2769
2770 h2c->mod_rfe = module->rfe_type;
2771 h2c->mod_cv = module->cv;
2772 h2c->mod_info =
2773 u8_encode_bits(module->bt_solo, RTW89_H2C_CXINIT_MOD_INFO_BT_SOLO) |
2774 u8_encode_bits(module->bt_pos, RTW89_H2C_CXINIT_MOD_INFO_BT_POS) |
2775 u8_encode_bits(module->switch_type, RTW89_H2C_CXINIT_MOD_INFO_SW_TYPE) |
2776 u8_encode_bits(module->wa_type, RTW89_H2C_CXINIT_MOD_INFO_WA_TYPE);
2777 h2c->mod_adie_kt = module->kt_ver_adie;
2778 h2c->wl_gch = init_info->wl_guard_ch;
2779
2780 h2c->info =
2781 u8_encode_bits(init_info->wl_only, RTW89_H2C_CXINIT_INFO_WL_ONLY) |
2782 u8_encode_bits(init_info->wl_init_ok, RTW89_H2C_CXINIT_INFO_WL_INITOK) |
2783 u8_encode_bits(init_info->dbcc_en, RTW89_H2C_CXINIT_INFO_DBCC_EN) |
2784 u8_encode_bits(init_info->cx_other, RTW89_H2C_CXINIT_INFO_CX_OTHER) |
2785 u8_encode_bits(init_info->bt_only, RTW89_H2C_CXINIT_INFO_BT_ONLY);
2786
2787 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2788 H2C_CAT_OUTSRC, BTFC_SET,
2789 SET_DRV_INFO, 0, 0,
2790 len);
2791
2792 ret = rtw89_h2c_tx(rtwdev, skb, false);
2793 if (ret) {
2794 rtw89_err(rtwdev, "failed to send h2c\n");
2795 goto fail;
2796 }
2797
2798 return 0;
2799 fail:
2800 dev_kfree_skb_any(skb);
2801
2802 return ret;
2803 }
2804
2805 #define PORT_DATA_OFFSET 4
2806 #define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12
2807 #define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \
2808 (4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR)
2809
2810 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
2811 {
2812 struct rtw89_btc *btc = &rtwdev->btc;
2813 const struct rtw89_btc_ver *ver = btc->ver;
2814 struct rtw89_btc_wl_info *wl = &btc->cx.wl;
2815 struct rtw89_btc_wl_role_info *role_info = &wl->role_info;
2816 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
2817 struct rtw89_btc_wl_active_role *active = role_info->active_role;
2818 struct sk_buff *skb;
2819 u32 len;
2820 u8 offset = 0;
2821 u8 *cmd;
2822 int ret;
2823 int i;
2824
2825 len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num);
2826
2827 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2828 if (!skb) {
2829 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
2830 return -ENOMEM;
2831 }
2832 skb_put(skb, len);
2833 cmd = skb->data;
2834
2835 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
2836 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
2837
2838 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
2839 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
2840
2841 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
2842 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
2843 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
2844 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
2845 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
2846 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
2847 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
2848 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
2849 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
2850 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
2851 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
2852 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
2853
2854 for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
2855 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
2856 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
2857 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
2858 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
2859 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
2860 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
2861 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
2862 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
2863 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
2864 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
2865 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
2866 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
2867 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
2868 }
2869
2870 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2871 H2C_CAT_OUTSRC, BTFC_SET,
2872 SET_DRV_INFO, 0, 0,
2873 len);
2874
2875 ret = rtw89_h2c_tx(rtwdev, skb, false);
2876 if (ret) {
2877 rtw89_err(rtwdev, "failed to send h2c\n");
2878 goto fail;
2879 }
2880
2881 return 0;
2882 fail:
2883 dev_kfree_skb_any(skb);
2884
2885 return ret;
2886 }
2887
2888 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \
2889 (4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR)
2890
2891 int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev)
2892 {
2893 struct rtw89_btc *btc = &rtwdev->btc;
2894 const struct rtw89_btc_ver *ver = btc->ver;
2895 struct rtw89_btc_wl_info *wl = &btc->cx.wl;
2896 struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1;
2897 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
2898 struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1;
2899 struct sk_buff *skb;
2900 u32 len;
2901 u8 *cmd, offset;
2902 int ret;
2903 int i;
2904
2905 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num);
2906
2907 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2908 if (!skb) {
2909 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
2910 return -ENOMEM;
2911 }
2912 skb_put(skb, len);
2913 cmd = skb->data;
2914
2915 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
2916 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
2917
2918 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
2919 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
2920
2921 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
2922 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
2923 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
2924 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
2925 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
2926 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
2927 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
2928 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
2929 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
2930 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
2931 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
2932 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
2933
2934 offset = PORT_DATA_OFFSET;
2935 for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
2936 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
2937 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
2938 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
2939 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
2940 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
2941 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
2942 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
2943 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
2944 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
2945 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
2946 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
2947 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
2948 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
2949 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset);
2950 }
2951
2952 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN;
2953 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset);
2954 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset);
2955 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset);
2956 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset);
2957 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset);
2958 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset);
2959
2960 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2961 H2C_CAT_OUTSRC, BTFC_SET,
2962 SET_DRV_INFO, 0, 0,
2963 len);
2964
2965 ret = rtw89_h2c_tx(rtwdev, skb, false);
2966 if (ret) {
2967 rtw89_err(rtwdev, "failed to send h2c\n");
2968 goto fail;
2969 }
2970
2971 return 0;
2972 fail:
2973 dev_kfree_skb_any(skb);
2974
2975 return ret;
2976 }
2977
2978 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \
2979 (4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR)
2980
2981 int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev)
2982 {
2983 struct rtw89_btc *btc = &rtwdev->btc;
2984 const struct rtw89_btc_ver *ver = btc->ver;
2985 struct rtw89_btc_wl_info *wl = &btc->cx.wl;
2986 struct rtw89_btc_wl_role_info_v2 *role_info = &wl->role_info_v2;
2987 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
2988 struct rtw89_btc_wl_active_role_v2 *active = role_info->active_role_v2;
2989 struct sk_buff *skb;
2990 u32 len;
2991 u8 *cmd, offset;
2992 int ret;
2993 int i;
2994
2995 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(ver->max_role_num);
2996
2997 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2998 if (!skb) {
2999 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
3000 return -ENOMEM;
3001 }
3002 skb_put(skb, len);
3003 cmd = skb->data;
3004
3005 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
3006 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
3007
3008 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
3009 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
3010
3011 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
3012 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
3013 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
3014 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
3015 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
3016 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
3017 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
3018 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
3019 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
3020 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
3021 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
3022 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
3023
3024 offset = PORT_DATA_OFFSET;
3025 for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
3026 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED_V2(cmd, active->connected, i, offset);
3027 RTW89_SET_FWCMD_CXROLE_ACT_PID_V2(cmd, active->pid, i, offset);
3028 RTW89_SET_FWCMD_CXROLE_ACT_PHY_V2(cmd, active->phy, i, offset);
3029 RTW89_SET_FWCMD_CXROLE_ACT_NOA_V2(cmd, active->noa, i, offset);
3030 RTW89_SET_FWCMD_CXROLE_ACT_BAND_V2(cmd, active->band, i, offset);
3031 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS_V2(cmd, active->client_ps, i, offset);
3032 RTW89_SET_FWCMD_CXROLE_ACT_BW_V2(cmd, active->bw, i, offset);
3033 RTW89_SET_FWCMD_CXROLE_ACT_ROLE_V2(cmd, active->role, i, offset);
3034 RTW89_SET_FWCMD_CXROLE_ACT_CH_V2(cmd, active->ch, i, offset);
3035 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR_V2(cmd, active->noa_duration, i, offset);
3036 }
3037
3038 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN;
3039 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset);
3040 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset);
3041 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset);
3042 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset);
3043 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset);
3044 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset);
3045
3046 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3047 H2C_CAT_OUTSRC, BTFC_SET,
3048 SET_DRV_INFO, 0, 0,
3049 len);
3050
3051 ret = rtw89_h2c_tx(rtwdev, skb, false);
3052 if (ret) {
3053 rtw89_err(rtwdev, "failed to send h2c\n");
3054 goto fail;
3055 }
3056
3057 return 0;
3058 fail:
3059 dev_kfree_skb_any(skb);
3060
3061 return ret;
3062 }
3063
3064 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR)
3065 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev)
3066 {
3067 struct rtw89_btc *btc = &rtwdev->btc;
3068 const struct rtw89_btc_ver *ver = btc->ver;
3069 struct rtw89_btc_ctrl *ctrl = &btc->ctrl;
3070 struct sk_buff *skb;
3071 u8 *cmd;
3072 int ret;
3073
3074 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL);
3075 if (!skb) {
3076 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
3077 return -ENOMEM;
3078 }
3079 skb_put(skb, H2C_LEN_CXDRVINFO_CTRL);
3080 cmd = skb->data;
3081
3082 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_CTRL);
3083 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR);
3084
3085 RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual);
3086 RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt);
3087 RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun);
3088 if (ver->fcxctrl == 0)
3089 RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step);
3090
3091 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3092 H2C_CAT_OUTSRC, BTFC_SET,
3093 SET_DRV_INFO, 0, 0,
3094 H2C_LEN_CXDRVINFO_CTRL);
3095
3096 ret = rtw89_h2c_tx(rtwdev, skb, false);
3097 if (ret) {
3098 rtw89_err(rtwdev, "failed to send h2c\n");
3099 goto fail;
3100 }
3101
3102 return 0;
3103 fail:
3104 dev_kfree_skb_any(skb);
3105
3106 return ret;
3107 }
3108
3109 #define H2C_LEN_CXDRVINFO_TRX (28 + H2C_LEN_CXDRVHDR)
3110 int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev)
3111 {
3112 struct rtw89_btc *btc = &rtwdev->btc;
3113 struct rtw89_btc_trx_info *trx = &btc->dm.trx_info;
3114 struct sk_buff *skb;
3115 u8 *cmd;
3116 int ret;
3117
3118 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_TRX);
3119 if (!skb) {
3120 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_trx\n");
3121 return -ENOMEM;
3122 }
3123 skb_put(skb, H2C_LEN_CXDRVINFO_TRX);
3124 cmd = skb->data;
3125
3126 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_TRX);
3127 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR);
3128
3129 RTW89_SET_FWCMD_CXTRX_TXLV(cmd, trx->tx_lvl);
3130 RTW89_SET_FWCMD_CXTRX_RXLV(cmd, trx->rx_lvl);
3131 RTW89_SET_FWCMD_CXTRX_WLRSSI(cmd, trx->wl_rssi);
3132 RTW89_SET_FWCMD_CXTRX_BTRSSI(cmd, trx->bt_rssi);
3133 RTW89_SET_FWCMD_CXTRX_TXPWR(cmd, trx->tx_power);
3134 RTW89_SET_FWCMD_CXTRX_RXGAIN(cmd, trx->rx_gain);
3135 RTW89_SET_FWCMD_CXTRX_BTTXPWR(cmd, trx->bt_tx_power);
3136 RTW89_SET_FWCMD_CXTRX_BTRXGAIN(cmd, trx->bt_rx_gain);
3137 RTW89_SET_FWCMD_CXTRX_CN(cmd, trx->cn);
3138 RTW89_SET_FWCMD_CXTRX_NHM(cmd, trx->nhm);
3139 RTW89_SET_FWCMD_CXTRX_BTPROFILE(cmd, trx->bt_profile);
3140 RTW89_SET_FWCMD_CXTRX_RSVD2(cmd, trx->rsvd2);
3141 RTW89_SET_FWCMD_CXTRX_TXRATE(cmd, trx->tx_rate);
3142 RTW89_SET_FWCMD_CXTRX_RXRATE(cmd, trx->rx_rate);
3143 RTW89_SET_FWCMD_CXTRX_TXTP(cmd, trx->tx_tp);
3144 RTW89_SET_FWCMD_CXTRX_RXTP(cmd, trx->rx_tp);
3145 RTW89_SET_FWCMD_CXTRX_RXERRRA(cmd, trx->rx_err_ratio);
3146
3147 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3148 H2C_CAT_OUTSRC, BTFC_SET,
3149 SET_DRV_INFO, 0, 0,
3150 H2C_LEN_CXDRVINFO_TRX);
3151
3152 ret = rtw89_h2c_tx(rtwdev, skb, false);
3153 if (ret) {
3154 rtw89_err(rtwdev, "failed to send h2c\n");
3155 goto fail;
3156 }
3157
3158 return 0;
3159 fail:
3160 dev_kfree_skb_any(skb);
3161
3162 return ret;
3163 }
3164
3165 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR)
3166 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev)
3167 {
3168 struct rtw89_btc *btc = &rtwdev->btc;
3169 struct rtw89_btc_wl_info *wl = &btc->cx.wl;
3170 struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info;
3171 struct sk_buff *skb;
3172 u8 *cmd;
3173 int ret;
3174
3175 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK);
3176 if (!skb) {
3177 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_rfk\n");
3178 return -ENOMEM;
3179 }
3180 skb_put(skb, H2C_LEN_CXDRVINFO_RFK);
3181 cmd = skb->data;
3182
3183 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_RFK);
3184 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR);
3185
3186 RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state);
3187 RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map);
3188 RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map);
3189 RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band);
3190 RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type);
3191
3192 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3193 H2C_CAT_OUTSRC, BTFC_SET,
3194 SET_DRV_INFO, 0, 0,
3195 H2C_LEN_CXDRVINFO_RFK);
3196
3197 ret = rtw89_h2c_tx(rtwdev, skb, false);
3198 if (ret) {
3199 rtw89_err(rtwdev, "failed to send h2c\n");
3200 goto fail;
3201 }
3202
3203 return 0;
3204 fail:
3205 dev_kfree_skb_any(skb);
3206
3207 return ret;
3208 }
3209
3210 #define H2C_LEN_PKT_OFLD 4
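/* Ask firmware to delete offloaded packet @id and wait for the matching
 * completion event before releasing the local packet-offload bitmap slot.
 */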
3211 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id)
3212 {
3213 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
3214 struct sk_buff *skb;
3215 unsigned int cond;
3216 u8 *cmd;
3217 int ret;
3218
3219 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD);
3220 if (!skb) {
3221 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
3222 return -ENOMEM;
3223 }
3224 skb_put(skb, H2C_LEN_PKT_OFLD);
3225 cmd = skb->data;
3226
3227 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id);
3228 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL);
3229
3230 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3231 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
3232 H2C_FUNC_PACKET_OFLD, 1, 1,
3233 H2C_LEN_PKT_OFLD);
3234
3235 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(id, RTW89_PKT_OFLD_OP_DEL);
3236
3237 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
3238 if (ret < 0) {
3239 rtw89_debug(rtwdev, RTW89_DBG_FW,
3240 "failed to del pkt ofld: id %d, ret %d\n",
3241 id, ret);
3242 return ret;
3243 }
3244
3245 rtw89_core_release_bit_map(rtwdev->pkt_offload, id);
3246 return 0;
3247 }
3248
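/* Reserve a packet-offload id, copy @skb_ofld into an ADD packet-offload
 * H2C and wait for the firmware completion; the id is released again on
 * any failure and returned through @id on success.
 */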
3249 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
3250 struct sk_buff *skb_ofld)
3251 {
3252 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
3253 struct sk_buff *skb;
3254 unsigned int cond;
3255 u8 *cmd;
3256 u8 alloc_id;
3257 int ret;
3258
3259 alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload,
3260 RTW89_MAX_PKT_OFLD_NUM);
3261 if (alloc_id == RTW89_MAX_PKT_OFLD_NUM)
3262 return -ENOSPC;
3263
3264 *id = alloc_id;
3265
3266 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len);
3267 if (!skb) {
3268 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
3269 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id);
3270 return -ENOMEM;
3271 }
3272 skb_put(skb, H2C_LEN_PKT_OFLD);
3273 cmd = skb->data;
3274
3275 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id);
3276 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD);
3277 RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len);
3278 skb_put_data(skb, skb_ofld->data, skb_ofld->len);
3279
3280 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3281 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
3282 H2C_FUNC_PACKET_OFLD, 1, 1,
3283 H2C_LEN_PKT_OFLD + skb_ofld->len);
3284
3285 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(alloc_id, RTW89_PKT_OFLD_OP_ADD);
3286
3287 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
3288 if (ret < 0) {
3289 rtw89_debug(rtwdev, RTW89_DBG_FW,
3290 "failed to add pkt ofld: id %d, ret %d\n",
3291 alloc_id, ret);
3292 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id);
3293 return ret;
3294 }
3295
3296 return 0;
3297 }
3298
3299 #define H2C_LEN_SCAN_LIST_OFFLOAD 4
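/* Download the scan channel list: a short header carrying the channel count
 * and per-entry size (in units of 4 bytes), followed by @len fixed-size
 * channel records taken from @chan_list. Waits for the firmware completion
 * event.
 */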
3300 int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
3301 struct list_head *chan_list)
3302 {
3303 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
3304 struct rtw89_mac_chinfo *ch_info;
3305 struct sk_buff *skb;
3306 int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD + len * RTW89_MAC_CHINFO_SIZE;
3307 unsigned int cond;
3308 u8 *cmd;
3309 int ret;
3310
3311 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
3312 if (!skb) {
3313 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
3314 return -ENOMEM;
3315 }
3316 skb_put(skb, H2C_LEN_SCAN_LIST_OFFLOAD);
3317 cmd = skb->data;
3318
3319 RTW89_SET_FWCMD_SCANOFLD_CH_NUM(cmd, len);
3320 /* in unit of 4 bytes */
3321 RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(cmd, RTW89_MAC_CHINFO_SIZE / 4);
3322
3323 list_for_each_entry(ch_info, chan_list, list) {
3324 cmd = skb_put(skb, RTW89_MAC_CHINFO_SIZE);
3325
3326 RTW89_SET_FWCMD_CHINFO_PERIOD(cmd, ch_info->period);
3327 RTW89_SET_FWCMD_CHINFO_DWELL(cmd, ch_info->dwell_time);
3328 RTW89_SET_FWCMD_CHINFO_CENTER_CH(cmd, ch_info->central_ch);
3329 RTW89_SET_FWCMD_CHINFO_PRI_CH(cmd, ch_info->pri_ch);
3330 RTW89_SET_FWCMD_CHINFO_BW(cmd, ch_info->bw);
3331 RTW89_SET_FWCMD_CHINFO_ACTION(cmd, ch_info->notify_action);
3332 RTW89_SET_FWCMD_CHINFO_NUM_PKT(cmd, ch_info->num_pkt);
3333 RTW89_SET_FWCMD_CHINFO_TX(cmd, ch_info->tx_pkt);
3334 RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(cmd, ch_info->pause_data);
3335 RTW89_SET_FWCMD_CHINFO_BAND(cmd, ch_info->ch_band);
3336 RTW89_SET_FWCMD_CHINFO_PKT_ID(cmd, ch_info->probe_id);
3337 RTW89_SET_FWCMD_CHINFO_DFS(cmd, ch_info->dfs_ch);
3338 RTW89_SET_FWCMD_CHINFO_TX_NULL(cmd, ch_info->tx_null);
3339 RTW89_SET_FWCMD_CHINFO_RANDOM(cmd, ch_info->rand_seq_num);
3340 RTW89_SET_FWCMD_CHINFO_PKT0(cmd, ch_info->pkt_id[0]);
3341 RTW89_SET_FWCMD_CHINFO_PKT1(cmd, ch_info->pkt_id[1]);
3342 RTW89_SET_FWCMD_CHINFO_PKT2(cmd, ch_info->pkt_id[2]);
3343 RTW89_SET_FWCMD_CHINFO_PKT3(cmd, ch_info->pkt_id[3]);
3344 RTW89_SET_FWCMD_CHINFO_PKT4(cmd, ch_info->pkt_id[4]);
3345 RTW89_SET_FWCMD_CHINFO_PKT5(cmd, ch_info->pkt_id[5]);
3346 RTW89_SET_FWCMD_CHINFO_PKT6(cmd, ch_info->pkt_id[6]);
3347 RTW89_SET_FWCMD_CHINFO_PKT7(cmd, ch_info->pkt_id[7]);
3348 }
3349
3350 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3351 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
3352 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);
3353
3354 cond = RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_ADD_SCANOFLD_CH);
3355
3356 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
3357 if (ret) {
3358 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n");
3359 return ret;
3360 }
3361
3362 return 0;
3363 }
3364
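/* Start or stop firmware scan offload for @rtwvif. When
 * @option->target_ch_mode is set, the current operating channel parameters
 * (band, bandwidth, primary and central channel) are included in the
 * command.
 */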
3365 int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
3366 struct rtw89_scan_option *option,
3367 struct rtw89_vif *rtwvif)
3368 {
3369 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
3370 struct rtw89_chan *op = &rtwdev->scan_info.op_chan;
3371 struct rtw89_h2c_scanofld *h2c;
3372 u32 len = sizeof(*h2c);
3373 struct sk_buff *skb;
3374 unsigned int cond;
3375 int ret;
3376
3377 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3378 if (!skb) {
3379 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n");
3380 return -ENOMEM;
3381 }
3382 skb_put(skb, len);
3383 h2c = (struct rtw89_h2c_scanofld *)skb->data;
3384
3385 h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) |
3386 le32_encode_bits(rtwvif->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) |
3387 le32_encode_bits(RTW89_PHY_0, RTW89_H2C_SCANOFLD_W0_BAND) |
3388 le32_encode_bits(option->enable, RTW89_H2C_SCANOFLD_W0_OPERATION);
3389
3390 h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) |
3391 le32_encode_bits(option->target_ch_mode,
3392 RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) |
3393 le32_encode_bits(RTW89_SCAN_IMMEDIATE,
3394 RTW89_H2C_SCANOFLD_W1_START_MODE) |
3395 le32_encode_bits(RTW89_SCAN_ONCE, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE);
3396
3397 if (option->target_ch_mode) {
3398 h2c->w1 |= le32_encode_bits(op->band_width,
3399 RTW89_H2C_SCANOFLD_W1_TARGET_CH_BW) |
3400 le32_encode_bits(op->primary_channel,
3401 RTW89_H2C_SCANOFLD_W1_TARGET_PRI_CH) |
3402 le32_encode_bits(op->channel,
3403 RTW89_H2C_SCANOFLD_W1_TARGET_CENTRAL_CH);
3404 h2c->w0 |= le32_encode_bits(op->band_type,
3405 RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND);
3406 }
3407
3408 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3409 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
3410 H2C_FUNC_SCANOFLD, 1, 1,
3411 len);
3412
3413 cond = RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_SCANOFLD);
3414
3415 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
3416 if (ret) {
3417 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan ofld\n");
3418 return ret;
3419 }
3420
3421 return 0;
3422 }
3423
3424 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
3425 struct rtw89_fw_h2c_rf_reg_info *info,
3426 u16 len, u8 page)
3427 {
3428 struct sk_buff *skb;
3429 u8 class = info->rf_path == RF_PATH_A ?
3430 H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B;
3431 int ret;
3432
3433 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3434 if (!skb) {
3435 rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n");
3436 return -ENOMEM;
3437 }
3438 skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len);
3439
3440 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3441 H2C_CAT_OUTSRC, class, page, 0, 0,
3442 len);
3443
3444 ret = rtw89_h2c_tx(rtwdev, skb, false);
3445 if (ret) {
3446 rtw89_err(rtwdev, "failed to send h2c\n");
3447 goto fail;
3448 }
3449
3450 return 0;
3451 fail:
3452 dev_kfree_skb_any(skb);
3453
3454 return ret;
3455 }
3456
3457 int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev)
3458 {
3459 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
3460 struct rtw89_fw_h2c_rf_get_mccch *mccch;
3461 struct sk_buff *skb;
3462 int ret;
3463 u8 idx;
3464
3465 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch));
3466 if (!skb) {
3467 		rtw89_err(rtwdev, "failed to alloc skb for h2c rf ntfy mcc\n");
3468 return -ENOMEM;
3469 }
3470 skb_put(skb, sizeof(*mccch));
3471 mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data;
3472
3473 idx = rfk_mcc->table_idx;
3474 mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]);
3475 mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]);
3476 mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]);
3477 mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]);
3478 mccch->current_channel = cpu_to_le32(rfk_mcc->ch[idx]);
3479 mccch->current_band_type = cpu_to_le32(rfk_mcc->band[idx]);
3480
3481 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3482 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY,
3483 H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0,
3484 sizeof(*mccch));
3485
3486 ret = rtw89_h2c_tx(rtwdev, skb, false);
3487 if (ret) {
3488 rtw89_err(rtwdev, "failed to send h2c\n");
3489 goto fail;
3490 }
3491
3492 return 0;
3493 fail:
3494 dev_kfree_skb_any(skb);
3495
3496 return ret;
3497 }
3498 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc);
3499
3500 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
3501 u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
3502 bool rack, bool dack)
3503 {
3504 struct sk_buff *skb;
3505 int ret;
3506
3507 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3508 if (!skb) {
3509 rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n");
3510 return -ENOMEM;
3511 }
3512 skb_put_data(skb, buf, len);
3513
3514 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3515 H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack,
3516 len);
3517
3518 ret = rtw89_h2c_tx(rtwdev, skb, false);
3519 if (ret) {
3520 rtw89_err(rtwdev, "failed to send h2c\n");
3521 goto fail;
3522 }
3523
3524 return 0;
3525 fail:
3526 dev_kfree_skb_any(skb);
3527
3528 return ret;
3529 }
3530
3531 int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len)
3532 {
3533 struct sk_buff *skb;
3534 int ret;
3535
3536 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len);
3537 if (!skb) {
3538 rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n");
3539 return -ENOMEM;
3540 }
3541 skb_put_data(skb, buf, len);
3542
3543 ret = rtw89_h2c_tx(rtwdev, skb, false);
3544 if (ret) {
3545 rtw89_err(rtwdev, "failed to send h2c\n");
3546 goto fail;
3547 }
3548
3549 return 0;
3550 fail:
3551 dev_kfree_skb_any(skb);
3552
3553 return ret;
3554 }
3555
3556 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev)
3557 {
3558 struct rtw89_early_h2c *early_h2c;
3559
3560 lockdep_assert_held(&rtwdev->mutex);
3561
3562 list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) {
3563 rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len);
3564 }
3565 }
3566
3567 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev)
3568 {
3569 struct rtw89_early_h2c *early_h2c, *tmp;
3570
3571 mutex_lock(&rtwdev->mutex);
3572 list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) {
3573 list_del(&early_h2c->list);
3574 kfree(early_h2c->h2c);
3575 kfree(early_h2c);
3576 }
3577 mutex_unlock(&rtwdev->mutex);
3578 }
3579
3580 static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h)
3581 {
3582 const struct rtw89_c2h_hdr *hdr = (const struct rtw89_c2h_hdr *)c2h->data;
3583 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h);
3584
3585 attr->category = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CATEGORY);
3586 attr->class = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CLASS);
3587 attr->func = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_FUNC);
3588 attr->len = le32_get_bits(hdr->w1, RTW89_C2H_HDR_W1_LEN);
3589 }
3590
3591 static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev,
3592 struct sk_buff *c2h)
3593 {
3594 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h);
3595 u8 category = attr->category;
3596 u8 class = attr->class;
3597 u8 func = attr->func;
3598
3599 switch (category) {
3600 default:
3601 return false;
3602 case RTW89_C2H_CAT_MAC:
3603 return rtw89_mac_c2h_chk_atomic(rtwdev, class, func);
3604 case RTW89_C2H_CAT_OUTSRC:
3605 return rtw89_phy_c2h_chk_atomic(rtwdev, class, func);
3606 }
3607 }
3608
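/* Entry point for C2H events received from firmware. Events whose handlers
 * are safe to run in atomic context are dispatched immediately; everything
 * else is queued on c2h_queue and handled later in rtw89_fw_c2h_work()
 * under rtwdev->mutex.
 */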
3609 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h)
3610 {
3611 rtw89_fw_c2h_parse_attr(c2h);
3612 if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h))
3613 goto enqueue;
3614
3615 rtw89_fw_c2h_cmd_handle(rtwdev, c2h);
3616 dev_kfree_skb_any(c2h);
3617 return;
3618
3619 enqueue:
3620 skb_queue_tail(&rtwdev->c2h_queue, c2h);
3621 ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
3622 }
3623
3624 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
3625 struct sk_buff *skb)
3626 {
3627 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb);
3628 u8 category = attr->category;
3629 u8 class = attr->class;
3630 u8 func = attr->func;
3631 u16 len = attr->len;
3632 bool dump = true;
3633
3634 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
3635 return;
3636
3637 switch (category) {
3638 case RTW89_C2H_CAT_TEST:
3639 break;
3640 case RTW89_C2H_CAT_MAC:
3641 rtw89_mac_c2h_handle(rtwdev, skb, len, class, func);
3642 if (class == RTW89_MAC_C2H_CLASS_INFO &&
3643 func == RTW89_MAC_C2H_FUNC_C2H_LOG)
3644 dump = false;
3645 break;
3646 case RTW89_C2H_CAT_OUTSRC:
3647 if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN &&
3648 class <= RTW89_PHY_C2H_CLASS_BTC_MAX)
3649 rtw89_btc_c2h_handle(rtwdev, skb, len, class, func);
3650 else
3651 rtw89_phy_c2h_handle(rtwdev, skb, len, class, func);
3652 break;
3653 }
3654
3655 if (dump)
3656 rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len);
3657 }
3658
3659 void rtw89_fw_c2h_work(struct work_struct *work)
3660 {
3661 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
3662 c2h_work);
3663 struct sk_buff *skb, *tmp;
3664
3665 skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) {
3666 skb_unlink(skb, &rtwdev->c2h_queue);
3667 mutex_lock(&rtwdev->mutex);
3668 rtw89_fw_c2h_cmd_handle(rtwdev, skb);
3669 mutex_unlock(&rtwdev->mutex);
3670 dev_kfree_skb_any(skb);
3671 }
3672 }
3673
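/* Register-based H2C path: wait for firmware to clear the H2C control
 * register, encode the function ID and the length (in 4-byte words) into
 * the header, write the payload words to the H2C registers, bump the H2C
 * sequence counter and finally set the trigger bit to notify firmware.
 */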
3674 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev,
3675 struct rtw89_mac_h2c_info *info)
3676 {
3677 const struct rtw89_chip_info *chip = rtwdev->chip;
3678 struct rtw89_fw_info *fw_info = &rtwdev->fw;
3679 const u32 *h2c_reg = chip->h2c_regs;
3680 u8 i, val, len;
3681 int ret;
3682
3683 ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false,
3684 rtwdev, chip->h2c_ctrl_reg);
3685 if (ret) {
3686 rtw89_warn(rtwdev, "FW does not process h2c registers\n");
3687 return ret;
3688 }
3689
3690 len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN,
3691 sizeof(info->u.h2creg[0]));
3692
3693 u32p_replace_bits(&info->u.hdr.w0, info->id, RTW89_H2CREG_HDR_FUNC_MASK);
3694 u32p_replace_bits(&info->u.hdr.w0, len, RTW89_H2CREG_HDR_LEN_MASK);
3695
3696 for (i = 0; i < RTW89_H2CREG_MAX; i++)
3697 rtw89_write32(rtwdev, h2c_reg[i], info->u.h2creg[i]);
3698
3699 fw_info->h2c_counter++;
3700 rtw89_write8_mask(rtwdev, chip->h2c_counter_reg.addr,
3701 chip->h2c_counter_reg.mask, fw_info->h2c_counter);
3702 rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER);
3703
3704 return 0;
3705 }
3706
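/* Register-based C2H path: poll until firmware raises the C2H control
 * register, read back RTW89_C2HREG_MAX words, clear the control register to
 * acknowledge, then decode the function ID and the payload length (the
 * length field is in 4-byte units and includes the header) and bump the
 * C2H counter.
 */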
3707 static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
3708 struct rtw89_mac_c2h_info *info)
3709 {
3710 const struct rtw89_chip_info *chip = rtwdev->chip;
3711 struct rtw89_fw_info *fw_info = &rtwdev->fw;
3712 const u32 *c2h_reg = chip->c2h_regs;
3713 	int ret;
3714 u8 i, val;
3715
3716 info->id = RTW89_FWCMD_C2HREG_FUNC_NULL;
3717
3718 ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1,
3719 RTW89_C2H_TIMEOUT, false, rtwdev,
3720 chip->c2h_ctrl_reg);
3721 if (ret) {
3722 rtw89_warn(rtwdev, "c2h reg timeout\n");
3723 return ret;
3724 }
3725
3726 for (i = 0; i < RTW89_C2HREG_MAX; i++)
3727 info->u.c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]);
3728
3729 rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0);
3730
3731 info->id = u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_FUNC_MASK);
3732 info->content_len =
3733 (u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_LEN_MASK) << 2) -
3734 RTW89_C2HREG_HDR_LEN;
3735
3736 fw_info->c2h_counter++;
3737 rtw89_write8_mask(rtwdev, chip->c2h_counter_reg.addr,
3738 chip->c2h_counter_reg.mask, fw_info->c2h_counter);
3739
3740 return 0;
3741 }
3742
3743 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
3744 struct rtw89_mac_h2c_info *h2c_info,
3745 struct rtw89_mac_c2h_info *c2h_info)
3746 {
3747 	int ret;
3748
3749 if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE)
3750 lockdep_assert_held(&rtwdev->mutex);
3751
3752 if (!h2c_info && !c2h_info)
3753 return -EINVAL;
3754
3755 if (!h2c_info)
3756 goto recv_c2h;
3757
3758 ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info);
3759 if (ret)
3760 return ret;
3761
3762 recv_c2h:
3763 if (!c2h_info)
3764 return 0;
3765
3766 ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info);
3767 if (ret)
3768 return ret;
3769
3770 return 0;
3771 }
3772
3773 void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev)
3774 {
3775 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) {
3776 rtw89_err(rtwdev, "[ERR]pwr is off\n");
3777 return;
3778 }
3779
3780 rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0));
3781 rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1));
3782 rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2));
3783 rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3));
3784 rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n",
3785 rtw89_read32(rtwdev, R_AX_HALT_C2H));
3786 rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n",
3787 rtw89_read32(rtwdev, R_AX_SER_DBG_INFO));
3788
3789 rtw89_fw_prog_cnt_dump(rtwdev);
3790 }
3791
3792 static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev)
3793 {
3794 struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
3795 struct rtw89_pktofld_info *info, *tmp;
3796 u8 idx;
3797
3798 for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) {
3799 if (!(rtwdev->chip->support_bands & BIT(idx)))
3800 continue;
3801
3802 list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) {
3803 if (test_bit(info->id, rtwdev->pkt_offload))
3804 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
3805 list_del(&info->list);
3806 kfree(info);
3807 }
3808 }
3809 }
3810
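/* On 6 GHz, a scan SSID with zero length is treated as a wildcard: return
 * true so the caller skips offloading a directed probe request for it.
 * Otherwise the SSID is copied into @info for the offloaded probe request.
 */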
3811 static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev,
3812 struct rtw89_vif *rtwvif,
3813 struct rtw89_pktofld_info *info,
3814 enum nl80211_band band, u8 ssid_idx)
3815 {
3816 struct cfg80211_scan_request *req = rtwvif->scan_req;
3817
3818 if (band != NL80211_BAND_6GHZ)
3819 return false;
3820
3821 if (req->ssids[ssid_idx].ssid_len) {
3822 memcpy(info->ssid, req->ssids[ssid_idx].ssid,
3823 req->ssids[ssid_idx].ssid_len);
3824 info->ssid_len = req->ssids[ssid_idx].ssid_len;
3825 return false;
3826 } else {
3827 return true;
3828 }
3829 }
3830
3831 static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev,
3832 struct rtw89_vif *rtwvif,
3833 struct sk_buff *skb, u8 ssid_idx)
3834 {
3835 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
3836 struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
3837 struct rtw89_pktofld_info *info;
3838 struct sk_buff *new;
3839 int ret = 0;
3840 u8 band;
3841
3842 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
3843 if (!(rtwdev->chip->support_bands & BIT(band)))
3844 continue;
3845
3846 new = skb_copy(skb, GFP_KERNEL);
3847 if (!new) {
3848 ret = -ENOMEM;
3849 goto out;
3850 }
3851 skb_put_data(new, ies->ies[band], ies->len[band]);
3852 skb_put_data(new, ies->common_ies, ies->common_ie_len);
3853
3854 info = kzalloc(sizeof(*info), GFP_KERNEL);
3855 if (!info) {
3856 ret = -ENOMEM;
3857 kfree_skb(new);
3858 goto out;
3859 }
3860
3861 if (rtw89_is_6ghz_wildcard_probe_req(rtwdev, rtwvif, info, band,
3862 ssid_idx)) {
3863 kfree_skb(new);
3864 kfree(info);
3865 goto out;
3866 }
3867
3868 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new);
3869 if (ret) {
3870 kfree_skb(new);
3871 kfree(info);
3872 goto out;
3873 }
3874
3875 list_add_tail(&info->list, &scan_info->pkt_list[band]);
3876 kfree_skb(new);
3877 }
3878 out:
3879 return ret;
3880 }
3881
3882 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev,
3883 struct rtw89_vif *rtwvif)
3884 {
3885 struct cfg80211_scan_request *req = rtwvif->scan_req;
3886 struct sk_buff *skb;
3887 u8 num = req->n_ssids, i;
3888 int ret;
3889
3890 for (i = 0; i < num; i++) {
3891 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr,
3892 req->ssids[i].ssid,
3893 req->ssids[i].ssid_len,
3894 req->ie_len);
3895 if (!skb)
3896 return -ENOMEM;
3897
3898 ret = rtw89_append_probe_req_ie(rtwdev, rtwvif, skb, i);
3899 kfree_skb(skb);
3900
3901 if (ret)
3902 return ret;
3903 }
3904
3905 return 0;
3906 }
3907
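/* Handle 6 GHz channels reported via Reduced Neighbor Report (RNR): for each
 * scan_6ghz_params entry matching this channel, offload a directed probe
 * request (addr3 set to the reported BSSID) unless one is already queued for
 * that BSSID, then mark the channel for TX and extend its dwell period.
 */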
3908 static int rtw89_update_6ghz_rnr_chan(struct rtw89_dev *rtwdev,
3909 struct cfg80211_scan_request *req,
3910 struct rtw89_mac_chinfo *ch_info)
3911 {
3912 struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
3913 struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
3914 struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
3915 struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
3916 struct cfg80211_scan_6ghz_params *params;
3917 struct rtw89_pktofld_info *info, *tmp;
3918 struct ieee80211_hdr *hdr;
3919 struct sk_buff *skb;
3920 bool found;
3921 int ret = 0;
3922 u8 i;
3923
3924 if (!req->n_6ghz_params)
3925 return 0;
3926
3927 for (i = 0; i < req->n_6ghz_params; i++) {
3928 params = &req->scan_6ghz_params[i];
3929
3930 if (req->channels[params->channel_idx]->hw_value !=
3931 ch_info->pri_ch)
3932 continue;
3933
3934 found = false;
3935 list_for_each_entry(tmp, &pkt_list[NL80211_BAND_6GHZ], list) {
3936 if (ether_addr_equal(tmp->bssid, params->bssid)) {
3937 found = true;
3938 break;
3939 }
3940 }
3941 if (found)
3942 continue;
3943
3944 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr,
3945 					     NULL, 0, req->ie_len);
		if (!skb) {
			ret = -ENOMEM;
			goto out;
		}
3946 		skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]);
3947 skb_put_data(skb, ies->common_ies, ies->common_ie_len);
3948 hdr = (struct ieee80211_hdr *)skb->data;
3949 ether_addr_copy(hdr->addr3, params->bssid);
3950
3951 info = kzalloc(sizeof(*info), GFP_KERNEL);
3952 if (!info) {
3953 ret = -ENOMEM;
3954 kfree_skb(skb);
3955 goto out;
3956 }
3957
3958 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
3959 if (ret) {
3960 kfree_skb(skb);
3961 kfree(info);
3962 goto out;
3963 }
3964
3965 ether_addr_copy(info->bssid, params->bssid);
3966 info->channel_6ghz = req->channels[params->channel_idx]->hw_value;
3967 list_add_tail(&info->list, &rtwdev->scan_info.pkt_list[NL80211_BAND_6GHZ]);
3968
3969 ch_info->tx_pkt = true;
3970 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G;
3971
3972 kfree_skb(skb);
3973 }
3974
3975 out:
3976 return ret;
3977 }
3978
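/* Fill one channel entry for the scan list: set default dwell/TX behaviour,
 * attach up to RTW89_SCANOFLD_MAX_SSID offloaded probe request IDs for the
 * channel's band, and adjust the entry according to @chan_type
 * (RTW89_CHAN_OPERATE reuses the operating channel and sends a NULL frame,
 * RTW89_CHAN_DFS enforces a longer per-channel period and dwell time).
 */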
3979 static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
3980 int ssid_num,
3981 struct rtw89_mac_chinfo *ch_info)
3982 {
3983 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
3984 struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
3985 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
3986 struct cfg80211_scan_request *req = rtwvif->scan_req;
3987 struct rtw89_chan *op = &rtwdev->scan_info.op_chan;
3988 struct rtw89_pktofld_info *info;
3989 u8 band, probe_count = 0;
3990 int ret;
3991
3992 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
3993 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
3994 ch_info->bw = RTW89_SCAN_WIDTH;
3995 ch_info->tx_pkt = true;
3996 ch_info->cfg_tx_pwr = false;
3997 ch_info->tx_pwr_idx = 0;
3998 ch_info->tx_null = false;
3999 ch_info->pause_data = false;
4000 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
4001
4002 if (ch_info->ch_band == RTW89_BAND_6G) {
4003 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) ||
4004 !ch_info->is_psc) {
4005 ch_info->tx_pkt = false;
4006 if (!req->duration_mandatory)
4007 ch_info->period -= RTW89_DWELL_TIME_6G;
4008 }
4009 }
4010
4011 ret = rtw89_update_6ghz_rnr_chan(rtwdev, req, ch_info);
4012 if (ret)
4013 		rtw89_warn(rtwdev, "RNR channel update failed: %d\n", ret);
4014
4015 if (ssid_num) {
4016 band = rtw89_hw_to_nl80211_band(ch_info->ch_band);
4017
4018 list_for_each_entry(info, &scan_info->pkt_list[band], list) {
4019 if (info->channel_6ghz &&
4020 ch_info->pri_ch != info->channel_6ghz)
4021 continue;
4022 else if (info->channel_6ghz && probe_count != 0)
4023 ch_info->period += RTW89_CHANNEL_TIME_6G;
4024 ch_info->pkt_id[probe_count++] = info->id;
4025 if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
4026 break;
4027 }
4028 ch_info->num_pkt = probe_count;
4029 }
4030
4031 switch (chan_type) {
4032 case RTW89_CHAN_OPERATE:
4033 ch_info->central_ch = op->channel;
4034 ch_info->pri_ch = op->primary_channel;
4035 ch_info->ch_band = op->band_type;
4036 ch_info->bw = op->band_width;
4037 ch_info->tx_null = true;
4038 ch_info->num_pkt = 0;
4039 break;
4040 case RTW89_CHAN_DFS:
4041 if (ch_info->ch_band != RTW89_BAND_6G)
4042 ch_info->period = max_t(u8, ch_info->period,
4043 RTW89_DFS_CHAN_TIME);
4044 ch_info->dwell_time = RTW89_DWELL_TIME;
4045 break;
4046 case RTW89_CHAN_ACTIVE:
4047 break;
4048 default:
4049 		rtw89_err(rtwdev, "Channel type out of bounds\n");
4050 }
4051 }
4052
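/* Build and offload one batch of scan channels, starting at last_chan_idx
 * and capped at RTW89_SCAN_LIST_LIMIT entries per H2C. When @connected is
 * true, an operating-channel entry is interleaved whenever the accumulated
 * off-channel time would exceed RTW89_OFF_CHAN_TIME, so the firmware
 * periodically returns to the operating channel.
 */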
4053 static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
4054 struct rtw89_vif *rtwvif, bool connected)
4055 {
4056 struct cfg80211_scan_request *req = rtwvif->scan_req;
4057 struct rtw89_mac_chinfo *ch_info, *tmp;
4058 struct ieee80211_channel *channel;
4059 struct list_head chan_list;
4060 bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN;
4061 int list_len, off_chan_time = 0;
4062 enum rtw89_chan_type type;
4063 int ret = 0;
4064 u32 idx;
4065
4066 INIT_LIST_HEAD(&chan_list);
4067 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0;
4068 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
4069 idx++, list_len++) {
4070 channel = req->channels[idx];
4071 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
4072 if (!ch_info) {
4073 ret = -ENOMEM;
4074 goto out;
4075 }
4076
4077 if (req->duration_mandatory)
4078 ch_info->period = req->duration;
4079 else if (channel->band == NL80211_BAND_6GHZ)
4080 ch_info->period = RTW89_CHANNEL_TIME_6G +
4081 RTW89_DWELL_TIME_6G;
4082 else
4083 ch_info->period = RTW89_CHANNEL_TIME;
4084
4085 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
4086 ch_info->central_ch = channel->hw_value;
4087 ch_info->pri_ch = channel->hw_value;
4088 ch_info->rand_seq_num = random_seq;
4089 ch_info->is_psc = cfg80211_channel_is_psc(channel);
4090
4091 if (channel->flags &
4092 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
4093 type = RTW89_CHAN_DFS;
4094 else
4095 type = RTW89_CHAN_ACTIVE;
4096 rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info);
4097
4098 if (connected &&
4099 off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) {
4100 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
4101 if (!tmp) {
4102 ret = -ENOMEM;
4103 kfree(ch_info);
4104 goto out;
4105 }
4106
4107 type = RTW89_CHAN_OPERATE;
4108 tmp->period = req->duration_mandatory ?
4109 req->duration : RTW89_CHANNEL_TIME;
4110 rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp);
4111 list_add_tail(&tmp->list, &chan_list);
4112 off_chan_time = 0;
4113 list_len++;
4114 }
4115 list_add_tail(&ch_info->list, &chan_list);
4116 off_chan_time += ch_info->period;
4117 }
4118 rtwdev->scan_info.last_chan_idx = idx;
4119 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list);
4120
4121 out:
4122 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
4123 list_del(&ch_info->list);
4124 kfree(ch_info);
4125 }
4126
4127 return ret;
4128 }
4129
4130 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev,
4131 struct rtw89_vif *rtwvif, bool connected)
4132 {
4133 int ret;
4134
4135 ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif);
4136 if (ret) {
4137 rtw89_err(rtwdev, "Update probe request failed\n");
4138 goto out;
4139 }
4140 ret = rtw89_hw_scan_add_chan_list(rtwdev, rtwvif, connected);
4141 out:
4142 return ret;
4143 }
4144
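/* Prepare software state for a HW-offloaded scan: record the operating
 * channel and scanning vif, optionally randomize the scan MAC address,
 * relax the RX filter (beacon check, broadcast and A1-match filtering are
 * cleared) and pause the channel context while the scan runs.
 */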
4145 void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
4146 struct ieee80211_scan_request *scan_req)
4147 {
4148 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
4149 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
4150 struct cfg80211_scan_request *req = &scan_req->req;
4151 u32 rx_fltr = rtwdev->hal.rx_fltr;
4152 u8 mac_addr[ETH_ALEN];
4153
4154 rtw89_get_channel(rtwdev, rtwvif, &rtwdev->scan_info.op_chan);
4155 rtwdev->scan_info.scanning_vif = vif;
4156 rtwdev->scan_info.last_chan_idx = 0;
4157 rtwvif->scan_ies = &scan_req->ies;
4158 rtwvif->scan_req = req;
4159 ieee80211_stop_queues(rtwdev->hw);
4160
4161 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
4162 get_random_mask_addr(mac_addr, req->mac_addr,
4163 req->mac_addr_mask);
4164 else
4165 ether_addr_copy(mac_addr, vif->addr);
4166 rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, true);
4167
4168 rx_fltr &= ~B_AX_A_BCN_CHK_EN;
4169 rx_fltr &= ~B_AX_A_BC;
4170 rx_fltr &= ~B_AX_A_A1_MATCH;
4171 rtw89_write32_mask(rtwdev,
4172 rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0),
4173 B_AX_RX_FLTR_CFG_MASK,
4174 rx_fltr);
4175
4176 rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_HW_SCAN);
4177 }
4178
4179 void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
4180 bool aborted)
4181 {
4182 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
4183 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
4184 struct cfg80211_scan_info info = {
4185 .aborted = aborted,
4186 };
4187 struct rtw89_vif *rtwvif;
4188
4189 if (!vif)
4190 return;
4191
4192 rtw89_write32_mask(rtwdev,
4193 rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0),
4194 B_AX_RX_FLTR_CFG_MASK,
4195 rtwdev->hal.rx_fltr);
4196
4197 rtw89_core_scan_complete(rtwdev, vif, true);
4198 ieee80211_scan_completed(rtwdev->hw, &info);
4199 ieee80211_wake_queues(rtwdev->hw);
4200 rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true);
4201
4202 rtw89_release_pkt_list(rtwdev);
4203 rtwvif = (struct rtw89_vif *)vif->drv_priv;
4204 rtwvif->scan_req = NULL;
4205 rtwvif->scan_ies = NULL;
4206 scan_info->last_chan_idx = 0;
4207 scan_info->scanning_vif = NULL;
4208
4209 rtw89_chanctx_proceed(rtwdev);
4210 }
4211
4212 void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
4213 {
4214 rtw89_hw_scan_offload(rtwdev, vif, false);
4215 rtw89_hw_scan_complete(rtwdev, vif, true);
4216 }
4217
4218 static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev)
4219 {
4220 struct rtw89_vif *rtwvif;
4221
4222 rtw89_for_each_rtwvif(rtwdev, rtwvif) {
4223 /* This variable implies connected or during attempt to connect */
4224 		/* A non-zero BSSID implies the vif is connected or attempting to connect */
4225 return true;
4226 }
4227
4228 return false;
4229 }
4230
4231 int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
4232 bool enable)
4233 {
4234 struct rtw89_scan_option opt = {0};
4235 struct rtw89_vif *rtwvif;
4236 bool connected;
4237 int ret = 0;
4238
4239 rtwvif = vif ? (struct rtw89_vif *)vif->drv_priv : NULL;
4240 if (!rtwvif)
4241 return -EINVAL;
4242
4243 connected = rtw89_is_any_vif_connected_or_connecting(rtwdev);
4244 opt.enable = enable;
4245 opt.target_ch_mode = connected;
4246 if (enable) {
4247 ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif, connected);
4248 if (ret)
4249 goto out;
4250 }
4251 ret = rtw89_fw_h2c_scan_offload(rtwdev, &opt, rtwvif);
4252 out:
4253 return ret;
4254 }
4255
4256 #define H2C_FW_CPU_EXCEPTION_LEN 4
4257 #define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566
4258 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev)
4259 {
4260 struct sk_buff *skb;
4261 int ret;
4262
4263 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN);
4264 if (!skb) {
4265 rtw89_err(rtwdev,
4266 "failed to alloc skb for fw cpu exception\n");
4267 return -ENOMEM;
4268 }
4269
4270 skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN);
4271 RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data,
4272 H2C_FW_CPU_EXCEPTION_TYPE_DEF);
4273
4274 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4275 H2C_CAT_TEST,
4276 H2C_CL_FW_STATUS_TEST,
4277 H2C_FUNC_CPU_EXCEPTION, 0, 0,
4278 H2C_FW_CPU_EXCEPTION_LEN);
4279
4280 ret = rtw89_h2c_tx(rtwdev, skb, false);
4281 if (ret) {
4282 rtw89_err(rtwdev, "failed to send h2c\n");
4283 goto fail;
4284 }
4285
4286 return 0;
4287
4288 fail:
4289 dev_kfree_skb_any(skb);
4290 return ret;
4291 }
4292
4293 #define H2C_PKT_DROP_LEN 24
4294 int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev,
4295 const struct rtw89_pkt_drop_params *params)
4296 {
4297 struct sk_buff *skb;
4298 int ret;
4299
4300 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN);
4301 if (!skb) {
4302 rtw89_err(rtwdev,
4303 "failed to alloc skb for packet drop\n");
4304 return -ENOMEM;
4305 }
4306
4307 switch (params->sel) {
4308 case RTW89_PKT_DROP_SEL_MACID_BE_ONCE:
4309 case RTW89_PKT_DROP_SEL_MACID_BK_ONCE:
4310 case RTW89_PKT_DROP_SEL_MACID_VI_ONCE:
4311 case RTW89_PKT_DROP_SEL_MACID_VO_ONCE:
4312 case RTW89_PKT_DROP_SEL_BAND_ONCE:
4313 break;
4314 default:
4315 rtw89_debug(rtwdev, RTW89_DBG_FW,
4316 			    "pkt drop H2C might not fully support sel %d yet\n",
4317 params->sel);
4318 break;
4319 }
4320
4321 skb_put(skb, H2C_PKT_DROP_LEN);
4322 RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel);
4323 RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid);
4324 RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band);
4325 RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port);
4326 RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid);
4327 RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs);
4328 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data,
4329 params->macid_band_sel[0]);
4330 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data,
4331 params->macid_band_sel[1]);
4332 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data,
4333 params->macid_band_sel[2]);
4334 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data,
4335 params->macid_band_sel[3]);
4336
4337 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4338 H2C_CAT_MAC,
4339 H2C_CL_MAC_FW_OFLD,
4340 H2C_FUNC_PKT_DROP, 0, 0,
4341 H2C_PKT_DROP_LEN);
4342
4343 ret = rtw89_h2c_tx(rtwdev, skb, false);
4344 if (ret) {
4345 rtw89_err(rtwdev, "failed to send h2c\n");
4346 goto fail;
4347 }
4348
4349 return 0;
4350
4351 fail:
4352 dev_kfree_skb_any(skb);
4353 return ret;
4354 }
4355
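/* Keep-alive offload: when enabling, first register a NULL-data frame via
 * the general packet offload mechanism and hand its packet ID to firmware
 * together with the keep-alive period, so firmware can transmit the frame
 * on the driver's behalf.
 */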
4356 #define H2C_KEEP_ALIVE_LEN 4
4357 int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
4358 bool enable)
4359 {
4360 struct sk_buff *skb;
4361 u8 pkt_id = 0;
4362 int ret;
4363
4364 if (enable) {
4365 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
4366 RTW89_PKT_OFLD_TYPE_NULL_DATA,
4367 &pkt_id);
4368 if (ret)
4369 return -EPERM;
4370 }
4371
4372 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN);
4373 if (!skb) {
4374 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n");
4375 return -ENOMEM;
4376 }
4377
4378 skb_put(skb, H2C_KEEP_ALIVE_LEN);
4379
4380 RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable);
4381 RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id);
4382 RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5);
4383 RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif->mac_id);
4384
4385 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4386 H2C_CAT_MAC,
4387 H2C_CL_MAC_WOW,
4388 H2C_FUNC_KEEP_ALIVE, 0, 1,
4389 H2C_KEEP_ALIVE_LEN);
4390
4391 ret = rtw89_h2c_tx(rtwdev, skb, false);
4392 if (ret) {
4393 rtw89_err(rtwdev, "failed to send h2c\n");
4394 goto fail;
4395 }
4396
4397 return 0;
4398
4399 fail:
4400 dev_kfree_skb_any(skb);
4401
4402 return ret;
4403 }
4404
4405 #define H2C_DISCONNECT_DETECT_LEN 8
4406 int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev,
4407 struct rtw89_vif *rtwvif, bool enable)
4408 {
4409 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
4410 struct sk_buff *skb;
4411 u8 macid = rtwvif->mac_id;
4412 int ret;
4413
4414 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN);
4415 if (!skb) {
4416 		rtw89_err(rtwdev, "failed to alloc skb for disconnect detect\n");
4417 return -ENOMEM;
4418 }
4419
4420 skb_put(skb, H2C_DISCONNECT_DETECT_LEN);
4421
4422 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) {
4423 RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable);
4424 RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable);
4425 RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid);
4426 RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100);
4427 RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5);
4428 }
4429
4430 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4431 H2C_CAT_MAC,
4432 H2C_CL_MAC_WOW,
4433 H2C_FUNC_DISCONNECT_DETECT, 0, 1,
4434 H2C_DISCONNECT_DETECT_LEN);
4435
4436 ret = rtw89_h2c_tx(rtwdev, skb, false);
4437 if (ret) {
4438 rtw89_err(rtwdev, "failed to send h2c\n");
4439 goto fail;
4440 }
4441
4442 return 0;
4443
4444 fail:
4445 dev_kfree_skb_any(skb);
4446
4447 return ret;
4448 }
4449
4450 #define H2C_WOW_GLOBAL_LEN 8
4451 int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
4452 bool enable)
4453 {
4454 struct sk_buff *skb;
4455 u8 macid = rtwvif->mac_id;
4456 int ret;
4457
4458 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_GLOBAL_LEN);
4459 if (!skb) {
4460 		rtw89_err(rtwdev, "failed to alloc skb for wow global\n");
4461 return -ENOMEM;
4462 }
4463
4464 skb_put(skb, H2C_WOW_GLOBAL_LEN);
4465
4466 RTW89_SET_WOW_GLOBAL_ENABLE(skb->data, enable);
4467 RTW89_SET_WOW_GLOBAL_MAC_ID(skb->data, macid);
4468
4469 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4470 H2C_CAT_MAC,
4471 H2C_CL_MAC_WOW,
4472 H2C_FUNC_WOW_GLOBAL, 0, 1,
4473 H2C_WOW_GLOBAL_LEN);
4474
4475 ret = rtw89_h2c_tx(rtwdev, skb, false);
4476 if (ret) {
4477 rtw89_err(rtwdev, "failed to send h2c\n");
4478 goto fail;
4479 }
4480
4481 return 0;
4482
4483 fail:
4484 dev_kfree_skb_any(skb);
4485
4486 return ret;
4487 }
4488
4489 #define H2C_WAKEUP_CTRL_LEN 4
4490 int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev,
4491 struct rtw89_vif *rtwvif,
4492 bool enable)
4493 {
4494 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
4495 struct sk_buff *skb;
4496 u8 macid = rtwvif->mac_id;
4497 int ret;
4498
4499 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN);
4500 if (!skb) {
4501 		rtw89_err(rtwdev, "failed to alloc skb for wakeup ctrl\n");
4502 return -ENOMEM;
4503 }
4504
4505 skb_put(skb, H2C_WAKEUP_CTRL_LEN);
4506
4507 if (rtw_wow->pattern_cnt)
4508 RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable);
4509 if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags))
4510 RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable);
4511 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags))
4512 RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable);
4513
4514 RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid);
4515
4516 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4517 H2C_CAT_MAC,
4518 H2C_CL_MAC_WOW,
4519 H2C_FUNC_WAKEUP_CTRL, 0, 1,
4520 H2C_WAKEUP_CTRL_LEN);
4521
4522 ret = rtw89_h2c_tx(rtwdev, skb, false);
4523 if (ret) {
4524 rtw89_err(rtwdev, "failed to send h2c\n");
4525 goto fail;
4526 }
4527
4528 return 0;
4529
4530 fail:
4531 dev_kfree_skb_any(skb);
4532
4533 return ret;
4534 }
4535
4536 #define H2C_WOW_CAM_UPD_LEN 24
4537 int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev,
4538 struct rtw89_wow_cam_info *cam_info)
4539 {
4540 struct sk_buff *skb;
4541 int ret;
4542
4543 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN);
4544 if (!skb) {
4545 		rtw89_err(rtwdev, "failed to alloc skb for wow cam update\n");
4546 return -ENOMEM;
4547 }
4548
4549 skb_put(skb, H2C_WOW_CAM_UPD_LEN);
4550
4551 RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w);
4552 RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx);
4553 if (cam_info->valid) {
4554 RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]);
4555 RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]);
4556 RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]);
4557 RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]);
4558 RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc);
4559 RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data,
4560 cam_info->negative_pattern_match);
4561 RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data,
4562 cam_info->skip_mac_hdr);
4563 RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc);
4564 RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc);
4565 RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc);
4566 }
4567 RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid);
4568
4569 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4570 H2C_CAT_MAC,
4571 H2C_CL_MAC_WOW,
4572 H2C_FUNC_WOW_CAM_UPD, 0, 1,
4573 H2C_WOW_CAM_UPD_LEN);
4574
4575 ret = rtw89_h2c_tx(rtwdev, skb, false);
4576 if (ret) {
4577 rtw89_err(rtwdev, "failed to send h2c\n");
4578 goto fail;
4579 }
4580
4581 return 0;
4582 fail:
4583 dev_kfree_skb_any(skb);
4584
4585 return ret;
4586 }
4587
4588 /* Return < 0 if a failure happens while waiting for the condition.
4589  * Return 0 when waiting for the condition succeeds.
4590  * Return > 0 if the wait is skipped because it cannot complete by
4591  * driver/FW design; 1 means SER is in progress.
4592  */
4593 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
4594 struct rtw89_wait_info *wait, unsigned int cond)
4595 {
4596 int ret;
4597
4598 ret = rtw89_h2c_tx(rtwdev, skb, false);
4599 if (ret) {
4600 rtw89_err(rtwdev, "failed to send h2c\n");
4601 dev_kfree_skb_any(skb);
4602 return -EBUSY;
4603 }
4604
4605 if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags))
4606 return 1;
4607
4608 return rtw89_wait_for_cond(wait, cond);
4609 }
4610
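/* The MCC (multi-channel concurrency) H2C commands below are synchronous:
 * each one is sent with rtw89_h2c_tx_and_wait() and completed by the
 * matching C2H report through RTW89_MCC_WAIT_COND(group, function).
 */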
4611 #define H2C_ADD_MCC_LEN 16
4612 int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev,
4613 const struct rtw89_fw_mcc_add_req *p)
4614 {
4615 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
4616 struct sk_buff *skb;
4617 unsigned int cond;
4618
4619 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN);
4620 if (!skb) {
4621 rtw89_err(rtwdev,
4622 "failed to alloc skb for add mcc\n");
4623 return -ENOMEM;
4624 }
4625
4626 skb_put(skb, H2C_ADD_MCC_LEN);
4627 RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid);
4628 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0);
4629 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1);
4630 RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch);
4631 RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth);
4632 RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group);
4633 RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt);
4634 RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null);
4635 RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry);
4636 RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch);
4637 RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count);
4638 RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early);
4639 RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g);
4640 RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en);
4641 RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass);
4642 RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type);
4643 RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration);
4644 RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en);
4645 RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num);
4646 RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target);
4647
4648 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4649 H2C_CAT_MAC,
4650 H2C_CL_MCC,
4651 H2C_FUNC_ADD_MCC, 0, 0,
4652 H2C_ADD_MCC_LEN);
4653
4654 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC);
4655 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
4656 }
4657
4658 #define H2C_START_MCC_LEN 12
4659 int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev,
4660 const struct rtw89_fw_mcc_start_req *p)
4661 {
4662 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
4663 struct sk_buff *skb;
4664 unsigned int cond;
4665
4666 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN);
4667 if (!skb) {
4668 rtw89_err(rtwdev,
4669 "failed to alloc skb for start mcc\n");
4670 return -ENOMEM;
4671 }
4672
4673 skb_put(skb, H2C_START_MCC_LEN);
4674 RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group);
4675 RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group);
4676 RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action);
4677 RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group);
4678 RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt);
4679 RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en);
4680 RTW89_SET_FWCMD_START_MCC_MACID(skb->data, p->macid);
4681 RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low);
4682 RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high);
4683
4684 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4685 H2C_CAT_MAC,
4686 H2C_CL_MCC,
4687 H2C_FUNC_START_MCC, 0, 0,
4688 H2C_START_MCC_LEN);
4689
4690 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC);
4691 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
4692 }
4693
4694 #define H2C_STOP_MCC_LEN 4
4695 int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid,
4696 bool prev_groups)
4697 {
4698 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
4699 struct sk_buff *skb;
4700 unsigned int cond;
4701
4702 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN);
4703 if (!skb) {
4704 rtw89_err(rtwdev,
4705 "failed to alloc skb for stop mcc\n");
4706 return -ENOMEM;
4707 }
4708
4709 skb_put(skb, H2C_STOP_MCC_LEN);
4710 RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid);
4711 RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group);
4712 RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups);
4713
4714 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4715 H2C_CAT_MAC,
4716 H2C_CL_MCC,
4717 H2C_FUNC_STOP_MCC, 0, 0,
4718 H2C_STOP_MCC_LEN);
4719
4720 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC);
4721 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
4722 }
4723
4724 #define H2C_DEL_MCC_GROUP_LEN 4
4725 int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group,
4726 bool prev_groups)
4727 {
4728 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
4729 struct sk_buff *skb;
4730 unsigned int cond;
4731
4732 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN);
4733 if (!skb) {
4734 rtw89_err(rtwdev,
4735 "failed to alloc skb for del mcc group\n");
4736 return -ENOMEM;
4737 }
4738
4739 skb_put(skb, H2C_DEL_MCC_GROUP_LEN);
4740 RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group);
4741 RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups);
4742
4743 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4744 H2C_CAT_MAC,
4745 H2C_CL_MCC,
4746 H2C_FUNC_DEL_MCC_GROUP, 0, 0,
4747 H2C_DEL_MCC_GROUP_LEN);
4748
4749 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP);
4750 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
4751 }
4752
4753 #define H2C_RESET_MCC_GROUP_LEN 4
4754 int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group)
4755 {
4756 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
4757 struct sk_buff *skb;
4758 unsigned int cond;
4759
4760 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN);
4761 if (!skb) {
4762 rtw89_err(rtwdev,
4763 "failed to alloc skb for reset mcc group\n");
4764 return -ENOMEM;
4765 }
4766
4767 skb_put(skb, H2C_RESET_MCC_GROUP_LEN);
4768 RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group);
4769
4770 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4771 H2C_CAT_MAC,
4772 H2C_CL_MCC,
4773 H2C_FUNC_RESET_MCC_GROUP, 0, 0,
4774 H2C_RESET_MCC_GROUP_LEN);
4775
4776 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP);
4777 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
4778 }
4779
4780 #define H2C_MCC_REQ_TSF_LEN 4
4781 int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev,
4782 const struct rtw89_fw_mcc_tsf_req *req,
4783 struct rtw89_mac_mcc_tsf_rpt *rpt)
4784 {
4785 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
4786 struct rtw89_mac_mcc_tsf_rpt *tmp;
4787 struct sk_buff *skb;
4788 unsigned int cond;
4789 int ret;
4790
4791 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_REQ_TSF_LEN);
4792 if (!skb) {
4793 rtw89_err(rtwdev,
4794 "failed to alloc skb for mcc req tsf\n");
4795 return -ENOMEM;
4796 }
4797
4798 skb_put(skb, H2C_MCC_REQ_TSF_LEN);
4799 RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group);
4800 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x);
4801 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y);
4802
4803 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4804 H2C_CAT_MAC,
4805 H2C_CL_MCC,
4806 H2C_FUNC_MCC_REQ_TSF, 0, 0,
4807 H2C_MCC_REQ_TSF_LEN);
4808
4809 cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF);
4810 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
4811 if (ret)
4812 return ret;
4813
4814 tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf;
4815 *rpt = *tmp;
4816
4817 return 0;
4818 }
4819
4820 #define H2C_MCC_MACID_BITMAP_DSC_LEN 4
4821 int rtw89_fw_h2c_mcc_macid_bitmap(struct rtw89_dev *rtwdev, u8 group, u8 macid,
4822 u8 *bitmap)
4823 {
4824 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
4825 struct sk_buff *skb;
4826 unsigned int cond;
4827 u8 map_len;
4828 u8 h2c_len;
4829
4830 BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8);
4831 map_len = RTW89_MAX_MAC_ID_NUM / 8;
4832 h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len;
4833 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len);
4834 if (!skb) {
4835 rtw89_err(rtwdev,
4836 "failed to alloc skb for mcc macid bitmap\n");
4837 return -ENOMEM;
4838 }
4839
4840 skb_put(skb, h2c_len);
4841 RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group);
4842 RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid);
4843 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len);
4844 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len);
4845
4846 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4847 H2C_CAT_MAC,
4848 H2C_CL_MCC,
4849 H2C_FUNC_MCC_MACID_BITMAP, 0, 0,
4850 h2c_len);
4851
4852 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP);
4853 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
4854 }
4855
4856 #define H2C_MCC_SYNC_LEN 4
4857 int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source,
4858 u8 target, u8 offset)
4859 {
4860 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
4861 struct sk_buff *skb;
4862 unsigned int cond;
4863
4864 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN);
4865 if (!skb) {
4866 rtw89_err(rtwdev,
4867 "failed to alloc skb for mcc sync\n");
4868 return -ENOMEM;
4869 }
4870
4871 skb_put(skb, H2C_MCC_SYNC_LEN);
4872 RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group);
4873 RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source);
4874 RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target);
4875 RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset);
4876
4877 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4878 H2C_CAT_MAC,
4879 H2C_CL_MCC,
4880 H2C_FUNC_MCC_SYNC, 0, 0,
4881 H2C_MCC_SYNC_LEN);
4882
4883 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_SYNC);
4884 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
4885 }
4886
4887 #define H2C_MCC_SET_DURATION_LEN 20
4888 int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev,
4889 const struct rtw89_fw_mcc_duration *p)
4890 {
4891 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
4892 struct sk_buff *skb;
4893 unsigned int cond;
4894
4895 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN);
4896 if (!skb) {
4897 rtw89_err(rtwdev,
4898 "failed to alloc skb for mcc set duration\n");
4899 return -ENOMEM;
4900 }
4901
4902 skb_put(skb, H2C_MCC_SET_DURATION_LEN);
4903 RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group);
4904 RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group);
4905 RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid);
4906 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x);
4907 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y);
4908 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data,
4909 p->start_tsf_low);
4910 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data,
4911 p->start_tsf_high);
4912 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x);
4913 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y);
4914
4915 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4916 H2C_CAT_MAC,
4917 H2C_CL_MCC,
4918 H2C_FUNC_MCC_SET_DURATION, 0, 0,
4919 H2C_MCC_SET_DURATION_LEN);
4920
4921 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION);
4922 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
4923 }
4924
4925 static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len)
4926 {
4927 static const u8 zeros[U8_MAX] = {};
4928
4929 return memcmp(ext_ptr, zeros, ext_len) == 0;
4930 }
4931
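/* A firmware tx-power entry is acceptable if the driver-side struct is at
 * least as large as the entry size declared by the blob, or if the extra
 * trailing bytes (e.g. fields added by a newer blob format) are all zero
 * and can therefore be ignored safely.
 */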
4932 #define __fw_txpwr_entry_acceptable(e, cursor, ent_sz) \
4933 ({ \
4934 u8 __var_sz = sizeof(*(e)); \
4935 bool __accept; \
4936 if (__var_sz >= (ent_sz)) \
4937 __accept = true; \
4938 else \
4939 __accept = __fw_txpwr_entry_zero_ext((cursor) + __var_sz,\
4940 (ent_sz) - __var_sz);\
4941 __accept; \
4942 })
4943
4944 static bool
4945 fw_txpwr_byrate_entry_valid(const struct rtw89_fw_txpwr_byrate_entry *e,
4946 const void *cursor,
4947 const struct rtw89_txpwr_conf *conf)
4948 {
4949 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
4950 return false;
4951
4952 if (e->band >= RTW89_BAND_NUM || e->bw >= RTW89_BYR_BW_NUM)
4953 return false;
4954
4955 switch (e->rs) {
4956 case RTW89_RS_CCK:
4957 if (e->shf + e->len > RTW89_RATE_CCK_NUM)
4958 return false;
4959 break;
4960 case RTW89_RS_OFDM:
4961 if (e->shf + e->len > RTW89_RATE_OFDM_NUM)
4962 return false;
4963 break;
4964 case RTW89_RS_MCS:
4965 if (e->shf + e->len > __RTW89_RATE_MCS_NUM ||
4966 e->nss >= RTW89_NSS_NUM ||
4967 e->ofdma >= RTW89_OFDMA_NUM)
4968 return false;
4969 break;
4970 case RTW89_RS_HEDCM:
4971 if (e->shf + e->len > RTW89_RATE_HEDCM_NUM ||
4972 e->nss >= RTW89_NSS_HEDCM_NUM ||
4973 e->ofdma >= RTW89_OFDMA_NUM)
4974 return false;
4975 break;
4976 case RTW89_RS_OFFSET:
4977 if (e->shf + e->len > __RTW89_RATE_OFFSET_NUM)
4978 return false;
4979 break;
4980 default:
4981 return false;
4982 }
4983
4984 return true;
4985 }
4986
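/* Walk the by-rate tx-power entries in the firmware blob: each valid entry
 * carries a little-endian 32-bit word holding one signed byte of power per
 * rate, which is unpacked byte by byte into rtwdev->byr[band][bw] according
 * to the rate descriptor (rs/nss/ofdma), starting at index entry.shf.
 */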
4987 static
4988 void rtw89_fw_load_txpwr_byrate(struct rtw89_dev *rtwdev,
4989 const struct rtw89_txpwr_table *tbl)
4990 {
4991 const struct rtw89_txpwr_conf *conf = tbl->data;
4992 struct rtw89_fw_txpwr_byrate_entry entry = {};
4993 struct rtw89_txpwr_byrate *byr_head;
4994 struct rtw89_rate_desc desc = {};
4995 const void *cursor;
4996 u32 data;
4997 s8 *byr;
4998 int i;
4999
5000 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
5001 if (!fw_txpwr_byrate_entry_valid(&entry, cursor, conf))
5002 continue;
5003
5004 byr_head = &rtwdev->byr[entry.band][entry.bw];
5005 data = le32_to_cpu(entry.data);
5006 desc.ofdma = entry.ofdma;
5007 desc.nss = entry.nss;
5008 desc.rs = entry.rs;
5009
5010 for (i = 0; i < entry.len; i++, data >>= 8) {
5011 desc.idx = entry.shf + i;
5012 byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc);
5013 *byr = data & 0xff;
5014 }
5015 }
5016 }
5017
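/* The per-band tx-power limit loaders below share one pattern: validate
 * every blob entry's indices against the dimensions of the target table
 * (bandwidth, ntx, rate section, beamforming, regulatory domain, channel),
 * then copy entry.v into the corresponding slot; invalid entries are
 * silently skipped.
 */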
5018 static bool
5019 fw_txpwr_lmt_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_2ghz_entry *e,
5020 const void *cursor,
5021 const struct rtw89_txpwr_conf *conf)
5022 {
5023 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
5024 return false;
5025
5026 if (e->bw >= RTW89_2G_BW_NUM)
5027 return false;
5028 if (e->nt >= RTW89_NTX_NUM)
5029 return false;
5030 if (e->rs >= RTW89_RS_LMT_NUM)
5031 return false;
5032 if (e->bf >= RTW89_BF_NUM)
5033 return false;
5034 if (e->regd >= RTW89_REGD_NUM)
5035 return false;
5036 if (e->ch_idx >= RTW89_2G_CH_NUM)
5037 return false;
5038
5039 return true;
5040 }
5041
5042 static
5043 void rtw89_fw_load_txpwr_lmt_2ghz(struct rtw89_txpwr_lmt_2ghz_data *data)
5044 {
5045 const struct rtw89_txpwr_conf *conf = &data->conf;
5046 struct rtw89_fw_txpwr_lmt_2ghz_entry entry = {};
5047 const void *cursor;
5048
5049 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
5050 if (!fw_txpwr_lmt_2ghz_entry_valid(&entry, cursor, conf))
5051 continue;
5052
5053 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
5054 [entry.ch_idx] = entry.v;
5055 }
5056 }
5057
5058 static bool
5059 fw_txpwr_lmt_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_5ghz_entry *e,
5060 const void *cursor,
5061 const struct rtw89_txpwr_conf *conf)
5062 {
5063 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
5064 return false;
5065
5066 if (e->bw >= RTW89_5G_BW_NUM)
5067 return false;
5068 if (e->nt >= RTW89_NTX_NUM)
5069 return false;
5070 if (e->rs >= RTW89_RS_LMT_NUM)
5071 return false;
5072 if (e->bf >= RTW89_BF_NUM)
5073 return false;
5074 if (e->regd >= RTW89_REGD_NUM)
5075 return false;
5076 if (e->ch_idx >= RTW89_5G_CH_NUM)
5077 return false;
5078
5079 return true;
5080 }
5081
5082 static
5083 void rtw89_fw_load_txpwr_lmt_5ghz(struct rtw89_txpwr_lmt_5ghz_data *data)
5084 {
5085 const struct rtw89_txpwr_conf *conf = &data->conf;
5086 struct rtw89_fw_txpwr_lmt_5ghz_entry entry = {};
5087 const void *cursor;
5088
5089 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
5090 if (!fw_txpwr_lmt_5ghz_entry_valid(&entry, cursor, conf))
5091 continue;
5092
5093 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
5094 [entry.ch_idx] = entry.v;
5095 }
5096 }
5097
5098 static bool
5099 fw_txpwr_lmt_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_6ghz_entry *e,
5100 const void *cursor,
5101 const struct rtw89_txpwr_conf *conf)
5102 {
5103 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
5104 return false;
5105
5106 if (e->bw >= RTW89_6G_BW_NUM)
5107 return false;
5108 if (e->nt >= RTW89_NTX_NUM)
5109 return false;
5110 if (e->rs >= RTW89_RS_LMT_NUM)
5111 return false;
5112 if (e->bf >= RTW89_BF_NUM)
5113 return false;
5114 if (e->regd >= RTW89_REGD_NUM)
5115 return false;
5116 if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER)
5117 return false;
5118 if (e->ch_idx >= RTW89_6G_CH_NUM)
5119 return false;
5120
5121 return true;
5122 }
5123
static
void rtw89_fw_load_txpwr_lmt_6ghz(struct rtw89_txpwr_lmt_6ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_6ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_6ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
		       [entry.reg_6ghz_power][entry.ch_idx] = entry.v;
	}
}

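/* The resource unit (RU) limit tables below are indexed by RU instead of
 * bandwidth/rate-section/beamforming, but follow the same
 * validate-then-load pattern.
 */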
static bool
fw_txpwr_lmt_ru_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_2ghz_entry *e,
				 const void *cursor,
				 const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->ru >= RTW89_RU_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->ch_idx >= RTW89_2G_CH_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_txpwr_lmt_ru_2ghz(struct rtw89_txpwr_lmt_ru_2ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_ru_2ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_ru_2ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v;
	}
}

static bool
fw_txpwr_lmt_ru_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_5ghz_entry *e,
				 const void *cursor,
				 const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->ru >= RTW89_RU_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->ch_idx >= RTW89_5G_CH_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_txpwr_lmt_ru_5ghz(struct rtw89_txpwr_lmt_ru_5ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_ru_5ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_ru_5ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v;
	}
}

static bool
fw_txpwr_lmt_ru_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_6ghz_entry *e,
				 const void *cursor,
				 const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->ru >= RTW89_RU_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER)
		return false;
	if (e->ch_idx >= RTW89_6G_CH_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_txpwr_lmt_ru_6ghz(struct rtw89_txpwr_lmt_ru_6ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_ru_6ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_ru_6ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.ru][entry.nt][entry.regd][entry.reg_6ghz_power]
		       [entry.ch_idx] = entry.v;
	}
}

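/* TX shape limits are kept per band and regulatory domain (and, for the
 * non-RU table, per TX shape rate section) rather than per channel.
 */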
static bool
fw_tx_shape_lmt_entry_valid(const struct rtw89_fw_tx_shape_lmt_entry *e,
			    const void *cursor,
			    const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->band >= RTW89_BAND_NUM)
		return false;
	if (e->tx_shape_rs >= RTW89_RS_TX_SHAPE_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_tx_shape_lmt(struct rtw89_tx_shape_lmt_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_tx_shape_lmt_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_tx_shape_lmt_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.band][entry.tx_shape_rs][entry.regd] = entry.v;
	}
}

static bool
fw_tx_shape_lmt_ru_entry_valid(const struct rtw89_fw_tx_shape_lmt_ru_entry *e,
			       const void *cursor,
			       const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->band >= RTW89_BAND_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_tx_shape_lmt_ru(struct rtw89_tx_shape_lmt_ru_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_tx_shape_lmt_ru_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_tx_shape_lmt_ru_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.band][entry.regd] = entry.v;
	}
}

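/* Build the effective RFE parameters: start from the compile-time defaults
 * in @init (when given) and override each table for which the firmware file
 * supplied a valid TX power configuration. Falls back to @init unchanged if
 * no rfe_data was allocated.
 */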
const struct rtw89_rfe_parms *
rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev,
			    const struct rtw89_rfe_parms *init)
{
	struct rtw89_rfe_data *rfe_data = rtwdev->rfe_data;
	struct rtw89_rfe_parms *parms;

	if (!rfe_data)
		return init;

	parms = &rfe_data->rfe_parms;
	if (init)
		*parms = *init;

	if (rtw89_txpwr_conf_valid(&rfe_data->byrate.conf)) {
		rfe_data->byrate.tbl.data = &rfe_data->byrate.conf;
		rfe_data->byrate.tbl.size = 0; /* don't care here */
		rfe_data->byrate.tbl.load = rtw89_fw_load_txpwr_byrate;
		parms->byr_tbl = &rfe_data->byrate.tbl;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_2ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->lmt_2ghz);
		parms->rule_2ghz.lmt = &rfe_data->lmt_2ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_5ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->lmt_5ghz);
		parms->rule_5ghz.lmt = &rfe_data->lmt_5ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_6ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->lmt_6ghz);
		parms->rule_6ghz.lmt = &rfe_data->lmt_6ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_2ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->lmt_ru_2ghz);
		parms->rule_2ghz.lmt_ru = &rfe_data->lmt_ru_2ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_5ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->lmt_ru_5ghz);
		parms->rule_5ghz.lmt_ru = &rfe_data->lmt_ru_5ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_6ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->lmt_ru_6ghz);
		parms->rule_6ghz.lmt_ru = &rfe_data->lmt_ru_6ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt.conf)) {
		rtw89_fw_load_tx_shape_lmt(&rfe_data->tx_shape_lmt);
		parms->tx_shape.lmt = &rfe_data->tx_shape_lmt.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt_ru.conf)) {
		rtw89_fw_load_tx_shape_lmt_ru(&rfe_data->tx_shape_lmt_ru);
		parms->tx_shape.lmt_ru = &rfe_data->tx_shape_lmt_ru.v;
	}

	return parms;
}
