// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */

#include <linux/export.h>

#include "core.h"
#include "hif.h"
#include "debug.h"

/********/
/* Send */
/********/

static void ath10k_htc_control_tx_complete(struct ath10k *ar,
					   struct sk_buff *skb)
{
	kfree_skb(skb);
}

static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
{
	struct sk_buff *skb;
	struct ath10k_skb_cb *skb_cb;

	skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE);
	if (!skb)
		return NULL;

	skb_reserve(skb, 20); /* FIXME: why 20 bytes? */
	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	skb_cb = ATH10K_SKB_CB(skb);
	memset(skb_cb, 0, sizeof(*skb_cb));

	ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
	return skb;
}

static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
					     struct sk_buff *skb)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);

	if (htc->ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
		dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
}

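/* Complete an outbound HTC frame: undo the DMA mapping and strip the
 * HTC header, then hand the skb to the endpoint's tx completion
 * handler. The skb is freed here if no handler is registered or if the
 * frame was a member of a tx bundle.
 */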
void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
				     struct sk_buff *skb)
{
	struct ath10k *ar = ep->htc->ar;
	struct ath10k_htc_hdr *hdr;

	ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
		   ep->eid, skb);

	/* Corner case: the copy completion can reach the host while the
	 * copy engine is still processing the buffer. If the host unmaps
	 * the memory at that point, an SMMU fault is triggered. As a
	 * workaround, delay the unmap slightly so the copy engine can
	 * finish first.
	 */
	if (ar->hw_params.delay_unmap_buffer &&
	    ep->ul_pipe_id == 3)
		mdelay(2);

	hdr = (struct ath10k_htc_hdr *)skb->data;
	ath10k_htc_restore_tx_skb(ep->htc, skb);

	if (!ep->ep_ops.ep_tx_complete) {
		ath10k_warn(ar, "no tx handler for eid %d\n", ep->eid);
		dev_kfree_skb_any(skb);
		return;
	}

	if (hdr->flags & ATH10K_HTC_FLAG_SEND_BUNDLE) {
		dev_kfree_skb_any(skb);
		return;
	}

	ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
}
EXPORT_SYMBOL(ath10k_htc_notify_tx_completion);

static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
				      struct sk_buff *skb)
{
	struct ath10k_htc_hdr *hdr;

	hdr = (struct ath10k_htc_hdr *)skb->data;
	memset(hdr, 0, sizeof(struct ath10k_htc_hdr));

	hdr->eid = ep->eid;
	hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
	hdr->flags = 0;
	if (ep->tx_credit_flow_enabled && !ep->bundle_tx)
		hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;

	spin_lock_bh(&ep->htc->tx_lock);
	hdr->seq_no = ep->seq_no++;
	spin_unlock_bh(&ep->htc->tx_lock);
}

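/* Reserve tx credits for a frame of the given length. A frame costs
 * DIV_ROUND_UP(len, tx_credit_size) credits. With consume == false this
 * only checks availability; with consume == true the credits are
 * actually deducted. Returns -EAGAIN when not enough credits are left.
 */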
static int ath10k_htc_consume_credit(struct ath10k_htc_ep *ep,
				     unsigned int len,
				     bool consume)
{
	struct ath10k_htc *htc = ep->htc;
	struct ath10k *ar = htc->ar;
	enum ath10k_htc_ep_id eid = ep->eid;
	int credits, ret = 0;

	if (!ep->tx_credit_flow_enabled)
		return 0;

	credits = DIV_ROUND_UP(len, ep->tx_credit_size);
	spin_lock_bh(&htc->tx_lock);

	if (ep->tx_credits < credits) {
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "htc insufficient credits ep %d required %d available %d consume %d\n",
			   eid, credits, ep->tx_credits, consume);
		ret = -EAGAIN;
		goto unlock;
	}

	if (consume) {
		ep->tx_credits -= credits;
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "htc ep %d consumed %d credits total %d\n",
			   eid, credits, ep->tx_credits);
	}

unlock:
	spin_unlock_bh(&htc->tx_lock);
	return ret;
}

static void ath10k_htc_release_credit(struct ath10k_htc_ep *ep, unsigned int len)
{
	struct ath10k_htc *htc = ep->htc;
	struct ath10k *ar = htc->ar;
	enum ath10k_htc_ep_id eid = ep->eid;
	int credits;

	if (!ep->tx_credit_flow_enabled)
		return;

	credits = DIV_ROUND_UP(len, ep->tx_credit_size);
	spin_lock_bh(&htc->tx_lock);
	ep->tx_credits += credits;
	ath10k_dbg(ar, ATH10K_DBG_HTC,
		   "htc ep %d reverted %d credits back total %d\n",
		   eid, credits, ep->tx_credits);
	spin_unlock_bh(&htc->tx_lock);

	if (ep->ep_ops.ep_tx_credits)
		ep->ep_ops.ep_tx_credits(htc->ar);
}

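/* Queue a single frame for transmission: push and fill the HTC header,
 * reserve tx credits, DMA-map the buffer (unless the device is a
 * high-latency type) and pass it to the HIF layer as a one-element
 * scatter-gather list. On failure every step is rolled back.
 */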
int ath10k_htc_send(struct ath10k_htc *htc,
		    enum ath10k_htc_ep_id eid,
		    struct sk_buff *skb)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_ep *ep = &htc->endpoint[eid];
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
	struct ath10k_hif_sg_item sg_item;
	struct device *dev = htc->ar->dev;
	int ret;
	unsigned int skb_len;

	if (htc->ar->state == ATH10K_STATE_WEDGED)
		return -ECOMM;

	if (eid >= ATH10K_HTC_EP_COUNT) {
		ath10k_warn(ar, "Invalid endpoint id: %d\n", eid);
		return -ENOENT;
	}

	skb_push(skb, sizeof(struct ath10k_htc_hdr));

	skb_len = skb->len;
	ret = ath10k_htc_consume_credit(ep, skb_len, true);
	if (ret)
		goto err_pull;

	ath10k_htc_prepare_tx_skb(ep, skb);

	skb_cb->eid = eid;
	if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) {
		skb_cb->paddr = dma_map_single(dev, skb->data, skb->len,
					       DMA_TO_DEVICE);
		ret = dma_mapping_error(dev, skb_cb->paddr);
		if (ret) {
			ret = -EIO;
			goto err_credits;
		}
	}

	sg_item.transfer_id = ep->eid;
	sg_item.transfer_context = skb;
	sg_item.vaddr = skb->data;
	sg_item.paddr = skb_cb->paddr;
	sg_item.len = skb->len;

	ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
	if (ret)
		goto err_unmap;

	return 0;

err_unmap:
	if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
		dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
err_credits:
	ath10k_htc_release_credit(ep, skb_len);
err_pull:
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	return ret;
}

void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_skb_cb *skb_cb;
	struct ath10k_htc_ep *ep;

	if (WARN_ON_ONCE(!skb))
		return;

	skb_cb = ATH10K_SKB_CB(skb);
	ep = &htc->endpoint[skb_cb->eid];

	ath10k_htc_notify_tx_completion(ep, skb);
	/* the skb now belongs to the completion handler */
}
EXPORT_SYMBOL(ath10k_htc_tx_completion_handler);

/***********/
/* Receive */
/***********/

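/* Replenish endpoint tx credits from a credit report found in an rx
 * trailer. The endpoint's ep_tx_credits callback is invoked (with the
 * tx lock temporarily dropped) so queued frames can be retried.
 */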
static void
ath10k_htc_process_credit_report(struct ath10k_htc *htc,
				 const struct ath10k_htc_credit_report *report,
				 int len,
				 enum ath10k_htc_ep_id eid)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_ep *ep;
	int i, n_reports;

	if (len % sizeof(*report))
		ath10k_warn(ar, "Uneven credit report len %d", len);

	n_reports = len / sizeof(*report);

	spin_lock_bh(&htc->tx_lock);
	for (i = 0; i < n_reports; i++, report++) {
		if (report->eid >= ATH10K_HTC_EP_COUNT)
			break;

		ep = &htc->endpoint[report->eid];
		ep->tx_credits += report->credits;

		ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
			   report->eid, report->credits, ep->tx_credits);

		if (ep->ep_ops.ep_tx_credits) {
			spin_unlock_bh(&htc->tx_lock);
			ep->ep_ops.ep_tx_credits(htc->ar);
			spin_lock_bh(&htc->tx_lock);
		}
	}
	spin_unlock_bh(&htc->tx_lock);
}

static int
ath10k_htc_process_lookahead(struct ath10k_htc *htc,
			     const struct ath10k_htc_lookahead_report *report,
			     int len,
			     enum ath10k_htc_ep_id eid,
			     void *next_lookaheads,
			     int *next_lookaheads_len)
{
	struct ath10k *ar = htc->ar;

	/* Invalid lookahead flags are actually transmitted by
	 * the target in the HTC control message.
	 * Since this will happen at every boot we silently ignore
	 * the lookahead in this case.
	 */
	if (report->pre_valid != ((~report->post_valid) & 0xFF))
		return 0;

	if (next_lookaheads && next_lookaheads_len) {
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "htc rx lookahead found pre_valid 0x%x post_valid 0x%x\n",
			   report->pre_valid, report->post_valid);

		/* look ahead bytes are valid, copy them over */
		memcpy((u8 *)next_lookaheads, report->lookahead, 4);

		*next_lookaheads_len = 1;
	}

	return 0;
}

static int
ath10k_htc_process_lookahead_bundle(struct ath10k_htc *htc,
				    const struct ath10k_htc_lookahead_bundle *report,
				    int len,
				    enum ath10k_htc_ep_id eid,
				    void *next_lookaheads,
				    int *next_lookaheads_len)
{
	struct ath10k *ar = htc->ar;
	int bundle_cnt = len / sizeof(*report);

	if (!bundle_cnt || (bundle_cnt > htc->max_msgs_per_htc_bundle)) {
		ath10k_warn(ar, "Invalid lookahead bundle count: %d\n",
			    bundle_cnt);
		return -EINVAL;
	}

	if (next_lookaheads && next_lookaheads_len) {
		int i;

		for (i = 0; i < bundle_cnt; i++) {
			memcpy(((u8 *)next_lookaheads) + 4 * i,
			       report->lookahead, 4);
			report++;
		}

		*next_lookaheads_len = bundle_cnt;
	}

	return 0;
}

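/* Walk the record list in an rx trailer. Each record starts with a
 * header giving its id and length; credit reports, lookahead reports
 * and lookahead bundles are dispatched to the handlers above, while
 * unknown record ids are logged and skipped.
 */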
int ath10k_htc_process_trailer(struct ath10k_htc *htc,
			       u8 *buffer,
			       int length,
			       enum ath10k_htc_ep_id src_eid,
			       void *next_lookaheads,
			       int *next_lookaheads_len)
{
	struct ath10k_htc_lookahead_bundle *bundle;
	struct ath10k *ar = htc->ar;
	int status = 0;
	struct ath10k_htc_record *record;
	u8 *orig_buffer;
	int orig_length;
	size_t len;

	orig_buffer = buffer;
	orig_length = length;

	while (length > 0) {
		record = (struct ath10k_htc_record *)buffer;

		if (length < sizeof(record->hdr)) {
			status = -EINVAL;
			break;
		}

		if (record->hdr.len > length - sizeof(record->hdr)) {
			/* no room left in buffer for record */
			ath10k_warn(ar, "Invalid record length: %d\n",
				    record->hdr.len);
			status = -EINVAL;
			break;
		}

		switch (record->hdr.id) {
		case ATH10K_HTC_RECORD_CREDITS:
			len = sizeof(struct ath10k_htc_credit_report);
			if (record->hdr.len < len) {
				ath10k_warn(ar, "Credit report too short\n");
				status = -EINVAL;
				break;
			}
			ath10k_htc_process_credit_report(htc,
							 record->credit_report,
							 record->hdr.len,
							 src_eid);
			break;
		case ATH10K_HTC_RECORD_LOOKAHEAD:
			len = sizeof(struct ath10k_htc_lookahead_report);
			if (record->hdr.len < len) {
				ath10k_warn(ar, "Lookahead report too short\n");
				status = -EINVAL;
				break;
			}
			status = ath10k_htc_process_lookahead(htc,
							      record->lookahead_report,
							      record->hdr.len,
							      src_eid,
							      next_lookaheads,
							      next_lookaheads_len);
			break;
		case ATH10K_HTC_RECORD_LOOKAHEAD_BUNDLE:
			bundle = record->lookahead_bundle;
			status = ath10k_htc_process_lookahead_bundle(htc,
								     bundle,
								     record->hdr.len,
								     src_eid,
								     next_lookaheads,
								     next_lookaheads_len);
			break;
		default:
			ath10k_warn(ar, "Unhandled record: id:%d length:%d\n",
				    record->hdr.id, record->hdr.len);
			break;
		}

		if (status)
			break;

		/* multiple records may be present in a trailer */
		buffer += sizeof(record->hdr) + record->hdr.len;
		length -= sizeof(record->hdr) + record->hdr.len;
	}

	if (status)
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc rx bad trailer", "",
				orig_buffer, orig_length);

	return status;
}
EXPORT_SYMBOL(ath10k_htc_process_trailer);

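/* Validate an inbound HTC frame: check the endpoint id and payload
 * length, process and strip any trailer, then hand the remaining
 * payload to the endpoint's rx completion handler. Invalid and
 * zero-length frames are dropped here.
 */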
void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
{
	int status = 0;
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_htc_hdr *hdr;
	struct ath10k_htc_ep *ep;
	u16 payload_len;
	u32 trailer_len = 0;
	size_t min_len;
	u8 eid;
	bool trailer_present;

	hdr = (struct ath10k_htc_hdr *)skb->data;
	skb_pull(skb, sizeof(*hdr));

	eid = hdr->eid;

	if (eid >= ATH10K_HTC_EP_COUNT) {
		ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "",
				hdr, sizeof(*hdr));
		goto out;
	}

	ep = &htc->endpoint[eid];
	if (ep->service_id == ATH10K_HTC_SVC_ID_UNUSED) {
		ath10k_warn(ar, "htc rx endpoint %d is not connected\n", eid);
		goto out;
	}

	payload_len = __le16_to_cpu(hdr->len);

	if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
		ath10k_warn(ar, "HTC rx frame too long, len: %zu\n",
			    payload_len + sizeof(*hdr));
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "",
				hdr, sizeof(*hdr));
		goto out;
	}

	if (skb->len < payload_len) {
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "HTC Rx: insufficient length, got %d, expected %d\n",
			   skb->len, payload_len);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len",
				"", hdr, sizeof(*hdr));
		goto out;
	}

	/* get flags to check for trailer */
	trailer_present = hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
	if (trailer_present) {
		u8 *trailer;

		trailer_len = hdr->trailer_len;
		min_len = sizeof(struct ath10k_ath10k_htc_record_hdr);

		if ((trailer_len < min_len) ||
		    (trailer_len > payload_len)) {
			ath10k_warn(ar, "Invalid trailer length: %d\n",
				    trailer_len);
			goto out;
		}

		trailer = (u8 *)hdr;
		trailer += sizeof(*hdr);
		trailer += payload_len;
		trailer -= trailer_len;
		status = ath10k_htc_process_trailer(htc, trailer,
						    trailer_len, hdr->eid,
						    NULL, NULL);
		if (status)
			goto out;

		skb_trim(skb, skb->len - trailer_len);
	}

	if (((int)payload_len - (int)trailer_len) <= 0)
		/* zero length packet with trailer data, just drop these */
		goto out;

	ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
		   eid, skb);
	ep->ep_ops.ep_rx_complete(ar, skb);

	/* skb is now owned by the rx completion handler */
	skb = NULL;
out:
	kfree_skb(skb);
}
EXPORT_SYMBOL(ath10k_htc_rx_completion_handler);

static void ath10k_htc_control_rx_complete(struct ath10k *ar,
					   struct sk_buff *skb)
{
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;

	switch (__le16_to_cpu(msg->hdr.message_id)) {
	case ATH10K_HTC_MSG_READY_ID:
	case ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
		/* handle HTC control message */
		if (completion_done(&htc->ctl_resp)) {
			/* this is a fatal error, target should not be
			 * sending unsolicited messages on the ep 0
			 */
			ath10k_warn(ar, "HTC rx ctrl still processing\n");
			complete(&htc->ctl_resp);
			goto out;
		}

		htc->control_resp_len =
			min_t(int, skb->len,
			      ATH10K_HTC_MAX_CTRL_MSG_LEN);

		memcpy(htc->control_resp_buffer, skb->data,
		       htc->control_resp_len);

		complete(&htc->ctl_resp);
		break;
	case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
		htc->htc_ops.target_send_suspend_complete(ar);
		break;
	default:
		ath10k_warn(ar, "ignoring unsolicited htc ep0 event\n");
		break;
	}

out:
	kfree_skb(skb);
}

/***************/
/* Init/Deinit */
/***************/

static const char *htc_service_name(enum ath10k_htc_svc_id id)
{
	switch (id) {
	case ATH10K_HTC_SVC_ID_RESERVED:
		return "Reserved";
	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
		return "Control";
	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
		return "WMI";
	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
		return "DATA BE";
	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
		return "DATA BK";
	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
		return "DATA VI";
	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
		return "DATA VO";
	case ATH10K_HTC_SVC_ID_NMI_CONTROL:
		return "NMI Control";
	case ATH10K_HTC_SVC_ID_NMI_DATA:
		return "NMI Data";
	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
		return "HTT Data";
	case ATH10K_HTC_SVC_ID_HTT_DATA2_MSG:
		return "HTT Data";
	case ATH10K_HTC_SVC_ID_HTT_DATA3_MSG:
		return "HTT Data";
	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
		return "RAW";
	case ATH10K_HTC_SVC_ID_HTT_LOG_MSG:
		return "PKTLOG";
	}

	return "Unknown";
}

static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
{
	struct ath10k_htc_ep *ep;
	int i;

	for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
		ep = &htc->endpoint[i];
		ep->service_id = ATH10K_HTC_SVC_ID_UNUSED;
		ep->max_ep_message_len = 0;
		ep->max_tx_queue_depth = 0;
		ep->eid = i;
		ep->htc = htc;
		ep->tx_credit_flow_enabled = true;
	}
}

static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
					   u16 service_id)
{
	u8 allocation = 0;

	/* The WMI control service is the only service with flow control.
	 * Let it have all transmit credits.
	 */
	if (service_id == ATH10K_HTC_SVC_ID_WMI_CONTROL)
		allocation = htc->total_transmit_credits;

	return allocation;
}

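/* Transmit an assembled bundle skb. On success the bundled member skbs
 * are moved to the tx completion queue and the completion worker is
 * kicked; on failure their HTC headers are stripped and they are
 * requeued for another attempt.
 */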
static int ath10k_htc_send_bundle(struct ath10k_htc_ep *ep,
				  struct sk_buff *bundle_skb,
				  struct sk_buff_head *tx_save_head)
{
	struct ath10k_hif_sg_item sg_item;
	struct ath10k_htc *htc = ep->htc;
	struct ath10k *ar = htc->ar;
	struct sk_buff *skb;
	int ret, cn = 0;
	unsigned int skb_len;

	ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle skb len %d\n", bundle_skb->len);
	skb_len = bundle_skb->len;
	ret = ath10k_htc_consume_credit(ep, skb_len, true);

	if (!ret) {
		sg_item.transfer_id = ep->eid;
		sg_item.transfer_context = bundle_skb;
		sg_item.vaddr = bundle_skb->data;
		sg_item.len = bundle_skb->len;

		ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
		if (ret)
			ath10k_htc_release_credit(ep, skb_len);
	}

	if (ret)
		dev_kfree_skb_any(bundle_skb);

	for (cn = 0; (skb = skb_dequeue_tail(tx_save_head)); cn++) {
		if (ret) {
			skb_pull(skb, sizeof(struct ath10k_htc_hdr));
			skb_queue_head(&ep->tx_req_head, skb);
		} else {
			skb_queue_tail(&ep->tx_complete_head, skb);
		}
	}

	if (!ret)
		queue_work(ar->workqueue_tx_complete, &ar->tx_complete_work);

	ath10k_dbg(ar, ATH10K_DBG_HTC,
		   "bundle tx status %d eid %d req count %d count %d len %d\n",
		   ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, skb_len);
	return ret;
}

static void ath10k_htc_send_one_skb(struct ath10k_htc_ep *ep, struct sk_buff *skb)
{
	struct ath10k_htc *htc = ep->htc;
	struct ath10k *ar = htc->ar;
	int ret;

	ret = ath10k_htc_send(htc, ep->eid, skb);

	if (ret)
		skb_queue_head(&ep->tx_req_head, skb);

	ath10k_dbg(ar, ATH10K_DBG_HTC, "tx one status %d eid %d len %d pending count %d\n",
		   ret, ep->eid, skb->len, skb_queue_len(&ep->tx_req_head));
}

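/* Drain the endpoint's tx request queue into bundle skbs. Each frame is
 * padded up to a multiple of the credit size and copied into the bundle
 * buffer; a bundle is flushed once it runs out of room. Frames that
 * cannot be sent are put back on the request queue.
 */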
static int ath10k_htc_send_bundle_skbs(struct ath10k_htc_ep *ep)
{
	struct ath10k_htc *htc = ep->htc;
	struct sk_buff *bundle_skb, *skb;
	struct sk_buff_head tx_save_head;
	struct ath10k_htc_hdr *hdr;
	u8 *bundle_buf;
	int ret = 0, credit_pad, credit_remainder, trans_len, bundles_left = 0;

	if (htc->ar->state == ATH10K_STATE_WEDGED)
		return -ECOMM;

	if (ep->tx_credit_flow_enabled &&
	    ep->tx_credits < ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE)
		return 0;

	bundles_left = ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE * ep->tx_credit_size;
	bundle_skb = dev_alloc_skb(bundles_left);

	if (!bundle_skb)
		return -ENOMEM;

	bundle_buf = bundle_skb->data;
	skb_queue_head_init(&tx_save_head);

	while (true) {
		skb = skb_dequeue(&ep->tx_req_head);
		if (!skb)
			break;

		credit_pad = 0;
		trans_len = skb->len + sizeof(*hdr);
		credit_remainder = trans_len % ep->tx_credit_size;

		if (credit_remainder != 0) {
			credit_pad = ep->tx_credit_size - credit_remainder;
			trans_len += credit_pad;
		}

		ret = ath10k_htc_consume_credit(ep,
						bundle_buf + trans_len - bundle_skb->data,
						false);
		if (ret) {
			skb_queue_head(&ep->tx_req_head, skb);
			break;
		}

		if (bundles_left < trans_len) {
			bundle_skb->len = bundle_buf - bundle_skb->data;
			ret = ath10k_htc_send_bundle(ep, bundle_skb, &tx_save_head);

			if (ret) {
				skb_queue_head(&ep->tx_req_head, skb);
				return ret;
			}

			if (skb_queue_len(&ep->tx_req_head) == 0) {
				ath10k_htc_send_one_skb(ep, skb);
				return ret;
			}

			if (ep->tx_credit_flow_enabled &&
			    ep->tx_credits < ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE) {
				skb_queue_head(&ep->tx_req_head, skb);
				return 0;
			}

			bundles_left =
				ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE * ep->tx_credit_size;
			bundle_skb = dev_alloc_skb(bundles_left);

			if (!bundle_skb) {
				skb_queue_head(&ep->tx_req_head, skb);
				return -ENOMEM;
			}
			bundle_buf = bundle_skb->data;
			skb_queue_head_init(&tx_save_head);
		}

		skb_push(skb, sizeof(struct ath10k_htc_hdr));
		ath10k_htc_prepare_tx_skb(ep, skb);

		memcpy(bundle_buf, skb->data, skb->len);
		hdr = (struct ath10k_htc_hdr *)bundle_buf;
		hdr->flags |= ATH10K_HTC_FLAG_SEND_BUNDLE;
		hdr->pad_len = __cpu_to_le16(credit_pad);
		bundle_buf += trans_len;
		bundles_left -= trans_len;
		skb_queue_tail(&tx_save_head, skb);
	}

	if (bundle_buf != bundle_skb->data) {
		bundle_skb->len = bundle_buf - bundle_skb->data;
		ret = ath10k_htc_send_bundle(ep, bundle_skb, &tx_save_head);
	} else {
		dev_kfree_skb_any(bundle_skb);
	}

	return ret;
}

static void ath10k_htc_bundle_tx_work(struct work_struct *work)
{
	struct ath10k *ar = container_of(work, struct ath10k, bundle_tx_work);
	struct ath10k_htc_ep *ep;
	struct sk_buff *skb;
	int i;

	for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
		ep = &ar->htc.endpoint[i];

		if (!ep->bundle_tx)
			continue;

		ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle tx work eid %d count %d\n",
			   ep->eid, skb_queue_len(&ep->tx_req_head));

		if (skb_queue_len(&ep->tx_req_head) >=
		    ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE) {
			ath10k_htc_send_bundle_skbs(ep);
		} else {
			skb = skb_dequeue(&ep->tx_req_head);

			if (!skb)
				continue;
			ath10k_htc_send_one_skb(ep, skb);
		}
	}
}

static void ath10k_htc_tx_complete_work(struct work_struct *work)
{
	struct ath10k *ar = container_of(work, struct ath10k, tx_complete_work);
	struct ath10k_htc_ep *ep;
	enum ath10k_htc_ep_id eid;
	struct sk_buff *skb;
	int i;

	for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
		ep = &ar->htc.endpoint[i];
		eid = ep->eid;
		if (ep->bundle_tx && eid == ar->htt.eid) {
			ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle tx complete eid %d pending complete count %d\n",
				   ep->eid, skb_queue_len(&ep->tx_complete_head));

			while (true) {
				skb = skb_dequeue(&ep->tx_complete_head);
				if (!skb)
					break;
				ath10k_htc_notify_tx_completion(ep, skb);
			}
		}
	}
}

int ath10k_htc_send_hl(struct ath10k_htc *htc,
		       enum ath10k_htc_ep_id eid,
		       struct sk_buff *skb)
{
	struct ath10k_htc_ep *ep = &htc->endpoint[eid];
	struct ath10k *ar = htc->ar;

	if (sizeof(struct ath10k_htc_hdr) + skb->len > ep->tx_credit_size) {
		ath10k_dbg(ar, ATH10K_DBG_HTC, "tx exceed max len %d\n", skb->len);
		return -ENOMEM;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTC, "htc send hl eid %d bundle %d tx count %d len %d\n",
		   eid, ep->bundle_tx, skb_queue_len(&ep->tx_req_head), skb->len);

	if (ep->bundle_tx) {
		skb_queue_tail(&ep->tx_req_head, skb);
		queue_work(ar->workqueue, &ar->bundle_tx_work);
		return 0;
	} else {
		return ath10k_htc_send(htc, eid, skb);
	}
}

void ath10k_htc_setup_tx_req(struct ath10k_htc_ep *ep)
{
	if (ep->htc->max_msgs_per_htc_bundle >= ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE &&
	    !ep->bundle_tx) {
		ep->bundle_tx = true;
		skb_queue_head_init(&ep->tx_req_head);
		skb_queue_head_init(&ep->tx_complete_head);
	}
}

void ath10k_htc_stop_hl(struct ath10k *ar)
{
	struct ath10k_htc_ep *ep;
	int i;

	cancel_work_sync(&ar->bundle_tx_work);
	cancel_work_sync(&ar->tx_complete_work);

	for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
		ep = &ar->htc.endpoint[i];

		if (!ep->bundle_tx)
			continue;

		ath10k_dbg(ar, ATH10K_DBG_HTC, "stop tx work eid %d count %d\n",
			   ep->eid, skb_queue_len(&ep->tx_req_head));

		skb_queue_purge(&ep->tx_req_head);
	}
}

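/* Wait for the HTC ready message from the target and parse the
 * advertised credit count and credit size. An extended ready message
 * (detected purely by its larger size) additionally carries the rx
 * bundle limit and an alternate credit size for HTT data.
 */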
int ath10k_htc_wait_target(struct ath10k_htc *htc)
{
	struct ath10k *ar = htc->ar;
	int i, status = 0;
	unsigned long time_left;
	struct ath10k_htc_msg *msg;
	u16 message_id;

	time_left = wait_for_completion_timeout(&htc->ctl_resp,
						ATH10K_HTC_WAIT_TIMEOUT_HZ);
	if (!time_left) {
		/* Workaround: In some cases the PCI HIF doesn't
		 * receive interrupt for the control response message
		 * even if the buffer was completed. It is suspected
		 * iomap writes unmasking PCI CE irqs aren't propagated
		 * properly in KVM PCI-passthrough sometimes.
		 */
		ath10k_warn(ar, "failed to receive control response completion, polling..\n");

		for (i = 0; i < CE_COUNT; i++)
			ath10k_hif_send_complete_check(htc->ar, i, 1);

		time_left =
			wait_for_completion_timeout(&htc->ctl_resp,
						    ATH10K_HTC_WAIT_TIMEOUT_HZ);

		if (!time_left)
			status = -ETIMEDOUT;
	}

	if (status < 0) {
		ath10k_err(ar, "ctl_resp never came in (%d)\n", status);
		return status;
	}

	if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
		ath10k_err(ar, "Invalid HTC ready msg len:%d\n",
			   htc->control_resp_len);
		return -ECOMM;
	}

	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
	message_id = __le16_to_cpu(msg->hdr.message_id);

	if (message_id != ATH10K_HTC_MSG_READY_ID) {
		ath10k_err(ar, "Invalid HTC ready msg: 0x%x\n", message_id);
		return -ECOMM;
	}

	if (ar->hw_params.use_fw_tx_credits)
		htc->total_transmit_credits = __le16_to_cpu(msg->ready.credit_count);
	else
		htc->total_transmit_credits = 1;

	htc->target_credit_size = __le16_to_cpu(msg->ready.credit_size);

	ath10k_dbg(ar, ATH10K_DBG_HTC,
		   "Target ready! transmit resources: %d size:%d actual credits:%d\n",
		   htc->total_transmit_credits,
		   htc->target_credit_size,
		   msg->ready.credit_count);

	if ((htc->total_transmit_credits == 0) ||
	    (htc->target_credit_size == 0)) {
		ath10k_err(ar, "Invalid credit size received\n");
		return -ECOMM;
	}

	/* The only way to determine if the ready message is an extended
	 * message is from the size.
	 */
	if (htc->control_resp_len >=
	    sizeof(msg->hdr) + sizeof(msg->ready_ext)) {
		htc->alt_data_credit_size =
			__le16_to_cpu(msg->ready_ext.reserved) &
			ATH10K_HTC_MSG_READY_EXT_ALT_DATA_MASK;
		htc->max_msgs_per_htc_bundle =
			min_t(u8, msg->ready_ext.max_msgs_per_htc_bundle,
			      HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "Extended ready message RX bundle size %d alt size %d\n",
			   htc->max_msgs_per_htc_bundle,
			   htc->alt_data_credit_size);
	}

	INIT_WORK(&ar->bundle_tx_work, ath10k_htc_bundle_tx_work);
	INIT_WORK(&ar->tx_complete_work, ath10k_htc_tx_complete_work);

	return 0;
}

void ath10k_htc_change_tx_credit_flow(struct ath10k_htc *htc,
				      enum ath10k_htc_ep_id eid,
				      bool enable)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_ep *ep = &ar->htc.endpoint[eid];

	ep->tx_credit_flow_enabled = enable;
}

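/* Connect an HTC service: send a connect-service request on endpoint 0,
 * wait for the response and set up the assigned endpoint (credits,
 * message size limits, callbacks and HIF pipe mapping). The pseudo
 * control service is handled locally without any target exchange.
 */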
int ath10k_htc_connect_service(struct ath10k_htc *htc,
			       struct ath10k_htc_svc_conn_req *conn_req,
			       struct ath10k_htc_svc_conn_resp *conn_resp)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_msg *msg;
	struct ath10k_htc_conn_svc *req_msg;
	struct ath10k_htc_conn_svc_response resp_msg_dummy;
	struct ath10k_htc_conn_svc_response *resp_msg = &resp_msg_dummy;
	enum ath10k_htc_ep_id assigned_eid = ATH10K_HTC_EP_COUNT;
	struct ath10k_htc_ep *ep;
	struct sk_buff *skb;
	unsigned int max_msg_size = 0;
	int length, status;
	unsigned long time_left;
	bool disable_credit_flow_ctrl = false;
	u16 message_id, service_id, flags = 0;
	u8 tx_alloc = 0;

	/* special case for HTC pseudo control service */
	if (conn_req->service_id == ATH10K_HTC_SVC_ID_RSVD_CTRL) {
		disable_credit_flow_ctrl = true;
		assigned_eid = ATH10K_HTC_EP_0;
		max_msg_size = ATH10K_HTC_MAX_CTRL_MSG_LEN;
		memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
		goto setup;
	}

	tx_alloc = ath10k_htc_get_credit_allocation(htc,
						    conn_req->service_id);
	if (!tx_alloc)
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "boot htc service %s does not allocate target credits\n",
			   htc_service_name(conn_req->service_id));

	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
	if (!skb) {
		ath10k_err(ar, "Failed to allocate HTC packet\n");
		return -ENOMEM;
	}

	length = sizeof(msg->hdr) + sizeof(msg->connect_service);
	skb_put(skb, length);
	memset(skb->data, 0, length);

	msg = (struct ath10k_htc_msg *)skb->data;
	msg->hdr.message_id =
		__cpu_to_le16(ATH10K_HTC_MSG_CONNECT_SERVICE_ID);

	flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);

	/* Only enable credit flow control for WMI ctrl service */
	if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
		flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
		disable_credit_flow_ctrl = true;
	}

	req_msg = &msg->connect_service;
	req_msg->flags = __cpu_to_le16(flags);
	req_msg->service_id = __cpu_to_le16(conn_req->service_id);

	reinit_completion(&htc->ctl_resp);

	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
	if (status) {
		kfree_skb(skb);
		return status;
	}

	/* wait for response */
	time_left = wait_for_completion_timeout(&htc->ctl_resp,
						ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
	if (!time_left) {
		ath10k_err(ar, "Service connect timeout\n");
		return -ETIMEDOUT;
	}

	/* we controlled the buffer creation, it's aligned */
	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
	resp_msg = &msg->connect_service_response;
	message_id = __le16_to_cpu(msg->hdr.message_id);
	service_id = __le16_to_cpu(resp_msg->service_id);

	if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
	    (htc->control_resp_len < sizeof(msg->hdr) +
	     sizeof(msg->connect_service_response))) {
		ath10k_err(ar, "Invalid resp message ID 0x%x\n", message_id);
		return -EPROTO;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTC,
		   "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
		   htc_service_name(service_id),
		   resp_msg->status, resp_msg->eid);

	conn_resp->connect_resp_code = resp_msg->status;

	/* check response status */
	if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
		ath10k_err(ar, "HTC Service %s connect request failed: 0x%x\n",
			   htc_service_name(service_id),
			   resp_msg->status);
		return -EPROTO;
	}

	assigned_eid = (enum ath10k_htc_ep_id)resp_msg->eid;
	max_msg_size = __le16_to_cpu(resp_msg->max_msg_size);

setup:

	if (assigned_eid >= ATH10K_HTC_EP_COUNT)
		return -EPROTO;

	if (max_msg_size == 0)
		return -EPROTO;

	ep = &htc->endpoint[assigned_eid];
	ep->eid = assigned_eid;

	if (ep->service_id != ATH10K_HTC_SVC_ID_UNUSED)
		return -EPROTO;

	/* return assigned endpoint to caller */
	conn_resp->eid = assigned_eid;
	conn_resp->max_msg_len = __le16_to_cpu(resp_msg->max_msg_size);

	/* setup the endpoint */
	ep->service_id = conn_req->service_id;
	ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
	ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
	ep->tx_credits = tx_alloc;
	ep->tx_credit_size = htc->target_credit_size;

	if (conn_req->service_id == ATH10K_HTC_SVC_ID_HTT_DATA_MSG &&
	    htc->alt_data_credit_size != 0)
		ep->tx_credit_size = htc->alt_data_credit_size;

	/* copy all the callbacks */
	ep->ep_ops = conn_req->ep_ops;

	status = ath10k_hif_map_service_to_pipe(htc->ar,
						ep->service_id,
						&ep->ul_pipe_id,
						&ep->dl_pipe_id);
	if (status) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT, "unsupported HTC service id: %d\n",
			   ep->service_id);
		return status;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
		   htc_service_name(ep->service_id), ep->ul_pipe_id,
		   ep->dl_pipe_id, ep->eid);

	if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
		ep->tx_credit_flow_enabled = false;
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "boot htc service '%s' eid %d TX flow control disabled\n",
			   htc_service_name(ep->service_id), assigned_eid);
	}

	return status;
}

struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr));
	if (!skb)
		return NULL;

	skb_reserve(skb, sizeof(struct ath10k_htc_hdr));

	/* FW/HTC requires 4-byte aligned streams */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "Unaligned HTC tx skb\n");

	return skb;
}

static void ath10k_htc_pktlog_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
	dev_kfree_skb_any(skb);
}

static int ath10k_htc_pktlog_connect(struct ath10k *ar)
{
	struct ath10k_htc_svc_conn_resp conn_resp;
	struct ath10k_htc_svc_conn_req conn_req;
	int status;

	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));

	conn_req.ep_ops.ep_tx_complete = NULL;
	conn_req.ep_ops.ep_rx_complete = ath10k_htc_pktlog_process_rx;
	conn_req.ep_ops.ep_tx_credits = NULL;

	/* connect to pktlog service */
	conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_LOG_MSG;
	status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
	if (status) {
		ath10k_warn(ar, "failed to connect to PKTLOG service: %d\n",
			    status);
		return status;
	}

	return 0;
}

static bool ath10k_htc_pktlog_svc_supported(struct ath10k *ar)
{
	u8 ul_pipe_id;
	u8 dl_pipe_id;
	int status;

	status = ath10k_hif_map_service_to_pipe(ar, ATH10K_HTC_SVC_ID_HTT_LOG_MSG,
						&ul_pipe_id,
						&dl_pipe_id);
	if (status) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT, "unsupported HTC pktlog service id: %d\n",
			   ATH10K_HTC_SVC_ID_HTT_LOG_MSG);

		return false;
	}

	return true;
}

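/* Tell the target that host setup is complete. For SDIO the
 * setup-complete message also carries the rx bundling parameters.
 * Afterwards the pktlog service is connected when the underlying HIF
 * provides a pipe for it.
 */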
int ath10k_htc_start(struct ath10k_htc *htc)
{
	struct ath10k *ar = htc->ar;
	struct sk_buff *skb;
	int status = 0;
	struct ath10k_htc_msg *msg;

	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, sizeof(msg->hdr) + sizeof(msg->setup_complete_ext));
	memset(skb->data, 0, skb->len);

	msg = (struct ath10k_htc_msg *)skb->data;
	msg->hdr.message_id =
		__cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);

	if (ar->hif.bus == ATH10K_BUS_SDIO) {
		/* Extra setup params used by SDIO */
		msg->setup_complete_ext.flags =
			__cpu_to_le32(ATH10K_HTC_SETUP_COMPLETE_FLAGS_RX_BNDL_EN);
		msg->setup_complete_ext.max_msgs_per_bundled_recv =
			htc->max_msgs_per_htc_bundle;
	}
	ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");

	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
	if (status) {
		kfree_skb(skb);
		return status;
	}

	if (ath10k_htc_pktlog_svc_supported(ar)) {
		status = ath10k_htc_pktlog_connect(ar);
		if (status) {
			ath10k_err(ar, "failed to connect to pktlog: %d\n", status);
			return status;
		}
	}

	return 0;
}

/* registered target arrival callback from the HIF layer */
int ath10k_htc_init(struct ath10k *ar)
{
	int status;
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_htc_svc_conn_req conn_req;
	struct ath10k_htc_svc_conn_resp conn_resp;

	spin_lock_init(&htc->tx_lock);

	ath10k_htc_reset_endpoint_states(htc);

	htc->ar = ar;

	/* setup our pseudo HTC control endpoint connection */
	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));
	conn_req.ep_ops.ep_tx_complete = ath10k_htc_control_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath10k_htc_control_rx_complete;
	conn_req.max_send_queue_depth = ATH10K_NUM_CONTROL_TX_BUFFERS;
	conn_req.service_id = ATH10K_HTC_SVC_ID_RSVD_CTRL;

	/* connect fake service */
	status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
	if (status) {
		ath10k_err(ar, "could not connect to htc service (%d)\n",
			   status);
		return status;
	}

	init_completion(&htc->ctl_resp);

	return 0;
}