1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 *
4 * Bluetooth support for Intel PCIe devices
5 *
6 * Copyright (C) 2024 Intel Corporation
7 */
8
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/firmware.h>
12 #include <linux/pci.h>
13 #include <linux/wait.h>
14 #include <linux/delay.h>
15 #include <linux/interrupt.h>
16
17 #include <linux/unaligned.h>
18
19 #include <net/bluetooth/bluetooth.h>
20 #include <net/bluetooth/hci_core.h>
21
22 #include "btintel.h"
23 #include "btintel_pcie.h"
24
25 #define VERSION "0.1"
26
27 #define BTINTEL_PCI_DEVICE(dev, subdev) \
28 .vendor = PCI_VENDOR_ID_INTEL, \
29 .device = (dev), \
30 .subvendor = PCI_ANY_ID, \
31 .subdevice = (subdev), \
32 .driver_data = 0
33
34 #define POLL_INTERVAL_US 10
35
36 /* Intel Bluetooth PCIe device id table */
37 static const struct pci_device_id btintel_pcie_table[] = {
38 { BTINTEL_PCI_DEVICE(0xA876, PCI_ANY_ID) },
39 { BTINTEL_PCI_DEVICE(0xE476, PCI_ANY_ID) },
40 { 0 }
41 };
42 MODULE_DEVICE_TABLE(pci, btintel_pcie_table);
43
44 /* Intel PCIe uses 4 bytes of HCI type instead of 1 byte BT SIG HCI type */
45 #define BTINTEL_PCIE_HCI_TYPE_LEN 4
46 #define BTINTEL_PCIE_HCI_CMD_PKT 0x00000001
47 #define BTINTEL_PCIE_HCI_ACL_PKT 0x00000002
48 #define BTINTEL_PCIE_HCI_SCO_PKT 0x00000003
49 #define BTINTEL_PCIE_HCI_EVT_PKT 0x00000004
50 #define BTINTEL_PCIE_HCI_ISO_PKT 0x00000005
51
52 #define BTINTEL_PCIE_MAGIC_NUM 0xA5A5A5A5
53
54 #define BTINTEL_PCIE_BLZR_HWEXP_SIZE 1024
55 #define BTINTEL_PCIE_BLZR_HWEXP_DMP_ADDR 0xB00A7C00
56
57 #define BTINTEL_PCIE_SCP_HWEXP_SIZE 4096
58 #define BTINTEL_PCIE_SCP_HWEXP_DMP_ADDR 0xB030F800
59
62 #define BTINTEL_PCIE_TRIGGER_REASON_USER_TRIGGER 0x17A2
63 #define BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT 0x1E61
64
65 /* Alive interrupt context */
66 enum {
67 BTINTEL_PCIE_ROM,
68 BTINTEL_PCIE_FW_DL,
69 BTINTEL_PCIE_HCI_RESET,
70 BTINTEL_PCIE_INTEL_HCI_RESET1,
71 BTINTEL_PCIE_INTEL_HCI_RESET2,
72 BTINTEL_PCIE_D0,
73 BTINTEL_PCIE_D3
74 };
75
76 /* Structure for dbgc fragment buffer
77 * @buf_addr_lsb: LSB of the buffer's physical address
78 * @buf_addr_msb: MSB of the buffer's physical address
79 * @buf_size: Total size of the buffer
80 */
81 struct btintel_pcie_dbgc_ctxt_buf {
82 u32 buf_addr_lsb;
83 u32 buf_addr_msb;
84 u32 buf_size;
85 };
86
87 /* Structure for dbgc fragment
88 * @magic_num: 0XA5A5A5A5
89 * @ver: For Driver-FW compatibility
90 * @total_size: Total size of the payload debug info
91 * @num_buf: Num of allocated debug bufs
92 * @bufs: All buffer's addresses and sizes
93 */
94 struct btintel_pcie_dbgc_ctxt {
95 u32 magic_num;
96 u32 ver;
97 u32 total_size;
98 u32 num_buf;
99 struct btintel_pcie_dbgc_ctxt_buf bufs[BTINTEL_PCIE_DBGC_BUFFER_COUNT];
100 };
101
102 /* This function initializes the memory for DBGC buffers and formats the
103 * DBGC fragment, which consists of header info and each DBGC buffer's LSB, MSB and
104 * size as the payload
105 */
106 static int btintel_pcie_setup_dbgc(struct btintel_pcie_data *data)
107 {
108 struct btintel_pcie_dbgc_ctxt db_frag;
109 struct data_buf *buf;
110 int i;
111
112 data->dbgc.count = BTINTEL_PCIE_DBGC_BUFFER_COUNT;
113 data->dbgc.bufs = devm_kcalloc(&data->pdev->dev, data->dbgc.count,
114 sizeof(*buf), GFP_KERNEL);
115 if (!data->dbgc.bufs)
116 return -ENOMEM;
117
118 data->dbgc.buf_v_addr = dmam_alloc_coherent(&data->pdev->dev,
119 data->dbgc.count *
120 BTINTEL_PCIE_DBGC_BUFFER_SIZE,
121 &data->dbgc.buf_p_addr,
122 GFP_KERNEL | __GFP_NOWARN);
123 if (!data->dbgc.buf_v_addr)
124 return -ENOMEM;
125
126 data->dbgc.frag_v_addr = dmam_alloc_coherent(&data->pdev->dev,
127 sizeof(struct btintel_pcie_dbgc_ctxt),
128 &data->dbgc.frag_p_addr,
129 GFP_KERNEL | __GFP_NOWARN);
130 if (!data->dbgc.frag_v_addr)
131 return -ENOMEM;
132
133 data->dbgc.frag_size = sizeof(struct btintel_pcie_dbgc_ctxt);
134
135 db_frag.magic_num = BTINTEL_PCIE_MAGIC_NUM;
136 db_frag.ver = BTINTEL_PCIE_DBGC_FRAG_VERSION;
137 db_frag.total_size = BTINTEL_PCIE_DBGC_FRAG_PAYLOAD_SIZE;
138 db_frag.num_buf = BTINTEL_PCIE_DBGC_FRAG_BUFFER_COUNT;
139
140 for (i = 0; i < data->dbgc.count; i++) {
141 buf = &data->dbgc.bufs[i];
142 buf->data_p_addr = data->dbgc.buf_p_addr + i * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
143 buf->data = data->dbgc.buf_v_addr + i * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
144 db_frag.bufs[i].buf_addr_lsb = lower_32_bits(buf->data_p_addr);
145 db_frag.bufs[i].buf_addr_msb = upper_32_bits(buf->data_p_addr);
146 db_frag.bufs[i].buf_size = BTINTEL_PCIE_DBGC_BUFFER_SIZE;
147 }
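/* The formatted fragment is copied into DMA memory below; its DMA address and
 * size are handed to the firmware later via the context information
 * (ci->dbgc_addr and ci->dbgc_size are set in btintel_pcie_init_ci()).
 */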
148
149 memcpy(data->dbgc.frag_v_addr, &db_frag, sizeof(db_frag));
150 return 0;
151 }
152
153 static inline void ipc_print_ia_ring(struct hci_dev *hdev, struct ia *ia,
154 u16 queue_num)
155 {
156 bt_dev_dbg(hdev, "IA: %s: tr-h:%02u tr-t:%02u cr-h:%02u cr-t:%02u",
157 queue_num == BTINTEL_PCIE_TXQ_NUM ? "TXQ" : "RXQ",
158 ia->tr_hia[queue_num], ia->tr_tia[queue_num],
159 ia->cr_hia[queue_num], ia->cr_tia[queue_num]);
160 }
161
162 static inline void ipc_print_urbd1(struct hci_dev *hdev, struct urbd1 *urbd1,
163 u16 index)
164 {
165 bt_dev_dbg(hdev, "RXQ:urbd1(%u) frbd_tag:%u status: 0x%x fixed:0x%x",
166 index, urbd1->frbd_tag, urbd1->status, urbd1->fixed);
167 }
168
169 static struct btintel_pcie_data *btintel_pcie_get_data(struct msix_entry *entry)
170 {
171 u8 queue = entry->entry;
172 struct msix_entry *entries = entry - queue;
173
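/* 'entries' now points at msix_entries[0]; container_of() then recovers the
 * enclosing btintel_pcie_data from the address of that embedded array.
 */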
174 return container_of(entries, struct btintel_pcie_data, msix_entries[0]);
175 }
176
177 /* Set the doorbell for TXQ to notify the device that @index (actually index-1)
178 * of the TFD is updated and ready to transmit.
179 */
180 static void btintel_pcie_set_tx_db(struct btintel_pcie_data *data, u16 index)
181 {
182 u32 val;
183
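/* Doorbell value layout as written here: bits[15:0] carry the descriptor
 * index and bits[31:16] carry the doorbell vector for the queue.
 */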
184 val = index;
185 val |= (BTINTEL_PCIE_TX_DB_VEC << 16);
186
187 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR, val);
188 }
189
190 /* Copy the data to the next (@tfd_index) data buffer and update the TFD (transfer
191 * descriptor) with the data length and the DMA address of the data buffer.
192 */
193 static void btintel_pcie_prepare_tx(struct txq *txq, u16 tfd_index,
194 struct sk_buff *skb)
195 {
196 struct data_buf *buf;
197 struct tfd *tfd;
198
199 tfd = &txq->tfds[tfd_index];
200 memset(tfd, 0, sizeof(*tfd));
201
202 buf = &txq->bufs[tfd_index];
203
204 tfd->size = skb->len;
205 tfd->addr = buf->data_p_addr;
206
207 /* Copy the outgoing data to DMA buffer */
208 memcpy(buf->data, skb->data, tfd->size);
209 }
210
211 static int btintel_pcie_send_sync(struct btintel_pcie_data *data,
212 struct sk_buff *skb)
213 {
214 int ret;
215 u16 tfd_index;
216 struct txq *txq = &data->txq;
217
218 tfd_index = data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM];
219
220 if (tfd_index > txq->count)
221 return -ERANGE;
222
223 /* Prepare for TX. It updates the TFD with the length of data and
224 * address of the DMA buffer, and copies the data to the DMA buffer
225 */
226 btintel_pcie_prepare_tx(txq, tfd_index, skb);
227
228 tfd_index = (tfd_index + 1) % txq->count;
229 data->ia.tr_hia[BTINTEL_PCIE_TXQ_NUM] = tfd_index;
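/* tfd_index now points one slot past the TFD just filled; this post-increment
 * value is what is written to the doorbell below, which is why the device
 * treats it as "index - 1" (see the comment above btintel_pcie_set_tx_db()).
 */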
230
231 /* Arm wait event condition */
232 data->tx_wait_done = false;
233
234 /* Set the doorbell to notify the device */
235 btintel_pcie_set_tx_db(data, tfd_index);
236
237 /* Wait for the complete interrupt - URBD0 */
238 ret = wait_event_timeout(data->tx_wait_q, data->tx_wait_done,
239 msecs_to_jiffies(BTINTEL_PCIE_TX_WAIT_TIMEOUT_MS));
240 if (!ret)
241 return -ETIME;
242
243 return 0;
244 }
245
246 /* Set the doorbell for RXQ to notify the device that @index (actually index-1)
247 * is available to receive the data
248 */
249 static void btintel_pcie_set_rx_db(struct btintel_pcie_data *data, u16 index)
250 {
251 u32 val;
252
253 val = index;
254 val |= (BTINTEL_PCIE_RX_DB_VEC << 16);
255
256 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR, val);
257 }
258
259 /* Update the FRBD (free buffer descriptor) with the @frbd_index and the
260 * DMA address of the free buffer.
261 */
262 static void btintel_pcie_prepare_rx(struct rxq *rxq, u16 frbd_index)
263 {
264 struct data_buf *buf;
265 struct frbd *frbd;
266
267 /* Get the buffer of the FRBD for DMA */
268 buf = &rxq->bufs[frbd_index];
269
270 frbd = &rxq->frbds[frbd_index];
271 memset(frbd, 0, sizeof(*frbd));
272
273 /* Update FRBD */
274 frbd->tag = frbd_index;
275 frbd->addr = buf->data_p_addr;
276 }
277
278 static int btintel_pcie_submit_rx(struct btintel_pcie_data *data)
279 {
280 u16 frbd_index;
281 struct rxq *rxq = &data->rxq;
282
283 frbd_index = data->ia.tr_hia[BTINTEL_PCIE_RXQ_NUM];
284
285 if (frbd_index > rxq->count)
286 return -ERANGE;
287
288 /* Prepare for RX submit. It updates the FRBD with the address of DMA
289 * buffer
290 */
291 btintel_pcie_prepare_rx(rxq, frbd_index);
292
293 frbd_index = (frbd_index + 1) % rxq->count;
294 data->ia.tr_hia[BTINTEL_PCIE_RXQ_NUM] = frbd_index;
295 ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_RXQ_NUM);
296
297 /* Set the doorbell to notify the device */
298 btintel_pcie_set_rx_db(data, frbd_index);
299
300 return 0;
301 }
302
303 static int btintel_pcie_start_rx(struct btintel_pcie_data *data)
304 {
305 int i, ret;
306
307 for (i = 0; i < BTINTEL_PCIE_RX_MAX_QUEUE; i++) {
308 ret = btintel_pcie_submit_rx(data);
309 if (ret)
310 return ret;
311 }
312
313 return 0;
314 }
315
316 static void btintel_pcie_reset_ia(struct btintel_pcie_data *data)
317 {
318 memset(data->ia.tr_hia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
319 memset(data->ia.tr_tia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
320 memset(data->ia.cr_hia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
321 memset(data->ia.cr_tia, 0, sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES);
322 }
323
324 static int btintel_pcie_reset_bt(struct btintel_pcie_data *data)
325 {
326 u32 reg;
327 int retry = 3;
328
329 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
330
331 reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
332 BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT |
333 BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
334 reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON;
335
336 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
337
338 do {
339 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
340 if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_STS)
341 break;
342 usleep_range(10000, 12000);
343
344 } while (--retry > 0);
345 usleep_range(10000, 12000);
346
347 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
348
349 reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
350 BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT |
351 BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
352 reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET;
353 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
354 usleep_range(10000, 12000);
355
356 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
357 bt_dev_dbg(data->hdev, "csr register after reset: 0x%8.8x", reg);
358
359 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
360
361 /* If the shared hardware reset is successful then the boot stage register shall be
362 * set to 0
363 */
364 return reg == 0 ? 0 : -ENODEV;
365 }
366
367 static void btintel_pcie_mac_init(struct btintel_pcie_data *data)
368 {
369 u32 reg;
370
371 /* Set MAC_INIT bit to start primary bootloader */
372 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
373 reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT |
374 BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON |
375 BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET);
376 reg |= (BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
377 BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT);
378 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
379 }
380
381 static int btintel_pcie_add_dmp_data(struct hci_dev *hdev, const void *data, int size)
382 {
383 struct sk_buff *skb;
384 int err;
385
386 skb = alloc_skb(size, GFP_ATOMIC);
387 if (!skb)
388 return -ENOMEM;
389
390 skb_put_data(skb, data, size);
391 err = hci_devcd_append(hdev, skb);
392 if (err) {
393 bt_dev_err(hdev, "Failed to append data in the coredump");
394 return err;
395 }
396
397 return 0;
398 }
399
400 static int btintel_pcie_get_mac_access(struct btintel_pcie_data *data)
401 {
402 u32 reg;
403 int retry = 15;
404
405 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
406
407 reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS;
408 reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ;
409 if ((reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS) == 0)
410 reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ;
411
412 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
413
414 do {
415 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
416 if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS)
417 return 0;
418 /* Need a delay here for the Target Access hardware to settle down */
419 usleep_range(1000, 1200);
420
421 } while (--retry > 0);
422
423 return -ETIME;
424 }
425
426 static void btintel_pcie_release_mac_access(struct btintel_pcie_data *data)
427 {
428 u32 reg;
429
430 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
431
432 if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ)
433 reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ;
434
435 if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS)
436 reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS;
437
438 if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ)
439 reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ;
440
441 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
442 }
443
444 static void btintel_pcie_copy_tlv(struct sk_buff *skb, enum btintel_pcie_tlv_type type,
445 void *data, int size)
446 {
447 struct intel_tlv *tlv;
448
449 tlv = skb_put(skb, sizeof(*tlv) + size);
450 tlv->type = type;
451 tlv->len = size;
452 memcpy(tlv->val, data, tlv->len);
453 }
454
455 static int btintel_pcie_read_dram_buffers(struct btintel_pcie_data *data)
456 {
457 u32 offset, prev_size, wr_ptr_status, dump_size, i;
458 struct btintel_pcie_dbgc *dbgc = &data->dbgc;
459 u8 buf_idx, dump_time_len, fw_build;
460 struct hci_dev *hdev = data->hdev;
461 struct intel_tlv *tlv;
462 struct timespec64 now;
463 struct sk_buff *skb;
464 struct tm tm_now;
465 char buf[256];
466 u16 hdr_len;
467 int ret;
468
469 wr_ptr_status = btintel_pcie_rd_dev_mem(data, BTINTEL_PCIE_DBGC_CUR_DBGBUFF_STATUS);
470 offset = wr_ptr_status & BTINTEL_PCIE_DBG_OFFSET_BIT_MASK;
471
472 buf_idx = BTINTEL_PCIE_DBGC_DBG_BUF_IDX(wr_ptr_status);
473 if (buf_idx > dbgc->count) {
474 bt_dev_warn(hdev, "Buffer index is invalid");
475 return -EINVAL;
476 }
477
478 prev_size = buf_idx * BTINTEL_PCIE_DBGC_BUFFER_SIZE;
479 if (prev_size + offset >= prev_size)
480 data->dmp_hdr.write_ptr = prev_size + offset;
481 else
482 return -EINVAL;
483
484 ktime_get_real_ts64(&now);
485 time64_to_tm(now.tv_sec, 0, &tm_now);
486 dump_time_len = snprintf(buf, sizeof(buf), "Dump Time: %02d-%02d-%04ld %02d:%02d:%02d",
487 tm_now.tm_mday, tm_now.tm_mon + 1, tm_now.tm_year + 1900,
488 tm_now.tm_hour, tm_now.tm_min, tm_now.tm_sec);
489
490 fw_build = snprintf(buf + dump_time_len, sizeof(buf) - dump_time_len,
491 "Firmware Timestamp: Year %u WW %02u buildtype %u build %u",
492 2000 + (data->dmp_hdr.fw_timestamp >> 8),
493 data->dmp_hdr.fw_timestamp & 0xff, data->dmp_hdr.fw_build_type,
494 data->dmp_hdr.fw_build_num);
495
496 hdr_len = sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_bt) +
497 sizeof(*tlv) + sizeof(data->dmp_hdr.write_ptr) +
498 sizeof(*tlv) + sizeof(data->dmp_hdr.wrap_ctr) +
499 sizeof(*tlv) + sizeof(data->dmp_hdr.trigger_reason) +
500 sizeof(*tlv) + sizeof(data->dmp_hdr.fw_git_sha1) +
501 sizeof(*tlv) + sizeof(data->dmp_hdr.cnvr_top) +
502 sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_top) +
503 sizeof(*tlv) + dump_time_len +
504 sizeof(*tlv) + fw_build;
505
506 dump_size = hdr_len + sizeof(hdr_len);
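/* Resulting devcoredump layout: a 16-bit hdr_len, followed by hdr_len bytes
 * of TLV records, followed by the raw debug buffers appended further below.
 */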
507
508 skb = alloc_skb(dump_size, GFP_KERNEL);
509 if (!skb)
510 return -ENOMEM;
511
512 /* Add debug buffers data length to dump size */
513 dump_size += BTINTEL_PCIE_DBGC_BUFFER_SIZE * dbgc->count;
514
515 ret = hci_devcd_init(hdev, dump_size);
516 if (ret) {
517 bt_dev_err(hdev, "Failed to init devcoredump, err %d", ret);
518 kfree_skb(skb);
519 return ret;
520 }
521
522 skb_put_data(skb, &hdr_len, sizeof(hdr_len));
523
524 btintel_pcie_copy_tlv(skb, BTINTEL_CNVI_BT, &data->dmp_hdr.cnvi_bt,
525 sizeof(data->dmp_hdr.cnvi_bt));
526
527 btintel_pcie_copy_tlv(skb, BTINTEL_WRITE_PTR, &data->dmp_hdr.write_ptr,
528 sizeof(data->dmp_hdr.write_ptr));
529
530 data->dmp_hdr.wrap_ctr = btintel_pcie_rd_dev_mem(data,
531 BTINTEL_PCIE_DBGC_DBGBUFF_WRAP_ARND);
532
533 btintel_pcie_copy_tlv(skb, BTINTEL_WRAP_CTR, &data->dmp_hdr.wrap_ctr,
534 sizeof(data->dmp_hdr.wrap_ctr));
535
536 btintel_pcie_copy_tlv(skb, BTINTEL_TRIGGER_REASON, &data->dmp_hdr.trigger_reason,
537 sizeof(data->dmp_hdr.trigger_reason));
538
539 btintel_pcie_copy_tlv(skb, BTINTEL_FW_SHA, &data->dmp_hdr.fw_git_sha1,
540 sizeof(data->dmp_hdr.fw_git_sha1));
541
542 btintel_pcie_copy_tlv(skb, BTINTEL_CNVR_TOP, &data->dmp_hdr.cnvr_top,
543 sizeof(data->dmp_hdr.cnvr_top));
544
545 btintel_pcie_copy_tlv(skb, BTINTEL_CNVI_TOP, &data->dmp_hdr.cnvi_top,
546 sizeof(data->dmp_hdr.cnvi_top));
547
548 btintel_pcie_copy_tlv(skb, BTINTEL_DUMP_TIME, buf, dump_time_len);
549
550 btintel_pcie_copy_tlv(skb, BTINTEL_FW_BUILD, buf + dump_time_len, fw_build);
551
552 ret = hci_devcd_append(hdev, skb);
553 if (ret)
554 goto exit_err;
555
556 for (i = 0; i < dbgc->count; i++) {
557 ret = btintel_pcie_add_dmp_data(hdev, dbgc->bufs[i].data,
558 BTINTEL_PCIE_DBGC_BUFFER_SIZE);
559 if (ret)
560 break;
561 }
562
563 exit_err:
564 hci_devcd_complete(hdev);
565 return ret;
566 }
567
568 static void btintel_pcie_dump_traces(struct hci_dev *hdev)
569 {
570 struct btintel_pcie_data *data = hci_get_drvdata(hdev);
571 int ret = 0;
572
573 ret = btintel_pcie_get_mac_access(data);
574 if (ret) {
575 bt_dev_err(hdev, "Failed to get mac access: (%d)", ret);
576 return;
577 }
578
579 ret = btintel_pcie_read_dram_buffers(data);
580
581 btintel_pcie_release_mac_access(data);
582
583 if (ret)
584 bt_dev_err(hdev, "Failed to dump traces: (%d)", ret);
585 }
586
587 static void btintel_pcie_dump_hdr(struct hci_dev *hdev, struct sk_buff *skb)
588 {
589 struct btintel_pcie_data *data = hci_get_drvdata(hdev);
590 u16 len = skb->len;
591 u16 *hdrlen_ptr;
592 char buf[80];
593
594 hdrlen_ptr = skb_put_zero(skb, sizeof(len));
595
596 snprintf(buf, sizeof(buf), "Controller Name: 0x%X\n",
597 INTEL_HW_VARIANT(data->dmp_hdr.cnvi_bt));
598 skb_put_data(skb, buf, strlen(buf));
599
600 snprintf(buf, sizeof(buf), "Firmware Build Number: %u\n",
601 data->dmp_hdr.fw_build_num);
602 skb_put_data(skb, buf, strlen(buf));
603
604 snprintf(buf, sizeof(buf), "Driver: %s\n", data->dmp_hdr.driver_name);
605 skb_put_data(skb, buf, strlen(buf));
606
607 snprintf(buf, sizeof(buf), "Vendor: Intel\n");
608 skb_put_data(skb, buf, strlen(buf));
609
610 *hdrlen_ptr = skb->len - len;
611 }
612
613 static void btintel_pcie_dump_notify(struct hci_dev *hdev, int state)
614 {
615 struct btintel_pcie_data *data = hci_get_drvdata(hdev);
616
617 switch (state) {
618 case HCI_DEVCOREDUMP_IDLE:
619 data->dmp_hdr.state = HCI_DEVCOREDUMP_IDLE;
620 break;
621 case HCI_DEVCOREDUMP_ACTIVE:
622 data->dmp_hdr.state = HCI_DEVCOREDUMP_ACTIVE;
623 break;
624 case HCI_DEVCOREDUMP_TIMEOUT:
625 case HCI_DEVCOREDUMP_ABORT:
626 case HCI_DEVCOREDUMP_DONE:
627 data->dmp_hdr.state = HCI_DEVCOREDUMP_IDLE;
628 break;
629 }
630 }
631
632 /* This function enables the BT function by setting the BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT bit in
633 * the BTINTEL_PCIE_CSR_FUNC_CTRL_REG register and waits for the MSI-X interrupt with
634 * BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0.
635 * Then the host reads firmware version from BTINTEL_CSR_F2D_MBX and the boot stage
636 * from BTINTEL_PCIE_CSR_BOOT_STAGE_REG.
637 */
638 static int btintel_pcie_enable_bt(struct btintel_pcie_data *data)
639 {
640 int err;
641 u32 reg;
642
643 data->gp0_received = false;
644
645 /* Update the DMA address of CI struct to CSR */
646 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_CI_ADDR_LSB_REG,
647 data->ci_p_addr & 0xffffffff);
648 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_CI_ADDR_MSB_REG,
649 (u64)data->ci_p_addr >> 32);
650
651 /* Reset the cached value of the boot stage. It is updated by the MSI-X
652 * gp0 interrupt handler.
653 */
654 data->boot_stage_cache = 0x0;
655
656 /* Set MAC_INIT bit to start primary bootloader */
657 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
658 reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT |
659 BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON |
660 BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET);
661 reg |= (BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA |
662 BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT);
663
664 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
665
666 /* MAC is ready. Enable BT FUNC */
667 btintel_pcie_set_reg_bits(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG,
668 BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT);
669
670 btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG);
671
672 /* wait for interrupt from the device after booting up to primary
673 * bootloader.
674 */
675 data->alive_intr_ctxt = BTINTEL_PCIE_ROM;
676 err = wait_event_timeout(data->gp0_wait_q, data->gp0_received,
677 msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
678 if (!err)
679 return -ETIME;
680
681 /* Check cached boot stage is BTINTEL_PCIE_CSR_BOOT_STAGE_ROM(BIT(0)) */
682 if (~data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ROM)
683 return -ENODEV;
684
685 return 0;
686 }
687
688 static inline bool btintel_pcie_in_op(struct btintel_pcie_data *data)
689 {
690 return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW;
691 }
692
693 static inline bool btintel_pcie_in_iml(struct btintel_pcie_data *data)
694 {
695 return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_IML &&
696 !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW);
697 }
698
699 static inline bool btintel_pcie_in_d3(struct btintel_pcie_data *data)
700 {
701 return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY;
702 }
703
704 static inline bool btintel_pcie_in_d0(struct btintel_pcie_data *data)
705 {
706 return !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY);
707 }
708
709 static void btintel_pcie_wr_sleep_cntrl(struct btintel_pcie_data *data,
710 u32 dxstate)
711 {
712 bt_dev_dbg(data->hdev, "writing sleep_ctl_reg: 0x%8.8x", dxstate);
713 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_IPC_SLEEP_CTL_REG, dxstate);
714 }
715
716 static inline char *btintel_pcie_alivectxt_state2str(u32 alive_intr_ctxt)
717 {
718 switch (alive_intr_ctxt) {
719 case BTINTEL_PCIE_ROM:
720 return "rom";
721 case BTINTEL_PCIE_FW_DL:
722 return "fw_dl";
723 case BTINTEL_PCIE_D0:
724 return "d0";
725 case BTINTEL_PCIE_D3:
726 return "d3";
727 case BTINTEL_PCIE_HCI_RESET:
728 return "hci_reset";
729 case BTINTEL_PCIE_INTEL_HCI_RESET1:
730 return "intel_reset1";
731 case BTINTEL_PCIE_INTEL_HCI_RESET2:
732 return "intel_reset2";
733 default:
734 return "unknown";
735 }
736 }
737
738 static int btintel_pcie_read_device_mem(struct btintel_pcie_data *data,
739 void *buf, u32 dev_addr, int len)
740 {
741 int err;
742 u32 *val = buf;
743
744 /* Get device mac access */
745 err = btintel_pcie_get_mac_access(data);
746 if (err) {
747 bt_dev_err(data->hdev, "Failed to get mac access %d", err);
748 return err;
749 }
750
751 for (; len > 0; len -= 4, dev_addr += 4, val++)
752 *val = btintel_pcie_rd_dev_mem(data, dev_addr);
753
754 btintel_pcie_release_mac_access(data);
755
756 return 0;
757 }
758
759 /* This function handles the MSI-X interrupt for gp0 cause (bit 0 in
760 * BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES) which is sent for boot stage and image response.
761 */
762 static void btintel_pcie_msix_gp0_handler(struct btintel_pcie_data *data)
763 {
764 bool submit_rx, signal_waitq;
765 u32 reg, old_ctxt;
766
767 /* This interrupt is for three different causes and it is not easy to
768 * know what causes the interrupt. So, it compares each register value
769 * with the cached value and updates it before waking up the queue.
770 */
771 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
772 if (reg != data->boot_stage_cache)
773 data->boot_stage_cache = reg;
774
775 bt_dev_dbg(data->hdev, "Alive context: %s old_boot_stage: 0x%8.8x new_boot_stage: 0x%8.8x",
776 btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt),
777 data->boot_stage_cache, reg);
778 reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_IMG_RESPONSE_REG);
779 if (reg != data->img_resp_cache)
780 data->img_resp_cache = reg;
781
782 data->gp0_received = true;
783
784 old_ctxt = data->alive_intr_ctxt;
785 submit_rx = false;
786 signal_waitq = false;
787
788 switch (data->alive_intr_ctxt) {
789 case BTINTEL_PCIE_ROM:
790 data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL;
791 signal_waitq = true;
792 break;
793 case BTINTEL_PCIE_FW_DL:
794 /* Error case is already handled. Ideally control shall not
795 * reach here
796 */
797 break;
798 case BTINTEL_PCIE_INTEL_HCI_RESET1:
799 if (btintel_pcie_in_op(data)) {
800 submit_rx = true;
801 break;
802 }
803
804 if (btintel_pcie_in_iml(data)) {
805 submit_rx = true;
806 data->alive_intr_ctxt = BTINTEL_PCIE_FW_DL;
807 break;
808 }
809 break;
810 case BTINTEL_PCIE_INTEL_HCI_RESET2:
811 if (btintel_test_and_clear_flag(data->hdev, INTEL_WAIT_FOR_D0)) {
812 btintel_wake_up_flag(data->hdev, INTEL_WAIT_FOR_D0);
813 data->alive_intr_ctxt = BTINTEL_PCIE_D0;
814 }
815 break;
816 case BTINTEL_PCIE_D0:
817 if (btintel_pcie_in_d3(data)) {
818 data->alive_intr_ctxt = BTINTEL_PCIE_D3;
819 signal_waitq = true;
820 break;
821 }
822 break;
823 case BTINTEL_PCIE_D3:
824 if (btintel_pcie_in_d0(data)) {
825 data->alive_intr_ctxt = BTINTEL_PCIE_D0;
826 submit_rx = true;
827 signal_waitq = true;
828 break;
829 }
830 break;
831 case BTINTEL_PCIE_HCI_RESET:
832 data->alive_intr_ctxt = BTINTEL_PCIE_D0;
833 submit_rx = true;
834 signal_waitq = true;
835 break;
836 default:
837 bt_dev_err(data->hdev, "Unknown state: 0x%2.2x",
838 data->alive_intr_ctxt);
839 break;
840 }
841
842 if (submit_rx) {
843 btintel_pcie_reset_ia(data);
844 btintel_pcie_start_rx(data);
845 }
846
847 if (signal_waitq) {
848 bt_dev_dbg(data->hdev, "wake up gp0 wait_q");
849 wake_up(&data->gp0_wait_q);
850 }
851
852 if (old_ctxt != data->alive_intr_ctxt)
853 bt_dev_dbg(data->hdev, "alive context changed: %s -> %s",
854 btintel_pcie_alivectxt_state2str(old_ctxt),
855 btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
856 }
857
858 /* This function handles the MSI-X interrupt for rx queue 0 which is for TX
859 */
860 static void btintel_pcie_msix_tx_handle(struct btintel_pcie_data *data)
861 {
862 u16 cr_tia, cr_hia;
863 struct txq *txq;
864 struct urbd0 *urbd0;
865
866 cr_tia = data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM];
867 cr_hia = data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
868
869 if (cr_tia == cr_hia)
870 return;
871
872 txq = &data->txq;
873
874 while (cr_tia != cr_hia) {
875 data->tx_wait_done = true;
876 wake_up(&data->tx_wait_q);
877
878 urbd0 = &txq->urbd0s[cr_tia];
879
880 if (urbd0->tfd_index > txq->count)
881 return;
882
883 cr_tia = (cr_tia + 1) % txq->count;
884 data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] = cr_tia;
885 ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_TXQ_NUM);
886 }
887 }
888
889 static int btintel_pcie_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
890 {
891 struct hci_event_hdr *hdr = (void *)skb->data;
892 const char diagnostics_hdr[] = { 0x87, 0x80, 0x03 };
893 struct btintel_pcie_data *data = hci_get_drvdata(hdev);
894
895 if (skb->len > HCI_EVENT_HDR_SIZE && hdr->evt == 0xff &&
896 hdr->plen > 0) {
897 const void *ptr = skb->data + HCI_EVENT_HDR_SIZE + 1;
898 unsigned int len = skb->len - HCI_EVENT_HDR_SIZE - 1;
899
900 if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
901 switch (skb->data[2]) {
902 case 0x02:
903 /* When switching to the operational firmware
904 * the device sends a vendor specific event
905 * indicating that the bootup completed.
906 */
907 btintel_bootup(hdev, ptr, len);
908
909 /* If bootup event is from operational image,
910 * driver needs to write sleep control register to
911 * move into D0 state
912 */
913 if (btintel_pcie_in_op(data)) {
914 btintel_pcie_wr_sleep_cntrl(data, BTINTEL_PCIE_STATE_D0);
915 data->alive_intr_ctxt = BTINTEL_PCIE_INTEL_HCI_RESET2;
916 kfree_skb(skb);
917 return 0;
918 }
919
920 if (btintel_pcie_in_iml(data)) {
921 /* In case of IML, there is no concept
922 * of D0 transition. Just mimic as if
923 * IML moved to D0 by clearing INTEL_WAIT_FOR_D0
924 * bit and waking up the task waiting on
925 * INTEL_WAIT_FOR_D0. This is required
926 * as intel_boot() is a common function for
927 * both IML and OP image loading.
928 */
929 if (btintel_test_and_clear_flag(data->hdev,
930 INTEL_WAIT_FOR_D0))
931 btintel_wake_up_flag(data->hdev,
932 INTEL_WAIT_FOR_D0);
933 }
934 kfree_skb(skb);
935 return 0;
936 case 0x06:
937 /* When the firmware loading completes the
938 * device sends out a vendor specific event
939 * indicating the result of the firmware
940 * loading.
941 */
942 btintel_secure_send_result(hdev, ptr, len);
943 kfree_skb(skb);
944 return 0;
945 }
946 }
947
948 /* Handle all diagnostics events separately. May still call
949 * hci_recv_frame.
950 */
951 if (len >= sizeof(diagnostics_hdr) &&
952 memcmp(&skb->data[2], diagnostics_hdr,
953 sizeof(diagnostics_hdr)) == 0) {
954 return btintel_diagnostics(hdev, skb);
955 }
956
957 /* This is a debug event that comes from IML and OP image when it
958 * starts execution. There is no need to pass this event to the stack.
959 */
960 if (skb->data[2] == 0x97) {
961 hci_recv_diag(hdev, skb);
962 return 0;
963 }
964 }
965
966 return hci_recv_frame(hdev, skb);
967 }
968 /* Process the received rx data
969 * It checks the frame header to identify the data type, creates the skb
970 * and calls the HCI API
971 */
972 static int btintel_pcie_recv_frame(struct btintel_pcie_data *data,
973 struct sk_buff *skb)
974 {
975 int ret;
976 u8 pkt_type;
977 u16 plen;
978 u32 pcie_pkt_type;
979 void *pdata;
980 struct hci_dev *hdev = data->hdev;
981
982 spin_lock(&data->hci_rx_lock);
983
984 /* The first 4 bytes indicates the Intel PCIe specific packet type */
985 pdata = skb_pull_data(skb, BTINTEL_PCIE_HCI_TYPE_LEN);
986 if (!pdata) {
987 bt_dev_err(hdev, "Corrupted packet received");
988 ret = -EILSEQ;
989 goto exit_error;
990 }
991
992 pcie_pkt_type = get_unaligned_le32(pdata);
993
994 switch (pcie_pkt_type) {
995 case BTINTEL_PCIE_HCI_ACL_PKT:
996 if (skb->len >= HCI_ACL_HDR_SIZE) {
997 plen = HCI_ACL_HDR_SIZE + __le16_to_cpu(hci_acl_hdr(skb)->dlen);
998 pkt_type = HCI_ACLDATA_PKT;
999 } else {
1000 bt_dev_err(hdev, "ACL packet is too short");
1001 ret = -EILSEQ;
1002 goto exit_error;
1003 }
1004 break;
1005
1006 case BTINTEL_PCIE_HCI_SCO_PKT:
1007 if (skb->len >= HCI_SCO_HDR_SIZE) {
1008 plen = HCI_SCO_HDR_SIZE + hci_sco_hdr(skb)->dlen;
1009 pkt_type = HCI_SCODATA_PKT;
1010 } else {
1011 bt_dev_err(hdev, "SCO packet is too short");
1012 ret = -EILSEQ;
1013 goto exit_error;
1014 }
1015 break;
1016
1017 case BTINTEL_PCIE_HCI_EVT_PKT:
1018 if (skb->len >= HCI_EVENT_HDR_SIZE) {
1019 plen = HCI_EVENT_HDR_SIZE + hci_event_hdr(skb)->plen;
1020 pkt_type = HCI_EVENT_PKT;
1021 } else {
1022 bt_dev_err(hdev, "Event packet is too short");
1023 ret = -EILSEQ;
1024 goto exit_error;
1025 }
1026 break;
1027
1028 case BTINTEL_PCIE_HCI_ISO_PKT:
1029 if (skb->len >= HCI_ISO_HDR_SIZE) {
1030 plen = HCI_ISO_HDR_SIZE + __le16_to_cpu(hci_iso_hdr(skb)->dlen);
1031 pkt_type = HCI_ISODATA_PKT;
1032 } else {
1033 bt_dev_err(hdev, "ISO packet is too short");
1034 ret = -EILSEQ;
1035 goto exit_error;
1036 }
1037 break;
1038
1039 default:
1040 bt_dev_err(hdev, "Invalid packet type received: 0x%4.4x",
1041 pcie_pkt_type);
1042 ret = -EINVAL;
1043 goto exit_error;
1044 }
1045
1046 if (skb->len < plen) {
1047 bt_dev_err(hdev, "Received corrupted packet. type: 0x%2.2x",
1048 pkt_type);
1049 ret = -EILSEQ;
1050 goto exit_error;
1051 }
1052
1053 bt_dev_dbg(hdev, "pkt_type: 0x%2.2x len: %u", pkt_type, plen);
1054
1055 hci_skb_pkt_type(skb) = pkt_type;
1056 hdev->stat.byte_rx += plen;
1057 skb_trim(skb, plen);
1058
1059 if (pcie_pkt_type == BTINTEL_PCIE_HCI_EVT_PKT)
1060 ret = btintel_pcie_recv_event(hdev, skb);
1061 else
1062 ret = hci_recv_frame(hdev, skb);
1063 skb = NULL; /* skb is freed in the callee */
1064
1065 exit_error:
1066 if (skb)
1067 kfree_skb(skb);
1068
1069 if (ret)
1070 hdev->stat.err_rx++;
1071
1072 spin_unlock(&data->hci_rx_lock);
1073
1074 return ret;
1075 }
1076
1077 static void btintel_pcie_read_hwexp(struct btintel_pcie_data *data)
1078 {
1079 int len, err, offset, pending;
1080 struct sk_buff *skb;
1081 u8 *buf, prefix[64];
1082 u32 addr, val;
1083 u16 pkt_len;
1084
1085 struct tlv {
1086 u8 type;
1087 __le16 len;
1088 u8 val[];
1089 } __packed;
1090
1091 struct tlv *tlv;
1092
1093 switch (data->dmp_hdr.cnvi_top & 0xfff) {
1094 case BTINTEL_CNVI_BLAZARI:
1095 case BTINTEL_CNVI_BLAZARIW:
1096 /* only from step B0 onwards */
1097 if (INTEL_CNVX_TOP_STEP(data->dmp_hdr.cnvi_top) != 0x01)
1098 return;
1099 len = BTINTEL_PCIE_BLZR_HWEXP_SIZE; /* exception data length */
1100 addr = BTINTEL_PCIE_BLZR_HWEXP_DMP_ADDR;
1101 break;
1102 case BTINTEL_CNVI_SCP:
1103 len = BTINTEL_PCIE_SCP_HWEXP_SIZE;
1104 addr = BTINTEL_PCIE_SCP_HWEXP_DMP_ADDR;
1105 break;
1106 default:
1107 bt_dev_err(data->hdev, "Unsupported cnvi 0x%8.8x", data->dmp_hdr.cnvi_top);
1108 return;
1109 }
1110
1111 buf = kzalloc(len, GFP_KERNEL);
1112 if (!buf)
1113 goto exit_on_error;
1114
1115 btintel_pcie_mac_init(data);
1116
1117 err = btintel_pcie_read_device_mem(data, buf, addr, len);
1118 if (err)
1119 goto exit_on_error;
1120
1121 val = get_unaligned_le32(buf);
1122 if (val != BTINTEL_PCIE_MAGIC_NUM) {
1123 bt_dev_err(data->hdev, "Invalid exception dump signature: 0x%8.8x",
1124 val);
1125 goto exit_on_error;
1126 }
1127
1128 snprintf(prefix, sizeof(prefix), "Bluetooth: %s: ", bt_dev_name(data->hdev));
1129
1130 offset = 4;
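/* The first 4 bytes were the magic number; the remainder of the exception
 * dump is parsed below as a sequence of type/length/value records.
 */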
1131 do {
1132 pending = len - offset;
1133 if (pending < sizeof(*tlv))
1134 break;
1135 tlv = (struct tlv *)(buf + offset);
1136
1137 /* If type == 0, then there are no more TLVs to be parsed */
1138 if (!tlv->type) {
1139 bt_dev_dbg(data->hdev, "Invalid TLV type 0");
1140 break;
1141 }
1142 pkt_len = le16_to_cpu(tlv->len);
1143 offset += sizeof(*tlv);
1144 pending = len - offset;
1145 if (pkt_len > pending)
1146 break;
1147
1148 offset += pkt_len;
1149
1150 /* Only TLVs of type == 1 are HCI events, no need to process other
1151 * TLVs
1152 */
1153 if (tlv->type != 1)
1154 continue;
1155
1156 bt_dev_dbg(data->hdev, "TLV packet length: %u", pkt_len);
1157 if (pkt_len > HCI_MAX_EVENT_SIZE)
1158 break;
1159 skb = bt_skb_alloc(pkt_len, GFP_KERNEL);
1160 if (!skb)
1161 goto exit_on_error;
1162 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
1163 skb_put_data(skb, tlv->val, pkt_len);
1164
1165 /* copy Intel specific pcie packet type */
1166 val = BTINTEL_PCIE_HCI_EVT_PKT;
1167 memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &val,
1168 BTINTEL_PCIE_HCI_TYPE_LEN);
1169
1170 print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_OFFSET, 16, 1,
1171 tlv->val, pkt_len, false);
1172
1173 btintel_pcie_recv_frame(data, skb);
1174 } while (offset < len);
1175
1176 exit_on_error:
1177 kfree(buf);
1178 }
1179
1180 static void btintel_pcie_msix_hw_exp_handler(struct btintel_pcie_data *data)
1181 {
1182 bt_dev_err(data->hdev, "Received hw exception interrupt");
1183
1184 if (test_and_set_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags))
1185 return;
1186
1187 if (test_and_set_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags))
1188 return;
1189
1190 /* Trigger device core dump when there is HW exception */
1191 if (!test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags))
1192 data->dmp_hdr.trigger_reason = BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT;
1193
1194 queue_work(data->workqueue, &data->rx_work);
1195 }
1196
1197 static void btintel_pcie_rx_work(struct work_struct *work)
1198 {
1199 struct btintel_pcie_data *data = container_of(work,
1200 struct btintel_pcie_data, rx_work);
1201 struct sk_buff *skb;
1202
1203 if (test_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) {
1204 /* Unlike USB products, the controller will not send a hardware
1205 * exception event on exception. Instead the controller writes the
1206 * hardware exception event to device memory along with optional debug
1207 * events, raises an MSI-X interrupt and halts. The driver shall read the
1208 * exception event from device memory and pass it to the stack for
1209 * further processing.
1210 */
1211 btintel_pcie_read_hwexp(data);
1212 clear_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags);
1213 }
1214
1215 if (test_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags)) {
1216 btintel_pcie_dump_traces(data->hdev);
1217 clear_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags);
1218 }
1219
1220 /* Process the sk_buf in queue and send to the HCI layer */
1221 while ((skb = skb_dequeue(&data->rx_skb_q))) {
1222 btintel_pcie_recv_frame(data, skb);
1223 }
1224 }
1225
1226 /* Create an sk_buff with the data, save it to the queue and start RX work */
1227 static int btintel_pcie_submit_rx_work(struct btintel_pcie_data *data, u8 status,
1228 void *buf)
1229 {
1230 int ret, len;
1231 struct rfh_hdr *rfh_hdr;
1232 struct sk_buff *skb;
1233
1234 rfh_hdr = buf;
1235
1236 len = rfh_hdr->packet_len;
1237 if (len <= 0) {
1238 ret = -EINVAL;
1239 goto resubmit;
1240 }
1241
1242 /* Remove RFH header */
1243 buf += sizeof(*rfh_hdr);
1244
1245 skb = alloc_skb(len, GFP_ATOMIC);
1246 if (!skb)
1247 goto resubmit;
1248
1249 skb_put_data(skb, buf, len);
1250 skb_queue_tail(&data->rx_skb_q, skb);
1251 queue_work(data->workqueue, &data->rx_work);
1252
1253 resubmit:
1254 ret = btintel_pcie_submit_rx(data);
1255
1256 return ret;
1257 }
1258
1259 /* Handles the MSI-X interrupt for rx queue 1 which is for RX */
1260 static void btintel_pcie_msix_rx_handle(struct btintel_pcie_data *data)
1261 {
1262 u16 cr_hia, cr_tia;
1263 struct rxq *rxq;
1264 struct urbd1 *urbd1;
1265 struct data_buf *buf;
1266 int ret;
1267 struct hci_dev *hdev = data->hdev;
1268
1269 cr_hia = data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM];
1270 cr_tia = data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
1271
1272 bt_dev_dbg(hdev, "RXQ: cr_hia: %u cr_tia: %u", cr_hia, cr_tia);
1273
1274 /* Check CR_TIA and CR_HIA for change */
1275 if (cr_tia == cr_hia)
1276 return;
1277
1278 rxq = &data->rxq;
1279
1280 /* The firmware sends multiple CDs in a single MSI-X interrupt and it needs to
1281 * process all received CDs in this interrupt.
1282 */
1283 while (cr_tia != cr_hia) {
1284 urbd1 = &rxq->urbd1s[cr_tia];
1285 ipc_print_urbd1(data->hdev, urbd1, cr_tia);
1286
1287 buf = &rxq->bufs[urbd1->frbd_tag];
1288 if (!buf) {
1289 bt_dev_err(hdev, "RXQ: failed to get the DMA buffer for %d",
1290 urbd1->frbd_tag);
1291 return;
1292 }
1293
1294 ret = btintel_pcie_submit_rx_work(data, urbd1->status,
1295 buf->data);
1296 if (ret) {
1297 bt_dev_err(hdev, "RXQ: failed to submit rx request");
1298 return;
1299 }
1300
1301 cr_tia = (cr_tia + 1) % rxq->count;
1302 data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM] = cr_tia;
1303 ipc_print_ia_ring(data->hdev, &data->ia, BTINTEL_PCIE_RXQ_NUM);
1304 }
1305 }
1306
1307 static irqreturn_t btintel_pcie_msix_isr(int irq, void *data)
1308 {
1309 return IRQ_WAKE_THREAD;
1310 }
1311
1312 static inline bool btintel_pcie_is_rxq_empty(struct btintel_pcie_data *data)
1313 {
1314 return data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM] == data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
1315 }
1316
1317 static inline bool btintel_pcie_is_txackq_empty(struct btintel_pcie_data *data)
1318 {
1319 return data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] == data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM];
1320 }
1321
1322 static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
1323 {
1324 struct msix_entry *entry = dev_id;
1325 struct btintel_pcie_data *data = btintel_pcie_get_data(entry);
1326 u32 intr_fh, intr_hw;
1327
1328 spin_lock(&data->irq_lock);
1329 intr_fh = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_CAUSES);
1330 intr_hw = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES);
1331
1332 /* Clear the cause registers to avoid handling the same cause again */
1333 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_CAUSES, intr_fh);
1334 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES, intr_hw);
1335 spin_unlock(&data->irq_lock);
1336
1337 if (unlikely(!(intr_fh | intr_hw))) {
1338 /* Ignore interrupt, inta == 0 */
1339 return IRQ_NONE;
1340 }
1341
1342 /* This interrupt is raised when there is a hardware exception */
1343 if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP)
1344 btintel_pcie_msix_hw_exp_handler(data);
1345
1346 /* This interrupt is triggered by the firmware after updating
1347 * boot_stage register and image_response register
1348 */
1349 if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0)
1350 btintel_pcie_msix_gp0_handler(data);
1351
1352 /* For TX */
1353 if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) {
1354 btintel_pcie_msix_tx_handle(data);
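/* Both cause bits were already cleared above, so also drain the RX queue
 * here if its completion indexes show pending entries.
 */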
1355 if (!btintel_pcie_is_rxq_empty(data))
1356 btintel_pcie_msix_rx_handle(data);
1357 }
1358
1359 /* For RX */
1360 if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1) {
1361 btintel_pcie_msix_rx_handle(data);
1362 if (!btintel_pcie_is_txackq_empty(data))
1363 btintel_pcie_msix_tx_handle(data);
1364 }
1365
1366 /*
1367 * Before sending the interrupt the HW disables it to prevent a nested
1368 * interrupt. This is done by writing 1 to the corresponding bit in
1369 * the mask register. After handling the interrupt, it should be
1370 * re-enabled by clearing this bit. This register is defined as write 1
1371 * clear (W1C) register, meaning that it's cleared by writing 1
1372 * to the bit.
1373 */
1374 btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_MSIX_AUTOMASK_ST,
1375 BIT(entry->entry));
1376
1377 return IRQ_HANDLED;
1378 }
1379
1380 /* This function requests the irq for MSI-X and registers the handlers per irq.
1381 * Currently, it requests only 1 irq for all interrupt causes.
1382 */
1383 static int btintel_pcie_setup_irq(struct btintel_pcie_data *data)
1384 {
1385 int err;
1386 int num_irqs, i;
1387
1388 for (i = 0; i < BTINTEL_PCIE_MSIX_VEC_MAX; i++)
1389 data->msix_entries[i].entry = i;
1390
1391 num_irqs = pci_alloc_irq_vectors(data->pdev, BTINTEL_PCIE_MSIX_VEC_MIN,
1392 BTINTEL_PCIE_MSIX_VEC_MAX, PCI_IRQ_MSIX);
1393 if (num_irqs < 0)
1394 return num_irqs;
1395
1396 data->alloc_vecs = num_irqs;
1397 data->msix_enabled = 1;
1398 data->def_irq = 0;
1399
1400 /* setup irq handler */
1401 for (i = 0; i < data->alloc_vecs; i++) {
1402 struct msix_entry *msix_entry;
1403
1404 msix_entry = &data->msix_entries[i];
1405 msix_entry->vector = pci_irq_vector(data->pdev, i);
1406
1407 err = devm_request_threaded_irq(&data->pdev->dev,
1408 msix_entry->vector,
1409 btintel_pcie_msix_isr,
1410 btintel_pcie_irq_msix_handler,
1411 IRQF_SHARED,
1412 KBUILD_MODNAME,
1413 msix_entry);
1414 if (err) {
1415 pci_free_irq_vectors(data->pdev);
1416 data->alloc_vecs = 0;
1417 return err;
1418 }
1419 }
1420 return 0;
1421 }
1422
1423 struct btintel_pcie_causes_list {
1424 u32 cause;
1425 u32 mask_reg;
1426 u8 cause_num;
1427 };
1428
1429 static struct btintel_pcie_causes_list causes_list[] = {
1430 { BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, 0x00 },
1431 { BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, 0x01 },
1432 { BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, 0x20 },
1433 { BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, 0x23 },
1434 };
1435
1436 /* This function configures the interrupt masks for both HW_INT_CAUSES and
1437 * FH_INT_CAUSES which are meaningful to us.
1438 *
1439 * After resetting the BT function via PCIe FLR or FUNC_CTRL reset, the driver
1440 * needs to call this function again to configure it since the masks
1441 * are reset to 0xFFFFFFFF after reset.
1442 */
1443 static void btintel_pcie_config_msix(struct btintel_pcie_data *data)
1444 {
1445 int i;
1446 int val = data->def_irq | BTINTEL_PCIE_MSIX_NON_AUTO_CLEAR_CAUSE;
1447
1448 /* Set Non Auto Clear Cause */
1449 for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
1450 btintel_pcie_wr_reg8(data,
1451 BTINTEL_PCIE_CSR_MSIX_IVAR(causes_list[i].cause_num),
1452 val);
1453 btintel_pcie_clr_reg_bits(data,
1454 causes_list[i].mask_reg,
1455 causes_list[i].cause);
1456 }
1457
1458 /* Save the initial interrupt mask */
1459 data->fh_init_mask = ~btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK);
1460 data->hw_init_mask = ~btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK);
1461 }
1462
1463 static int btintel_pcie_config_pcie(struct pci_dev *pdev,
1464 struct btintel_pcie_data *data)
1465 {
1466 int err;
1467
1468 err = pcim_enable_device(pdev);
1469 if (err)
1470 return err;
1471
1472 pci_set_master(pdev);
1473
1474 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1475 if (err) {
1476 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1477 if (err)
1478 return err;
1479 }
1480
1481 data->base_addr = pcim_iomap_region(pdev, 0, KBUILD_MODNAME);
1482 if (IS_ERR(data->base_addr))
1483 return PTR_ERR(data->base_addr);
1484
1485 err = btintel_pcie_setup_irq(data);
1486 if (err)
1487 return err;
1488
1489 /* Configure MSI-X with causes list */
1490 btintel_pcie_config_msix(data);
1491
1492 return 0;
1493 }
1494
1495 static void btintel_pcie_init_ci(struct btintel_pcie_data *data,
1496 struct ctx_info *ci)
1497 {
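/* The DMA address of this context information struct is programmed into
 * BTINTEL_PCIE_CSR_CI_ADDR_LSB/MSB_REG in btintel_pcie_enable_bt() so the
 * device can fetch it.
 */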
1498 ci->version = 0x1;
1499 ci->size = sizeof(*ci);
1500 ci->config = 0x0000;
1501 ci->addr_cr_hia = data->ia.cr_hia_p_addr;
1502 ci->addr_tr_tia = data->ia.tr_tia_p_addr;
1503 ci->addr_cr_tia = data->ia.cr_tia_p_addr;
1504 ci->addr_tr_hia = data->ia.tr_hia_p_addr;
1505 ci->num_cr_ia = BTINTEL_PCIE_NUM_QUEUES;
1506 ci->num_tr_ia = BTINTEL_PCIE_NUM_QUEUES;
1507 ci->addr_urbdq0 = data->txq.urbd0s_p_addr;
1508 ci->addr_tfdq = data->txq.tfds_p_addr;
1509 ci->num_tfdq = data->txq.count;
1510 ci->num_urbdq0 = data->txq.count;
1511 ci->tfdq_db_vec = BTINTEL_PCIE_TXQ_NUM;
1512 ci->urbdq0_db_vec = BTINTEL_PCIE_TXQ_NUM;
1513 ci->rbd_size = BTINTEL_PCIE_RBD_SIZE_4K;
1514 ci->addr_frbdq = data->rxq.frbds_p_addr;
1515 ci->num_frbdq = data->rxq.count;
1516 ci->frbdq_db_vec = BTINTEL_PCIE_RXQ_NUM;
1517 ci->addr_urbdq1 = data->rxq.urbd1s_p_addr;
1518 ci->num_urbdq1 = data->rxq.count;
1519 ci->urbdq_db_vec = BTINTEL_PCIE_RXQ_NUM;
1520
1521 ci->dbg_output_mode = 0x01;
1522 ci->dbgc_addr = data->dbgc.frag_p_addr;
1523 ci->dbgc_size = data->dbgc.frag_size;
1524 ci->dbg_preset = 0x00;
1525 }
1526
1527 static void btintel_pcie_free_txq_bufs(struct btintel_pcie_data *data,
1528 struct txq *txq)
1529 {
1530 /* Free data buffers first */
1531 dma_free_coherent(&data->pdev->dev, txq->count * BTINTEL_PCIE_BUFFER_SIZE,
1532 txq->buf_v_addr, txq->buf_p_addr);
1533 kfree(txq->bufs);
1534 }
1535
1536 static int btintel_pcie_setup_txq_bufs(struct btintel_pcie_data *data,
1537 struct txq *txq)
1538 {
1539 int i;
1540 struct data_buf *buf;
1541
1542 /* Allocate the same number of buffers as the descriptor */
1543 txq->bufs = kmalloc_array(txq->count, sizeof(*buf), GFP_KERNEL);
1544 if (!txq->bufs)
1545 return -ENOMEM;
1546
1547 /* Allocate full chunk of data buffer for DMA first and do indexing and
1548 * initialization next, so it can be freed easily
1549 */
1550 txq->buf_v_addr = dma_alloc_coherent(&data->pdev->dev,
1551 txq->count * BTINTEL_PCIE_BUFFER_SIZE,
1552 &txq->buf_p_addr,
1553 GFP_KERNEL | __GFP_NOWARN);
1554 if (!txq->buf_v_addr) {
1555 kfree(txq->bufs);
1556 return -ENOMEM;
1557 }
1558
1559 /* Setup the allocated DMA buffer to bufs. Each data_buf should
1560 * have virtual address and physical address
1561 */
1562 for (i = 0; i < txq->count; i++) {
1563 buf = &txq->bufs[i];
1564 buf->data_p_addr = txq->buf_p_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
1565 buf->data = txq->buf_v_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
1566 }
1567
1568 return 0;
1569 }
1570
1571 static void btintel_pcie_free_rxq_bufs(struct btintel_pcie_data *data,
1572 struct rxq *rxq)
1573 {
1574 /* Free data buffers first */
1575 dma_free_coherent(&data->pdev->dev, rxq->count * BTINTEL_PCIE_BUFFER_SIZE,
1576 rxq->buf_v_addr, rxq->buf_p_addr);
1577 kfree(rxq->bufs);
1578 }
1579
1580 static int btintel_pcie_setup_rxq_bufs(struct btintel_pcie_data *data,
1581 struct rxq *rxq)
1582 {
1583 int i;
1584 struct data_buf *buf;
1585
1586 /* Allocate the same number of buffers as the descriptor */
1587 rxq->bufs = kmalloc_array(rxq->count, sizeof(*buf), GFP_KERNEL);
1588 if (!rxq->bufs)
1589 return -ENOMEM;
1590
1591 /* Allocate full chunk of data buffer for DMA first and do indexing and
1592 * initialization next, so it can be freed easily
1593 */
1594 rxq->buf_v_addr = dma_alloc_coherent(&data->pdev->dev,
1595 rxq->count * BTINTEL_PCIE_BUFFER_SIZE,
1596 &rxq->buf_p_addr,
1597 GFP_KERNEL | __GFP_NOWARN);
1598 if (!rxq->buf_v_addr) {
1599 kfree(rxq->bufs);
1600 return -ENOMEM;
1601 }
1602
1603 /* Setup the allocated DMA buffer to bufs. Each data_buf should
1604 * have virtual address and physical address
1605 */
1606 for (i = 0; i < rxq->count; i++) {
1607 buf = &rxq->bufs[i];
1608 buf->data_p_addr = rxq->buf_p_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
1609 buf->data = rxq->buf_v_addr + (i * BTINTEL_PCIE_BUFFER_SIZE);
1610 }
1611
1612 return 0;
1613 }
1614
1615 static void btintel_pcie_setup_ia(struct btintel_pcie_data *data,
1616 dma_addr_t p_addr, void *v_addr,
1617 struct ia *ia)
1618 {
1619 /* TR Head Index Array */
1620 ia->tr_hia_p_addr = p_addr;
1621 ia->tr_hia = v_addr;
1622
1623 /* TR Tail Index Array */
1624 ia->tr_tia_p_addr = p_addr + sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES;
1625 ia->tr_tia = v_addr + sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES;
1626
1627 /* CR Head index Array */
1628 ia->cr_hia_p_addr = p_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 2);
1629 ia->cr_hia = v_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 2);
1630
1631 /* CR Tail Index Array */
1632 ia->cr_tia_p_addr = p_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 3);
1633 ia->cr_tia = v_addr + (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 3);
1634 }
1635
1636 static void btintel_pcie_free(struct btintel_pcie_data *data)
1637 {
1638 btintel_pcie_free_rxq_bufs(data, &data->rxq);
1639 btintel_pcie_free_txq_bufs(data, &data->txq);
1640
1641 dma_pool_free(data->dma_pool, data->dma_v_addr, data->dma_p_addr);
1642 dma_pool_destroy(data->dma_pool);
1643 }
1644
1645 /* Allocate tx and rx queues, any related data structures and buffers.
1646 */
1647 static int btintel_pcie_alloc(struct btintel_pcie_data *data)
1648 {
1649 int err = 0;
1650 size_t total;
1651 dma_addr_t p_addr;
1652 void *v_addr;
1653
1654 /* Allocate the chunk of DMA memory for descriptors, index array, and
1655 * context information, instead of allocating individually.
1656 * The DMA memory for data buffer is allocated while setting up the
1657 * each queue.
1658 *
1659 * Total size is sum of the following
1660 * + size of TFD * Number of descriptors in queue
1661 * + size of URBD0 * Number of descriptors in queue
1662 * + size of FRBD * Number of descriptors in queue
1663 * + size of URBD1 * Number of descriptors in queue
1664 * + size of index * Number of queues(2) * type of index array(4)
1665 * + size of context information
1666 */
1667 total = (sizeof(struct tfd) + sizeof(struct urbd0) + sizeof(struct frbd)
1668 + sizeof(struct urbd1)) * BTINTEL_DESCS_COUNT;
1669
1670 /* Add the sum of size of index array and size of ci struct */
1671 total += (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4) + sizeof(struct ctx_info);
1672
1673 /* Allocate DMA Pool */
1674 data->dma_pool = dma_pool_create(KBUILD_MODNAME, &data->pdev->dev,
1675 total, BTINTEL_PCIE_DMA_POOL_ALIGNMENT, 0);
1676 if (!data->dma_pool) {
1677 err = -ENOMEM;
1678 goto exit_error;
1679 }
1680
1681 v_addr = dma_pool_zalloc(data->dma_pool, GFP_KERNEL | __GFP_NOWARN,
1682 &p_addr);
1683 if (!v_addr) {
1684 dma_pool_destroy(data->dma_pool);
1685 err = -ENOMEM;
1686 goto exit_error;
1687 }
1688
1689 data->dma_p_addr = p_addr;
1690 data->dma_v_addr = v_addr;
1691
1692 /* Setup descriptor count */
1693 data->txq.count = BTINTEL_DESCS_COUNT;
1694 data->rxq.count = BTINTEL_DESCS_COUNT;
1695
1696 /* Setup tfds */
1697 data->txq.tfds_p_addr = p_addr;
1698 data->txq.tfds = v_addr;
1699
1700 p_addr += (sizeof(struct tfd) * BTINTEL_DESCS_COUNT);
1701 v_addr += (sizeof(struct tfd) * BTINTEL_DESCS_COUNT);
1702
1703 /* Setup urbd0 */
1704 data->txq.urbd0s_p_addr = p_addr;
1705 data->txq.urbd0s = v_addr;
1706
1707 p_addr += (sizeof(struct urbd0) * BTINTEL_DESCS_COUNT);
1708 v_addr += (sizeof(struct urbd0) * BTINTEL_DESCS_COUNT);
1709
1710 /* Setup FRBD*/
1711 data->rxq.frbds_p_addr = p_addr;
1712 data->rxq.frbds = v_addr;
1713
1714 p_addr += (sizeof(struct frbd) * BTINTEL_DESCS_COUNT);
1715 v_addr += (sizeof(struct frbd) * BTINTEL_DESCS_COUNT);
1716
1717 /* Setup urbd1 */
1718 data->rxq.urbd1s_p_addr = p_addr;
1719 data->rxq.urbd1s = v_addr;
1720
1721 p_addr += (sizeof(struct urbd1) * BTINTEL_DESCS_COUNT);
1722 v_addr += (sizeof(struct urbd1) * BTINTEL_DESCS_COUNT);
1723
1724 /* Setup data buffers for txq */
1725 err = btintel_pcie_setup_txq_bufs(data, &data->txq);
1726 if (err)
1727 goto exit_error_pool;
1728
1729 /* Setup data buffers for rxq */
1730 err = btintel_pcie_setup_rxq_bufs(data, &data->rxq);
1731 if (err)
1732 goto exit_error_txq;
1733
1734 /* Setup Index Array */
1735 btintel_pcie_setup_ia(data, p_addr, v_addr, &data->ia);
1736
1737 /* Setup data buffers for dbgc */
1738 err = btintel_pcie_setup_dbgc(data);
1739 if (err)
1740 goto exit_error_txq;
1741
1742 /* Setup Context Information */
1743 p_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4;
1744 v_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4;
1745
1746 data->ci = v_addr;
1747 data->ci_p_addr = p_addr;
1748
1749 /* Initialize the CI */
1750 btintel_pcie_init_ci(data, data->ci);
1751
1752 return 0;
1753
1754 exit_error_txq:
1755 btintel_pcie_free_txq_bufs(data, &data->txq);
1756 exit_error_pool:
1757 dma_pool_free(data->dma_pool, data->dma_v_addr, data->dma_p_addr);
1758 dma_pool_destroy(data->dma_pool);
1759 exit_error:
1760 return err;
1761 }
1762
1763 static int btintel_pcie_open(struct hci_dev *hdev)
1764 {
1765 bt_dev_dbg(hdev, "");
1766
1767 return 0;
1768 }
1769
1770 static int btintel_pcie_close(struct hci_dev *hdev)
1771 {
1772 bt_dev_dbg(hdev, "");
1773
1774 return 0;
1775 }
1776
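/* Synthesize an HCI Command Complete event (ncmd = 1, status = 0x00) for
 * @opcode and feed it to the HCI core. Used for commands such as the
 * 0xfc01 boot command, for which the firmware never sends a completion.
 */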
1777 static int btintel_pcie_inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
1778 {
1779 struct sk_buff *skb;
1780 struct hci_event_hdr *hdr;
1781 struct hci_ev_cmd_complete *evt;
1782
1783 skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL);
1784 if (!skb)
1785 return -ENOMEM;
1786
1787 hdr = (struct hci_event_hdr *)skb_put(skb, sizeof(*hdr));
1788 hdr->evt = HCI_EV_CMD_COMPLETE;
1789 hdr->plen = sizeof(*evt) + 1;
1790
1791 evt = (struct hci_ev_cmd_complete *)skb_put(skb, sizeof(*evt));
1792 evt->ncmd = 0x01;
1793 evt->opcode = cpu_to_le16(opcode);
1794
1795 *(u8 *)skb_put(skb, 1) = 0x00;
1796
1797 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
1798
1799 return hci_recv_frame(hdev, skb);
1800 }
1801
1802 static int btintel_pcie_send_frame(struct hci_dev *hdev,
1803 struct sk_buff *skb)
1804 {
1805 struct btintel_pcie_data *data = hci_get_drvdata(hdev);
1806 struct hci_command_hdr *cmd;
1807 __u16 opcode = ~0;
1808 int ret;
1809 u32 type;
1810 u32 old_ctxt;
1811
1812 /* Due to a firmware limitation, the packet type header must be
1813 * 4 bytes, unlike the 1 byte used for UART. Over UART the firmware can
1814 * read the first byte to get the packet type and redirect the rest of
1815 * the packet to the right handler.
1816 *
1817 * For PCIe, the THF (Transfer Flow Handler) fetches 4 bytes of data
1818 * from DMA memory, and by the time it reads the first 4 bytes it has
1819 * already consumed part of the packet. Thus the packet type indicator
1820 * for iBT PCIe is 4 bytes.
1821 *
1822 * Luckily, when the HCI core creates the skb it reserves 8 bytes of
1823 * headroom for profile and driver use, so the driver can prepend the
1824 * iBT PCIe packet type before sending the data to the device.
1825 */
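/* For example, on a little-endian host an HCI_OP_RESET command
 * (opcode 0x0c03, no parameters) reaches the device as:
 *
 *   01 00 00 00 03 0c 00
 *   |---------| |------|
 *   4-byte type HCI command packet
 */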
1826 switch (hci_skb_pkt_type(skb)) {
1827 case HCI_COMMAND_PKT:
1828 type = BTINTEL_PCIE_HCI_CMD_PKT;
1829 cmd = (void *)skb->data;
1830 opcode = le16_to_cpu(cmd->opcode);
1831 if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
1835 /* When the 0xfc01 command is issued to boot into
1836 * the operational firmware, it will actually not
1837 * send a command complete event. To keep the flow
1838 * control working inject that event here.
1839 */
1840 if (opcode == 0xfc01)
1841 btintel_pcie_inject_cmd_complete(hdev, opcode);
1842 }
1843 /* Firmware raises alive interrupt on HCI_OP_RESET */
1844 if (opcode == HCI_OP_RESET)
1845 data->gp0_received = false;
1846
1847 hdev->stat.cmd_tx++;
1848 break;
1849 case HCI_ACLDATA_PKT:
1850 type = BTINTEL_PCIE_HCI_ACL_PKT;
1851 hdev->stat.acl_tx++;
1852 break;
1853 case HCI_SCODATA_PKT:
1854 type = BTINTEL_PCIE_HCI_SCO_PKT;
1855 hdev->stat.sco_tx++;
1856 break;
1857 case HCI_ISODATA_PKT:
1858 type = BTINTEL_PCIE_HCI_ISO_PKT;
1859 break;
1860 default:
1861 bt_dev_err(hdev, "Unknown HCI packet type");
1862 return -EILSEQ;
1863 }
1864 memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &type,
1865 BTINTEL_PCIE_HCI_TYPE_LEN);
1866
1867 ret = btintel_pcie_send_sync(data, skb);
1868 if (ret) {
1869 hdev->stat.err_tx++;
1870 bt_dev_err(hdev, "Failed to send frame (%d)", ret);
1871 goto exit_error;
1872 }
1873
1874 if (type == BTINTEL_PCIE_HCI_CMD_PKT &&
1875 (opcode == HCI_OP_RESET || opcode == 0xfc01)) {
1876 old_ctxt = data->alive_intr_ctxt;
1877 data->alive_intr_ctxt =
1878 (opcode == 0xfc01 ? BTINTEL_PCIE_INTEL_HCI_RESET1 :
1879 BTINTEL_PCIE_HCI_RESET);
1880 bt_dev_dbg(data->hdev, "sent cmd: 0x%4.4x alive context changed: %s -> %s",
1881 opcode, btintel_pcie_alivectxt_state2str(old_ctxt),
1882 btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
1883 if (opcode == HCI_OP_RESET) {
1884 ret = wait_event_timeout(data->gp0_wait_q,
1885 data->gp0_received,
1886 msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
1887 if (!ret) {
1888 hdev->stat.err_tx++;
1889 bt_dev_err(hdev, "No alive interrupt received for %s",
1890 btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
1891 ret = -ETIME;
1892 goto exit_error;
1893 }
1894 }
1895 }
1896 hdev->stat.byte_tx += skb->len;
1897 kfree_skb(skb);
1898
1899 exit_error:
1900 return ret;
1901 }
1902
1903 static void btintel_pcie_release_hdev(struct btintel_pcie_data *data)
1904 {
1905 struct hci_dev *hdev;
1906
1907 hdev = data->hdev;
1908 hci_unregister_dev(hdev);
1909 hci_free_dev(hdev);
1910 data->hdev = NULL;
1911 }
1912
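/* One-time device setup: read the Intel version (0xfc05) over HCI, parse
 * the TLV payload, verify the hardware platform and variant, download the
 * operational firmware via btintel_bootloader_setup_tlv() and register
 * the devcoredump callbacks.
 */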
1913 static int btintel_pcie_setup_internal(struct hci_dev *hdev)
1914 {
1915 struct btintel_pcie_data *data = hci_get_drvdata(hdev);
1916 const u8 param[1] = { 0xFF };
1917 struct intel_version_tlv ver_tlv;
1918 struct sk_buff *skb;
1919 int err;
1920
1921 BT_DBG("%s", hdev->name);
1922
1923 skb = __hci_cmd_sync(hdev, 0xfc05, 1, param, HCI_CMD_TIMEOUT);
1924 if (IS_ERR(skb)) {
1925 bt_dev_err(hdev, "Reading Intel version command failed (%ld)",
1926 PTR_ERR(skb));
1927 return PTR_ERR(skb);
1928 }
1929
1930 /* Check the status */
1931 if (skb->data[0]) {
1932 bt_dev_err(hdev, "Intel Read Version command failed (%02x)",
1933 skb->data[0]);
1934 err = -EIO;
1935 goto exit_error;
1936 }
1937
1938 /* Apply the common HCI quirks for Intel device */
1939 set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
1940 set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
1941 set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks);
1942
1943 /* Set up the quality report callback for Intel devices */
1944 hdev->set_quality_report = btintel_set_quality_report;
1945
1946 memset(&ver_tlv, 0, sizeof(ver_tlv));
1947 /* For TLV type device, parse the tlv data */
1948 err = btintel_parse_version_tlv(hdev, &ver_tlv, skb);
1949 if (err) {
1950 bt_dev_err(hdev, "Failed to parse TLV version information");
1951 goto exit_error;
1952 }
1953
1954 switch (INTEL_HW_PLATFORM(ver_tlv.cnvi_bt)) {
1955 case 0x37:
1956 break;
1957 default:
1958 bt_dev_err(hdev, "Unsupported Intel hardware platform (0x%2x)",
1959 INTEL_HW_PLATFORM(ver_tlv.cnvi_bt));
1960 err = -EINVAL;
1961 goto exit_error;
1962 }
1963
1964 /* Check for supported iBT hardware variants of this firmware
1965 * loading method.
1966 *
1967 * This check has been put in place to ensure correct forward
1968 * compatibility options when newer hardware variants come
1969 * along.
1970 */
1971 switch (INTEL_HW_VARIANT(ver_tlv.cnvi_bt)) {
1972 case 0x1e: /* BzrI */
1973 case 0x1f: /* ScP */
1974 /* Display version information of TLV type */
1975 btintel_version_info_tlv(hdev, &ver_tlv);
1976
1977 /* Apply the device specific HCI quirks for TLV based devices
1978 *
1979 * All TLV based devices support WBS
1980 */
1981 set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
1982
1983 /* Setup MSFT Extension support */
1984 btintel_set_msft_opcode(hdev,
1985 INTEL_HW_VARIANT(ver_tlv.cnvi_bt));
1986
1987 err = btintel_bootloader_setup_tlv(hdev, &ver_tlv);
1988 if (err)
1989 goto exit_error;
1990 break;
1991 default:
1992 bt_dev_err(hdev, "Unsupported Intel hw variant (%u)",
1993 INTEL_HW_VARIANT(ver_tlv.cnvi_bt));
1994 err = -EINVAL;
1995 goto exit_error;
1997 }
1998
1999 data->dmp_hdr.cnvi_top = ver_tlv.cnvi_top;
2000 data->dmp_hdr.cnvr_top = ver_tlv.cnvr_top;
2001 data->dmp_hdr.fw_timestamp = ver_tlv.timestamp;
2002 data->dmp_hdr.fw_build_type = ver_tlv.build_type;
2003 data->dmp_hdr.fw_build_num = ver_tlv.build_num;
2004 data->dmp_hdr.cnvi_bt = ver_tlv.cnvi_bt;
2005
2006 if (ver_tlv.img_type == 0x02 || ver_tlv.img_type == 0x03)
2007 data->dmp_hdr.fw_git_sha1 = ver_tlv.git_sha1;
2008
2009 err = hci_devcd_register(hdev, btintel_pcie_dump_traces, btintel_pcie_dump_hdr,
2010 btintel_pcie_dump_notify);
2011 if (err) {
2012 bt_dev_err(hdev, "Failed to register coredump (%d)", err);
2013 goto exit_error;
2014 }
2015
2016 btintel_print_fseq_info(hdev);
2017 exit_error:
2018 kfree_skb(skb);
2019
2020 return err;
2021 }
2022
2023 static int btintel_pcie_setup(struct hci_dev *hdev)
2024 {
2025 int err, fw_dl_retry = 0;
2026 struct btintel_pcie_data *data = hci_get_drvdata(hdev);
2027
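/* If setup (including the firmware download) fails, recover the device
 * once: function reset, re-initialize the index arrays and MSI-X vectors,
 * re-enable the device and restart RX before retrying.
 */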
2028 while ((err = btintel_pcie_setup_internal(hdev)) && fw_dl_retry++ < 1) {
2029 bt_dev_err(hdev, "Firmware download retry count: %d",
2030 fw_dl_retry);
2031 err = btintel_pcie_reset_bt(data);
2032 if (err) {
2033 bt_dev_err(hdev, "Failed to do shr reset: %d", err);
2034 break;
2035 }
2036 usleep_range(10000, 12000);
2037 btintel_pcie_reset_ia(data);
2038 btintel_pcie_config_msix(data);
2039 err = btintel_pcie_enable_bt(data);
2040 if (err) {
2041 bt_dev_err(hdev, "Failed to enable hardware: %d", err);
2042 break;
2043 }
2044 btintel_pcie_start_rx(data);
2045 }
2046 return err;
2047 }
2048
2049 static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
2050 {
2051 int err;
2052 struct hci_dev *hdev;
2053
2054 hdev = hci_alloc_dev_priv(sizeof(struct btintel_data));
2055 if (!hdev)
2056 return -ENOMEM;
2057
2058 hdev->bus = HCI_PCI;
2059 hci_set_drvdata(hdev, data);
2060
2061 data->hdev = hdev;
2062 SET_HCIDEV_DEV(hdev, &data->pdev->dev);
2063
2064 hdev->manufacturer = 2;
2065 hdev->open = btintel_pcie_open;
2066 hdev->close = btintel_pcie_close;
2067 hdev->send = btintel_pcie_send_frame;
2068 hdev->setup = btintel_pcie_setup;
2069 hdev->shutdown = btintel_shutdown_combined;
2070 hdev->hw_error = btintel_hw_error;
2071 hdev->set_diag = btintel_set_diag;
2072 hdev->set_bdaddr = btintel_set_bdaddr;
2073
2074 err = hci_register_dev(hdev);
2075 if (err < 0) {
2076 BT_ERR("Failed to register hdev (%d)", err);
2077 goto exit_error;
2078 }
2079
2080 data->dmp_hdr.driver_name = KBUILD_MODNAME;
2081 return 0;
2082
2083 exit_error:
2084 hci_free_dev(hdev);
2085 return err;
2086 }
2087
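/* PCI probe: allocate driver data, initialize locks, wait queues and the
 * ordered RX workqueue, configure the PCIe device, allocate the DMA
 * queues, enable the device, read the CNVi/CNVr identifiers from CSR,
 * start RX and register the HCI device.
 */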
2088 static int btintel_pcie_probe(struct pci_dev *pdev,
2089 const struct pci_device_id *ent)
2090 {
2091 int err;
2092 struct btintel_pcie_data *data;
2093
2094 if (!pdev)
2095 return -ENODEV;
2096
2097 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
2098 if (!data)
2099 return -ENOMEM;
2100
2101 data->pdev = pdev;
2102
2103 spin_lock_init(&data->irq_lock);
2104 spin_lock_init(&data->hci_rx_lock);
2105
2106 init_waitqueue_head(&data->gp0_wait_q);
2107 data->gp0_received = false;
2108
2109 init_waitqueue_head(&data->tx_wait_q);
2110 data->tx_wait_done = false;
2111
2112 data->workqueue = alloc_ordered_workqueue(KBUILD_MODNAME, WQ_HIGHPRI);
2113 if (!data->workqueue)
2114 return -ENOMEM;
2115
2116 skb_queue_head_init(&data->rx_skb_q);
2117 INIT_WORK(&data->rx_work, btintel_pcie_rx_work);
2118
2119 data->boot_stage_cache = 0x00;
2120 data->img_resp_cache = 0x00;
2121
2122 err = btintel_pcie_config_pcie(pdev, data);
2123 if (err)
2124 goto exit_error;
2125
2126 pci_set_drvdata(pdev, data);
2127
2128 err = btintel_pcie_alloc(data);
2129 if (err)
2130 goto exit_error;
2131
2132 err = btintel_pcie_enable_bt(data);
2133 if (err)
2134 goto exit_error;
2135
2136 /* CNV information (CNVi and CNVr) is in CSR */
2137 data->cnvi = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_HW_REV_REG);
2138
2139 data->cnvr = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_RF_ID_REG);
2140
2141 err = btintel_pcie_start_rx(data);
2142 if (err)
2143 goto exit_error;
2144
2145 err = btintel_pcie_setup_hdev(data);
2146 if (err)
2147 goto exit_error;
2148
2149 bt_dev_dbg(data->hdev, "cnvi: 0x%8.8x cnvr: 0x%8.8x", data->cnvi,
2150 data->cnvr);
2151 return 0;
2152
2153 exit_error:
2154 /* reset device before exit */
2155 btintel_pcie_reset_bt(data);
2156
2157 pci_clear_master(pdev);
2158
2159 pci_set_drvdata(pdev, NULL);
2160
2161 return err;
2162 }
2163
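/* PCI remove: reset the device, free the MSI-X vectors, unregister the
 * HCI device, flush and destroy the RX workqueue and release the DMA
 * queues.
 */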
2164 static void btintel_pcie_remove(struct pci_dev *pdev)
2165 {
2166 struct btintel_pcie_data *data;
2167
2168 data = pci_get_drvdata(pdev);
2169
2170 btintel_pcie_reset_bt(data);
2171 for (int i = 0; i < data->alloc_vecs; i++) {
2172 struct msix_entry *msix_entry;
2173
2174 msix_entry = &data->msix_entries[i];
2175 free_irq(msix_entry->vector, msix_entry);
2176 }
2177
2178 pci_free_irq_vectors(pdev);
2179
2180 btintel_pcie_release_hdev(data);
2181
2182 flush_work(&data->rx_work);
2183
2184 destroy_workqueue(data->workqueue);
2185
2186 btintel_pcie_free(data);
2187
2188 pci_clear_master(pdev);
2189
2190 pci_set_drvdata(pdev, NULL);
2191 }
2192
2193 #ifdef CONFIG_DEV_COREDUMP
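/* Invoked when userspace writes to the device's "coredump" sysfs
 * attribute; marks a user-triggered dump and kicks rx_work to collect
 * the firmware traces.
 */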
2194 static void btintel_pcie_coredump(struct device *dev)
2195 {
2196 struct pci_dev *pdev = to_pci_dev(dev);
2197 struct btintel_pcie_data *data = pci_get_drvdata(pdev);
2198
2199 if (test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags))
2200 return;
2201
2202 data->dmp_hdr.trigger_reason = BTINTEL_PCIE_TRIGGER_REASON_USER_TRIGGER;
2203 queue_work(data->workqueue, &data->rx_work);
2204 }
2205 #endif
2206
2207 static struct pci_driver btintel_pcie_driver = {
2208 .name = KBUILD_MODNAME,
2209 .id_table = btintel_pcie_table,
2210 .probe = btintel_pcie_probe,
2211 .remove = btintel_pcie_remove,
2212 #ifdef CONFIG_DEV_COREDUMP
2213 .driver.coredump = btintel_pcie_coredump
2214 #endif
2215 };
2216 module_pci_driver(btintel_pcie_driver);
2217
2218 MODULE_AUTHOR("Tedd Ho-Jeong An <tedd.an@intel.com>");
2219 MODULE_DESCRIPTION("Intel Bluetooth PCIe transport driver ver " VERSION);
2220 MODULE_VERSION(VERSION);
2221 MODULE_LICENSE("GPL");
2222