1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/jiffies.h>
29 #include <linux/module.h>
30 #include <linux/kmod.h>
31
32 #include <linux/types.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/skbuff.h>
41 #include <linux/workqueue.h>
42 #include <linux/interrupt.h>
43 #include <linux/notifier.h>
44 #include <linux/rfkill.h>
45 #include <linux/timer.h>
46 #include <linux/crypto.h>
47 #include <net/sock.h>
48
49 #include <asm/system.h>
50 #include <linux/uaccess.h>
51 #include <asm/unaligned.h>
52
53 #include <net/bluetooth/bluetooth.h>
54 #include <net/bluetooth/hci_core.h>
55
56 #define AUTO_OFF_TIMEOUT 2000
57
58 bool enable_hs;
59
60 static void hci_rx_work(struct work_struct *work);
61 static void hci_cmd_work(struct work_struct *work);
62 static void hci_tx_work(struct work_struct *work);
63
64 /* HCI device list */
65 LIST_HEAD(hci_dev_list);
66 DEFINE_RWLOCK(hci_dev_list_lock);
67
68 /* HCI callback list */
69 LIST_HEAD(hci_cb_list);
70 DEFINE_RWLOCK(hci_cb_list_lock);
71
72 /* HCI notifiers list */
73 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
74
75 /* ---- HCI notifications ---- */
76
77 int hci_register_notifier(struct notifier_block *nb)
78 {
79 return atomic_notifier_chain_register(&hci_notifier, nb);
80 }
81
82 int hci_unregister_notifier(struct notifier_block *nb)
83 {
84 return atomic_notifier_chain_unregister(&hci_notifier, nb);
85 }
86
87 static void hci_notify(struct hci_dev *hdev, int event)
88 {
89 atomic_notifier_call_chain(&hci_notifier, event, hdev);
90 }
91
92 /* ---- HCI requests ---- */
93
94 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
95 {
96 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
97
98 /* If this is the init phase, check whether the completed command matches
99 * the last init command and, if not, just return.
100 */
101 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
102 return;
103
104 if (hdev->req_status == HCI_REQ_PEND) {
105 hdev->req_result = result;
106 hdev->req_status = HCI_REQ_DONE;
107 wake_up_interruptible(&hdev->req_wait_q);
108 }
109 }
110
111 static void hci_req_cancel(struct hci_dev *hdev, int err)
112 {
113 BT_DBG("%s err 0x%2.2x", hdev->name, err);
114
115 if (hdev->req_status == HCI_REQ_PEND) {
116 hdev->req_result = err;
117 hdev->req_status = HCI_REQ_CANCELED;
118 wake_up_interruptible(&hdev->req_wait_q);
119 }
120 }
121
122 /* Execute request and wait for completion. */
123 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
124 unsigned long opt, __u32 timeout)
125 {
126 DECLARE_WAITQUEUE(wait, current);
127 int err = 0;
128
129 BT_DBG("%s start", hdev->name);
130
131 hdev->req_status = HCI_REQ_PEND;
132
133 add_wait_queue(&hdev->req_wait_q, &wait);
134 set_current_state(TASK_INTERRUPTIBLE);
135
136 req(hdev, opt);
137 schedule_timeout(timeout);
138
139 remove_wait_queue(&hdev->req_wait_q, &wait);
140
141 if (signal_pending(current))
142 return -EINTR;
143
144 switch (hdev->req_status) {
145 case HCI_REQ_DONE:
146 err = -bt_to_errno(hdev->req_result);
147 break;
148
149 case HCI_REQ_CANCELED:
150 err = -hdev->req_result;
151 break;
152
153 default:
154 err = -ETIMEDOUT;
155 break;
156 }
157
158 hdev->req_status = hdev->req_result = 0;
159
160 BT_DBG("%s end: err %d", hdev->name, err);
161
162 return err;
163 }
164
165 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
166 unsigned long opt, __u32 timeout)
167 {
168 int ret;
169
170 if (!test_bit(HCI_UP, &hdev->flags))
171 return -ENETDOWN;
172
173 /* Serialize all requests */
174 hci_req_lock(hdev);
175 ret = __hci_request(hdev, req, opt, timeout);
176 hci_req_unlock(hdev);
177
178 return ret;
179 }
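/* Illustrative only (not a call site in this file): a request builder such
 * as hci_scan_req() below is passed together with its option word, e.g.
 *
 *	err = hci_request(hdev, hci_scan_req, SCAN_INQUIRY | SCAN_PAGE,
 *			  msecs_to_jiffies(HCI_INIT_TIMEOUT));
 *
 * The builder queues its HCI commands and __hci_request() sleeps until
 * hci_req_complete() reports the result or the timeout expires.
 */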
180
181 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
182 {
183 BT_DBG("%s %ld", hdev->name, opt);
184
185 /* Reset device */
186 set_bit(HCI_RESET, &hdev->flags);
187 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
188 }
189
190 static void bredr_init(struct hci_dev *hdev)
191 {
192 struct hci_cp_delete_stored_link_key cp;
193 __le16 param;
194 __u8 flt_type;
195
196 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
197
198 /* Mandatory initialization */
199
200 /* Reset */
201 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
202 set_bit(HCI_RESET, &hdev->flags);
203 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
204 }
205
206 /* Read Local Supported Features */
207 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
208
209 /* Read Local Version */
210 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
211
212 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
213 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
214
215 /* Read BD Address */
216 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
217
218 /* Read Class of Device */
219 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
220
221 /* Read Local Name */
222 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
223
224 /* Read Voice Setting */
225 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
226
227 /* Optional initialization */
228
229 /* Clear Event Filters */
230 flt_type = HCI_FLT_CLEAR_ALL;
231 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
232
233 /* Connection accept timeout ~20 secs */
234 param = cpu_to_le16(0x7d00);
235 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
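/* 0x7d00 = 32000 baseband slots * 0.625 ms = 20 s */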
236
237 bacpy(&cp.bdaddr, BDADDR_ANY);
238 cp.delete_all = 1;
239 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
240 }
241
242 static void amp_init(struct hci_dev *hdev)
243 {
244 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
245
246 /* Reset */
247 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
248
249 /* Read Local Version */
250 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
251 }
252
253 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
254 {
255 struct sk_buff *skb;
256
257 BT_DBG("%s %ld", hdev->name, opt);
258
259 /* Driver initialization */
260
261 /* Special commands */
262 while ((skb = skb_dequeue(&hdev->driver_init))) {
263 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
264 skb->dev = (void *) hdev;
265
266 skb_queue_tail(&hdev->cmd_q, skb);
267 queue_work(hdev->workqueue, &hdev->cmd_work);
268 }
269 skb_queue_purge(&hdev->driver_init);
270
271 switch (hdev->dev_type) {
272 case HCI_BREDR:
273 bredr_init(hdev);
274 break;
275
276 case HCI_AMP:
277 amp_init(hdev);
278 break;
279
280 default:
281 BT_ERR("Unknown device type %d", hdev->dev_type);
282 break;
283 }
284
285 }
286
287 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
288 {
289 BT_DBG("%s", hdev->name);
290
291 /* Read LE buffer size */
292 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
293 }
294
295 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
296 {
297 __u8 scan = opt;
298
299 BT_DBG("%s %x", hdev->name, scan);
300
301 /* Inquiry and Page scans */
302 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
303 }
304
305 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
306 {
307 __u8 auth = opt;
308
309 BT_DBG("%s %x", hdev->name, auth);
310
311 /* Authentication */
312 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
313 }
314
315 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
316 {
317 __u8 encrypt = opt;
318
319 BT_DBG("%s %x", hdev->name, encrypt);
320
321 /* Encryption */
322 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
323 }
324
325 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
326 {
327 __le16 policy = cpu_to_le16(opt);
328
329 BT_DBG("%s %x", hdev->name, policy);
330
331 /* Default link policy */
332 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
333 }
334
335 /* Get HCI device by index.
336 * Device is held on return. */
337 struct hci_dev *hci_dev_get(int index)
338 {
339 struct hci_dev *hdev = NULL, *d;
340
341 BT_DBG("%d", index);
342
343 if (index < 0)
344 return NULL;
345
346 read_lock(&hci_dev_list_lock);
347 list_for_each_entry(d, &hci_dev_list, list) {
348 if (d->id == index) {
349 hdev = hci_dev_hold(d);
350 break;
351 }
352 }
353 read_unlock(&hci_dev_list_lock);
354 return hdev;
355 }
356
357 /* ---- Inquiry support ---- */
358 static void inquiry_cache_flush(struct hci_dev *hdev)
359 {
360 struct inquiry_cache *cache = &hdev->inq_cache;
361 struct inquiry_entry *next = cache->list, *e;
362
363 BT_DBG("cache %p", cache);
364
365 cache->list = NULL;
366 while ((e = next)) {
367 next = e->next;
368 kfree(e);
369 }
370 }
371
372 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
373 {
374 struct inquiry_cache *cache = &hdev->inq_cache;
375 struct inquiry_entry *e;
376
377 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
378
379 for (e = cache->list; e; e = e->next)
380 if (!bacmp(&e->data.bdaddr, bdaddr))
381 break;
382 return e;
383 }
384
385 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
386 {
387 struct inquiry_cache *cache = &hdev->inq_cache;
388 struct inquiry_entry *ie;
389
390 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
391
392 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
393 if (!ie) {
394 /* Entry not in the cache. Add new one. */
395 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
396 if (!ie)
397 return;
398
399 ie->next = cache->list;
400 cache->list = ie;
401 }
402
403 memcpy(&ie->data, data, sizeof(*data));
404 ie->timestamp = jiffies;
405 cache->timestamp = jiffies;
406 }
407
408 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
409 {
410 struct inquiry_cache *cache = &hdev->inq_cache;
411 struct inquiry_info *info = (struct inquiry_info *) buf;
412 struct inquiry_entry *e;
413 int copied = 0;
414
415 for (e = cache->list; e && copied < num; e = e->next, copied++) {
416 struct inquiry_data *data = &e->data;
417 bacpy(&info->bdaddr, &data->bdaddr);
418 info->pscan_rep_mode = data->pscan_rep_mode;
419 info->pscan_period_mode = data->pscan_period_mode;
420 info->pscan_mode = data->pscan_mode;
421 memcpy(info->dev_class, data->dev_class, 3);
422 info->clock_offset = data->clock_offset;
423 info++;
424 }
425
426 BT_DBG("cache %p, copied %d", cache, copied);
427 return copied;
428 }
429
430 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
431 {
432 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
433 struct hci_cp_inquiry cp;
434
435 BT_DBG("%s", hdev->name);
436
437 if (test_bit(HCI_INQUIRY, &hdev->flags))
438 return;
439
440 /* Start Inquiry */
441 memcpy(&cp.lap, &ir->lap, 3);
442 cp.length = ir->length;
443 cp.num_rsp = ir->num_rsp;
444 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
445 }
446
447 int hci_inquiry(void __user *arg)
448 {
449 __u8 __user *ptr = arg;
450 struct hci_inquiry_req ir;
451 struct hci_dev *hdev;
452 int err = 0, do_inquiry = 0, max_rsp;
453 long timeo;
454 __u8 *buf;
455
456 if (copy_from_user(&ir, ptr, sizeof(ir)))
457 return -EFAULT;
458
459 hdev = hci_dev_get(ir.dev_id);
460 if (!hdev)
461 return -ENODEV;
462
463 hci_dev_lock(hdev);
464 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
465 inquiry_cache_empty(hdev) ||
466 ir.flags & IREQ_CACHE_FLUSH) {
467 inquiry_cache_flush(hdev);
468 do_inquiry = 1;
469 }
470 hci_dev_unlock(hdev);
471
472 timeo = ir.length * msecs_to_jiffies(2000);
473
474 if (do_inquiry) {
475 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
476 if (err < 0)
477 goto done;
478 }
479
480 /* For an unlimited number of responses we use a buffer with 255 entries */
481 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
482
483 /* cache_dump can't sleep. Therefore we allocate a temporary buffer and then
484 * copy it to user space.
485 */
486 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
487 if (!buf) {
488 err = -ENOMEM;
489 goto done;
490 }
491
492 hci_dev_lock(hdev);
493 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
494 hci_dev_unlock(hdev);
495
496 BT_DBG("num_rsp %d", ir.num_rsp);
497
498 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
499 ptr += sizeof(ir);
500 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
501 ir.num_rsp))
502 err = -EFAULT;
503 } else
504 err = -EFAULT;
505
506 kfree(buf);
507
508 done:
509 hci_dev_put(hdev);
510 return err;
511 }
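/* Illustrative userspace sketch (not part of this file; fd is assumed to be
 * a raw HCI socket): the HCIINQUIRY ioctl takes a hci_inquiry_req header
 * immediately followed by room for the responses.
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info    info[255];
 *	} buf = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 0,
 *			  .lap = { 0x33, 0x8b, 0x9e } } };
 *
 *	if (ioctl(fd, HCIINQUIRY, &buf) < 0)
 *		perror("HCIINQUIRY");
 *
 * On return ir.num_rsp holds the number of inquiry_info entries that
 * inquiry_cache_dump() copied back.
 */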
512
513 /* ---- HCI ioctl helpers ---- */
514
515 int hci_dev_open(__u16 dev)
516 {
517 struct hci_dev *hdev;
518 int ret = 0;
519
520 hdev = hci_dev_get(dev);
521 if (!hdev)
522 return -ENODEV;
523
524 BT_DBG("%s %p", hdev->name, hdev);
525
526 hci_req_lock(hdev);
527
528 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
529 ret = -ERFKILL;
530 goto done;
531 }
532
533 if (test_bit(HCI_UP, &hdev->flags)) {
534 ret = -EALREADY;
535 goto done;
536 }
537
538 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
539 set_bit(HCI_RAW, &hdev->flags);
540
541 /* Treat all non-BR/EDR controllers as raw devices if
542 enable_hs is not set */
543 if (hdev->dev_type != HCI_BREDR && !enable_hs)
544 set_bit(HCI_RAW, &hdev->flags);
545
546 if (hdev->open(hdev)) {
547 ret = -EIO;
548 goto done;
549 }
550
551 if (!test_bit(HCI_RAW, &hdev->flags)) {
552 atomic_set(&hdev->cmd_cnt, 1);
553 set_bit(HCI_INIT, &hdev->flags);
554 hdev->init_last_cmd = 0;
555
556 ret = __hci_request(hdev, hci_init_req, 0,
557 msecs_to_jiffies(HCI_INIT_TIMEOUT));
558
559 if (lmp_host_le_capable(hdev))
560 ret = __hci_request(hdev, hci_le_init_req, 0,
561 msecs_to_jiffies(HCI_INIT_TIMEOUT));
562
563 clear_bit(HCI_INIT, &hdev->flags);
564 }
565
566 if (!ret) {
567 hci_dev_hold(hdev);
568 set_bit(HCI_UP, &hdev->flags);
569 hci_notify(hdev, HCI_DEV_UP);
570 if (!test_bit(HCI_SETUP, &hdev->flags)) {
571 hci_dev_lock(hdev);
572 mgmt_powered(hdev, 1);
573 hci_dev_unlock(hdev);
574 }
575 } else {
576 /* Init failed, cleanup */
577 flush_work(&hdev->tx_work);
578 flush_work(&hdev->cmd_work);
579 flush_work(&hdev->rx_work);
580
581 skb_queue_purge(&hdev->cmd_q);
582 skb_queue_purge(&hdev->rx_q);
583
584 if (hdev->flush)
585 hdev->flush(hdev);
586
587 if (hdev->sent_cmd) {
588 kfree_skb(hdev->sent_cmd);
589 hdev->sent_cmd = NULL;
590 }
591
592 hdev->close(hdev);
593 hdev->flags = 0;
594 }
595
596 done:
597 hci_req_unlock(hdev);
598 hci_dev_put(hdev);
599 return ret;
600 }
601
602 static int hci_dev_do_close(struct hci_dev *hdev)
603 {
604 BT_DBG("%s %p", hdev->name, hdev);
605
606 hci_req_cancel(hdev, ENODEV);
607 hci_req_lock(hdev);
608
609 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
610 del_timer_sync(&hdev->cmd_timer);
611 hci_req_unlock(hdev);
612 return 0;
613 }
614
615 /* Flush RX and TX works */
616 flush_work(&hdev->tx_work);
617 flush_work(&hdev->rx_work);
618
619 if (hdev->discov_timeout > 0) {
620 cancel_delayed_work(&hdev->discov_off);
621 hdev->discov_timeout = 0;
622 }
623
624 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
625 cancel_delayed_work(&hdev->power_off);
626
627 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
628 cancel_delayed_work(&hdev->service_cache);
629
630 hci_dev_lock(hdev);
631 inquiry_cache_flush(hdev);
632 hci_conn_hash_flush(hdev);
633 hci_dev_unlock(hdev);
634
635 hci_notify(hdev, HCI_DEV_DOWN);
636
637 if (hdev->flush)
638 hdev->flush(hdev);
639
640 /* Reset device */
641 skb_queue_purge(&hdev->cmd_q);
642 atomic_set(&hdev->cmd_cnt, 1);
643 if (!test_bit(HCI_RAW, &hdev->flags) &&
644 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
645 set_bit(HCI_INIT, &hdev->flags);
646 __hci_request(hdev, hci_reset_req, 0,
647 msecs_to_jiffies(250));
648 clear_bit(HCI_INIT, &hdev->flags);
649 }
650
651 /* flush cmd work */
652 flush_work(&hdev->cmd_work);
653
654 /* Drop queues */
655 skb_queue_purge(&hdev->rx_q);
656 skb_queue_purge(&hdev->cmd_q);
657 skb_queue_purge(&hdev->raw_q);
658
659 /* Drop last sent command */
660 if (hdev->sent_cmd) {
661 del_timer_sync(&hdev->cmd_timer);
662 kfree_skb(hdev->sent_cmd);
663 hdev->sent_cmd = NULL;
664 }
665
666 /* After this point our queues are empty
667 * and no tasks are scheduled. */
668 hdev->close(hdev);
669
670 hci_dev_lock(hdev);
671 mgmt_powered(hdev, 0);
672 hci_dev_unlock(hdev);
673
674 /* Clear flags */
675 hdev->flags = 0;
676
677 hci_req_unlock(hdev);
678
679 hci_dev_put(hdev);
680 return 0;
681 }
682
683 int hci_dev_close(__u16 dev)
684 {
685 struct hci_dev *hdev;
686 int err;
687
688 hdev = hci_dev_get(dev);
689 if (!hdev)
690 return -ENODEV;
691 err = hci_dev_do_close(hdev);
692 hci_dev_put(hdev);
693 return err;
694 }
695
696 int hci_dev_reset(__u16 dev)
697 {
698 struct hci_dev *hdev;
699 int ret = 0;
700
701 hdev = hci_dev_get(dev);
702 if (!hdev)
703 return -ENODEV;
704
705 hci_req_lock(hdev);
706
707 if (!test_bit(HCI_UP, &hdev->flags))
708 goto done;
709
710 /* Drop queues */
711 skb_queue_purge(&hdev->rx_q);
712 skb_queue_purge(&hdev->cmd_q);
713
714 hci_dev_lock(hdev);
715 inquiry_cache_flush(hdev);
716 hci_conn_hash_flush(hdev);
717 hci_dev_unlock(hdev);
718
719 if (hdev->flush)
720 hdev->flush(hdev);
721
722 atomic_set(&hdev->cmd_cnt, 1);
723 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
724
725 if (!test_bit(HCI_RAW, &hdev->flags))
726 ret = __hci_request(hdev, hci_reset_req, 0,
727 msecs_to_jiffies(HCI_INIT_TIMEOUT));
728
729 done:
730 hci_req_unlock(hdev);
731 hci_dev_put(hdev);
732 return ret;
733 }
734
735 int hci_dev_reset_stat(__u16 dev)
736 {
737 struct hci_dev *hdev;
738 int ret = 0;
739
740 hdev = hci_dev_get(dev);
741 if (!hdev)
742 return -ENODEV;
743
744 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
745
746 hci_dev_put(hdev);
747
748 return ret;
749 }
750
751 int hci_dev_cmd(unsigned int cmd, void __user *arg)
752 {
753 struct hci_dev *hdev;
754 struct hci_dev_req dr;
755 int err = 0;
756
757 if (copy_from_user(&dr, arg, sizeof(dr)))
758 return -EFAULT;
759
760 hdev = hci_dev_get(dr.dev_id);
761 if (!hdev)
762 return -ENODEV;
763
764 switch (cmd) {
765 case HCISETAUTH:
766 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
767 msecs_to_jiffies(HCI_INIT_TIMEOUT));
768 break;
769
770 case HCISETENCRYPT:
771 if (!lmp_encrypt_capable(hdev)) {
772 err = -EOPNOTSUPP;
773 break;
774 }
775
776 if (!test_bit(HCI_AUTH, &hdev->flags)) {
777 /* Auth must be enabled first */
778 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
779 msecs_to_jiffies(HCI_INIT_TIMEOUT));
780 if (err)
781 break;
782 }
783
784 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
785 msecs_to_jiffies(HCI_INIT_TIMEOUT));
786 break;
787
788 case HCISETSCAN:
789 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
790 msecs_to_jiffies(HCI_INIT_TIMEOUT));
791 break;
792
793 case HCISETLINKPOL:
794 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
795 msecs_to_jiffies(HCI_INIT_TIMEOUT));
796 break;
797
798 case HCISETLINKMODE:
799 hdev->link_mode = ((__u16) dr.dev_opt) &
800 (HCI_LM_MASTER | HCI_LM_ACCEPT);
801 break;
802
803 case HCISETPTYPE:
804 hdev->pkt_type = (__u16) dr.dev_opt;
805 break;
806
807 case HCISETACLMTU:
808 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
809 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
810 break;
811
812 case HCISETSCOMTU:
813 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
814 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
815 break;
816
817 default:
818 err = -EINVAL;
819 break;
820 }
821
822 hci_dev_put(hdev);
823 return err;
824 }
825
826 int hci_get_dev_list(void __user *arg)
827 {
828 struct hci_dev *hdev;
829 struct hci_dev_list_req *dl;
830 struct hci_dev_req *dr;
831 int n = 0, size, err;
832 __u16 dev_num;
833
834 if (get_user(dev_num, (__u16 __user *) arg))
835 return -EFAULT;
836
837 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
838 return -EINVAL;
839
840 size = sizeof(*dl) + dev_num * sizeof(*dr);
841
842 dl = kzalloc(size, GFP_KERNEL);
843 if (!dl)
844 return -ENOMEM;
845
846 dr = dl->dev_req;
847
848 read_lock(&hci_dev_list_lock);
849 list_for_each_entry(hdev, &hci_dev_list, list) {
850 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
851 cancel_delayed_work(&hdev->power_off);
852
853 if (!test_bit(HCI_MGMT, &hdev->flags))
854 set_bit(HCI_PAIRABLE, &hdev->flags);
855
856 (dr + n)->dev_id = hdev->id;
857 (dr + n)->dev_opt = hdev->flags;
858
859 if (++n >= dev_num)
860 break;
861 }
862 read_unlock(&hci_dev_list_lock);
863
864 dl->dev_num = n;
865 size = sizeof(*dl) + n * sizeof(*dr);
866
867 err = copy_to_user(arg, dl, size);
868 kfree(dl);
869
870 return err ? -EFAULT : 0;
871 }
872
873 int hci_get_dev_info(void __user *arg)
874 {
875 struct hci_dev *hdev;
876 struct hci_dev_info di;
877 int err = 0;
878
879 if (copy_from_user(&di, arg, sizeof(di)))
880 return -EFAULT;
881
882 hdev = hci_dev_get(di.dev_id);
883 if (!hdev)
884 return -ENODEV;
885
886 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
887 cancel_delayed_work_sync(&hdev->power_off);
888
889 if (!test_bit(HCI_MGMT, &hdev->flags))
890 set_bit(HCI_PAIRABLE, &hdev->flags);
891
892 strcpy(di.name, hdev->name);
893 di.bdaddr = hdev->bdaddr;
894 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
895 di.flags = hdev->flags;
896 di.pkt_type = hdev->pkt_type;
897 di.acl_mtu = hdev->acl_mtu;
898 di.acl_pkts = hdev->acl_pkts;
899 di.sco_mtu = hdev->sco_mtu;
900 di.sco_pkts = hdev->sco_pkts;
901 di.link_policy = hdev->link_policy;
902 di.link_mode = hdev->link_mode;
903
904 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
905 memcpy(&di.features, &hdev->features, sizeof(di.features));
906
907 if (copy_to_user(arg, &di, sizeof(di)))
908 err = -EFAULT;
909
910 hci_dev_put(hdev);
911
912 return err;
913 }
914
915 /* ---- Interface to HCI drivers ---- */
916
917 static int hci_rfkill_set_block(void *data, bool blocked)
918 {
919 struct hci_dev *hdev = data;
920
921 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
922
923 if (!blocked)
924 return 0;
925
926 hci_dev_do_close(hdev);
927
928 return 0;
929 }
930
931 static const struct rfkill_ops hci_rfkill_ops = {
932 .set_block = hci_rfkill_set_block,
933 };
934
935 /* Alloc HCI device */
936 struct hci_dev *hci_alloc_dev(void)
937 {
938 struct hci_dev *hdev;
939
940 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
941 if (!hdev)
942 return NULL;
943
944 hci_init_sysfs(hdev);
945 skb_queue_head_init(&hdev->driver_init);
946
947 return hdev;
948 }
949 EXPORT_SYMBOL(hci_alloc_dev);
950
951 /* Free HCI device */
952 void hci_free_dev(struct hci_dev *hdev)
953 {
954 skb_queue_purge(&hdev->driver_init);
955
956 /* will free via device release */
957 put_device(&hdev->dev);
958 }
959 EXPORT_SYMBOL(hci_free_dev);
960
961 static void hci_power_on(struct work_struct *work)
962 {
963 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
964
965 BT_DBG("%s", hdev->name);
966
967 if (hci_dev_open(hdev->id) < 0)
968 return;
969
970 if (test_bit(HCI_AUTO_OFF, &hdev->flags))
971 schedule_delayed_work(&hdev->power_off,
972 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
973
974 if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
975 mgmt_index_added(hdev);
976 }
977
978 static void hci_power_off(struct work_struct *work)
979 {
980 struct hci_dev *hdev = container_of(work, struct hci_dev,
981 power_off.work);
982
983 BT_DBG("%s", hdev->name);
984
985 clear_bit(HCI_AUTO_OFF, &hdev->flags);
986
987 hci_dev_close(hdev->id);
988 }
989
990 static void hci_discov_off(struct work_struct *work)
991 {
992 struct hci_dev *hdev;
993 u8 scan = SCAN_PAGE;
994
995 hdev = container_of(work, struct hci_dev, discov_off.work);
996
997 BT_DBG("%s", hdev->name);
998
999 hci_dev_lock(hdev);
1000
1001 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1002
1003 hdev->discov_timeout = 0;
1004
1005 hci_dev_unlock(hdev);
1006 }
1007
1008 int hci_uuids_clear(struct hci_dev *hdev)
1009 {
1010 struct list_head *p, *n;
1011
1012 list_for_each_safe(p, n, &hdev->uuids) {
1013 struct bt_uuid *uuid;
1014
1015 uuid = list_entry(p, struct bt_uuid, list);
1016
1017 list_del(p);
1018 kfree(uuid);
1019 }
1020
1021 return 0;
1022 }
1023
1024 int hci_link_keys_clear(struct hci_dev *hdev)
1025 {
1026 struct list_head *p, *n;
1027
1028 list_for_each_safe(p, n, &hdev->link_keys) {
1029 struct link_key *key;
1030
1031 key = list_entry(p, struct link_key, list);
1032
1033 list_del(p);
1034 kfree(key);
1035 }
1036
1037 return 0;
1038 }
1039
1040 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1041 {
1042 struct link_key *k;
1043
1044 list_for_each_entry(k, &hdev->link_keys, list)
1045 if (bacmp(bdaddr, &k->bdaddr) == 0)
1046 return k;
1047
1048 return NULL;
1049 }
1050
1051 static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1052 u8 key_type, u8 old_key_type)
1053 {
1054 /* Legacy key */
1055 if (key_type < 0x03)
1056 return 1;
1057
1058 /* Debug keys are insecure so don't store them persistently */
1059 if (key_type == HCI_LK_DEBUG_COMBINATION)
1060 return 0;
1061
1062 /* Changed combination key and there's no previous one */
1063 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1064 return 0;
1065
1066 /* Security mode 3 case */
1067 if (!conn)
1068 return 1;
1069
1070 /* Neither local nor remote side had no-bonding as requirement */
1071 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1072 return 1;
1073
1074 /* Local side had dedicated bonding as requirement */
1075 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1076 return 1;
1077
1078 /* Remote side had dedicated bonding as requirement */
1079 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1080 return 1;
1081
1082 /* If none of the above criteria match, then don't store the key
1083 * persistently */
1084 return 0;
1085 }
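/* Reading of the checks above (illustrative interpretation, not spec text):
 * an SSP key of type HCI_LK_UNAUTH_COMBINATION is kept when either side
 * required dedicated bonding (auth 0x02/0x03) or both sides required some
 * form of bonding (> 0x01); it is dropped when one side used no-bonding and
 * neither side did dedicated bonding.
 */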
1086
1087 struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1088 {
1089 struct link_key *k;
1090
1091 list_for_each_entry(k, &hdev->link_keys, list) {
1092 struct key_master_id *id;
1093
1094 if (k->type != HCI_LK_SMP_LTK)
1095 continue;
1096
1097 if (k->dlen != sizeof(*id))
1098 continue;
1099
1100 id = (void *) &k->data;
1101 if (id->ediv == ediv &&
1102 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1103 return k;
1104 }
1105
1106 return NULL;
1107 }
1108 EXPORT_SYMBOL(hci_find_ltk);
1109
1110 struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1111 bdaddr_t *bdaddr, u8 type)
1112 {
1113 struct link_key *k;
1114
1115 list_for_each_entry(k, &hdev->link_keys, list)
1116 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1117 return k;
1118
1119 return NULL;
1120 }
1121 EXPORT_SYMBOL(hci_find_link_key_type);
1122
1123 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1124 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1125 {
1126 struct link_key *key, *old_key;
1127 u8 old_key_type, persistent;
1128
1129 old_key = hci_find_link_key(hdev, bdaddr);
1130 if (old_key) {
1131 old_key_type = old_key->type;
1132 key = old_key;
1133 } else {
1134 old_key_type = conn ? conn->key_type : 0xff;
1135 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1136 if (!key)
1137 return -ENOMEM;
1138 list_add(&key->list, &hdev->link_keys);
1139 }
1140
1141 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1142
1143 /* Some buggy controller combinations generate a changed
1144 * combination key for legacy pairing even when there's no
1145 * previous key */
1146 if (type == HCI_LK_CHANGED_COMBINATION &&
1147 (!conn || conn->remote_auth == 0xff) &&
1148 old_key_type == 0xff) {
1149 type = HCI_LK_COMBINATION;
1150 if (conn)
1151 conn->key_type = type;
1152 }
1153
1154 bacpy(&key->bdaddr, bdaddr);
1155 memcpy(key->val, val, 16);
1156 key->pin_len = pin_len;
1157
1158 if (type == HCI_LK_CHANGED_COMBINATION)
1159 key->type = old_key_type;
1160 else
1161 key->type = type;
1162
1163 if (!new_key)
1164 return 0;
1165
1166 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1167
1168 mgmt_new_link_key(hdev, key, persistent);
1169
1170 if (!persistent) {
1171 list_del(&key->list);
1172 kfree(key);
1173 }
1174
1175 return 0;
1176 }
1177
1178 int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
1179 u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
1180 {
1181 struct link_key *key, *old_key;
1182 struct key_master_id *id;
1183 u8 old_key_type;
1184
1185 BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));
1186
1187 old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
1188 if (old_key) {
1189 key = old_key;
1190 old_key_type = old_key->type;
1191 } else {
1192 key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
1193 if (!key)
1194 return -ENOMEM;
1195 list_add(&key->list, &hdev->link_keys);
1196 old_key_type = 0xff;
1197 }
1198
1199 key->dlen = sizeof(*id);
1200
1201 bacpy(&key->bdaddr, bdaddr);
1202 memcpy(key->val, ltk, sizeof(key->val));
1203 key->type = HCI_LK_SMP_LTK;
1204 key->pin_len = key_size;
1205
1206 id = (void *) &key->data;
1207 id->ediv = ediv;
1208 memcpy(id->rand, rand, sizeof(id->rand));
1209
1210 if (new_key)
1211 mgmt_new_link_key(hdev, key, old_key_type);
1212
1213 return 0;
1214 }
1215
1216 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1217 {
1218 struct link_key *key;
1219
1220 key = hci_find_link_key(hdev, bdaddr);
1221 if (!key)
1222 return -ENOENT;
1223
1224 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1225
1226 list_del(&key->list);
1227 kfree(key);
1228
1229 return 0;
1230 }
1231
1232 /* HCI command timer function */
1233 static void hci_cmd_timer(unsigned long arg)
1234 {
1235 struct hci_dev *hdev = (void *) arg;
1236
1237 BT_ERR("%s command tx timeout", hdev->name);
1238 atomic_set(&hdev->cmd_cnt, 1);
1239 queue_work(hdev->workqueue, &hdev->cmd_work);
1240 }
1241
1242 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1243 bdaddr_t *bdaddr)
1244 {
1245 struct oob_data *data;
1246
1247 list_for_each_entry(data, &hdev->remote_oob_data, list)
1248 if (bacmp(bdaddr, &data->bdaddr) == 0)
1249 return data;
1250
1251 return NULL;
1252 }
1253
1254 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1255 {
1256 struct oob_data *data;
1257
1258 data = hci_find_remote_oob_data(hdev, bdaddr);
1259 if (!data)
1260 return -ENOENT;
1261
1262 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1263
1264 list_del(&data->list);
1265 kfree(data);
1266
1267 return 0;
1268 }
1269
1270 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1271 {
1272 struct oob_data *data, *n;
1273
1274 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1275 list_del(&data->list);
1276 kfree(data);
1277 }
1278
1279 return 0;
1280 }
1281
1282 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1283 u8 *randomizer)
1284 {
1285 struct oob_data *data;
1286
1287 data = hci_find_remote_oob_data(hdev, bdaddr);
1288
1289 if (!data) {
1290 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1291 if (!data)
1292 return -ENOMEM;
1293
1294 bacpy(&data->bdaddr, bdaddr);
1295 list_add(&data->list, &hdev->remote_oob_data);
1296 }
1297
1298 memcpy(data->hash, hash, sizeof(data->hash));
1299 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1300
1301 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1302
1303 return 0;
1304 }
1305
1306 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1307 bdaddr_t *bdaddr)
1308 {
1309 struct bdaddr_list *b;
1310
1311 list_for_each_entry(b, &hdev->blacklist, list)
1312 if (bacmp(bdaddr, &b->bdaddr) == 0)
1313 return b;
1314
1315 return NULL;
1316 }
1317
1318 int hci_blacklist_clear(struct hci_dev *hdev)
1319 {
1320 struct list_head *p, *n;
1321
1322 list_for_each_safe(p, n, &hdev->blacklist) {
1323 struct bdaddr_list *b;
1324
1325 b = list_entry(p, struct bdaddr_list, list);
1326
1327 list_del(p);
1328 kfree(b);
1329 }
1330
1331 return 0;
1332 }
1333
1334 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1335 {
1336 struct bdaddr_list *entry;
1337
1338 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1339 return -EBADF;
1340
1341 if (hci_blacklist_lookup(hdev, bdaddr))
1342 return -EEXIST;
1343
1344 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1345 if (!entry)
1346 return -ENOMEM;
1347
1348 bacpy(&entry->bdaddr, bdaddr);
1349
1350 list_add(&entry->list, &hdev->blacklist);
1351
1352 return mgmt_device_blocked(hdev, bdaddr);
1353 }
1354
1355 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1356 {
1357 struct bdaddr_list *entry;
1358
1359 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1360 return hci_blacklist_clear(hdev);
1361
1362 entry = hci_blacklist_lookup(hdev, bdaddr);
1363 if (!entry)
1364 return -ENOENT;
1365
1366 list_del(&entry->list);
1367 kfree(entry);
1368
1369 return mgmt_device_unblocked(hdev, bdaddr);
1370 }
1371
1372 static void hci_clear_adv_cache(struct work_struct *work)
1373 {
1374 struct hci_dev *hdev = container_of(work, struct hci_dev,
1375 adv_work.work);
1376
1377 hci_dev_lock(hdev);
1378
1379 hci_adv_entries_clear(hdev);
1380
1381 hci_dev_unlock(hdev);
1382 }
1383
1384 int hci_adv_entries_clear(struct hci_dev *hdev)
1385 {
1386 struct adv_entry *entry, *tmp;
1387
1388 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1389 list_del(&entry->list);
1390 kfree(entry);
1391 }
1392
1393 BT_DBG("%s adv cache cleared", hdev->name);
1394
1395 return 0;
1396 }
1397
1398 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1399 {
1400 struct adv_entry *entry;
1401
1402 list_for_each_entry(entry, &hdev->adv_entries, list)
1403 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1404 return entry;
1405
1406 return NULL;
1407 }
1408
1409 static inline int is_connectable_adv(u8 evt_type)
1410 {
1411 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1412 return 1;
1413
1414 return 0;
1415 }
1416
1417 int hci_add_adv_entry(struct hci_dev *hdev,
1418 struct hci_ev_le_advertising_info *ev)
1419 {
1420 struct adv_entry *entry;
1421
1422 if (!is_connectable_adv(ev->evt_type))
1423 return -EINVAL;
1424
1425 /* Only new entries should be added to adv_entries. So, if
1426 * bdaddr was found, don't add it. */
1427 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1428 return 0;
1429
1430 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1431 if (!entry)
1432 return -ENOMEM;
1433
1434 bacpy(&entry->bdaddr, &ev->bdaddr);
1435 entry->bdaddr_type = ev->bdaddr_type;
1436
1437 list_add(&entry->list, &hdev->adv_entries);
1438
1439 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1440 batostr(&entry->bdaddr), entry->bdaddr_type);
1441
1442 return 0;
1443 }
1444
1445 /* Register HCI device */
1446 int hci_register_dev(struct hci_dev *hdev)
1447 {
1448 struct list_head *head = &hci_dev_list, *p;
1449 int i, id, error;
1450
1451 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
1452 hdev->bus, hdev->owner);
1453
1454 if (!hdev->open || !hdev->close || !hdev->destruct)
1455 return -EINVAL;
1456
1457 /* Do not allow HCI_AMP devices to register at index 0,
1458 * so the index can be used as the AMP controller ID.
1459 */
1460 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1461
1462 write_lock(&hci_dev_list_lock);
1463
1464 /* Find first available device id */
1465 list_for_each(p, &hci_dev_list) {
1466 if (list_entry(p, struct hci_dev, list)->id != id)
1467 break;
1468 head = p; id++;
1469 }
1470
1471 sprintf(hdev->name, "hci%d", id);
1472 hdev->id = id;
1473 list_add_tail(&hdev->list, head);
1474
1475 atomic_set(&hdev->refcnt, 1);
1476 mutex_init(&hdev->lock);
1477
1478 hdev->flags = 0;
1479 hdev->dev_flags = 0;
1480 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1481 hdev->esco_type = (ESCO_HV1);
1482 hdev->link_mode = (HCI_LM_ACCEPT);
1483 hdev->io_capability = 0x03; /* No Input No Output */
1484
1485 hdev->idle_timeout = 0;
1486 hdev->sniff_max_interval = 800;
1487 hdev->sniff_min_interval = 80;
1488
1489 INIT_WORK(&hdev->rx_work, hci_rx_work);
1490 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1491 INIT_WORK(&hdev->tx_work, hci_tx_work);
1492
1493
1494 skb_queue_head_init(&hdev->rx_q);
1495 skb_queue_head_init(&hdev->cmd_q);
1496 skb_queue_head_init(&hdev->raw_q);
1497
1498 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1499
1500 for (i = 0; i < NUM_REASSEMBLY; i++)
1501 hdev->reassembly[i] = NULL;
1502
1503 init_waitqueue_head(&hdev->req_wait_q);
1504 mutex_init(&hdev->req_lock);
1505
1506 inquiry_cache_init(hdev);
1507
1508 hci_conn_hash_init(hdev);
1509
1510 INIT_LIST_HEAD(&hdev->mgmt_pending);
1511
1512 INIT_LIST_HEAD(&hdev->blacklist);
1513
1514 INIT_LIST_HEAD(&hdev->uuids);
1515
1516 INIT_LIST_HEAD(&hdev->link_keys);
1517
1518 INIT_LIST_HEAD(&hdev->remote_oob_data);
1519
1520 INIT_LIST_HEAD(&hdev->adv_entries);
1521
1522 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
1523 INIT_WORK(&hdev->power_on, hci_power_on);
1524 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1525
1526 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1527
1528 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1529
1530 atomic_set(&hdev->promisc, 0);
1531
1532 write_unlock(&hci_dev_list_lock);
1533
1534 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1535 WQ_MEM_RECLAIM, 1);
1536 if (!hdev->workqueue) {
1537 error = -ENOMEM;
1538 goto err;
1539 }
1540
1541 error = hci_add_sysfs(hdev);
1542 if (error < 0)
1543 goto err_wqueue;
1544
1545 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1546 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1547 if (hdev->rfkill) {
1548 if (rfkill_register(hdev->rfkill) < 0) {
1549 rfkill_destroy(hdev->rfkill);
1550 hdev->rfkill = NULL;
1551 }
1552 }
1553
1554 set_bit(HCI_AUTO_OFF, &hdev->flags);
1555 set_bit(HCI_SETUP, &hdev->flags);
1556 schedule_work(&hdev->power_on);
1557
1558 hci_notify(hdev, HCI_DEV_REG);
1559
1560 return id;
1561
1562 err_wqueue:
1563 destroy_workqueue(hdev->workqueue);
1564 err:
1565 write_lock(&hci_dev_list_lock);
1566 list_del(&hdev->list);
1567 write_unlock(&hci_dev_list_lock);
1568
1569 return error;
1570 }
1571 EXPORT_SYMBOL(hci_register_dev);
1572
1573 /* Unregister HCI device */
1574 void hci_unregister_dev(struct hci_dev *hdev)
1575 {
1576 int i;
1577
1578 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1579
1580 write_lock(&hci_dev_list_lock);
1581 list_del(&hdev->list);
1582 write_unlock(&hci_dev_list_lock);
1583
1584 hci_dev_do_close(hdev);
1585
1586 for (i = 0; i < NUM_REASSEMBLY; i++)
1587 kfree_skb(hdev->reassembly[i]);
1588
1589 if (!test_bit(HCI_INIT, &hdev->flags) &&
1590 !test_bit(HCI_SETUP, &hdev->flags)) {
1591 hci_dev_lock(hdev);
1592 mgmt_index_removed(hdev);
1593 hci_dev_unlock(hdev);
1594 }
1595
1596 /* mgmt_index_removed should take care of emptying the
1597 * pending list */
1598 BUG_ON(!list_empty(&hdev->mgmt_pending));
1599
1600 hci_notify(hdev, HCI_DEV_UNREG);
1601
1602 if (hdev->rfkill) {
1603 rfkill_unregister(hdev->rfkill);
1604 rfkill_destroy(hdev->rfkill);
1605 }
1606
1607 hci_del_sysfs(hdev);
1608
1609 cancel_delayed_work_sync(&hdev->adv_work);
1610
1611 destroy_workqueue(hdev->workqueue);
1612
1613 hci_dev_lock(hdev);
1614 hci_blacklist_clear(hdev);
1615 hci_uuids_clear(hdev);
1616 hci_link_keys_clear(hdev);
1617 hci_remote_oob_data_clear(hdev);
1618 hci_adv_entries_clear(hdev);
1619 hci_dev_unlock(hdev);
1620
1621 __hci_dev_put(hdev);
1622 }
1623 EXPORT_SYMBOL(hci_unregister_dev);
1624
1625 /* Suspend HCI device */
1626 int hci_suspend_dev(struct hci_dev *hdev)
1627 {
1628 hci_notify(hdev, HCI_DEV_SUSPEND);
1629 return 0;
1630 }
1631 EXPORT_SYMBOL(hci_suspend_dev);
1632
1633 /* Resume HCI device */
1634 int hci_resume_dev(struct hci_dev *hdev)
1635 {
1636 hci_notify(hdev, HCI_DEV_RESUME);
1637 return 0;
1638 }
1639 EXPORT_SYMBOL(hci_resume_dev);
1640
1641 /* Receive frame from HCI drivers */
1642 int hci_recv_frame(struct sk_buff *skb)
1643 {
1644 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1645 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1646 && !test_bit(HCI_INIT, &hdev->flags))) {
1647 kfree_skb(skb);
1648 return -ENXIO;
1649 }
1650
1651 /* Incoming skb */
1652 bt_cb(skb)->incoming = 1;
1653
1654 /* Time stamp */
1655 __net_timestamp(skb);
1656
1657 skb_queue_tail(&hdev->rx_q, skb);
1658 queue_work(hdev->workqueue, &hdev->rx_work);
1659
1660 return 0;
1661 }
1662 EXPORT_SYMBOL(hci_recv_frame);
1663
1664 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1665 int count, __u8 index)
1666 {
1667 int len = 0;
1668 int hlen = 0;
1669 int remain = count;
1670 struct sk_buff *skb;
1671 struct bt_skb_cb *scb;
1672
1673 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1674 index >= NUM_REASSEMBLY)
1675 return -EILSEQ;
1676
1677 skb = hdev->reassembly[index];
1678
1679 if (!skb) {
1680 switch (type) {
1681 case HCI_ACLDATA_PKT:
1682 len = HCI_MAX_FRAME_SIZE;
1683 hlen = HCI_ACL_HDR_SIZE;
1684 break;
1685 case HCI_EVENT_PKT:
1686 len = HCI_MAX_EVENT_SIZE;
1687 hlen = HCI_EVENT_HDR_SIZE;
1688 break;
1689 case HCI_SCODATA_PKT:
1690 len = HCI_MAX_SCO_SIZE;
1691 hlen = HCI_SCO_HDR_SIZE;
1692 break;
1693 }
1694
1695 skb = bt_skb_alloc(len, GFP_ATOMIC);
1696 if (!skb)
1697 return -ENOMEM;
1698
1699 scb = (void *) skb->cb;
1700 scb->expect = hlen;
1701 scb->pkt_type = type;
1702
1703 skb->dev = (void *) hdev;
1704 hdev->reassembly[index] = skb;
1705 }
1706
1707 while (count) {
1708 scb = (void *) skb->cb;
1709 len = min(scb->expect, (__u16)count);
1710
1711 memcpy(skb_put(skb, len), data, len);
1712
1713 count -= len;
1714 data += len;
1715 scb->expect -= len;
1716 remain = count;
1717
1718 switch (type) {
1719 case HCI_EVENT_PKT:
1720 if (skb->len == HCI_EVENT_HDR_SIZE) {
1721 struct hci_event_hdr *h = hci_event_hdr(skb);
1722 scb->expect = h->plen;
1723
1724 if (skb_tailroom(skb) < scb->expect) {
1725 kfree_skb(skb);
1726 hdev->reassembly[index] = NULL;
1727 return -ENOMEM;
1728 }
1729 }
1730 break;
1731
1732 case HCI_ACLDATA_PKT:
1733 if (skb->len == HCI_ACL_HDR_SIZE) {
1734 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1735 scb->expect = __le16_to_cpu(h->dlen);
1736
1737 if (skb_tailroom(skb) < scb->expect) {
1738 kfree_skb(skb);
1739 hdev->reassembly[index] = NULL;
1740 return -ENOMEM;
1741 }
1742 }
1743 break;
1744
1745 case HCI_SCODATA_PKT:
1746 if (skb->len == HCI_SCO_HDR_SIZE) {
1747 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1748 scb->expect = h->dlen;
1749
1750 if (skb_tailroom(skb) < scb->expect) {
1751 kfree_skb(skb);
1752 hdev->reassembly[index] = NULL;
1753 return -ENOMEM;
1754 }
1755 }
1756 break;
1757 }
1758
1759 if (scb->expect == 0) {
1760 /* Complete frame */
1761
1762 bt_cb(skb)->pkt_type = type;
1763 hci_recv_frame(skb);
1764
1765 hdev->reassembly[index] = NULL;
1766 return remain;
1767 }
1768 }
1769
1770 return remain;
1771 }
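/* The return value of hci_reassembly() is the number of input bytes not yet
 * consumed: once a packet is complete it is handed to hci_recv_frame() and
 * the leftover count is returned so the callers below can loop over it; a
 * negative value signals an error (-EILSEQ, -ENOMEM).
 */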
1772
1773 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1774 {
1775 int rem = 0;
1776
1777 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1778 return -EILSEQ;
1779
1780 while (count) {
1781 rem = hci_reassembly(hdev, type, data, count, type - 1);
1782 if (rem < 0)
1783 return rem;
1784
1785 data += (count - rem);
1786 count = rem;
1787 }
1788
1789 return rem;
1790 }
1791 EXPORT_SYMBOL(hci_recv_fragment);
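/* Illustrative driver-side sketch (names are hypothetical, not from this
 * file): a transport driver that receives raw chunks per packet type can
 * feed them in from its receive path, e.g.
 *
 *	err = hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 *	if (err < 0)
 *		BT_ERR("%s corrupted event packet", hdev->name);
 */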
1792
1793 #define STREAM_REASSEMBLY 0
1794
1795 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1796 {
1797 int type;
1798 int rem = 0;
1799
1800 while (count) {
1801 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1802
1803 if (!skb) {
1804 struct { char type; } *pkt;
1805
1806 /* Start of the frame */
1807 pkt = data;
1808 type = pkt->type;
1809
1810 data++;
1811 count--;
1812 } else
1813 type = bt_cb(skb)->pkt_type;
1814
1815 rem = hci_reassembly(hdev, type, data, count,
1816 STREAM_REASSEMBLY);
1817 if (rem < 0)
1818 return rem;
1819
1820 data += (count - rem);
1821 count = rem;
1822 }
1823
1824 return rem;
1825 }
1826 EXPORT_SYMBOL(hci_recv_stream_fragment);
1827
1828 /* ---- Interface to upper protocols ---- */
1829
1830 int hci_register_cb(struct hci_cb *cb)
1831 {
1832 BT_DBG("%p name %s", cb, cb->name);
1833
1834 write_lock(&hci_cb_list_lock);
1835 list_add(&cb->list, &hci_cb_list);
1836 write_unlock(&hci_cb_list_lock);
1837
1838 return 0;
1839 }
1840 EXPORT_SYMBOL(hci_register_cb);
1841
1842 int hci_unregister_cb(struct hci_cb *cb)
1843 {
1844 BT_DBG("%p name %s", cb, cb->name);
1845
1846 write_lock(&hci_cb_list_lock);
1847 list_del(&cb->list);
1848 write_unlock(&hci_cb_list_lock);
1849
1850 return 0;
1851 }
1852 EXPORT_SYMBOL(hci_unregister_cb);
1853
1854 static int hci_send_frame(struct sk_buff *skb)
1855 {
1856 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1857
1858 if (!hdev) {
1859 kfree_skb(skb);
1860 return -ENODEV;
1861 }
1862
1863 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1864
1865 if (atomic_read(&hdev->promisc)) {
1866 /* Time stamp */
1867 __net_timestamp(skb);
1868
1869 hci_send_to_sock(hdev, skb, NULL);
1870 }
1871
1872 /* Get rid of skb owner, prior to sending to the driver. */
1873 skb_orphan(skb);
1874
1875 return hdev->send(skb);
1876 }
1877
1878 /* Send HCI command */
1879 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1880 {
1881 int len = HCI_COMMAND_HDR_SIZE + plen;
1882 struct hci_command_hdr *hdr;
1883 struct sk_buff *skb;
1884
1885 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1886
1887 skb = bt_skb_alloc(len, GFP_ATOMIC);
1888 if (!skb) {
1889 BT_ERR("%s no memory for command", hdev->name);
1890 return -ENOMEM;
1891 }
1892
1893 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1894 hdr->opcode = cpu_to_le16(opcode);
1895 hdr->plen = plen;
1896
1897 if (plen)
1898 memcpy(skb_put(skb, plen), param, plen);
1899
1900 BT_DBG("skb len %d", skb->len);
1901
1902 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1903 skb->dev = (void *) hdev;
1904
1905 if (test_bit(HCI_INIT, &hdev->flags))
1906 hdev->init_last_cmd = opcode;
1907
1908 skb_queue_tail(&hdev->cmd_q, skb);
1909 queue_work(hdev->workqueue, &hdev->cmd_work);
1910
1911 return 0;
1912 }
1913
1914 /* Get data from the previously sent command */
1915 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1916 {
1917 struct hci_command_hdr *hdr;
1918
1919 if (!hdev->sent_cmd)
1920 return NULL;
1921
1922 hdr = (void *) hdev->sent_cmd->data;
1923
1924 if (hdr->opcode != cpu_to_le16(opcode))
1925 return NULL;
1926
1927 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1928
1929 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1930 }
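/* Illustrative use (the real call sites are the command-complete handlers
 * in hci_event.c): a handler can recover the parameters it sent, e.g.
 *
 *	struct hci_cp_inquiry *cp = hci_sent_cmd_data(hdev, HCI_OP_INQUIRY);
 *	if (!cp)
 *		return;
 *
 * A NULL return means the last sent command had a different opcode.
 */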
1931
1932 /* Send ACL data */
1933 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1934 {
1935 struct hci_acl_hdr *hdr;
1936 int len = skb->len;
1937
1938 skb_push(skb, HCI_ACL_HDR_SIZE);
1939 skb_reset_transport_header(skb);
1940 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
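/* hci_handle_pack() (see hci.h) keeps the 12-bit connection handle in
 * bits 0-11 and puts the packet boundary/broadcast flags in bits 12-15;
 * e.g. handle 0x002a with ACL_START (0x02) packs to 0x202a.
 */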
1941 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1942 hdr->dlen = cpu_to_le16(len);
1943 }
1944
1945 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
1946 struct sk_buff *skb, __u16 flags)
1947 {
1948 struct hci_dev *hdev = conn->hdev;
1949 struct sk_buff *list;
1950
1951 list = skb_shinfo(skb)->frag_list;
1952 if (!list) {
1953 /* Non fragmented */
1954 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1955
1956 skb_queue_tail(queue, skb);
1957 } else {
1958 /* Fragmented */
1959 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1960
1961 skb_shinfo(skb)->frag_list = NULL;
1962
1963 /* Queue all fragments atomically */
1964 spin_lock(&queue->lock);
1965
1966 __skb_queue_tail(queue, skb);
1967
1968 flags &= ~ACL_START;
1969 flags |= ACL_CONT;
1970 do {
1971 skb = list; list = list->next;
1972
1973 skb->dev = (void *) hdev;
1974 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1975 hci_add_acl_hdr(skb, conn->handle, flags);
1976
1977 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1978
1979 __skb_queue_tail(queue, skb);
1980 } while (list);
1981
1982 spin_unlock(&queue->lock);
1983 }
1984 }
1985
1986 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
1987 {
1988 struct hci_conn *conn = chan->conn;
1989 struct hci_dev *hdev = conn->hdev;
1990
1991 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
1992
1993 skb->dev = (void *) hdev;
1994 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1995 hci_add_acl_hdr(skb, conn->handle, flags);
1996
1997 hci_queue_acl(conn, &chan->data_q, skb, flags);
1998
1999 queue_work(hdev->workqueue, &hdev->tx_work);
2000 }
2001 EXPORT_SYMBOL(hci_send_acl);
2002
2003 /* Send SCO data */
2004 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2005 {
2006 struct hci_dev *hdev = conn->hdev;
2007 struct hci_sco_hdr hdr;
2008
2009 BT_DBG("%s len %d", hdev->name, skb->len);
2010
2011 hdr.handle = cpu_to_le16(conn->handle);
2012 hdr.dlen = skb->len;
2013
2014 skb_push(skb, HCI_SCO_HDR_SIZE);
2015 skb_reset_transport_header(skb);
2016 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2017
2018 skb->dev = (void *) hdev;
2019 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2020
2021 skb_queue_tail(&conn->data_q, skb);
2022 queue_work(hdev->workqueue, &hdev->tx_work);
2023 }
2024 EXPORT_SYMBOL(hci_send_sco);
2025
2026 /* ---- HCI TX task (outgoing data) ---- */
2027
2028 /* HCI Connection scheduler */
2029 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2030 {
2031 struct hci_conn_hash *h = &hdev->conn_hash;
2032 struct hci_conn *conn = NULL, *c;
2033 int num = 0, min = ~0;
2034
2035 /* We don't have to lock device here. Connections are always
2036 * added and removed with TX task disabled. */
2037
2038 rcu_read_lock();
2039
2040 list_for_each_entry_rcu(c, &h->list, list) {
2041 if (c->type != type || skb_queue_empty(&c->data_q))
2042 continue;
2043
2044 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2045 continue;
2046
2047 num++;
2048
2049 if (c->sent < min) {
2050 min = c->sent;
2051 conn = c;
2052 }
2053
2054 if (hci_conn_num(hdev, type) == num)
2055 break;
2056 }
2057
2058 rcu_read_unlock();
2059
2060 if (conn) {
2061 int cnt, q;
2062
2063 switch (conn->type) {
2064 case ACL_LINK:
2065 cnt = hdev->acl_cnt;
2066 break;
2067 case SCO_LINK:
2068 case ESCO_LINK:
2069 cnt = hdev->sco_cnt;
2070 break;
2071 case LE_LINK:
2072 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2073 break;
2074 default:
2075 cnt = 0;
2076 BT_ERR("Unknown link type");
2077 }
2078
2079 q = cnt / num;
2080 *quote = q ? q : 1;
2081 } else
2082 *quote = 0;
2083
2084 BT_DBG("conn %p quote %d", conn, *quote);
2085 return conn;
2086 }
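/* Example of the quota math above: with hdev->acl_cnt == 8 and three ACL
 * connections that have queued data, the selected connection (the one with
 * the fewest packets in flight) gets a quote of 8 / 3 = 2, and the quote
 * never drops below 1.
 */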
2087
2088 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2089 {
2090 struct hci_conn_hash *h = &hdev->conn_hash;
2091 struct hci_conn *c;
2092
2093 BT_ERR("%s link tx timeout", hdev->name);
2094
2095 rcu_read_lock();
2096
2097 /* Kill stalled connections */
2098 list_for_each_entry_rcu(c, &h->list, list) {
2099 if (c->type == type && c->sent) {
2100 BT_ERR("%s killing stalled connection %s",
2101 hdev->name, batostr(&c->dst));
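/* 0x13: Remote User Terminated Connection */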
2102 hci_acl_disconn(c, 0x13);
2103 }
2104 }
2105
2106 rcu_read_unlock();
2107 }
2108
2109 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2110 int *quote)
2111 {
2112 struct hci_conn_hash *h = &hdev->conn_hash;
2113 struct hci_chan *chan = NULL;
2114 int num = 0, min = ~0, cur_prio = 0;
2115 struct hci_conn *conn;
2116 int cnt, q, conn_num = 0;
2117
2118 BT_DBG("%s", hdev->name);
2119
2120 rcu_read_lock();
2121
2122 list_for_each_entry_rcu(conn, &h->list, list) {
2123 struct hci_chan *tmp;
2124
2125 if (conn->type != type)
2126 continue;
2127
2128 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2129 continue;
2130
2131 conn_num++;
2132
2133 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2134 struct sk_buff *skb;
2135
2136 if (skb_queue_empty(&tmp->data_q))
2137 continue;
2138
2139 skb = skb_peek(&tmp->data_q);
2140 if (skb->priority < cur_prio)
2141 continue;
2142
2143 if (skb->priority > cur_prio) {
2144 num = 0;
2145 min = ~0;
2146 cur_prio = skb->priority;
2147 }
2148
2149 num++;
2150
2151 if (conn->sent < min) {
2152 min = conn->sent;
2153 chan = tmp;
2154 }
2155 }
2156
2157 if (hci_conn_num(hdev, type) == conn_num)
2158 break;
2159 }
2160
2161 rcu_read_unlock();
2162
2163 if (!chan)
2164 return NULL;
2165
2166 switch (chan->conn->type) {
2167 case ACL_LINK:
2168 cnt = hdev->acl_cnt;
2169 break;
2170 case SCO_LINK:
2171 case ESCO_LINK:
2172 cnt = hdev->sco_cnt;
2173 break;
2174 case LE_LINK:
2175 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2176 break;
2177 default:
2178 cnt = 0;
2179 BT_ERR("Unknown link type");
2180 }
2181
2182 q = cnt / num;
2183 *quote = q ? q : 1;
2184 BT_DBG("chan %p quote %d", chan, *quote);
2185 return chan;
2186 }
2187
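/*
 * Anti-starvation pass, run after a scheduling round that actually sent
 * data.  Channels that transmitted this round simply get their counter
 * reset; channels that are still waiting have the skb at the head of their
 * queue promoted to HCI_PRIO_MAX - 1 so they win the next round.
 */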
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

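/*
 * ACL scheduler.  While the controller still reports free ACL buffers
 * (hdev->acl_cnt), pick the best channel with hci_chan_sent() and drain up
 * to its quota, stopping a channel's run early if the priority at the head
 * of its queue drops.  If nothing has been acknowledged for 45 seconds the
 * link is considered stalled and hci_link_tx_to() tears it down.
 */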
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

/* Schedule SCO */
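/*
 * SCO and eSCO scheduling: both walk the connections with hci_low_sent()
 * and dequeue up to the per-connection quota while the controller still
 * reports free synchronous buffers (hdev->sco_cnt).  conn->sent is only a
 * wrap-around counter here, and there is no priority handling for
 * synchronous links.
 */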
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

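/*
 * LE scheduler.  Controllers that advertise a dedicated LE buffer pool
 * (hdev->le_pkts) are throttled by hdev->le_cnt; controllers without one
 * share the ACL pool, so the loop borrows and updates acl_cnt instead.
 * Otherwise the logic mirrors hci_sched_acl(): per-channel quotas, stop on
 * a priority drop, and a priority recalculation pass if anything was sent.
 */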
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

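/*
 * Workqueue handler behind hdev->tx_work: runs the ACL, SCO, eSCO and LE
 * schedulers in turn, then drains hdev->raw_q of any raw (unknown-type)
 * packets that bypass the schedulers.
 */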
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
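/*
 * Strip the ACL header, split the 16-bit field into connection handle and
 * packet-boundary/broadcast flags, and hand the payload to L2CAP.  Packets
 * for an unknown handle are logged and dropped.
 */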
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
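/*
 * Same idea for SCO: strip the header, look the connection up by handle
 * and pass the payload to the SCO socket layer, dropping frames for
 * unknown handles.
 */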
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

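/*
 * Workqueue handler behind hdev->rx_work.  Each received frame is first
 * copied to HCI sockets when the device is in promiscuous mode, then
 * dispatched by packet type: events go to hci_event_packet(), data goes to
 * the ACL/SCO handlers above.  Everything is dropped in raw mode, and data
 * packets are dropped while the device is still initializing.
 */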
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

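/*
 * Command scheduler.  At most hdev->cmd_cnt commands may be outstanding at
 * the controller; a clone of the frame is kept in hdev->sent_cmd so the
 * matching Command Complete/Status handling can refer back to it.  The
 * command timer is re-armed per command unless a reset is pending, and on
 * allocation failure the command is requeued and the work rescheduled.
 */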
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

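/*
 * Issue an HCI Inquiry with the General Inquiry Access Code (GIAC,
 * 0x9e8b33, stored little-endian below) for the requested duration, which
 * the controller interprets in units of 1.28 s.  Fails with -EINPROGRESS
 * if an inquiry is already running.
 */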
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

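/*
 * Abort a running inquiry with Inquiry Cancel; -EPERM is returned when no
 * inquiry is in progress.
 */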
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EPERM;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}

module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");