/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

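/* Initiate an outgoing LE connection as master, using the default scan
 * and connection parameters below */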
static void hci_le_connect(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_le_create_conn cp;

        conn->state = BT_CONNECT;
        conn->out = 1;
        conn->link_mode |= HCI_LM_MASTER;
        conn->sec_level = BT_SECURITY_LOW;

        memset(&cp, 0, sizeof(cp));
        cp.scan_interval = cpu_to_le16(0x0060);
        cp.scan_window = cpu_to_le16(0x0030);
        bacpy(&cp.peer_addr, &conn->dst);
        cp.peer_addr_type = conn->dst_type;
        cp.conn_interval_min = cpu_to_le16(0x0028);
        cp.conn_interval_max = cpu_to_le16(0x0038);
        cp.supervision_timeout = cpu_to_le16(0x002a);
        cp.min_ce_len = cpu_to_le16(0x0000);
        cp.max_ce_len = cpu_to_le16(0x0000);

        hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
}

static void hci_le_connect_cancel(struct hci_conn *conn)
{
        hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
}

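/* Initiate an outgoing ACL connection, reusing cached inquiry data
 * (page scan mode, clock offset, device class) when it is still fresh */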
void hci_acl_connect(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;
        struct inquiry_entry *ie;
        struct hci_cp_create_conn cp;

        BT_DBG("%p", conn);

        conn->state = BT_CONNECT;
        conn->out = 1;

        conn->link_mode = HCI_LM_MASTER;

        conn->attempt++;

        conn->link_policy = hdev->link_policy;

        memset(&cp, 0, sizeof(cp));
        bacpy(&cp.bdaddr, &conn->dst);
        cp.pscan_rep_mode = 0x02;

        ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
        if (ie) {
                if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
                        cp.pscan_rep_mode = ie->data.pscan_rep_mode;
                        cp.pscan_mode = ie->data.pscan_mode;
                        cp.clock_offset = ie->data.clock_offset |
                                          cpu_to_le16(0x8000);
                }

                memcpy(conn->dev_class, ie->data.dev_class, 3);
                conn->ssp_mode = ie->data.ssp_mode;
        }

        cp.pkt_type = cpu_to_le16(conn->pkt_type);
        if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
                cp.role_switch = 0x01;
        else
                cp.role_switch = 0x00;

        hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}

static void hci_acl_connect_cancel(struct hci_conn *conn)
{
        struct hci_cp_create_conn_cancel cp;

        BT_DBG("%p", conn);

        if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        bacpy(&cp.bdaddr, &conn->dst);
        hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}

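/* Request disconnection of an established link with the given reason */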
void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
        struct hci_cp_disconnect cp;

        BT_DBG("%p", conn);

        conn->state = BT_DISCONN;

        cp.handle = cpu_to_le16(conn->handle);
        cp.reason = reason;
        hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}

void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_add_sco cp;

        BT_DBG("%p", conn);

        conn->state = BT_CONNECT;
        conn->out = 1;

        conn->attempt++;

        cp.handle = cpu_to_le16(handle);
        cp.pkt_type = cpu_to_le16(conn->pkt_type);

        hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}

void hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_setup_sync_conn cp;

        BT_DBG("%p", conn);

        conn->state = BT_CONNECT;
        conn->out = 1;

        conn->attempt++;

        cp.handle = cpu_to_le16(handle);
        cp.pkt_type = cpu_to_le16(conn->pkt_type);

        cp.tx_bandwidth = cpu_to_le32(0x00001f40);
        cp.rx_bandwidth = cpu_to_le32(0x00001f40);
        cp.max_latency = cpu_to_le16(0xffff);
        cp.voice_setting = cpu_to_le16(hdev->voice_setting);
        cp.retrans_effort = 0xff;

        hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}

void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
                        u16 latency, u16 to_multiplier)
{
        struct hci_cp_le_conn_update cp;
        struct hci_dev *hdev = conn->hdev;

        memset(&cp, 0, sizeof(cp));

        cp.handle = cpu_to_le16(conn->handle);
        cp.conn_interval_min = cpu_to_le16(min);
        cp.conn_interval_max = cpu_to_le16(max);
        cp.conn_latency = cpu_to_le16(latency);
        cp.supervision_timeout = cpu_to_le16(to_multiplier);
        cp.min_ce_len = cpu_to_le16(0x0001);
        cp.max_ce_len = cpu_to_le16(0x0001);

        hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
}
EXPORT_SYMBOL(hci_le_conn_update);

void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
                      __u8 ltk[16])
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_le_start_enc cp;

        BT_DBG("%p", conn);

        memset(&cp, 0, sizeof(cp));

        cp.handle = cpu_to_le16(conn->handle);
        memcpy(cp.ltk, ltk, sizeof(cp.ltk));
        cp.ediv = ediv;
        memcpy(cp.rand, rand, sizeof(cp.rand));

        hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}
EXPORT_SYMBOL(hci_le_start_enc);

void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_le_ltk_reply cp;

        BT_DBG("%p", conn);

        memset(&cp, 0, sizeof(cp));

        cp.handle = cpu_to_le16(conn->handle);
        memcpy(cp.ltk, ltk, sizeof(cp.ltk));

        hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
}
EXPORT_SYMBOL(hci_le_ltk_reply);

void hci_le_ltk_neg_reply(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_le_ltk_neg_reply cp;

        BT_DBG("%p", conn);

        memset(&cp, 0, sizeof(cp));

        cp.handle = cpu_to_le16(conn->handle);

        hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(cp), &cp);
}

/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
        struct hci_conn *sco = conn->link;

        BT_DBG("%p", conn);

        if (!sco)
                return;

        if (!status) {
                if (lmp_esco_capable(conn->hdev))
                        hci_setup_sync(sco, conn->handle);
                else
                        hci_add_sco(sco, conn->handle);
        } else {
                hci_proto_connect_cfm(sco, status);
                hci_conn_del(sco);
        }
}

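/* Delayed work run once the connection is no longer referenced: cancel a
 * pending connect attempt or disconnect an established link */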
static void hci_conn_timeout(struct work_struct *work)
{
        struct hci_conn *conn = container_of(work, struct hci_conn,
                                             disc_work.work);
        struct hci_dev *hdev = conn->hdev;
        __u8 reason;

        BT_DBG("conn %p state %d", conn, conn->state);

        if (atomic_read(&conn->refcnt))
                return;

        hci_dev_lock(hdev);

        switch (conn->state) {
        case BT_CONNECT:
        case BT_CONNECT2:
                if (conn->out) {
                        if (conn->type == ACL_LINK)
                                hci_acl_connect_cancel(conn);
                        else if (conn->type == LE_LINK)
                                hci_le_connect_cancel(conn);
                }
                break;
        case BT_CONFIG:
        case BT_CONNECTED:
                reason = hci_proto_disconn_ind(conn);
                hci_acl_disconn(conn, reason);
                break;
        default:
                conn->state = BT_CLOSED;
                break;
        }

        hci_dev_unlock(hdev);
}

/* Enter sniff mode */
static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("conn %p mode %d", conn, conn->mode);

        if (test_bit(HCI_RAW, &hdev->flags))
                return;

        if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
                return;

        if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
                return;

        if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
                struct hci_cp_sniff_subrate cp;
                cp.handle = cpu_to_le16(conn->handle);
                cp.max_latency = cpu_to_le16(0);
                cp.min_remote_timeout = cpu_to_le16(0);
                cp.min_local_timeout = cpu_to_le16(0);
                hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
        }

        if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
                struct hci_cp_sniff_mode cp;
                cp.handle = cpu_to_le16(conn->handle);
                cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
                cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
                cp.attempt = cpu_to_le16(4);
                cp.timeout = cpu_to_le16(1);
                hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
        }
}

static void hci_conn_idle(unsigned long arg)
{
        struct hci_conn *conn = (void *) arg;

        BT_DBG("conn %p mode %d", conn, conn->mode);

        hci_conn_enter_sniff_mode(conn);
}

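/* Auto-accept timer: confirm a pending user confirmation request */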
static void hci_conn_auto_accept(unsigned long arg)
{
        struct hci_conn *conn = (void *) arg;
        struct hci_dev *hdev = conn->hdev;

        hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
                     &conn->dst);
}

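/* Allocate and initialize a new connection object and add it to the
 * device's connection hash */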
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
        struct hci_conn *conn;

        BT_DBG("%s dst %s", hdev->name, batostr(dst));

        conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC);
        if (!conn)
                return NULL;

        bacpy(&conn->dst, dst);
        conn->hdev = hdev;
        conn->type = type;
        conn->mode = HCI_CM_ACTIVE;
        conn->state = BT_OPEN;
        conn->auth_type = HCI_AT_GENERAL_BONDING;
        conn->io_capability = hdev->io_capability;
        conn->remote_auth = 0xff;
        conn->key_type = 0xff;

        conn->power_save = 1;
        conn->disc_timeout = HCI_DISCONN_TIMEOUT;

        switch (type) {
        case ACL_LINK:
                conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
                break;
        case SCO_LINK:
                if (lmp_esco_capable(hdev))
                        conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
                                         (hdev->esco_type & EDR_ESCO_MASK);
                else
                        conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
                break;
        case ESCO_LINK:
                conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
                break;
        }

        skb_queue_head_init(&conn->data_q);

        INIT_LIST_HEAD(&conn->chan_list);

        INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
        setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
        setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
                    (unsigned long) conn);

        atomic_set(&conn->refcnt, 0);

        hci_dev_hold(hdev);

        hci_conn_hash_add(hdev, conn);
        if (hdev->notify)
                hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

        atomic_set(&conn->devref, 0);

        hci_conn_init_sysfs(conn);

        return conn;
}

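/* Tear down a connection: stop its timers, return unacked packet credits
 * to the controller counters and drop the held references */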
int hci_conn_del(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);

        del_timer(&conn->idle_timer);

        cancel_delayed_work_sync(&conn->disc_work);

        del_timer(&conn->auto_accept_timer);

        if (conn->type == ACL_LINK) {
                struct hci_conn *sco = conn->link;
                if (sco)
                        sco->link = NULL;

                /* Unacked frames */
                hdev->acl_cnt += conn->sent;
        } else if (conn->type == LE_LINK) {
                if (hdev->le_pkts)
                        hdev->le_cnt += conn->sent;
                else
                        hdev->acl_cnt += conn->sent;
        } else {
                struct hci_conn *acl = conn->link;
                if (acl) {
                        acl->link = NULL;
                        hci_conn_put(acl);
                }
        }

        hci_chan_list_flush(conn);

        hci_conn_hash_del(hdev, conn);
        if (hdev->notify)
                hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

        skb_queue_purge(&conn->data_q);

        hci_conn_put_device(conn);

        hci_dev_put(hdev);

        if (conn->handle == 0)
                kfree(conn);

        return 0;
}

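/* Pick a local adapter for routing towards dst; a reference to the
 * returned device is held */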
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
        int use_src = bacmp(src, BDADDR_ANY);
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%s -> %s", batostr(src), batostr(dst));

        read_lock(&hci_dev_list_lock);

        list_for_each_entry(d, &hci_dev_list, list) {
                if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
                        continue;

                /* Simple routing:
                 * No source address - find interface with bdaddr != dst
                 * Source address - find interface with bdaddr == src
                 */

                if (use_src) {
                        if (!bacmp(&d->bdaddr, src)) {
                                hdev = d; break;
                        }
                } else {
                        if (bacmp(&d->bdaddr, dst)) {
                                hdev = d; break;
                        }
                }
        }

        if (hdev)
                hdev = hci_dev_hold(hdev);

        read_unlock(&hci_dev_list_lock);
        return hdev;
}
EXPORT_SYMBOL(hci_get_route);

/* Create SCO, ACL or LE connection.
 * Device _must_ be locked */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
{
        struct hci_conn *acl;
        struct hci_conn *sco;
        struct hci_conn *le;

        BT_DBG("%s dst %s", hdev->name, batostr(dst));

        if (type == LE_LINK) {
                struct adv_entry *entry;

                le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
                if (le)
                        return ERR_PTR(-EBUSY);

                entry = hci_find_adv_entry(hdev, dst);
                if (!entry)
                        return ERR_PTR(-EHOSTUNREACH);

                le = hci_conn_add(hdev, LE_LINK, dst);
                if (!le)
                        return ERR_PTR(-ENOMEM);

                le->dst_type = entry->bdaddr_type;

                hci_le_connect(le);

                hci_conn_hold(le);

                return le;
        }

        acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
        if (!acl) {
                acl = hci_conn_add(hdev, ACL_LINK, dst);
                if (!acl)
                        return NULL;
        }

        hci_conn_hold(acl);

        if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
                acl->sec_level = BT_SECURITY_LOW;
                acl->pending_sec_level = sec_level;
                acl->auth_type = auth_type;
                hci_acl_connect(acl);
        }

        if (type == ACL_LINK)
                return acl;

        sco = hci_conn_hash_lookup_ba(hdev, type, dst);
        if (!sco) {
                sco = hci_conn_add(hdev, type, dst);
                if (!sco) {
                        hci_conn_put(acl);
                        return NULL;
                }
        }

        acl->link = sco;
        sco->link = acl;

        hci_conn_hold(sco);

        if (acl->state == BT_CONNECTED &&
            (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
                acl->power_save = 1;
                hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

                if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->pend)) {
                        /* defer SCO setup until mode change completed */
                        set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->pend);
                        return sco;
                }

                hci_sco_setup(acl, 0x00);
        }

        return sco;
}
EXPORT_SYMBOL(hci_connect);

/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
        BT_DBG("conn %p", conn);

        if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0 &&
            !(conn->link_mode & HCI_LM_ENCRYPT))
                return 0;

        return 1;
}
EXPORT_SYMBOL(hci_conn_check_link_mode);

/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
        BT_DBG("conn %p", conn);

        if (conn->pending_sec_level > sec_level)
                sec_level = conn->pending_sec_level;

        if (sec_level > conn->sec_level)
                conn->pending_sec_level = sec_level;
        else if (conn->link_mode & HCI_LM_AUTH)
                return 1;

        /* Make sure we preserve an existing MITM requirement */
        auth_type |= (conn->auth_type & 0x01);

        conn->auth_type = auth_type;

        if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
                struct hci_cp_auth_requested cp;

                /* encrypt must be pending if auth is also pending */
                set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);

                cp.handle = cpu_to_le16(conn->handle);
                hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
                             sizeof(cp), &cp);
                if (conn->key_type != 0xff)
                        set_bit(HCI_CONN_REAUTH_PEND, &conn->pend);
        }

        return 0;
}

/* Encrypt the link */
static void hci_conn_encrypt(struct hci_conn *conn)
{
        BT_DBG("conn %p", conn);

        if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
                struct hci_cp_set_conn_encrypt cp;
                cp.handle = cpu_to_le16(conn->handle);
                cp.encrypt = 0x01;
                hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
                             &cp);
        }
}

/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
        BT_DBG("conn %p", conn);

        /* For sdp we don't need the link key. */
        if (sec_level == BT_SECURITY_SDP)
                return 1;

        /* For non 2.1 devices and low security level we don't need the link
           key. */
        if (sec_level == BT_SECURITY_LOW &&
            (!conn->ssp_mode || !conn->hdev->ssp_mode))
                return 1;

        /* For other security levels we need the link key. */
        if (!(conn->link_mode & HCI_LM_AUTH))
                goto auth;

        /* An authenticated combination key has sufficient security for any
           security level. */
        if (conn->key_type == HCI_LK_AUTH_COMBINATION)
                goto encrypt;

        /* An unauthenticated combination key has sufficient security for
           security level 1 and 2. */
        if (conn->key_type == HCI_LK_UNAUTH_COMBINATION &&
            (sec_level == BT_SECURITY_MEDIUM ||
             sec_level == BT_SECURITY_LOW))
                goto encrypt;

        /* A combination key has always sufficient security for the security
           levels 1 or 2. High security level requires the combination key
           is generated using maximum PIN code length (16).
           For pre 2.1 units. */
        if (conn->key_type == HCI_LK_COMBINATION &&
            (sec_level != BT_SECURITY_HIGH ||
             conn->pin_length == 16))
                goto encrypt;

auth:
        if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
                return 0;

        if (!hci_conn_auth(conn, sec_level, auth_type))
                return 0;

encrypt:
        if (conn->link_mode & HCI_LM_ENCRYPT)
                return 1;

        hci_conn_encrypt(conn);
        return 0;
}
EXPORT_SYMBOL(hci_conn_security);

/* Check secure link requirement */
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
{
        BT_DBG("conn %p", conn);

        if (sec_level != BT_SECURITY_HIGH)
                return 1; /* Accept if non-secure is required */

        if (conn->sec_level == BT_SECURITY_HIGH)
                return 1;

        return 0; /* Reject not secure link */
}
EXPORT_SYMBOL(hci_conn_check_secure);

/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
        BT_DBG("conn %p", conn);

        if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
                struct hci_cp_change_conn_link_key cp;
                cp.handle = cpu_to_le16(conn->handle);
                hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
                             sizeof(cp), &cp);
        }

        return 0;
}
EXPORT_SYMBOL(hci_conn_change_link_key);

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
        BT_DBG("conn %p", conn);

        if (!role && conn->link_mode & HCI_LM_MASTER)
                return 1;

        if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->pend)) {
                struct hci_cp_switch_role cp;
                bacpy(&cp.bdaddr, &conn->dst);
                cp.role = role;
                hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
        }

        return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("conn %p mode %d", conn, conn->mode);

        if (test_bit(HCI_RAW, &hdev->flags))
                return;

        if (conn->mode != HCI_CM_SNIFF)
                goto timer;

        if (!conn->power_save && !force_active)
                goto timer;

        if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
                struct hci_cp_exit_sniff_mode cp;
                cp.handle = cpu_to_le16(conn->handle);
                hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
        }

timer:
        if (hdev->idle_timeout > 0)
                mod_timer(&conn->idle_timer,
                          jiffies + msecs_to_jiffies(hdev->idle_timeout));
}

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *c;

        BT_DBG("hdev %s", hdev->name);

        list_for_each_entry_rcu(c, &h->list, list) {
                c->state = BT_CLOSED;

                hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
                hci_conn_del(c);
        }
}

/* Check pending connect attempts */
void hci_conn_check_pending(struct hci_dev *hdev)
{
        struct hci_conn *conn;

        BT_DBG("hdev %s", hdev->name);

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
        if (conn)
                hci_acl_connect(conn);

        hci_dev_unlock(hdev);
}

void hci_conn_hold_device(struct hci_conn *conn)
{
        atomic_inc(&conn->devref);
}
EXPORT_SYMBOL(hci_conn_hold_device);

void hci_conn_put_device(struct hci_conn *conn)
{
        if (atomic_dec_and_test(&conn->devref))
                hci_conn_del_sysfs(conn);
}
EXPORT_SYMBOL(hci_conn_put_device);

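/* Copy the list of active connections of a device to userspace */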
int hci_get_conn_list(void __user *arg)
{
        register struct hci_conn *c;
        struct hci_conn_list_req req, *cl;
        struct hci_conn_info *ci;
        struct hci_dev *hdev;
        int n = 0, size, err;

        if (copy_from_user(&req, arg, sizeof(req)))
                return -EFAULT;

        if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
                return -EINVAL;

        size = sizeof(req) + req.conn_num * sizeof(*ci);

        cl = kmalloc(size, GFP_KERNEL);
        if (!cl)
                return -ENOMEM;

        hdev = hci_dev_get(req.dev_id);
        if (!hdev) {
                kfree(cl);
                return -ENODEV;
        }

        ci = cl->conn_info;

        hci_dev_lock(hdev);
        list_for_each_entry(c, &hdev->conn_hash.list, list) {
                bacpy(&(ci + n)->bdaddr, &c->dst);
                (ci + n)->handle = c->handle;
                (ci + n)->type = c->type;
                (ci + n)->out = c->out;
                (ci + n)->state = c->state;
                (ci + n)->link_mode = c->link_mode;
                if (++n >= req.conn_num)
                        break;
        }
        hci_dev_unlock(hdev);

        cl->dev_id = hdev->id;
        cl->conn_num = n;
        size = sizeof(req) + n * sizeof(*ci);

        hci_dev_put(hdev);

        err = copy_to_user(arg, cl, size);
        kfree(cl);

        return err ? -EFAULT : 0;
}

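/* Copy information about a single connection to userspace */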
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
        struct hci_conn_info_req req;
        struct hci_conn_info ci;
        struct hci_conn *conn;
        char __user *ptr = arg + sizeof(req);

        if (copy_from_user(&req, arg, sizeof(req)))
                return -EFAULT;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
        if (conn) {
                bacpy(&ci.bdaddr, &conn->dst);
                ci.handle = conn->handle;
                ci.type = conn->type;
                ci.out = conn->out;
                ci.state = conn->state;
                ci.link_mode = conn->link_mode;
        }
        hci_dev_unlock(hdev);

        if (!conn)
                return -ENOENT;

        return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}

int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
        struct hci_auth_info_req req;
        struct hci_conn *conn;

        if (copy_from_user(&req, arg, sizeof(req)))
                return -EFAULT;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
        if (conn)
                req.type = conn->auth_type;
        hci_dev_unlock(hdev);

        if (!conn)
                return -ENOENT;

        return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}

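/* Allocate a new channel and attach it to the connection's channel list */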
struct hci_chan *hci_chan_create(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_chan *chan;

        BT_DBG("%s conn %p", hdev->name, conn);

        chan = kzalloc(sizeof(struct hci_chan), GFP_ATOMIC);
        if (!chan)
                return NULL;

        chan->conn = conn;
        skb_queue_head_init(&chan->data_q);

        list_add_rcu(&chan->list, &conn->chan_list);

        return chan;
}

int hci_chan_del(struct hci_chan *chan)
{
        struct hci_conn *conn = chan->conn;
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("%s conn %p chan %p", hdev->name, conn, chan);

        list_del_rcu(&chan->list);

        synchronize_rcu();

        skb_queue_purge(&chan->data_q);
        kfree(chan);

        return 0;
}

void hci_chan_list_flush(struct hci_conn *conn)
{
        struct hci_chan *chan;

        BT_DBG("conn %p", conn);

        list_for_each_entry_rcu(chan, &conn->chan_list, list)
                hci_chan_del(chan);
}