1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/errno.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/poll.h>
34 #include <linux/fcntl.h>
35 #include <linux/init.h>
36 #include <linux/skbuff.h>
37 #include <linux/interrupt.h>
38 #include <linux/notifier.h>
39 #include <net/sock.h>
40
41 #include <asm/system.h>
42 #include <linux/uaccess.h>
43 #include <asm/unaligned.h>
44
45 #include <net/bluetooth/bluetooth.h>
46 #include <net/bluetooth/hci_core.h>
47
48 static bool enable_le;
49
50 /* Handle HCI Event packets */
51
hci_cc_inquiry_cancel(struct hci_dev * hdev,struct sk_buff * skb)52 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
53 {
54 __u8 status = *((__u8 *) skb->data);
55
56 BT_DBG("%s status 0x%x", hdev->name, status);
57
58 if (status) {
59 hci_dev_lock(hdev);
60 mgmt_stop_discovery_failed(hdev, status);
61 hci_dev_unlock(hdev);
62 return;
63 }
64
65 clear_bit(HCI_INQUIRY, &hdev->flags);
66
67 hci_dev_lock(hdev);
68 mgmt_discovering(hdev, 0);
69 hci_dev_unlock(hdev);
70
71 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
72
73 hci_conn_check_pending(hdev);
74 }
75
hci_cc_exit_periodic_inq(struct hci_dev * hdev,struct sk_buff * skb)76 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
77 {
78 __u8 status = *((__u8 *) skb->data);
79
80 BT_DBG("%s status 0x%x", hdev->name, status);
81
82 if (status)
83 return;
84
85 hci_conn_check_pending(hdev);
86 }
87
hci_cc_remote_name_req_cancel(struct hci_dev * hdev,struct sk_buff * skb)88 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
89 {
90 BT_DBG("%s", hdev->name);
91 }
92
hci_cc_role_discovery(struct hci_dev * hdev,struct sk_buff * skb)93 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
94 {
95 struct hci_rp_role_discovery *rp = (void *) skb->data;
96 struct hci_conn *conn;
97
98 BT_DBG("%s status 0x%x", hdev->name, rp->status);
99
100 if (rp->status)
101 return;
102
103 hci_dev_lock(hdev);
104
105 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
106 if (conn) {
107 if (rp->role)
108 conn->link_mode &= ~HCI_LM_MASTER;
109 else
110 conn->link_mode |= HCI_LM_MASTER;
111 }
112
113 hci_dev_unlock(hdev);
114 }
115
hci_cc_read_link_policy(struct hci_dev * hdev,struct sk_buff * skb)116 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
117 {
118 struct hci_rp_read_link_policy *rp = (void *) skb->data;
119 struct hci_conn *conn;
120
121 BT_DBG("%s status 0x%x", hdev->name, rp->status);
122
123 if (rp->status)
124 return;
125
126 hci_dev_lock(hdev);
127
128 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
129 if (conn)
130 conn->link_policy = __le16_to_cpu(rp->policy);
131
132 hci_dev_unlock(hdev);
133 }
134
hci_cc_write_link_policy(struct hci_dev * hdev,struct sk_buff * skb)135 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
136 {
137 struct hci_rp_write_link_policy *rp = (void *) skb->data;
138 struct hci_conn *conn;
139 void *sent;
140
141 BT_DBG("%s status 0x%x", hdev->name, rp->status);
142
143 if (rp->status)
144 return;
145
146 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
147 if (!sent)
148 return;
149
150 hci_dev_lock(hdev);
151
152 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
153 if (conn)
154 conn->link_policy = get_unaligned_le16(sent + 2);
155
156 hci_dev_unlock(hdev);
157 }
158
hci_cc_read_def_link_policy(struct hci_dev * hdev,struct sk_buff * skb)159 static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
160 {
161 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
162
163 BT_DBG("%s status 0x%x", hdev->name, rp->status);
164
165 if (rp->status)
166 return;
167
168 hdev->link_policy = __le16_to_cpu(rp->policy);
169 }
170
hci_cc_write_def_link_policy(struct hci_dev * hdev,struct sk_buff * skb)171 static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
172 {
173 __u8 status = *((__u8 *) skb->data);
174 void *sent;
175
176 BT_DBG("%s status 0x%x", hdev->name, status);
177
178 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
179 if (!sent)
180 return;
181
182 if (!status)
183 hdev->link_policy = get_unaligned_le16(sent);
184
185 hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
186 }
187
hci_cc_reset(struct hci_dev * hdev,struct sk_buff * skb)188 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
189 {
190 __u8 status = *((__u8 *) skb->data);
191
192 BT_DBG("%s status 0x%x", hdev->name, status);
193
194 clear_bit(HCI_RESET, &hdev->flags);
195
196 hci_req_complete(hdev, HCI_OP_RESET, status);
197
198 hdev->dev_flags = 0;
199 }
200
hci_cc_write_local_name(struct hci_dev * hdev,struct sk_buff * skb)201 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
202 {
203 __u8 status = *((__u8 *) skb->data);
204 void *sent;
205
206 BT_DBG("%s status 0x%x", hdev->name, status);
207
208 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
209 if (!sent)
210 return;
211
212 hci_dev_lock(hdev);
213
214 if (test_bit(HCI_MGMT, &hdev->flags))
215 mgmt_set_local_name_complete(hdev, sent, status);
216
217 if (status == 0)
218 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
219
220 hci_dev_unlock(hdev);
221 }
222
hci_cc_read_local_name(struct hci_dev * hdev,struct sk_buff * skb)223 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
224 {
225 struct hci_rp_read_local_name *rp = (void *) skb->data;
226
227 BT_DBG("%s status 0x%x", hdev->name, rp->status);
228
229 if (rp->status)
230 return;
231
232 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
233 }
234
hci_cc_write_auth_enable(struct hci_dev * hdev,struct sk_buff * skb)235 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
236 {
237 __u8 status = *((__u8 *) skb->data);
238 void *sent;
239
240 BT_DBG("%s status 0x%x", hdev->name, status);
241
242 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
243 if (!sent)
244 return;
245
246 if (!status) {
247 __u8 param = *((__u8 *) sent);
248
249 if (param == AUTH_ENABLED)
250 set_bit(HCI_AUTH, &hdev->flags);
251 else
252 clear_bit(HCI_AUTH, &hdev->flags);
253 }
254
255 hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
256 }
257
hci_cc_write_encrypt_mode(struct hci_dev * hdev,struct sk_buff * skb)258 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
259 {
260 __u8 status = *((__u8 *) skb->data);
261 void *sent;
262
263 BT_DBG("%s status 0x%x", hdev->name, status);
264
265 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
266 if (!sent)
267 return;
268
269 if (!status) {
270 __u8 param = *((__u8 *) sent);
271
272 if (param)
273 set_bit(HCI_ENCRYPT, &hdev->flags);
274 else
275 clear_bit(HCI_ENCRYPT, &hdev->flags);
276 }
277
278 hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
279 }
280
hci_cc_write_scan_enable(struct hci_dev * hdev,struct sk_buff * skb)281 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
282 {
283 __u8 param, status = *((__u8 *) skb->data);
284 int old_pscan, old_iscan;
285 void *sent;
286
287 BT_DBG("%s status 0x%x", hdev->name, status);
288
289 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
290 if (!sent)
291 return;
292
293 param = *((__u8 *) sent);
294
295 hci_dev_lock(hdev);
296
297 if (status != 0) {
298 mgmt_write_scan_failed(hdev, param, status);
299 hdev->discov_timeout = 0;
300 goto done;
301 }
302
303 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
304 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
305
306 if (param & SCAN_INQUIRY) {
307 set_bit(HCI_ISCAN, &hdev->flags);
308 if (!old_iscan)
309 mgmt_discoverable(hdev, 1);
310 if (hdev->discov_timeout > 0) {
311 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
312 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
313 to);
314 }
315 } else if (old_iscan)
316 mgmt_discoverable(hdev, 0);
317
318 if (param & SCAN_PAGE) {
319 set_bit(HCI_PSCAN, &hdev->flags);
320 if (!old_pscan)
321 mgmt_connectable(hdev, 1);
322 } else if (old_pscan)
323 mgmt_connectable(hdev, 0);
324
325 done:
326 hci_dev_unlock(hdev);
327 hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
328 }
329
hci_cc_read_class_of_dev(struct hci_dev * hdev,struct sk_buff * skb)330 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
331 {
332 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
333
334 BT_DBG("%s status 0x%x", hdev->name, rp->status);
335
336 if (rp->status)
337 return;
338
339 memcpy(hdev->dev_class, rp->dev_class, 3);
340
341 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
342 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
343 }
344
hci_cc_write_class_of_dev(struct hci_dev * hdev,struct sk_buff * skb)345 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
346 {
347 __u8 status = *((__u8 *) skb->data);
348 void *sent;
349
350 BT_DBG("%s status 0x%x", hdev->name, status);
351
352 if (status)
353 return;
354
355 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
356 if (!sent)
357 return;
358
359 memcpy(hdev->dev_class, sent, 3);
360 }
361
hci_cc_read_voice_setting(struct hci_dev * hdev,struct sk_buff * skb)362 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
363 {
364 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
365 __u16 setting;
366
367 BT_DBG("%s status 0x%x", hdev->name, rp->status);
368
369 if (rp->status)
370 return;
371
372 setting = __le16_to_cpu(rp->voice_setting);
373
374 if (hdev->voice_setting == setting)
375 return;
376
377 hdev->voice_setting = setting;
378
379 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
380
381 if (hdev->notify)
382 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
383 }
384
hci_cc_write_voice_setting(struct hci_dev * hdev,struct sk_buff * skb)385 static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
386 {
387 __u8 status = *((__u8 *) skb->data);
388 __u16 setting;
389 void *sent;
390
391 BT_DBG("%s status 0x%x", hdev->name, status);
392
393 if (status)
394 return;
395
396 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
397 if (!sent)
398 return;
399
400 setting = get_unaligned_le16(sent);
401
402 if (hdev->voice_setting == setting)
403 return;
404
405 hdev->voice_setting = setting;
406
407 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
408
409 if (hdev->notify)
410 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
411 }
412
hci_cc_host_buffer_size(struct hci_dev * hdev,struct sk_buff * skb)413 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
414 {
415 __u8 status = *((__u8 *) skb->data);
416
417 BT_DBG("%s status 0x%x", hdev->name, status);
418
419 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
420 }
421
hci_cc_read_ssp_mode(struct hci_dev * hdev,struct sk_buff * skb)422 static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
423 {
424 struct hci_rp_read_ssp_mode *rp = (void *) skb->data;
425
426 BT_DBG("%s status 0x%x", hdev->name, rp->status);
427
428 if (rp->status)
429 return;
430
431 hdev->ssp_mode = rp->mode;
432 }
433
hci_cc_write_ssp_mode(struct hci_dev * hdev,struct sk_buff * skb)434 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
435 {
436 __u8 status = *((__u8 *) skb->data);
437 void *sent;
438
439 BT_DBG("%s status 0x%x", hdev->name, status);
440
441 if (status)
442 return;
443
444 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
445 if (!sent)
446 return;
447
448 hdev->ssp_mode = *((__u8 *) sent);
449 }
450
hci_get_inquiry_mode(struct hci_dev * hdev)451 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
452 {
453 if (hdev->features[6] & LMP_EXT_INQ)
454 return 2;
455
456 if (hdev->features[3] & LMP_RSSI_INQ)
457 return 1;
458
459 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
460 hdev->lmp_subver == 0x0757)
461 return 1;
462
463 if (hdev->manufacturer == 15) {
464 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
465 return 1;
466 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
467 return 1;
468 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
469 return 1;
470 }
471
472 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
473 hdev->lmp_subver == 0x1805)
474 return 1;
475
476 return 0;
477 }
478
hci_setup_inquiry_mode(struct hci_dev * hdev)479 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
480 {
481 u8 mode;
482
483 mode = hci_get_inquiry_mode(hdev);
484
485 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
486 }
487
hci_setup_event_mask(struct hci_dev * hdev)488 static void hci_setup_event_mask(struct hci_dev *hdev)
489 {
490 /* The second byte is 0xff instead of 0x9f (two reserved bits
491 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
492 * command otherwise */
493 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
494
495 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
496 * any event mask for pre 1.2 devices */
497 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
498 return;
499
500 events[4] |= 0x01; /* Flow Specification Complete */
501 events[4] |= 0x02; /* Inquiry Result with RSSI */
502 events[4] |= 0x04; /* Read Remote Extended Features Complete */
503 events[5] |= 0x08; /* Synchronous Connection Complete */
504 events[5] |= 0x10; /* Synchronous Connection Changed */
505
506 if (hdev->features[3] & LMP_RSSI_INQ)
507 events[4] |= 0x04; /* Inquiry Result with RSSI */
508
509 if (hdev->features[5] & LMP_SNIFF_SUBR)
510 events[5] |= 0x20; /* Sniff Subrating */
511
512 if (hdev->features[5] & LMP_PAUSE_ENC)
513 events[5] |= 0x80; /* Encryption Key Refresh Complete */
514
515 if (hdev->features[6] & LMP_EXT_INQ)
516 events[5] |= 0x40; /* Extended Inquiry Result */
517
518 if (hdev->features[6] & LMP_NO_FLUSH)
519 events[7] |= 0x01; /* Enhanced Flush Complete */
520
521 if (hdev->features[7] & LMP_LSTO)
522 events[6] |= 0x80; /* Link Supervision Timeout Changed */
523
524 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
525 events[6] |= 0x01; /* IO Capability Request */
526 events[6] |= 0x02; /* IO Capability Response */
527 events[6] |= 0x04; /* User Confirmation Request */
528 events[6] |= 0x08; /* User Passkey Request */
529 events[6] |= 0x10; /* Remote OOB Data Request */
530 events[6] |= 0x20; /* Simple Pairing Complete */
531 events[7] |= 0x04; /* User Passkey Notification */
532 events[7] |= 0x08; /* Keypress Notification */
533 events[7] |= 0x10; /* Remote Host Supported
534 * Features Notification */
535 }
536
537 if (hdev->features[4] & LMP_LE)
538 events[7] |= 0x20; /* LE Meta-Event */
539
540 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
541 }
542
hci_set_le_support(struct hci_dev * hdev)543 static void hci_set_le_support(struct hci_dev *hdev)
544 {
545 struct hci_cp_write_le_host_supported cp;
546
547 memset(&cp, 0, sizeof(cp));
548
549 if (enable_le) {
550 cp.le = 1;
551 cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
552 }
553
554 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp);
555 }
556
hci_setup(struct hci_dev * hdev)557 static void hci_setup(struct hci_dev *hdev)
558 {
559 if (hdev->dev_type != HCI_BREDR)
560 return;
561
562 hci_setup_event_mask(hdev);
563
564 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
565 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
566
567 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
568 u8 mode = 0x01;
569 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
570 }
571
572 if (hdev->features[3] & LMP_RSSI_INQ)
573 hci_setup_inquiry_mode(hdev);
574
575 if (hdev->features[7] & LMP_INQ_TX_PWR)
576 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
577
578 if (hdev->features[7] & LMP_EXTFEATURES) {
579 struct hci_cp_read_local_ext_features cp;
580
581 cp.page = 0x01;
582 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
583 sizeof(cp), &cp);
584 }
585
586 if (hdev->features[4] & LMP_LE)
587 hci_set_le_support(hdev);
588 }
589
hci_cc_read_local_version(struct hci_dev * hdev,struct sk_buff * skb)590 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
591 {
592 struct hci_rp_read_local_version *rp = (void *) skb->data;
593
594 BT_DBG("%s status 0x%x", hdev->name, rp->status);
595
596 if (rp->status)
597 return;
598
599 hdev->hci_ver = rp->hci_ver;
600 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
601 hdev->lmp_ver = rp->lmp_ver;
602 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
603 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
604
605 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
606 hdev->manufacturer,
607 hdev->hci_ver, hdev->hci_rev);
608
609 if (test_bit(HCI_INIT, &hdev->flags))
610 hci_setup(hdev);
611 }
612
hci_setup_link_policy(struct hci_dev * hdev)613 static void hci_setup_link_policy(struct hci_dev *hdev)
614 {
615 u16 link_policy = 0;
616
617 if (hdev->features[0] & LMP_RSWITCH)
618 link_policy |= HCI_LP_RSWITCH;
619 if (hdev->features[0] & LMP_HOLD)
620 link_policy |= HCI_LP_HOLD;
621 if (hdev->features[0] & LMP_SNIFF)
622 link_policy |= HCI_LP_SNIFF;
623 if (hdev->features[1] & LMP_PARK)
624 link_policy |= HCI_LP_PARK;
625
626 link_policy = cpu_to_le16(link_policy);
627 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
628 sizeof(link_policy), &link_policy);
629 }
630
hci_cc_read_local_commands(struct hci_dev * hdev,struct sk_buff * skb)631 static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
632 {
633 struct hci_rp_read_local_commands *rp = (void *) skb->data;
634
635 BT_DBG("%s status 0x%x", hdev->name, rp->status);
636
637 if (rp->status)
638 goto done;
639
640 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
641
642 if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
643 hci_setup_link_policy(hdev);
644
645 done:
646 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
647 }
648
hci_cc_read_local_features(struct hci_dev * hdev,struct sk_buff * skb)649 static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
650 {
651 struct hci_rp_read_local_features *rp = (void *) skb->data;
652
653 BT_DBG("%s status 0x%x", hdev->name, rp->status);
654
655 if (rp->status)
656 return;
657
658 memcpy(hdev->features, rp->features, 8);
659
660 /* Adjust default settings according to features
661 * supported by device. */
662
663 if (hdev->features[0] & LMP_3SLOT)
664 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
665
666 if (hdev->features[0] & LMP_5SLOT)
667 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
668
669 if (hdev->features[1] & LMP_HV2) {
670 hdev->pkt_type |= (HCI_HV2);
671 hdev->esco_type |= (ESCO_HV2);
672 }
673
674 if (hdev->features[1] & LMP_HV3) {
675 hdev->pkt_type |= (HCI_HV3);
676 hdev->esco_type |= (ESCO_HV3);
677 }
678
679 if (hdev->features[3] & LMP_ESCO)
680 hdev->esco_type |= (ESCO_EV3);
681
682 if (hdev->features[4] & LMP_EV4)
683 hdev->esco_type |= (ESCO_EV4);
684
685 if (hdev->features[4] & LMP_EV5)
686 hdev->esco_type |= (ESCO_EV5);
687
688 if (hdev->features[5] & LMP_EDR_ESCO_2M)
689 hdev->esco_type |= (ESCO_2EV3);
690
691 if (hdev->features[5] & LMP_EDR_ESCO_3M)
692 hdev->esco_type |= (ESCO_3EV3);
693
694 if (hdev->features[5] & LMP_EDR_3S_ESCO)
695 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
696
697 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
698 hdev->features[0], hdev->features[1],
699 hdev->features[2], hdev->features[3],
700 hdev->features[4], hdev->features[5],
701 hdev->features[6], hdev->features[7]);
702 }
703
hci_cc_read_local_ext_features(struct hci_dev * hdev,struct sk_buff * skb)704 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
705 struct sk_buff *skb)
706 {
707 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
708
709 BT_DBG("%s status 0x%x", hdev->name, rp->status);
710
711 if (rp->status)
712 return;
713
714 switch (rp->page) {
715 case 0:
716 memcpy(hdev->features, rp->features, 8);
717 break;
718 case 1:
719 memcpy(hdev->host_features, rp->features, 8);
720 break;
721 }
722
723 hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
724 }
725
hci_cc_read_flow_control_mode(struct hci_dev * hdev,struct sk_buff * skb)726 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
727 struct sk_buff *skb)
728 {
729 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
730
731 BT_DBG("%s status 0x%x", hdev->name, rp->status);
732
733 if (rp->status)
734 return;
735
736 hdev->flow_ctl_mode = rp->mode;
737
738 hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
739 }
740
hci_cc_read_buffer_size(struct hci_dev * hdev,struct sk_buff * skb)741 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
742 {
743 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
744
745 BT_DBG("%s status 0x%x", hdev->name, rp->status);
746
747 if (rp->status)
748 return;
749
750 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
751 hdev->sco_mtu = rp->sco_mtu;
752 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
753 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
754
755 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
756 hdev->sco_mtu = 64;
757 hdev->sco_pkts = 8;
758 }
759
760 hdev->acl_cnt = hdev->acl_pkts;
761 hdev->sco_cnt = hdev->sco_pkts;
762
763 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
764 hdev->acl_mtu, hdev->acl_pkts,
765 hdev->sco_mtu, hdev->sco_pkts);
766 }
767
hci_cc_read_bd_addr(struct hci_dev * hdev,struct sk_buff * skb)768 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
769 {
770 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
771
772 BT_DBG("%s status 0x%x", hdev->name, rp->status);
773
774 if (!rp->status)
775 bacpy(&hdev->bdaddr, &rp->bdaddr);
776
777 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
778 }
779
hci_cc_read_data_block_size(struct hci_dev * hdev,struct sk_buff * skb)780 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
781 struct sk_buff *skb)
782 {
783 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
784
785 BT_DBG("%s status 0x%x", hdev->name, rp->status);
786
787 if (rp->status)
788 return;
789
790 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
791 hdev->block_len = __le16_to_cpu(rp->block_len);
792 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
793
794 hdev->block_cnt = hdev->num_blocks;
795
796 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
797 hdev->block_cnt, hdev->block_len);
798
799 hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
800 }
801
hci_cc_write_ca_timeout(struct hci_dev * hdev,struct sk_buff * skb)802 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
803 {
804 __u8 status = *((__u8 *) skb->data);
805
806 BT_DBG("%s status 0x%x", hdev->name, status);
807
808 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
809 }
810
hci_cc_read_local_amp_info(struct hci_dev * hdev,struct sk_buff * skb)811 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
812 struct sk_buff *skb)
813 {
814 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
815
816 BT_DBG("%s status 0x%x", hdev->name, rp->status);
817
818 if (rp->status)
819 return;
820
821 hdev->amp_status = rp->amp_status;
822 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
823 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
824 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
825 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
826 hdev->amp_type = rp->amp_type;
827 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
828 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
829 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
830 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
831
832 hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
833 }
834
hci_cc_delete_stored_link_key(struct hci_dev * hdev,struct sk_buff * skb)835 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
836 struct sk_buff *skb)
837 {
838 __u8 status = *((__u8 *) skb->data);
839
840 BT_DBG("%s status 0x%x", hdev->name, status);
841
842 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
843 }
844
hci_cc_set_event_mask(struct hci_dev * hdev,struct sk_buff * skb)845 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
846 {
847 __u8 status = *((__u8 *) skb->data);
848
849 BT_DBG("%s status 0x%x", hdev->name, status);
850
851 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
852 }
853
hci_cc_write_inquiry_mode(struct hci_dev * hdev,struct sk_buff * skb)854 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
855 struct sk_buff *skb)
856 {
857 __u8 status = *((__u8 *) skb->data);
858
859 BT_DBG("%s status 0x%x", hdev->name, status);
860
861 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
862 }
863
hci_cc_read_inq_rsp_tx_power(struct hci_dev * hdev,struct sk_buff * skb)864 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
865 struct sk_buff *skb)
866 {
867 __u8 status = *((__u8 *) skb->data);
868
869 BT_DBG("%s status 0x%x", hdev->name, status);
870
871 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
872 }
873
hci_cc_set_event_flt(struct hci_dev * hdev,struct sk_buff * skb)874 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
875 {
876 __u8 status = *((__u8 *) skb->data);
877
878 BT_DBG("%s status 0x%x", hdev->name, status);
879
880 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
881 }
882
hci_cc_pin_code_reply(struct hci_dev * hdev,struct sk_buff * skb)883 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
884 {
885 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
886 struct hci_cp_pin_code_reply *cp;
887 struct hci_conn *conn;
888
889 BT_DBG("%s status 0x%x", hdev->name, rp->status);
890
891 hci_dev_lock(hdev);
892
893 if (test_bit(HCI_MGMT, &hdev->flags))
894 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
895
896 if (rp->status != 0)
897 goto unlock;
898
899 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
900 if (!cp)
901 goto unlock;
902
903 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
904 if (conn)
905 conn->pin_length = cp->pin_len;
906
907 unlock:
908 hci_dev_unlock(hdev);
909 }
910
hci_cc_pin_code_neg_reply(struct hci_dev * hdev,struct sk_buff * skb)911 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
912 {
913 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
914
915 BT_DBG("%s status 0x%x", hdev->name, rp->status);
916
917 hci_dev_lock(hdev);
918
919 if (test_bit(HCI_MGMT, &hdev->flags))
920 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
921 rp->status);
922
923 hci_dev_unlock(hdev);
924 }
925
hci_cc_le_read_buffer_size(struct hci_dev * hdev,struct sk_buff * skb)926 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
927 struct sk_buff *skb)
928 {
929 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
930
931 BT_DBG("%s status 0x%x", hdev->name, rp->status);
932
933 if (rp->status)
934 return;
935
936 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
937 hdev->le_pkts = rp->le_max_pkt;
938
939 hdev->le_cnt = hdev->le_pkts;
940
941 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
942
943 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
944 }
945
hci_cc_user_confirm_reply(struct hci_dev * hdev,struct sk_buff * skb)946 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
947 {
948 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
949
950 BT_DBG("%s status 0x%x", hdev->name, rp->status);
951
952 hci_dev_lock(hdev);
953
954 if (test_bit(HCI_MGMT, &hdev->flags))
955 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr,
956 rp->status);
957
958 hci_dev_unlock(hdev);
959 }
960
hci_cc_user_confirm_neg_reply(struct hci_dev * hdev,struct sk_buff * skb)961 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
962 struct sk_buff *skb)
963 {
964 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
965
966 BT_DBG("%s status 0x%x", hdev->name, rp->status);
967
968 hci_dev_lock(hdev);
969
970 if (test_bit(HCI_MGMT, &hdev->flags))
971 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
972 rp->status);
973
974 hci_dev_unlock(hdev);
975 }
976
hci_cc_user_passkey_reply(struct hci_dev * hdev,struct sk_buff * skb)977 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
978 {
979 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
980
981 BT_DBG("%s status 0x%x", hdev->name, rp->status);
982
983 hci_dev_lock(hdev);
984
985 if (test_bit(HCI_MGMT, &hdev->flags))
986 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr,
987 rp->status);
988
989 hci_dev_unlock(hdev);
990 }
991
hci_cc_user_passkey_neg_reply(struct hci_dev * hdev,struct sk_buff * skb)992 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
993 struct sk_buff *skb)
994 {
995 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
996
997 BT_DBG("%s status 0x%x", hdev->name, rp->status);
998
999 hci_dev_lock(hdev);
1000
1001 if (test_bit(HCI_MGMT, &hdev->flags))
1002 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1003 rp->status);
1004
1005 hci_dev_unlock(hdev);
1006 }
1007
hci_cc_read_local_oob_data_reply(struct hci_dev * hdev,struct sk_buff * skb)1008 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1009 struct sk_buff *skb)
1010 {
1011 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1012
1013 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1014
1015 hci_dev_lock(hdev);
1016 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
1017 rp->randomizer, rp->status);
1018 hci_dev_unlock(hdev);
1019 }
1020
hci_cc_le_set_scan_param(struct hci_dev * hdev,struct sk_buff * skb)1021 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1022 {
1023 __u8 status = *((__u8 *) skb->data);
1024
1025 BT_DBG("%s status 0x%x", hdev->name, status);
1026 }
1027
hci_cc_le_set_scan_enable(struct hci_dev * hdev,struct sk_buff * skb)1028 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1029 struct sk_buff *skb)
1030 {
1031 struct hci_cp_le_set_scan_enable *cp;
1032 __u8 status = *((__u8 *) skb->data);
1033
1034 BT_DBG("%s status 0x%x", hdev->name, status);
1035
1036 if (status)
1037 return;
1038
1039 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1040 if (!cp)
1041 return;
1042
1043 switch (cp->enable) {
1044 case LE_SCANNING_ENABLED:
1045 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1046
1047 cancel_delayed_work_sync(&hdev->adv_work);
1048
1049 hci_dev_lock(hdev);
1050 hci_adv_entries_clear(hdev);
1051 hci_dev_unlock(hdev);
1052 break;
1053
1054 case LE_SCANNING_DISABLED:
1055 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1056
1057 schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT);
1058 break;
1059
1060 default:
1061 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1062 break;
1063 }
1064 }
1065
hci_cc_le_ltk_reply(struct hci_dev * hdev,struct sk_buff * skb)1066 static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1067 {
1068 struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1069
1070 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1071
1072 if (rp->status)
1073 return;
1074
1075 hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1076 }
1077
hci_cc_le_ltk_neg_reply(struct hci_dev * hdev,struct sk_buff * skb)1078 static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1079 {
1080 struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1081
1082 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1083
1084 if (rp->status)
1085 return;
1086
1087 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1088 }
1089
hci_cc_write_le_host_supported(struct hci_dev * hdev,struct sk_buff * skb)1090 static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1091 struct sk_buff *skb)
1092 {
1093 struct hci_cp_read_local_ext_features cp;
1094 __u8 status = *((__u8 *) skb->data);
1095
1096 BT_DBG("%s status 0x%x", hdev->name, status);
1097
1098 if (status)
1099 return;
1100
1101 cp.page = 0x01;
1102 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), &cp);
1103 }
1104
hci_cs_inquiry(struct hci_dev * hdev,__u8 status)1105 static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1106 {
1107 BT_DBG("%s status 0x%x", hdev->name, status);
1108
1109 if (status) {
1110 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1111 hci_conn_check_pending(hdev);
1112 hci_dev_lock(hdev);
1113 if (test_bit(HCI_MGMT, &hdev->flags))
1114 mgmt_start_discovery_failed(hdev, status);
1115 hci_dev_unlock(hdev);
1116 return;
1117 }
1118
1119 set_bit(HCI_INQUIRY, &hdev->flags);
1120
1121 hci_dev_lock(hdev);
1122 mgmt_discovering(hdev, 1);
1123 hci_dev_unlock(hdev);
1124 }
1125
hci_cs_create_conn(struct hci_dev * hdev,__u8 status)1126 static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1127 {
1128 struct hci_cp_create_conn *cp;
1129 struct hci_conn *conn;
1130
1131 BT_DBG("%s status 0x%x", hdev->name, status);
1132
1133 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1134 if (!cp)
1135 return;
1136
1137 hci_dev_lock(hdev);
1138
1139 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1140
1141 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);
1142
1143 if (status) {
1144 if (conn && conn->state == BT_CONNECT) {
1145 if (status != 0x0c || conn->attempt > 2) {
1146 conn->state = BT_CLOSED;
1147 hci_proto_connect_cfm(conn, status);
1148 hci_conn_del(conn);
1149 } else
1150 conn->state = BT_CONNECT2;
1151 }
1152 } else {
1153 if (!conn) {
1154 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1155 if (conn) {
1156 conn->out = 1;
1157 conn->link_mode |= HCI_LM_MASTER;
1158 } else
1159 BT_ERR("No memory for new connection");
1160 }
1161 }
1162
1163 hci_dev_unlock(hdev);
1164 }
1165
hci_cs_add_sco(struct hci_dev * hdev,__u8 status)1166 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1167 {
1168 struct hci_cp_add_sco *cp;
1169 struct hci_conn *acl, *sco;
1170 __u16 handle;
1171
1172 BT_DBG("%s status 0x%x", hdev->name, status);
1173
1174 if (!status)
1175 return;
1176
1177 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1178 if (!cp)
1179 return;
1180
1181 handle = __le16_to_cpu(cp->handle);
1182
1183 BT_DBG("%s handle %d", hdev->name, handle);
1184
1185 hci_dev_lock(hdev);
1186
1187 acl = hci_conn_hash_lookup_handle(hdev, handle);
1188 if (acl) {
1189 sco = acl->link;
1190 if (sco) {
1191 sco->state = BT_CLOSED;
1192
1193 hci_proto_connect_cfm(sco, status);
1194 hci_conn_del(sco);
1195 }
1196 }
1197
1198 hci_dev_unlock(hdev);
1199 }
1200
hci_cs_auth_requested(struct hci_dev * hdev,__u8 status)1201 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1202 {
1203 struct hci_cp_auth_requested *cp;
1204 struct hci_conn *conn;
1205
1206 BT_DBG("%s status 0x%x", hdev->name, status);
1207
1208 if (!status)
1209 return;
1210
1211 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1212 if (!cp)
1213 return;
1214
1215 hci_dev_lock(hdev);
1216
1217 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1218 if (conn) {
1219 if (conn->state == BT_CONFIG) {
1220 hci_proto_connect_cfm(conn, status);
1221 hci_conn_put(conn);
1222 }
1223 }
1224
1225 hci_dev_unlock(hdev);
1226 }
1227
hci_cs_set_conn_encrypt(struct hci_dev * hdev,__u8 status)1228 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1229 {
1230 struct hci_cp_set_conn_encrypt *cp;
1231 struct hci_conn *conn;
1232
1233 BT_DBG("%s status 0x%x", hdev->name, status);
1234
1235 if (!status)
1236 return;
1237
1238 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1239 if (!cp)
1240 return;
1241
1242 hci_dev_lock(hdev);
1243
1244 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1245 if (conn) {
1246 if (conn->state == BT_CONFIG) {
1247 hci_proto_connect_cfm(conn, status);
1248 hci_conn_put(conn);
1249 }
1250 }
1251
1252 hci_dev_unlock(hdev);
1253 }
1254
hci_outgoing_auth_needed(struct hci_dev * hdev,struct hci_conn * conn)1255 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1256 struct hci_conn *conn)
1257 {
1258 if (conn->state != BT_CONFIG || !conn->out)
1259 return 0;
1260
1261 if (conn->pending_sec_level == BT_SECURITY_SDP)
1262 return 0;
1263
1264 /* Only request authentication for SSP connections or non-SSP
1265 * devices with sec_level HIGH or if MITM protection is requested */
1266 if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
1267 conn->pending_sec_level != BT_SECURITY_HIGH &&
1268 !(conn->auth_type & 0x01))
1269 return 0;
1270
1271 return 1;
1272 }
1273
hci_cs_remote_name_req(struct hci_dev * hdev,__u8 status)1274 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1275 {
1276 struct hci_cp_remote_name_req *cp;
1277 struct hci_conn *conn;
1278
1279 BT_DBG("%s status 0x%x", hdev->name, status);
1280
1281 /* If successful wait for the name req complete event before
1282 * checking for the need to do authentication */
1283 if (!status)
1284 return;
1285
1286 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1287 if (!cp)
1288 return;
1289
1290 hci_dev_lock(hdev);
1291
1292 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1293 if (!conn)
1294 goto unlock;
1295
1296 if (!hci_outgoing_auth_needed(hdev, conn))
1297 goto unlock;
1298
1299 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
1300 struct hci_cp_auth_requested cp;
1301 cp.handle = __cpu_to_le16(conn->handle);
1302 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1303 }
1304
1305 unlock:
1306 hci_dev_unlock(hdev);
1307 }
1308
hci_cs_read_remote_features(struct hci_dev * hdev,__u8 status)1309 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1310 {
1311 struct hci_cp_read_remote_features *cp;
1312 struct hci_conn *conn;
1313
1314 BT_DBG("%s status 0x%x", hdev->name, status);
1315
1316 if (!status)
1317 return;
1318
1319 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1320 if (!cp)
1321 return;
1322
1323 hci_dev_lock(hdev);
1324
1325 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1326 if (conn) {
1327 if (conn->state == BT_CONFIG) {
1328 hci_proto_connect_cfm(conn, status);
1329 hci_conn_put(conn);
1330 }
1331 }
1332
1333 hci_dev_unlock(hdev);
1334 }
1335
hci_cs_read_remote_ext_features(struct hci_dev * hdev,__u8 status)1336 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1337 {
1338 struct hci_cp_read_remote_ext_features *cp;
1339 struct hci_conn *conn;
1340
1341 BT_DBG("%s status 0x%x", hdev->name, status);
1342
1343 if (!status)
1344 return;
1345
1346 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1347 if (!cp)
1348 return;
1349
1350 hci_dev_lock(hdev);
1351
1352 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1353 if (conn) {
1354 if (conn->state == BT_CONFIG) {
1355 hci_proto_connect_cfm(conn, status);
1356 hci_conn_put(conn);
1357 }
1358 }
1359
1360 hci_dev_unlock(hdev);
1361 }
1362
hci_cs_setup_sync_conn(struct hci_dev * hdev,__u8 status)1363 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1364 {
1365 struct hci_cp_setup_sync_conn *cp;
1366 struct hci_conn *acl, *sco;
1367 __u16 handle;
1368
1369 BT_DBG("%s status 0x%x", hdev->name, status);
1370
1371 if (!status)
1372 return;
1373
1374 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1375 if (!cp)
1376 return;
1377
1378 handle = __le16_to_cpu(cp->handle);
1379
1380 BT_DBG("%s handle %d", hdev->name, handle);
1381
1382 hci_dev_lock(hdev);
1383
1384 acl = hci_conn_hash_lookup_handle(hdev, handle);
1385 if (acl) {
1386 sco = acl->link;
1387 if (sco) {
1388 sco->state = BT_CLOSED;
1389
1390 hci_proto_connect_cfm(sco, status);
1391 hci_conn_del(sco);
1392 }
1393 }
1394
1395 hci_dev_unlock(hdev);
1396 }
1397
hci_cs_sniff_mode(struct hci_dev * hdev,__u8 status)1398 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1399 {
1400 struct hci_cp_sniff_mode *cp;
1401 struct hci_conn *conn;
1402
1403 BT_DBG("%s status 0x%x", hdev->name, status);
1404
1405 if (!status)
1406 return;
1407
1408 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1409 if (!cp)
1410 return;
1411
1412 hci_dev_lock(hdev);
1413
1414 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1415 if (conn) {
1416 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
1417
1418 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
1419 hci_sco_setup(conn, status);
1420 }
1421
1422 hci_dev_unlock(hdev);
1423 }
1424
hci_cs_exit_sniff_mode(struct hci_dev * hdev,__u8 status)1425 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1426 {
1427 struct hci_cp_exit_sniff_mode *cp;
1428 struct hci_conn *conn;
1429
1430 BT_DBG("%s status 0x%x", hdev->name, status);
1431
1432 if (!status)
1433 return;
1434
1435 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1436 if (!cp)
1437 return;
1438
1439 hci_dev_lock(hdev);
1440
1441 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1442 if (conn) {
1443 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
1444
1445 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
1446 hci_sco_setup(conn, status);
1447 }
1448
1449 hci_dev_unlock(hdev);
1450 }
1451
hci_cs_le_create_conn(struct hci_dev * hdev,__u8 status)1452 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1453 {
1454 struct hci_cp_le_create_conn *cp;
1455 struct hci_conn *conn;
1456
1457 BT_DBG("%s status 0x%x", hdev->name, status);
1458
1459 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1460 if (!cp)
1461 return;
1462
1463 hci_dev_lock(hdev);
1464
1465 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1466
1467 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
1468 conn);
1469
1470 if (status) {
1471 if (conn && conn->state == BT_CONNECT) {
1472 conn->state = BT_CLOSED;
1473 hci_proto_connect_cfm(conn, status);
1474 hci_conn_del(conn);
1475 }
1476 } else {
1477 if (!conn) {
1478 conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1479 if (conn) {
1480 conn->dst_type = cp->peer_addr_type;
1481 conn->out = 1;
1482 } else {
1483 BT_ERR("No memory for new connection");
1484 }
1485 }
1486 }
1487
1488 hci_dev_unlock(hdev);
1489 }
1490
hci_cs_le_start_enc(struct hci_dev * hdev,u8 status)1491 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1492 {
1493 BT_DBG("%s status 0x%x", hdev->name, status);
1494 }
1495
hci_inquiry_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)1496 static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1497 {
1498 __u8 status = *((__u8 *) skb->data);
1499
1500 BT_DBG("%s status %d", hdev->name, status);
1501
1502 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1503
1504 hci_conn_check_pending(hdev);
1505
1506 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1507 return;
1508
1509 hci_dev_lock(hdev);
1510 mgmt_discovering(hdev, 0);
1511 hci_dev_unlock(hdev);
1512 }
1513
hci_inquiry_result_evt(struct hci_dev * hdev,struct sk_buff * skb)1514 static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1515 {
1516 struct inquiry_data data;
1517 struct inquiry_info *info = (void *) (skb->data + 1);
1518 int num_rsp = *((__u8 *) skb->data);
1519
1520 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1521
1522 if (!num_rsp)
1523 return;
1524
1525 hci_dev_lock(hdev);
1526
1527 for (; num_rsp; num_rsp--, info++) {
1528 bacpy(&data.bdaddr, &info->bdaddr);
1529 data.pscan_rep_mode = info->pscan_rep_mode;
1530 data.pscan_period_mode = info->pscan_period_mode;
1531 data.pscan_mode = info->pscan_mode;
1532 memcpy(data.dev_class, info->dev_class, 3);
1533 data.clock_offset = info->clock_offset;
1534 data.rssi = 0x00;
1535 data.ssp_mode = 0x00;
1536 hci_inquiry_cache_update(hdev, &data);
1537 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1538 info->dev_class, 0, NULL);
1539 }
1540
1541 hci_dev_unlock(hdev);
1542 }
1543
hci_conn_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)1544 static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1545 {
1546 struct hci_ev_conn_complete *ev = (void *) skb->data;
1547 struct hci_conn *conn;
1548
1549 BT_DBG("%s", hdev->name);
1550
1551 hci_dev_lock(hdev);
1552
1553 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1554 if (!conn) {
1555 if (ev->link_type != SCO_LINK)
1556 goto unlock;
1557
1558 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1559 if (!conn)
1560 goto unlock;
1561
1562 conn->type = SCO_LINK;
1563 }
1564
1565 if (!ev->status) {
1566 conn->handle = __le16_to_cpu(ev->handle);
1567
1568 if (conn->type == ACL_LINK) {
1569 conn->state = BT_CONFIG;
1570 hci_conn_hold(conn);
1571 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1572 mgmt_connected(hdev, &ev->bdaddr, conn->type,
1573 conn->dst_type);
1574 } else
1575 conn->state = BT_CONNECTED;
1576
1577 hci_conn_hold_device(conn);
1578 hci_conn_add_sysfs(conn);
1579
1580 if (test_bit(HCI_AUTH, &hdev->flags))
1581 conn->link_mode |= HCI_LM_AUTH;
1582
1583 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1584 conn->link_mode |= HCI_LM_ENCRYPT;
1585
1586 /* Get remote features */
1587 if (conn->type == ACL_LINK) {
1588 struct hci_cp_read_remote_features cp;
1589 cp.handle = ev->handle;
1590 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1591 sizeof(cp), &cp);
1592 }
1593
1594 /* Set packet type for incoming connection */
1595 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1596 struct hci_cp_change_conn_ptype cp;
1597 cp.handle = ev->handle;
1598 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1599 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
1600 sizeof(cp), &cp);
1601 }
1602 } else {
1603 conn->state = BT_CLOSED;
1604 if (conn->type == ACL_LINK)
1605 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1606 conn->dst_type, ev->status);
1607 }
1608
1609 if (conn->type == ACL_LINK)
1610 hci_sco_setup(conn, ev->status);
1611
1612 if (ev->status) {
1613 hci_proto_connect_cfm(conn, ev->status);
1614 hci_conn_del(conn);
1615 } else if (ev->link_type != ACL_LINK)
1616 hci_proto_connect_cfm(conn, ev->status);
1617
1618 unlock:
1619 hci_dev_unlock(hdev);
1620
1621 hci_conn_check_pending(hdev);
1622 }
1623
hci_conn_request_evt(struct hci_dev * hdev,struct sk_buff * skb)1624 static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1625 {
1626 struct hci_ev_conn_request *ev = (void *) skb->data;
1627 int mask = hdev->link_mode;
1628
1629 BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
1630 batostr(&ev->bdaddr), ev->link_type);
1631
1632 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1633
1634 if ((mask & HCI_LM_ACCEPT) &&
1635 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1636 /* Connection accepted */
1637 struct inquiry_entry *ie;
1638 struct hci_conn *conn;
1639
1640 hci_dev_lock(hdev);
1641
1642 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1643 if (ie)
1644 memcpy(ie->data.dev_class, ev->dev_class, 3);
1645
1646 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1647 if (!conn) {
1648 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1649 if (!conn) {
1650 BT_ERR("No memory for new connection");
1651 hci_dev_unlock(hdev);
1652 return;
1653 }
1654 }
1655
1656 memcpy(conn->dev_class, ev->dev_class, 3);
1657 conn->state = BT_CONNECT;
1658
1659 hci_dev_unlock(hdev);
1660
1661 if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
1662 struct hci_cp_accept_conn_req cp;
1663
1664 bacpy(&cp.bdaddr, &ev->bdaddr);
1665
1666 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1667 cp.role = 0x00; /* Become master */
1668 else
1669 cp.role = 0x01; /* Remain slave */
1670
1671 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ,
1672 sizeof(cp), &cp);
1673 } else {
1674 struct hci_cp_accept_sync_conn_req cp;
1675
1676 bacpy(&cp.bdaddr, &ev->bdaddr);
1677 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1678
1679 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
1680 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
1681 cp.max_latency = cpu_to_le16(0xffff);
1682 cp.content_format = cpu_to_le16(hdev->voice_setting);
1683 cp.retrans_effort = 0xff;
1684
1685 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1686 sizeof(cp), &cp);
1687 }
1688 } else {
1689 /* Connection rejected */
1690 struct hci_cp_reject_conn_req cp;
1691
1692 bacpy(&cp.bdaddr, &ev->bdaddr);
1693 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1694 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1695 }
1696 }
1697
hci_disconn_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)1698 static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1699 {
1700 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1701 struct hci_conn *conn;
1702
1703 BT_DBG("%s status %d", hdev->name, ev->status);
1704
1705 hci_dev_lock(hdev);
1706
1707 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1708 if (!conn)
1709 goto unlock;
1710
1711 if (ev->status == 0)
1712 conn->state = BT_CLOSED;
1713
1714 if (conn->type == ACL_LINK || conn->type == LE_LINK) {
1715 if (ev->status != 0)
1716 mgmt_disconnect_failed(hdev, &conn->dst, ev->status);
1717 else
1718 mgmt_disconnected(hdev, &conn->dst, conn->type,
1719 conn->dst_type);
1720 }
1721
1722 if (ev->status == 0) {
1723 hci_proto_disconn_cfm(conn, ev->reason);
1724 hci_conn_del(conn);
1725 }
1726
1727 unlock:
1728 hci_dev_unlock(hdev);
1729 }
1730
hci_auth_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)1731 static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1732 {
1733 struct hci_ev_auth_complete *ev = (void *) skb->data;
1734 struct hci_conn *conn;
1735
1736 BT_DBG("%s status %d", hdev->name, ev->status);
1737
1738 hci_dev_lock(hdev);
1739
1740 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1741 if (!conn)
1742 goto unlock;
1743
1744 if (!ev->status) {
1745 if (!(conn->ssp_mode > 0 && hdev->ssp_mode > 0) &&
1746 test_bit(HCI_CONN_REAUTH_PEND, &conn->pend)) {
1747 BT_INFO("re-auth of legacy device is not possible.");
1748 } else {
1749 conn->link_mode |= HCI_LM_AUTH;
1750 conn->sec_level = conn->pending_sec_level;
1751 }
1752 } else {
1753 mgmt_auth_failed(hdev, &conn->dst, ev->status);
1754 }
1755
1756 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
1757 clear_bit(HCI_CONN_REAUTH_PEND, &conn->pend);
1758
1759 if (conn->state == BT_CONFIG) {
1760 if (!ev->status && hdev->ssp_mode > 0 && conn->ssp_mode > 0) {
1761 struct hci_cp_set_conn_encrypt cp;
1762 cp.handle = ev->handle;
1763 cp.encrypt = 0x01;
1764 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1765 &cp);
1766 } else {
1767 conn->state = BT_CONNECTED;
1768 hci_proto_connect_cfm(conn, ev->status);
1769 hci_conn_put(conn);
1770 }
1771 } else {
1772 hci_auth_cfm(conn, ev->status);
1773
1774 hci_conn_hold(conn);
1775 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1776 hci_conn_put(conn);
1777 }
1778
1779 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
1780 if (!ev->status) {
1781 struct hci_cp_set_conn_encrypt cp;
1782 cp.handle = ev->handle;
1783 cp.encrypt = 0x01;
1784 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1785 &cp);
1786 } else {
1787 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
1788 hci_encrypt_cfm(conn, ev->status, 0x00);
1789 }
1790 }
1791
1792 unlock:
1793 hci_dev_unlock(hdev);
1794 }
1795
hci_remote_name_evt(struct hci_dev * hdev,struct sk_buff * skb)1796 static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1797 {
1798 struct hci_ev_remote_name *ev = (void *) skb->data;
1799 struct hci_conn *conn;
1800
1801 BT_DBG("%s", hdev->name);
1802
1803 hci_conn_check_pending(hdev);
1804
1805 hci_dev_lock(hdev);
1806
1807 if (ev->status == 0 && test_bit(HCI_MGMT, &hdev->flags))
1808 mgmt_remote_name(hdev, &ev->bdaddr, ev->name);
1809
1810 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1811 if (!conn)
1812 goto unlock;
1813
1814 if (!hci_outgoing_auth_needed(hdev, conn))
1815 goto unlock;
1816
1817 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
1818 struct hci_cp_auth_requested cp;
1819 cp.handle = __cpu_to_le16(conn->handle);
1820 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1821 }
1822
1823 unlock:
1824 hci_dev_unlock(hdev);
1825 }
1826
hci_encrypt_change_evt(struct hci_dev * hdev,struct sk_buff * skb)1827 static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1828 {
1829 struct hci_ev_encrypt_change *ev = (void *) skb->data;
1830 struct hci_conn *conn;
1831
1832 BT_DBG("%s status %d", hdev->name, ev->status);
1833
1834 hci_dev_lock(hdev);
1835
1836 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1837 if (conn) {
1838 if (!ev->status) {
1839 if (ev->encrypt) {
1840 /* Encryption implies authentication */
1841 conn->link_mode |= HCI_LM_AUTH;
1842 conn->link_mode |= HCI_LM_ENCRYPT;
1843 conn->sec_level = conn->pending_sec_level;
1844 } else
1845 conn->link_mode &= ~HCI_LM_ENCRYPT;
1846 }
1847
1848 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
1849
1850 if (conn->state == BT_CONFIG) {
1851 if (!ev->status)
1852 conn->state = BT_CONNECTED;
1853
1854 hci_proto_connect_cfm(conn, ev->status);
1855 hci_conn_put(conn);
1856 } else
1857 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
1858 }
1859
1860 hci_dev_unlock(hdev);
1861 }
1862
hci_change_link_key_complete_evt(struct hci_dev * hdev,struct sk_buff * skb)1863 static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1864 {
1865 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
1866 struct hci_conn *conn;
1867
1868 BT_DBG("%s status %d", hdev->name, ev->status);
1869
1870 hci_dev_lock(hdev);
1871
1872 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1873 if (conn) {
1874 if (!ev->status)
1875 conn->link_mode |= HCI_LM_SECURE;
1876
1877 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
1878
1879 hci_key_change_cfm(conn, ev->status);
1880 }
1881
1882 hci_dev_unlock(hdev);
1883 }
1884
hci_remote_features_evt(struct hci_dev * hdev,struct sk_buff * skb)1885 static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
1886 {
1887 struct hci_ev_remote_features *ev = (void *) skb->data;
1888 struct hci_conn *conn;
1889
1890 BT_DBG("%s status %d", hdev->name, ev->status);
1891
1892 hci_dev_lock(hdev);
1893
1894 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1895 if (!conn)
1896 goto unlock;
1897
1898 if (!ev->status)
1899 memcpy(conn->features, ev->features, 8);
1900
1901 if (conn->state != BT_CONFIG)
1902 goto unlock;
1903
1904 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
1905 struct hci_cp_read_remote_ext_features cp;
1906 cp.handle = ev->handle;
1907 cp.page = 0x01;
1908 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
1909 sizeof(cp), &cp);
1910 goto unlock;
1911 }
1912
1913 if (!ev->status) {
1914 struct hci_cp_remote_name_req cp;
1915 memset(&cp, 0, sizeof(cp));
1916 bacpy(&cp.bdaddr, &conn->dst);
1917 cp.pscan_rep_mode = 0x02;
1918 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1919 }
1920
1921 if (!hci_outgoing_auth_needed(hdev, conn)) {
1922 conn->state = BT_CONNECTED;
1923 hci_proto_connect_cfm(conn, ev->status);
1924 hci_conn_put(conn);
1925 }
1926
1927 unlock:
1928 hci_dev_unlock(hdev);
1929 }
1930
hci_remote_version_evt(struct hci_dev * hdev,struct sk_buff * skb)1931 static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
1932 {
1933 BT_DBG("%s", hdev->name);
1934 }
1935
1936 static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1937 {
1938 BT_DBG("%s", hdev->name);
1939 }
1940
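/* Command Complete event: strip the event header and dispatch to the
 * matching hci_cc_* handler by opcode. For anything other than
 * HCI_OP_NOP the command timeout timer is stopped, and when the
 * controller reports free command slots (ev->ncmd) the command queue
 * is kicked again. */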
1941 static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1942 {
1943 struct hci_ev_cmd_complete *ev = (void *) skb->data;
1944 __u16 opcode;
1945
1946 skb_pull(skb, sizeof(*ev));
1947
1948 opcode = __le16_to_cpu(ev->opcode);
1949
1950 switch (opcode) {
1951 case HCI_OP_INQUIRY_CANCEL:
1952 hci_cc_inquiry_cancel(hdev, skb);
1953 break;
1954
1955 case HCI_OP_EXIT_PERIODIC_INQ:
1956 hci_cc_exit_periodic_inq(hdev, skb);
1957 break;
1958
1959 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
1960 hci_cc_remote_name_req_cancel(hdev, skb);
1961 break;
1962
1963 case HCI_OP_ROLE_DISCOVERY:
1964 hci_cc_role_discovery(hdev, skb);
1965 break;
1966
1967 case HCI_OP_READ_LINK_POLICY:
1968 hci_cc_read_link_policy(hdev, skb);
1969 break;
1970
1971 case HCI_OP_WRITE_LINK_POLICY:
1972 hci_cc_write_link_policy(hdev, skb);
1973 break;
1974
1975 case HCI_OP_READ_DEF_LINK_POLICY:
1976 hci_cc_read_def_link_policy(hdev, skb);
1977 break;
1978
1979 case HCI_OP_WRITE_DEF_LINK_POLICY:
1980 hci_cc_write_def_link_policy(hdev, skb);
1981 break;
1982
1983 case HCI_OP_RESET:
1984 hci_cc_reset(hdev, skb);
1985 break;
1986
1987 case HCI_OP_WRITE_LOCAL_NAME:
1988 hci_cc_write_local_name(hdev, skb);
1989 break;
1990
1991 case HCI_OP_READ_LOCAL_NAME:
1992 hci_cc_read_local_name(hdev, skb);
1993 break;
1994
1995 case HCI_OP_WRITE_AUTH_ENABLE:
1996 hci_cc_write_auth_enable(hdev, skb);
1997 break;
1998
1999 case HCI_OP_WRITE_ENCRYPT_MODE:
2000 hci_cc_write_encrypt_mode(hdev, skb);
2001 break;
2002
2003 case HCI_OP_WRITE_SCAN_ENABLE:
2004 hci_cc_write_scan_enable(hdev, skb);
2005 break;
2006
2007 case HCI_OP_READ_CLASS_OF_DEV:
2008 hci_cc_read_class_of_dev(hdev, skb);
2009 break;
2010
2011 case HCI_OP_WRITE_CLASS_OF_DEV:
2012 hci_cc_write_class_of_dev(hdev, skb);
2013 break;
2014
2015 case HCI_OP_READ_VOICE_SETTING:
2016 hci_cc_read_voice_setting(hdev, skb);
2017 break;
2018
2019 case HCI_OP_WRITE_VOICE_SETTING:
2020 hci_cc_write_voice_setting(hdev, skb);
2021 break;
2022
2023 case HCI_OP_HOST_BUFFER_SIZE:
2024 hci_cc_host_buffer_size(hdev, skb);
2025 break;
2026
2027 case HCI_OP_READ_SSP_MODE:
2028 hci_cc_read_ssp_mode(hdev, skb);
2029 break;
2030
2031 case HCI_OP_WRITE_SSP_MODE:
2032 hci_cc_write_ssp_mode(hdev, skb);
2033 break;
2034
2035 case HCI_OP_READ_LOCAL_VERSION:
2036 hci_cc_read_local_version(hdev, skb);
2037 break;
2038
2039 case HCI_OP_READ_LOCAL_COMMANDS:
2040 hci_cc_read_local_commands(hdev, skb);
2041 break;
2042
2043 case HCI_OP_READ_LOCAL_FEATURES:
2044 hci_cc_read_local_features(hdev, skb);
2045 break;
2046
2047 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2048 hci_cc_read_local_ext_features(hdev, skb);
2049 break;
2050
2051 case HCI_OP_READ_BUFFER_SIZE:
2052 hci_cc_read_buffer_size(hdev, skb);
2053 break;
2054
2055 case HCI_OP_READ_BD_ADDR:
2056 hci_cc_read_bd_addr(hdev, skb);
2057 break;
2058
2059 case HCI_OP_READ_DATA_BLOCK_SIZE:
2060 hci_cc_read_data_block_size(hdev, skb);
2061 break;
2062
2063 case HCI_OP_WRITE_CA_TIMEOUT:
2064 hci_cc_write_ca_timeout(hdev, skb);
2065 break;
2066
2067 case HCI_OP_READ_FLOW_CONTROL_MODE:
2068 hci_cc_read_flow_control_mode(hdev, skb);
2069 break;
2070
2071 case HCI_OP_READ_LOCAL_AMP_INFO:
2072 hci_cc_read_local_amp_info(hdev, skb);
2073 break;
2074
2075 case HCI_OP_DELETE_STORED_LINK_KEY:
2076 hci_cc_delete_stored_link_key(hdev, skb);
2077 break;
2078
2079 case HCI_OP_SET_EVENT_MASK:
2080 hci_cc_set_event_mask(hdev, skb);
2081 break;
2082
2083 case HCI_OP_WRITE_INQUIRY_MODE:
2084 hci_cc_write_inquiry_mode(hdev, skb);
2085 break;
2086
2087 case HCI_OP_READ_INQ_RSP_TX_POWER:
2088 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2089 break;
2090
2091 case HCI_OP_SET_EVENT_FLT:
2092 hci_cc_set_event_flt(hdev, skb);
2093 break;
2094
2095 case HCI_OP_PIN_CODE_REPLY:
2096 hci_cc_pin_code_reply(hdev, skb);
2097 break;
2098
2099 case HCI_OP_PIN_CODE_NEG_REPLY:
2100 hci_cc_pin_code_neg_reply(hdev, skb);
2101 break;
2102
2103 case HCI_OP_READ_LOCAL_OOB_DATA:
2104 hci_cc_read_local_oob_data_reply(hdev, skb);
2105 break;
2106
2107 case HCI_OP_LE_READ_BUFFER_SIZE:
2108 hci_cc_le_read_buffer_size(hdev, skb);
2109 break;
2110
2111 case HCI_OP_USER_CONFIRM_REPLY:
2112 hci_cc_user_confirm_reply(hdev, skb);
2113 break;
2114
2115 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2116 hci_cc_user_confirm_neg_reply(hdev, skb);
2117 break;
2118
2119 case HCI_OP_USER_PASSKEY_REPLY:
2120 hci_cc_user_passkey_reply(hdev, skb);
2121 break;
2122
2123 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2124 hci_cc_user_passkey_neg_reply(hdev, skb);
break;
2125
2126 case HCI_OP_LE_SET_SCAN_PARAM:
2127 hci_cc_le_set_scan_param(hdev, skb);
2128 break;
2129
2130 case HCI_OP_LE_SET_SCAN_ENABLE:
2131 hci_cc_le_set_scan_enable(hdev, skb);
2132 break;
2133
2134 case HCI_OP_LE_LTK_REPLY:
2135 hci_cc_le_ltk_reply(hdev, skb);
2136 break;
2137
2138 case HCI_OP_LE_LTK_NEG_REPLY:
2139 hci_cc_le_ltk_neg_reply(hdev, skb);
2140 break;
2141
2142 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2143 hci_cc_write_le_host_supported(hdev, skb);
2144 break;
2145
2146 default:
2147 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2148 break;
2149 }
2150
2151 if (ev->opcode != HCI_OP_NOP)
2152 del_timer(&hdev->cmd_timer);
2153
2154 if (ev->ncmd) {
2155 atomic_set(&hdev->cmd_cnt, 1);
2156 if (!skb_queue_empty(&hdev->cmd_q))
2157 queue_work(hdev->workqueue, &hdev->cmd_work);
2158 }
2159 }
2160
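/* Command Status event: like Command Complete but carrying only a
 * status code, dispatched to the hci_cs_* handlers. The command queue
 * is only restarted when no controller reset is in progress. */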
2161 static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2162 {
2163 struct hci_ev_cmd_status *ev = (void *) skb->data;
2164 __u16 opcode;
2165
2166 skb_pull(skb, sizeof(*ev));
2167
2168 opcode = __le16_to_cpu(ev->opcode);
2169
2170 switch (opcode) {
2171 case HCI_OP_INQUIRY:
2172 hci_cs_inquiry(hdev, ev->status);
2173 break;
2174
2175 case HCI_OP_CREATE_CONN:
2176 hci_cs_create_conn(hdev, ev->status);
2177 break;
2178
2179 case HCI_OP_ADD_SCO:
2180 hci_cs_add_sco(hdev, ev->status);
2181 break;
2182
2183 case HCI_OP_AUTH_REQUESTED:
2184 hci_cs_auth_requested(hdev, ev->status);
2185 break;
2186
2187 case HCI_OP_SET_CONN_ENCRYPT:
2188 hci_cs_set_conn_encrypt(hdev, ev->status);
2189 break;
2190
2191 case HCI_OP_REMOTE_NAME_REQ:
2192 hci_cs_remote_name_req(hdev, ev->status);
2193 break;
2194
2195 case HCI_OP_READ_REMOTE_FEATURES:
2196 hci_cs_read_remote_features(hdev, ev->status);
2197 break;
2198
2199 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2200 hci_cs_read_remote_ext_features(hdev, ev->status);
2201 break;
2202
2203 case HCI_OP_SETUP_SYNC_CONN:
2204 hci_cs_setup_sync_conn(hdev, ev->status);
2205 break;
2206
2207 case HCI_OP_SNIFF_MODE:
2208 hci_cs_sniff_mode(hdev, ev->status);
2209 break;
2210
2211 case HCI_OP_EXIT_SNIFF_MODE:
2212 hci_cs_exit_sniff_mode(hdev, ev->status);
2213 break;
2214
2215 case HCI_OP_DISCONNECT:
2216 if (ev->status != 0)
2217 mgmt_disconnect_failed(hdev, NULL, ev->status);
2218 break;
2219
2220 case HCI_OP_LE_CREATE_CONN:
2221 hci_cs_le_create_conn(hdev, ev->status);
2222 break;
2223
2224 case HCI_OP_LE_START_ENC:
2225 hci_cs_le_start_enc(hdev, ev->status);
2226 break;
2227
2228 default:
2229 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2230 break;
2231 }
2232
2233 if (ev->opcode != HCI_OP_NOP)
2234 del_timer(&hdev->cmd_timer);
2235
2236 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2237 atomic_set(&hdev->cmd_cnt, 1);
2238 if (!skb_queue_empty(&hdev->cmd_q))
2239 queue_work(hdev->workqueue, &hdev->cmd_work);
2240 }
2241 }
2242
2243 static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2244 {
2245 struct hci_ev_role_change *ev = (void *) skb->data;
2246 struct hci_conn *conn;
2247
2248 BT_DBG("%s status %d", hdev->name, ev->status);
2249
2250 hci_dev_lock(hdev);
2251
2252 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2253 if (conn) {
2254 if (!ev->status) {
2255 if (ev->role)
2256 conn->link_mode &= ~HCI_LM_MASTER;
2257 else
2258 conn->link_mode |= HCI_LM_MASTER;
2259 }
2260
2261 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->pend);
2262
2263 hci_role_switch_cfm(conn, ev->status, ev->role);
2264 }
2265
2266 hci_dev_unlock(hdev);
2267 }
2268
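/* Number Of Completed Packets event: per-handle flow control credits.
 * Completed packets are returned to the ACL, SCO or LE quota (LE falls
 * back to the ACL pool when the controller advertises no dedicated LE
 * buffers) and the TX work is rescheduled. */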
2269 static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2270 {
2271 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2272 int i;
2273
2274 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2275 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2276 return;
2277 }
2278
2279 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2280 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2281 BT_DBG("%s bad parameters", hdev->name);
2282 return;
2283 }
2284
2285 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2286
2287 for (i = 0; i < ev->num_hndl; i++) {
2288 struct hci_comp_pkts_info *info = &ev->handles[i];
2289 struct hci_conn *conn;
2290 __u16 handle, count;
2291
2292 handle = __le16_to_cpu(info->handle);
2293 count = __le16_to_cpu(info->count);
2294
2295 conn = hci_conn_hash_lookup_handle(hdev, handle);
2296 if (!conn)
2297 continue;
2298
2299 conn->sent -= count;
2300
2301 switch (conn->type) {
2302 case ACL_LINK:
2303 hdev->acl_cnt += count;
2304 if (hdev->acl_cnt > hdev->acl_pkts)
2305 hdev->acl_cnt = hdev->acl_pkts;
2306 break;
2307
2308 case LE_LINK:
2309 if (hdev->le_pkts) {
2310 hdev->le_cnt += count;
2311 if (hdev->le_cnt > hdev->le_pkts)
2312 hdev->le_cnt = hdev->le_pkts;
2313 } else {
2314 hdev->acl_cnt += count;
2315 if (hdev->acl_cnt > hdev->acl_pkts)
2316 hdev->acl_cnt = hdev->acl_pkts;
2317 }
2318 break;
2319
2320 case SCO_LINK:
2321 hdev->sco_cnt += count;
2322 if (hdev->sco_cnt > hdev->sco_pkts)
2323 hdev->sco_cnt = hdev->sco_pkts;
2324 break;
2325
2326 default:
2327 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2328 break;
2329 }
2330 }
2331
2332 queue_work(hdev->workqueue, &hdev->tx_work);
2333 }
2334
2335 static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2336 {
2337 struct hci_ev_mode_change *ev = (void *) skb->data;
2338 struct hci_conn *conn;
2339
2340 BT_DBG("%s status %d", hdev->name, ev->status);
2341
2342 hci_dev_lock(hdev);
2343
2344 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2345 if (conn) {
2346 conn->mode = ev->mode;
2347 conn->interval = __le16_to_cpu(ev->interval);
2348
2349 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
2350 if (conn->mode == HCI_CM_ACTIVE)
2351 conn->power_save = 1;
2352 else
2353 conn->power_save = 0;
2354 }
2355
2356 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
2357 hci_sco_setup(conn, ev->status);
2358 }
2359
2360 hci_dev_unlock(hdev);
2361 }
2362
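/* PIN Code Request event (legacy pairing). Non-pairable devices send a
 * negative reply straight away; otherwise the request is forwarded to
 * user space via mgmt, with the secure hint set when the pending
 * security level is BT_SECURITY_HIGH. */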
2363 static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2364 {
2365 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2366 struct hci_conn *conn;
2367
2368 BT_DBG("%s", hdev->name);
2369
2370 hci_dev_lock(hdev);
2371
2372 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2373 if (!conn)
2374 goto unlock;
2375
2376 if (conn->state == BT_CONNECTED) {
2377 hci_conn_hold(conn);
2378 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2379 hci_conn_put(conn);
2380 }
2381
2382 if (!test_bit(HCI_PAIRABLE, &hdev->flags))
2383 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2384 sizeof(ev->bdaddr), &ev->bdaddr);
2385 else if (test_bit(HCI_MGMT, &hdev->flags)) {
2386 u8 secure;
2387
2388 if (conn->pending_sec_level == BT_SECURITY_HIGH)
2389 secure = 1;
2390 else
2391 secure = 0;
2392
2393 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2394 }
2395
2396 unlock:
2397 hci_dev_unlock(hdev);
2398 }
2399
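/* Link Key Request event: look up a stored key for the peer address
 * and answer with HCI_OP_LINK_KEY_REPLY. Debug keys (unless enabled)
 * and keys too weak for the pending security level are treated as not
 * found and get a negative reply. */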
2400 static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2401 {
2402 struct hci_ev_link_key_req *ev = (void *) skb->data;
2403 struct hci_cp_link_key_reply cp;
2404 struct hci_conn *conn;
2405 struct link_key *key;
2406
2407 BT_DBG("%s", hdev->name);
2408
2409 if (!test_bit(HCI_LINK_KEYS, &hdev->flags))
2410 return;
2411
2412 hci_dev_lock(hdev);
2413
2414 key = hci_find_link_key(hdev, &ev->bdaddr);
2415 if (!key) {
2416 BT_DBG("%s link key not found for %s", hdev->name,
2417 batostr(&ev->bdaddr));
2418 goto not_found;
2419 }
2420
2421 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2422 batostr(&ev->bdaddr));
2423
2424 if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) &&
2425 key->type == HCI_LK_DEBUG_COMBINATION) {
2426 BT_DBG("%s ignoring debug key", hdev->name);
2427 goto not_found;
2428 }
2429
2430 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2431 if (conn) {
2432 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2433 conn->auth_type != 0xff &&
2434 (conn->auth_type & 0x01)) {
2435 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2436 goto not_found;
2437 }
2438
2439 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2440 conn->pending_sec_level == BT_SECURITY_HIGH) {
2441 BT_DBG("%s ignoring key unauthenticated for high \
2442 security", hdev->name);
2443 goto not_found;
2444 }
2445
2446 conn->key_type = key->type;
2447 conn->pin_length = key->pin_len;
2448 }
2449
2450 bacpy(&cp.bdaddr, &ev->bdaddr);
2451 memcpy(cp.link_key, key->val, 16);
2452
2453 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2454
2455 hci_dev_unlock(hdev);
2456
2457 return;
2458
2459 not_found:
2460 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2461 hci_dev_unlock(hdev);
2462 }
2463
2464 static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2465 {
2466 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2467 struct hci_conn *conn;
2468 u8 pin_len = 0;
2469
2470 BT_DBG("%s", hdev->name);
2471
2472 hci_dev_lock(hdev);
2473
2474 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2475 if (conn) {
2476 hci_conn_hold(conn);
2477 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2478 pin_len = conn->pin_length;
2479
2480 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2481 conn->key_type = ev->key_type;
2482
2483 hci_conn_put(conn);
2484 }
2485
2486 if (test_bit(HCI_LINK_KEYS, &hdev->flags))
2487 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2488 ev->key_type, pin_len);
2489
2490 hci_dev_unlock(hdev);
2491 }
2492
2493 static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2494 {
2495 struct hci_ev_clock_offset *ev = (void *) skb->data;
2496 struct hci_conn *conn;
2497
2498 BT_DBG("%s status %d", hdev->name, ev->status);
2499
2500 hci_dev_lock(hdev);
2501
2502 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2503 if (conn && !ev->status) {
2504 struct inquiry_entry *ie;
2505
2506 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2507 if (ie) {
2508 ie->data.clock_offset = ev->clock_offset;
2509 ie->timestamp = jiffies;
2510 }
2511 }
2512
2513 hci_dev_unlock(hdev);
2514 }
2515
2516 static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2517 {
2518 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2519 struct hci_conn *conn;
2520
2521 BT_DBG("%s status %d", hdev->name, ev->status);
2522
2523 hci_dev_lock(hdev);
2524
2525 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2526 if (conn && !ev->status)
2527 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2528
2529 hci_dev_unlock(hdev);
2530 }
2531
2532 static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2533 {
2534 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2535 struct inquiry_entry *ie;
2536
2537 BT_DBG("%s", hdev->name);
2538
2539 hci_dev_lock(hdev);
2540
2541 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2542 if (ie) {
2543 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2544 ie->timestamp = jiffies;
2545 }
2546
2547 hci_dev_unlock(hdev);
2548 }
2549
2550 static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
2551 {
2552 struct inquiry_data data;
2553 int num_rsp = *((__u8 *) skb->data);
2554
2555 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2556
2557 if (!num_rsp)
2558 return;
2559
2560 hci_dev_lock(hdev);
2561
2562 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2563 struct inquiry_info_with_rssi_and_pscan_mode *info;
2564 info = (void *) (skb->data + 1);
2565
2566 for (; num_rsp; num_rsp--, info++) {
2567 bacpy(&data.bdaddr, &info->bdaddr);
2568 data.pscan_rep_mode = info->pscan_rep_mode;
2569 data.pscan_period_mode = info->pscan_period_mode;
2570 data.pscan_mode = info->pscan_mode;
2571 memcpy(data.dev_class, info->dev_class, 3);
2572 data.clock_offset = info->clock_offset;
2573 data.rssi = info->rssi;
2574 data.ssp_mode = 0x00;
2575 hci_inquiry_cache_update(hdev, &data);
2576 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2577 info->dev_class, info->rssi,
2578 NULL);
2579 }
2580 } else {
2581 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2582
2583 for (; num_rsp; num_rsp--, info++) {
2584 bacpy(&data.bdaddr, &info->bdaddr);
2585 data.pscan_rep_mode = info->pscan_rep_mode;
2586 data.pscan_period_mode = info->pscan_period_mode;
2587 data.pscan_mode = 0x00;
2588 memcpy(data.dev_class, info->dev_class, 3);
2589 data.clock_offset = info->clock_offset;
2590 data.rssi = info->rssi;
2591 data.ssp_mode = 0x00;
2592 hci_inquiry_cache_update(hdev, &data);
2593 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2594 info->dev_class, info->rssi,
2595 NULL);
2596 }
2597 }
2598
2599 hci_dev_unlock(hdev);
2600 }
2601
2602 static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2603 {
2604 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2605 struct hci_conn *conn;
2606
2607 BT_DBG("%s", hdev->name);
2608
2609 hci_dev_lock(hdev);
2610
2611 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2612 if (!conn)
2613 goto unlock;
2614
2615 if (!ev->status && ev->page == 0x01) {
2616 struct inquiry_entry *ie;
2617
2618 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2619 if (ie)
2620 ie->data.ssp_mode = (ev->features[0] & 0x01);
2621
2622 conn->ssp_mode = (ev->features[0] & 0x01);
2623 }
2624
2625 if (conn->state != BT_CONFIG)
2626 goto unlock;
2627
2628 if (!ev->status) {
2629 struct hci_cp_remote_name_req cp;
2630 memset(&cp, 0, sizeof(cp));
2631 bacpy(&cp.bdaddr, &conn->dst);
2632 cp.pscan_rep_mode = 0x02;
2633 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2634 }
2635
2636 if (!hci_outgoing_auth_needed(hdev, conn)) {
2637 conn->state = BT_CONNECTED;
2638 hci_proto_connect_cfm(conn, ev->status);
2639 hci_conn_put(conn);
2640 }
2641
2642 unlock:
2643 hci_dev_unlock(hdev);
2644 }
2645
2646 static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2647 {
2648 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2649 struct hci_conn *conn;
2650
2651 BT_DBG("%s status %d", hdev->name, ev->status);
2652
2653 hci_dev_lock(hdev);
2654
2655 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2656 if (!conn) {
2657 if (ev->link_type == ESCO_LINK)
2658 goto unlock;
2659
2660 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2661 if (!conn)
2662 goto unlock;
2663
2664 conn->type = SCO_LINK;
2665 }
2666
2667 switch (ev->status) {
2668 case 0x00:
2669 conn->handle = __le16_to_cpu(ev->handle);
2670 conn->state = BT_CONNECTED;
2671
2672 hci_conn_hold_device(conn);
2673 hci_conn_add_sysfs(conn);
2674 break;
2675
2676 case 0x11: /* Unsupported Feature or Parameter Value */
2677 case 0x1c: /* SCO interval rejected */
2678 case 0x1a: /* Unsupported Remote Feature */
2679 case 0x1f: /* Unspecified error */
2680 if (conn->out && conn->attempt < 2) {
2681 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2682 (hdev->esco_type & EDR_ESCO_MASK);
2683 hci_setup_sync(conn, conn->link->handle);
2684 goto unlock;
2685 }
2686 /* fall through */
2687
2688 default:
2689 conn->state = BT_CLOSED;
2690 break;
2691 }
2692
2693 hci_proto_connect_cfm(conn, ev->status);
2694 if (ev->status)
2695 hci_conn_del(conn);
2696
2697 unlock:
2698 hci_dev_unlock(hdev);
2699 }
2700
2701 static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
2702 {
2703 BT_DBG("%s", hdev->name);
2704 }
2705
2706 static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
2707 {
2708 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
2709
2710 BT_DBG("%s status %d", hdev->name, ev->status);
2711 }
2712
2713 static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2714 {
2715 struct inquiry_data data;
2716 struct extended_inquiry_info *info = (void *) (skb->data + 1);
2717 int num_rsp = *((__u8 *) skb->data);
2718
2719 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2720
2721 if (!num_rsp)
2722 return;
2723
2724 hci_dev_lock(hdev);
2725
2726 for (; num_rsp; num_rsp--, info++) {
2727 bacpy(&data.bdaddr, &info->bdaddr);
2728 data.pscan_rep_mode = info->pscan_rep_mode;
2729 data.pscan_period_mode = info->pscan_period_mode;
2730 data.pscan_mode = 0x00;
2731 memcpy(data.dev_class, info->dev_class, 3);
2732 data.clock_offset = info->clock_offset;
2733 data.rssi = info->rssi;
2734 data.ssp_mode = 0x01;
2735 hci_inquiry_cache_update(hdev, &data);
2736 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2737 info->dev_class, info->rssi, info->data);
2738 }
2739
2740 hci_dev_unlock(hdev);
2741 }
2742
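/* Pick the SSP authentication requirements to advertise in the IO
 * Capability Reply. The values follow the Bluetooth Core Specification:
 *   0x00/0x01  No Bonding        (without / with MITM protection)
 *   0x02/0x03  Dedicated Bonding (without / with MITM protection)
 *   0x04/0x05  General Bonding   (without / with MITM protection)
 * IO capability 0x03 is NoInputNoOutput, which rules out MITM. */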
2743 static inline u8 hci_get_auth_req(struct hci_conn *conn)
2744 {
2745 /* If remote requests dedicated bonding follow that lead */
2746 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
2747 /* If both remote and local IO capabilities allow MITM
2748 * protection then require it, otherwise don't */
2749 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
2750 return 0x02;
2751 else
2752 return 0x03;
2753 }
2754
2755 /* If remote requests no-bonding follow that lead */
2756 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
2757 return conn->remote_auth | (conn->auth_type & 0x01);
2758
2759 return conn->auth_type;
2760 }
2761
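/* IO Capability Request event, only acted upon when mgmt is in use.
 * When we are pairable, or the remote only asked for no-bonding, reply
 * with our IO capability, the authentication requirements from
 * hci_get_auth_req() and whether OOB data for the peer is stored;
 * otherwise reject the pairing with HCI_ERROR_PAIRING_NOT_ALLOWED. */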
2762 static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2763 {
2764 struct hci_ev_io_capa_request *ev = (void *) skb->data;
2765 struct hci_conn *conn;
2766
2767 BT_DBG("%s", hdev->name);
2768
2769 hci_dev_lock(hdev);
2770
2771 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2772 if (!conn)
2773 goto unlock;
2774
2775 hci_conn_hold(conn);
2776
2777 if (!test_bit(HCI_MGMT, &hdev->flags))
2778 goto unlock;
2779
2780 if (test_bit(HCI_PAIRABLE, &hdev->flags) ||
2781 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
2782 struct hci_cp_io_capability_reply cp;
2783
2784 bacpy(&cp.bdaddr, &ev->bdaddr);
2785 cp.capability = conn->io_capability;
2786 conn->auth_type = hci_get_auth_req(conn);
2787 cp.authentication = conn->auth_type;
2788
2789 if ((conn->out == 0x01 || conn->remote_oob == 0x01) &&
2790 hci_find_remote_oob_data(hdev, &conn->dst))
2791 cp.oob_data = 0x01;
2792 else
2793 cp.oob_data = 0x00;
2794
2795 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
2796 sizeof(cp), &cp);
2797 } else {
2798 struct hci_cp_io_capability_neg_reply cp;
2799
2800 bacpy(&cp.bdaddr, &ev->bdaddr);
2801 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
2802
2803 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
2804 sizeof(cp), &cp);
2805 }
2806
2807 unlock:
2808 hci_dev_unlock(hdev);
2809 }
2810
2811 static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
2812 {
2813 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
2814 struct hci_conn *conn;
2815
2816 BT_DBG("%s", hdev->name);
2817
2818 hci_dev_lock(hdev);
2819
2820 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2821 if (!conn)
2822 goto unlock;
2823
2824 conn->remote_cap = ev->capability;
2825 conn->remote_oob = ev->oob_data;
2826 conn->remote_auth = ev->authentication;
2827
2828 unlock:
2829 hci_dev_unlock(hdev);
2830 }
2831
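/* User Confirmation Request event (SSP numeric comparison). The request
 * is rejected when we require MITM protection but the remote is
 * NoInputNoOutput, unless we are the dedicated bonding initiator. When
 * neither side needs MITM it is auto-accepted, either immediately,
 * after the configurable auto_accept_delay, or via user space with
 * confirm_hint set when we are only the acceptor; everything else is
 * handed to user space for a real yes/no decision. */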
2832 static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
2833 struct sk_buff *skb)
2834 {
2835 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
2836 int loc_mitm, rem_mitm, confirm_hint = 0;
2837 struct hci_conn *conn;
2838
2839 BT_DBG("%s", hdev->name);
2840
2841 hci_dev_lock(hdev);
2842
2843 if (!test_bit(HCI_MGMT, &hdev->flags))
2844 goto unlock;
2845
2846 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2847 if (!conn)
2848 goto unlock;
2849
2850 loc_mitm = (conn->auth_type & 0x01);
2851 rem_mitm = (conn->remote_auth & 0x01);
2852
2853 /* If we require MITM but the remote device can't provide that
2854 * (it has NoInputNoOutput) then reject the confirmation
2855 * request. The only exception is when we're dedicated bonding
2856 * initiators (connect_cfm_cb set) since then we always have the MITM
2857 * bit set. */
2858 if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
2859 BT_DBG("Rejecting request: remote device can't provide MITM");
2860 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
2861 sizeof(ev->bdaddr), &ev->bdaddr);
2862 goto unlock;
2863 }
2864
2865 /* If no side requires MITM protection, auto-accept */
2866 if ((!loc_mitm || conn->remote_cap == 0x03) &&
2867 (!rem_mitm || conn->io_capability == 0x03)) {
2868
2869 /* If we're not the initiator, request authorization from
2870 * user space to proceed (mgmt_user_confirm with
2871 * confirm_hint set to 1). */
2872 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
2873 BT_DBG("Confirming auto-accept as acceptor");
2874 confirm_hint = 1;
2875 goto confirm;
2876 }
2877
2878 BT_DBG("Auto-accept of user confirmation with %ums delay",
2879 hdev->auto_accept_delay);
2880
2881 if (hdev->auto_accept_delay > 0) {
2882 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
2883 mod_timer(&conn->auto_accept_timer, jiffies + delay);
2884 goto unlock;
2885 }
2886
2887 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
2888 sizeof(ev->bdaddr), &ev->bdaddr);
2889 goto unlock;
2890 }
2891
2892 confirm:
2893 mgmt_user_confirm_request(hdev, &ev->bdaddr, ev->passkey,
2894 confirm_hint);
2895
2896 unlock:
2897 hci_dev_unlock(hdev);
2898 }
2899
2900 static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
2901 struct sk_buff *skb)
2902 {
2903 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
2904
2905 BT_DBG("%s", hdev->name);
2906
2907 hci_dev_lock(hdev);
2908
2909 if (test_bit(HCI_MGMT, &hdev->flags))
2910 mgmt_user_passkey_request(hdev, &ev->bdaddr);
2911
2912 hci_dev_unlock(hdev);
2913 }
2914
2915 static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2916 {
2917 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
2918 struct hci_conn *conn;
2919
2920 BT_DBG("%s", hdev->name);
2921
2922 hci_dev_lock(hdev);
2923
2924 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2925 if (!conn)
2926 goto unlock;
2927
2928 /* To avoid duplicate auth_failed events to user space we check
2929 * the HCI_CONN_AUTH_PEND flag, which will be set if we
2930 * initiated the authentication. A traditional auth_complete
2931 * event is always produced as initiator and is also mapped to
2932 * the mgmt_auth_failed event. */
2933 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0)
2934 mgmt_auth_failed(hdev, &conn->dst, ev->status);
2935
2936 hci_conn_put(conn);
2937
2938 unlock:
2939 hci_dev_unlock(hdev);
2940 }
2941
2942 static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2943 {
2944 struct hci_ev_remote_host_features *ev = (void *) skb->data;
2945 struct inquiry_entry *ie;
2946
2947 BT_DBG("%s", hdev->name);
2948
2949 hci_dev_lock(hdev);
2950
2951 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2952 if (ie)
2953 ie->data.ssp_mode = (ev->features[0] & 0x01);
2954
2955 hci_dev_unlock(hdev);
2956 }
2957
2958 static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
2959 struct sk_buff *skb)
2960 {
2961 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
2962 struct oob_data *data;
2963
2964 BT_DBG("%s", hdev->name);
2965
2966 hci_dev_lock(hdev);
2967
2968 if (!test_bit(HCI_MGMT, &hdev->flags))
2969 goto unlock;
2970
2971 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
2972 if (data) {
2973 struct hci_cp_remote_oob_data_reply cp;
2974
2975 bacpy(&cp.bdaddr, &ev->bdaddr);
2976 memcpy(cp.hash, data->hash, sizeof(cp.hash));
2977 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
2978
2979 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
2980 &cp);
2981 } else {
2982 struct hci_cp_remote_oob_data_neg_reply cp;
2983
2984 bacpy(&cp.bdaddr, &ev->bdaddr);
2985 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
2986 &cp);
2987 }
2988
2989 unlock:
2990 hci_dev_unlock(hdev);
2991 }
2992
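/* LE Connection Complete event. A hci_conn is created on demand when
 * none exists yet for the peer address; failures are reported through
 * mgmt_connect_failed() and the connection torn down, while on success
 * the handle is stored, the state moves to BT_CONNECTED and the
 * connection is announced via mgmt and sysfs. */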
2993 static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2994 {
2995 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
2996 struct hci_conn *conn;
2997
2998 BT_DBG("%s status %d", hdev->name, ev->status);
2999
3000 hci_dev_lock(hdev);
3001
3002 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
3003 if (!conn) {
3004 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3005 if (!conn) {
3006 BT_ERR("No memory for new connection");
3007 hci_dev_unlock(hdev);
3008 return;
3009 }
3010
3011 conn->dst_type = ev->bdaddr_type;
3012 }
3013
3014 if (ev->status) {
3015 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
3016 conn->dst_type, ev->status);
3017 hci_proto_connect_cfm(conn, ev->status);
3018 conn->state = BT_CLOSED;
3019 hci_conn_del(conn);
3020 goto unlock;
3021 }
3022
3023 mgmt_connected(hdev, &ev->bdaddr, conn->type, conn->dst_type);
3024
3025 conn->sec_level = BT_SECURITY_LOW;
3026 conn->handle = __le16_to_cpu(ev->handle);
3027 conn->state = BT_CONNECTED;
3028
3029 hci_conn_hold_device(conn);
3030 hci_conn_add_sysfs(conn);
3031
3032 hci_proto_connect_cfm(conn, ev->status);
3033
3034 unlock:
3035 hci_dev_unlock(hdev);
3036 }
3037
3038 static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
3039 struct sk_buff *skb)
3040 {
3041 u8 num_reports = skb->data[0];
3042 void *ptr = &skb->data[1];
3043
3044 hci_dev_lock(hdev);
3045
3046 while (num_reports--) {
3047 struct hci_ev_le_advertising_info *ev = ptr;
3048
3049 hci_add_adv_entry(hdev, ev);
3050
3051 ptr += sizeof(*ev) + ev->length + 1;
3052 }
3053
3054 hci_dev_unlock(hdev);
3055 }
3056
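/* LE Long Term Key Request event: the controller needs the LTK to
 * start or resume encryption. The key is looked up by EDIV/Rand and
 * returned with HCI_OP_LE_LTK_REPLY; if no matching key (or no
 * connection) is found, a negative reply is sent instead. */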
3057 static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
3058 struct sk_buff *skb)
3059 {
3060 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3061 struct hci_cp_le_ltk_reply cp;
3062 struct hci_cp_le_ltk_neg_reply neg;
3063 struct hci_conn *conn;
3064 struct link_key *ltk;
3065
3066 BT_DBG("%s handle %d", hdev->name, cpu_to_le16(ev->handle));
3067
3068 hci_dev_lock(hdev);
3069
3070 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3071 if (conn == NULL)
3072 goto not_found;
3073
3074 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3075 if (ltk == NULL)
3076 goto not_found;
3077
3078 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3079 cp.handle = cpu_to_le16(conn->handle);
3080 conn->pin_length = ltk->pin_len;
3081
3082 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3083
3084 hci_dev_unlock(hdev);
3085
3086 return;
3087
3088 not_found:
3089 neg.handle = ev->handle;
3090 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3091 hci_dev_unlock(hdev);
3092 }
3093
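/* All LE events arrive wrapped in a single LE Meta event; strip the
 * meta header and dispatch on the subevent code. */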
3094 static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3095 {
3096 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3097
3098 skb_pull(skb, sizeof(*le_ev));
3099
3100 switch (le_ev->subevent) {
3101 case HCI_EV_LE_CONN_COMPLETE:
3102 hci_le_conn_complete_evt(hdev, skb);
3103 break;
3104
3105 case HCI_EV_LE_ADVERTISING_REPORT:
3106 hci_le_adv_report_evt(hdev, skb);
3107 break;
3108
3109 case HCI_EV_LE_LTK_REQ:
3110 hci_le_ltk_request_evt(hdev, skb);
3111 break;
3112
3113 default:
3114 break;
3115 }
3116 }
3117
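/* Entry point for incoming HCI event packets: strip the event header,
 * dispatch to the handler for the event code, then free the skb and
 * account the event in the device statistics. */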
3118 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3119 {
3120 struct hci_event_hdr *hdr = (void *) skb->data;
3121 __u8 event = hdr->evt;
3122
3123 skb_pull(skb, HCI_EVENT_HDR_SIZE);
3124
3125 switch (event) {
3126 case HCI_EV_INQUIRY_COMPLETE:
3127 hci_inquiry_complete_evt(hdev, skb);
3128 break;
3129
3130 case HCI_EV_INQUIRY_RESULT:
3131 hci_inquiry_result_evt(hdev, skb);
3132 break;
3133
3134 case HCI_EV_CONN_COMPLETE:
3135 hci_conn_complete_evt(hdev, skb);
3136 break;
3137
3138 case HCI_EV_CONN_REQUEST:
3139 hci_conn_request_evt(hdev, skb);
3140 break;
3141
3142 case HCI_EV_DISCONN_COMPLETE:
3143 hci_disconn_complete_evt(hdev, skb);
3144 break;
3145
3146 case HCI_EV_AUTH_COMPLETE:
3147 hci_auth_complete_evt(hdev, skb);
3148 break;
3149
3150 case HCI_EV_REMOTE_NAME:
3151 hci_remote_name_evt(hdev, skb);
3152 break;
3153
3154 case HCI_EV_ENCRYPT_CHANGE:
3155 hci_encrypt_change_evt(hdev, skb);
3156 break;
3157
3158 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3159 hci_change_link_key_complete_evt(hdev, skb);
3160 break;
3161
3162 case HCI_EV_REMOTE_FEATURES:
3163 hci_remote_features_evt(hdev, skb);
3164 break;
3165
3166 case HCI_EV_REMOTE_VERSION:
3167 hci_remote_version_evt(hdev, skb);
3168 break;
3169
3170 case HCI_EV_QOS_SETUP_COMPLETE:
3171 hci_qos_setup_complete_evt(hdev, skb);
3172 break;
3173
3174 case HCI_EV_CMD_COMPLETE:
3175 hci_cmd_complete_evt(hdev, skb);
3176 break;
3177
3178 case HCI_EV_CMD_STATUS:
3179 hci_cmd_status_evt(hdev, skb);
3180 break;
3181
3182 case HCI_EV_ROLE_CHANGE:
3183 hci_role_change_evt(hdev, skb);
3184 break;
3185
3186 case HCI_EV_NUM_COMP_PKTS:
3187 hci_num_comp_pkts_evt(hdev, skb);
3188 break;
3189
3190 case HCI_EV_MODE_CHANGE:
3191 hci_mode_change_evt(hdev, skb);
3192 break;
3193
3194 case HCI_EV_PIN_CODE_REQ:
3195 hci_pin_code_request_evt(hdev, skb);
3196 break;
3197
3198 case HCI_EV_LINK_KEY_REQ:
3199 hci_link_key_request_evt(hdev, skb);
3200 break;
3201
3202 case HCI_EV_LINK_KEY_NOTIFY:
3203 hci_link_key_notify_evt(hdev, skb);
3204 break;
3205
3206 case HCI_EV_CLOCK_OFFSET:
3207 hci_clock_offset_evt(hdev, skb);
3208 break;
3209
3210 case HCI_EV_PKT_TYPE_CHANGE:
3211 hci_pkt_type_change_evt(hdev, skb);
3212 break;
3213
3214 case HCI_EV_PSCAN_REP_MODE:
3215 hci_pscan_rep_mode_evt(hdev, skb);
3216 break;
3217
3218 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3219 hci_inquiry_result_with_rssi_evt(hdev, skb);
3220 break;
3221
3222 case HCI_EV_REMOTE_EXT_FEATURES:
3223 hci_remote_ext_features_evt(hdev, skb);
3224 break;
3225
3226 case HCI_EV_SYNC_CONN_COMPLETE:
3227 hci_sync_conn_complete_evt(hdev, skb);
3228 break;
3229
3230 case HCI_EV_SYNC_CONN_CHANGED:
3231 hci_sync_conn_changed_evt(hdev, skb);
3232 break;
3233
3234 case HCI_EV_SNIFF_SUBRATE:
3235 hci_sniff_subrate_evt(hdev, skb);
3236 break;
3237
3238 case HCI_EV_EXTENDED_INQUIRY_RESULT:
3239 hci_extended_inquiry_result_evt(hdev, skb);
3240 break;
3241
3242 case HCI_EV_IO_CAPA_REQUEST:
3243 hci_io_capa_request_evt(hdev, skb);
3244 break;
3245
3246 case HCI_EV_IO_CAPA_REPLY:
3247 hci_io_capa_reply_evt(hdev, skb);
3248 break;
3249
3250 case HCI_EV_USER_CONFIRM_REQUEST:
3251 hci_user_confirm_request_evt(hdev, skb);
3252 break;
3253
3254 case HCI_EV_USER_PASSKEY_REQUEST:
3255 hci_user_passkey_request_evt(hdev, skb);
3256 break;
3257
3258 case HCI_EV_SIMPLE_PAIR_COMPLETE:
3259 hci_simple_pair_complete_evt(hdev, skb);
3260 break;
3261
3262 case HCI_EV_REMOTE_HOST_FEATURES:
3263 hci_remote_host_features_evt(hdev, skb);
3264 break;
3265
3266 case HCI_EV_LE_META:
3267 hci_le_meta_evt(hdev, skb);
3268 break;
3269
3270 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3271 hci_remote_oob_data_request_evt(hdev, skb);
3272 break;
3273
3274 default:
3275 BT_DBG("%s event 0x%x", hdev->name, event);
3276 break;
3277 }
3278
3279 kfree_skb(skb);
3280 hdev->stat.evt_rx++;
3281 }
3282
3283 /* Generate internal stack event */
3284 void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
3285 {
3286 struct hci_event_hdr *hdr;
3287 struct hci_ev_stack_internal *ev;
3288 struct sk_buff *skb;
3289
3290 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
3291 if (!skb)
3292 return;
3293
3294 hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
3295 hdr->evt = HCI_EV_STACK_INTERNAL;
3296 hdr->plen = sizeof(*ev) + dlen;
3297
3298 ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
3299 ev->type = type;
3300 memcpy(ev->data, data, dlen);
3301
3302 bt_cb(skb)->incoming = 1;
3303 __net_timestamp(skb);
3304
3305 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3306 skb->dev = (void *) hdev;
3307 hci_send_to_sock(hdev, skb, NULL);
3308 kfree_skb(skb);
3309 }
3310
3311 module_param(enable_le, bool, 0644);
3312 MODULE_PARM_DESC(enable_le, "Enable LE support");
3313