/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010 Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <linux/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
#include "msft.h"
#include "eir.h"
#include "aosp.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	23

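/* Opcodes and events exposed over the HCI control channel. Sockets with
 * HCI_SOCK_TRUSTED set are offered the full tables; untrusted sockets
 * are limited to the read-only subsets defined further below.
 */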
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
	MGMT_OP_HCI_CMD_SYNC,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};

static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};

static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};

#define CACHE_TIMEOUT	secs_to_jiffies(2)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* HCI to MGMT error code conversion table */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
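
/* The table above is indexed directly by the HCI status code, so for
 * example a Page Timeout (0x04) is reported as
 * MGMT_STATUS_CONNECT_FAILED.
 */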

static u8 mgmt_errno_status(int err)
{
	switch (err) {
	case 0:
		return MGMT_STATUS_SUCCESS;
	case -EPERM:
		return MGMT_STATUS_REJECTED;
	case -EINVAL:
		return MGMT_STATUS_INVALID_PARAMS;
	case -EOPNOTSUPP:
		return MGMT_STATUS_NOT_SUPPORTED;
	case -EBUSY:
		return MGMT_STATUS_BUSY;
	case -ETIMEDOUT:
		return MGMT_STATUS_AUTH_FAILED;
	case -ENOMEM:
		return MGMT_STATUS_NO_RESOURCES;
	case -EISCONN:
		return MGMT_STATUS_ALREADY_CONNECTED;
	case -ENOTCONN:
		return MGMT_STATUS_DISCONNECTED;
	}

	return MGMT_STATUS_FAILED;
}

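/* Map a kernel errno (negative) or an HCI status code (non-negative) to
 * a MGMT status. For example, mgmt_status(-EBUSY) and mgmt_status(0x0c)
 * ("Command Disallowed") both yield MGMT_STATUS_BUSY.
 */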
static u8 mgmt_status(int err)
{
	if (err < 0)
		return mgmt_errno_status(err);

	if (err < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[err];

	return MGMT_STATUS_FAILED;
}

static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}

static u8 le_addr_type(u8 mgmt_addr_type)
{
	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
		return ADDR_LE_DEV_PUBLIC;
	else
		return ADDR_LE_DEV_RANDOM;
}

void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}

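/* MGMT_OP_READ_COMMANDS: the reply is a single allocation holding the
 * fixed header followed by num_commands + num_events little-endian u16
 * opcodes, picked from the trusted or untrusted tables as appropriate.
 */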
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}

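/* MGMT_OP_READ_INDEX_LIST: report the configured controller indexes.
 * Controllers in setup/config state, bound to a user channel, raw-only
 * or still unconfigured are not listed.
 */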
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}

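/* A controller is considered configured once any externally required
 * configuration has completed and, where the quirks require it, a valid
 * public address has been set.
 */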
static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		return false;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}

static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}

static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}

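/* Build the bitmask of PHYs the controller supports, derived from the
 * BR/EDR LMP feature bits and the LE feature page.
 */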
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}

static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}

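/* The mandatory PHYs (BR 1M 1-slot and LE 1M TX/RX) can never be
 * disabled, so they are masked out of the configurable set.
 */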
static u32 get_configurable_phys(struct hci_dev *hdev)
{
	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
}

static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev))
			settings |= MGMT_SETTING_SSP;

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (ll_privacy_capable(hdev))
		settings |= MGMT_SETTING_LL_PRIVACY;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then whether
	 * the address is actually used determines if the flag is set.
	 *
	 * For LE-only controllers and dual-mode controllers with BR/EDR
	 * disabled, the existence of the static address will be
	 * evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	if (ll_privacy_capable(hdev))
		settings |= MGMT_SETTING_LL_PRIVACY;

	return settings;
}

static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}

bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}

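/* Push the deferred EIR and class-of-device updates once the service
 * cache timeout fires.
 */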
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}

static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}

static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_start_ext_adv_sync() and
	 * hci_enable_advertising_sync() calls below.
	 */
	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
	else
		return hci_enable_advertising_sync(hdev);
}

static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data);

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);

static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}

static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}

static int mesh_send_sync(struct hci_dev *hdev, void *data);
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}

static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}

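/* Initialize mgmt state for a controller on first use; subsequent calls
 * are no-ops thanks to the HCI_MGMT flag check.
 */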
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them. For mgmt,
	 * however, we require user space to explicitly enable
	 * it.
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}

static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}

static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}

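/* MGMT_OP_READ_EXT_INFO: like READ_INFO, but class of device, appearance
 * and the local names are appended as EIR data after the fixed part of
 * the reply.
 */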
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}

static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}

void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}

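/* Broadcast the current settings to all sockets subscribed to setting
 * events, optionally skipping the socket that triggered the change.
 */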
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}

static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_settings() for power on, as power off is
		 * deferred to hdev->power_off work which does call
		 * hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}

static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return -ECANCELED;

	cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}

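/* MGMT_OP_SET_POWERED: power the controller up or down. For power-off,
 * any potentially blocking cmd_sync operation is cancelled first so it
 * cannot stall the power sequence.
 */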
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!cp->val) {
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
					      MGMT_STATUS_BUSY);
			goto failed;
		}
	}

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}

static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}

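/* Complete or fail a pending command, first dequeuing any cmd_sync work
 * that still references it since the command is about to be freed.
 */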
static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	/* dequeue cmd_sync entries using cmd as data as that is about to be
	 * removed/freed.
	 */
	hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL);

	if (cmd->cmd_complete) {
		cmd->cmd_complete(cmd, match->mgmt_status);
		mgmt_pending_remove(cmd);

		return;
	}

	cmd_status_rsp(cmd, data);
}

static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}

static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}

static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = secs_to_jiffies(hdev->discov_timeout);

		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}

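/* MGMT_OP_SET_DISCOVERABLE: val 0x00 disables, 0x01 selects general and
 * 0x02 selects limited discoverable mode. Limited mode requires a
 * timeout, while disabling requires that no timeout is given.
 */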
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = secs_to_jiffies(hdev->discov_timeout);

			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
					  int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}

static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}

static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}

static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

set_bondable(struct sock * sk,struct hci_dev * hdev,void * data,u16 len)1819 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1820 u16 len)
1821 {
1822 struct mgmt_mode *cp = data;
1823 bool changed;
1824 int err;
1825
1826 bt_dev_dbg(hdev, "sock %p", sk);
1827
1828 if (cp->val != 0x00 && cp->val != 0x01)
1829 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1830 MGMT_STATUS_INVALID_PARAMS);
1831
1832 hci_dev_lock(hdev);
1833
1834 if (cp->val)
1835 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1836 else
1837 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1838
1839 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1840 if (err < 0)
1841 goto unlock;
1842
1843 if (changed) {
1844 /* In limited privacy mode the change of bondable mode
1845 * may affect the local advertising address.
1846 */
1847 hci_update_discoverable(hdev);
1848
1849 err = new_settings(hdev, sk);
1850 }
1851
1852 unlock:
1853 hci_dev_unlock(hdev);
1854 return err;
1855 }
1856
1857 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1858 u16 len)
1859 {
1860 struct mgmt_mode *cp = data;
1861 struct mgmt_pending_cmd *cmd;
1862 u8 val, status;
1863 int err;
1864
1865 bt_dev_dbg(hdev, "sock %p", sk);
1866
1867 status = mgmt_bredr_support(hdev);
1868 if (status)
1869 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1870 status);
1871
1872 if (cp->val != 0x00 && cp->val != 0x01)
1873 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1874 MGMT_STATUS_INVALID_PARAMS);
1875
1876 hci_dev_lock(hdev);
1877
1878 if (!hdev_is_powered(hdev)) {
1879 bool changed = false;
1880
1881 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1882 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1883 changed = true;
1884 }
1885
1886 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1887 if (err < 0)
1888 goto failed;
1889
1890 if (changed)
1891 err = new_settings(hdev, sk);
1892
1893 goto failed;
1894 }
1895
1896 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1897 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1898 MGMT_STATUS_BUSY);
1899 goto failed;
1900 }
1901
1902 val = !!cp->val;
1903
1904 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1905 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1906 goto failed;
1907 }
1908
1909 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1910 if (!cmd) {
1911 err = -ENOMEM;
1912 goto failed;
1913 }
1914
1915 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1916 if (err < 0) {
1917 mgmt_pending_remove(cmd);
1918 goto failed;
1919 }
1920
1921 failed:
1922 hci_dev_unlock(hdev);
1923 return err;
1924 }
1925
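/* Completion callback for Set SSP: on failure, roll back the
 * SSP_ENABLED flag and report the status to every pending SET_SSP
 * command; on success, sync the flag, notify and refresh the EIR.
 */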
1926 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1927 {
1928 struct cmd_lookup match = { NULL, hdev };
1929 struct mgmt_pending_cmd *cmd = data;
1930 struct mgmt_mode *cp = cmd->param;
1931 u8 enable = cp->val;
1932 bool changed;
1933
1934 /* Make sure cmd still outstanding. */
1935 if (err == -ECANCELED || cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1936 return;
1937
1938 if (err) {
1939 u8 mgmt_err = mgmt_status(err);
1940
1941 if (enable && hci_dev_test_and_clear_flag(hdev,
1942 HCI_SSP_ENABLED)) {
1943 new_settings(hdev, NULL);
1944 }
1945
1946 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1947 &mgmt_err);
1948 return;
1949 }
1950
1951 if (enable) {
1952 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1953 } else {
1954 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1955 }
1956
1957 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1958
1959 if (changed)
1960 new_settings(hdev, match.sk);
1961
1962 if (match.sk)
1963 sock_put(match.sk);
1964
1965 hci_update_eir_sync(hdev);
1966 }
1967
1968 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1969 {
1970 struct mgmt_pending_cmd *cmd = data;
1971 struct mgmt_mode *cp = cmd->param;
1972 bool changed = false;
1973 int err;
1974
1975 if (cp->val)
1976 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1977
1978 err = hci_write_ssp_mode_sync(hdev, cp->val);
1979
1980 if (!err && changed)
1981 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1982
1983 return err;
1984 }
1985
1986 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1987 {
1988 struct mgmt_mode *cp = data;
1989 struct mgmt_pending_cmd *cmd;
1990 u8 status;
1991 int err;
1992
1993 bt_dev_dbg(hdev, "sock %p", sk);
1994
1995 status = mgmt_bredr_support(hdev);
1996 if (status)
1997 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1998
1999 if (!lmp_ssp_capable(hdev))
2000 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2001 MGMT_STATUS_NOT_SUPPORTED);
2002
2003 if (cp->val != 0x00 && cp->val != 0x01)
2004 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2005 MGMT_STATUS_INVALID_PARAMS);
2006
2007 hci_dev_lock(hdev);
2008
2009 if (!hdev_is_powered(hdev)) {
2010 bool changed;
2011
2012 if (cp->val) {
2013 changed = !hci_dev_test_and_set_flag(hdev,
2014 HCI_SSP_ENABLED);
2015 } else {
2016 changed = hci_dev_test_and_clear_flag(hdev,
2017 HCI_SSP_ENABLED);
2018 }
2019
2020 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2021 if (err < 0)
2022 goto failed;
2023
2024 if (changed)
2025 err = new_settings(hdev, sk);
2026
2027 goto failed;
2028 }
2029
2030 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2031 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2032 MGMT_STATUS_BUSY);
2033 goto failed;
2034 }
2035
2036 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2037 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2038 goto failed;
2039 }
2040
2041 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2042 if (!cmd)
2043 err = -ENOMEM;
2044 else
2045 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2046 set_ssp_complete);
2047
2048 if (err < 0) {
2049 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2050 MGMT_STATUS_FAILED);
2051
2052 if (cmd)
2053 mgmt_pending_remove(cmd);
2054 }
2055
2056 failed:
2057 hci_dev_unlock(hdev);
2058 return err;
2059 }
2060
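/* Set High Speed: BR/EDR High Speed (AMP) is no longer supported, so
 * this always replies with MGMT_STATUS_NOT_SUPPORTED.
 */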
2061 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2062 {
2063 bt_dev_dbg(hdev, "sock %p", sk);
2064
2065 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2066 MGMT_STATUS_NOT_SUPPORTED);
2067 }
2068
2069 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2070 {
2071 struct cmd_lookup match = { NULL, hdev };
2072 u8 status = mgmt_status(err);
2073
2074 bt_dev_dbg(hdev, "err %d", err);
2075
2076 if (status) {
2077 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2078 &status);
2079 return;
2080 }
2081
2082 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2083
2084 new_settings(hdev, match.sk);
2085
2086 if (match.sk)
2087 sock_put(match.sk);
2088 }
2089
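/* Disabling LE tears down all advertising instances before clearing LE
 * host support; enabling sets HCI_LE_ENABLED first so the advertising
 * and scan response data below are only refreshed once LE is on.
 */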
2090 static int set_le_sync(struct hci_dev *hdev, void *data)
2091 {
2092 struct mgmt_pending_cmd *cmd = data;
2093 struct mgmt_mode *cp = cmd->param;
2094 u8 val = !!cp->val;
2095 int err;
2096
2097 if (!val) {
2098 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2099
2100 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2101 hci_disable_advertising_sync(hdev);
2102
2103 if (ext_adv_capable(hdev))
2104 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2105 } else {
2106 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2107 }
2108
2109 err = hci_write_le_host_supported_sync(hdev, val, 0);
2110
2111 /* Make sure the controller has a good default for
2112 * advertising data. Restrict the update to when LE
2113 * has actually been enabled. During power on, the
2114 * update in powered_update_hci will take care of it.
2115 */
2116 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2117 if (ext_adv_capable(hdev)) {
2118 int status;
2119
2120 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2121 if (!status)
2122 hci_update_scan_rsp_data_sync(hdev, 0x00);
2123 } else {
2124 hci_update_adv_data_sync(hdev, 0x00);
2125 hci_update_scan_rsp_data_sync(hdev, 0x00);
2126 }
2127
2128 hci_update_passive_scan(hdev);
2129 }
2130
2131 return err;
2132 }
2133
2134 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2135 {
2136 struct mgmt_pending_cmd *cmd = data;
2137 u8 status = mgmt_status(err);
2138 struct sock *sk = cmd->sk;
2139
2140 if (status) {
2141 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
2142 cmd_status_rsp, &status);
2143 return;
2144 }
2145
2146 mgmt_pending_remove(cmd);
2147 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2148 }
2149
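/* Apply the Mesh Receiver setting: store up to
 * sizeof(hdev->mesh_ad_types) AD type filters and re-sync passive
 * scanning; an over-long filter list is left empty so every adv
 * packet gets forwarded.
 */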
2150 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2151 {
2152 struct mgmt_pending_cmd *cmd = data;
2153 struct mgmt_cp_set_mesh *cp = cmd->param;
2154 size_t len = cmd->param_len;
2155
2156 memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2157
2158 if (cp->enable)
2159 hci_dev_set_flag(hdev, HCI_MESH);
2160 else
2161 hci_dev_clear_flag(hdev, HCI_MESH);
2162
2163 len -= sizeof(*cp);
2164
2165 /* If the filters don't fit, leave them empty and forward all adv pkts */
2166 if (len <= sizeof(hdev->mesh_ad_types))
2167 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2168
2169 hci_update_passive_scan_sync(hdev);
2170 return 0;
2171 }
2172
2173 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2174 {
2175 struct mgmt_cp_set_mesh *cp = data;
2176 struct mgmt_pending_cmd *cmd;
2177 int err = 0;
2178
2179 bt_dev_dbg(hdev, "sock %p", sk);
2180
2181 if (!lmp_le_capable(hdev) ||
2182 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2183 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2184 MGMT_STATUS_NOT_SUPPORTED);
2185
2186 if (cp->enable != 0x00 && cp->enable != 0x01)
2187 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2188 MGMT_STATUS_INVALID_PARAMS);
2189
2190 hci_dev_lock(hdev);
2191
2192 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2193 if (!cmd)
2194 err = -ENOMEM;
2195 else
2196 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2197 set_mesh_complete);
2198
2199 if (err < 0) {
2200 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2201 MGMT_STATUS_FAILED);
2202
2203 if (cmd)
2204 mgmt_pending_remove(cmd);
2205 }
2206
2207 hci_dev_unlock(hdev);
2208 return err;
2209 }
2210
2211 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2212 {
2213 struct mgmt_mesh_tx *mesh_tx = data;
2214 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2215 unsigned long mesh_send_interval;
2216 u8 mgmt_err = mgmt_status(err);
2217
2218 /* Report any errors here, but don't report completion */
2219
2220 if (mgmt_err) {
2221 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2222 /* Send Complete Error Code for handle */
2223 mesh_send_complete(hdev, mesh_tx, false);
2224 return;
2225 }
2226
2227 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2228 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2229 mesh_send_interval);
2230 }
2231
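/* Transmit a mesh packet by creating a one-off advertising instance
 * numbered just past the controller's advertising sets, then schedule
 * it unless another instance is already on air.
 */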
2232 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2233 {
2234 struct mgmt_mesh_tx *mesh_tx = data;
2235 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2236 struct adv_info *adv, *next_instance;
2237 u8 instance = hdev->le_num_of_adv_sets + 1;
2238 u16 timeout, duration;
2239 int err = 0;
2240
2241 if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2242 return MGMT_STATUS_BUSY;
2243
2244 timeout = 1000;
2245 duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2246 adv = hci_add_adv_instance(hdev, instance, 0,
2247 send->adv_data_len, send->adv_data,
2248 0, NULL,
2249 timeout, duration,
2250 HCI_ADV_TX_POWER_NO_PREFERENCE,
2251 hdev->le_adv_min_interval,
2252 hdev->le_adv_max_interval,
2253 mesh_tx->handle);
2254
2255 if (!IS_ERR(adv))
2256 mesh_tx->instance = instance;
2257 else
2258 err = PTR_ERR(adv);
2259
2260 if (hdev->cur_adv_instance == instance) {
2261 /* If the currently advertised instance is being changed then
2262 * cancel the current advertising and schedule the next
2263 * instance. If there is only one instance then the overridden
2264 * advertising data will be visible right away.
2265 */
2266 cancel_adv_timeout(hdev);
2267
2268 next_instance = hci_get_next_instance(hdev, instance);
2269 if (next_instance)
2270 instance = next_instance->instance;
2271 else
2272 instance = 0;
2273 } else if (hdev->adv_instance_timeout) {
2274 /* Immediately advertise the new instance if no other is active, or
2275 * let it go out naturally from the queue if advertising is already in progress.
2276 */
2277 instance = 0;
2278 }
2279
2280 if (instance)
2281 return hci_schedule_adv_instance_sync(hdev, instance, true);
2282
2283 return err;
2284 }
2285
2286 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2287 {
2288 struct mgmt_rp_mesh_read_features *rp = data;
2289
2290 if (rp->used_handles >= rp->max_handles)
2291 return;
2292
2293 rp->handles[rp->used_handles++] = mesh_tx->handle;
2294 }
2295
2296 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2297 void *data, u16 len)
2298 {
2299 struct mgmt_rp_mesh_read_features rp;
2300
2301 if (!lmp_le_capable(hdev) ||
2302 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2303 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2304 MGMT_STATUS_NOT_SUPPORTED);
2305
2306 memset(&rp, 0, sizeof(rp));
2307 rp.index = cpu_to_le16(hdev->id);
2308 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2309 rp.max_handles = MESH_HANDLES_MAX;
2310
2311 hci_dev_lock(hdev);
2312
2313 if (rp.max_handles)
2314 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2315
2316 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2317 rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2318
2319 hci_dev_unlock(hdev);
2320 return 0;
2321 }
2322
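/* Cancel mesh transmissions on behalf of cmd->sk: handle 0 cancels all
 * of the socket's outstanding packets, otherwise only the matching
 * handle owned by that socket is completed.
 */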
2323 static int send_cancel(struct hci_dev *hdev, void *data)
2324 {
2325 struct mgmt_pending_cmd *cmd = data;
2326 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2327 struct mgmt_mesh_tx *mesh_tx;
2328
2329 if (!cancel->handle) {
2330 do {
2331 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2332
2333 if (mesh_tx)
2334 mesh_send_complete(hdev, mesh_tx, false);
2335 } while (mesh_tx);
2336 } else {
2337 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2338
2339 if (mesh_tx && mesh_tx->sk == cmd->sk)
2340 mesh_send_complete(hdev, mesh_tx, false);
2341 }
2342
2343 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2344 0, NULL, 0);
2345 mgmt_pending_free(cmd);
2346
2347 return 0;
2348 }
2349
2350 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2351 void *data, u16 len)
2352 {
2353 struct mgmt_pending_cmd *cmd;
2354 int err;
2355
2356 if (!lmp_le_capable(hdev) ||
2357 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2358 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2359 MGMT_STATUS_NOT_SUPPORTED);
2360
2361 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2362 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2363 MGMT_STATUS_REJECTED);
2364
2365 hci_dev_lock(hdev);
2366 cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2367 if (!cmd)
2368 err = -ENOMEM;
2369 else
2370 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2371
2372 if (err < 0) {
2373 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2374 MGMT_STATUS_FAILED);
2375
2376 if (cmd)
2377 mgmt_pending_free(cmd);
2378 }
2379
2380 hci_dev_unlock(hdev);
2381 return err;
2382 }
2383
2384 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2385 {
2386 struct mgmt_mesh_tx *mesh_tx;
2387 struct mgmt_cp_mesh_send *send = data;
2388 struct mgmt_rp_mesh_read_features rp;
2389 bool sending;
2390 int err = 0;
2391
2392 if (!lmp_le_capable(hdev) ||
2393 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2394 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2395 MGMT_STATUS_NOT_SUPPORTED);
2396 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2397 len <= MGMT_MESH_SEND_SIZE ||
2398 len > (MGMT_MESH_SEND_SIZE + 31))
2399 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2400 MGMT_STATUS_REJECTED);
2401
2402 hci_dev_lock(hdev);
2403
2404 memset(&rp, 0, sizeof(rp));
2405 rp.max_handles = MESH_HANDLES_MAX;
2406
2407 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2408
2409 if (rp.max_handles <= rp.used_handles) {
2410 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2411 MGMT_STATUS_BUSY);
2412 goto done;
2413 }
2414
2415 sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2416 mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2417
2418 if (!mesh_tx)
2419 err = -ENOMEM;
2420 else if (!sending)
2421 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2422 mesh_send_start_complete);
2423
2424 if (err < 0) {
2425 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2426 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2427 MGMT_STATUS_FAILED);
2428
2429 if (mesh_tx) {
2430 if (sending)
2431 mgmt_mesh_remove(mesh_tx);
2432 }
2433 } else {
2434 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2435
2436 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2437 &mesh_tx->handle, 1);
2438 }
2439
2440 done:
2441 hci_dev_unlock(hdev);
2442 return err;
2443 }
2444
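/* Set Low Energy handler: LE-only controllers cannot switch LE off;
 * when powered off (or the value already matches) only the flags are
 * updated, otherwise set_le_sync() is queued to the controller.
 */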
2445 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2446 {
2447 struct mgmt_mode *cp = data;
2448 struct mgmt_pending_cmd *cmd;
2449 int err;
2450 u8 val, enabled;
2451
2452 bt_dev_dbg(hdev, "sock %p", sk);
2453
2454 if (!lmp_le_capable(hdev))
2455 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2456 MGMT_STATUS_NOT_SUPPORTED);
2457
2458 if (cp->val != 0x00 && cp->val != 0x01)
2459 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2460 MGMT_STATUS_INVALID_PARAMS);
2461
2462 /* Bluetooth single-mode LE-only controllers, or dual-mode
2463 * controllers configured as LE-only devices, do not allow
2464 * switching LE off. These have either LE enabled explicitly
2465 * or BR/EDR has been previously switched off.
2466 *
2467 * When trying to enable an already enabled LE, gracefully
2468 * send a positive response. Trying to disable it, however,
2469 * will result in rejection.
2470 */
2471 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2472 if (cp->val == 0x01)
2473 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2474
2475 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2476 MGMT_STATUS_REJECTED);
2477 }
2478
2479 hci_dev_lock(hdev);
2480
2481 val = !!cp->val;
2482 enabled = lmp_host_le_capable(hdev);
2483
2484 if (!hdev_is_powered(hdev) || val == enabled) {
2485 bool changed = false;
2486
2487 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2488 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2489 changed = true;
2490 }
2491
2492 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2493 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2494 changed = true;
2495 }
2496
2497 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2498 if (err < 0)
2499 goto unlock;
2500
2501 if (changed)
2502 err = new_settings(hdev, sk);
2503
2504 goto unlock;
2505 }
2506
2507 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2508 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2509 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2510 MGMT_STATUS_BUSY);
2511 goto unlock;
2512 }
2513
2514 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2515 if (!cmd)
2516 err = -ENOMEM;
2517 else
2518 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2519 set_le_complete);
2520
2521 if (err < 0) {
2522 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2523 MGMT_STATUS_FAILED);
2524
2525 if (cmd)
2526 mgmt_pending_remove(cmd);
2527 }
2528
2529 unlock:
2530 hci_dev_unlock(hdev);
2531 return err;
2532 }
2533
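/* Raw HCI pass-through for MGMT_OP_HCI_CMD_SYNC: send the supplied
 * opcode/parameters and wait for cp->event (or the command result) for
 * cp->timeout seconds, falling back to HCI_CMD_TIMEOUT when zero.
 */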
2534 static int send_hci_cmd_sync(struct hci_dev *hdev, void *data)
2535 {
2536 struct mgmt_pending_cmd *cmd = data;
2537 struct mgmt_cp_hci_cmd_sync *cp = cmd->param;
2538 struct sk_buff *skb;
2539
2540 skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cp->opcode),
2541 le16_to_cpu(cp->params_len), cp->params,
2542 cp->event, cp->timeout ?
2543 secs_to_jiffies(cp->timeout) :
2544 HCI_CMD_TIMEOUT);
2545 if (IS_ERR(skb)) {
2546 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2547 mgmt_status(PTR_ERR(skb)));
2548 goto done;
2549 }
2550
2551 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC, 0,
2552 skb->data, skb->len);
2553
2554 kfree_skb(skb);
2555
2556 done:
2557 mgmt_pending_free(cmd);
2558
2559 return 0;
2560 }
2561
2562 static int mgmt_hci_cmd_sync(struct sock *sk, struct hci_dev *hdev,
2563 void *data, u16 len)
2564 {
2565 struct mgmt_cp_hci_cmd_sync *cp = data;
2566 struct mgmt_pending_cmd *cmd;
2567 int err;
2568
2569 if (len < sizeof(*cp))
2570 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2571 MGMT_STATUS_INVALID_PARAMS);
2572
2573 hci_dev_lock(hdev);
2574 cmd = mgmt_pending_new(sk, MGMT_OP_HCI_CMD_SYNC, hdev, data, len);
2575 if (!cmd)
2576 err = -ENOMEM;
2577 else
2578 err = hci_cmd_sync_queue(hdev, send_hci_cmd_sync, cmd, NULL);
2579
2580 if (err < 0) {
2581 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2582 MGMT_STATUS_FAILED);
2583
2584 if (cmd)
2585 mgmt_pending_free(cmd);
2586 }
2587
2588 hci_dev_unlock(hdev);
2589 return err;
2590 }
2591
2592 /* This is a helper function to test for pending mgmt commands that can
2593 * cause CoD or EIR HCI commands. We can only allow one such pending
2594 * mgmt command at a time since otherwise we cannot easily track what
2595 * the current values are and will be, and based on that decide whether a
2596 * new HCI command needs to be sent and, if so, with what value.
2597 */
2598 static bool pending_eir_or_class(struct hci_dev *hdev)
2599 {
2600 struct mgmt_pending_cmd *cmd;
2601
2602 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2603 switch (cmd->opcode) {
2604 case MGMT_OP_ADD_UUID:
2605 case MGMT_OP_REMOVE_UUID:
2606 case MGMT_OP_SET_DEV_CLASS:
2607 case MGMT_OP_SET_POWERED:
2608 return true;
2609 }
2610 }
2611
2612 return false;
2613 }
2614
2615 static const u8 bluetooth_base_uuid[] = {
2616 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2617 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2618 };
2619
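/* UUIDs are stored little-endian: if the first 12 bytes match the
 * Bluetooth Base UUID the value is a shortened one, with the 16- or
 * 32-bit short form held in the last four bytes.
 */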
2620 static u8 get_uuid_size(const u8 *uuid)
2621 {
2622 u32 val;
2623
2624 if (memcmp(uuid, bluetooth_base_uuid, 12))
2625 return 128;
2626
2627 val = get_unaligned_le32(&uuid[12]);
2628 if (val > 0xffff)
2629 return 32;
2630
2631 return 16;
2632 }
2633
2634 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2635 {
2636 struct mgmt_pending_cmd *cmd = data;
2637
2638 bt_dev_dbg(hdev, "err %d", err);
2639
2640 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2641 mgmt_status(err), hdev->dev_class, 3);
2642
2643 mgmt_pending_free(cmd);
2644 }
2645
2646 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2647 {
2648 int err;
2649
2650 err = hci_update_class_sync(hdev);
2651 if (err)
2652 return err;
2653
2654 return hci_update_eir_sync(hdev);
2655 }
2656
2657 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2658 {
2659 struct mgmt_cp_add_uuid *cp = data;
2660 struct mgmt_pending_cmd *cmd;
2661 struct bt_uuid *uuid;
2662 int err;
2663
2664 bt_dev_dbg(hdev, "sock %p", sk);
2665
2666 hci_dev_lock(hdev);
2667
2668 if (pending_eir_or_class(hdev)) {
2669 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2670 MGMT_STATUS_BUSY);
2671 goto failed;
2672 }
2673
2674 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2675 if (!uuid) {
2676 err = -ENOMEM;
2677 goto failed;
2678 }
2679
2680 memcpy(uuid->uuid, cp->uuid, 16);
2681 uuid->svc_hint = cp->svc_hint;
2682 uuid->size = get_uuid_size(cp->uuid);
2683
2684 list_add_tail(&uuid->list, &hdev->uuids);
2685
2686 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2687 if (!cmd) {
2688 err = -ENOMEM;
2689 goto failed;
2690 }
2691
2692 /* MGMT_OP_ADD_UUID doesn't require the adapter to be UP/running, so use
2693 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2694 */
2695 err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
2696 mgmt_class_complete);
2697 if (err < 0) {
2698 mgmt_pending_free(cmd);
2699 goto failed;
2700 }
2701
2702 failed:
2703 hci_dev_unlock(hdev);
2704 return err;
2705 }
2706
2707 static bool enable_service_cache(struct hci_dev *hdev)
2708 {
2709 if (!hdev_is_powered(hdev))
2710 return false;
2711
2712 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2713 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2714 CACHE_TIMEOUT);
2715 return true;
2716 }
2717
2718 return false;
2719 }
2720
2721 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2722 {
2723 int err;
2724
2725 err = hci_update_class_sync(hdev);
2726 if (err)
2727 return err;
2728
2729 return hci_update_eir_sync(hdev);
2730 }
2731
2732 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2733 u16 len)
2734 {
2735 struct mgmt_cp_remove_uuid *cp = data;
2736 struct mgmt_pending_cmd *cmd;
2737 struct bt_uuid *match, *tmp;
2738 static const u8 bt_uuid_any[] = {
2739 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2740 };
2741 int err, found;
2742
2743 bt_dev_dbg(hdev, "sock %p", sk);
2744
2745 hci_dev_lock(hdev);
2746
2747 if (pending_eir_or_class(hdev)) {
2748 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2749 MGMT_STATUS_BUSY);
2750 goto unlock;
2751 }
2752
2753 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2754 hci_uuids_clear(hdev);
2755
2756 if (enable_service_cache(hdev)) {
2757 err = mgmt_cmd_complete(sk, hdev->id,
2758 MGMT_OP_REMOVE_UUID,
2759 0, hdev->dev_class, 3);
2760 goto unlock;
2761 }
2762
2763 goto update_class;
2764 }
2765
2766 found = 0;
2767
2768 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2769 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2770 continue;
2771
2772 list_del(&match->list);
2773 kfree(match);
2774 found++;
2775 }
2776
2777 if (found == 0) {
2778 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2779 MGMT_STATUS_INVALID_PARAMS);
2780 goto unlock;
2781 }
2782
2783 update_class:
2784 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2785 if (!cmd) {
2786 err = -ENOMEM;
2787 goto unlock;
2788 }
2789
2790 /* MGMT_OP_REMOVE_UUID doesn't require the adapter to be UP/running, so use
2791 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2792 */
2793 err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
2794 mgmt_class_complete);
2795 if (err < 0)
2796 mgmt_pending_free(cmd);
2797
2798 unlock:
2799 hci_dev_unlock(hdev);
2800 return err;
2801 }
2802
2803 static int set_class_sync(struct hci_dev *hdev, void *data)
2804 {
2805 int err = 0;
2806
2807 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2808 cancel_delayed_work_sync(&hdev->service_cache);
2809 err = hci_update_eir_sync(hdev);
2810 }
2811
2812 if (err)
2813 return err;
2814
2815 return hci_update_class_sync(hdev);
2816 }
2817
2818 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2819 u16 len)
2820 {
2821 struct mgmt_cp_set_dev_class *cp = data;
2822 struct mgmt_pending_cmd *cmd;
2823 int err;
2824
2825 bt_dev_dbg(hdev, "sock %p", sk);
2826
2827 if (!lmp_bredr_capable(hdev))
2828 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2829 MGMT_STATUS_NOT_SUPPORTED);
2830
2831 hci_dev_lock(hdev);
2832
2833 if (pending_eir_or_class(hdev)) {
2834 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2835 MGMT_STATUS_BUSY);
2836 goto unlock;
2837 }
2838
2839 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2840 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2841 MGMT_STATUS_INVALID_PARAMS);
2842 goto unlock;
2843 }
2844
2845 hdev->major_class = cp->major;
2846 hdev->minor_class = cp->minor;
2847
2848 if (!hdev_is_powered(hdev)) {
2849 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2850 hdev->dev_class, 3);
2851 goto unlock;
2852 }
2853
2854 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2855 if (!cmd) {
2856 err = -ENOMEM;
2857 goto unlock;
2858 }
2859
2860 /* MGMT_OP_SET_DEV_CLASS doesn't require the adapter to be UP/running, so use
2861 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2862 */
2863 err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
2864 mgmt_class_complete);
2865 if (err < 0)
2866 mgmt_pending_free(cmd);
2867
2868 unlock:
2869 hci_dev_unlock(hdev);
2870 return err;
2871 }
2872
2873 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2874 u16 len)
2875 {
2876 struct mgmt_cp_load_link_keys *cp = data;
2877 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2878 sizeof(struct mgmt_link_key_info));
2879 u16 key_count, expected_len;
2880 bool changed;
2881 int i;
2882
2883 bt_dev_dbg(hdev, "sock %p", sk);
2884
2885 if (!lmp_bredr_capable(hdev))
2886 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2887 MGMT_STATUS_NOT_SUPPORTED);
2888
2889 key_count = __le16_to_cpu(cp->key_count);
2890 if (key_count > max_key_count) {
2891 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2892 key_count);
2893 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2894 MGMT_STATUS_INVALID_PARAMS);
2895 }
2896
2897 expected_len = struct_size(cp, keys, key_count);
2898 if (expected_len != len) {
2899 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2900 expected_len, len);
2901 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2902 MGMT_STATUS_INVALID_PARAMS);
2903 }
2904
2905 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2906 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2907 MGMT_STATUS_INVALID_PARAMS);
2908
2909 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2910 key_count);
2911
2912 hci_dev_lock(hdev);
2913
2914 hci_link_keys_clear(hdev);
2915
2916 if (cp->debug_keys)
2917 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2918 else
2919 changed = hci_dev_test_and_clear_flag(hdev,
2920 HCI_KEEP_DEBUG_KEYS);
2921
2922 if (changed)
2923 new_settings(hdev, NULL);
2924
2925 for (i = 0; i < key_count; i++) {
2926 struct mgmt_link_key_info *key = &cp->keys[i];
2927
2928 if (hci_is_blocked_key(hdev,
2929 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2930 key->val)) {
2931 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2932 &key->addr.bdaddr);
2933 continue;
2934 }
2935
2936 if (key->addr.type != BDADDR_BREDR) {
2937 bt_dev_warn(hdev,
2938 "Invalid link address type %u for %pMR",
2939 key->addr.type, &key->addr.bdaddr);
2940 continue;
2941 }
2942
2943 if (key->type > 0x08) {
2944 bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
2945 key->type, &key->addr.bdaddr);
2946 continue;
2947 }
2948
2949 /* Always ignore debug keys and require a new pairing if
2950 * the user wants to use them.
2951 */
2952 if (key->type == HCI_LK_DEBUG_COMBINATION)
2953 continue;
2954
2955 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2956 key->type, key->pin_len, NULL);
2957 }
2958
2959 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2960
2961 hci_dev_unlock(hdev);
2962
2963 return 0;
2964 }
2965
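/* Broadcast MGMT_EV_DEVICE_UNPAIRED to every mgmt socket except
 * skip_sk, typically the socket that initiated the unpairing.
 */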
2966 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2967 u8 addr_type, struct sock *skip_sk)
2968 {
2969 struct mgmt_ev_device_unpaired ev;
2970
2971 bacpy(&ev.addr.bdaddr, bdaddr);
2972 ev.addr.type = addr_type;
2973
2974 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2975 skip_sk);
2976 }
2977
2978 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2979 {
2980 struct mgmt_pending_cmd *cmd = data;
2981 struct mgmt_cp_unpair_device *cp = cmd->param;
2982
2983 if (!err)
2984 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2985
2986 cmd->cmd_complete(cmd, err);
2987 mgmt_pending_free(cmd);
2988 }
2989
2990 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2991 {
2992 struct mgmt_pending_cmd *cmd = data;
2993 struct mgmt_cp_unpair_device *cp = cmd->param;
2994 struct hci_conn *conn;
2995
2996 if (cp->addr.type == BDADDR_BREDR)
2997 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2998 &cp->addr.bdaddr);
2999 else
3000 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3001 le_addr_type(cp->addr.type));
3002
3003 if (!conn)
3004 return 0;
3005
3006 /* Disregard any possible error since the likes of hci_abort_conn_sync
3007 * will clean up the connection no matter the error.
3008 */
3009 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3010
3011 return 0;
3012 }
3013
3014 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3015 u16 len)
3016 {
3017 struct mgmt_cp_unpair_device *cp = data;
3018 struct mgmt_rp_unpair_device rp;
3019 struct hci_conn_params *params;
3020 struct mgmt_pending_cmd *cmd;
3021 struct hci_conn *conn;
3022 u8 addr_type;
3023 int err;
3024
3025 memset(&rp, 0, sizeof(rp));
3026 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3027 rp.addr.type = cp->addr.type;
3028
3029 if (!bdaddr_type_is_valid(cp->addr.type))
3030 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3031 MGMT_STATUS_INVALID_PARAMS,
3032 &rp, sizeof(rp));
3033
3034 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
3035 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3036 MGMT_STATUS_INVALID_PARAMS,
3037 &rp, sizeof(rp));
3038
3039 hci_dev_lock(hdev);
3040
3041 if (!hdev_is_powered(hdev)) {
3042 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3043 MGMT_STATUS_NOT_POWERED, &rp,
3044 sizeof(rp));
3045 goto unlock;
3046 }
3047
3048 if (cp->addr.type == BDADDR_BREDR) {
3049 /* If disconnection is requested, then look up the
3050 * connection. If the remote device is connected, it
3051 * will later be used to terminate the link.
3052 *
3053 * Leaving it set to NULL explicitly means the link
3054 * will not be terminated.
3055 */
3056 if (cp->disconnect)
3057 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3058 &cp->addr.bdaddr);
3059 else
3060 conn = NULL;
3061
3062 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
3063 if (err < 0) {
3064 err = mgmt_cmd_complete(sk, hdev->id,
3065 MGMT_OP_UNPAIR_DEVICE,
3066 MGMT_STATUS_NOT_PAIRED, &rp,
3067 sizeof(rp));
3068 goto unlock;
3069 }
3070
3071 goto done;
3072 }
3073
3074 /* LE address type */
3075 addr_type = le_addr_type(cp->addr.type);
3076
3077 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
3078 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3079 if (err < 0) {
3080 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3081 MGMT_STATUS_NOT_PAIRED, &rp,
3082 sizeof(rp));
3083 goto unlock;
3084 }
3085
3086 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3087 if (!conn) {
3088 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3089 goto done;
3090 }
3091
3092
3093 /* Defer clearing up the connection parameters until closing to
3094 * give a chance of keeping them if a re-pairing happens.
3095 */
3096 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3097
3098 /* Disable auto-connection parameters if present */
3099 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3100 if (params) {
3101 if (params->explicit_connect)
3102 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3103 else
3104 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3105 }
3106
3107 /* If disconnection is not requested, then clear the connection
3108 * variable so that the link is not terminated.
3109 */
3110 if (!cp->disconnect)
3111 conn = NULL;
3112
3113 done:
3114 /* If the connection variable is set, then termination of the
3115 * link is requested.
3116 */
3117 if (!conn) {
3118 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3119 &rp, sizeof(rp));
3120 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3121 goto unlock;
3122 }
3123
3124 cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3125 sizeof(*cp));
3126 if (!cmd) {
3127 err = -ENOMEM;
3128 goto unlock;
3129 }
3130
3131 cmd->cmd_complete = addr_cmd_complete;
3132
3133 err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3134 unpair_device_complete);
3135 if (err < 0)
3136 mgmt_pending_free(cmd);
3137
3138 unlock:
3139 hci_dev_unlock(hdev);
3140 return err;
3141 }
3142
3143 static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
3144 {
3145 struct mgmt_pending_cmd *cmd = data;
3146
3147 cmd->cmd_complete(cmd, mgmt_status(err));
3148 mgmt_pending_free(cmd);
3149 }
3150
3151 static int disconnect_sync(struct hci_dev *hdev, void *data)
3152 {
3153 struct mgmt_pending_cmd *cmd = data;
3154 struct mgmt_cp_disconnect *cp = cmd->param;
3155 struct hci_conn *conn;
3156
3157 if (cp->addr.type == BDADDR_BREDR)
3158 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3159 &cp->addr.bdaddr);
3160 else
3161 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3162 le_addr_type(cp->addr.type));
3163
3164 if (!conn)
3165 return -ENOTCONN;
3166
3167 /* Disregard any possible error since the likes of hci_abort_conn_sync
3168 * will clean up the connection no matter the error.
3169 */
3170 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3171
3172 return 0;
3173 }
3174
3175 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3176 u16 len)
3177 {
3178 struct mgmt_cp_disconnect *cp = data;
3179 struct mgmt_rp_disconnect rp;
3180 struct mgmt_pending_cmd *cmd;
3181 int err;
3182
3183 bt_dev_dbg(hdev, "sock %p", sk);
3184
3185 memset(&rp, 0, sizeof(rp));
3186 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3187 rp.addr.type = cp->addr.type;
3188
3189 if (!bdaddr_type_is_valid(cp->addr.type))
3190 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3191 MGMT_STATUS_INVALID_PARAMS,
3192 &rp, sizeof(rp));
3193
3194 hci_dev_lock(hdev);
3195
3196 if (!test_bit(HCI_UP, &hdev->flags)) {
3197 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3198 MGMT_STATUS_NOT_POWERED, &rp,
3199 sizeof(rp));
3200 goto failed;
3201 }
3202
3203 cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3204 if (!cmd) {
3205 err = -ENOMEM;
3206 goto failed;
3207 }
3208
3209 cmd->cmd_complete = generic_cmd_complete;
3210
3211 err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
3212 disconnect_complete);
3213 if (err < 0)
3214 mgmt_pending_free(cmd);
3215
3216 failed:
3217 hci_dev_unlock(hdev);
3218 return err;
3219 }
3220
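/* Map an HCI link type and address type to the mgmt BDADDR_* address
 * types; ISO and LE links report LE addresses, anything else falls
 * back to BR/EDR.
 */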
3221 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3222 {
3223 switch (link_type) {
3224 case ISO_LINK:
3225 case LE_LINK:
3226 switch (addr_type) {
3227 case ADDR_LE_DEV_PUBLIC:
3228 return BDADDR_LE_PUBLIC;
3229
3230 default:
3231 /* Fallback to LE Random address type */
3232 return BDADDR_LE_RANDOM;
3233 }
3234
3235 default:
3236 /* Fallback to BR/EDR type */
3237 return BDADDR_BREDR;
3238 }
3239 }
3240
3241 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3242 u16 data_len)
3243 {
3244 struct mgmt_rp_get_connections *rp;
3245 struct hci_conn *c;
3246 int err;
3247 u16 i;
3248
3249 bt_dev_dbg(hdev, "sock %p", sk);
3250
3251 hci_dev_lock(hdev);
3252
3253 if (!hdev_is_powered(hdev)) {
3254 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3255 MGMT_STATUS_NOT_POWERED);
3256 goto unlock;
3257 }
3258
3259 i = 0;
3260 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3261 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3262 i++;
3263 }
3264
3265 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3266 if (!rp) {
3267 err = -ENOMEM;
3268 goto unlock;
3269 }
3270
3271 i = 0;
3272 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3273 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3274 continue;
3275 bacpy(&rp->addr[i].bdaddr, &c->dst);
3276 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3277 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3278 continue;
3279 i++;
3280 }
3281
3282 rp->conn_count = cpu_to_le16(i);
3283
3284 /* Recalculate length in case of filtered SCO connections, etc */
3285 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3286 struct_size(rp, addr, i));
3287
3288 kfree(rp);
3289
3290 unlock:
3291 hci_dev_unlock(hdev);
3292 return err;
3293 }
3294
3295 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3296 struct mgmt_cp_pin_code_neg_reply *cp)
3297 {
3298 struct mgmt_pending_cmd *cmd;
3299 int err;
3300
3301 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3302 sizeof(*cp));
3303 if (!cmd)
3304 return -ENOMEM;
3305
3306 cmd->cmd_complete = addr_cmd_complete;
3307
3308 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3309 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3310 if (err < 0)
3311 mgmt_pending_remove(cmd);
3312
3313 return err;
3314 }
3315
3316 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3317 u16 len)
3318 {
3319 struct hci_conn *conn;
3320 struct mgmt_cp_pin_code_reply *cp = data;
3321 struct hci_cp_pin_code_reply reply;
3322 struct mgmt_pending_cmd *cmd;
3323 int err;
3324
3325 bt_dev_dbg(hdev, "sock %p", sk);
3326
3327 hci_dev_lock(hdev);
3328
3329 if (!hdev_is_powered(hdev)) {
3330 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3331 MGMT_STATUS_NOT_POWERED);
3332 goto failed;
3333 }
3334
3335 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3336 if (!conn) {
3337 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3338 MGMT_STATUS_NOT_CONNECTED);
3339 goto failed;
3340 }
3341
3342 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3343 struct mgmt_cp_pin_code_neg_reply ncp;
3344
3345 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3346
3347 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3348
3349 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3350 if (err >= 0)
3351 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3352 MGMT_STATUS_INVALID_PARAMS);
3353
3354 goto failed;
3355 }
3356
3357 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3358 if (!cmd) {
3359 err = -ENOMEM;
3360 goto failed;
3361 }
3362
3363 cmd->cmd_complete = addr_cmd_complete;
3364
3365 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3366 reply.pin_len = cp->pin_len;
3367 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3368
3369 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3370 if (err < 0)
3371 mgmt_pending_remove(cmd);
3372
3373 failed:
3374 hci_dev_unlock(hdev);
3375 return err;
3376 }
3377
3378 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3379 u16 len)
3380 {
3381 struct mgmt_cp_set_io_capability *cp = data;
3382
3383 bt_dev_dbg(hdev, "sock %p", sk);
3384
3385 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3386 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3387 MGMT_STATUS_INVALID_PARAMS);
3388
3389 hci_dev_lock(hdev);
3390
3391 hdev->io_capability = cp->io_capability;
3392
3393 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3394
3395 hci_dev_unlock(hdev);
3396
3397 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3398 NULL, 0);
3399 }
3400
3401 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3402 {
3403 struct hci_dev *hdev = conn->hdev;
3404 struct mgmt_pending_cmd *cmd;
3405
3406 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3407 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3408 continue;
3409
3410 if (cmd->user_data != conn)
3411 continue;
3412
3413 return cmd;
3414 }
3415
3416 return NULL;
3417 }
3418
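/* Finish a pending Pair Device command: send the response, detach the
 * connection callbacks so no further events arrive, and release both
 * connection references taken while pairing.
 */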
3419 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3420 {
3421 struct mgmt_rp_pair_device rp;
3422 struct hci_conn *conn = cmd->user_data;
3423 int err;
3424
3425 bacpy(&rp.addr.bdaddr, &conn->dst);
3426 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3427
3428 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3429 status, &rp, sizeof(rp));
3430
3431 /* So we don't get further callbacks for this connection */
3432 conn->connect_cfm_cb = NULL;
3433 conn->security_cfm_cb = NULL;
3434 conn->disconn_cfm_cb = NULL;
3435
3436 hci_conn_drop(conn);
3437
3438 /* The device is paired so there is no need to remove
3439 * its connection parameters anymore.
3440 */
3441 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3442
3443 hci_conn_put(conn);
3444
3445 return err;
3446 }
3447
3448 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3449 {
3450 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3451 struct mgmt_pending_cmd *cmd;
3452
3453 cmd = find_pairing(conn);
3454 if (cmd) {
3455 cmd->cmd_complete(cmd, status);
3456 mgmt_pending_remove(cmd);
3457 }
3458 }
3459
3460 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3461 {
3462 struct mgmt_pending_cmd *cmd;
3463
3464 BT_DBG("status %u", status);
3465
3466 cmd = find_pairing(conn);
3467 if (!cmd) {
3468 BT_DBG("Unable to find a pending command");
3469 return;
3470 }
3471
3472 cmd->cmd_complete(cmd, mgmt_status(status));
3473 mgmt_pending_remove(cmd);
3474 }
3475
3476 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3477 {
3478 struct mgmt_pending_cmd *cmd;
3479
3480 BT_DBG("status %u", status);
3481
3482 if (!status)
3483 return;
3484
3485 cmd = find_pairing(conn);
3486 if (!cmd) {
3487 BT_DBG("Unable to find a pending command");
3488 return;
3489 }
3490
3491 cmd->cmd_complete(cmd, mgmt_status(status));
3492 mgmt_pending_remove(cmd);
3493 }
3494
3495 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3496 u16 len)
3497 {
3498 struct mgmt_cp_pair_device *cp = data;
3499 struct mgmt_rp_pair_device rp;
3500 struct mgmt_pending_cmd *cmd;
3501 u8 sec_level, auth_type;
3502 struct hci_conn *conn;
3503 int err;
3504
3505 bt_dev_dbg(hdev, "sock %p", sk);
3506
3507 memset(&rp, 0, sizeof(rp));
3508 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3509 rp.addr.type = cp->addr.type;
3510
3511 if (!bdaddr_type_is_valid(cp->addr.type))
3512 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3513 MGMT_STATUS_INVALID_PARAMS,
3514 &rp, sizeof(rp));
3515
3516 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3517 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3518 MGMT_STATUS_INVALID_PARAMS,
3519 &rp, sizeof(rp));
3520
3521 hci_dev_lock(hdev);
3522
3523 if (!hdev_is_powered(hdev)) {
3524 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3525 MGMT_STATUS_NOT_POWERED, &rp,
3526 sizeof(rp));
3527 goto unlock;
3528 }
3529
3530 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3531 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3532 MGMT_STATUS_ALREADY_PAIRED, &rp,
3533 sizeof(rp));
3534 goto unlock;
3535 }
3536
3537 sec_level = BT_SECURITY_MEDIUM;
3538 auth_type = HCI_AT_DEDICATED_BONDING;
3539
3540 if (cp->addr.type == BDADDR_BREDR) {
3541 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3542 auth_type, CONN_REASON_PAIR_DEVICE,
3543 HCI_ACL_CONN_TIMEOUT);
3544 } else {
3545 u8 addr_type = le_addr_type(cp->addr.type);
3546 struct hci_conn_params *p;
3547
3548 /* When pairing a new device, it is expected to remember
3549 * this device for future connections. Adding the connection
3550 * parameter information ahead of time allows tracking
3551 * of the peripheral preferred values and will speed up any
3552 * further connection establishment.
3553 *
3554 * If connection parameters already exist, then they
3555 * will be kept and this function does nothing.
3556 */
3557 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3558 if (!p) {
3559 err = -EIO;
3560 goto unlock;
3561 }
3562
3563 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3564 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3565
3566 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3567 sec_level, HCI_LE_CONN_TIMEOUT,
3568 CONN_REASON_PAIR_DEVICE);
3569 }
3570
3571 if (IS_ERR(conn)) {
3572 int status;
3573
3574 if (PTR_ERR(conn) == -EBUSY)
3575 status = MGMT_STATUS_BUSY;
3576 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3577 status = MGMT_STATUS_NOT_SUPPORTED;
3578 else if (PTR_ERR(conn) == -ECONNREFUSED)
3579 status = MGMT_STATUS_REJECTED;
3580 else
3581 status = MGMT_STATUS_CONNECT_FAILED;
3582
3583 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3584 status, &rp, sizeof(rp));
3585 goto unlock;
3586 }
3587
3588 if (conn->connect_cfm_cb) {
3589 hci_conn_drop(conn);
3590 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3591 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3592 goto unlock;
3593 }
3594
3595 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3596 if (!cmd) {
3597 err = -ENOMEM;
3598 hci_conn_drop(conn);
3599 goto unlock;
3600 }
3601
3602 cmd->cmd_complete = pairing_complete;
3603
3604 /* For LE, just connecting isn't a proof that the pairing finished */
3605 if (cp->addr.type == BDADDR_BREDR) {
3606 conn->connect_cfm_cb = pairing_complete_cb;
3607 conn->security_cfm_cb = pairing_complete_cb;
3608 conn->disconn_cfm_cb = pairing_complete_cb;
3609 } else {
3610 conn->connect_cfm_cb = le_pairing_complete_cb;
3611 conn->security_cfm_cb = le_pairing_complete_cb;
3612 conn->disconn_cfm_cb = le_pairing_complete_cb;
3613 }
3614
3615 conn->io_capability = cp->io_cap;
3616 cmd->user_data = hci_conn_get(conn);
3617
3618 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3619 hci_conn_security(conn, sec_level, auth_type, true)) {
3620 cmd->cmd_complete(cmd, 0);
3621 mgmt_pending_remove(cmd);
3622 }
3623
3624 err = 0;
3625
3626 unlock:
3627 hci_dev_unlock(hdev);
3628 return err;
3629 }
3630
3631 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3632 u16 len)
3633 {
3634 struct mgmt_addr_info *addr = data;
3635 struct mgmt_pending_cmd *cmd;
3636 struct hci_conn *conn;
3637 int err;
3638
3639 bt_dev_dbg(hdev, "sock %p", sk);
3640
3641 hci_dev_lock(hdev);
3642
3643 if (!hdev_is_powered(hdev)) {
3644 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3645 MGMT_STATUS_NOT_POWERED);
3646 goto unlock;
3647 }
3648
3649 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3650 if (!cmd) {
3651 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3652 MGMT_STATUS_INVALID_PARAMS);
3653 goto unlock;
3654 }
3655
3656 conn = cmd->user_data;
3657
3658 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3659 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3660 MGMT_STATUS_INVALID_PARAMS);
3661 goto unlock;
3662 }
3663
3664 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3665 mgmt_pending_remove(cmd);
3666
3667 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3668 addr, sizeof(*addr));
3669
3670 /* Since the user doesn't want to proceed with the connection, abort any
3671 * ongoing pairing and then terminate the link if it was created
3672 * because of the pair device action.
3673 */
3674 if (addr->type == BDADDR_BREDR)
3675 hci_remove_link_key(hdev, &addr->bdaddr);
3676 else
3677 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3678 le_addr_type(addr->type));
3679
3680 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3681 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3682
3683 unlock:
3684 hci_dev_unlock(hdev);
3685 return err;
3686 }
3687
3688 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3689 struct mgmt_addr_info *addr, u16 mgmt_op,
3690 u16 hci_op, __le32 passkey)
3691 {
3692 struct mgmt_pending_cmd *cmd;
3693 struct hci_conn *conn;
3694 int err;
3695
3696 hci_dev_lock(hdev);
3697
3698 if (!hdev_is_powered(hdev)) {
3699 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3700 MGMT_STATUS_NOT_POWERED, addr,
3701 sizeof(*addr));
3702 goto done;
3703 }
3704
3705 if (addr->type == BDADDR_BREDR)
3706 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3707 else
3708 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3709 le_addr_type(addr->type));
3710
3711 if (!conn) {
3712 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3713 MGMT_STATUS_NOT_CONNECTED, addr,
3714 sizeof(*addr));
3715 goto done;
3716 }
3717
3718 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3719 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3720 if (!err)
3721 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3722 MGMT_STATUS_SUCCESS, addr,
3723 sizeof(*addr));
3724 else
3725 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3726 MGMT_STATUS_FAILED, addr,
3727 sizeof(*addr));
3728
3729 goto done;
3730 }
3731
3732 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3733 if (!cmd) {
3734 err = -ENOMEM;
3735 goto done;
3736 }
3737
3738 cmd->cmd_complete = addr_cmd_complete;
3739
3740 /* Continue with pairing via HCI */
3741 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3742 struct hci_cp_user_passkey_reply cp;
3743
3744 bacpy(&cp.bdaddr, &addr->bdaddr);
3745 cp.passkey = passkey;
3746 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3747 } else
3748 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3749 &addr->bdaddr);
3750
3751 if (err < 0)
3752 mgmt_pending_remove(cmd);
3753
3754 done:
3755 hci_dev_unlock(hdev);
3756 return err;
3757 }
3758
3759 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3760 void *data, u16 len)
3761 {
3762 struct mgmt_cp_pin_code_neg_reply *cp = data;
3763
3764 bt_dev_dbg(hdev, "sock %p", sk);
3765
3766 return user_pairing_resp(sk, hdev, &cp->addr,
3767 MGMT_OP_PIN_CODE_NEG_REPLY,
3768 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3769 }
3770
3771 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3772 u16 len)
3773 {
3774 struct mgmt_cp_user_confirm_reply *cp = data;
3775
3776 bt_dev_dbg(hdev, "sock %p", sk);
3777
3778 if (len != sizeof(*cp))
3779 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3780 MGMT_STATUS_INVALID_PARAMS);
3781
3782 return user_pairing_resp(sk, hdev, &cp->addr,
3783 MGMT_OP_USER_CONFIRM_REPLY,
3784 HCI_OP_USER_CONFIRM_REPLY, 0);
3785 }
3786
3787 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3788 void *data, u16 len)
3789 {
3790 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3791
3792 bt_dev_dbg(hdev, "sock %p", sk);
3793
3794 return user_pairing_resp(sk, hdev, &cp->addr,
3795 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3796 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3797 }
3798
3799 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3800 u16 len)
3801 {
3802 struct mgmt_cp_user_passkey_reply *cp = data;
3803
3804 bt_dev_dbg(hdev, "sock %p", sk);
3805
3806 return user_pairing_resp(sk, hdev, &cp->addr,
3807 MGMT_OP_USER_PASSKEY_REPLY,
3808 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3809 }
3810
3811 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3812 void *data, u16 len)
3813 {
3814 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3815
3816 bt_dev_dbg(hdev, "sock %p", sk);
3817
3818 return user_pairing_resp(sk, hdev, &cp->addr,
3819 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3820 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3821 }
3822
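/* If the current advertising instance uses any of the given flags,
 * expire it and schedule the next instance so that refreshed data
 * (e.g. a changed local name or appearance) gets advertised.
 */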
3823 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3824 {
3825 struct adv_info *adv_instance;
3826
3827 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3828 if (!adv_instance)
3829 return 0;
3830
3831 /* stop if current instance doesn't need to be changed */
3832 if (!(adv_instance->flags & flags))
3833 return 0;
3834
3835 cancel_adv_timeout(hdev);
3836
3837 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3838 if (!adv_instance)
3839 return 0;
3840
3841 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3842
3843 return 0;
3844 }
3845
3846 static int name_changed_sync(struct hci_dev *hdev, void *data)
3847 {
3848 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3849 }
3850
3851 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3852 {
3853 struct mgmt_pending_cmd *cmd = data;
3854 struct mgmt_cp_set_local_name *cp = cmd->param;
3855 u8 status = mgmt_status(err);
3856
3857 bt_dev_dbg(hdev, "err %d", err);
3858
3859 if (err == -ECANCELED ||
3860 cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3861 return;
3862
3863 if (status) {
3864 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3865 status);
3866 } else {
3867 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3868 cp, sizeof(*cp));
3869
3870 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3871 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3872 }
3873
3874 mgmt_pending_remove(cmd);
3875 }
3876
3877 static int set_name_sync(struct hci_dev *hdev, void *data)
3878 {
3879 if (lmp_bredr_capable(hdev)) {
3880 hci_update_name_sync(hdev);
3881 hci_update_eir_sync(hdev);
3882 }
3883
3884 /* The name is stored in the scan response data and so
3885 * no need to update the advertising data here.
3886 */
3887 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3888 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3889
3890 return 0;
3891 }
3892
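/* MGMT_OP_SET_LOCAL_NAME: reply immediately when nothing changed or
 * when the controller is powered off (in which case only the stored
 * names are updated and a Local Name Changed event is emitted);
 * otherwise queue set_name_sync() to push the new name to the
 * controller.
 */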
3893 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3894 u16 len)
3895 {
3896 struct mgmt_cp_set_local_name *cp = data;
3897 struct mgmt_pending_cmd *cmd;
3898 int err;
3899
3900 bt_dev_dbg(hdev, "sock %p", sk);
3901
3902 hci_dev_lock(hdev);
3903
3904 /* If the old values are the same as the new ones just return a
3905 * direct command complete event.
3906 */
3907 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3908 !memcmp(hdev->short_name, cp->short_name,
3909 sizeof(hdev->short_name))) {
3910 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3911 data, len);
3912 goto failed;
3913 }
3914
3915 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3916
3917 if (!hdev_is_powered(hdev)) {
3918 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3919
3920 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3921 data, len);
3922 if (err < 0)
3923 goto failed;
3924
3925 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3926 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3927 ext_info_changed(hdev, sk);
3928
3929 goto failed;
3930 }
3931
3932 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3933 if (!cmd)
3934 err = -ENOMEM;
3935 else
3936 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3937 set_name_complete);
3938
3939 if (err < 0) {
3940 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3941 MGMT_STATUS_FAILED);
3942
3943 if (cmd)
3944 mgmt_pending_remove(cmd);
3945
3946 goto failed;
3947 }
3948
3949 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3950
3951 failed:
3952 hci_dev_unlock(hdev);
3953 return err;
3954 }
3955
3956 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3957 {
3958 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3959 }
3960
3961 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3962 u16 len)
3963 {
3964 struct mgmt_cp_set_appearance *cp = data;
3965 u16 appearance;
3966 int err;
3967
3968 bt_dev_dbg(hdev, "sock %p", sk);
3969
3970 if (!lmp_le_capable(hdev))
3971 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3972 MGMT_STATUS_NOT_SUPPORTED);
3973
3974 appearance = le16_to_cpu(cp->appearance);
3975
3976 hci_dev_lock(hdev);
3977
3978 if (hdev->appearance != appearance) {
3979 hdev->appearance = appearance;
3980
3981 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3982 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3983 NULL);
3984
3985 ext_info_changed(hdev, sk);
3986 }
3987
3988 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3989 0);
3990
3991 hci_dev_unlock(hdev);
3992
3993 return err;
3994 }
3995
3996 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3997 void *data, u16 len)
3998 {
3999 struct mgmt_rp_get_phy_configuration rp;
4000
4001 bt_dev_dbg(hdev, "sock %p", sk);
4002
4003 hci_dev_lock(hdev);
4004
4005 memset(&rp, 0, sizeof(rp));
4006
4007 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
4008 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
4009 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
4010
4011 hci_dev_unlock(hdev);
4012
4013 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
4014 &rp, sizeof(rp));
4015 }
4016
4017 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
4018 {
4019 struct mgmt_ev_phy_configuration_changed ev;
4020
4021 memset(&ev, 0, sizeof(ev));
4022
4023 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
4024
4025 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
4026 sizeof(ev), skip);
4027 }
4028
4029 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
4030 {
4031 struct mgmt_pending_cmd *cmd = data;
4032 struct sk_buff *skb = cmd->skb;
4033 u8 status = mgmt_status(err);
4034
4035 if (err == -ECANCELED ||
4036 cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
4037 return;
4038
4039 if (!status) {
4040 if (!skb)
4041 status = MGMT_STATUS_FAILED;
4042 else if (IS_ERR(skb))
4043 status = mgmt_status(PTR_ERR(skb));
4044 else
4045 status = mgmt_status(skb->data[0]);
4046 }
4047
4048 bt_dev_dbg(hdev, "status %d", status);
4049
4050 if (status) {
4051 mgmt_cmd_status(cmd->sk, hdev->id,
4052 MGMT_OP_SET_PHY_CONFIGURATION, status);
4053 } else {
4054 mgmt_cmd_complete(cmd->sk, hdev->id,
4055 MGMT_OP_SET_PHY_CONFIGURATION, 0,
4056 NULL, 0);
4057
4058 mgmt_phy_configuration_changed(hdev, cmd->sk);
4059 }
4060
4061 if (skb && !IS_ERR(skb))
4062 kfree_skb(skb);
4063
4064 mgmt_pending_remove(cmd);
4065 }
4066
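/* Translate the selected MGMT PHYs into an HCI LE Set Default PHY
 * command. Per the HCI specification, all_phys bit 0 (0x01) means the
 * host has no TX PHY preference and bit 1 (0x02) means no RX PHY
 * preference, so those bits are set when no TX/RX PHYs were selected.
 */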
4067 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4068 {
4069 struct mgmt_pending_cmd *cmd = data;
4070 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4071 struct hci_cp_le_set_default_phy cp_phy;
4072 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4073
4074 memset(&cp_phy, 0, sizeof(cp_phy));
4075
4076 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4077 cp_phy.all_phys |= 0x01;
4078
4079 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4080 cp_phy.all_phys |= 0x02;
4081
4082 if (selected_phys & MGMT_PHY_LE_1M_TX)
4083 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4084
4085 if (selected_phys & MGMT_PHY_LE_2M_TX)
4086 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4087
4088 if (selected_phys & MGMT_PHY_LE_CODED_TX)
4089 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4090
4091 if (selected_phys & MGMT_PHY_LE_1M_RX)
4092 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4093
4094 if (selected_phys & MGMT_PHY_LE_2M_RX)
4095 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4096
4097 if (selected_phys & MGMT_PHY_LE_CODED_RX)
4098 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4099
4100 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4101 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4102
4103 return 0;
4104 }
4105
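/* MGMT_OP_SET_PHY_CONFIGURATION: BR/EDR PHY selections are applied by
 * rewriting hdev->pkt_type. The EDR (2M/3M) bits in pkt_type are
 * "shall not be used" flags, which is why they are cleared when the
 * corresponding PHY is selected and set when it is not. Changing the
 * LE PHYs requires an HCI command and completes asynchronously via
 * set_default_phy_sync().
 */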
4106 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4107 void *data, u16 len)
4108 {
4109 struct mgmt_cp_set_phy_configuration *cp = data;
4110 struct mgmt_pending_cmd *cmd;
4111 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
4112 u16 pkt_type = (HCI_DH1 | HCI_DM1);
4113 bool changed = false;
4114 int err;
4115
4116 bt_dev_dbg(hdev, "sock %p", sk);
4117
4118 configurable_phys = get_configurable_phys(hdev);
4119 supported_phys = get_supported_phys(hdev);
4120 selected_phys = __le32_to_cpu(cp->selected_phys);
4121
4122 if (selected_phys & ~supported_phys)
4123 return mgmt_cmd_status(sk, hdev->id,
4124 MGMT_OP_SET_PHY_CONFIGURATION,
4125 MGMT_STATUS_INVALID_PARAMS);
4126
4127 unconfigure_phys = supported_phys & ~configurable_phys;
4128
4129 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4130 return mgmt_cmd_status(sk, hdev->id,
4131 MGMT_OP_SET_PHY_CONFIGURATION,
4132 MGMT_STATUS_INVALID_PARAMS);
4133
4134 if (selected_phys == get_selected_phys(hdev))
4135 return mgmt_cmd_complete(sk, hdev->id,
4136 MGMT_OP_SET_PHY_CONFIGURATION,
4137 0, NULL, 0);
4138
4139 hci_dev_lock(hdev);
4140
4141 if (!hdev_is_powered(hdev)) {
4142 err = mgmt_cmd_status(sk, hdev->id,
4143 MGMT_OP_SET_PHY_CONFIGURATION,
4144 MGMT_STATUS_REJECTED);
4145 goto unlock;
4146 }
4147
4148 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4149 err = mgmt_cmd_status(sk, hdev->id,
4150 MGMT_OP_SET_PHY_CONFIGURATION,
4151 MGMT_STATUS_BUSY);
4152 goto unlock;
4153 }
4154
4155 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4156 pkt_type |= (HCI_DH3 | HCI_DM3);
4157 else
4158 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4159
4160 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4161 pkt_type |= (HCI_DH5 | HCI_DM5);
4162 else
4163 pkt_type &= ~(HCI_DH5 | HCI_DM5);
4164
4165 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4166 pkt_type &= ~HCI_2DH1;
4167 else
4168 pkt_type |= HCI_2DH1;
4169
4170 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4171 pkt_type &= ~HCI_2DH3;
4172 else
4173 pkt_type |= HCI_2DH3;
4174
4175 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4176 pkt_type &= ~HCI_2DH5;
4177 else
4178 pkt_type |= HCI_2DH5;
4179
4180 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4181 pkt_type &= ~HCI_3DH1;
4182 else
4183 pkt_type |= HCI_3DH1;
4184
4185 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4186 pkt_type &= ~HCI_3DH3;
4187 else
4188 pkt_type |= HCI_3DH3;
4189
4190 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4191 pkt_type &= ~HCI_3DH5;
4192 else
4193 pkt_type |= HCI_3DH5;
4194
4195 if (pkt_type != hdev->pkt_type) {
4196 hdev->pkt_type = pkt_type;
4197 changed = true;
4198 }
4199
4200 if ((selected_phys & MGMT_PHY_LE_MASK) ==
4201 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4202 if (changed)
4203 mgmt_phy_configuration_changed(hdev, sk);
4204
4205 err = mgmt_cmd_complete(sk, hdev->id,
4206 MGMT_OP_SET_PHY_CONFIGURATION,
4207 0, NULL, 0);
4208
4209 goto unlock;
4210 }
4211
4212 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4213 len);
4214 if (!cmd)
4215 err = -ENOMEM;
4216 else
4217 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4218 set_default_phy_complete);
4219
4220 if (err < 0) {
4221 err = mgmt_cmd_status(sk, hdev->id,
4222 MGMT_OP_SET_PHY_CONFIGURATION,
4223 MGMT_STATUS_FAILED);
4224
4225 if (cmd)
4226 mgmt_pending_remove(cmd);
4227 }
4228
4229 unlock:
4230 hci_dev_unlock(hdev);
4231
4232 return err;
4233 }
4234
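/* MGMT_OP_SET_BLOCKED_KEYS: replace the list of key values that must
 * never be used (e.g. well-known or compromised link keys, LTKs and
 * IRKs). The previous list is cleared first, so a command with a zero
 * key_count simply unblocks everything.
 */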
4235 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4236 u16 len)
4237 {
4238 int err = MGMT_STATUS_SUCCESS;
4239 struct mgmt_cp_set_blocked_keys *keys = data;
4240 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4241 sizeof(struct mgmt_blocked_key_info));
4242 u16 key_count, expected_len;
4243 int i;
4244
4245 bt_dev_dbg(hdev, "sock %p", sk);
4246
4247 key_count = __le16_to_cpu(keys->key_count);
4248 if (key_count > max_key_count) {
4249 bt_dev_err(hdev, "too big key_count value %u", key_count);
4250 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4251 MGMT_STATUS_INVALID_PARAMS);
4252 }
4253
4254 expected_len = struct_size(keys, keys, key_count);
4255 if (expected_len != len) {
4256 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4257 expected_len, len);
4258 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4259 MGMT_STATUS_INVALID_PARAMS);
4260 }
4261
4262 hci_dev_lock(hdev);
4263
4264 hci_blocked_keys_clear(hdev);
4265
4266 for (i = 0; i < key_count; ++i) {
4267 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4268
4269 if (!b) {
4270 err = MGMT_STATUS_NO_RESOURCES;
4271 break;
4272 }
4273
4274 b->type = keys->keys[i].type;
4275 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4276 list_add_rcu(&b->list, &hdev->blocked_keys);
4277 }
4278 hci_dev_unlock(hdev);
4279
4280 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4281 err, NULL, 0);
4282 }
4283
4284 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4285 void *data, u16 len)
4286 {
4287 struct mgmt_mode *cp = data;
4288 int err;
4289 bool changed = false;
4290
4291 bt_dev_dbg(hdev, "sock %p", sk);
4292
4293 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4294 return mgmt_cmd_status(sk, hdev->id,
4295 MGMT_OP_SET_WIDEBAND_SPEECH,
4296 MGMT_STATUS_NOT_SUPPORTED);
4297
4298 if (cp->val != 0x00 && cp->val != 0x01)
4299 return mgmt_cmd_status(sk, hdev->id,
4300 MGMT_OP_SET_WIDEBAND_SPEECH,
4301 MGMT_STATUS_INVALID_PARAMS);
4302
4303 hci_dev_lock(hdev);
4304
4305 if (hdev_is_powered(hdev) &&
4306 !!cp->val != hci_dev_test_flag(hdev,
4307 HCI_WIDEBAND_SPEECH_ENABLED)) {
4308 err = mgmt_cmd_status(sk, hdev->id,
4309 MGMT_OP_SET_WIDEBAND_SPEECH,
4310 MGMT_STATUS_REJECTED);
4311 goto unlock;
4312 }
4313
4314 if (cp->val)
4315 changed = !hci_dev_test_and_set_flag(hdev,
4316 HCI_WIDEBAND_SPEECH_ENABLED);
4317 else
4318 changed = hci_dev_test_and_clear_flag(hdev,
4319 HCI_WIDEBAND_SPEECH_ENABLED);
4320
4321 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4322 if (err < 0)
4323 goto unlock;
4324
4325 if (changed)
4326 err = new_settings(hdev, sk);
4327
4328 unlock:
4329 hci_dev_unlock(hdev);
4330 return err;
4331 }
4332
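/* MGMT_OP_READ_CONTROLLER_CAP: build an EIR-encoded list of security
 * capabilities: public key validation and key size enforcement flags,
 * the maximum encryption key sizes and, when the controller can
 * report it, the LE TX power range.
 */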
4333 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4334 void *data, u16 data_len)
4335 {
4336 char buf[20];
4337 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4338 u16 cap_len = 0;
4339 u8 flags = 0;
4340 u8 tx_power_range[2];
4341
4342 bt_dev_dbg(hdev, "sock %p", sk);
4343
4344 memset(&buf, 0, sizeof(buf));
4345
4346 hci_dev_lock(hdev);
4347
4348 /* When the Read Simple Pairing Options command is supported, then
4349 * the remote public key validation is supported.
4350 *
4351 * Alternatively, when Microsoft extensions are available, they can
4352 * indicate support for public key validation as well.
4353 */
4354 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4355 flags |= 0x01; /* Remote public key validation (BR/EDR) */
4356
4357 flags |= 0x02; /* Remote public key validation (LE) */
4358
4359 /* When the Read Encryption Key Size command is supported, then the
4360 * encryption key size is enforced.
4361 */
4362 if (hdev->commands[20] & 0x10)
4363 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
4364
4365 flags |= 0x08; /* Encryption key size enforcement (LE) */
4366
4367 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4368 &flags, 1);
4369
4370 /* When the Read Simple Pairing Options command is supported, then
4371 * also max encryption key size information is provided.
4372 */
4373 if (hdev->commands[41] & 0x08)
4374 cap_len = eir_append_le16(rp->cap, cap_len,
4375 MGMT_CAP_MAX_ENC_KEY_SIZE,
4376 hdev->max_enc_key_size);
4377
4378 cap_len = eir_append_le16(rp->cap, cap_len,
4379 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4380 SMP_MAX_ENC_KEY_SIZE);
4381
4382 /* Append the min/max LE tx power parameters if we were able to fetch
4383 * it from the controller
4384 */
4385 if (hdev->commands[38] & 0x80) {
4386 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4387 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4388 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4389 tx_power_range, 2);
4390 }
4391
4392 rp->cap_len = cpu_to_le16(cap_len);
4393
4394 hci_dev_unlock(hdev);
4395
4396 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4397 rp, sizeof(*rp) + cap_len);
4398 }
4399
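/* The experimental-feature UUIDs below are stored in little-endian
 * byte order, i.e. the arrays are reversed with respect to the string
 * form quoted above each one.
 */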
4400 #ifdef CONFIG_BT_FEATURE_DEBUG
4401 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4402 static const u8 debug_uuid[16] = {
4403 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4404 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4405 };
4406 #endif
4407
4408 /* 330859bc-7506-492d-9370-9a6f0614037f */
4409 static const u8 quality_report_uuid[16] = {
4410 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4411 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4412 };
4413
4414 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4415 static const u8 offload_codecs_uuid[16] = {
4416 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4417 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4418 };
4419
4420 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4421 static const u8 le_simultaneous_roles_uuid[16] = {
4422 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4423 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4424 };
4425
4426 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4427 static const u8 iso_socket_uuid[16] = {
4428 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4429 0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4430 };
4431
4432 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4433 static const u8 mgmt_mesh_uuid[16] = {
4434 0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4435 0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
4436 };
4437
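/* MGMT_OP_READ_EXP_FEATURES_INFO: report which experimental features
 * apply to the given index (or to the global, non-controller index
 * when hdev is NULL) along with their current state in BIT(0) of the
 * flags, and opt the socket in to future feature-changed events.
 */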
4438 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4439 void *data, u16 data_len)
4440 {
4441 struct mgmt_rp_read_exp_features_info *rp;
4442 size_t len;
4443 u16 idx = 0;
4444 u32 flags;
4445 int status;
4446
4447 bt_dev_dbg(hdev, "sock %p", sk);
4448
4449 /* Enough space for 7 features */
4450 len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4451 rp = kzalloc(len, GFP_KERNEL);
4452 if (!rp)
4453 return -ENOMEM;
4454
4455 #ifdef CONFIG_BT_FEATURE_DEBUG
4456 if (!hdev) {
4457 flags = bt_dbg_get() ? BIT(0) : 0;
4458
4459 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4460 rp->features[idx].flags = cpu_to_le32(flags);
4461 idx++;
4462 }
4463 #endif
4464
4465 if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4466 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4467 flags = BIT(0);
4468 else
4469 flags = 0;
4470
4471 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4472 rp->features[idx].flags = cpu_to_le32(flags);
4473 idx++;
4474 }
4475
4476 if (hdev && (aosp_has_quality_report(hdev) ||
4477 hdev->set_quality_report)) {
4478 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4479 flags = BIT(0);
4480 else
4481 flags = 0;
4482
4483 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4484 rp->features[idx].flags = cpu_to_le32(flags);
4485 idx++;
4486 }
4487
4488 if (hdev && hdev->get_data_path_id) {
4489 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4490 flags = BIT(0);
4491 else
4492 flags = 0;
4493
4494 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4495 rp->features[idx].flags = cpu_to_le32(flags);
4496 idx++;
4497 }
4498
4499 if (IS_ENABLED(CONFIG_BT_LE)) {
4500 flags = iso_enabled() ? BIT(0) : 0;
4501 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4502 rp->features[idx].flags = cpu_to_le32(flags);
4503 idx++;
4504 }
4505
4506 if (hdev && lmp_le_capable(hdev)) {
4507 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4508 flags = BIT(0);
4509 else
4510 flags = 0;
4511
4512 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4513 rp->features[idx].flags = cpu_to_le32(flags);
4514 idx++;
4515 }
4516
4517 rp->feature_count = cpu_to_le16(idx);
4518
4519 /* After reading the experimental features information, enable
4520 * the events to update client on any future change.
4521 */
4522 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4523
4524 status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4525 MGMT_OP_READ_EXP_FEATURES_INFO,
4526 0, rp, sizeof(*rp) + (20 * idx));
4527
4528 kfree(rp);
4529 return status;
4530 }
4531
4532 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4533 bool enabled, struct sock *skip)
4534 {
4535 struct mgmt_ev_exp_feature_changed ev;
4536
4537 memset(&ev, 0, sizeof(ev));
4538 memcpy(ev.uuid, uuid, 16);
4539 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4540
4541 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4542 &ev, sizeof(ev),
4543 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4544 }
4545
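/* Table-driven dispatch for MGMT_OP_SET_EXP_FEATURE: each entry pairs
 * a feature UUID with its setter, and set_exp_feature() walks the
 * table until the UUID supplied in the command matches.
 */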
4546 #define EXP_FEAT(_uuid, _set_func) \
4547 { \
4548 .uuid = _uuid, \
4549 .set_func = _set_func, \
4550 }
4551
4552 /* The zero key uuid is special. Multiple exp features are set through it. */
4553 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4554 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4555 {
4556 struct mgmt_rp_set_exp_feature rp;
4557
4558 memset(rp.uuid, 0, 16);
4559 rp.flags = cpu_to_le32(0);
4560
4561 #ifdef CONFIG_BT_FEATURE_DEBUG
4562 if (!hdev) {
4563 bool changed = bt_dbg_get();
4564
4565 bt_dbg_set(false);
4566
4567 if (changed)
4568 exp_feature_changed(NULL, ZERO_KEY, false, sk);
4569 }
4570 #endif
4571
4572 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4573
4574 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4575 MGMT_OP_SET_EXP_FEATURE, 0,
4576 &rp, sizeof(rp));
4577 }
4578
4579 #ifdef CONFIG_BT_FEATURE_DEBUG
4580 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4581 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4582 {
4583 struct mgmt_rp_set_exp_feature rp;
4584
4585 bool val, changed;
4586 int err;
4587
4588 /* Command requires to use the non-controller index */
4589 if (hdev)
4590 return mgmt_cmd_status(sk, hdev->id,
4591 MGMT_OP_SET_EXP_FEATURE,
4592 MGMT_STATUS_INVALID_INDEX);
4593
4594 /* Parameters are limited to a single octet */
4595 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4596 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4597 MGMT_OP_SET_EXP_FEATURE,
4598 MGMT_STATUS_INVALID_PARAMS);
4599
4600 /* Only boolean on/off is supported */
4601 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4602 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4603 MGMT_OP_SET_EXP_FEATURE,
4604 MGMT_STATUS_INVALID_PARAMS);
4605
4606 val = !!cp->param[0];
4607 changed = val ? !bt_dbg_get() : bt_dbg_get();
4608 bt_dbg_set(val);
4609
4610 memcpy(rp.uuid, debug_uuid, 16);
4611 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4612
4613 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4614
4615 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4616 MGMT_OP_SET_EXP_FEATURE, 0,
4617 &rp, sizeof(rp));
4618
4619 if (changed)
4620 exp_feature_changed(hdev, debug_uuid, val, sk);
4621
4622 return err;
4623 }
4624 #endif
4625
4626 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4627 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4628 {
4629 struct mgmt_rp_set_exp_feature rp;
4630 bool val, changed;
4631 int err;
4632
4633 /* Command requires to use the controller index */
4634 if (!hdev)
4635 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4636 MGMT_OP_SET_EXP_FEATURE,
4637 MGMT_STATUS_INVALID_INDEX);
4638
4639 /* Parameters are limited to a single octet */
4640 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4641 return mgmt_cmd_status(sk, hdev->id,
4642 MGMT_OP_SET_EXP_FEATURE,
4643 MGMT_STATUS_INVALID_PARAMS);
4644
4645 /* Only boolean on/off is supported */
4646 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4647 return mgmt_cmd_status(sk, hdev->id,
4648 MGMT_OP_SET_EXP_FEATURE,
4649 MGMT_STATUS_INVALID_PARAMS);
4650
4651 val = !!cp->param[0];
4652
4653 if (val) {
4654 changed = !hci_dev_test_and_set_flag(hdev,
4655 HCI_MESH_EXPERIMENTAL);
4656 } else {
4657 hci_dev_clear_flag(hdev, HCI_MESH);
4658 changed = hci_dev_test_and_clear_flag(hdev,
4659 HCI_MESH_EXPERIMENTAL);
4660 }
4661
4662 memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4663 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4664
4665 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4666
4667 err = mgmt_cmd_complete(sk, hdev->id,
4668 MGMT_OP_SET_EXP_FEATURE, 0,
4669 &rp, sizeof(rp));
4670
4671 if (changed)
4672 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4673
4674 return err;
4675 }
4676
4677 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4678 struct mgmt_cp_set_exp_feature *cp,
4679 u16 data_len)
4680 {
4681 struct mgmt_rp_set_exp_feature rp;
4682 bool val, changed;
4683 int err;
4684
4685 /* Command requires to use a valid controller index */
4686 if (!hdev)
4687 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4688 MGMT_OP_SET_EXP_FEATURE,
4689 MGMT_STATUS_INVALID_INDEX);
4690
4691 /* Parameters are limited to a single octet */
4692 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4693 return mgmt_cmd_status(sk, hdev->id,
4694 MGMT_OP_SET_EXP_FEATURE,
4695 MGMT_STATUS_INVALID_PARAMS);
4696
4697 /* Only boolean on/off is supported */
4698 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4699 return mgmt_cmd_status(sk, hdev->id,
4700 MGMT_OP_SET_EXP_FEATURE,
4701 MGMT_STATUS_INVALID_PARAMS);
4702
4703 hci_req_sync_lock(hdev);
4704
4705 val = !!cp->param[0];
4706 changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4707
4708 if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4709 err = mgmt_cmd_status(sk, hdev->id,
4710 MGMT_OP_SET_EXP_FEATURE,
4711 MGMT_STATUS_NOT_SUPPORTED);
4712 goto unlock_quality_report;
4713 }
4714
4715 if (changed) {
4716 if (hdev->set_quality_report)
4717 err = hdev->set_quality_report(hdev, val);
4718 else
4719 err = aosp_set_quality_report(hdev, val);
4720
4721 if (err) {
4722 err = mgmt_cmd_status(sk, hdev->id,
4723 MGMT_OP_SET_EXP_FEATURE,
4724 MGMT_STATUS_FAILED);
4725 goto unlock_quality_report;
4726 }
4727
4728 if (val)
4729 hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4730 else
4731 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4732 }
4733
4734 bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4735
4736 memcpy(rp.uuid, quality_report_uuid, 16);
4737 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4738 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4739
4740 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4741 &rp, sizeof(rp));
4742
4743 if (changed)
4744 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4745
4746 unlock_quality_report:
4747 hci_req_sync_unlock(hdev);
4748 return err;
4749 }
4750
4751 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4752 struct mgmt_cp_set_exp_feature *cp,
4753 u16 data_len)
4754 {
4755 bool val, changed;
4756 int err;
4757 struct mgmt_rp_set_exp_feature rp;
4758
4759 /* Command requires to use a valid controller index */
4760 if (!hdev)
4761 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4762 MGMT_OP_SET_EXP_FEATURE,
4763 MGMT_STATUS_INVALID_INDEX);
4764
4765 /* Parameters are limited to a single octet */
4766 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4767 return mgmt_cmd_status(sk, hdev->id,
4768 MGMT_OP_SET_EXP_FEATURE,
4769 MGMT_STATUS_INVALID_PARAMS);
4770
4771 /* Only boolean on/off is supported */
4772 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4773 return mgmt_cmd_status(sk, hdev->id,
4774 MGMT_OP_SET_EXP_FEATURE,
4775 MGMT_STATUS_INVALID_PARAMS);
4776
4777 val = !!cp->param[0];
4778 changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4779
4780 if (!hdev->get_data_path_id) {
4781 return mgmt_cmd_status(sk, hdev->id,
4782 MGMT_OP_SET_EXP_FEATURE,
4783 MGMT_STATUS_NOT_SUPPORTED);
4784 }
4785
4786 if (changed) {
4787 if (val)
4788 hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4789 else
4790 hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4791 }
4792
4793 bt_dev_info(hdev, "offload codecs enable %d changed %d",
4794 val, changed);
4795
4796 memcpy(rp.uuid, offload_codecs_uuid, 16);
4797 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4798 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4799 err = mgmt_cmd_complete(sk, hdev->id,
4800 MGMT_OP_SET_EXP_FEATURE, 0,
4801 &rp, sizeof(rp));
4802
4803 if (changed)
4804 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4805
4806 return err;
4807 }
4808
4809 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4810 struct mgmt_cp_set_exp_feature *cp,
4811 u16 data_len)
4812 {
4813 bool val, changed;
4814 int err;
4815 struct mgmt_rp_set_exp_feature rp;
4816
4817 /* Command requires to use a valid controller index */
4818 if (!hdev)
4819 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4820 MGMT_OP_SET_EXP_FEATURE,
4821 MGMT_STATUS_INVALID_INDEX);
4822
4823 /* Parameters are limited to a single octet */
4824 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4825 return mgmt_cmd_status(sk, hdev->id,
4826 MGMT_OP_SET_EXP_FEATURE,
4827 MGMT_STATUS_INVALID_PARAMS);
4828
4829 /* Only boolean on/off is supported */
4830 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4831 return mgmt_cmd_status(sk, hdev->id,
4832 MGMT_OP_SET_EXP_FEATURE,
4833 MGMT_STATUS_INVALID_PARAMS);
4834
4835 val = !!cp->param[0];
4836 changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4837
4838 if (!hci_dev_le_state_simultaneous(hdev)) {
4839 return mgmt_cmd_status(sk, hdev->id,
4840 MGMT_OP_SET_EXP_FEATURE,
4841 MGMT_STATUS_NOT_SUPPORTED);
4842 }
4843
4844 if (changed) {
4845 if (val)
4846 hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4847 else
4848 hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4849 }
4850
4851 bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4852 val, changed);
4853
4854 memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4855 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4856 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4857 err = mgmt_cmd_complete(sk, hdev->id,
4858 MGMT_OP_SET_EXP_FEATURE, 0,
4859 &rp, sizeof(rp));
4860
4861 if (changed)
4862 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4863
4864 return err;
4865 }
4866
4867 #ifdef CONFIG_BT_LE
4868 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4869 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4870 {
4871 struct mgmt_rp_set_exp_feature rp;
4872 bool val, changed = false;
4873 int err;
4874
4875 /* Command requires to use the non-controller index */
4876 if (hdev)
4877 return mgmt_cmd_status(sk, hdev->id,
4878 MGMT_OP_SET_EXP_FEATURE,
4879 MGMT_STATUS_INVALID_INDEX);
4880
4881 /* Parameters are limited to a single octet */
4882 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4883 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4884 MGMT_OP_SET_EXP_FEATURE,
4885 MGMT_STATUS_INVALID_PARAMS);
4886
4887 /* Only boolean on/off is supported */
4888 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4889 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4890 MGMT_OP_SET_EXP_FEATURE,
4891 MGMT_STATUS_INVALID_PARAMS);
4892
4893 val = !!cp->param[0];
4894 if (val)
4895 err = iso_init();
4896 else
4897 err = iso_exit();
4898
4899 if (!err)
4900 changed = true;
4901
4902 memcpy(rp.uuid, iso_socket_uuid, 16);
4903 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4904
4905 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4906
4907 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4908 MGMT_OP_SET_EXP_FEATURE, 0,
4909 &rp, sizeof(rp));
4910
4911 if (changed)
4912 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
4913
4914 return err;
4915 }
4916 #endif
4917
4918 static const struct mgmt_exp_feature {
4919 const u8 *uuid;
4920 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4921 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4922 } exp_features[] = {
4923 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4924 #ifdef CONFIG_BT_FEATURE_DEBUG
4925 EXP_FEAT(debug_uuid, set_debug_func),
4926 #endif
4927 EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
4928 EXP_FEAT(quality_report_uuid, set_quality_report_func),
4929 EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
4930 EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
4931 #ifdef CONFIG_BT_LE
4932 EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
4933 #endif
4934
4935 /* end with a null feature */
4936 EXP_FEAT(NULL, NULL)
4937 };
4938
4939 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4940 void *data, u16 data_len)
4941 {
4942 struct mgmt_cp_set_exp_feature *cp = data;
4943 size_t i = 0;
4944
4945 bt_dev_dbg(hdev, "sock %p", sk);
4946
4947 for (i = 0; exp_features[i].uuid; i++) {
4948 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4949 return exp_features[i].set_func(sk, hdev, cp, data_len);
4950 }
4951
4952 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4953 MGMT_OP_SET_EXP_FEATURE,
4954 MGMT_STATUS_NOT_SUPPORTED);
4955 }
4956
4957 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4958 u16 data_len)
4959 {
4960 struct mgmt_cp_get_device_flags *cp = data;
4961 struct mgmt_rp_get_device_flags rp;
4962 struct bdaddr_list_with_flags *br_params;
4963 struct hci_conn_params *params;
4964 u32 supported_flags;
4965 u32 current_flags = 0;
4966 u8 status = MGMT_STATUS_INVALID_PARAMS;
4967
4968 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
4969 &cp->addr.bdaddr, cp->addr.type);
4970
4971 hci_dev_lock(hdev);
4972
4973 supported_flags = hdev->conn_flags;
4974
4975 memset(&rp, 0, sizeof(rp));
4976
4977 if (cp->addr.type == BDADDR_BREDR) {
4978 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4979 &cp->addr.bdaddr,
4980 cp->addr.type);
4981 if (!br_params)
4982 goto done;
4983
4984 current_flags = br_params->flags;
4985 } else {
4986 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4987 le_addr_type(cp->addr.type));
4988 if (!params)
4989 goto done;
4990
4991 current_flags = params->flags;
4992 }
4993
4994 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4995 rp.addr.type = cp->addr.type;
4996 rp.supported_flags = cpu_to_le32(supported_flags);
4997 rp.current_flags = cpu_to_le32(current_flags);
4998
4999 status = MGMT_STATUS_SUCCESS;
5000
5001 done:
5002 hci_dev_unlock(hdev);
5003
5004 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5005 &rp, sizeof(rp));
5006 }
5007
5008 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5009 bdaddr_t *bdaddr, u8 bdaddr_type,
5010 u32 supported_flags, u32 current_flags)
5011 {
5012 struct mgmt_ev_device_flags_changed ev;
5013
5014 bacpy(&ev.addr.bdaddr, bdaddr);
5015 ev.addr.type = bdaddr_type;
5016 ev.supported_flags = cpu_to_le32(supported_flags);
5017 ev.current_flags = cpu_to_le32(current_flags);
5018
5019 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5020 }
5021
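/* MGMT_OP_SET_DEVICE_FLAGS: validate the requested flags against
 * hdev->conn_flags and store them in the matching accept list entry
 * (BR/EDR) or connection parameters (LE). A successful update is
 * broadcast as a Device Flags Changed event to other mgmt sockets.
 */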
5022 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5023 u16 len)
5024 {
5025 struct mgmt_cp_set_device_flags *cp = data;
5026 struct bdaddr_list_with_flags *br_params;
5027 struct hci_conn_params *params;
5028 u8 status = MGMT_STATUS_INVALID_PARAMS;
5029 u32 supported_flags;
5030 u32 current_flags = __le32_to_cpu(cp->current_flags);
5031
5032 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5033 &cp->addr.bdaddr, cp->addr.type, current_flags);
5034
5035 /* hci_dev_lock() should arguably be taken before this read, since hdev->conn_flags can change while the lock is not held. */
5036 supported_flags = hdev->conn_flags;
5037
5038 if ((supported_flags | current_flags) != supported_flags) {
5039 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%x)",
5040 current_flags, supported_flags);
5041 goto done;
5042 }
5043
5044 hci_dev_lock(hdev);
5045
5046 if (cp->addr.type == BDADDR_BREDR) {
5047 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5048 &cp->addr.bdaddr,
5049 cp->addr.type);
5050
5051 if (br_params) {
5052 br_params->flags = current_flags;
5053 status = MGMT_STATUS_SUCCESS;
5054 } else {
5055 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5056 &cp->addr.bdaddr, cp->addr.type);
5057 }
5058
5059 goto unlock;
5060 }
5061
5062 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5063 le_addr_type(cp->addr.type));
5064 if (!params) {
5065 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5066 &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5067 goto unlock;
5068 }
5069
5070 supported_flags = hdev->conn_flags;
5071
5072 if ((supported_flags | current_flags) != supported_flags) {
5073 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%x)",
5074 current_flags, supported_flags);
5075 goto unlock;
5076 }
5077
5078 WRITE_ONCE(params->flags, current_flags);
5079 status = MGMT_STATUS_SUCCESS;
5080
5081 /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5082 * has been set.
5083 */
5084 if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5085 hci_update_passive_scan(hdev);
5086
5087 unlock:
5088 hci_dev_unlock(hdev);
5089
5090 done:
5091 if (status == MGMT_STATUS_SUCCESS)
5092 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5093 supported_flags, current_flags);
5094
5095 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5096 &cp->addr, sizeof(cp->addr));
5097 }
5098
5099 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5100 u16 handle)
5101 {
5102 struct mgmt_ev_adv_monitor_added ev;
5103
5104 ev.monitor_handle = cpu_to_le16(handle);
5105
5106 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5107 }
5108
5109 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
5110 {
5111 struct mgmt_ev_adv_monitor_removed ev;
5112 struct mgmt_pending_cmd *cmd;
5113 struct sock *sk_skip = NULL;
5114 struct mgmt_cp_remove_adv_monitor *cp;
5115
5116 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
5117 if (cmd) {
5118 cp = cmd->param;
5119
5120 if (cp->monitor_handle)
5121 sk_skip = cmd->sk;
5122 }
5123
5124 ev.monitor_handle = cpu_to_le16(handle);
5125
5126 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
5127 }
5128
5129 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5130 void *data, u16 len)
5131 {
5132 struct adv_monitor *monitor = NULL;
5133 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5134 int handle, err;
5135 size_t rp_size = 0;
5136 __u32 supported = 0;
5137 __u32 enabled = 0;
5138 __u16 num_handles = 0;
5139 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5140
5141 BT_DBG("request for %s", hdev->name);
5142
5143 hci_dev_lock(hdev);
5144
5145 if (msft_monitor_supported(hdev))
5146 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5147
5148 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5149 handles[num_handles++] = monitor->handle;
5150
5151 hci_dev_unlock(hdev);
5152
5153 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5154 rp = kmalloc(rp_size, GFP_KERNEL);
5155 if (!rp)
5156 return -ENOMEM;
5157
5158 /* All supported features are currently enabled */
5159 enabled = supported;
5160
5161 rp->supported_features = cpu_to_le32(supported);
5162 rp->enabled_features = cpu_to_le32(enabled);
5163 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5164 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5165 rp->num_handles = cpu_to_le16(num_handles);
5166 if (num_handles)
5167 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5168
5169 err = mgmt_cmd_complete(sk, hdev->id,
5170 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5171 MGMT_STATUS_SUCCESS, rp, rp_size);
5172
5173 kfree(rp);
5174
5175 return err;
5176 }
5177
5178 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5179 void *data, int status)
5180 {
5181 struct mgmt_rp_add_adv_patterns_monitor rp;
5182 struct mgmt_pending_cmd *cmd = data;
5183 struct adv_monitor *monitor = cmd->user_data;
5184
5185 hci_dev_lock(hdev);
5186
5187 rp.monitor_handle = cpu_to_le16(monitor->handle);
5188
5189 if (!status) {
5190 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5191 hdev->adv_monitors_cnt++;
5192 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5193 monitor->state = ADV_MONITOR_STATE_REGISTERED;
5194 hci_update_passive_scan(hdev);
5195 }
5196
5197 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5198 mgmt_status(status), &rp, sizeof(rp));
5199 mgmt_pending_remove(cmd);
5200
5201 hci_dev_unlock(hdev);
5202 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5203 rp.monitor_handle, status);
5204 }
5205
5206 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5207 {
5208 struct mgmt_pending_cmd *cmd = data;
5209 struct adv_monitor *monitor = cmd->user_data;
5210
5211 return hci_add_adv_monitor(hdev, monitor);
5212 }
5213
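/* Common tail of the Add Adv Patterns Monitor commands: bail out while
 * a conflicting monitor or SET_LE operation is pending, otherwise
 * queue the registration, which completes via
 * mgmt_add_adv_patterns_monitor_complete(). On any early failure the
 * monitor allocated by the caller is freed here.
 */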
5214 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5215 struct adv_monitor *m, u8 status,
5216 void *data, u16 len, u16 op)
5217 {
5218 struct mgmt_pending_cmd *cmd;
5219 int err;
5220
5221 hci_dev_lock(hdev);
5222
5223 if (status)
5224 goto unlock;
5225
5226 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5227 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5228 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5229 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5230 status = MGMT_STATUS_BUSY;
5231 goto unlock;
5232 }
5233
5234 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5235 if (!cmd) {
5236 status = MGMT_STATUS_NO_RESOURCES;
5237 goto unlock;
5238 }
5239
5240 cmd->user_data = m;
5241 err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5242 mgmt_add_adv_patterns_monitor_complete);
5243 if (err) {
5244 if (err == -ENOMEM)
5245 status = MGMT_STATUS_NO_RESOURCES;
5246 else
5247 status = MGMT_STATUS_FAILED;
5248
5249 goto unlock;
5250 }
5251
5252 hci_dev_unlock(hdev);
5253
5254 return 0;
5255
5256 unlock:
5257 hci_free_adv_monitor(hdev, m);
5258 hci_dev_unlock(hdev);
5259 return mgmt_cmd_status(sk, hdev->id, op, status);
5260 }
5261
5262 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5263 struct mgmt_adv_rssi_thresholds *rssi)
5264 {
5265 if (rssi) {
5266 m->rssi.low_threshold = rssi->low_threshold;
5267 m->rssi.low_threshold_timeout =
5268 __le16_to_cpu(rssi->low_threshold_timeout);
5269 m->rssi.high_threshold = rssi->high_threshold;
5270 m->rssi.high_threshold_timeout =
5271 __le16_to_cpu(rssi->high_threshold_timeout);
5272 m->rssi.sampling_period = rssi->sampling_period;
5273 } else {
5274 /* Default values. These numbers are the least constricting
5275 * parameters for MSFT API to work, so it behaves as if there
5276 * are no rssi parameter to consider. May need to be changed
5277 * if other API are to be supported.
5278 */
5279 m->rssi.low_threshold = -127;
5280 m->rssi.low_threshold_timeout = 60;
5281 m->rssi.high_threshold = -127;
5282 m->rssi.high_threshold_timeout = 0;
5283 m->rssi.sampling_period = 0;
5284 }
5285 }
5286
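/* Validate each pattern (offset and length must stay within the
 * maximum extended advertising data size) and append a copy to the
 * monitor's pattern list. Returns an MGMT status code rather than an
 * errno.
 */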
5287 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5288 struct mgmt_adv_pattern *patterns)
5289 {
5290 u8 offset = 0, length = 0;
5291 struct adv_pattern *p = NULL;
5292 int i;
5293
5294 for (i = 0; i < pattern_count; i++) {
5295 offset = patterns[i].offset;
5296 length = patterns[i].length;
5297 if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5298 length > HCI_MAX_EXT_AD_LENGTH ||
5299 (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5300 return MGMT_STATUS_INVALID_PARAMS;
5301
5302 p = kmalloc(sizeof(*p), GFP_KERNEL);
5303 if (!p)
5304 return MGMT_STATUS_NO_RESOURCES;
5305
5306 p->ad_type = patterns[i].ad_type;
5307 p->offset = patterns[i].offset;
5308 p->length = patterns[i].length;
5309 memcpy(p->value, patterns[i].value, p->length);
5310
5311 INIT_LIST_HEAD(&p->list);
5312 list_add(&p->list, &m->patterns);
5313 }
5314
5315 return MGMT_STATUS_SUCCESS;
5316 }
5317
5318 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5319 void *data, u16 len)
5320 {
5321 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5322 struct adv_monitor *m = NULL;
5323 u8 status = MGMT_STATUS_SUCCESS;
5324 size_t expected_size = sizeof(*cp);
5325
5326 BT_DBG("request for %s", hdev->name);
5327
5328 if (len <= sizeof(*cp)) {
5329 status = MGMT_STATUS_INVALID_PARAMS;
5330 goto done;
5331 }
5332
5333 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5334 if (len != expected_size) {
5335 status = MGMT_STATUS_INVALID_PARAMS;
5336 goto done;
5337 }
5338
5339 m = kzalloc(sizeof(*m), GFP_KERNEL);
5340 if (!m) {
5341 status = MGMT_STATUS_NO_RESOURCES;
5342 goto done;
5343 }
5344
5345 INIT_LIST_HEAD(&m->patterns);
5346
5347 parse_adv_monitor_rssi(m, NULL);
5348 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5349
5350 done:
5351 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5352 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5353 }
5354
5355 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5356 void *data, u16 len)
5357 {
5358 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5359 struct adv_monitor *m = NULL;
5360 u8 status = MGMT_STATUS_SUCCESS;
5361 size_t expected_size = sizeof(*cp);
5362
5363 BT_DBG("request for %s", hdev->name);
5364
5365 if (len <= sizeof(*cp)) {
5366 status = MGMT_STATUS_INVALID_PARAMS;
5367 goto done;
5368 }
5369
5370 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5371 if (len != expected_size) {
5372 status = MGMT_STATUS_INVALID_PARAMS;
5373 goto done;
5374 }
5375
5376 m = kzalloc(sizeof(*m), GFP_KERNEL);
5377 if (!m) {
5378 status = MGMT_STATUS_NO_RESOURCES;
5379 goto done;
5380 }
5381
5382 INIT_LIST_HEAD(&m->patterns);
5383
5384 parse_adv_monitor_rssi(m, &cp->rssi);
5385 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5386
5387 done:
5388 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5389 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5390 }
5391
5392 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5393 void *data, int status)
5394 {
5395 struct mgmt_rp_remove_adv_monitor rp;
5396 struct mgmt_pending_cmd *cmd = data;
5397 struct mgmt_cp_remove_adv_monitor *cp;
5398
5399 if (status == -ECANCELED ||
5400 cmd != pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev))
5401 return;
5402
5403 hci_dev_lock(hdev);
5404
5405 cp = cmd->param;
5406
5407 rp.monitor_handle = cp->monitor_handle;
5408
5409 if (!status)
5410 hci_update_passive_scan(hdev);
5411
5412 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5413 mgmt_status(status), &rp, sizeof(rp));
5414 mgmt_pending_remove(cmd);
5415
5416 hci_dev_unlock(hdev);
5417 bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5418 rp.monitor_handle, status);
5419 }
5420
5421 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5422 {
5423 struct mgmt_pending_cmd *cmd = data;
5424
5425 if (cmd != pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev))
5426 return -ECANCELED;
5427
5428 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5429 u16 handle = __le16_to_cpu(cp->monitor_handle);
5430
5431 if (!handle)
5432 return hci_remove_all_adv_monitor(hdev);
5433
5434 return hci_remove_single_adv_monitor(hdev, handle);
5435 }
5436
5437 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5438 void *data, u16 len)
5439 {
5440 struct mgmt_pending_cmd *cmd;
5441 int err, status;
5442
5443 hci_dev_lock(hdev);
5444
5445 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5446 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5447 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5448 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5449 status = MGMT_STATUS_BUSY;
5450 goto unlock;
5451 }
5452
5453 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5454 if (!cmd) {
5455 status = MGMT_STATUS_NO_RESOURCES;
5456 goto unlock;
5457 }
5458
5459 err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
5460 mgmt_remove_adv_monitor_complete);
5461
5462 if (err) {
5463 mgmt_pending_remove(cmd);
5464
5465 if (err == -ENOMEM)
5466 status = MGMT_STATUS_NO_RESOURCES;
5467 else
5468 status = MGMT_STATUS_FAILED;
5469
5470 goto unlock;
5471 }
5472
5473 hci_dev_unlock(hdev);
5474
5475 return 0;
5476
5477 unlock:
5478 hci_dev_unlock(hdev);
5479 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5480 status);
5481 }
5482
5483 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5484 {
5485 struct mgmt_rp_read_local_oob_data mgmt_rp;
5486 size_t rp_size = sizeof(mgmt_rp);
5487 struct mgmt_pending_cmd *cmd = data;
5488 struct sk_buff *skb = cmd->skb;
5489 u8 status = mgmt_status(err);
5490
5491 if (!status) {
5492 if (!skb)
5493 status = MGMT_STATUS_FAILED;
5494 else if (IS_ERR(skb))
5495 status = mgmt_status(PTR_ERR(skb));
5496 else
5497 status = mgmt_status(skb->data[0]);
5498 }
5499
5500 bt_dev_dbg(hdev, "status %d", status);
5501
5502 if (status) {
5503 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5504 goto remove;
5505 }
5506
5507 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5508
5509 if (!bredr_sc_enabled(hdev)) {
5510 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5511
5512 if (skb->len < sizeof(*rp)) {
5513 mgmt_cmd_status(cmd->sk, hdev->id,
5514 MGMT_OP_READ_LOCAL_OOB_DATA,
5515 MGMT_STATUS_FAILED);
5516 goto remove;
5517 }
5518
5519 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5520 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5521
5522 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5523 } else {
5524 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5525
5526 if (skb->len < sizeof(*rp)) {
5527 mgmt_cmd_status(cmd->sk, hdev->id,
5528 MGMT_OP_READ_LOCAL_OOB_DATA,
5529 MGMT_STATUS_FAILED);
5530 goto remove;
5531 }
5532
5533 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5534 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5535
5536 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5537 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5538 }
5539
5540 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5541 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5542
5543 remove:
5544 if (skb && !IS_ERR(skb))
5545 kfree_skb(skb);
5546
5547 mgmt_pending_free(cmd);
5548 }
5549
5550 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5551 {
5552 struct mgmt_pending_cmd *cmd = data;
5553
5554 if (bredr_sc_enabled(hdev))
5555 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5556 else
5557 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5558
5559 if (IS_ERR(cmd->skb))
5560 return PTR_ERR(cmd->skb);
5561 else
5562 return 0;
5563 }
5564
5565 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5566 void *data, u16 data_len)
5567 {
5568 struct mgmt_pending_cmd *cmd;
5569 int err;
5570
5571 bt_dev_dbg(hdev, "sock %p", sk);
5572
5573 hci_dev_lock(hdev);
5574
5575 if (!hdev_is_powered(hdev)) {
5576 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5577 MGMT_STATUS_NOT_POWERED);
5578 goto unlock;
5579 }
5580
5581 if (!lmp_ssp_capable(hdev)) {
5582 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5583 MGMT_STATUS_NOT_SUPPORTED);
5584 goto unlock;
5585 }
5586
5587 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5588 if (!cmd)
5589 err = -ENOMEM;
5590 else
5591 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5592 read_local_oob_data_complete);
5593
5594 if (err < 0) {
5595 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5596 MGMT_STATUS_FAILED);
5597
5598 if (cmd)
5599 mgmt_pending_free(cmd);
5600 }
5601
5602 unlock:
5603 hci_dev_unlock(hdev);
5604 return err;
5605 }
5606
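/* MGMT_OP_ADD_REMOTE_OOB_DATA comes in two sizes: a legacy form
 * carrying only the P-192 hash and randomizer, and an extended form
 * that adds the P-256 values. Zero-filled values disable OOB data for
 * the corresponding curve; for LE addresses the P-192 values must be
 * zero since legacy SMP OOB isn't implemented.
 */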
5607 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5608 void *data, u16 len)
5609 {
5610 struct mgmt_addr_info *addr = data;
5611 int err;
5612
5613 bt_dev_dbg(hdev, "sock %p", sk);
5614
5615 if (!bdaddr_type_is_valid(addr->type))
5616 return mgmt_cmd_complete(sk, hdev->id,
5617 MGMT_OP_ADD_REMOTE_OOB_DATA,
5618 MGMT_STATUS_INVALID_PARAMS,
5619 addr, sizeof(*addr));
5620
5621 hci_dev_lock(hdev);
5622
5623 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5624 struct mgmt_cp_add_remote_oob_data *cp = data;
5625 u8 status;
5626
5627 if (cp->addr.type != BDADDR_BREDR) {
5628 err = mgmt_cmd_complete(sk, hdev->id,
5629 MGMT_OP_ADD_REMOTE_OOB_DATA,
5630 MGMT_STATUS_INVALID_PARAMS,
5631 &cp->addr, sizeof(cp->addr));
5632 goto unlock;
5633 }
5634
5635 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5636 cp->addr.type, cp->hash,
5637 cp->rand, NULL, NULL);
5638 if (err < 0)
5639 status = MGMT_STATUS_FAILED;
5640 else
5641 status = MGMT_STATUS_SUCCESS;
5642
5643 err = mgmt_cmd_complete(sk, hdev->id,
5644 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5645 &cp->addr, sizeof(cp->addr));
5646 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5647 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5648 u8 *rand192, *hash192, *rand256, *hash256;
5649 u8 status;
5650
5651 if (bdaddr_type_is_le(cp->addr.type)) {
5652 /* Enforce zero-valued 192-bit parameters as
5653 * long as legacy SMP OOB isn't implemented.
5654 */
5655 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5656 memcmp(cp->hash192, ZERO_KEY, 16)) {
5657 err = mgmt_cmd_complete(sk, hdev->id,
5658 MGMT_OP_ADD_REMOTE_OOB_DATA,
5659 MGMT_STATUS_INVALID_PARAMS,
5660 addr, sizeof(*addr));
5661 goto unlock;
5662 }
5663
5664 rand192 = NULL;
5665 hash192 = NULL;
5666 } else {
5667 /* In case one of the P-192 values is set to zero,
5668 * then just disable OOB data for P-192.
5669 */
5670 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5671 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5672 rand192 = NULL;
5673 hash192 = NULL;
5674 } else {
5675 rand192 = cp->rand192;
5676 hash192 = cp->hash192;
5677 }
5678 }
5679
5680 /* In case one of the P-256 values is set to zero, then just
5681 * disable OOB data for P-256.
5682 */
5683 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5684 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5685 rand256 = NULL;
5686 hash256 = NULL;
5687 } else {
5688 rand256 = cp->rand256;
5689 hash256 = cp->hash256;
5690 }
5691
5692 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5693 cp->addr.type, hash192, rand192,
5694 hash256, rand256);
5695 if (err < 0)
5696 status = MGMT_STATUS_FAILED;
5697 else
5698 status = MGMT_STATUS_SUCCESS;
5699
5700 err = mgmt_cmd_complete(sk, hdev->id,
5701 MGMT_OP_ADD_REMOTE_OOB_DATA,
5702 status, &cp->addr, sizeof(cp->addr));
5703 } else {
5704 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5705 len);
5706 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5707 MGMT_STATUS_INVALID_PARAMS);
5708 }
5709
5710 unlock:
5711 hci_dev_unlock(hdev);
5712 return err;
5713 }
5714
5715 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5716 void *data, u16 len)
5717 {
5718 struct mgmt_cp_remove_remote_oob_data *cp = data;
5719 u8 status;
5720 int err;
5721
5722 bt_dev_dbg(hdev, "sock %p", sk);
5723
5724 if (cp->addr.type != BDADDR_BREDR)
5725 return mgmt_cmd_complete(sk, hdev->id,
5726 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5727 MGMT_STATUS_INVALID_PARAMS,
5728 &cp->addr, sizeof(cp->addr));
5729
5730 hci_dev_lock(hdev);
5731
5732 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5733 hci_remote_oob_data_clear(hdev);
5734 status = MGMT_STATUS_SUCCESS;
5735 goto done;
5736 }
5737
5738 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5739 if (err < 0)
5740 status = MGMT_STATUS_INVALID_PARAMS;
5741 else
5742 status = MGMT_STATUS_SUCCESS;
5743
5744 done:
5745 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5746 status, &cp->addr, sizeof(cp->addr));
5747
5748 hci_dev_unlock(hdev);
5749 return err;
5750 }
5751
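/* Validate the requested discovery type against the controller's
 * capabilities; on failure *mgmt_status holds the status code to
 * return to user space.
 */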
5752 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5753 uint8_t *mgmt_status)
5754 {
5755 switch (type) {
5756 case DISCOV_TYPE_LE:
5757 *mgmt_status = mgmt_le_support(hdev);
5758 if (*mgmt_status)
5759 return false;
5760 break;
5761 case DISCOV_TYPE_INTERLEAVED:
5762 *mgmt_status = mgmt_le_support(hdev);
5763 if (*mgmt_status)
5764 return false;
5765 fallthrough;
5766 case DISCOV_TYPE_BREDR:
5767 *mgmt_status = mgmt_bredr_support(hdev);
5768 if (*mgmt_status)
5769 return false;
5770 break;
5771 default:
5772 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5773 return false;
5774 }
5775
5776 return true;
5777 }
5778
5779 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5780 {
5781 struct mgmt_pending_cmd *cmd = data;
5782
5783 bt_dev_dbg(hdev, "err %d", err);
5784
5785 if (err == -ECANCELED)
5786 return;
5787
5788 if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5789 cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5790 cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5791 return;
5792
5793 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5794 cmd->param, 1);
5795 mgmt_pending_remove(cmd);
5796
5797 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
5798 DISCOVERY_FINDING);
5799 }
5800
5801 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5802 {
5803 return hci_start_discovery_sync(hdev);
5804 }
5805
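/* Common handler for Start Discovery and Start Limited Discovery */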
5806 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5807 u16 op, void *data, u16 len)
5808 {
5809 struct mgmt_cp_start_discovery *cp = data;
5810 struct mgmt_pending_cmd *cmd;
5811 u8 status;
5812 int err;
5813
5814 bt_dev_dbg(hdev, "sock %p", sk);
5815
5816 hci_dev_lock(hdev);
5817
5818 if (!hdev_is_powered(hdev)) {
5819 err = mgmt_cmd_complete(sk, hdev->id, op,
5820 MGMT_STATUS_NOT_POWERED,
5821 &cp->type, sizeof(cp->type));
5822 goto failed;
5823 }
5824
5825 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5826 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5827 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5828 &cp->type, sizeof(cp->type));
5829 goto failed;
5830 }
5831
5832 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5833 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5834 &cp->type, sizeof(cp->type));
5835 goto failed;
5836 }
5837
5838 /* Can't start discovery when it is paused */
5839 if (hdev->discovery_paused) {
5840 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5841 &cp->type, sizeof(cp->type));
5842 goto failed;
5843 }
5844
5845 /* Clear the discovery filter first to free any previously
5846 * allocated memory for the UUID list.
5847 */
5848 hci_discovery_filter_clear(hdev);
5849
5850 hdev->discovery.type = cp->type;
5851 hdev->discovery.report_invalid_rssi = false;
5852 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5853 hdev->discovery.limited = true;
5854 else
5855 hdev->discovery.limited = false;
5856
5857 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5858 if (!cmd) {
5859 err = -ENOMEM;
5860 goto failed;
5861 }
5862
5863 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5864 start_discovery_complete);
5865 if (err < 0) {
5866 mgmt_pending_remove(cmd);
5867 goto failed;
5868 }
5869
5870 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5871
5872 failed:
5873 hci_dev_unlock(hdev);
5874 return err;
5875 }
5876
5877 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5878 void *data, u16 len)
5879 {
5880 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5881 data, len);
5882 }
5883
5884 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5885 void *data, u16 len)
5886 {
5887 return start_discovery_internal(sk, hdev,
5888 MGMT_OP_START_LIMITED_DISCOVERY,
5889 data, len);
5890 }
5891
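/* Unlike plain Start Discovery, this command carries a variable-length
 * list of UUIDs to filter on, so the expected length depends on
 * uuid_count.
 */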
5892 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5893 void *data, u16 len)
5894 {
5895 struct mgmt_cp_start_service_discovery *cp = data;
5896 struct mgmt_pending_cmd *cmd;
5897 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5898 u16 uuid_count, expected_len;
5899 u8 status;
5900 int err;
5901
5902 bt_dev_dbg(hdev, "sock %p", sk);
5903
5904 hci_dev_lock(hdev);
5905
5906 if (!hdev_is_powered(hdev)) {
5907 err = mgmt_cmd_complete(sk, hdev->id,
5908 MGMT_OP_START_SERVICE_DISCOVERY,
5909 MGMT_STATUS_NOT_POWERED,
5910 &cp->type, sizeof(cp->type));
5911 goto failed;
5912 }
5913
5914 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5915 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5916 err = mgmt_cmd_complete(sk, hdev->id,
5917 MGMT_OP_START_SERVICE_DISCOVERY,
5918 MGMT_STATUS_BUSY, &cp->type,
5919 sizeof(cp->type));
5920 goto failed;
5921 }
5922
5923 if (hdev->discovery_paused) {
5924 err = mgmt_cmd_complete(sk, hdev->id,
5925 MGMT_OP_START_SERVICE_DISCOVERY,
5926 MGMT_STATUS_BUSY, &cp->type,
5927 sizeof(cp->type));
5928 goto failed;
5929 }
5930
5931 uuid_count = __le16_to_cpu(cp->uuid_count);
5932 if (uuid_count > max_uuid_count) {
5933 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5934 uuid_count);
5935 err = mgmt_cmd_complete(sk, hdev->id,
5936 MGMT_OP_START_SERVICE_DISCOVERY,
5937 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5938 sizeof(cp->type));
5939 goto failed;
5940 }
5941
5942 expected_len = sizeof(*cp) + uuid_count * 16;
5943 if (expected_len != len) {
5944 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
5945 expected_len, len);
5946 err = mgmt_cmd_complete(sk, hdev->id,
5947 MGMT_OP_START_SERVICE_DISCOVERY,
5948 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5949 sizeof(cp->type));
5950 goto failed;
5951 }
5952
5953 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5954 err = mgmt_cmd_complete(sk, hdev->id,
5955 MGMT_OP_START_SERVICE_DISCOVERY,
5956 status, &cp->type, sizeof(cp->type));
5957 goto failed;
5958 }
5959
5960 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
5961 hdev, data, len);
5962 if (!cmd) {
5963 err = -ENOMEM;
5964 goto failed;
5965 }
5966
5967 /* Clear the discovery filter first to free any previously
5968 * allocated memory for the UUID list.
5969 */
5970 hci_discovery_filter_clear(hdev);
5971
5972 hdev->discovery.result_filtering = true;
5973 hdev->discovery.type = cp->type;
5974 hdev->discovery.rssi = cp->rssi;
5975 hdev->discovery.uuid_count = uuid_count;
5976
5977 if (uuid_count > 0) {
5978 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
5979 GFP_KERNEL);
5980 if (!hdev->discovery.uuids) {
5981 err = mgmt_cmd_complete(sk, hdev->id,
5982 MGMT_OP_START_SERVICE_DISCOVERY,
5983 MGMT_STATUS_FAILED,
5984 &cp->type, sizeof(cp->type));
5985 mgmt_pending_remove(cmd);
5986 goto failed;
5987 }
5988 }
5989
5990 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5991 start_discovery_complete);
5992 if (err < 0) {
5993 mgmt_pending_remove(cmd);
5994 goto failed;
5995 }
5996
5997 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5998
5999 failed:
6000 hci_dev_unlock(hdev);
6001 return err;
6002 }
6003
6004 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6005 {
6006 struct mgmt_pending_cmd *cmd = data;
6007
6008 if (err == -ECANCELED ||
6009 cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6010 return;
6011
6012 bt_dev_dbg(hdev, "err %d", err);
6013
6014 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
6015 cmd->param, 1);
6016 mgmt_pending_remove(cmd);
6017
6018 if (!err)
6019 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6020 }
6021
6022 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6023 {
6024 return hci_stop_discovery_sync(hdev);
6025 }
6026
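/* Stopping is only allowed while discovery is active and the type
 * matches the one that was started.
 */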
6027 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6028 u16 len)
6029 {
6030 struct mgmt_cp_stop_discovery *mgmt_cp = data;
6031 struct mgmt_pending_cmd *cmd;
6032 int err;
6033
6034 bt_dev_dbg(hdev, "sock %p", sk);
6035
6036 hci_dev_lock(hdev);
6037
6038 if (!hci_discovery_active(hdev)) {
6039 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6040 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6041 sizeof(mgmt_cp->type));
6042 goto unlock;
6043 }
6044
6045 if (hdev->discovery.type != mgmt_cp->type) {
6046 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6047 MGMT_STATUS_INVALID_PARAMS,
6048 &mgmt_cp->type, sizeof(mgmt_cp->type));
6049 goto unlock;
6050 }
6051
6052 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6053 if (!cmd) {
6054 err = -ENOMEM;
6055 goto unlock;
6056 }
6057
6058 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6059 stop_discovery_complete);
6060 if (err < 0) {
6061 mgmt_pending_remove(cmd);
6062 goto unlock;
6063 }
6064
6065 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6066
6067 unlock:
6068 hci_dev_unlock(hdev);
6069 return err;
6070 }
6071
6072 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6073 u16 len)
6074 {
6075 struct mgmt_cp_confirm_name *cp = data;
6076 struct inquiry_entry *e;
6077 int err;
6078
6079 bt_dev_dbg(hdev, "sock %p", sk);
6080
6081 hci_dev_lock(hdev);
6082
6083 if (!hci_discovery_active(hdev)) {
6084 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6085 MGMT_STATUS_FAILED, &cp->addr,
6086 sizeof(cp->addr));
6087 goto failed;
6088 }
6089
6090 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6091 if (!e) {
6092 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6093 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6094 sizeof(cp->addr));
6095 goto failed;
6096 }
6097
6098 if (cp->name_known) {
6099 e->name_state = NAME_KNOWN;
6100 list_del(&e->list);
6101 } else {
6102 e->name_state = NAME_NEEDED;
6103 hci_inquiry_cache_update_resolve(hdev, e);
6104 }
6105
6106 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6107 &cp->addr, sizeof(cp->addr));
6108
6109 failed:
6110 hci_dev_unlock(hdev);
6111 return err;
6112 }
6113
6114 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6115 u16 len)
6116 {
6117 struct mgmt_cp_block_device *cp = data;
6118 u8 status;
6119 int err;
6120
6121 bt_dev_dbg(hdev, "sock %p", sk);
6122
6123 if (!bdaddr_type_is_valid(cp->addr.type))
6124 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6125 MGMT_STATUS_INVALID_PARAMS,
6126 &cp->addr, sizeof(cp->addr));
6127
6128 hci_dev_lock(hdev);
6129
6130 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6131 cp->addr.type);
6132 if (err < 0) {
6133 status = MGMT_STATUS_FAILED;
6134 goto done;
6135 }
6136
6137 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6138 sk);
6139 status = MGMT_STATUS_SUCCESS;
6140
6141 done:
6142 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6143 &cp->addr, sizeof(cp->addr));
6144
6145 hci_dev_unlock(hdev);
6146
6147 return err;
6148 }
6149
6150 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6151 u16 len)
6152 {
6153 struct mgmt_cp_unblock_device *cp = data;
6154 u8 status;
6155 int err;
6156
6157 bt_dev_dbg(hdev, "sock %p", sk);
6158
6159 if (!bdaddr_type_is_valid(cp->addr.type))
6160 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6161 MGMT_STATUS_INVALID_PARAMS,
6162 &cp->addr, sizeof(cp->addr));
6163
6164 hci_dev_lock(hdev);
6165
6166 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6167 cp->addr.type);
6168 if (err < 0) {
6169 status = MGMT_STATUS_INVALID_PARAMS;
6170 goto done;
6171 }
6172
6173 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6174 sk);
6175 status = MGMT_STATUS_SUCCESS;
6176
6177 done:
6178 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6179 &cp->addr, sizeof(cp->addr));
6180
6181 hci_dev_unlock(hdev);
6182
6183 return err;
6184 }
6185
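/* The Device ID is exposed through the EIR data, so changing it only
 * requires the EIR to be resynced.
 */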
6186 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6187 {
6188 return hci_update_eir_sync(hdev);
6189 }
6190
6191 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6192 u16 len)
6193 {
6194 struct mgmt_cp_set_device_id *cp = data;
6195 int err;
6196 __u16 source;
6197
6198 bt_dev_dbg(hdev, "sock %p", sk);
6199
6200 source = __le16_to_cpu(cp->source);
6201
6202 if (source > 0x0002)
6203 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6204 MGMT_STATUS_INVALID_PARAMS);
6205
6206 hci_dev_lock(hdev);
6207
6208 hdev->devid_source = source;
6209 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6210 hdev->devid_product = __le16_to_cpu(cp->product);
6211 hdev->devid_version = __le16_to_cpu(cp->version);
6212
6213 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6214 NULL, 0);
6215
6216 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6217
6218 hci_dev_unlock(hdev);
6219
6220 return err;
6221 }
6222
6223 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6224 {
6225 if (err)
6226 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6227 else
6228 bt_dev_dbg(hdev, "status %d", err);
6229 }
6230
6231 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6232 {
6233 struct cmd_lookup match = { NULL, hdev };
6234 u8 instance;
6235 struct adv_info *adv_instance;
6236 u8 status = mgmt_status(err);
6237
6238 if (status) {
6239 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
6240 cmd_status_rsp, &status);
6241 return;
6242 }
6243
6244 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6245 hci_dev_set_flag(hdev, HCI_ADVERTISING);
6246 else
6247 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6248
6249 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
6250 &match);
6251
6252 new_settings(hdev, match.sk);
6253
6254 if (match.sk)
6255 sock_put(match.sk);
6256
6257 /* If "Set Advertising" was just disabled and instance advertising was
6258 * set up earlier, then re-enable multi-instance advertising.
6259 */
6260 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6261 list_empty(&hdev->adv_instances))
6262 return;
6263
6264 instance = hdev->cur_adv_instance;
6265 if (!instance) {
6266 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6267 struct adv_info, list);
6268 if (!adv_instance)
6269 return;
6270
6271 instance = adv_instance->instance;
6272 }
6273
6274 err = hci_schedule_adv_instance_sync(hdev, instance, true);
6275
6276 enable_advertising_instance(hdev, err);
6277 }
6278
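/* Apply the Set Advertising setting on instance 0x00, using the
 * extended advertising commands when the controller supports them.
 */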
6279 static int set_adv_sync(struct hci_dev *hdev, void *data)
6280 {
6281 struct mgmt_pending_cmd *cmd = data;
6282 struct mgmt_mode *cp = cmd->param;
6283 u8 val = !!cp->val;
6284
6285 if (cp->val == 0x02)
6286 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6287 else
6288 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6289
6290 cancel_adv_timeout(hdev);
6291
6292 if (val) {
6293 /* Switch to instance "0" for the Set Advertising setting.
6294 * We cannot use update_[adv|scan_rsp]_data() here as the
6295 * HCI_ADVERTISING flag is not yet set.
6296 */
6297 hdev->cur_adv_instance = 0x00;
6298
6299 if (ext_adv_capable(hdev)) {
6300 hci_start_ext_adv_sync(hdev, 0x00);
6301 } else {
6302 hci_update_adv_data_sync(hdev, 0x00);
6303 hci_update_scan_rsp_data_sync(hdev, 0x00);
6304 hci_enable_advertising_sync(hdev);
6305 }
6306 } else {
6307 hci_disable_advertising_sync(hdev);
6308 }
6309
6310 return 0;
6311 }
6312
6313 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6314 u16 len)
6315 {
6316 struct mgmt_mode *cp = data;
6317 struct mgmt_pending_cmd *cmd;
6318 u8 val, status;
6319 int err;
6320
6321 bt_dev_dbg(hdev, "sock %p", sk);
6322
6323 status = mgmt_le_support(hdev);
6324 if (status)
6325 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6326 status);
6327
6328 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6329 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6330 MGMT_STATUS_INVALID_PARAMS);
6331
6332 if (hdev->advertising_paused)
6333 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6334 MGMT_STATUS_BUSY);
6335
6336 hci_dev_lock(hdev);
6337
6338 val = !!cp->val;
6339
6340 /* The following conditions are ones which mean that we should
6341 * not do any HCI communication but directly send a mgmt
6342 * response to user space (after toggling the flag if
6343 * necessary).
6344 */
6345 if (!hdev_is_powered(hdev) ||
6346 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6347 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6348 hci_dev_test_flag(hdev, HCI_MESH) ||
6349 hci_conn_num(hdev, LE_LINK) > 0 ||
6350 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6351 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6352 bool changed;
6353
6354 if (cp->val) {
6355 hdev->cur_adv_instance = 0x00;
6356 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6357 if (cp->val == 0x02)
6358 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6359 else
6360 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6361 } else {
6362 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6363 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6364 }
6365
6366 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6367 if (err < 0)
6368 goto unlock;
6369
6370 if (changed)
6371 err = new_settings(hdev, sk);
6372
6373 goto unlock;
6374 }
6375
6376 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6377 pending_find(MGMT_OP_SET_LE, hdev)) {
6378 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6379 MGMT_STATUS_BUSY);
6380 goto unlock;
6381 }
6382
6383 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6384 if (!cmd)
6385 err = -ENOMEM;
6386 else
6387 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6388 set_advertising_complete);
6389
6390 if (err < 0 && cmd)
6391 mgmt_pending_remove(cmd);
6392
6393 unlock:
6394 hci_dev_unlock(hdev);
6395 return err;
6396 }
6397
6398 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6399 void *data, u16 len)
6400 {
6401 struct mgmt_cp_set_static_address *cp = data;
6402 int err;
6403
6404 bt_dev_dbg(hdev, "sock %p", sk);
6405
6406 if (!lmp_le_capable(hdev))
6407 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6408 MGMT_STATUS_NOT_SUPPORTED);
6409
6410 if (hdev_is_powered(hdev))
6411 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6412 MGMT_STATUS_REJECTED);
6413
6414 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6415 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6416 return mgmt_cmd_status(sk, hdev->id,
6417 MGMT_OP_SET_STATIC_ADDRESS,
6418 MGMT_STATUS_INVALID_PARAMS);
6419
6420 /* Two most significant bits shall be set */
6421 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6422 return mgmt_cmd_status(sk, hdev->id,
6423 MGMT_OP_SET_STATIC_ADDRESS,
6424 MGMT_STATUS_INVALID_PARAMS);
6425 }
6426
6427 hci_dev_lock(hdev);
6428
6429 bacpy(&hdev->static_addr, &cp->bdaddr);
6430
6431 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6432 if (err < 0)
6433 goto unlock;
6434
6435 err = new_settings(hdev, sk);
6436
6437 unlock:
6438 hci_dev_unlock(hdev);
6439 return err;
6440 }
6441
6442 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6443 void *data, u16 len)
6444 {
6445 struct mgmt_cp_set_scan_params *cp = data;
6446 __u16 interval, window;
6447 int err;
6448
6449 bt_dev_dbg(hdev, "sock %p", sk);
6450
6451 if (!lmp_le_capable(hdev))
6452 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6453 MGMT_STATUS_NOT_SUPPORTED);
6454
6455 interval = __le16_to_cpu(cp->interval);
6456
6457 if (interval < 0x0004 || interval > 0x4000)
6458 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6459 MGMT_STATUS_INVALID_PARAMS);
6460
6461 window = __le16_to_cpu(cp->window);
6462
6463 if (window < 0x0004 || window > 0x4000)
6464 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6465 MGMT_STATUS_INVALID_PARAMS);
6466
6467 if (window > interval)
6468 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6469 MGMT_STATUS_INVALID_PARAMS);
6470
6471 hci_dev_lock(hdev);
6472
6473 hdev->le_scan_interval = interval;
6474 hdev->le_scan_window = window;
6475
6476 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6477 NULL, 0);
6478
6479 /* If background scan is running, restart it so new parameters are
6480 * loaded.
6481 */
6482 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6483 hdev->discovery.state == DISCOVERY_STOPPED)
6484 hci_update_passive_scan(hdev);
6485
6486 hci_dev_unlock(hdev);
6487
6488 return err;
6489 }
6490
6491 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6492 {
6493 struct mgmt_pending_cmd *cmd = data;
6494
6495 bt_dev_dbg(hdev, "err %d", err);
6496
6497 if (err) {
6498 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6499 mgmt_status(err));
6500 } else {
6501 struct mgmt_mode *cp = cmd->param;
6502
6503 if (cp->val)
6504 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6505 else
6506 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6507
6508 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6509 new_settings(hdev, cmd->sk);
6510 }
6511
6512 mgmt_pending_free(cmd);
6513 }
6514
6515 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6516 {
6517 struct mgmt_pending_cmd *cmd = data;
6518 struct mgmt_mode *cp = cmd->param;
6519
6520 return hci_write_fast_connectable_sync(hdev, cp->val);
6521 }
6522
6523 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6524 void *data, u16 len)
6525 {
6526 struct mgmt_mode *cp = data;
6527 struct mgmt_pending_cmd *cmd;
6528 int err;
6529
6530 bt_dev_dbg(hdev, "sock %p", sk);
6531
6532 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6533 hdev->hci_ver < BLUETOOTH_VER_1_2)
6534 return mgmt_cmd_status(sk, hdev->id,
6535 MGMT_OP_SET_FAST_CONNECTABLE,
6536 MGMT_STATUS_NOT_SUPPORTED);
6537
6538 if (cp->val != 0x00 && cp->val != 0x01)
6539 return mgmt_cmd_status(sk, hdev->id,
6540 MGMT_OP_SET_FAST_CONNECTABLE,
6541 MGMT_STATUS_INVALID_PARAMS);
6542
6543 hci_dev_lock(hdev);
6544
6545 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6546 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6547 goto unlock;
6548 }
6549
6550 if (!hdev_is_powered(hdev)) {
6551 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6552 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6553 new_settings(hdev, sk);
6554 goto unlock;
6555 }
6556
6557 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6558 len);
6559 if (!cmd)
6560 err = -ENOMEM;
6561 else
6562 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6563 fast_connectable_complete);
6564
6565 if (err < 0) {
6566 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6567 MGMT_STATUS_FAILED);
6568
6569 if (cmd)
6570 mgmt_pending_free(cmd);
6571 }
6572
6573 unlock:
6574 hci_dev_unlock(hdev);
6575
6576 return err;
6577 }
6578
6579 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6580 {
6581 struct mgmt_pending_cmd *cmd = data;
6582
6583 bt_dev_dbg(hdev, "err %d", err);
6584
6585 if (err) {
6586 u8 mgmt_err = mgmt_status(err);
6587
6588 /* We need to restore the flag if related HCI commands
6589 * failed.
6590 */
6591 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6592
6593 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6594 } else {
6595 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6596 new_settings(hdev, cmd->sk);
6597 }
6598
6599 mgmt_pending_free(cmd);
6600 }
6601
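/* Runs when BR/EDR is being re-enabled: reset fast connectable,
 * update the scan mode and refresh the advertising data so its
 * flags reflect the new setting.
 */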
6602 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6603 {
6604 int status;
6605
6606 status = hci_write_fast_connectable_sync(hdev, false);
6607
6608 if (!status)
6609 status = hci_update_scan_sync(hdev);
6610
6611 /* Since only the advertising data flags will change, there
6612 * is no need to update the scan response data.
6613 */
6614 if (!status)
6615 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6616
6617 return status;
6618 }
6619
6620 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6621 {
6622 struct mgmt_mode *cp = data;
6623 struct mgmt_pending_cmd *cmd;
6624 int err;
6625
6626 bt_dev_dbg(hdev, "sock %p", sk);
6627
6628 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6629 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6630 MGMT_STATUS_NOT_SUPPORTED);
6631
6632 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6633 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6634 MGMT_STATUS_REJECTED);
6635
6636 if (cp->val != 0x00 && cp->val != 0x01)
6637 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6638 MGMT_STATUS_INVALID_PARAMS);
6639
6640 hci_dev_lock(hdev);
6641
6642 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6643 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6644 goto unlock;
6645 }
6646
6647 if (!hdev_is_powered(hdev)) {
6648 if (!cp->val) {
6649 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6650 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6651 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6652 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6653 }
6654
6655 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6656
6657 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6658 if (err < 0)
6659 goto unlock;
6660
6661 err = new_settings(hdev, sk);
6662 goto unlock;
6663 }
6664
6665 /* Reject disabling when powered on */
6666 if (!cp->val) {
6667 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6668 MGMT_STATUS_REJECTED);
6669 goto unlock;
6670 } else {
6671 /* When configuring a dual-mode controller to operate
6672 * with LE only and using a static address, then switching
6673 * BR/EDR back on is not allowed.
6674 *
6675 * Dual-mode controllers shall operate with the public
6676 * address as its identity address for BR/EDR and LE. So
6677 * reject the attempt to create an invalid configuration.
6678 *
6679  * The same restriction applies when secure connections
6680 * has been enabled. For BR/EDR this is a controller feature
6681 * while for LE it is a host stack feature. This means that
6682 * switching BR/EDR back on when secure connections has been
6683 * enabled is not a supported transaction.
6684 */
6685 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6686 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6687 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6688 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6689 MGMT_STATUS_REJECTED);
6690 goto unlock;
6691 }
6692 }
6693
6694 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6695 if (!cmd)
6696 err = -ENOMEM;
6697 else
6698 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6699 set_bredr_complete);
6700
6701 if (err < 0) {
6702 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6703 MGMT_STATUS_FAILED);
6704 if (cmd)
6705 mgmt_pending_free(cmd);
6706
6707 goto unlock;
6708 }
6709
6710 /* We need to flip the bit already here so that
6711 * hci_req_update_adv_data generates the correct flags.
6712 */
6713 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6714
6715 unlock:
6716 hci_dev_unlock(hdev);
6717 return err;
6718 }
6719
6720 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6721 {
6722 struct mgmt_pending_cmd *cmd = data;
6723 struct mgmt_mode *cp;
6724
6725 bt_dev_dbg(hdev, "err %d", err);
6726
6727 if (err) {
6728 u8 mgmt_err = mgmt_status(err);
6729
6730 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6731 goto done;
6732 }
6733
6734 cp = cmd->param;
6735
6736 switch (cp->val) {
6737 case 0x00:
6738 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6739 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6740 break;
6741 case 0x01:
6742 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6743 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6744 break;
6745 case 0x02:
6746 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6747 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6748 break;
6749 }
6750
6751 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6752 new_settings(hdev, cmd->sk);
6753
6754 done:
6755 mgmt_pending_free(cmd);
6756 }
6757
6758 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6759 {
6760 struct mgmt_pending_cmd *cmd = data;
6761 struct mgmt_mode *cp = cmd->param;
6762 u8 val = !!cp->val;
6763
6764 /* Force write of val */
6765 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6766
6767 return hci_write_sc_support_sync(hdev, val);
6768 }
6769
6770 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6771 void *data, u16 len)
6772 {
6773 struct mgmt_mode *cp = data;
6774 struct mgmt_pending_cmd *cmd;
6775 u8 val;
6776 int err;
6777
6778 bt_dev_dbg(hdev, "sock %p", sk);
6779
6780 if (!lmp_sc_capable(hdev) &&
6781 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6782 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6783 MGMT_STATUS_NOT_SUPPORTED);
6784
6785 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6786 lmp_sc_capable(hdev) &&
6787 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6788 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6789 MGMT_STATUS_REJECTED);
6790
6791 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6792 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6793 MGMT_STATUS_INVALID_PARAMS);
6794
6795 hci_dev_lock(hdev);
6796
6797 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6798 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6799 bool changed;
6800
6801 if (cp->val) {
6802 changed = !hci_dev_test_and_set_flag(hdev,
6803 HCI_SC_ENABLED);
6804 if (cp->val == 0x02)
6805 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6806 else
6807 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6808 } else {
6809 changed = hci_dev_test_and_clear_flag(hdev,
6810 HCI_SC_ENABLED);
6811 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6812 }
6813
6814 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6815 if (err < 0)
6816 goto failed;
6817
6818 if (changed)
6819 err = new_settings(hdev, sk);
6820
6821 goto failed;
6822 }
6823
6824 val = !!cp->val;
6825
6826 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6827 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6828 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6829 goto failed;
6830 }
6831
6832 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6833 if (!cmd)
6834 err = -ENOMEM;
6835 else
6836 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6837 set_secure_conn_complete);
6838
6839 if (err < 0) {
6840 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6841 MGMT_STATUS_FAILED);
6842 if (cmd)
6843 mgmt_pending_free(cmd);
6844 }
6845
6846 failed:
6847 hci_dev_unlock(hdev);
6848 return err;
6849 }
6850
6851 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6852 void *data, u16 len)
6853 {
6854 struct mgmt_mode *cp = data;
6855 bool changed, use_changed;
6856 int err;
6857
6858 bt_dev_dbg(hdev, "sock %p", sk);
6859
6860 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6861 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6862 MGMT_STATUS_INVALID_PARAMS);
6863
6864 hci_dev_lock(hdev);
6865
6866 if (cp->val)
6867 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6868 else
6869 changed = hci_dev_test_and_clear_flag(hdev,
6870 HCI_KEEP_DEBUG_KEYS);
6871
6872 if (cp->val == 0x02)
6873 use_changed = !hci_dev_test_and_set_flag(hdev,
6874 HCI_USE_DEBUG_KEYS);
6875 else
6876 use_changed = hci_dev_test_and_clear_flag(hdev,
6877 HCI_USE_DEBUG_KEYS);
6878
6879 if (hdev_is_powered(hdev) && use_changed &&
6880 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6881 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6882 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6883 sizeof(mode), &mode);
6884 }
6885
6886 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6887 if (err < 0)
6888 goto unlock;
6889
6890 if (changed)
6891 err = new_settings(hdev, sk);
6892
6893 unlock:
6894 hci_dev_unlock(hdev);
6895 return err;
6896 }
6897
6898 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6899 u16 len)
6900 {
6901 struct mgmt_cp_set_privacy *cp = cp_data;
6902 bool changed;
6903 int err;
6904
6905 bt_dev_dbg(hdev, "sock %p", sk);
6906
6907 if (!lmp_le_capable(hdev))
6908 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6909 MGMT_STATUS_NOT_SUPPORTED);
6910
6911 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6912 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6913 MGMT_STATUS_INVALID_PARAMS);
6914
6915 if (hdev_is_powered(hdev))
6916 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6917 MGMT_STATUS_REJECTED);
6918
6919 hci_dev_lock(hdev);
6920
6921 /* If user space supports this command it is also expected to
6922 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
6923 */
6924 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6925
6926 if (cp->privacy) {
6927 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
6928 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
6929 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
6930 hci_adv_instances_set_rpa_expired(hdev, true);
6931 if (cp->privacy == 0x02)
6932 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
6933 else
6934 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6935 } else {
6936 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
6937 memset(hdev->irk, 0, sizeof(hdev->irk));
6938 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
6939 hci_adv_instances_set_rpa_expired(hdev, false);
6940 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6941 }
6942
6943 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
6944 if (err < 0)
6945 goto unlock;
6946
6947 if (changed)
6948 err = new_settings(hdev, sk);
6949
6950 unlock:
6951 hci_dev_unlock(hdev);
6952 return err;
6953 }
6954
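/* An IRK must use an identity address: either public or static
 * random (two most significant bits set).
 */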
6955 static bool irk_is_valid(struct mgmt_irk_info *irk)
6956 {
6957 switch (irk->addr.type) {
6958 case BDADDR_LE_PUBLIC:
6959 return true;
6960
6961 case BDADDR_LE_RANDOM:
6962 /* Two most significant bits shall be set */
6963 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6964 return false;
6965 return true;
6966 }
6967
6968 return false;
6969 }
6970
6971 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6972 u16 len)
6973 {
6974 struct mgmt_cp_load_irks *cp = cp_data;
6975 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
6976 sizeof(struct mgmt_irk_info));
6977 u16 irk_count, expected_len;
6978 int i, err;
6979
6980 bt_dev_dbg(hdev, "sock %p", sk);
6981
6982 if (!lmp_le_capable(hdev))
6983 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6984 MGMT_STATUS_NOT_SUPPORTED);
6985
6986 irk_count = __le16_to_cpu(cp->irk_count);
6987 if (irk_count > max_irk_count) {
6988 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
6989 irk_count);
6990 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6991 MGMT_STATUS_INVALID_PARAMS);
6992 }
6993
6994 expected_len = struct_size(cp, irks, irk_count);
6995 if (expected_len != len) {
6996 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
6997 expected_len, len);
6998 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6999 MGMT_STATUS_INVALID_PARAMS);
7000 }
7001
7002 bt_dev_dbg(hdev, "irk_count %u", irk_count);
7003
7004 for (i = 0; i < irk_count; i++) {
7005 struct mgmt_irk_info *key = &cp->irks[i];
7006
7007 if (!irk_is_valid(key))
7008 return mgmt_cmd_status(sk, hdev->id,
7009 MGMT_OP_LOAD_IRKS,
7010 MGMT_STATUS_INVALID_PARAMS);
7011 }
7012
7013 hci_dev_lock(hdev);
7014
7015 hci_smp_irks_clear(hdev);
7016
7017 for (i = 0; i < irk_count; i++) {
7018 struct mgmt_irk_info *irk = &cp->irks[i];
7019
7020 if (hci_is_blocked_key(hdev,
7021 HCI_BLOCKED_KEY_TYPE_IRK,
7022 irk->val)) {
7023 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7024 &irk->addr.bdaddr);
7025 continue;
7026 }
7027
7028 hci_add_irk(hdev, &irk->addr.bdaddr,
7029 le_addr_type(irk->addr.type), irk->val,
7030 BDADDR_ANY);
7031 }
7032
7033 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7034
7035 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7036
7037 hci_dev_unlock(hdev);
7038
7039 return err;
7040 }
7041
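/* An LTK must carry a valid initiator flag and an identity address */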
7042 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7043 {
7044 if (key->initiator != 0x00 && key->initiator != 0x01)
7045 return false;
7046
7047 switch (key->addr.type) {
7048 case BDADDR_LE_PUBLIC:
7049 return true;
7050
7051 case BDADDR_LE_RANDOM:
7052 /* Two most significant bits shall be set */
7053 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7054 return false;
7055 return true;
7056 }
7057
7058 return false;
7059 }
7060
7061 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7062 void *cp_data, u16 len)
7063 {
7064 struct mgmt_cp_load_long_term_keys *cp = cp_data;
7065 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7066 sizeof(struct mgmt_ltk_info));
7067 u16 key_count, expected_len;
7068 int i, err;
7069
7070 bt_dev_dbg(hdev, "sock %p", sk);
7071
7072 if (!lmp_le_capable(hdev))
7073 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7074 MGMT_STATUS_NOT_SUPPORTED);
7075
7076 key_count = __le16_to_cpu(cp->key_count);
7077 if (key_count > max_key_count) {
7078 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7079 key_count);
7080 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7081 MGMT_STATUS_INVALID_PARAMS);
7082 }
7083
7084 expected_len = struct_size(cp, keys, key_count);
7085 if (expected_len != len) {
7086 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7087 expected_len, len);
7088 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7089 MGMT_STATUS_INVALID_PARAMS);
7090 }
7091
7092 bt_dev_dbg(hdev, "key_count %u", key_count);
7093
7094 hci_dev_lock(hdev);
7095
7096 hci_smp_ltks_clear(hdev);
7097
7098 for (i = 0; i < key_count; i++) {
7099 struct mgmt_ltk_info *key = &cp->keys[i];
7100 u8 type, authenticated;
7101
7102 if (hci_is_blocked_key(hdev,
7103 HCI_BLOCKED_KEY_TYPE_LTK,
7104 key->val)) {
7105 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7106 &key->addr.bdaddr);
7107 continue;
7108 }
7109
7110 if (!ltk_is_valid(key)) {
7111 bt_dev_warn(hdev, "Invalid LTK for %pMR",
7112 &key->addr.bdaddr);
7113 continue;
7114 }
7115
7116 switch (key->type) {
7117 case MGMT_LTK_UNAUTHENTICATED:
7118 authenticated = 0x00;
7119 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7120 break;
7121 case MGMT_LTK_AUTHENTICATED:
7122 authenticated = 0x01;
7123 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7124 break;
7125 case MGMT_LTK_P256_UNAUTH:
7126 authenticated = 0x00;
7127 type = SMP_LTK_P256;
7128 break;
7129 case MGMT_LTK_P256_AUTH:
7130 authenticated = 0x01;
7131 type = SMP_LTK_P256;
7132 break;
7133 case MGMT_LTK_P256_DEBUG:
7134 authenticated = 0x00;
7135 type = SMP_LTK_P256_DEBUG;
7136 fallthrough;
7137 default:
7138 continue;
7139 }
7140
7141 hci_add_ltk(hdev, &key->addr.bdaddr,
7142 le_addr_type(key->addr.type), type, authenticated,
7143 key->val, key->enc_size, key->ediv, key->rand);
7144 }
7145
7146 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7147 NULL, 0);
7148
7149 hci_dev_unlock(hdev);
7150
7151 return err;
7152 }
7153
7154 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7155 {
7156 struct mgmt_pending_cmd *cmd = data;
7157 struct hci_conn *conn = cmd->user_data;
7158 struct mgmt_cp_get_conn_info *cp = cmd->param;
7159 struct mgmt_rp_get_conn_info rp;
7160 u8 status;
7161
7162 bt_dev_dbg(hdev, "err %d", err);
7163
7164 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7165
7166 status = mgmt_status(err);
7167 if (status == MGMT_STATUS_SUCCESS) {
7168 rp.rssi = conn->rssi;
7169 rp.tx_power = conn->tx_power;
7170 rp.max_tx_power = conn->max_tx_power;
7171 } else {
7172 rp.rssi = HCI_RSSI_INVALID;
7173 rp.tx_power = HCI_TX_POWER_INVALID;
7174 rp.max_tx_power = HCI_TX_POWER_INVALID;
7175 }
7176
7177 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7178 &rp, sizeof(rp));
7179
7180 mgmt_pending_free(cmd);
7181 }
7182
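/* Runs from the cmd_sync context to refresh the cached RSSI and TX
 * power values for the connection.
 */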
7183 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7184 {
7185 struct mgmt_pending_cmd *cmd = data;
7186 struct mgmt_cp_get_conn_info *cp = cmd->param;
7187 struct hci_conn *conn;
7188 int err;
7189 __le16 handle;
7190
7191 /* Make sure we are still connected */
7192 if (cp->addr.type == BDADDR_BREDR)
7193 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7194 &cp->addr.bdaddr);
7195 else
7196 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7197
7198 if (!conn || conn->state != BT_CONNECTED)
7199 return MGMT_STATUS_NOT_CONNECTED;
7200
7201 cmd->user_data = conn;
7202 handle = cpu_to_le16(conn->handle);
7203
7204 /* Refresh RSSI each time */
7205 err = hci_read_rssi_sync(hdev, handle);
7206
7207 /* For LE links TX power does not change thus we don't need to
7208 * query for it once value is known.
7209 */
7210 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7211 conn->tx_power == HCI_TX_POWER_INVALID))
7212 err = hci_read_tx_power_sync(hdev, handle, 0x00);
7213
7214 /* Max TX power needs to be read only once per connection */
7215 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7216 err = hci_read_tx_power_sync(hdev, handle, 0x01);
7217
7218 return err;
7219 }
7220
7221 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7222 u16 len)
7223 {
7224 struct mgmt_cp_get_conn_info *cp = data;
7225 struct mgmt_rp_get_conn_info rp;
7226 struct hci_conn *conn;
7227 unsigned long conn_info_age;
7228 int err = 0;
7229
7230 bt_dev_dbg(hdev, "sock %p", sk);
7231
7232 memset(&rp, 0, sizeof(rp));
7233 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7234 rp.addr.type = cp->addr.type;
7235
7236 if (!bdaddr_type_is_valid(cp->addr.type))
7237 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7238 MGMT_STATUS_INVALID_PARAMS,
7239 &rp, sizeof(rp));
7240
7241 hci_dev_lock(hdev);
7242
7243 if (!hdev_is_powered(hdev)) {
7244 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7245 MGMT_STATUS_NOT_POWERED, &rp,
7246 sizeof(rp));
7247 goto unlock;
7248 }
7249
7250 if (cp->addr.type == BDADDR_BREDR)
7251 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7252 &cp->addr.bdaddr);
7253 else
7254 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7255
7256 if (!conn || conn->state != BT_CONNECTED) {
7257 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7258 MGMT_STATUS_NOT_CONNECTED, &rp,
7259 sizeof(rp));
7260 goto unlock;
7261 }
7262
7263 /* To keep the client from guessing when to poll again for information,
7264 * calculate the conn info age as a random value between the min/max set
7265 */
7266 conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
7267 hdev->conn_info_max_age - 1);
7268
7269 /* Query controller to refresh cached values if they are too old or were
7270 * never read.
7271 */
7272 if (time_after(jiffies, conn->conn_info_timestamp +
7273 msecs_to_jiffies(conn_info_age)) ||
7274 !conn->conn_info_timestamp) {
7275 struct mgmt_pending_cmd *cmd;
7276
7277 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7278 len);
7279 if (!cmd) {
7280 err = -ENOMEM;
7281 } else {
7282 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7283 cmd, get_conn_info_complete);
7284 }
7285
7286 if (err < 0) {
7287 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7288 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7289
7290 if (cmd)
7291 mgmt_pending_free(cmd);
7292
7293 goto unlock;
7294 }
7295
7296 conn->conn_info_timestamp = jiffies;
7297 } else {
7298 /* Cache is valid, just reply with values cached in hci_conn */
7299 rp.rssi = conn->rssi;
7300 rp.tx_power = conn->tx_power;
7301 rp.max_tx_power = conn->max_tx_power;
7302
7303 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7304 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7305 }
7306
7307 unlock:
7308 hci_dev_unlock(hdev);
7309 return err;
7310 }
7311
7312 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7313 {
7314 struct mgmt_pending_cmd *cmd = data;
7315 struct mgmt_cp_get_clock_info *cp = cmd->param;
7316 struct mgmt_rp_get_clock_info rp;
7317 struct hci_conn *conn = cmd->user_data;
7318 u8 status = mgmt_status(err);
7319
7320 bt_dev_dbg(hdev, "err %d", err);
7321
7322 memset(&rp, 0, sizeof(rp));
7323 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7324 rp.addr.type = cp->addr.type;
7325
7326 if (err)
7327 goto complete;
7328
7329 rp.local_clock = cpu_to_le32(hdev->clock);
7330
7331 if (conn) {
7332 rp.piconet_clock = cpu_to_le32(conn->clock);
7333 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7334 }
7335
7336 complete:
7337 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7338 sizeof(rp));
7339
7340 mgmt_pending_free(cmd);
7341 }
7342
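/* Read the local clock first and then, if the connection still
 * exists, the piconet clock for that connection.
 */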
7343 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7344 {
7345 struct mgmt_pending_cmd *cmd = data;
7346 struct mgmt_cp_get_clock_info *cp = cmd->param;
7347 struct hci_cp_read_clock hci_cp;
7348 struct hci_conn *conn;
7349
7350 memset(&hci_cp, 0, sizeof(hci_cp));
7351 hci_read_clock_sync(hdev, &hci_cp);
7352
7353 /* Make sure connection still exists */
7354 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7355 if (!conn || conn->state != BT_CONNECTED)
7356 return MGMT_STATUS_NOT_CONNECTED;
7357
7358 cmd->user_data = conn;
7359 hci_cp.handle = cpu_to_le16(conn->handle);
7360 hci_cp.which = 0x01; /* Piconet clock */
7361
7362 return hci_read_clock_sync(hdev, &hci_cp);
7363 }
7364
7365 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7366 u16 len)
7367 {
7368 struct mgmt_cp_get_clock_info *cp = data;
7369 struct mgmt_rp_get_clock_info rp;
7370 struct mgmt_pending_cmd *cmd;
7371 struct hci_conn *conn;
7372 int err;
7373
7374 bt_dev_dbg(hdev, "sock %p", sk);
7375
7376 memset(&rp, 0, sizeof(rp));
7377 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7378 rp.addr.type = cp->addr.type;
7379
7380 if (cp->addr.type != BDADDR_BREDR)
7381 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7382 MGMT_STATUS_INVALID_PARAMS,
7383 &rp, sizeof(rp));
7384
7385 hci_dev_lock(hdev);
7386
7387 if (!hdev_is_powered(hdev)) {
7388 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7389 MGMT_STATUS_NOT_POWERED, &rp,
7390 sizeof(rp));
7391 goto unlock;
7392 }
7393
7394 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7395 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7396 &cp->addr.bdaddr);
7397 if (!conn || conn->state != BT_CONNECTED) {
7398 err = mgmt_cmd_complete(sk, hdev->id,
7399 MGMT_OP_GET_CLOCK_INFO,
7400 MGMT_STATUS_NOT_CONNECTED,
7401 &rp, sizeof(rp));
7402 goto unlock;
7403 }
7404 } else {
7405 conn = NULL;
7406 }
7407
7408 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7409 if (!cmd)
7410 err = -ENOMEM;
7411 else
7412 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7413 get_clock_info_complete);
7414
7415 if (err < 0) {
7416 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7417 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7418
7419 if (cmd)
7420 mgmt_pending_free(cmd);
7421 }
7422
7424 unlock:
7425 hci_dev_unlock(hdev);
7426 return err;
7427 }
7428
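/* Check whether an LE connection to the given address and address
 * type is fully established.
 */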
7429 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7430 {
7431 struct hci_conn *conn;
7432
7433 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7434 if (!conn)
7435 return false;
7436
7437 if (conn->dst_type != type)
7438 return false;
7439
7440 if (conn->state != BT_CONNECTED)
7441 return false;
7442
7443 return true;
7444 }
7445
7446 /* This function requires the caller holds hdev->lock */
7447 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7448 u8 addr_type, u8 auto_connect)
7449 {
7450 struct hci_conn_params *params;
7451
7452 params = hci_conn_params_add(hdev, addr, addr_type);
7453 if (!params)
7454 return -EIO;
7455
7456 if (params->auto_connect == auto_connect)
7457 return 0;
7458
7459 hci_pend_le_list_del_init(params);
7460
7461 switch (auto_connect) {
7462 case HCI_AUTO_CONN_DISABLED:
7463 case HCI_AUTO_CONN_LINK_LOSS:
7464 /* If auto connect is being disabled when we're trying to
7465 * connect to device, keep connecting.
7466 */
7467 if (params->explicit_connect)
7468 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7469 break;
7470 case HCI_AUTO_CONN_REPORT:
7471 if (params->explicit_connect)
7472 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7473 else
7474 hci_pend_le_list_add(params, &hdev->pend_le_reports);
7475 break;
7476 case HCI_AUTO_CONN_DIRECT:
7477 case HCI_AUTO_CONN_ALWAYS:
7478 if (!is_connected(hdev, addr, addr_type))
7479 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7480 break;
7481 }
7482
7483 params->auto_connect = auto_connect;
7484
7485 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7486 addr, addr_type, auto_connect);
7487
7488 return 0;
7489 }
7490
7491 static void device_added(struct sock *sk, struct hci_dev *hdev,
7492 bdaddr_t *bdaddr, u8 type, u8 action)
7493 {
7494 struct mgmt_ev_device_added ev;
7495
7496 bacpy(&ev.addr.bdaddr, bdaddr);
7497 ev.addr.type = type;
7498 ev.action = action;
7499
7500 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7501 }
7502
7503 static void add_device_complete(struct hci_dev *hdev, void *data, int err)
7504 {
7505 struct mgmt_pending_cmd *cmd = data;
7506 struct mgmt_cp_add_device *cp = cmd->param;
7507
7508 if (!err) {
7509 struct hci_conn_params *params;
7510
7511 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7512 le_addr_type(cp->addr.type));
7513
7514 device_added(cmd->sk, hdev, &cp->addr.bdaddr, cp->addr.type,
7515 cp->action);
7516 device_flags_changed(NULL, hdev, &cp->addr.bdaddr,
7517 cp->addr.type, hdev->conn_flags,
7518 params ? params->flags : 0);
7519 }
7520
7521 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE,
7522 mgmt_status(err), &cp->addr, sizeof(cp->addr));
7523 mgmt_pending_free(cmd);
7524 }
7525
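/* Adding a device only requires the passive scan to be updated */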
7526 static int add_device_sync(struct hci_dev *hdev, void *data)
7527 {
7528 return hci_update_passive_scan_sync(hdev);
7529 }
7530
7531 static int add_device(struct sock *sk, struct hci_dev *hdev,
7532 void *data, u16 len)
7533 {
7534 struct mgmt_pending_cmd *cmd;
7535 struct mgmt_cp_add_device *cp = data;
7536 u8 auto_conn, addr_type;
7537 struct hci_conn_params *params;
7538 int err;
7539 u32 current_flags = 0;
7540 u32 supported_flags;
7541
7542 bt_dev_dbg(hdev, "sock %p", sk);
7543
7544 if (!bdaddr_type_is_valid(cp->addr.type) ||
7545 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7546 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7547 MGMT_STATUS_INVALID_PARAMS,
7548 &cp->addr, sizeof(cp->addr));
7549
7550 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7551 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7552 MGMT_STATUS_INVALID_PARAMS,
7553 &cp->addr, sizeof(cp->addr));
7554
7555 hci_dev_lock(hdev);
7556
7557 if (cp->addr.type == BDADDR_BREDR) {
7558 		/* Only the incoming connections action (0x01) is supported for now */
7559 if (cp->action != 0x01) {
7560 err = mgmt_cmd_complete(sk, hdev->id,
7561 MGMT_OP_ADD_DEVICE,
7562 MGMT_STATUS_INVALID_PARAMS,
7563 &cp->addr, sizeof(cp->addr));
7564 goto unlock;
7565 }
7566
7567 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7568 &cp->addr.bdaddr,
7569 cp->addr.type, 0);
7570 if (err)
7571 goto unlock;
7572
7573 hci_update_scan(hdev);
7574
7575 goto added;
7576 }
7577
7578 addr_type = le_addr_type(cp->addr.type);
7579
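	/* Map the Add Device action to an auto-connect policy: 0x02
	 * auto-connects whenever the device is seen, 0x01 connects only on
	 * directed advertising, and 0x00 merely reports the device.
	 */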
7580 if (cp->action == 0x02)
7581 auto_conn = HCI_AUTO_CONN_ALWAYS;
7582 else if (cp->action == 0x01)
7583 auto_conn = HCI_AUTO_CONN_DIRECT;
7584 else
7585 auto_conn = HCI_AUTO_CONN_REPORT;
7586
7587 	/* The kernel internally uses conn_params with a resolvable
7588 	 * private address, but Add Device allows only identity
7589 	 * addresses. Make sure this is enforced before calling
7590 	 * hci_conn_params_lookup.
7591 	 */
7592 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7593 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7594 MGMT_STATUS_INVALID_PARAMS,
7595 &cp->addr, sizeof(cp->addr));
7596 goto unlock;
7597 }
7598
7599 /* If the connection parameters don't exist for this device,
7600 * they will be created and configured with defaults.
7601 */
7602 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7603 auto_conn) < 0) {
7604 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7605 MGMT_STATUS_FAILED, &cp->addr,
7606 sizeof(cp->addr));
7607 goto unlock;
7608 } else {
7609 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7610 addr_type);
7611 if (params)
7612 current_flags = params->flags;
7613 }
7614
7615 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
7616 if (!cmd) {
7617 err = -ENOMEM;
7618 goto unlock;
7619 }
7620
7621 err = hci_cmd_sync_queue(hdev, add_device_sync, cmd,
7622 add_device_complete);
7623 if (err < 0) {
7624 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7625 MGMT_STATUS_FAILED, &cp->addr,
7626 sizeof(cp->addr));
7627 mgmt_pending_free(cmd);
7628 }
7629
7630 goto unlock;
7631
7632 added:
7633 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7634 supported_flags = hdev->conn_flags;
7635 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7636 supported_flags, current_flags);
7637
7638 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7639 MGMT_STATUS_SUCCESS, &cp->addr,
7640 sizeof(cp->addr));
7641
7642 unlock:
7643 hci_dev_unlock(hdev);
7644 return err;
7645 }
7646
7647 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7648 bdaddr_t *bdaddr, u8 type)
7649 {
7650 struct mgmt_ev_device_removed ev;
7651
7652 bacpy(&ev.addr.bdaddr, bdaddr);
7653 ev.addr.type = type;
7654
7655 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7656 }
7657
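/* Runs on the cmd_sync queue: refresh the passive scan so the removed
 * device is no longer auto-connected or reported.
 */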
7658 static int remove_device_sync(struct hci_dev *hdev, void *data)
7659 {
7660 return hci_update_passive_scan_sync(hdev);
7661 }
7662
7663 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7664 void *data, u16 len)
7665 {
7666 struct mgmt_cp_remove_device *cp = data;
7667 int err;
7668
7669 bt_dev_dbg(hdev, "sock %p", sk);
7670
7671 hci_dev_lock(hdev);
7672
7673 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7674 struct hci_conn_params *params;
7675 u8 addr_type;
7676
7677 if (!bdaddr_type_is_valid(cp->addr.type)) {
7678 err = mgmt_cmd_complete(sk, hdev->id,
7679 MGMT_OP_REMOVE_DEVICE,
7680 MGMT_STATUS_INVALID_PARAMS,
7681 &cp->addr, sizeof(cp->addr));
7682 goto unlock;
7683 }
7684
7685 if (cp->addr.type == BDADDR_BREDR) {
7686 err = hci_bdaddr_list_del(&hdev->accept_list,
7687 &cp->addr.bdaddr,
7688 cp->addr.type);
7689 if (err) {
7690 err = mgmt_cmd_complete(sk, hdev->id,
7691 MGMT_OP_REMOVE_DEVICE,
7692 MGMT_STATUS_INVALID_PARAMS,
7693 &cp->addr,
7694 sizeof(cp->addr));
7695 goto unlock;
7696 }
7697
7698 hci_update_scan(hdev);
7699
7700 device_removed(sk, hdev, &cp->addr.bdaddr,
7701 cp->addr.type);
7702 goto complete;
7703 }
7704
7705 addr_type = le_addr_type(cp->addr.type);
7706
7707 		/* The kernel internally uses conn_params with a resolvable
7708 		 * private address, but Remove Device allows only identity
7709 		 * addresses. Make sure this is enforced before calling
7710 		 * hci_conn_params_lookup.
7711 		 */
7712 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7713 err = mgmt_cmd_complete(sk, hdev->id,
7714 MGMT_OP_REMOVE_DEVICE,
7715 MGMT_STATUS_INVALID_PARAMS,
7716 &cp->addr, sizeof(cp->addr));
7717 goto unlock;
7718 }
7719
7720 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7721 addr_type);
7722 if (!params) {
7723 err = mgmt_cmd_complete(sk, hdev->id,
7724 MGMT_OP_REMOVE_DEVICE,
7725 MGMT_STATUS_INVALID_PARAMS,
7726 &cp->addr, sizeof(cp->addr));
7727 goto unlock;
7728 }
7729
7730 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7731 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7732 err = mgmt_cmd_complete(sk, hdev->id,
7733 MGMT_OP_REMOVE_DEVICE,
7734 MGMT_STATUS_INVALID_PARAMS,
7735 &cp->addr, sizeof(cp->addr));
7736 goto unlock;
7737 }
7738
7739 hci_conn_params_free(params);
7740
7741 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
7742 } else {
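		/* BDADDR_ANY (with address type 0) wipes the whole accept
		 * list and every LE connection parameter that isn't marked
		 * disabled.
		 */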
7743 struct hci_conn_params *p, *tmp;
7744 struct bdaddr_list *b, *btmp;
7745
7746 if (cp->addr.type) {
7747 err = mgmt_cmd_complete(sk, hdev->id,
7748 MGMT_OP_REMOVE_DEVICE,
7749 MGMT_STATUS_INVALID_PARAMS,
7750 &cp->addr, sizeof(cp->addr));
7751 goto unlock;
7752 }
7753
7754 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7755 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7756 list_del(&b->list);
7757 kfree(b);
7758 }
7759
7760 hci_update_scan(hdev);
7761
7762 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7763 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7764 continue;
7765 device_removed(sk, hdev, &p->addr, p->addr_type);
7766 if (p->explicit_connect) {
7767 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7768 continue;
7769 }
7770 hci_conn_params_free(p);
7771 }
7772
7773 bt_dev_dbg(hdev, "All LE connection parameters were removed");
7774 }
7775
7776 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7777
7778 complete:
7779 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7780 MGMT_STATUS_SUCCESS, &cp->addr,
7781 sizeof(cp->addr));
7782 unlock:
7783 hci_dev_unlock(hdev);
7784 return err;
7785 }
7786
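/* Runs on the cmd_sync queue: the connection may have gone away by the
 * time this executes, in which case -ECANCELED skips the update.
 */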
7787 static int conn_update_sync(struct hci_dev *hdev, void *data)
7788 {
7789 struct hci_conn_params *params = data;
7790 struct hci_conn *conn;
7791
7792 	conn = hci_conn_hash_lookup_le(hdev, &params->addr, params->addr_type);
7793 if (!conn)
7794 return -ECANCELED;
7795
7796 return hci_le_conn_update_sync(hdev, conn, params);
7797 }
7798
7799 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7800 u16 len)
7801 {
7802 struct mgmt_cp_load_conn_param *cp = data;
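	/* Largest param_count for which the expected command length still
	 * fits in the u16 expected_len used below.
	 */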
7803 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7804 sizeof(struct mgmt_conn_param));
7805 u16 param_count, expected_len;
7806 int i;
7807
7808 if (!lmp_le_capable(hdev))
7809 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7810 MGMT_STATUS_NOT_SUPPORTED);
7811
7812 param_count = __le16_to_cpu(cp->param_count);
7813 if (param_count > max_param_count) {
7814 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7815 param_count);
7816 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7817 MGMT_STATUS_INVALID_PARAMS);
7818 }
7819
7820 expected_len = struct_size(cp, params, param_count);
7821 if (expected_len != len) {
7822 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7823 expected_len, len);
7824 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7825 MGMT_STATUS_INVALID_PARAMS);
7826 }
7827
7828 bt_dev_dbg(hdev, "param_count %u", param_count);
7829
7830 hci_dev_lock(hdev);
7831
7832 if (param_count > 1)
7833 hci_conn_params_clear_disabled(hdev);
7834
7835 for (i = 0; i < param_count; i++) {
7836 struct mgmt_conn_param *param = &cp->params[i];
7837 struct hci_conn_params *hci_param;
7838 u16 min, max, latency, timeout;
7839 bool update = false;
7840 u8 addr_type;
7841
7842 bt_dev_dbg(hdev, "Adding %pMR (type %u)", ¶m->addr.bdaddr,
7843 param->addr.type);
7844
7845 if (param->addr.type == BDADDR_LE_PUBLIC) {
7846 addr_type = ADDR_LE_DEV_PUBLIC;
7847 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7848 addr_type = ADDR_LE_DEV_RANDOM;
7849 } else {
7850 bt_dev_err(hdev, "ignoring invalid connection parameters");
7851 continue;
7852 }
7853
7854 min = le16_to_cpu(param->min_interval);
7855 max = le16_to_cpu(param->max_interval);
7856 latency = le16_to_cpu(param->latency);
7857 timeout = le16_to_cpu(param->timeout);
7858
7859 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7860 min, max, latency, timeout);
7861
7862 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7863 bt_dev_err(hdev, "ignoring invalid connection parameters");
7864 continue;
7865 }
7866
7867 		/* Detect when the load is for a single existing parameter and,
7868 		 * if so, attempt to trigger the connection update procedure.
7869 		 */
7870 if (!i && param_count == 1) {
7871 hci_param = hci_conn_params_lookup(hdev,
7872 							   &param->addr.bdaddr,
7873 addr_type);
7874 if (hci_param)
7875 update = true;
7876 else
7877 hci_conn_params_clear_disabled(hdev);
7878 }
7879
7880 		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7881 addr_type);
7882 if (!hci_param) {
7883 bt_dev_err(hdev, "failed to add connection parameters");
7884 continue;
7885 }
7886
7887 hci_param->conn_min_interval = min;
7888 hci_param->conn_max_interval = max;
7889 hci_param->conn_latency = latency;
7890 hci_param->supervision_timeout = timeout;
7891
7892 /* Check if we need to trigger a connection update */
7893 if (update) {
7894 struct hci_conn *conn;
7895
7896 			/* Look up an existing connection where we are central
7897 			 * and, if the parameters don't match, trigger a
7898 			 * connection update.
7899 			 */
7900 conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr,
7901 addr_type);
7902 if (conn && conn->role == HCI_ROLE_MASTER &&
7903 (conn->le_conn_min_interval != min ||
7904 conn->le_conn_max_interval != max ||
7905 conn->le_conn_latency != latency ||
7906 conn->le_supv_timeout != timeout))
7907 hci_cmd_sync_queue(hdev, conn_update_sync,
7908 hci_param, NULL);
7909 }
7910 }
7911
7912 hci_dev_unlock(hdev);
7913
7914 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
7915 NULL, 0);
7916 }
7917
7918 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7919 void *data, u16 len)
7920 {
7921 struct mgmt_cp_set_external_config *cp = data;
7922 bool changed;
7923 int err;
7924
7925 bt_dev_dbg(hdev, "sock %p", sk);
7926
7927 if (hdev_is_powered(hdev))
7928 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7929 MGMT_STATUS_REJECTED);
7930
7931 if (cp->config != 0x00 && cp->config != 0x01)
7932 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7933 MGMT_STATUS_INVALID_PARAMS);
7934
7935 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7936 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7937 MGMT_STATUS_NOT_SUPPORTED);
7938
7939 hci_dev_lock(hdev);
7940
7941 if (cp->config)
7942 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7943 else
7944 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7945
7946 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7947 if (err < 0)
7948 goto unlock;
7949
7950 if (!changed)
7951 goto unlock;
7952
7953 err = new_options(hdev, sk);
7954
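	/* If the UNCONFIGURED flag no longer matches the real configuration
	 * state, the index changes type: remove it and re-add it either as
	 * a configured controller (triggering power on) or as a raw,
	 * unconfigured one.
	 */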
7955 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7956 mgmt_index_removed(hdev);
7957
7958 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7959 hci_dev_set_flag(hdev, HCI_CONFIG);
7960 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7961
7962 queue_work(hdev->req_workqueue, &hdev->power_on);
7963 } else {
7964 set_bit(HCI_RAW, &hdev->flags);
7965 mgmt_index_added(hdev);
7966 }
7967 }
7968
7969 unlock:
7970 hci_dev_unlock(hdev);
7971 return err;
7972 }
7973
7974 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
7975 void *data, u16 len)
7976 {
7977 struct mgmt_cp_set_public_address *cp = data;
7978 bool changed;
7979 int err;
7980
7981 bt_dev_dbg(hdev, "sock %p", sk);
7982
7983 if (hdev_is_powered(hdev))
7984 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7985 MGMT_STATUS_REJECTED);
7986
7987 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
7988 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7989 MGMT_STATUS_INVALID_PARAMS);
7990
7991 if (!hdev->set_bdaddr)
7992 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7993 MGMT_STATUS_NOT_SUPPORTED);
7994
7995 hci_dev_lock(hdev);
7996
7997 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
7998 bacpy(&hdev->public_addr, &cp->bdaddr);
7999
8000 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
8001 if (err < 0)
8002 goto unlock;
8003
8004 if (!changed)
8005 goto unlock;
8006
8007 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
8008 err = new_options(hdev, sk);
8009
8010 if (is_configured(hdev)) {
8011 mgmt_index_removed(hdev);
8012
8013 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8014
8015 hci_dev_set_flag(hdev, HCI_CONFIG);
8016 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8017
8018 queue_work(hdev->req_workqueue, &hdev->power_on);
8019 }
8020
8021 unlock:
8022 hci_dev_unlock(hdev);
8023 return err;
8024 }
8025
8026 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
8027 int err)
8028 {
8029 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8030 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8031 u8 *h192, *r192, *h256, *r256;
8032 struct mgmt_pending_cmd *cmd = data;
8033 struct sk_buff *skb = cmd->skb;
8034 u8 status = mgmt_status(err);
8035 u16 eir_len;
8036
8037 if (err == -ECANCELED ||
8038 cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
8039 return;
8040
8041 if (!status) {
8042 if (!skb)
8043 status = MGMT_STATUS_FAILED;
8044 else if (IS_ERR(skb))
8045 status = mgmt_status(PTR_ERR(skb));
8046 else
8047 status = mgmt_status(skb->data[0]);
8048 }
8049
8050 bt_dev_dbg(hdev, "status %u", status);
8051
8052 mgmt_cp = cmd->param;
8053
8054 if (status) {
8055 status = mgmt_status(status);
8056 eir_len = 0;
8057
8058 h192 = NULL;
8059 r192 = NULL;
8060 h256 = NULL;
8061 r256 = NULL;
8062 } else if (!bredr_sc_enabled(hdev)) {
8063 struct hci_rp_read_local_oob_data *rp;
8064
8065 if (skb->len != sizeof(*rp)) {
8066 status = MGMT_STATUS_FAILED;
8067 eir_len = 0;
8068 } else {
8069 status = MGMT_STATUS_SUCCESS;
8070 rp = (void *)skb->data;
8071
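			/* CoD (3 bytes + 2-byte header) plus the C-192 hash
			 * and R-192 randomizer (16 bytes + 2-byte header each).
			 */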
8072 eir_len = 5 + 18 + 18;
8073 h192 = rp->hash;
8074 r192 = rp->rand;
8075 h256 = NULL;
8076 r256 = NULL;
8077 }
8078 } else {
8079 struct hci_rp_read_local_oob_ext_data *rp;
8080
8081 if (skb->len != sizeof(*rp)) {
8082 status = MGMT_STATUS_FAILED;
8083 eir_len = 0;
8084 } else {
8085 status = MGMT_STATUS_SUCCESS;
8086 rp = (void *)skb->data;
8087
8088 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8089 eir_len = 5 + 18 + 18;
8090 h192 = NULL;
8091 r192 = NULL;
8092 } else {
8093 eir_len = 5 + 18 + 18 + 18 + 18;
8094 h192 = rp->hash192;
8095 r192 = rp->rand192;
8096 }
8097
8098 h256 = rp->hash256;
8099 r256 = rp->rand256;
8100 }
8101 }
8102
8103 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8104 if (!mgmt_rp)
8105 goto done;
8106
8107 if (eir_len == 0)
8108 goto send_rsp;
8109
8110 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8111 hdev->dev_class, 3);
8112
8113 if (h192 && r192) {
8114 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8115 EIR_SSP_HASH_C192, h192, 16);
8116 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8117 EIR_SSP_RAND_R192, r192, 16);
8118 }
8119
8120 if (h256 && r256) {
8121 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8122 EIR_SSP_HASH_C256, h256, 16);
8123 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8124 EIR_SSP_RAND_R256, r256, 16);
8125 }
8126
8127 send_rsp:
8128 mgmt_rp->type = mgmt_cp->type;
8129 mgmt_rp->eir_len = cpu_to_le16(eir_len);
8130
8131 err = mgmt_cmd_complete(cmd->sk, hdev->id,
8132 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8133 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8134 if (err < 0 || status)
8135 goto done;
8136
8137 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8138
8139 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8140 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8141 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8142 done:
8143 if (skb && !IS_ERR(skb))
8144 kfree_skb(skb);
8145
8146 kfree(mgmt_rp);
8147 mgmt_pending_remove(cmd);
8148 }
8149
8150 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8151 struct mgmt_cp_read_local_oob_ext_data *cp)
8152 {
8153 struct mgmt_pending_cmd *cmd;
8154 int err;
8155
8156 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8157 cp, sizeof(*cp));
8158 if (!cmd)
8159 return -ENOMEM;
8160
8161 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8162 read_local_oob_ext_data_complete);
8163
8164 if (err < 0) {
8165 mgmt_pending_remove(cmd);
8166 return err;
8167 }
8168
8169 return 0;
8170 }
8171
8172 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8173 void *data, u16 data_len)
8174 {
8175 struct mgmt_cp_read_local_oob_ext_data *cp = data;
8176 struct mgmt_rp_read_local_oob_ext_data *rp;
8177 size_t rp_len;
8178 u16 eir_len;
8179 u8 status, flags, role, addr[7], hash[16], rand[16];
8180 int err;
8181
8182 bt_dev_dbg(hdev, "sock %p", sk);
8183
8184 if (hdev_is_powered(hdev)) {
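		/* Worst-case EIR payload per type: BR/EDR needs 5 bytes for
		 * Class of Device; LE needs 9 (address) + 3 (role) +
		 * 18 + 18 (SC confirm/random) + 3 (flags), each figure
		 * including the 2-byte length/type header.
		 */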
8185 switch (cp->type) {
8186 case BIT(BDADDR_BREDR):
8187 status = mgmt_bredr_support(hdev);
8188 if (status)
8189 eir_len = 0;
8190 else
8191 eir_len = 5;
8192 break;
8193 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8194 status = mgmt_le_support(hdev);
8195 if (status)
8196 eir_len = 0;
8197 else
8198 eir_len = 9 + 3 + 18 + 18 + 3;
8199 break;
8200 default:
8201 status = MGMT_STATUS_INVALID_PARAMS;
8202 eir_len = 0;
8203 break;
8204 }
8205 } else {
8206 status = MGMT_STATUS_NOT_POWERED;
8207 eir_len = 0;
8208 }
8209
8210 rp_len = sizeof(*rp) + eir_len;
8211 rp = kmalloc(rp_len, GFP_ATOMIC);
8212 if (!rp)
8213 return -ENOMEM;
8214
8215 if (!status && !lmp_ssp_capable(hdev)) {
8216 status = MGMT_STATUS_NOT_SUPPORTED;
8217 eir_len = 0;
8218 }
8219
8220 if (status)
8221 goto complete;
8222
8223 hci_dev_lock(hdev);
8224
8225 eir_len = 0;
8226 switch (cp->type) {
8227 case BIT(BDADDR_BREDR):
8228 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8229 err = read_local_ssp_oob_req(hdev, sk, cp);
8230 hci_dev_unlock(hdev);
8231 if (!err)
8232 goto done;
8233
8234 status = MGMT_STATUS_FAILED;
8235 goto complete;
8236 } else {
8237 eir_len = eir_append_data(rp->eir, eir_len,
8238 EIR_CLASS_OF_DEV,
8239 hdev->dev_class, 3);
8240 }
8241 break;
8242 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8243 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8244 smp_generate_oob(hdev, hash, rand) < 0) {
8245 hci_dev_unlock(hdev);
8246 status = MGMT_STATUS_FAILED;
8247 goto complete;
8248 }
8249
8250 /* This should return the active RPA, but since the RPA
8251 * is only programmed on demand, it is really hard to fill
8252 * this in at the moment. For now disallow retrieving
8253 * local out-of-band data when privacy is in use.
8254 *
8255 * Returning the identity address will not help here since
8256 * pairing happens before the identity resolving key is
8257 * known and thus the connection establishment happens
8258 * based on the RPA and not the identity address.
8259 */
8260 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8261 hci_dev_unlock(hdev);
8262 status = MGMT_STATUS_REJECTED;
8263 goto complete;
8264 }
8265
8266 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8267 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8268 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8269 bacmp(&hdev->static_addr, BDADDR_ANY))) {
8270 memcpy(addr, &hdev->static_addr, 6);
8271 addr[6] = 0x01;
8272 } else {
8273 memcpy(addr, &hdev->bdaddr, 6);
8274 addr[6] = 0x00;
8275 }
8276
8277 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8278 addr, sizeof(addr));
8279
8280 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8281 role = 0x02;
8282 else
8283 role = 0x01;
8284
8285 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8286 &role, sizeof(role));
8287
8288 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8289 eir_len = eir_append_data(rp->eir, eir_len,
8290 EIR_LE_SC_CONFIRM,
8291 hash, sizeof(hash));
8292
8293 eir_len = eir_append_data(rp->eir, eir_len,
8294 EIR_LE_SC_RANDOM,
8295 rand, sizeof(rand));
8296 }
8297
8298 flags = mgmt_get_adv_discov_flags(hdev);
8299
8300 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8301 flags |= LE_AD_NO_BREDR;
8302
8303 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8304 &flags, sizeof(flags));
8305 break;
8306 }
8307
8308 hci_dev_unlock(hdev);
8309
8310 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8311
8312 status = MGMT_STATUS_SUCCESS;
8313
8314 complete:
8315 rp->type = cp->type;
8316 rp->eir_len = cpu_to_le16(eir_len);
8317
8318 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8319 status, rp, sizeof(*rp) + eir_len);
8320 if (err < 0 || status)
8321 goto done;
8322
8323 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8324 rp, sizeof(*rp) + eir_len,
8325 HCI_MGMT_OOB_DATA_EVENTS, sk);
8326
8327 done:
8328 kfree(rp);
8329
8330 return err;
8331 }
8332
8333 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8334 {
8335 u32 flags = 0;
8336
8337 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8338 flags |= MGMT_ADV_FLAG_DISCOV;
8339 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8340 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8341 flags |= MGMT_ADV_FLAG_APPEARANCE;
8342 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8343 flags |= MGMT_ADV_PARAM_DURATION;
8344 flags |= MGMT_ADV_PARAM_TIMEOUT;
8345 flags |= MGMT_ADV_PARAM_INTERVALS;
8346 flags |= MGMT_ADV_PARAM_TX_POWER;
8347 flags |= MGMT_ADV_PARAM_SCAN_RSP;
8348
8349 	/* With extended advertising, the TX_POWER returned from
8350 	 * Set Adv Param will always be valid.
8351 	 */
8352 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8353 flags |= MGMT_ADV_FLAG_TX_POWER;
8354
8355 if (ext_adv_capable(hdev)) {
8356 flags |= MGMT_ADV_FLAG_SEC_1M;
8357 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8358 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8359
8360 if (le_2m_capable(hdev))
8361 flags |= MGMT_ADV_FLAG_SEC_2M;
8362
8363 if (le_coded_capable(hdev))
8364 flags |= MGMT_ADV_FLAG_SEC_CODED;
8365 }
8366
8367 return flags;
8368 }
8369
8370 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8371 void *data, u16 data_len)
8372 {
8373 struct mgmt_rp_read_adv_features *rp;
8374 size_t rp_len;
8375 int err;
8376 struct adv_info *adv_instance;
8377 u32 supported_flags;
8378 u8 *instance;
8379
8380 bt_dev_dbg(hdev, "sock %p", sk);
8381
8382 if (!lmp_le_capable(hdev))
8383 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8384 MGMT_STATUS_REJECTED);
8385
8386 hci_dev_lock(hdev);
8387
8388 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8389 rp = kmalloc(rp_len, GFP_ATOMIC);
8390 if (!rp) {
8391 hci_dev_unlock(hdev);
8392 return -ENOMEM;
8393 }
8394
8395 supported_flags = get_supported_adv_flags(hdev);
8396
8397 rp->supported_flags = cpu_to_le32(supported_flags);
8398 rp->max_adv_data_len = max_adv_len(hdev);
8399 rp->max_scan_rsp_len = max_adv_len(hdev);
8400 rp->max_instances = hdev->le_num_of_adv_sets;
8401 rp->num_instances = hdev->adv_instance_cnt;
8402
8403 instance = rp->instance;
8404 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8405 /* Only instances 1-le_num_of_adv_sets are externally visible */
8406 if (adv_instance->instance <= hdev->adv_instance_cnt) {
8407 *instance = adv_instance->instance;
8408 instance++;
8409 } else {
8410 rp->num_instances--;
8411 rp_len--;
8412 }
8413 }
8414
8415 hci_dev_unlock(hdev);
8416
8417 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8418 MGMT_STATUS_SUCCESS, rp, rp_len);
8419
8420 kfree(rp);
8421
8422 return err;
8423 }
8424
8425 static u8 calculate_name_len(struct hci_dev *hdev)
8426 {
8427 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
8428
8429 return eir_append_local_name(hdev, buf, 0);
8430 }
8431
8432 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8433 bool is_adv_data)
8434 {
8435 u8 max_len = max_adv_len(hdev);
8436
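	/* Reserve room for the fields the kernel appends itself: Flags and
	 * TX power take 3 bytes each in the advertising data; the local
	 * name and the 4-byte Appearance field go in the scan response.
	 */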
8437 if (is_adv_data) {
8438 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8439 MGMT_ADV_FLAG_LIMITED_DISCOV |
8440 MGMT_ADV_FLAG_MANAGED_FLAGS))
8441 max_len -= 3;
8442
8443 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8444 max_len -= 3;
8445 } else {
8446 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8447 max_len -= calculate_name_len(hdev);
8448
8449 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8450 max_len -= 4;
8451 }
8452
8453 return max_len;
8454 }
8455
8456 static bool flags_managed(u32 adv_flags)
8457 {
8458 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8459 MGMT_ADV_FLAG_LIMITED_DISCOV |
8460 MGMT_ADV_FLAG_MANAGED_FLAGS);
8461 }
8462
8463 static bool tx_power_managed(u32 adv_flags)
8464 {
8465 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8466 }
8467
8468 static bool name_managed(u32 adv_flags)
8469 {
8470 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8471 }
8472
8473 static bool appearance_managed(u32 adv_flags)
8474 {
8475 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8476 }
8477
8478 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8479 u8 len, bool is_adv_data)
8480 {
8481 int i, cur_len;
8482 u8 max_len;
8483
8484 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8485
8486 if (len > max_len)
8487 return false;
8488
8489 /* Make sure that the data is correctly formatted. */
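	/* Each field is length-prefixed: data[i] holds the field length and
	 * data[i + 1] the AD type.
	 */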
8490 for (i = 0; i < len; i += (cur_len + 1)) {
8491 cur_len = data[i];
8492
8493 if (!cur_len)
8494 continue;
8495
8496 if (data[i + 1] == EIR_FLAGS &&
8497 (!is_adv_data || flags_managed(adv_flags)))
8498 return false;
8499
8500 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8501 return false;
8502
8503 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8504 return false;
8505
8506 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8507 return false;
8508
8509 if (data[i + 1] == EIR_APPEARANCE &&
8510 appearance_managed(adv_flags))
8511 return false;
8512
8513 /* If the current field length would exceed the total data
8514 * length, then it's invalid.
8515 */
8516 if (i + cur_len >= len)
8517 return false;
8518 }
8519
8520 return true;
8521 }
8522
8523 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8524 {
8525 u32 supported_flags, phy_flags;
8526
8527 /* The current implementation only supports a subset of the specified
8528 * flags. Also need to check mutual exclusiveness of sec flags.
8529 */
8530 supported_flags = get_supported_adv_flags(hdev);
8531 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
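	/* phy_flags & -phy_flags isolates the lowest set bit, so the XOR is
	 * non-zero exactly when more than one SEC PHY is requested.
	 */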
8532 if (adv_flags & ~supported_flags ||
8533 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8534 return false;
8535
8536 return true;
8537 }
8538
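/* Advertising changes are refused while a Set LE command is pending,
 * since toggling LE reconfigures the advertising state.
 */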
8539 static bool adv_busy(struct hci_dev *hdev)
8540 {
8541 return pending_find(MGMT_OP_SET_LE, hdev);
8542 }
8543
8544 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8545 int err)
8546 {
8547 struct adv_info *adv, *n;
8548
8549 bt_dev_dbg(hdev, "err %d", err);
8550
8551 hci_dev_lock(hdev);
8552
8553 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8554 u8 instance;
8555
8556 if (!adv->pending)
8557 continue;
8558
8559 if (!err) {
8560 adv->pending = false;
8561 continue;
8562 }
8563
8564 instance = adv->instance;
8565
8566 if (hdev->cur_adv_instance == instance)
8567 cancel_adv_timeout(hdev);
8568
8569 hci_remove_adv_instance(hdev, instance);
8570 mgmt_advertising_removed(sk, hdev, instance);
8571 }
8572
8573 hci_dev_unlock(hdev);
8574 }
8575
8576 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8577 {
8578 struct mgmt_pending_cmd *cmd = data;
8579 struct mgmt_cp_add_advertising *cp = cmd->param;
8580 struct mgmt_rp_add_advertising rp;
8581
8582 memset(&rp, 0, sizeof(rp));
8583
8584 rp.instance = cp->instance;
8585
8586 if (err)
8587 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8588 mgmt_status(err));
8589 else
8590 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8591 mgmt_status(err), &rp, sizeof(rp));
8592
8593 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8594
8595 mgmt_pending_free(cmd);
8596 }
8597
8598 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8599 {
8600 struct mgmt_pending_cmd *cmd = data;
8601 struct mgmt_cp_add_advertising *cp = cmd->param;
8602
8603 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8604 }
8605
8606 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8607 void *data, u16 data_len)
8608 {
8609 struct mgmt_cp_add_advertising *cp = data;
8610 struct mgmt_rp_add_advertising rp;
8611 u32 flags;
8612 u8 status;
8613 u16 timeout, duration;
8614 unsigned int prev_instance_cnt;
8615 u8 schedule_instance = 0;
8616 struct adv_info *adv, *next_instance;
8617 int err;
8618 struct mgmt_pending_cmd *cmd;
8619
8620 bt_dev_dbg(hdev, "sock %p", sk);
8621
8622 status = mgmt_le_support(hdev);
8623 if (status)
8624 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8625 status);
8626
8627 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8628 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8629 MGMT_STATUS_INVALID_PARAMS);
8630
8631 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8632 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8633 MGMT_STATUS_INVALID_PARAMS);
8634
8635 flags = __le32_to_cpu(cp->flags);
8636 timeout = __le16_to_cpu(cp->timeout);
8637 duration = __le16_to_cpu(cp->duration);
8638
8639 if (!requested_adv_flags_are_valid(hdev, flags))
8640 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8641 MGMT_STATUS_INVALID_PARAMS);
8642
8643 hci_dev_lock(hdev);
8644
8645 if (timeout && !hdev_is_powered(hdev)) {
8646 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8647 MGMT_STATUS_REJECTED);
8648 goto unlock;
8649 }
8650
8651 if (adv_busy(hdev)) {
8652 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8653 MGMT_STATUS_BUSY);
8654 goto unlock;
8655 }
8656
8657 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8658 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8659 cp->scan_rsp_len, false)) {
8660 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8661 MGMT_STATUS_INVALID_PARAMS);
8662 goto unlock;
8663 }
8664
8665 prev_instance_cnt = hdev->adv_instance_cnt;
8666
8667 adv = hci_add_adv_instance(hdev, cp->instance, flags,
8668 cp->adv_data_len, cp->data,
8669 cp->scan_rsp_len,
8670 cp->data + cp->adv_data_len,
8671 timeout, duration,
8672 HCI_ADV_TX_POWER_NO_PREFERENCE,
8673 hdev->le_adv_min_interval,
8674 hdev->le_adv_max_interval, 0);
8675 if (IS_ERR(adv)) {
8676 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8677 MGMT_STATUS_FAILED);
8678 goto unlock;
8679 }
8680
8681 /* Only trigger an advertising added event if a new instance was
8682 * actually added.
8683 */
8684 if (hdev->adv_instance_cnt > prev_instance_cnt)
8685 mgmt_advertising_added(sk, hdev, cp->instance);
8686
8687 if (hdev->cur_adv_instance == cp->instance) {
8688 /* If the currently advertised instance is being changed then
8689 * cancel the current advertising and schedule the next
8690 * instance. If there is only one instance then the overridden
8691 * advertising data will be visible right away.
8692 */
8693 cancel_adv_timeout(hdev);
8694
8695 next_instance = hci_get_next_instance(hdev, cp->instance);
8696 if (next_instance)
8697 schedule_instance = next_instance->instance;
8698 } else if (!hdev->adv_instance_timeout) {
8699 /* Immediately advertise the new instance if no other
8700 * instance is currently being advertised.
8701 */
8702 schedule_instance = cp->instance;
8703 }
8704
8705 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8706 * there is no instance to be advertised then we have no HCI
8707 * communication to make. Simply return.
8708 */
8709 if (!hdev_is_powered(hdev) ||
8710 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8711 !schedule_instance) {
8712 rp.instance = cp->instance;
8713 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8714 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8715 goto unlock;
8716 }
8717
8718 /* We're good to go, update advertising data, parameters, and start
8719 * advertising.
8720 */
8721 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
8722 data_len);
8723 if (!cmd) {
8724 err = -ENOMEM;
8725 goto unlock;
8726 }
8727
8728 cp->instance = schedule_instance;
8729
8730 err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8731 add_advertising_complete);
8732 if (err < 0)
8733 mgmt_pending_free(cmd);
8734
8735 unlock:
8736 hci_dev_unlock(hdev);
8737
8738 return err;
8739 }
8740
8741 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8742 int err)
8743 {
8744 struct mgmt_pending_cmd *cmd = data;
8745 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8746 struct mgmt_rp_add_ext_adv_params rp;
8747 struct adv_info *adv;
8748 u32 flags;
8749
8750 BT_DBG("%s", hdev->name);
8751
8752 hci_dev_lock(hdev);
8753
8754 adv = hci_find_adv_instance(hdev, cp->instance);
8755 if (!adv)
8756 goto unlock;
8757
8758 rp.instance = cp->instance;
8759 rp.tx_power = adv->tx_power;
8760
8761 /* While we're at it, inform userspace of the available space for this
8762 * advertisement, given the flags that will be used.
8763 */
8764 flags = __le32_to_cpu(cp->flags);
8765 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8766 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8767
8768 if (err) {
8769 		/* If this advertising instance was previously active and we
8770 		 * failed to update it, signal that it has been removed and
8771 		 * delete its structure.
8772 		 */
8773 if (!adv->pending)
8774 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8775
8776 hci_remove_adv_instance(hdev, cp->instance);
8777
8778 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8779 mgmt_status(err));
8780 } else {
8781 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8782 mgmt_status(err), &rp, sizeof(rp));
8783 }
8784
8785 unlock:
8786 mgmt_pending_free(cmd);
8787
8788 hci_dev_unlock(hdev);
8789 }
8790
8791 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8792 {
8793 struct mgmt_pending_cmd *cmd = data;
8794 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8795
8796 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8797 }
8798
8799 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8800 void *data, u16 data_len)
8801 {
8802 struct mgmt_cp_add_ext_adv_params *cp = data;
8803 struct mgmt_rp_add_ext_adv_params rp;
8804 struct mgmt_pending_cmd *cmd = NULL;
8805 struct adv_info *adv;
8806 u32 flags, min_interval, max_interval;
8807 u16 timeout, duration;
8808 u8 status;
8809 s8 tx_power;
8810 int err;
8811
8812 BT_DBG("%s", hdev->name);
8813
8814 status = mgmt_le_support(hdev);
8815 if (status)
8816 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8817 status);
8818
8819 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8820 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8821 MGMT_STATUS_INVALID_PARAMS);
8822
8823 	/* The purpose of breaking add_advertising into two separate MGMT calls
8824 	 * for params and data is to allow more parameters to be added to this
8825 	 * structure in the future. For this reason, we only verify that the
8826 	 * request contains at least the minimum structure size known when the
8827 	 * interface was defined. Any extra parameters we don't know about will
8828 	 * be ignored in this request.
8829 	 */
8829 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8830 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8831 MGMT_STATUS_INVALID_PARAMS);
8832
8833 flags = __le32_to_cpu(cp->flags);
8834
8835 if (!requested_adv_flags_are_valid(hdev, flags))
8836 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8837 MGMT_STATUS_INVALID_PARAMS);
8838
8839 hci_dev_lock(hdev);
8840
8841 	/* In the new interface, we require the controller to be powered in
8842 	 * order to register.
8843 	 */
8842 if (!hdev_is_powered(hdev)) {
8843 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8844 MGMT_STATUS_REJECTED);
8845 goto unlock;
8846 }
8847
8848 if (adv_busy(hdev)) {
8849 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8850 MGMT_STATUS_BUSY);
8851 goto unlock;
8852 }
8853
8854 /* Parse defined parameters from request, use defaults otherwise */
8855 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8856 __le16_to_cpu(cp->timeout) : 0;
8857
8858 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8859 __le16_to_cpu(cp->duration) :
8860 hdev->def_multi_adv_rotation_duration;
8861
8862 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8863 __le32_to_cpu(cp->min_interval) :
8864 hdev->le_adv_min_interval;
8865
8866 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8867 __le32_to_cpu(cp->max_interval) :
8868 hdev->le_adv_max_interval;
8869
8870 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8871 cp->tx_power :
8872 HCI_ADV_TX_POWER_NO_PREFERENCE;
8873
8874 /* Create advertising instance with no advertising or response data */
8875 adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
8876 timeout, duration, tx_power, min_interval,
8877 max_interval, 0);
8878
8879 if (IS_ERR(adv)) {
8880 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8881 MGMT_STATUS_FAILED);
8882 goto unlock;
8883 }
8884
8885 /* Submit request for advertising params if ext adv available */
8886 if (ext_adv_capable(hdev)) {
8887 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8888 data, data_len);
8889 if (!cmd) {
8890 err = -ENOMEM;
8891 hci_remove_adv_instance(hdev, cp->instance);
8892 goto unlock;
8893 }
8894
8895 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8896 add_ext_adv_params_complete);
8897 if (err < 0)
8898 mgmt_pending_free(cmd);
8899 } else {
8900 rp.instance = cp->instance;
8901 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8902 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8903 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8904 err = mgmt_cmd_complete(sk, hdev->id,
8905 MGMT_OP_ADD_EXT_ADV_PARAMS,
8906 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8907 }
8908
8909 unlock:
8910 hci_dev_unlock(hdev);
8911
8912 return err;
8913 }
8914
8915 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8916 {
8917 struct mgmt_pending_cmd *cmd = data;
8918 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8919 struct mgmt_rp_add_advertising rp;
8920
8921 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8922
8923 memset(&rp, 0, sizeof(rp));
8924
8925 rp.instance = cp->instance;
8926
8927 if (err)
8928 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8929 mgmt_status(err));
8930 else
8931 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8932 mgmt_status(err), &rp, sizeof(rp));
8933
8934 mgmt_pending_free(cmd);
8935 }
8936
8937 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8938 {
8939 struct mgmt_pending_cmd *cmd = data;
8940 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8941 int err;
8942
8943 if (ext_adv_capable(hdev)) {
8944 err = hci_update_adv_data_sync(hdev, cp->instance);
8945 if (err)
8946 return err;
8947
8948 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8949 if (err)
8950 return err;
8951
8952 return hci_enable_ext_advertising_sync(hdev, cp->instance);
8953 }
8954
8955 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8956 }
8957
8958 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
8959 u16 data_len)
8960 {
8961 struct mgmt_cp_add_ext_adv_data *cp = data;
8962 struct mgmt_rp_add_ext_adv_data rp;
8963 u8 schedule_instance = 0;
8964 struct adv_info *next_instance;
8965 struct adv_info *adv_instance;
8966 int err = 0;
8967 struct mgmt_pending_cmd *cmd;
8968
8969 BT_DBG("%s", hdev->name);
8970
8971 hci_dev_lock(hdev);
8972
8973 adv_instance = hci_find_adv_instance(hdev, cp->instance);
8974
8975 if (!adv_instance) {
8976 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8977 MGMT_STATUS_INVALID_PARAMS);
8978 goto unlock;
8979 }
8980
8981 	/* In the new interface, we require the controller to be powered in
8982 	 * order to register.
8983 	 */
8982 if (!hdev_is_powered(hdev)) {
8983 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8984 MGMT_STATUS_REJECTED);
8985 goto clear_new_instance;
8986 }
8987
8988 if (adv_busy(hdev)) {
8989 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8990 MGMT_STATUS_BUSY);
8991 goto clear_new_instance;
8992 }
8993
8994 /* Validate new data */
8995 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
8996 cp->adv_data_len, true) ||
8997 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
8998 cp->adv_data_len, cp->scan_rsp_len, false)) {
8999 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9000 MGMT_STATUS_INVALID_PARAMS);
9001 goto clear_new_instance;
9002 }
9003
9004 /* Set the data in the advertising instance */
9005 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
9006 cp->data, cp->scan_rsp_len,
9007 cp->data + cp->adv_data_len);
9008
9009 /* If using software rotation, determine next instance to use */
9010 if (hdev->cur_adv_instance == cp->instance) {
9011 /* If the currently advertised instance is being changed
9012 * then cancel the current advertising and schedule the
9013 * next instance. If there is only one instance then the
9014 * overridden advertising data will be visible right
9015 		 * away.
9016 */
9017 cancel_adv_timeout(hdev);
9018
9019 next_instance = hci_get_next_instance(hdev, cp->instance);
9020 if (next_instance)
9021 schedule_instance = next_instance->instance;
9022 } else if (!hdev->adv_instance_timeout) {
9023 /* Immediately advertise the new instance if no other
9024 * instance is currently being advertised.
9025 */
9026 schedule_instance = cp->instance;
9027 }
9028
9029 /* If the HCI_ADVERTISING flag is set or there is no instance to
9030 * be advertised then we have no HCI communication to make.
9031 * Simply return.
9032 */
9033 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
9034 if (adv_instance->pending) {
9035 mgmt_advertising_added(sk, hdev, cp->instance);
9036 adv_instance->pending = false;
9037 }
9038 rp.instance = cp->instance;
9039 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9040 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9041 goto unlock;
9042 }
9043
9044 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
9045 data_len);
9046 if (!cmd) {
9047 err = -ENOMEM;
9048 goto clear_new_instance;
9049 }
9050
9051 err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
9052 add_ext_adv_data_complete);
9053 if (err < 0) {
9054 mgmt_pending_free(cmd);
9055 goto clear_new_instance;
9056 }
9057
9058 /* We were successful in updating data, so trigger advertising_added
9059 * event if this is an instance that wasn't previously advertising. If
9060 * a failure occurs in the requests we initiated, we will remove the
9061 * instance again in add_advertising_complete
9062 */
9063 if (adv_instance->pending)
9064 mgmt_advertising_added(sk, hdev, cp->instance);
9065
9066 goto unlock;
9067
9068 clear_new_instance:
9069 hci_remove_adv_instance(hdev, cp->instance);
9070
9071 unlock:
9072 hci_dev_unlock(hdev);
9073
9074 return err;
9075 }
9076
9077 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9078 int err)
9079 {
9080 struct mgmt_pending_cmd *cmd = data;
9081 struct mgmt_cp_remove_advertising *cp = cmd->param;
9082 struct mgmt_rp_remove_advertising rp;
9083
9084 bt_dev_dbg(hdev, "err %d", err);
9085
9086 memset(&rp, 0, sizeof(rp));
9087 rp.instance = cp->instance;
9088
9089 if (err)
9090 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9091 mgmt_status(err));
9092 else
9093 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9094 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9095
9096 mgmt_pending_free(cmd);
9097 }
9098
9099 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9100 {
9101 struct mgmt_pending_cmd *cmd = data;
9102 struct mgmt_cp_remove_advertising *cp = cmd->param;
9103 int err;
9104
9105 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9106 if (err)
9107 return err;
9108
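	/* If that was the last remaining instance, turn advertising off
	 * entirely.
	 */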
9109 if (list_empty(&hdev->adv_instances))
9110 err = hci_disable_advertising_sync(hdev);
9111
9112 return err;
9113 }
9114
9115 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9116 void *data, u16 data_len)
9117 {
9118 struct mgmt_cp_remove_advertising *cp = data;
9119 struct mgmt_pending_cmd *cmd;
9120 int err;
9121
9122 bt_dev_dbg(hdev, "sock %p", sk);
9123
9124 hci_dev_lock(hdev);
9125
9126 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
9127 err = mgmt_cmd_status(sk, hdev->id,
9128 MGMT_OP_REMOVE_ADVERTISING,
9129 MGMT_STATUS_INVALID_PARAMS);
9130 goto unlock;
9131 }
9132
9133 if (pending_find(MGMT_OP_SET_LE, hdev)) {
9134 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9135 MGMT_STATUS_BUSY);
9136 goto unlock;
9137 }
9138
9139 if (list_empty(&hdev->adv_instances)) {
9140 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9141 MGMT_STATUS_INVALID_PARAMS);
9142 goto unlock;
9143 }
9144
9145 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
9146 data_len);
9147 if (!cmd) {
9148 err = -ENOMEM;
9149 goto unlock;
9150 }
9151
9152 err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
9153 remove_advertising_complete);
9154 if (err < 0)
9155 mgmt_pending_free(cmd);
9156
9157 unlock:
9158 hci_dev_unlock(hdev);
9159
9160 return err;
9161 }
9162
9163 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9164 void *data, u16 data_len)
9165 {
9166 struct mgmt_cp_get_adv_size_info *cp = data;
9167 struct mgmt_rp_get_adv_size_info rp;
9168 u32 flags, supported_flags;
9169
9170 bt_dev_dbg(hdev, "sock %p", sk);
9171
9172 if (!lmp_le_capable(hdev))
9173 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9174 MGMT_STATUS_REJECTED);
9175
9176 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9177 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9178 MGMT_STATUS_INVALID_PARAMS);
9179
9180 flags = __le32_to_cpu(cp->flags);
9181
9182 /* The current implementation only supports a subset of the specified
9183 * flags.
9184 */
9185 supported_flags = get_supported_adv_flags(hdev);
9186 if (flags & ~supported_flags)
9187 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9188 MGMT_STATUS_INVALID_PARAMS);
9189
9190 rp.instance = cp->instance;
9191 rp.flags = cp->flags;
9192 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9193 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9194
9195 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9196 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9197 }
9198
9199 static const struct hci_mgmt_handler mgmt_handlers[] = {
9200 { NULL }, /* 0x0000 (no command) */
9201 { read_version, MGMT_READ_VERSION_SIZE,
9202 HCI_MGMT_NO_HDEV |
9203 HCI_MGMT_UNTRUSTED },
9204 { read_commands, MGMT_READ_COMMANDS_SIZE,
9205 HCI_MGMT_NO_HDEV |
9206 HCI_MGMT_UNTRUSTED },
9207 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
9208 HCI_MGMT_NO_HDEV |
9209 HCI_MGMT_UNTRUSTED },
9210 { read_controller_info, MGMT_READ_INFO_SIZE,
9211 HCI_MGMT_UNTRUSTED },
9212 { set_powered, MGMT_SETTING_SIZE },
9213 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
9214 { set_connectable, MGMT_SETTING_SIZE },
9215 { set_fast_connectable, MGMT_SETTING_SIZE },
9216 { set_bondable, MGMT_SETTING_SIZE },
9217 { set_link_security, MGMT_SETTING_SIZE },
9218 { set_ssp, MGMT_SETTING_SIZE },
9219 { set_hs, MGMT_SETTING_SIZE },
9220 { set_le, MGMT_SETTING_SIZE },
9221 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
9222 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
9223 { add_uuid, MGMT_ADD_UUID_SIZE },
9224 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
9225 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
9226 HCI_MGMT_VAR_LEN },
9227 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
9228 HCI_MGMT_VAR_LEN },
9229 { disconnect, MGMT_DISCONNECT_SIZE },
9230 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
9231 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
9232 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
9233 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
9234 { pair_device, MGMT_PAIR_DEVICE_SIZE },
9235 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
9236 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
9237 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
9238 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
9239 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
9240 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
9241 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
9242 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
9243 HCI_MGMT_VAR_LEN },
9244 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
9245 { start_discovery, MGMT_START_DISCOVERY_SIZE },
9246 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
9247 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
9248 { block_device, MGMT_BLOCK_DEVICE_SIZE },
9249 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
9250 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
9251 { set_advertising, MGMT_SETTING_SIZE },
9252 { set_bredr, MGMT_SETTING_SIZE },
9253 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
9254 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
9255 { set_secure_conn, MGMT_SETTING_SIZE },
9256 { set_debug_keys, MGMT_SETTING_SIZE },
9257 { set_privacy, MGMT_SET_PRIVACY_SIZE },
9258 { load_irks, MGMT_LOAD_IRKS_SIZE,
9259 HCI_MGMT_VAR_LEN },
9260 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
9261 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
9262 { add_device, MGMT_ADD_DEVICE_SIZE },
9263 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
9264 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
9265 HCI_MGMT_VAR_LEN },
9266 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
9267 HCI_MGMT_NO_HDEV |
9268 HCI_MGMT_UNTRUSTED },
9269 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
9270 HCI_MGMT_UNCONFIGURED |
9271 HCI_MGMT_UNTRUSTED },
9272 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
9273 HCI_MGMT_UNCONFIGURED },
9274 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
9275 HCI_MGMT_UNCONFIGURED },
9276 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
9277 HCI_MGMT_VAR_LEN },
9278 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
9279 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
9280 HCI_MGMT_NO_HDEV |
9281 HCI_MGMT_UNTRUSTED },
9282 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
9283 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
9284 HCI_MGMT_VAR_LEN },
9285 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
9286 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
9287 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
9288 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
9289 HCI_MGMT_UNTRUSTED },
9290 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
9291 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
9292 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
9293 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
9294 HCI_MGMT_VAR_LEN },
9295 { set_wideband_speech, MGMT_SETTING_SIZE },
9296 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
9297 HCI_MGMT_UNTRUSTED },
9298 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
9299 HCI_MGMT_UNTRUSTED |
9300 HCI_MGMT_HDEV_OPTIONAL },
9301 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
9302 HCI_MGMT_VAR_LEN |
9303 HCI_MGMT_HDEV_OPTIONAL },
9304 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
9305 HCI_MGMT_UNTRUSTED },
9306 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
9307 HCI_MGMT_VAR_LEN },
9308 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
9309 HCI_MGMT_UNTRUSTED },
9310 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
9311 HCI_MGMT_VAR_LEN },
9312 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
9313 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
9314 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
9315 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
9316 HCI_MGMT_VAR_LEN },
9317 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
9318 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
9319 HCI_MGMT_VAR_LEN },
9320 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
9321 HCI_MGMT_VAR_LEN },
9322 { add_adv_patterns_monitor_rssi,
9323 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
9324 HCI_MGMT_VAR_LEN },
9325 { set_mesh, MGMT_SET_MESH_RECEIVER_SIZE,
9326 HCI_MGMT_VAR_LEN },
9327 { mesh_features, MGMT_MESH_READ_FEATURES_SIZE },
9328 { mesh_send, MGMT_MESH_SEND_SIZE,
9329 HCI_MGMT_VAR_LEN },
9330 { mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE },
9331 { mgmt_hci_cmd_sync, MGMT_HCI_CMD_SYNC_SIZE, HCI_MGMT_VAR_LEN },
9332 };
9333
9334 void mgmt_index_added(struct hci_dev *hdev)
9335 {
9336 struct mgmt_ev_ext_index ev;
9337
9338 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9339 return;
9340
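	/* In the Extended Index event, type 0x01 flags the controller as
	 * unconfigured, 0x00 as a regular (configured) one.
	 */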
9341 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9342 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
9343 HCI_MGMT_UNCONF_INDEX_EVENTS);
9344 ev.type = 0x01;
9345 } else {
9346 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9347 HCI_MGMT_INDEX_EVENTS);
9348 ev.type = 0x00;
9349 }
9350
9351 ev.bus = hdev->bus;
9352
9353 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9354 HCI_MGMT_EXT_INDEX_EVENTS);
9355 }
9356
9357 void mgmt_index_removed(struct hci_dev *hdev)
9358 {
9359 struct mgmt_ev_ext_index ev;
9360 struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };
9361
9362 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9363 return;
9364
9365 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);
9366
9367 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9368 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
9369 HCI_MGMT_UNCONF_INDEX_EVENTS);
9370 ev.type = 0x01;
9371 } else {
9372 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9373 HCI_MGMT_INDEX_EVENTS);
9374 ev.type = 0x00;
9375 }
9376
9377 ev.bus = hdev->bus;
9378
9379 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9380 HCI_MGMT_EXT_INDEX_EVENTS);
9381
9382 /* Cancel any remaining timed work */
9383 if (!hci_dev_test_flag(hdev, HCI_MGMT))
9384 return;
9385 cancel_delayed_work_sync(&hdev->discov_off);
9386 cancel_delayed_work_sync(&hdev->service_cache);
9387 cancel_delayed_work_sync(&hdev->rpa_expired);
9388 }
9389
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}

void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration, use the
	 * appropriate INVALID_INDEX status. Otherwise use NOT_POWERED.
	 * We cover both scenarios here since later in mgmt_index_removed()
	 * any hci_conn callbacks will have already been triggered,
	 * potentially causing misleading DISCONNECTED status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		match.mgmt_status = MGMT_STATUS_INVALID_INDEX;
	else
		match.mgmt_status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct mgmt_pending_cmd *cmd;
	u8 status;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}

void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}

static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_RESPONDER:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}

	return MGMT_LTK_UNAUTHENTICATED;
}

void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't need their
	 * long term keys stored. Their addresses will change the next
	 * time around.
	 *
	 * Only store the long term key when the remote device provides
	 * an identity address. If the remote identity is known, the
	 * long term keys are internally mapped to the identity address.
	 * So allow static random and public addresses here.
	 */
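	/* Note on the check below: the two most significant bits of a
	 * random device address encode its sub-type (Core Spec Vol 6,
	 * Part B, Sec. 1.3.2): 0b11 static random, 0b10 resolvable
	 * private, 0b00 non-resolvable private. So (b[5] & 0xc0) == 0xc0
	 * matches only static random addresses, which are valid identity
	 * addresses.
	 */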
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't need their
	 * signature resolving keys stored. Their addresses will change
	 * the next time around.
	 *
	 * Only store the signature resolving key when the remote device
	 * provides an identity address. So allow static random and
	 * public addresses here.
	 */
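	/* Same identity-address screening as in mgmt_new_ltk(): only
	 * static random addresses (top two bits 0b11) and public
	 * addresses qualify.
	 */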
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

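	/* Connection parameters can only be stored against an identity
	 * address (public or static random), so updates for other
	 * (private) addresses are not forwarded to userspace.
	 */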
	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

	/* Allocate a buffer for the LE advertising data or the BR/EDR
	 * EIR data (name and class of device).
	 */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) +
				     (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE,
						    name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class,
						    sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}

static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}

bool mgmt_powering_down(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

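	/* Power-down is in progress either when the flag is already set
	 * or when a pending Set Powered command is switching the
	 * controller off (cp->val == 0x00).
	 */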
	if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
		return true;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return false;

	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}

void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);
}

void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
{
	struct mgmt_ev_connect_failed ev;

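	/* If userspace was already notified of the connection, report the
	 * failure as a disconnection rather than as a failed connection
	 * attempt.
	 */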
	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		mgmt_device_disconnected(hdev, &conn->dst, conn->type,
					 conn->dst_type, status, true);
		return;
	}

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}

int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, HCI_MGMT_DEV_CLASS_EVENTS,
				   NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is an HCI command related to powering the
		 * HCI dev on or off, don't send any mgmt signals.
		 */
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
			return;

		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}

static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}

static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

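	/* EIR/AD payloads are a sequence of length-prefixed fields: one
	 * length octet (covering the AD type octet plus the data), one AD
	 * type octet, then the data itself. 16-bit and 32-bit UUIDs are
	 * widened to 128 bits by patching them into the Bluetooth Base
	 * UUID (0000xxxx-0000-1000-8000-00805F9B34FB) before comparing
	 * them against the filter list.
	 */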
	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}

static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If an RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results
	 * with an RSSI smaller than the RSSI threshold will be dropped. If
	 * the quirk is set, let it through for further processing, as we
	 * might need to restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}

void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	ev.monitor_handle = cpu_to_le16(handle);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}

static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}

static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitors
	 *
	 * For cases 1 and 2, report all advertisements via
	 * MGMT_EV_DEVICE_FOUND and report ONLY one advertisement per device
	 * for the matched Monitor via MGMT_EV_ADV_MONITOR_DEVICE_FOUND.
	 *
	 * For case 3, since we are not active scanning and all
	 * advertisements received are due to a matched Advertisement
	 * Monitor, report all advertisements ONLY via
	 * MGMT_EV_ADV_MONITOR_DEVICE_FOUND.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}

static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}

void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel-initiated discovery. The one
	 * LE exception is when pend_le_reports > 0, in which case we're
	 * doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);

			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

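	/* If a BR/EDR Class of Device is available and not already present
	 * in the EIR data, synthesize an EIR field for it: 1 length octet,
	 * 1 type octet (EIR_CLASS_OF_DEV) and 3 CoD octets, i.e. the 5
	 * extra bytes reserved in the allocation above.
	 */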
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}

void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) +
			     (name ? eir_precalc_len(name_len) : 0));
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name,
					    name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}

void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	bt_dev_dbg(hdev, "discovering %u", discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev;

	ev.suspend_state = state;
	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	ev.wake_reason = reason;
	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	} else {
		memset(&ev.addr, 0, sizeof(ev.addr));
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}

static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};

int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}

void mgmt_cleanup(struct sock *sk)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct hci_dev *hdev;

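	/* Drain every mesh transmission still associated with this socket
	 * on each registered controller, completing each one as we go.
	 */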
	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, true);
		} while (mesh_tx);
	}

	read_unlock(&hci_dev_list_lock);
}
