1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <linux/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35
36 #include "smp.h"
37 #include "mgmt_util.h"
38 #include "mgmt_config.h"
39 #include "msft.h"
40 #include "eir.h"
41 #include "aosp.h"
42
43 #define MGMT_VERSION 1
44 #define MGMT_REVISION 23
45
/* Opcodes a trusted (privileged) management socket may invoke; this
 * table is returned verbatim in the MGMT_OP_READ_COMMANDS reply for
 * trusted sockets.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
	MGMT_OP_HCI_CMD_SYNC,
};
137
/* Events a trusted management socket may receive; advertised in the
 * MGMT_OP_READ_COMMANDS reply for trusted sockets.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};
184
/* Subset of opcodes permitted on untrusted (unprivileged) sockets; all
 * of them are read-only queries.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
197
/* Subset of events delivered to untrusted (unprivileged) sockets. */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
212
213 #define CACHE_TIMEOUT secs_to_jiffies(2)
214
215 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
216 "\x00\x00\x00\x00\x00\x00\x00\x00"
217
/* HCI to MGMT error code conversion table.  Indexed directly by the
 * HCI status code (see mgmt_status()); the per-entry comment names the
 * HCI error being mapped.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
285
mgmt_errno_status(int err)286 static u8 mgmt_errno_status(int err)
287 {
288 switch (err) {
289 case 0:
290 return MGMT_STATUS_SUCCESS;
291 case -EPERM:
292 return MGMT_STATUS_REJECTED;
293 case -EINVAL:
294 return MGMT_STATUS_INVALID_PARAMS;
295 case -EOPNOTSUPP:
296 return MGMT_STATUS_NOT_SUPPORTED;
297 case -EBUSY:
298 return MGMT_STATUS_BUSY;
299 case -ETIMEDOUT:
300 return MGMT_STATUS_AUTH_FAILED;
301 case -ENOMEM:
302 return MGMT_STATUS_NO_RESOURCES;
303 case -EISCONN:
304 return MGMT_STATUS_ALREADY_CONNECTED;
305 case -ENOTCONN:
306 return MGMT_STATUS_DISCONNECTED;
307 }
308
309 return MGMT_STATUS_FAILED;
310 }
311
mgmt_status(int err)312 static u8 mgmt_status(int err)
313 {
314 if (err < 0)
315 return mgmt_errno_status(err);
316
317 if (err < ARRAY_SIZE(mgmt_status_table))
318 return mgmt_status_table[err];
319
320 return MGMT_STATUS_FAILED;
321 }
322
/* Broadcast an index-related event on the control channel to every
 * socket that has @flag set; no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
329
/* Send an event on the control channel only to sockets that have @flag
 * set, optionally skipping @skip_sk (e.g. the command originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
336
/* Send an event on the control channel to all trusted sockets, except
 * @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
343
/* Send a pre-built event skb on the control channel to all trusted
 * sockets, except @skip_sk.
 */
static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}
349
/* Map a mgmt LE address type onto the corresponding HCI LE address
 * type; anything that is not public is treated as random.
 */
static u8 le_addr_type(u8 mgmt_addr_type)
{
	return mgmt_addr_type == BDADDR_LE_PUBLIC ? ADDR_LE_DEV_PUBLIC :
						    ADDR_LE_DEV_RANDOM;
}
357
/* Fill a mgmt_rp_read_version reply into the caller-provided buffer
 * @ver; the revision is stored little-endian.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
365
/* MGMT_OP_READ_VERSION handler: reply with the mgmt interface
 * version/revision.  Takes no controller index (MGMT_INDEX_NONE).
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
378
/* MGMT_OP_READ_COMMANDS handler: reply with the list of supported
 * commands and events.  Untrusted sockets only see the read-only
 * subset.  The reply packs both opcode lists little-endian right after
 * the two counters: first all commands, then all events.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 *commands, *events;
	u16 num_commands, num_events;
	size_t rp_size;
	__le16 *opcode;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pick the tables once instead of duplicating the fill loops for
	 * the trusted and untrusted cases.
	 */
	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		commands = mgmt_commands;
		events = mgmt_events;
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		commands = mgmt_untrusted_commands;
		events = mgmt_untrusted_events;
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	opcode = rp->opcodes;

	for (i = 0; i < num_commands; i++, opcode++)
		put_unaligned_le16(commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(events[i], opcode);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
430
/* MGMT_OP_READ_INDEX_LIST handler: reply with the indices of all
 * configured controllers.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the allocation.  Only the
	 * HCI_UNCONFIGURED filter is applied here, so this may slightly
	 * over-allocate; the second pass applies the full filter set and
	 * yields the final count.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC because hci_dev_list_lock is held */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indices that are actually visible */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		/* Skip controllers still in setup/config and those bound
		 * to a user channel.
		 */
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length using the final count */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
488
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: reply with the indices of all
 * unconfigured controllers (the mirror image of read_index_list()).
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the allocation; the second pass
	 * applies the full filter set and yields the final count.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC because hci_dev_list_lock is held */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indices that are actually visible */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		/* Skip controllers still in setup/config and those bound
		 * to a user channel.
		 */
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length using the final count */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
546
/* MGMT_OP_READ_EXT_INDEX_LIST handler: reply with all controllers,
 * configured and unconfigured, tagging each entry with its type and
 * bus.  Calling this switches the socket over to extended index events
 * exclusively.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* Upper bound for the allocation: every device on the list */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	/* GFP_ATOMIC because hci_dev_list_lock is held */
	rp = kmalloc_flex(*rp, entry, count, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		/* Skip controllers still in setup/config and those bound
		 * to a user channel.
		 */
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
			continue;

		/* Entry type: 0x01 = unconfigured, 0x00 = configured */
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
612
is_configured(struct hci_dev * hdev)613 static bool is_configured(struct hci_dev *hdev)
614 {
615 if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) &&
616 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
617 return false;
618
619 if ((hci_test_quirk(hdev, HCI_QUIRK_INVALID_BDADDR) ||
620 hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY)) &&
621 !bacmp(&hdev->public_addr, BDADDR_ANY))
622 return false;
623
624 return true;
625 }
626
get_missing_options(struct hci_dev * hdev)627 static __le32 get_missing_options(struct hci_dev *hdev)
628 {
629 u32 options = 0;
630
631 if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) &&
632 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
633 options |= MGMT_OPTION_EXTERNAL_CONFIG;
634
635 if ((hci_test_quirk(hdev, HCI_QUIRK_INVALID_BDADDR) ||
636 hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY)) &&
637 !bacmp(&hdev->public_addr, BDADDR_ANY))
638 options |= MGMT_OPTION_PUBLIC_ADDRESS;
639
640 return cpu_to_le32(options);
641 }
642
/* Notify sockets subscribed to option events that the set of missing
 * configuration options changed, skipping @skip.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
650
/* Complete @opcode with the current missing-options bitmask as the
 * reply payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
658
/* MGMT_OP_READ_CONFIG_INFO handler: reply with the manufacturer plus
 * the supported and still-missing configuration options.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 supported = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	/* External configuration is only an option when the quirk says
	 * so, and a public address can only be configured when the
	 * driver provides a set_bdaddr callback.
	 */
	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG))
		supported |= MGMT_OPTION_EXTERNAL_CONFIG;
	if (hdev->set_bdaddr)
		supported |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(supported);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
686
/* Build the bitmask of PHYs the controller supports, derived from its
 * BR/EDR LMP feature bits and LE feature bits.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* BR 1M 1-slot is set unconditionally for BR/EDR */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		/* EDR rates are gated on the 2M/3M capability bits, slot
		 * counts on the multi-slot capability bits.
		 */
		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M TX/RX is set unconditionally for LE */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
738
/* Build the bitmask of currently selected PHYs.  Note the inverted
 * sense for EDR: the HCI_2DHx/HCI_3DHx bits in hdev->pkt_type mark
 * packet types as *disabled*, so an EDR PHY is selected when its bit is
 * NOT set.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* BR 1M 1-slot is always selected for BR/EDR */
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	/* LE selection comes straight from the default TX/RX PHY masks */
	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
801
get_configurable_phys(struct hci_dev * hdev)802 static u32 get_configurable_phys(struct hci_dev *hdev)
803 {
804 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
805 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
806 }
807
get_supported_settings(struct hci_dev * hdev)808 static u32 get_supported_settings(struct hci_dev *hdev)
809 {
810 u32 settings = 0;
811
812 settings |= MGMT_SETTING_POWERED;
813 settings |= MGMT_SETTING_BONDABLE;
814 settings |= MGMT_SETTING_DEBUG_KEYS;
815 settings |= MGMT_SETTING_CONNECTABLE;
816 settings |= MGMT_SETTING_DISCOVERABLE;
817
818 if (lmp_bredr_capable(hdev)) {
819 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
820 settings |= MGMT_SETTING_FAST_CONNECTABLE;
821 settings |= MGMT_SETTING_BREDR;
822 settings |= MGMT_SETTING_LINK_SECURITY;
823
824 if (lmp_ssp_capable(hdev)) {
825 settings |= MGMT_SETTING_SSP;
826 }
827
828 if (lmp_sc_capable(hdev))
829 settings |= MGMT_SETTING_SECURE_CONN;
830
831 if (hci_test_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED))
832 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
833 }
834
835 if (lmp_le_capable(hdev)) {
836 settings |= MGMT_SETTING_LE;
837 settings |= MGMT_SETTING_SECURE_CONN;
838 settings |= MGMT_SETTING_PRIVACY;
839 settings |= MGMT_SETTING_STATIC_ADDRESS;
840 settings |= MGMT_SETTING_ADVERTISING;
841 }
842
843 if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) || hdev->set_bdaddr)
844 settings |= MGMT_SETTING_CONFIGURATION;
845
846 if (cis_central_capable(hdev))
847 settings |= MGMT_SETTING_CIS_CENTRAL;
848
849 if (cis_peripheral_capable(hdev))
850 settings |= MGMT_SETTING_CIS_PERIPHERAL;
851
852 if (bis_capable(hdev))
853 settings |= MGMT_SETTING_ISO_BROADCASTER;
854
855 if (sync_recv_capable(hdev))
856 settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;
857
858 if (ll_privacy_capable(hdev))
859 settings |= MGMT_SETTING_LL_PRIVACY;
860
861 if (past_sender_capable(hdev))
862 settings |= MGMT_SETTING_PAST_SENDER;
863
864 if (past_receiver_capable(hdev))
865 settings |= MGMT_SETTING_PAST_RECEIVER;
866
867 settings |= MGMT_SETTING_PHY_CONFIGURATION;
868
869 return settings;
870 }
871
/* Build the bitmask of currently active settings, mostly mirroring the
 * hdev flags toggled by the corresponding mgmt commands.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	/* ISO/CIS/BIS and PAST state is queried via capability helpers
	 * rather than plain hdev flags.
	 */
	if (cis_central_enabled(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_enabled(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_enabled(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_enabled(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	if (ll_privacy_enabled(hdev))
		settings |= MGMT_SETTING_LL_PRIVACY;

	if (past_sender_enabled(hdev))
		settings |= MGMT_SETTING_PAST_SENDER;

	if (past_receiver_enabled(hdev))
		settings |= MGMT_SETTING_PAST_RECEIVER;

	return settings;
}
960
/* Look up a pending mgmt command of @opcode for @hdev on the control
 * channel.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
965
mgmt_get_adv_discov_flags(struct hci_dev * hdev)966 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
967 {
968 struct mgmt_pending_cmd *cmd;
969
970 /* If there's a pending mgmt command the flags will not yet have
971 * their final values, so check for this first.
972 */
973 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
974 if (cmd) {
975 struct mgmt_mode *cp = cmd->param;
976 if (cp->val == 0x01)
977 return LE_AD_GENERAL;
978 else if (cp->val == 0x02)
979 return LE_AD_LIMITED;
980 } else {
981 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
982 return LE_AD_LIMITED;
983 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
984 return LE_AD_GENERAL;
985 }
986
987 return 0;
988 }
989
mgmt_get_connectable(struct hci_dev * hdev)990 bool mgmt_get_connectable(struct hci_dev *hdev)
991 {
992 struct mgmt_pending_cmd *cmd;
993
994 /* If there's a pending mgmt command the flag will not yet have
995 * it's final value, so check for this first.
996 */
997 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
998 if (cmd) {
999 struct mgmt_mode *cp = cmd->param;
1000
1001 return cp->val;
1002 }
1003
1004 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
1005 }
1006
/* hci_cmd_sync callback: refresh the EIR data and class of device once
 * the service cache has been turned off.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
1014
/* Delayed work: when the service cache timeout fires, clear the
 * HCI_SERVICE_CACHE flag and queue the EIR/class refresh.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	/* Only act if the flag was still set (atomic test-and-clear) */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}
1025
/* hci_cmd_sync callback: re-enable advertising so a fresh RPA gets
 * generated and programmed into the controller.
 */
static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
	else
		return hci_enable_advertising_sync(hdev);
}
1037
/* Delayed-work handler for hdev->rpa_expired: mark the resolvable
 * private address as stale and, if advertising is active, queue a sync
 * operation to restart it with a new RPA.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Nothing to refresh unless advertising is currently enabled */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
1052
1053 static int set_discoverable_sync(struct hci_dev *hdev, void *data);
1054
/* Delayed-work handler for hdev->discov_off: the discoverable timeout
 * fired, so clear both discoverable flags, queue the HCI-level update
 * and broadcast the new settings to mgmt listeners.
 */
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}
1079
1080 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1081
/* Finish a mesh transmission: optionally emit the Mesh Packet Complete
 * event (carrying the tx handle) and release the tracking entry.
 * @silent suppresses the event, e.g. when the send was cancelled.
 */
static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}
1093
/* hci_cmd_sync callback run when a mesh send window ends: clear the
 * sending flag, stop advertising if no regular adv instances remain,
 * and complete the next queued mesh transmission (if any).
 */
static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	/* Only disable advertising when mesh was its sole user */
	if (list_empty(&hdev->adv_instances))
		hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}
1108
1109 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1110 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
/* Completion hook for mesh_send_done_sync: kick off the next queued
 * mesh transmission. On queueing failure the entry is completed
 * (non-silently) so userspace still gets a Packet Complete event.
 */
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}
1126
/* Delayed-work handler for hdev->mesh_send_done: when the mesh send
 * duration elapses, queue the sync teardown (mesh_send_done_sync) with
 * mesh_next as its completion so the queue keeps draining.
 */
static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}
1137
/* One-time per-device mgmt initialisation, performed the first time a
 * mgmt command touches @hdev: set up the delayed work items and switch
 * the device into mgmt-controlled mode. Idempotent via HCI_MGMT.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}
1159
/* Handle MGMT_OP_READ_INFO: fill a mgmt_rp_read_info reply with the
 * controller's address, version, manufacturer, settings, class and
 * names, snapshotted under hdev->lock, and complete the command.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	/* Class of device is a fixed 3-byte field */
	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1189
/* Build the EIR payload shared by the extended-info reply and event:
 * class of device (BR/EDR only), appearance (LE only) and both the
 * complete and short local names. Returns the number of bytes written
 * to @eir; the caller must provide a sufficiently large buffer.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1213
/* Handle MGMT_OP_READ_EXT_INFO: like read_controller_info but with the
 * variable-length EIR block appended. Calling this also switches the
 * socket from legacy class/name events to the extended info event.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	/* 512 bytes covers the fixed reply header plus the maximum EIR
	 * data produced by append_eir_data_to_buf().
	 */
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1253
/* Emit MGMT_EV_EXT_INFO_CHANGED with a freshly built EIR block to all
 * sockets that opted into extended info events, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	/* Same sizing rationale as read_ext_controller_info() */
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1269
/* Complete @opcode on @sk with the controller's current settings word
 * (little-endian) as the reply payload.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1277
/* Broadcast MGMT_EV_ADVERTISING_ADDED for @instance to all mgmt
 * sockets, skipping the originating socket @sk.
 */
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev = {
		.instance = instance,
	};

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}
1286
/* Broadcast MGMT_EV_ADVERTISING_REMOVED for @instance to all mgmt
 * sockets, skipping the originating socket @sk.
 */
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev = {
		.instance = instance,
	};

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}
1296
cancel_adv_timeout(struct hci_dev * hdev)1297 static void cancel_adv_timeout(struct hci_dev *hdev)
1298 {
1299 if (hdev->adv_instance_timeout) {
1300 hdev->adv_instance_timeout = 0;
1301 cancel_delayed_work(&hdev->adv_instance_expire);
1302 }
1303 }
1304
1305 /* This function requires the caller holds hdev->lock */
/* This function requires the caller holds hdev->lock */
/* Re-sort every LE connection parameter entry onto the pending list
 * matching its auto_connect policy, after a power cycle (or AUTO_OFF
 * where the device never really powered down).
 */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			/* Disabled entries stay off both pending lists */
			break;
		}
	}
}
1329
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings word to all
 * sockets subscribed to settings events, except @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1337
/* Completion handler for Set Powered: respond to the originating socket
 * and, on a successful power-on, restore LE auto-connect actions and
 * broadcast the new settings. The cmd is validated first since it may
 * already have been cancelled/removed concurrently.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_free(cmd);
}
1373
/* hci_cmd_sync callback for Set Powered: snapshot the requested mode
 * under mgmt_pending_lock (the cmd may be freed concurrently once the
 * lock is dropped) and drive the actual power transition.
 */
static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode cp;

	mutex_lock(&hdev->mgmt_pending_lock);

	/* Make sure cmd still outstanding. */
	if (!__mgmt_pending_listed(hdev, cmd)) {
		mutex_unlock(&hdev->mgmt_pending_lock);
		return -ECANCELED;
	}

	/* Copy the parameter so cmd need not stay alive past the lock */
	memcpy(&cp, cmd->param, sizeof(cp));

	mutex_unlock(&hdev->mgmt_pending_lock);

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp.val);
}
1395
/* Handle MGMT_OP_SET_POWERED: validate the request, reject it while a
 * power-down or another Set Powered is in flight, short-circuit when
 * the state already matches, otherwise queue the sync power transition
 * with mgmt_set_powered_complete() as its completion.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powering off while a power-down is already underway is busy */
	if (!cp->val) {
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
					      MGMT_STATUS_BUSY);
			goto failed;
		}
	}

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just echo current settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1454
/* Public wrapper: broadcast the current settings to every subscriber. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1459
/* Iteration context used when responding to a batch of pending
 * commands: records the first responder's socket (with a reference
 * held) so a follow-up settings event can skip it.
 */
struct cmd_lookup {
	struct sock *sk;	/* first responded socket, or NULL */
	struct hci_dev *hdev;
	u8 mgmt_status;		/* status to report for each command */
};
1465
/* Per-command iterator: complete @cmd with the current settings and
 * remember (and hold) the first socket seen in match->sk so callers
 * can later skip it when broadcasting new_settings.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}
1477
/* Per-command iterator: fail @cmd with the status pointed to by @data. */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, *status);
}
1484
/* Per-command iterator: finish @cmd using its own cmd_complete handler
 * when it has one, falling back to a plain status response. Any queued
 * cmd_sync work referencing @cmd is dequeued first since the command
 * is about to be freed.
 */
static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	/* dequeue cmd_sync entries using cmd as data as that is about to be
	 * removed/freed.
	 */
	hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL);

	if (cmd->cmd_complete) {
		cmd->cmd_complete(cmd, match->mgmt_status);
		return;
	}

	/* Note: cmd_status_rsp() reads @data as u8*, so it picks up
	 * match->mgmt_status, the struct's first byte-sized field at
	 * that offset — presumably intentional; verify if cmd_lookup's
	 * layout ever changes.
	 */
	cmd_status_rsp(cmd, data);
}
1501
/* Default cmd_complete handler: echo the command's own parameters back
 * as the reply payload with the given status.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1507
/* cmd_complete handler for address-based commands: reply with only the
 * leading mgmt_addr_info portion of the command's parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1513
mgmt_bredr_support(struct hci_dev * hdev)1514 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1515 {
1516 if (!lmp_bredr_capable(hdev))
1517 return MGMT_STATUS_NOT_SUPPORTED;
1518 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1519 return MGMT_STATUS_REJECTED;
1520 else
1521 return MGMT_STATUS_SUCCESS;
1522 }
1523
mgmt_le_support(struct hci_dev * hdev)1524 static u8 mgmt_le_support(struct hci_dev *hdev)
1525 {
1526 if (!lmp_le_capable(hdev))
1527 return MGMT_STATUS_NOT_SUPPORTED;
1528 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1529 return MGMT_STATUS_REJECTED;
1530 else
1531 return MGMT_STATUS_SUCCESS;
1532 }
1533
/* Completion handler for Set Discoverable: on failure report the error
 * and drop the limited-discoverable flag; on success arm the
 * discoverable timeout (if any), reply with the settings and broadcast
 * them. The cmd is validated first as it may have been cancelled.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* Arm the discoverable timeout stored by set_discoverable() */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = secs_to_jiffies(hdev->discov_timeout);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
	hci_dev_unlock(hdev);
}
1567
/* hci_cmd_sync callback for Set Discoverable: bail out if the pending
 * command was cancelled, otherwise push the discoverable state to the
 * controller. Also queued with NULL data from discov_off(); in that
 * case mgmt_pending_listed(hdev, NULL) must not match — presumably it
 * returns false/true consistently there; verify against mgmt_util.
 */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	if (!mgmt_pending_listed(hdev, data))
		return -ECANCELED;

	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
1577
/* Handle MGMT_OP_SET_DISCOVERABLE (val: 0x00 off, 0x01 general,
 * 0x02 limited, plus a timeout in seconds).
 *
 * Validates the mode/timeout combination, rejects the request while a
 * conflicting command is pending, handles the powered-off case purely
 * via flags, short-circuits a timeout-only change, and otherwise sets
 * the flags and queues the HCI update with
 * mgmt_set_discoverable_complete() as completion.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable implies connectable */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = secs_to_jiffies(hdev->discov_timeout);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1710
/* Completion handler for Set Connectable: report the error or reply
 * with the settings and broadcast them. The cmd is validated first as
 * it may have been cancelled concurrently.
 */
static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
					  int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
		goto done;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}
1738
/* Apply a Set Connectable request while the controller is powered off:
 * only the flags change (disabling connectable also drops
 * discoverable). Replies with the settings and, if anything changed,
 * refreshes scanning and broadcasts new settings.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Non-connectable cannot remain discoverable */
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1767
/* hci_cmd_sync callback for Set Connectable: bail out if the pending
 * command was cancelled, otherwise push the connectable state to the
 * controller.
 */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	if (!mgmt_pending_listed(hdev, data))
		return -ECANCELED;

	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}
1777
/* Handle MGMT_OP_SET_CONNECTABLE: validate, handle the powered-off case
 * via flags only, reject while a conflicting command is pending, then
 * update the flags optimistically and queue the HCI update with
 * mgmt_set_connectable_complete() as completion.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Dropping connectable also ends discoverability, so
		 * stop any running discoverable timeout as well.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1837
/* Handle MGMT_OP_SET_BONDABLE: purely a host-side flag, so no HCI
 * traffic is needed. On change, refresh discoverability (the bondable
 * mode can affect the advertising address in limited privacy) and
 * broadcast new settings.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1875
/* Handle MGMT_OP_SET_LINK_SECURITY: requires BR/EDR. When powered off
 * only the flag is toggled; when powered, the request is sent to the
 * controller via HCI_OP_WRITE_AUTH_ENABLE and completed from the HCI
 * event path (legacy hci_send_cmd, not cmd_sync).
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: just reply */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* On success err is 0 and the pending cmd is completed later
	 * from the Write Auth Enable command-complete handler.
	 */
	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1944
/* Completion handler for Set SSP: on failure, revert the optimistic
 * flag (broadcasting the rollback) and report the error; on success,
 * apply the flag, reply with the settings, broadcast any change and
 * refresh EIR (the SSP bit is reflected in EIR data).
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;
	u8 enable;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	cp = cmd->param;
	enable = cp->val;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* Undo the flag set optimistically in set_ssp_sync() */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			new_settings(hdev, NULL);
		}

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
		mgmt_pending_free(cmd);
		return;
	}

	/* set_ssp_sync() cleared the flag on success precisely so that
	 * this test_and_set reports the transition as a change here.
	 */
	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	settings_rsp(cmd, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
	mgmt_pending_free(cmd);
}
1990
/* hci_cmd_sync callback for Set SSP: snapshot the request under
 * mgmt_pending_lock, set HCI_SSP_ENABLED optimistically (the write
 * command checks it), then issue the Write SSP Mode command.
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode cp;
	bool changed = false;
	int err;

	mutex_lock(&hdev->mgmt_pending_lock);

	/* Bail out if the command was cancelled meanwhile */
	if (!__mgmt_pending_listed(hdev, cmd)) {
		mutex_unlock(&hdev->mgmt_pending_lock);
		return -ECANCELED;
	}

	memcpy(&cp, cmd->param, sizeof(cp));

	mutex_unlock(&hdev->mgmt_pending_lock);

	if (cp.val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp.val);

	/* NOTE: clearing on *success* is intentional, not an inverted
	 * condition: set_ssp_complete() re-runs test_and_set so it can
	 * observe the enable as a change and emit MGMT_EV_NEW_SETTINGS.
	 */
	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
2019
/* Handle MGMT_OP_SET_SSP: requires BR/EDR and SSP capability. When
 * powered off only the flag is toggled; when powered, the request is
 * queued as a sync operation with set_ssp_complete() as completion.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just echo current settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2094
/* Handle MGMT_OP_SET_HS: High Speed (AMP) support has been removed, so
 * the command is always rejected as not supported.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	bt_dev_dbg(hdev, "sock %p", sk);

	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
			       MGMT_STATUS_NOT_SUPPORTED);
}
2102
/* Completion callback for set_le_sync: reply to the pending SET_LE
 * command and broadcast the new settings on success.
 */
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Bail out if the command was cancelled or is no longer on the
	 * pending list (already handled elsewhere).
	 */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, data))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, status);
		goto done;
	}

	/* NOTE(review): settings_rsp() appears to stash a referenced
	 * socket in match.sk — hence the sock_put() below.
	 */
	settings_rsp(cmd, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

done:
	mgmt_pending_free(cmd);
}
2129
/* hci_cmd_sync callback for MGMT_OP_SET_LE: enable or disable LE host
 * support on the controller.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode cp;
	u8 val;
	int err;

	/* Snapshot the request parameters under mgmt_pending_lock; the
	 * command may be unlisted (and freed) concurrently.
	 */
	mutex_lock(&hdev->mgmt_pending_lock);

	if (!__mgmt_pending_listed(hdev, cmd)) {
		mutex_unlock(&hdev->mgmt_pending_lock);
		return -ECANCELED;
	}

	memcpy(&cp, cmd->param, sizeof(cp));
	val = !!cp.val;

	mutex_unlock(&hdev->mgmt_pending_lock);

	if (!val) {
		/* Disabling LE: tear down any advertising first */
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2185
/* Completion callback for set_mesh_sync: report the status (or plain
 * success) of the pending MGMT_OP_SET_MESH_RECEIVER command.
 */
static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	u8 status = mgmt_status(err);
	struct sock *sk;

	/* Skip if cancelled or no longer on the pending list */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	sk = cmd->sk;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, cmd->opcode, status);
		goto done;
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);

done:
	mgmt_pending_free(cmd);
}
2207
/* hci_cmd_sync callback for MGMT_OP_SET_MESH_RECEIVER: apply the mesh
 * receiver configuration (enable flag, scan timing, AD type filters).
 */
static int set_mesh_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	/* On-stack copy sized for the maximum number of AD type filters */
	DEFINE_FLEX(struct mgmt_cp_set_mesh, cp, ad_types, num_ad_types,
		    sizeof(hdev->mesh_ad_types));
	size_t len;

	/* Copy the parameters under mgmt_pending_lock; the command may
	 * be unlisted (and freed) concurrently.
	 */
	mutex_lock(&hdev->mgmt_pending_lock);

	if (!__mgmt_pending_listed(hdev, cmd)) {
		mutex_unlock(&hdev->mgmt_pending_lock);
		return -ECANCELED;
	}

	len = cmd->param_len;
	memcpy(cp, cmd->param, min(__struct_size(cp), len));

	mutex_unlock(&hdev->mgmt_pending_lock);

	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));

	if (cp->enable)
		hci_dev_set_flag(hdev, HCI_MESH);
	else
		hci_dev_clear_flag(hdev, HCI_MESH);

	hdev->le_scan_interval = __le16_to_cpu(cp->period);
	hdev->le_scan_window = __le16_to_cpu(cp->window);

	/* Bytes after the fixed header are the AD type filter list */
	len -= sizeof(struct mgmt_cp_set_mesh);

	/* If filters don't fit, forward all adv pkts */
	if (len <= sizeof(hdev->mesh_ad_types))
		memcpy(hdev->mesh_ad_types, cp->ad_types, len);

	hci_update_passive_scan_sync(hdev);
	return 0;
}
2246
/* MGMT_OP_SET_MESH_RECEIVER handler: validate the requested receiver
 * parameters and queue set_mesh_sync() to apply them.
 */
static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_set_mesh *cp = data;
	struct mgmt_pending_cmd *cmd;
	__u16 scan_period, scan_window;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->enable != 0x00 && cp->enable != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Keep allowed ranges in sync with set_scan_params() */
	scan_period = __le16_to_cpu(cp->period);
	scan_window = __le16_to_cpu(cp->window);

	/* Both values must lie within 0x0004..0x4000 and the window
	 * may not exceed the period.
	 */
	if (scan_period < 0x0004 || scan_period > 0x4000 ||
	    scan_window < 0x0004 || scan_window > 0x4000 ||
	    scan_window > scan_period)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
	} else {
		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
					 set_mesh_complete);
	}

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2302
mesh_send_start_complete(struct hci_dev * hdev,void * data,int err)2303 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2304 {
2305 struct mgmt_mesh_tx *mesh_tx = data;
2306 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2307 unsigned long mesh_send_interval;
2308 u8 mgmt_err = mgmt_status(err);
2309
2310 /* Report any errors here, but don't report completion */
2311
2312 if (mgmt_err) {
2313 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2314 /* Send Complete Error Code for handle */
2315 mesh_send_complete(hdev, mesh_tx, false);
2316 return;
2317 }
2318
2319 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2320 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2321 mesh_send_interval);
2322 }
2323
/* hci_cmd_sync callback for MGMT_OP_MESH_SEND: program a temporary
 * advertising instance with the mesh payload and schedule it.
 */
static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	/* Mesh uses a dedicated instance number above the regular sets */
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;

	/* No free advertising set left for the mesh instance */
	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

	timeout = 1000;
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other, or
		 * let it go naturally from queue if ADV is already happening
		 */
		instance = 0;
	}

	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}
2377
send_count(struct mgmt_mesh_tx * mesh_tx,void * data)2378 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2379 {
2380 struct mgmt_rp_mesh_read_features *rp = data;
2381
2382 if (rp->used_handles >= rp->max_handles)
2383 return;
2384
2385 rp->handles[rp->used_handles++] = mesh_tx->handle;
2386 }
2387
/* MGMT_OP_MESH_READ_FEATURES handler: report the mesh handle capacity
 * and the handles currently in use by the requesting socket.
 */
static int mesh_features(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_rp_mesh_read_features rp = {
		.index = cpu_to_le16(hdev->id),
	};

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		rp.max_handles = MESH_HANDLES_MAX;

	hci_dev_lock(hdev);

	if (rp.max_handles)
		mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	/* Reply is trimmed to the handles actually filled in */
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);

	hci_dev_unlock(hdev);
	return 0;
}
2414
send_cancel(struct hci_dev * hdev,void * data)2415 static int send_cancel(struct hci_dev *hdev, void *data)
2416 {
2417 struct mgmt_pending_cmd *cmd = data;
2418 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2419 struct mgmt_mesh_tx *mesh_tx;
2420
2421 if (!cancel->handle) {
2422 do {
2423 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2424
2425 if (mesh_tx)
2426 mesh_send_complete(hdev, mesh_tx, false);
2427 } while (mesh_tx);
2428 } else {
2429 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2430
2431 if (mesh_tx && mesh_tx->sk == cmd->sk)
2432 mesh_send_complete(hdev, mesh_tx, false);
2433 }
2434
2435 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2436 0, NULL, 0);
2437 mgmt_pending_free(cmd);
2438
2439 return 0;
2440 }
2441
/* MGMT_OP_MESH_SEND_CANCEL handler: queue send_cancel() to abort one
 * or all outstanding mesh transmissions of this socket.
 */
static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Mesh operations require LE to be enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);
	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2475
/* MGMT_OP_MESH_SEND handler: validate the payload, register a mesh TX
 * entry and, unless a send is already in flight, queue mesh_send_sync().
 */
static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct mgmt_cp_mesh_send *send = data;
	struct mgmt_rp_mesh_read_features rp;
	u16 expected_len;
	bool sending;
	int err = 0;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_NOT_SUPPORTED);
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_REJECTED);

	/* Payload must fit legacy advertising data (max 31 bytes) */
	if (!send->adv_data_len || send->adv_data_len > 31)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_REJECTED);

	/* Command length must exactly match the declared payload */
	expected_len = struct_size(send, adv_data, send->adv_data_len);
	if (expected_len != len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Count handles already in use to enforce the per-device limit */
	memset(&rp, 0, sizeof(rp));
	rp.max_handles = MESH_HANDLES_MAX;

	mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	if (rp.max_handles <= rp.used_handles) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_BUSY);
		goto done;
	}

	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);

	/* Only kick off transmission when nothing is already in
	 * flight; otherwise the new entry waits in the mesh queue.
	 */
	if (!mesh_tx)
		err = -ENOMEM;
	else if (!sending)
		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
					 mesh_send_start_complete);

	if (err < 0) {
		bt_dev_err(hdev, "Send Mesh Failed %d", err);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_FAILED);

		if (mesh_tx) {
			if (sending)
				mgmt_mesh_remove(mesh_tx);
		}
	} else {
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);

		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
				  &mesh_tx->handle, 1);
	}

done:
	hci_dev_unlock(hdev);
	return err;
}
2544
/* MGMT_OP_SET_LE handler: enable or disable Low Energy support. */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Powered off or no actual change: only flags are updated */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Disabling LE also clears the advertising flag */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one LE/advertising state change may be pending at a time */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2633
/* hci_cmd_sync callback for MGMT_OP_HCI_CMD_SYNC: issue the raw HCI
 * command supplied by userspace and reply with the event payload.
 */
static int send_hci_cmd_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_hci_cmd_sync *cp = cmd->param;
	struct sk_buff *skb;

	/* A zero timeout selects the default HCI command timeout */
	skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cp->opcode),
				le16_to_cpu(cp->params_len), cp->params,
				cp->event, cp->timeout ?
				secs_to_jiffies(cp->timeout) :
				HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
				mgmt_status(PTR_ERR(skb)));
		goto done;
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC, 0,
			  skb->data, skb->len);

	kfree_skb(skb);

done:
	mgmt_pending_free(cmd);

	return 0;
}
2661
/* MGMT_OP_HCI_CMD_SYNC handler: validate the length and queue the raw
 * HCI command for synchronous execution.
 */
static int mgmt_hci_cmd_sync(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 len)
{
	struct mgmt_cp_hci_cmd_sync *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	/* Total length must equal header plus declared parameter bytes */
	if (len != (offsetof(struct mgmt_cp_hci_cmd_sync, params) +
		    le16_to_cpu(cp->params_len)))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);
	cmd = mgmt_pending_new(sk, MGMT_OP_HCI_CMD_SYNC, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, send_hci_cmd_sync, cmd, NULL);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2692
2693 /* This is a helper function to test for pending mgmt commands that can
2694 * cause CoD or EIR HCI commands. We can only allow one such pending
2695 * mgmt command at a time since otherwise we cannot easily track what
2696 * the current values are, will be, and based on that calculate if a new
2697 * HCI command needs to be sent and if yes with what value.
2698 */
pending_eir_or_class(struct hci_dev * hdev)2699 static bool pending_eir_or_class(struct hci_dev *hdev)
2700 {
2701 struct mgmt_pending_cmd *cmd;
2702
2703 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2704 switch (cmd->opcode) {
2705 case MGMT_OP_ADD_UUID:
2706 case MGMT_OP_REMOVE_UUID:
2707 case MGMT_OP_SET_DEV_CLASS:
2708 case MGMT_OP_SET_POWERED:
2709 return true;
2710 }
2711 }
2712
2713 return false;
2714 }
2715
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order. 16- and 32-bit UUIDs are aliases of this
 * base value with only bytes 12-15 differing (see get_uuid_size()).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2720
get_uuid_size(const u8 * uuid)2721 static u8 get_uuid_size(const u8 *uuid)
2722 {
2723 u32 val;
2724
2725 if (memcmp(uuid, bluetooth_base_uuid, 12))
2726 return 128;
2727
2728 val = get_unaligned_le32(&uuid[12]);
2729 if (val > 0xffff)
2730 return 32;
2731
2732 return 16;
2733 }
2734
/* Shared completion callback for the class/EIR changing commands
 * (Add/Remove UUID, Set Device Class): reply with the current class.
 */
static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Class of Device is always 3 bytes */
	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
			  mgmt_status(err), hdev->dev_class, 3);

	mgmt_pending_free(cmd);
}
2746
/* Push the updated class of device and EIR data to the controller
 * after a UUID has been added.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2757
/* MGMT_OP_ADD_UUID handler: record a new service UUID and refresh the
 * class of device and EIR data.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/class changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc_obj(*uuid);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* MGMT_OP_ADD_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2807
enable_service_cache(struct hci_dev * hdev)2808 static bool enable_service_cache(struct hci_dev *hdev)
2809 {
2810 if (!hdev_is_powered(hdev))
2811 return false;
2812
2813 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2814 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2815 CACHE_TIMEOUT);
2816 return true;
2817 }
2818
2819 return false;
2820 }
2821
/* Push the updated class of device and EIR data to the controller
 * after one or more UUIDs have been removed.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2832
/* MGMT_OP_REMOVE_UUID handler: remove one UUID (or all when the UUID
 * is all zeros) and refresh the class of device and EIR data.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* Wildcard value requesting removal of all UUIDs */
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/class changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* With the service cache armed the controller update is
		 * deferred; reply right away with the current class.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_REMOVE_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2903
set_class_sync(struct hci_dev * hdev,void * data)2904 static int set_class_sync(struct hci_dev *hdev, void *data)
2905 {
2906 int err = 0;
2907
2908 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2909 cancel_delayed_work_sync(&hdev->service_cache);
2910 err = hci_update_eir_sync(hdev);
2911 }
2912
2913 if (err)
2914 return err;
2915
2916 return hci_update_class_sync(hdev);
2917 }
2918
/* MGMT_OP_SET_DEV_CLASS handler: update the major/minor device class
 * and push the change to a powered controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one EIR/class changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The two low bits of minor and the three high bits of major
	 * must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: the stored class is applied on power on */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_SET_DEV_CLASS don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2973
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the stored BR/EDR link keys
 * with the list supplied by userspace, skipping invalid entries.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound keeping the total command size within U16_MAX */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	hci_dev_lock(hdev);

	/* The supplied list fully replaces any previously stored keys */
	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Skip keys that are on the block list */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Link keys only exist for BR/EDR addresses */
		if (key->addr.type != BDADDR_BREDR) {
			bt_dev_warn(hdev,
				    "Invalid link address type %u for %pMR",
				    key->addr.type, &key->addr.bdaddr);
			continue;
		}

		/* 0x08 is the highest defined link key type */
		if (key->type > 0x08) {
			bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
				    key->type, &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
3066
/* Emit MGMT_EV_DEVICE_UNPAIRED for the given address to every mgmt
 * socket except @skip_sk.
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev = {
		.addr.type = addr_type,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
3078
/* Completion callback for unpair_device_sync: announce the unpair to
 * other mgmt sockets on success, then complete the pending command.
 */
static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	if (!err)
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, err);
	mgmt_pending_free(cmd);
}
3090
unpair_device_sync(struct hci_dev * hdev,void * data)3091 static int unpair_device_sync(struct hci_dev *hdev, void *data)
3092 {
3093 struct mgmt_pending_cmd *cmd = data;
3094 struct mgmt_cp_unpair_device *cp = cmd->param;
3095 struct hci_conn *conn;
3096
3097 if (cp->addr.type == BDADDR_BREDR)
3098 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3099 &cp->addr.bdaddr);
3100 else
3101 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3102 le_addr_type(cp->addr.type));
3103
3104 if (!conn)
3105 return 0;
3106
3107 /* Disregard any possible error since the likes of hci_abort_conn_sync
3108 * will clean up the connection no matter the error.
3109 */
3110 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3111
3112 return 0;
3113 }
3114
/* MGMT_OP_UNPAIR_DEVICE handler: remove all keys for a device and
 * optionally terminate an existing connection to it.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	/* The reply always echoes the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: drop the stored parameters right away */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3243
disconnect_complete(struct hci_dev * hdev,void * data,int err)3244 static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
3245 {
3246 struct mgmt_pending_cmd *cmd = data;
3247
3248 cmd->cmd_complete(cmd, mgmt_status(err));
3249 mgmt_pending_free(cmd);
3250 }
3251
disconnect_sync(struct hci_dev * hdev,void * data)3252 static int disconnect_sync(struct hci_dev *hdev, void *data)
3253 {
3254 struct mgmt_pending_cmd *cmd = data;
3255 struct mgmt_cp_disconnect *cp = cmd->param;
3256 struct hci_conn *conn;
3257
3258 if (cp->addr.type == BDADDR_BREDR)
3259 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3260 &cp->addr.bdaddr);
3261 else
3262 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3263 le_addr_type(cp->addr.type));
3264
3265 if (!conn)
3266 return -ENOTCONN;
3267
3268 /* Disregard any possible error since the likes of hci_abort_conn_sync
3269 * will clean up the connection no matter the error.
3270 */
3271 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3272
3273 return 0;
3274 }
3275
/* MGMT_OP_DISCONNECT handler: queue termination of the link to the given
 * remote address. The actual HCI work is done by disconnect_sync() and the
 * reply is sent from disconnect_complete().
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The response always echoes the requested address back. */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* On queueing failure the completion callback never runs, so the
	 * pending command must be freed here.
	 */
	err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
				 disconnect_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3321
/* Map an HCI link type plus HCI address type to the mgmt BDADDR_* address
 * type used on the management interface.
 */
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	/* All LE-based link types (including ISO variants) map onto the
	 * two LE mgmt address types; everything else is BR/EDR.
	 */
	if (link_type == CIS_LINK || link_type == BIS_LINK ||
	    link_type == PA_LINK || link_type == LE_LINK) {
		if (addr_type == ADDR_LE_DEV_PUBLIC)
			return BDADDR_LE_PUBLIC;

		/* Fallback to LE Random address type */
		return BDADDR_LE_RANDOM;
	}

	/* Fallback to BR/EDR type */
	return BDADDR_BREDR;
}
3343
/* MGMT_OP_GET_CONNECTIONS handler: report the addresses of all connections
 * that have been announced to mgmt as connected.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count mgmt-visible connections to size the reply. */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc_flex(*rp, addr, i);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in the addresses. SCO/eSCO links are filtered
	 * out: their entry is written but i is not advanced, so the next
	 * accepted connection overwrites it. The final count can therefore
	 * be smaller than the allocation made above.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3397
send_pin_code_neg_reply(struct sock * sk,struct hci_dev * hdev,struct mgmt_cp_pin_code_neg_reply * cp)3398 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3399 struct mgmt_cp_pin_code_neg_reply *cp)
3400 {
3401 struct mgmt_pending_cmd *cmd;
3402 int err;
3403
3404 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3405 sizeof(*cp));
3406 if (!cmd)
3407 return -ENOMEM;
3408
3409 cmd->cmd_complete = addr_cmd_complete;
3410
3411 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3412 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3413 if (err < 0)
3414 mgmt_pending_remove(cmd);
3415
3416 return err;
3417 }
3418
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code to the
 * controller for an ongoing BR/EDR pairing.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* A PIN code reply only makes sense for an existing ACL link. */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN; anything shorter is
	 * actively rejected towards the remote before failing the command.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	/* Response is sent when the PIN Code Reply command completes;
	 * here only a send failure removes the pending command.
	 */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3480
/* MGMT_OP_SET_IO_CAPABILITY handler: record the IO capability to advertise
 * during future pairings.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *req = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only the IO capability values defined by SMP are acceptable. */
	if (req->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);
	hdev->io_capability = req->io_capability;
	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
3503
find_pairing(struct hci_conn * conn)3504 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3505 {
3506 struct hci_dev *hdev = conn->hdev;
3507 struct mgmt_pending_cmd *cmd;
3508
3509 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3510 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3511 continue;
3512
3513 if (cmd->user_data != conn)
3514 continue;
3515
3516 return cmd;
3517 }
3518
3519 return NULL;
3520 }
3521
/* Finish a MGMT_OP_PAIR_DEVICE command: send the response with the given
 * status, detach the pairing callbacks and release the connection references
 * that pair_device() took.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	/* Drop the hci_conn_hold() taken when the connection was created. */
	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Drop the reference taken via hci_conn_get() in pair_device(). */
	hci_conn_put(conn);

	return err;
}
3550
/* Called by the SMP layer when pairing on a connection finishes; completes
 * any pending MGMT_OP_PAIR_DEVICE command with success or failure.
 */
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	struct mgmt_pending_cmd *cmd = find_pairing(conn);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, complete ? MGMT_STATUS_SUCCESS :
			  MGMT_STATUS_FAILED);
	mgmt_pending_remove(cmd);
}
3562
/* BR/EDR connection callback: complete the pending pair command with the
 * HCI status of the connect/security/disconnect event.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *pending = find_pairing(conn);

	BT_DBG("status %u", status);

	if (!pending) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	pending->cmd_complete(pending, mgmt_status(status));
	mgmt_pending_remove(pending);
}
3578
/* LE connection callback: unlike BR/EDR, success here does not mean the
 * pairing finished (SMP reports that separately via mgmt_smp_complete()),
 * so only failures complete the pending pair command.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *pending;

	BT_DBG("status %u", status);

	if (!status)
		return;

	pending = find_pairing(conn);
	if (!pending) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	pending->cmd_complete(pending, mgmt_status(status));
	mgmt_pending_remove(pending);
}
3597
/* MGMT_OP_PAIR_DEVICE handler: establish a connection to the given address
 * (BR/EDR or LE) and initiate pairing on it. The command stays pending until
 * pairing_complete() runs via the connection callbacks or via SMP.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The response always echoes the requested address back. */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE,
				       HCI_ACL_CONN_TIMEOUT);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the peripheral preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
		if (!p) {
			err = -EIO;
			goto unlock;
		}

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Map connect errors onto mgmt status codes. */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* A non-NULL connect_cfm_cb means another pairing is in flight. */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Reference released in pairing_complete() via hci_conn_put(). */
	cmd->user_data = hci_conn_get(conn);

	/* If already connected and secure enough, the command can complete
	 * immediately instead of waiting for the callbacks.
	 */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3733
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort the ongoing pair-device command
 * for the given address, remove any keys created so far, and tear down the
 * link if it exists solely because of the pairing attempt.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The cancel must target the same address the pending pairing is
	 * actually for.
	 */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Complete the pair command as cancelled; conn stays valid below
	 * because pairing_complete() only drops the references it owns.
	 */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3790
/* Common handler for the user pairing response commands (PIN code negative
 * reply, user confirm/passkey positive and negative replies).
 *
 * For LE connections the reply is routed to SMP and completed immediately;
 * for BR/EDR the corresponding HCI command @hci_op is sent and the mgmt
 * command stays pending until the HCI event completes it.
 *
 * @mgmt_op: mgmt opcode used in the response
 * @hci_op:  HCI opcode to send for BR/EDR connections
 * @passkey: numeric passkey, only used for HCI_OP_USER_PASSKEY_REPLY
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE: hand the reply to SMP and answer synchronously. */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3861
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: reject a PIN code request. */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *req = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &req->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3873
/* MGMT_OP_USER_CONFIRM_REPLY handler: accept a user confirmation request. */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *req = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Reject truncated or oversized commands outright. */
	if (len != sizeof(*req))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &req->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3889
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: reject a user confirmation
 * request.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *req = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &req->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3901
/* MGMT_OP_USER_PASSKEY_REPLY handler: supply the passkey the user entered. */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *req = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &req->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, req->passkey);
}
3913
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: reject a passkey request. */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *req = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &req->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3925
/* Expire the current advertising instance if it carries any of the given
 * flags (its cached data just went stale) and move on to the next one.
 */
static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv;

	/* Nothing to do when there is no current instance or when the
	 * current instance is unaffected by the change.
	 */
	adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv || !(adv->flags & flags))
		return 0;

	cancel_adv_timeout(hdev);

	adv = hci_get_next_instance(hdev, adv->instance);
	if (adv)
		hci_schedule_adv_instance_sync(hdev, adv->instance, true);

	return 0;
}
3948
/* cmd_sync work: expire the current advertising instance if it includes the
 * local name, so advertising picks up the new name.
 */
static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}
3953
/* cmd_sync completion for MGMT_OP_SET_LOCAL_NAME: respond to the socket and
 * refresh advertising data if needed.
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Bail out if the request was cancelled or the pending command was
	 * already removed (e.g. the adapter went away).
	 */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	cp = cmd->param;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		/* If advertising is active the name in the advertising data
		 * needs to be refreshed as well.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_free(cmd);
}
3980
/* cmd_sync work for MGMT_OP_SET_LOCAL_NAME: push the new name to the
 * controller (BR/EDR name + EIR, and LE scan response data).
 */
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name cp;

	/* Copy the parameters out under mgmt_pending_lock: the pending
	 * command may be freed concurrently, so verify it is still listed
	 * before touching cmd->param.
	 */
	mutex_lock(&hdev->mgmt_pending_lock);

	if (!__mgmt_pending_listed(hdev, cmd)) {
		mutex_unlock(&hdev->mgmt_pending_lock);
		return -ECANCELED;
	}

	memcpy(&cp, cmd->param, sizeof(cp));

	mutex_unlock(&hdev->mgmt_pending_lock);

	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev, cp.name);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}
4010
/* MGMT_OP_SET_LOCAL_NAME handler: update the device name and, when powered,
 * propagate it to the controller via set_name_sync().
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name never reaches the controller, so it can be stored
	 * unconditionally.
	 */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		/* Not powered: just store the name and notify listeners. */
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	/* Commit the new name only after the work was queued successfully. */
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
4073
/* cmd_sync work: expire the current advertising instance if it includes the
 * appearance, so advertising picks up the new value.
 */
static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}
4078
/* MGMT_OP_SET_APPEARANCE handler: store the GAP appearance value and, if it
 * changed while advertising, trigger a refresh of the advertising data.
 */
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *req = data;
	u16 new_appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Appearance only exists for LE. */
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	new_appearance = le16_to_cpu(req->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != new_appearance) {
		hdev->appearance = new_appearance;

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
					   NULL);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}
4113
/* MGMT_OP_GET_PHY_CONFIGURATION handler: report the supported, selected and
 * configurable PHYs.
 */
static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_configuration rp;
	u32 supported, selected, configurable;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Snapshot the PHY state under the device lock. */
	hci_dev_lock(hdev);
	supported = get_supported_phys(hdev);
	selected = get_selected_phys(hdev);
	configurable = get_configurable_phys(hdev);
	hci_dev_unlock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.supported_phys = cpu_to_le32(supported);
	rp.selected_phys = cpu_to_le32(selected);
	rp.configurable_phys = cpu_to_le32(configurable);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}
4134
/* Broadcast a PHY Configuration Changed event to all mgmt sockets except
 * @skip (typically the socket that issued the change).
 */
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_phy_configuration_changed ev;

	memset(&ev, 0, sizeof(ev));

	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));

	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
			  sizeof(ev), skip);
}
4146
/* cmd_sync completion for MGMT_OP_SET_PHY_CONFIGURATION: derive the final
 * status from the HCI response skb stashed by set_default_phy_sync(), reply
 * to the socket and broadcast the change on success.
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb;
	u8 status = mgmt_status(err);

	skb = cmd->skb;

	/* Even when the work itself succeeded, the HCI command may have
	 * failed: a missing skb, an ERR_PTR skb or a non-zero status byte
	 * in the response all count as failure.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
4182
set_default_phy_sync(struct hci_dev * hdev,void * data)4183 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4184 {
4185 struct mgmt_pending_cmd *cmd = data;
4186 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4187 struct hci_cp_le_set_default_phy cp_phy;
4188 u32 selected_phys;
4189
4190 selected_phys = __le32_to_cpu(cp->selected_phys);
4191
4192 memset(&cp_phy, 0, sizeof(cp_phy));
4193
4194 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4195 cp_phy.all_phys |= 0x01;
4196
4197 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4198 cp_phy.all_phys |= 0x02;
4199
4200 if (selected_phys & MGMT_PHY_LE_1M_TX)
4201 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4202
4203 if (selected_phys & MGMT_PHY_LE_2M_TX)
4204 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4205
4206 if (selected_phys & MGMT_PHY_LE_CODED_TX)
4207 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4208
4209 if (selected_phys & MGMT_PHY_LE_1M_RX)
4210 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4211
4212 if (selected_phys & MGMT_PHY_LE_2M_RX)
4213 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4214
4215 if (selected_phys & MGMT_PHY_LE_CODED_RX)
4216 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4217
4218 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4219 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4220
4221 return 0;
4222 }
4223
/* MGMT_OP_SET_PHY_CONFIGURATION handler: apply a new PHY selection. BR/EDR
 * PHYs are applied immediately by adjusting hdev->pkt_type; LE PHYs are
 * applied asynchronously via set_default_phy_sync().
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Selecting a PHY the controller does not support is invalid. */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Non-configurable PHYs are always on: they must all remain set in
	 * the selection.
	 */
	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Nothing to do when the selection matches the current state. */
	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Translate the BR/EDR PHY selection into ACL packet types. Note
	 * that the basic rate bits enable packet types while the EDR bits
	 * work in reverse: setting a HCI_2DHx/HCI_3DHx bit disables that
	 * EDR packet type.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If the LE part of the selection is unchanged, only the BR/EDR
	 * packet types were modified and the command can complete now.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4352
/* MGMT_OP_SET_BLOCKED_KEYS handler.
 *
 * Replaces the device's blocked-key list with the one supplied by
 * userspace. The whole list is cleared first, so a short command with
 * key_count == 0 empties the list.
 */
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_blocked_keys *keys = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int err = MGMT_STATUS_SUCCESS;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Guard against a key_count that would overflow the length math */
	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload must exactly match the advertised key count */
	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct blocked_key *b = kzalloc_obj(*b);

		if (!b) {
			/* Stop on OOM; keys added so far remain blocked */
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				 err, NULL, 0);
}
4401
/* MGMT_OP_SET_WIDEBAND_SPEECH handler.
 *
 * Toggles the HCI_WIDEBAND_SPEECH_ENABLED flag. The setting may only
 * be changed while the controller is powered off; while powered, the
 * request is accepted only if it matches the current state.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed = false;
	bool enable;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only offered when the driver declares wideband speech support */
	if (!hci_test_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	enable = !!cp->val;

	/* Changing the setting while powered is not allowed */
	if (hdev_is_powered(hdev) &&
	    enable != hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (enable)
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	/* Broadcast New Settings only when the flag actually flipped */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4450
/* MGMT_OP_READ_CONTROLLER_CAP handler.
 *
 * Builds a TLV list of controller capabilities (security flags, max
 * encryption key sizes and, when available, the LE TX power range)
 * into a small on-stack buffer and returns it to userspace.
 *
 * NOTE(review): buf[20] must hold sizeof(*rp) plus every TLV appended
 * below (currently at most 2 + 3 + 4 + 4 + 4 bytes) — re-check the
 * size when adding new capability entries.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
4517
/* Experimental feature UUIDs.
 *
 * Each array stores the 128-bit UUID in reverse (little-endian) byte
 * order relative to the canonical string given in the comment above it,
 * matching the on-the-wire format used by the mgmt interface.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};
4555
/* MGMT_OP_READ_EXP_FEATURES_INFO handler.
 *
 * Reports the list of experimental features and whether each one is
 * currently enabled (BIT(0) of the per-feature flags). hdev may be
 * NULL when the command targets the non-controller index; in that
 * case only the hdev-independent features are reported.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_exp_features_info *rp;
	size_t len;
	u16 idx = 0;
	u32 flags;
	int status;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enough space for 7 features: the branches below emit at most 6
	 * entries, leaving one slot of headroom.
	 */
	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
	rp = kzalloc(len, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

#ifdef CONFIG_BT_FEATURE_DEBUG
	flags = bt_dbg_get() ? BIT(0) : 0;

	memcpy(rp->features[idx].uuid, debug_uuid, 16);
	rp->features[idx].flags = cpu_to_le32(flags);
	idx++;
#endif

	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (IS_ENABLED(CONFIG_BT_LE)) {
		flags = iso_inited() ? BIT(0) : 0;
		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && lmp_le_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	/* Each entry is a 16-byte UUID plus a 4-byte flags word = 20 bytes */
	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				   MGMT_OP_READ_EXP_FEATURES_INFO,
				   0, rp, sizeof(*rp) + (20 * idx));

	kfree(rp);
	return status;
}
4647
/* Broadcast an Experimental Feature Changed event for @uuid to every
 * mgmt socket that opted in via HCI_MGMT_EXP_FEATURE_EVENTS, except
 * @skip (normally the socket that issued the change).
 */
static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
			       bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev = {
		.flags = cpu_to_le32(enabled ? BIT(0) : 0),
	};

	memcpy(ev.uuid, uuid, 16);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
4661
/* Initializer for one entry of the exp_features[] dispatch table below */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4667
/* The zero key uuid is special. Multiple exp features are set through it.
 * Currently writing the zero key disables the debug feature (when built
 * with CONFIG_BT_FEATURE_DEBUG and targeting the non-controller index)
 * and always replies with an all-zero UUID and cleared flags.
 */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		/* Notify other listeners only if debug was actually on */
		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
4694
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Set Experimental Feature handler for debug_uuid: toggles the global
 * bt_dbg debug-output switch. Valid only on the non-controller index
 * and with a single boolean parameter octet.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	/* Record whether the switch actually flips before setting it */
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* hdev is NULL here, so the event goes out on the global index */
	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
#endif
4741
/* Set Experimental Feature handler for mgmt_mesh_uuid: toggles the
 * HCI_MESH_EXPERIMENTAL flag. Disabling it also clears HCI_MESH.
 * Requires a valid controller index and one boolean parameter octet.
 */
static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (!val) {
		/* Turning the experiment off also disables mesh itself */
		hci_dev_clear_flag(hdev, HCI_MESH);
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_MESH_EXPERIMENTAL);
	} else {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_MESH_EXPERIMENTAL);
	}

	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);

	return err;
}
4792
/* Set Experimental Feature handler for quality_report_uuid: enables or
 * disables controller quality reporting, via the driver callback when
 * present, otherwise via the AOSP vendor extension. Runs under the
 * request sync lock because the enable path issues HCI traffic.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		/* Prefer the driver-provided hook over the AOSP extension */
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		/* Only update the flag after the controller accepted it */
		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4866
/* Set Experimental Feature handler for offload_codecs_uuid: toggles the
 * HCI_OFFLOAD_CODECS_ENABLED flag. Supported only when the driver
 * provides a get_data_path_id callback.
 */
static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));

	/* Codec offload needs driver support for data path queries */
	if (!hdev->get_data_path_id)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);

	return err;
}
4924
/* Set Experimental Feature handler for le_simultaneous_roles_uuid:
 * toggles the HCI_LE_SIMULTANEOUS_ROLES flag. Supported only when the
 * controller can be central and peripheral at the same time.
 */
static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
					  struct mgmt_cp_set_exp_feature *cp,
					  u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));

	if (!hci_dev_le_state_simultaneous(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
		else
			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
	}

	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);

	return err;
}
4982
#ifdef CONFIG_BT_LE
/* Set Experimental Feature handler for iso_socket_uuid: registers or
 * unregisters the ISO socket protocol via iso_init()/iso_exit().
 * Valid only on the non-controller index with one boolean octet.
 */
static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	err = val ? iso_init() : iso_exit();

	/* The state only changed if (de)registration succeeded */
	changed = !err;

	memcpy(rp.uuid, iso_socket_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, iso_socket_uuid, val, sk);

	return err;
}
#endif
5033
/* Dispatch table mapping experimental-feature UUIDs to their Set
 * Experimental Feature handlers. Scanned linearly by set_exp_feature();
 * the all-NULL sentinel terminates the scan.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
5054
/* MGMT_OP_SET_EXP_FEATURE handler: look up the requested UUID in the
 * exp_features[] table and delegate to the matching handler, or report
 * Not Supported for an unknown UUID.
 */
static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	const struct mgmt_exp_feature *feat;

	bt_dev_dbg(hdev, "sock %p", sk);

	for (feat = exp_features; feat->uuid; feat++) {
		if (!memcmp(cp->uuid, feat->uuid, 16))
			return feat->set_func(sk, hdev, cp, data_len);
	}

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}
5072
/* MGMT_OP_GET_DEVICE_FLAGS handler.
 *
 * Looks up the per-device flags for a BR/EDR accept-list entry or an
 * LE connection-parameters entry and returns them together with the
 * controller-wide supported flag mask. Replies with Invalid Params if
 * the device is unknown.
 */
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	supported_flags = hdev->conn_flags;

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->flags;
	} else {
		/* Any non-BR/EDR address type is treated as LE */
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (!params)
			goto done;

		current_flags = params->flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				 &rp, sizeof(rp));
}
5123
/* Emit a Device Flags Changed event for @bdaddr to all mgmt sockets
 * except @sk (the one that issued the change).
 */
static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
{
	struct mgmt_ev_device_flags_changed ev = {
		.addr.type = bdaddr_type,
		.supported_flags = cpu_to_le32(supported_flags),
		.current_flags = cpu_to_le32(current_flags),
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
}
5137
is_connected(struct hci_dev * hdev,bdaddr_t * addr,u8 type)5138 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5139 {
5140 struct hci_conn *conn;
5141
5142 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5143 if (!conn)
5144 return false;
5145
5146 if (conn->dst_type != type)
5147 return false;
5148
5149 if (conn->state != BT_CONNECTED)
5150 return false;
5151
5152 return true;
5153 }
5154
/* This function requires the caller holds hdev->lock.
 *
 * Create (or fetch) the connection parameters for @addr and set its
 * auto-connect policy, moving the entry onto the appropriate pending
 * list (pend_le_conns / pend_le_reports). Returns the params entry or
 * NULL if allocation failed.
 */
static struct hci_conn_params *hci_conn_params_set(struct hci_dev *hdev,
						   bdaddr_t *addr, u8 addr_type,
						   u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return NULL;

	/* No list movement needed if the policy is unchanged */
	if (params->auto_connect == auto_connect)
		return params;

	/* Detach from whichever pending list it is on before re-filing */
	hci_pend_le_list_del_init(params);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-progress explicit connect takes precedence over
		 * passive reporting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		else
			hci_pend_le_list_add(params, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connection attempt if not already connected */
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return params;
}
5200
/* MGMT_OP_SET_DEVICE_FLAGS handler.
 *
 * Updates the per-device flags of a BR/EDR accept-list entry or an LE
 * connection-parameters entry (creating the latter on demand with
 * auto-connect disabled). On success, broadcasts a Device Flags
 * Changed event to other mgmt sockets.
 */
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	/* NOTE(review): conn_flags is read here without hdev->lock and the
	 * supported-flags check is repeated below under the lock; consider
	 * taking hci_dev_lock() earlier if conn_flags can change
	 * concurrently — TODO confirm.
	 */
	supported_flags = hdev->conn_flags;

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}

		goto unlock;
	}

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	if (!params) {
		/* Create a new hci_conn_params if it doesn't exist */
		params = hci_conn_params_set(hdev, &cp->addr.bdaddr,
					     le_addr_type(cp->addr.type),
					     HCI_AUTO_CONN_DISABLED);
		if (!params) {
			bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
				    &cp->addr.bdaddr,
				    le_addr_type(cp->addr.type));
			goto unlock;
		}
	}

	/* Re-validate under the lock in case conn_flags changed */
	supported_flags = hdev->conn_flags;

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	WRITE_ONCE(params->flags, current_flags);
	status = MGMT_STATUS_SUCCESS;

	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 * has been set.
	 */
	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
		hci_update_passive_scan(hdev);

unlock:
	hci_dev_unlock(hdev);

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}
5284
/* Broadcast an Advertisement Monitor Added event for @handle to all
 * mgmt sockets except @sk.
 */
static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
				   u16 handle)
{
	struct mgmt_ev_adv_monitor_added ev = {
		.monitor_handle = cpu_to_le16(handle),
	};

	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
}
5294
/* Broadcast an Advertisement Monitor Removed event for @handle (already
 * in little-endian form) to all mgmt sockets except @sk.
 */
static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
				     __le16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev = {
		.monitor_handle = handle,
	};

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
}
5304
/* MGMT_OP_READ_ADV_MONITOR_FEATURES handler.
 *
 * Reports the supported/enabled advertisement-monitor features and the
 * handles of all registered monitors. The handles are snapshotted into
 * an on-stack array under hdev->lock, then the reply is built after
 * releasing the lock.
 *
 * NOTE(review): handles[] holds HCI_MAX_ADV_MONITOR_NUM_HANDLES
 * entries; this relies on the IDR never containing more monitors than
 * that — presumably enforced at registration time, verify.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
5353
/* Completion callback for Add Adv Patterns Monitor (with and without
 * RSSI thresholds).
 *
 * Runs once hci_add_adv_monitor() has finished (@status is 0 on
 * success, negative errno otherwise).  Replies to the pending command
 * with the assigned monitor handle and, on success, emits the Adv
 * Monitor Added event, bumps the monitor count and re-evaluates
 * passive scanning.
 */
static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor;

	/* This is likely the result of hdev being closed and mgmt_index_removed
	 * is attempting to clean up any pending command so
	 * hci_adv_monitors_clear is about to be called which will take care of
	 * freeing the adv_monitor instances.
	 */
	if (status == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		/* A first successful registration moves the monitor out of
		 * the NOT_REGISTERED state.
		 */
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5391
mgmt_add_adv_patterns_monitor_sync(struct hci_dev * hdev,void * data)5392 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5393 {
5394 struct mgmt_pending_cmd *cmd = data;
5395 struct adv_monitor *mon;
5396
5397 mutex_lock(&hdev->mgmt_pending_lock);
5398
5399 if (!__mgmt_pending_listed(hdev, cmd)) {
5400 mutex_unlock(&hdev->mgmt_pending_lock);
5401 return -ECANCELED;
5402 }
5403
5404 mon = cmd->user_data;
5405
5406 mutex_unlock(&hdev->mgmt_pending_lock);
5407
5408 return hci_add_adv_monitor(hdev, mon);
5409 }
5410
/* Common tail of the Add Adv Patterns Monitor handlers.
 *
 * Takes ownership of monitor @m: on every error path — including a
 * non-zero @status handed in by the caller's parsing stage — the
 * monitor is freed here before replying with an error.  On success a
 * pending command is queued and the reply is sent later from
 * mgmt_add_adv_patterns_monitor_complete().
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	hci_dev_lock(hdev);

	/* Caller already failed (parse/alloc error); just clean up */
	if (status)
		goto unlock;

	/* Only one monitor (or LE state) change may be in flight */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	/* Monitor ownership moves to the pending command */
	cmd->user_data = m;
	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
				 mgmt_add_adv_patterns_monitor_complete);
	if (err) {
		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
5457
parse_adv_monitor_rssi(struct adv_monitor * m,struct mgmt_adv_rssi_thresholds * rssi)5458 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5459 struct mgmt_adv_rssi_thresholds *rssi)
5460 {
5461 if (rssi) {
5462 m->rssi.low_threshold = rssi->low_threshold;
5463 m->rssi.low_threshold_timeout =
5464 __le16_to_cpu(rssi->low_threshold_timeout);
5465 m->rssi.high_threshold = rssi->high_threshold;
5466 m->rssi.high_threshold_timeout =
5467 __le16_to_cpu(rssi->high_threshold_timeout);
5468 m->rssi.sampling_period = rssi->sampling_period;
5469 } else {
5470 /* Default values. These numbers are the least constricting
5471 * parameters for MSFT API to work, so it behaves as if there
5472 * are no rssi parameter to consider. May need to be changed
5473 * if other API are to be supported.
5474 */
5475 m->rssi.low_threshold = -127;
5476 m->rssi.low_threshold_timeout = 60;
5477 m->rssi.high_threshold = -127;
5478 m->rssi.high_threshold_timeout = 0;
5479 m->rssi.sampling_period = 0;
5480 }
5481 }
5482
/* Validate @pattern_count patterns from the request and append a copy
 * of each to m->patterns.
 *
 * Returns MGMT_STATUS_SUCCESS, MGMT_STATUS_INVALID_PARAMS when a
 * pattern's offset/length would exceed HCI_MAX_AD_LENGTH, or
 * MGMT_STATUS_NO_RESOURCES on allocation failure.  Patterns added
 * before a failure stay on the list; the caller frees the whole
 * monitor on error.
 */
static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
				    struct mgmt_adv_pattern *patterns)
{
	int i;

	for (i = 0; i < pattern_count; i++) {
		struct mgmt_adv_pattern *src = &patterns[i];
		struct adv_pattern *p;

		if (src->offset >= HCI_MAX_AD_LENGTH ||
		    src->length > HCI_MAX_AD_LENGTH ||
		    (src->offset + src->length) > HCI_MAX_AD_LENGTH)
			return MGMT_STATUS_INVALID_PARAMS;

		p = kmalloc_obj(*p);
		if (!p)
			return MGMT_STATUS_NO_RESOURCES;

		p->ad_type = src->ad_type;
		p->offset = src->offset;
		p->length = src->length;
		memcpy(p->value, src->value, p->length);

		INIT_LIST_HEAD(&p->list);
		list_add(&p->list, &m->patterns);
	}

	return MGMT_STATUS_SUCCESS;
}
5513
/* Add Adv Patterns Monitor (MGMT_OP_ADD_ADV_PATTERNS_MONITOR).
 *
 * Validates the variable-length pattern list, allocates a monitor with
 * default RSSI parameters and hands it to __add_adv_patterns_monitor().
 * Note that on early failure @m may still be NULL (together with a
 * non-zero status); the common tail copes with both.
 */
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* At least one pattern must follow the fixed header */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	/* Total length must match the declared pattern count exactly */
	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc_obj(*m);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	/* No RSSI thresholds in this command variant — use defaults */
	parse_adv_monitor_rssi(m, NULL);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
}
5550
/* Add Adv Patterns Monitor With RSSI Threshold
 * (MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI).
 *
 * Same as add_adv_patterns_monitor() but the request additionally
 * carries RSSI thresholds, which are copied into the monitor instead
 * of the defaults.
 */
static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
					 void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* At least one pattern must follow the fixed header */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	/* Total length must match the declared pattern count exactly */
	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc_obj(*m);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, &cp->rssi);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
}
5587
/* Completion callback for MGMT_OP_REMOVE_ADV_MONITOR.
 *
 * Replies echoing the (little-endian) handle from the request and, on
 * success, emits the Adv Monitor Removed event and re-evaluates passive
 * scanning.  A -ECANCELED status means the request was aborted (e.g.
 * the index is going away) and no reply must be sent.
 */
static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp;

	if (status == -ECANCELED)
		return;

	hci_dev_lock(hdev);

	cp = cmd->param;

	/* Echo the handle exactly as requested (0 means "remove all") */
	rp.monitor_handle = cp->monitor_handle;

	if (!status) {
		mgmt_adv_monitor_removed(cmd->sk, hdev, cp->monitor_handle);
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5617
mgmt_remove_adv_monitor_sync(struct hci_dev * hdev,void * data)5618 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5619 {
5620 struct mgmt_pending_cmd *cmd = data;
5621 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5622 u16 handle = __le16_to_cpu(cp->monitor_handle);
5623
5624 if (!handle)
5625 return hci_remove_all_adv_monitor(hdev);
5626
5627 return hci_remove_single_adv_monitor(hdev, handle);
5628 }
5629
/* Remove Adv Monitor (MGMT_OP_REMOVE_ADV_MONITOR).
 *
 * Rejected while a Set LE or Add Adv Patterns Monitor command is still
 * pending.  The removal itself runs asynchronously through
 * hci_cmd_sync_submit(); the reply is sent from
 * mgmt_remove_adv_monitor_complete().
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err, status;

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
				  mgmt_remove_adv_monitor_complete);

	if (err) {
		/* Never queued — the completion callback won't run, so the
		 * pending command must be freed here.
		 */
		mgmt_pending_free(cmd);

		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
5674
/* Completion callback for MGMT_OP_READ_LOCAL_OOB_DATA.
 *
 * Translates the controller's HCI Read Local OOB (Extended) Data
 * response carried in cmd->skb into the mgmt reply.  When BR/EDR
 * Secure Connections is not enabled, only the P-192 hash/randomizer
 * are available and the reply is truncated accordingly; otherwise both
 * P-192 and P-256 values are copied out.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data,
					 int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Even on transport success, the skb may be missing, an ERR_PTR,
	 * or carry a non-zero HCI status in its first byte.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a short response from the controller */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Drop the unused P-256 fields from the reply */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		/* Guard against a short response from the controller */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5743
read_local_oob_data_sync(struct hci_dev * hdev,void * data)5744 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5745 {
5746 struct mgmt_pending_cmd *cmd = data;
5747
5748 if (bredr_sc_enabled(hdev))
5749 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5750 else
5751 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5752
5753 if (IS_ERR(cmd->skb))
5754 return PTR_ERR(cmd->skb);
5755 else
5756 return 0;
5757 }
5758
/* Read Local OOB Data (MGMT_OP_READ_LOCAL_OOB_DATA).
 *
 * Requires the adapter to be powered and SSP-capable.  Queues the HCI
 * read via hci_cmd_sync_queue(); the reply is built asynchronously in
 * read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		/* Alloc or queue failed — the completion callback will never
		 * run, so free the pending command here.
		 */
		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5800
/* Add Remote OOB Data (MGMT_OP_ADD_REMOTE_OOB_DATA).
 *
 * Two request sizes are accepted: the legacy form carrying only the
 * P-192 hash/randomizer (BR/EDR addresses only), and the extended form
 * carrying both P-192 and P-256 values.  An all-zero hash or
 * randomizer means "no data" for that curve and disables it.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy form: P-192 values only, BR/EDR only */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended form: both P-192 and P-256 values */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5908
/* Remove Remote OOB Data (MGMT_OP_REMOVE_REMOTE_OOB_DATA).
 *
 * BR/EDR addresses only.  BDADDR_ANY clears every stored entry;
 * otherwise a single entry is removed and an unknown address yields
 * MGMT_STATUS_INVALID_PARAMS.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status = MGMT_STATUS_SUCCESS;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		hci_remote_oob_data_clear(hdev);
	else if (hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr,
					    cp->addr.type) < 0)
		status = MGMT_STATUS_INVALID_PARAMS;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
5945
discovery_type_is_valid(struct hci_dev * hdev,uint8_t type,uint8_t * mgmt_status)5946 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5947 uint8_t *mgmt_status)
5948 {
5949 switch (type) {
5950 case DISCOV_TYPE_LE:
5951 *mgmt_status = mgmt_le_support(hdev);
5952 if (*mgmt_status)
5953 return false;
5954 break;
5955 case DISCOV_TYPE_INTERLEAVED:
5956 *mgmt_status = mgmt_le_support(hdev);
5957 if (*mgmt_status)
5958 return false;
5959 fallthrough;
5960 case DISCOV_TYPE_BREDR:
5961 *mgmt_status = mgmt_bredr_support(hdev);
5962 if (*mgmt_status)
5963 return false;
5964 break;
5965 default:
5966 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5967 return false;
5968 }
5969
5970 return true;
5971 }
5972
/* Completion callback shared by all start-discovery variants.
 *
 * Replies to the pending command (the first byte of cmd->param is the
 * requested discovery type) and moves the discovery state machine to
 * FINDING on success or back to STOPPED on failure.  Does nothing if
 * the request was cancelled or the command is no longer pending.
 */
static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_free(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
				DISCOVERY_FINDING);
}
5989
start_discovery_sync(struct hci_dev * hdev,void * data)5990 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5991 {
5992 if (!mgmt_pending_listed(hdev, data))
5993 return -ECANCELED;
5994
5995 return hci_start_discovery_sync(hdev);
5996 }
5997
/* Common handler for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY.
 *
 * Validates power state, an idle discovery state machine, the requested
 * discovery type and that discovery is not paused, then queues
 * hci_start_discovery_sync() and moves the state to STARTING.  Every
 * reply echoes the requested type byte.
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery session at a time; periodic inquiry also
	 * blocks discovery.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
6068
/* MGMT_OP_START_DISCOVERY handler: regular (non-limited) discovery */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
6075
/* MGMT_OP_START_LIMITED_DISCOVERY handler: limited discovery (only
 * devices advertising the limited discoverable mode are reported).
 */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
6083
/* Start Service Discovery (MGMT_OP_START_SERVICE_DISCOVERY).
 *
 * Like start_discovery_internal() but installs a result filter: an
 * RSSI threshold and an optional list of 128-bit service UUIDs that a
 * device must advertise to be reported.  The UUID list is copied into
 * hdev->discovery.uuids (freed via hci_discovery_filter_clear()).
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Largest uuid_count whose 16-byte entries still fit a u16 length */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* Bound uuid_count before computing expected_len so the u16
	 * multiplication below cannot overflow.
	 */
	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
6195
/* Completion callback for MGMT_OP_STOP_DISCOVERY.
 *
 * Replies to the pending command (the first byte of cmd->param is the
 * discovery type) and, on success, moves the discovery state machine
 * to STOPPED.  Does nothing if the request was cancelled or the
 * command is no longer pending.
 */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_free(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
6212
stop_discovery_sync(struct hci_dev * hdev,void * data)6213 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6214 {
6215 if (!mgmt_pending_listed(hdev, data))
6216 return -ECANCELED;
6217
6218 return hci_stop_discovery_sync(hdev);
6219 }
6220
/* Stop Discovery (MGMT_OP_STOP_DISCOVERY).
 *
 * Rejected when discovery is not active or the requested type does not
 * match the running session.  Otherwise queues
 * hci_stop_discovery_sync() and moves the state to STOPPING; the
 * reply is sent from stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The type must match the one used to start the session */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6265
/* Confirm Name (MGMT_OP_CONFIRM_NAME).
 *
 * During discovery, userspace tells the kernel whether the name of a
 * discovered device is already known.  Known: take the cache entry off
 * the name-resolution list; unknown: mark it NAME_NEEDED so the
 * resolve pass will query it.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only meaningful while a discovery session is running */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
6307
/* Block Device (MGMT_OP_BLOCK_DEVICE).
 *
 * Adds the address to the reject list and, on success, broadcasts the
 * Device Blocked event to all other mgmt sockets.
 */
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status = MGMT_STATUS_SUCCESS;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
				cp->addr.type) < 0)
		status = MGMT_STATUS_FAILED;
	else
		mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
6343
/* Unblock Device (MGMT_OP_UNBLOCK_DEVICE).
 *
 * Removes the address from the reject list and, on success, broadcasts
 * the Device Unblocked event to all other mgmt sockets.  An address
 * that was not on the list yields MGMT_STATUS_INVALID_PARAMS.
 */
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status = MGMT_STATUS_SUCCESS;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
				cp->addr.type) < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr,
			   sizeof(cp->addr), sk);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
6379
/* hci_cmd_sync work function: refresh the EIR data so it picks up the
 * Device ID record written by set_device_id().
 */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}
6384
/* Set Device ID (MGMT_OP_SET_DEVICE_ID).
 *
 * Stores the DI record (source, vendor, product, version) on the
 * adapter and queues an EIR refresh so it gets advertised.  Valid
 * sources are 0x0001 (Bluetooth SIG) and 0x0002 (USB IF); 0 clears
 * the record.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	__u16 source = __le16_to_cpu(cp->source);
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);

	hci_dev_unlock(hdev);

	return err;
}
6416
/* Log the outcome of re-enabling instance advertising. */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (!err) {
		bt_dev_dbg(hdev, "status %d", err);
		return;
	}

	bt_dev_err(hdev, "failed to re-configure advertising %d", err);
}
6424
/* cmd_sync completion for Set Advertising: commit the HCI_ADVERTISING
 * flag based on controller state, notify userspace and, when the
 * setting was just disabled, resume configured advertising instances.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	/* Nothing to do if the request was cancelled or the pending
	 * command is no longer valid.
	 */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, data))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, status);
		mgmt_pending_free(cmd);
		return;
	}

	/* Mirror the controller's actual LE advertising state */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	settings_rsp(cmd, &match);
	mgmt_pending_free(cmd);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	/* Fall back to the first configured instance when none is current */
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
6476
/* Runs from the cmd_sync context: apply the Set Advertising mode that
 * was captured in the pending command parameters.
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode cp;
	u8 val;

	/* Copy the parameters while holding mgmt_pending_lock; bail out
	 * if the command is no longer listed as pending.
	 */
	mutex_lock(&hdev->mgmt_pending_lock);

	if (!__mgmt_pending_listed(hdev, cmd)) {
		mutex_unlock(&hdev->mgmt_pending_lock);
		return -ECANCELED;
	}

	memcpy(&cp, cmd->param, sizeof(cp));

	mutex_unlock(&hdev->mgmt_pending_lock);

	val = !!cp.val;

	/* Value 0x02 selects connectable advertising */
	if (cp.val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
6523
/* Set Advertising command handler: 0x00 disables, 0x01 enables and
 * 0x02 enables connectable advertising.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Refuse while advertising is temporarily paused */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_dev_test_flag(hdev, HCI_MESH) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only emit New Settings if a flag actually changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one Set Advertising / Set LE operation at a time */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6608
/* Set Static Address command handler: stores the LE static random
 * address. Only permitted while the controller is powered off.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Changing the address while powered is rejected */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY clears the address; anything else must not be
	 * BDADDR_NONE and must have the two most significant bits set.
	 */
	if (bacmp(&cp->bdaddr, BDADDR_ANY) &&
	    (!bacmp(&cp->bdaddr, BDADDR_NONE) ||
	     (cp->bdaddr.b[5] & 0xc0) != 0xc0))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err >= 0)
		err = new_settings(hdev, sk);

	hci_dev_unlock(hdev);
	return err;
}
6652
/* Set Scan Parameters command handler: stores the LE scan interval and
 * window after range validation.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Keep allowed ranges in sync with set_mesh() */
	interval = __le16_to_cpu(cp->interval);
	window = __le16_to_cpu(cp->window);

	/* Both values must lie in 0x0004-0x4000 and the window cannot
	 * exceed the interval.
	 */
	if (interval < 0x0004 || interval > 0x4000 ||
	    window < 0x0004 || window > 0x4000 ||
	    window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED)
		hci_update_passive_scan(hdev);

	hci_dev_unlock(hdev);

	return err;
}
6702
fast_connectable_complete(struct hci_dev * hdev,void * data,int err)6703 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6704 {
6705 struct mgmt_pending_cmd *cmd = data;
6706
6707 bt_dev_dbg(hdev, "err %d", err);
6708
6709 if (err) {
6710 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6711 mgmt_status(err));
6712 } else {
6713 struct mgmt_mode *cp = cmd->param;
6714
6715 if (cp->val)
6716 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6717 else
6718 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6719
6720 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6721 new_settings(hdev, cmd->sk);
6722 }
6723
6724 mgmt_pending_free(cmd);
6725 }
6726
/* Runs from the cmd_sync context: write the fast connectable setting
 * captured in the pending command parameters to the controller.
 */
static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	return hci_write_fast_connectable_sync(hdev, cp->val);
}
6734
/* Set Fast Connectable command handler. */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Requires BR/EDR enabled and at least Bluetooth 1.2 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just echo the current settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* While powered off only the flag is toggled; no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6790
set_bredr_complete(struct hci_dev * hdev,void * data,int err)6791 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6792 {
6793 struct mgmt_pending_cmd *cmd = data;
6794
6795 bt_dev_dbg(hdev, "err %d", err);
6796
6797 if (err) {
6798 u8 mgmt_err = mgmt_status(err);
6799
6800 /* We need to restore the flag if related HCI commands
6801 * failed.
6802 */
6803 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6804
6805 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
6806 } else {
6807 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6808 new_settings(hdev, cmd->sk);
6809 }
6810
6811 mgmt_pending_free(cmd);
6812 }
6813
set_bredr_sync(struct hci_dev * hdev,void * data)6814 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6815 {
6816 int status;
6817
6818 status = hci_write_fast_connectable_sync(hdev, false);
6819
6820 if (!status)
6821 status = hci_update_scan_sync(hdev);
6822
6823 /* Since only the advertising data flags will change, there
6824 * is no need to update the scan response data.
6825 */
6826 if (!status)
6827 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6828
6829 return status;
6830 }
6831
/* Set BR/EDR command handler: toggles BR/EDR support on a dual-mode
 * controller. Disabling while powered on is rejected.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just echo the current settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears the flags that only make
		 * sense for BR/EDR operation.
		 */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6931
set_secure_conn_complete(struct hci_dev * hdev,void * data,int err)6932 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6933 {
6934 struct mgmt_pending_cmd *cmd = data;
6935 struct mgmt_mode *cp;
6936
6937 bt_dev_dbg(hdev, "err %d", err);
6938
6939 if (err) {
6940 u8 mgmt_err = mgmt_status(err);
6941
6942 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
6943 goto done;
6944 }
6945
6946 cp = cmd->param;
6947
6948 switch (cp->val) {
6949 case 0x00:
6950 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6951 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6952 break;
6953 case 0x01:
6954 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6955 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6956 break;
6957 case 0x02:
6958 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6959 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6960 break;
6961 }
6962
6963 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6964 new_settings(hdev, cmd->sk);
6965
6966 done:
6967 mgmt_pending_free(cmd);
6968 }
6969
/* Runs from the cmd_sync context: write the Secure Connections host
 * support setting to the controller.
 */
static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* Force write of val */
	hci_dev_set_flag(hdev, HCI_SC_ENABLED);

	return hci_write_sc_support_sync(hdev, val);
}
6981
/* Set Secure Connections command handler: 0x00 disables, 0x01 enables
 * and 0x02 selects SC-only mode.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* With BR/EDR enabled on an SC capable controller, SSP must be
	 * enabled first.
	 */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only the flags need updating when powered off, when BR/EDR is
	 * disabled or when the controller lacks SC support.
	 */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* No change requested: just echo the current settings */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
7062
/* Set Debug Keys command handler: 0x01 keeps debug keys in the key
 * store, 0x02 additionally enables SSP debug mode on the controller.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Update the controller's SSP debug mode when it is powered and
	 * SSP is enabled.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7109
/* Set Privacy command handler: enables or disables LE privacy and
 * stores the IRK supplied by user space. Only allowed while powered
 * off. Value 0x02 selects limited privacy.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7166
irk_is_valid(struct mgmt_irk_info * irk)7167 static bool irk_is_valid(struct mgmt_irk_info *irk)
7168 {
7169 switch (irk->addr.type) {
7170 case BDADDR_LE_PUBLIC:
7171 return true;
7172
7173 case BDADDR_LE_RANDOM:
7174 /* Two most significant bits shall be set */
7175 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7176 return false;
7177 return true;
7178 }
7179
7180 return false;
7181 }
7182
/* Load IRKs command handler: replaces the whole set of stored Identity
 * Resolving Keys with the list supplied by user space.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound so the total message length fits in u16 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared key count exactly */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate every entry before touching the existing key store */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		/* Administratively blocked keys are skipped, not stored */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	/* User space handling IRKs implies support for RPA resolving */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7253
ltk_is_valid(struct mgmt_ltk_info * key)7254 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7255 {
7256 if (key->initiator != 0x00 && key->initiator != 0x01)
7257 return false;
7258
7259 if (key->enc_size > sizeof(key->val))
7260 return false;
7261
7262 switch (key->addr.type) {
7263 case BDADDR_LE_PUBLIC:
7264 return true;
7265
7266 case BDADDR_LE_RANDOM:
7267 /* Two most significant bits shall be set */
7268 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7269 return false;
7270 return true;
7271 }
7272
7273 return false;
7274 }
7275
/* Load Long Term Keys command handler: replaces the whole set of
 * stored LTKs with the list supplied by user space.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound so the total message length fits in u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared key count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		/* Administratively blocked keys are skipped, not stored */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Invalid entries are skipped rather than failing the
		 * whole load.
		 */
		if (!ltk_is_valid(key)) {
			bt_dev_warn(hdev, "Invalid LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Debug keys and unknown types are never stored */
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
				NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7368
get_conn_info_complete(struct hci_dev * hdev,void * data,int err)7369 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7370 {
7371 struct mgmt_pending_cmd *cmd = data;
7372 struct hci_conn *conn = cmd->user_data;
7373 struct mgmt_cp_get_conn_info *cp = cmd->param;
7374 struct mgmt_rp_get_conn_info rp;
7375 u8 status;
7376
7377 bt_dev_dbg(hdev, "err %d", err);
7378
7379 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7380
7381 status = mgmt_status(err);
7382 if (status == MGMT_STATUS_SUCCESS) {
7383 rp.rssi = conn->rssi;
7384 rp.tx_power = conn->tx_power;
7385 rp.max_tx_power = conn->max_tx_power;
7386 } else {
7387 rp.rssi = HCI_RSSI_INVALID;
7388 rp.tx_power = HCI_TX_POWER_INVALID;
7389 rp.max_tx_power = HCI_TX_POWER_INVALID;
7390 }
7391
7392 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_GET_CONN_INFO, status,
7393 &rp, sizeof(rp));
7394
7395 mgmt_pending_free(cmd);
7396 }
7397
/* Runs from the cmd_sync context: refresh RSSI and TX power for the
 * connection referenced by the pending Get Connection Information
 * command.
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16 handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Stash the connection for the completion callback */
	cmd->user_data = conn;
	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
7435
/* Get Connection Information command handler: returns cached RSSI and
 * TX power values, refreshing them from the controller when the cache
 * has expired.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Look up on the transport matching the address type */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
						 hdev->conn_info_max_age - 1);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd) {
			err = -ENOMEM;
		} else {
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);
		}

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7526
/* Completion callback for MGMT_OP_GET_CLOCK_INFO: build the reply from the
 * hdev's local clock and, if a connection was involved, its piconet clock,
 * then complete and free the pending command.
 */
static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct mgmt_rp_get_clock_info rp;
	struct hci_conn *conn = cmd->user_data;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Reply always echoes the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* On failure the clock fields stay zeroed */
	if (err)
		goto complete;

	rp.local_clock = cpu_to_le32(hdev->clock);

	/* conn is only set when a piconet clock was requested/read */
	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status, &rp,
			  sizeof(rp));

	mgmt_pending_free(cmd);
}
7557
/* hci_cmd_sync work for MGMT_OP_GET_CLOCK_INFO.
 *
 * First issues HCI Read Clock with a zeroed command (handle 0; presumably
 * "which" 0x00 selects the local clock — the 0x01 value below is labelled
 * piconet), ignoring its result, then re-issues it for the piconet clock of
 * the requested connection.
 */
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn;

	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_read_clock_sync(hdev, &hci_cp);

	/* Make sure connection still exists */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Remember the connection for get_clock_info_complete */
	cmd->user_data = conn;
	hci_cp.handle = cpu_to_le16(conn->handle);
	hci_cp.which = 0x01; /* Piconet clock */

	return hci_read_clock_sync(hdev, &hci_cp);
}
7579
/* MGMT_OP_GET_CLOCK_INFO handler: read local and (optionally) piconet
 * clock.  Only BR/EDR addresses are accepted; BDADDR_ANY requests the
 * local clock alone.  Completes asynchronously via
 * get_clock_info_complete.
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Every reply echoes the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-ANY address must refer to an existing, connected ACL link */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	/* Queueing failed: fail the command and drop the pending entry */
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);
	}


unlock:
	hci_dev_unlock(hdev);
	return err;
}
7643
/* Broadcast a MGMT_EV_DEVICE_ADDED event for @bdaddr to all management
 * sockets except @sk (the originator of the command).
 */
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev;

	ev.action = action;
	ev.addr.type = type;
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}
7655
/* Completion callback for MGMT_OP_ADD_DEVICE (LE path): on success emit
 * Device Added and Device Flags Changed events, then complete the command
 * with the address that was added.
 */
static void add_device_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_device *cp = cmd->param;

	if (!err) {
		struct hci_conn_params *params;

		/* Params may have been removed meanwhile; flags fall back
		 * to 0 if the lookup fails.
		 */
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));

		device_added(cmd->sk, hdev, &cp->addr.bdaddr, cp->addr.type,
			     cp->action);
		device_flags_changed(NULL, hdev, &cp->addr.bdaddr,
				     cp->addr.type, hdev->conn_flags,
				     params ? params->flags : 0);
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE,
			  mgmt_status(err), &cp->addr, sizeof(cp->addr));
	mgmt_pending_free(cmd);
}
7678
/* hci_cmd_sync work for Add Device: refresh the passive scan state so the
 * newly added connection parameters take effect.
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7683
/* MGMT_OP_ADD_DEVICE handler.
 *
 * action: 0x00 = background scan for device (LE report),
 *         0x01 = allow incoming connection (BR/EDR) / direct connect (LE),
 *         0x02 = auto-connect remote device (LE only).
 *
 * BR/EDR devices go on the accept list and complete synchronously; LE
 * devices get hci_conn_params and complete asynchronously via
 * add_device_complete after a passive-scan update.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Reject invalid address types and the wildcard address */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		/* Accept list changed; refresh page scan */
		hci_update_scan(hdev);

		goto added;
	}

	/* --- LE path --- */
	addr_type = le_addr_type(cp->addr.type);

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	params = hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				     auto_conn);
	if (!params) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, add_device_sync, cmd,
				 add_device_complete);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		mgmt_pending_free(cmd);
	}

	goto unlock;

added:
	/* BR/EDR synchronous completion: emit events and reply now */
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7795
/* Broadcast a MGMT_EV_DEVICE_REMOVED event for @bdaddr to all management
 * sockets except @sk (the originator of the command).
 */
static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev;

	ev.addr.type = type;
	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}
7806
/* hci_cmd_sync work for Remove Device: refresh the passive scan state so
 * the removed connection parameters stop taking effect.
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7811
/* MGMT_OP_REMOVE_DEVICE handler.
 *
 * A specific address removes that device: BR/EDR from the accept list,
 * LE by freeing its hci_conn_params.  BDADDR_ANY (with type 0) clears the
 * whole accept list and all removable LE connection parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* --- remove one specific device --- */
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			/* Accept list changed; refresh page scan */
			hci_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Disabled/explicit params were not created via Add Device,
		 * so refuse to remove them here.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_free(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* --- BDADDR_ANY: remove all devices --- */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Wildcard removal requires address type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			/* Disabled params are not externally visible */
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep params with a pending explicit connect,
			 * demoting them back to explicit-only.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			hci_conn_params_free(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	/* Re-evaluate passive scanning now that params changed */
	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7935
conn_update_sync(struct hci_dev * hdev,void * data)7936 static int conn_update_sync(struct hci_dev *hdev, void *data)
7937 {
7938 struct hci_conn_params *params = data;
7939 struct hci_conn *conn;
7940
7941 conn = hci_conn_hash_lookup_le(hdev, ¶ms->addr, params->addr_type);
7942 if (!conn)
7943 return -ECANCELED;
7944
7945 return hci_le_conn_update_sync(hdev, conn, params);
7946 }
7947
/* MGMT_OP_LOAD_CONN_PARAM handler: bulk-load LE connection parameters.
 *
 * Validates the variable-length parameter list, stores each valid entry in
 * hci_conn_params, and — when a single existing entry is reloaded — may
 * trigger the LE Connection Update procedure on a live central connection.
 * Invalid individual entries are logged and skipped, not fatal.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound that keeps struct_size() below within u16 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* Declared count must match the actual payload length exactly */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	/* Multi-entry load replaces state: drop disabled leftovers first */
	if (param_count > 1)
		hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		bool update = false;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		/* Detect when the loading is for an existing parameter then
		 * attempt to trigger the connection update procedure.
		 */
		if (!i && param_count == 1) {
			hci_param = hci_conn_params_lookup(hdev,
							   &param->addr.bdaddr,
							   addr_type);
			if (hci_param)
				update = true;
			else
				hci_conn_params_clear_disabled(hdev);
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;

		/* Check if we need to trigger a connection update */
		if (update) {
			struct hci_conn *conn;

			/* Lookup for existing connection as central and check
			 * if parameters match and if they don't then trigger
			 * a connection update.
			 */
			conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr,
						       addr_type);
			if (conn && conn->role == HCI_ROLE_MASTER &&
			    (conn->le_conn_min_interval != min ||
			     conn->le_conn_max_interval != max ||
			     conn->le_conn_latency != latency ||
			     conn->le_supv_timeout != timeout))
				hci_cmd_sync_queue(hdev, conn_update_sync,
						   hci_param, NULL);
		}
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
8066
/* MGMT_OP_SET_EXTERNAL_CONFIG handler: toggle the HCI_EXT_CONFIGURED flag
 * on a controller that declares HCI_QUIRK_EXTERNAL_CONFIG.  Only allowed
 * while powered off.  If the change flips the configured state, the index
 * is re-registered on the other (configured/unconfigured) interface.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* changed is true only when the flag actually flipped */
	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the configured state flipped, move the index between the
	 * configured and unconfigured lists.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
8122
/* MGMT_OP_SET_PUBLIC_ADDRESS handler: store a public address to be
 * programmed via the driver's set_bdaddr hook.  Only allowed while powered
 * off, and only on controllers providing set_bdaddr.  If the change makes
 * the controller configured, it is moved to the configured index list and
 * powered on for configuration.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* The wildcard address cannot be a public address */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
8174
/* Completion callback for MGMT_OP_READ_LOCAL_OOB_EXT_DATA (BR/EDR path).
 *
 * Parses the controller's Read Local OOB (Extended) Data reply from
 * cmd->skb, builds the EIR-encoded hash/randomizer blocks, completes the
 * command and broadcasts Local OOB Data Updated to subscribed sockets.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* Derive status from the HCI reply if the sync call itself passed */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		/* NOTE(review): status already went through mgmt_status()
		 * above, so this second mapping looks redundant — confirm
		 * before changing, as the value ranges overlap.
		 */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Legacy SSP: only the C192/R192 pair is available */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* class-of-device TLV + two 16-byte TLVs */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Secure Connections: C256/R256, plus C192/R192 unless
		 * the controller is SC-only.
		 */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* On failure (eir_len == 0) skip EIR encoding entirely */
	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Subscribe the requester before broadcasting the update event */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_free(cmd);
}
8294
read_local_ssp_oob_req(struct hci_dev * hdev,struct sock * sk,struct mgmt_cp_read_local_oob_ext_data * cp)8295 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8296 struct mgmt_cp_read_local_oob_ext_data *cp)
8297 {
8298 struct mgmt_pending_cmd *cmd;
8299 int err;
8300
8301 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8302 cp, sizeof(*cp));
8303 if (!cmd)
8304 return -ENOMEM;
8305
8306 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8307 read_local_oob_ext_data_complete);
8308
8309 if (err < 0) {
8310 mgmt_pending_remove(cmd);
8311 return err;
8312 }
8313
8314 return 0;
8315 }
8316
/* MGMT_OP_READ_LOCAL_OOB_EXT_DATA handler.
 *
 * cp->type selects the transport: BIT(BDADDR_BREDR) for BR/EDR (may defer
 * to an async controller read via read_local_ssp_oob_req) or the two LE
 * bits together for LE (built synchronously from SMP OOB material, the
 * local address, role and flags).  The reply carries EIR-encoded data.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* First pass: determine status and worst-case EIR length so the
	 * reply buffer can be sized before taking the lock.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: actually build the EIR data */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* SSP enabled: OOB data must come from the
			 * controller, so complete asynchronously.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* addr[6] encodes the address type: 0x01 random (static),
		 * 0x00 public.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* 0x02 = peripheral preferred when advertising, else 0x01 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* Subscribe the requester to further OOB data updates */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
8477
get_supported_adv_flags(struct hci_dev * hdev)8478 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8479 {
8480 u32 flags = 0;
8481
8482 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8483 flags |= MGMT_ADV_FLAG_DISCOV;
8484 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8485 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8486 flags |= MGMT_ADV_FLAG_APPEARANCE;
8487 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8488 flags |= MGMT_ADV_PARAM_DURATION;
8489 flags |= MGMT_ADV_PARAM_TIMEOUT;
8490 flags |= MGMT_ADV_PARAM_INTERVALS;
8491 flags |= MGMT_ADV_PARAM_TX_POWER;
8492 flags |= MGMT_ADV_PARAM_SCAN_RSP;
8493
8494 /* In extended adv TX_POWER returned from Set Adv Param
8495 * will be always valid.
8496 */
8497 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8498 flags |= MGMT_ADV_FLAG_TX_POWER;
8499
8500 if (ext_adv_capable(hdev)) {
8501 flags |= MGMT_ADV_FLAG_SEC_1M;
8502 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8503 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8504
8505 if (le_2m_capable(hdev))
8506 flags |= MGMT_ADV_FLAG_SEC_2M;
8507
8508 if (le_coded_capable(hdev))
8509 flags |= MGMT_ADV_FLAG_SEC_CODED;
8510 }
8511
8512 return flags;
8513 }
8514
/* MGMT_OP_READ_ADV_FEATURES handler: report supported advertising flags,
 * data-length limits and the list of configured advertising instances.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One trailing byte per advertising instance */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = max_adv_len(hdev);
	rp->max_scan_rsp_len = max_adv_len(hdev);
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		/* Only instances 1-le_num_of_adv_sets are externally visible */
		/* NOTE(review): the comment above mentions le_num_of_adv_sets
		 * but the check below uses adv_instance_cnt — confirm which
		 * bound is intended.
		 */
		if (adv_instance->instance <= hdev->adv_instance_cnt) {
			*instance = adv_instance->instance;
			instance++;
		} else {
			/* Hidden instance: shrink the reply accordingly */
			rp->num_instances--;
			rp_len--;
		}
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
8569
calculate_name_len(struct hci_dev * hdev)8570 static u8 calculate_name_len(struct hci_dev *hdev)
8571 {
8572 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
8573
8574 return eir_append_local_name(hdev, buf, 0);
8575 }
8576
tlv_data_max_len(struct hci_dev * hdev,u32 adv_flags,bool is_adv_data)8577 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8578 bool is_adv_data)
8579 {
8580 u8 max_len = max_adv_len(hdev);
8581
8582 if (is_adv_data) {
8583 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8584 MGMT_ADV_FLAG_LIMITED_DISCOV |
8585 MGMT_ADV_FLAG_MANAGED_FLAGS))
8586 max_len -= 3;
8587
8588 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8589 max_len -= 3;
8590 } else {
8591 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8592 max_len -= calculate_name_len(hdev);
8593
8594 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8595 max_len -= 4;
8596 }
8597
8598 return max_len;
8599 }
8600
flags_managed(u32 adv_flags)8601 static bool flags_managed(u32 adv_flags)
8602 {
8603 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8604 MGMT_ADV_FLAG_LIMITED_DISCOV |
8605 MGMT_ADV_FLAG_MANAGED_FLAGS);
8606 }
8607
tx_power_managed(u32 adv_flags)8608 static bool tx_power_managed(u32 adv_flags)
8609 {
8610 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8611 }
8612
name_managed(u32 adv_flags)8613 static bool name_managed(u32 adv_flags)
8614 {
8615 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8616 }
8617
appearance_managed(u32 adv_flags)8618 static bool appearance_managed(u32 adv_flags)
8619 {
8620 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8621 }
8622
/* Validate a TLV-formatted advertising or scan response payload.
 *
 * Checks that the payload fits within the capacity left by the managed
 * flags, that every length-prefixed field lies entirely inside the
 * buffer, and that no field duplicates an AD type the kernel manages
 * itself (Flags, TX Power, Local Name, Appearance).
 *
 * Returns true if the data may be accepted from userspace.
 */
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	int i, cur_len;
	u8 max_len;

	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		if (!cur_len)
			continue;

		/* If the current field length would exceed the total data
		 * length, then it's invalid. This must be checked before
		 * reading data[i + 1] so that a truncated field at the very
		 * end of the buffer cannot cause an out-of-bounds read of
		 * the type octet. A failing field was rejected either way,
		 * so the set of accepted payloads is unchanged.
		 */
		if (i + cur_len >= len)
			return false;

		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))
			return false;

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))
			return false;
	}

	return true;
}
8667
requested_adv_flags_are_valid(struct hci_dev * hdev,u32 adv_flags)8668 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8669 {
8670 u32 supported_flags, phy_flags;
8671
8672 /* The current implementation only supports a subset of the specified
8673 * flags. Also need to check mutual exclusiveness of sec flags.
8674 */
8675 supported_flags = get_supported_adv_flags(hdev);
8676 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8677 if (adv_flags & ~supported_flags ||
8678 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8679 return false;
8680
8681 return true;
8682 }
8683
adv_busy(struct hci_dev * hdev)8684 static bool adv_busy(struct hci_dev *hdev)
8685 {
8686 return pending_find(MGMT_OP_SET_LE, hdev);
8687 }
8688
/* Resolve all pending advertising instances after an add operation.
 *
 * On success every pending instance is marked committed; on failure each
 * pending instance is torn down and an Advertising Removed event is sent
 * to everyone except @sk (the originating socket).
 *
 * Fix: the loop-local counter previously also named "instance" shadowed
 * the @instance parameter (-Wshadow); it is renamed to rem_instance.
 * Behavior is unchanged.
 */
static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
			     int err)
{
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 rem_instance;

		if (!adv->pending)
			continue;

		/* Success: the instance is now committed */
		if (!err) {
			adv->pending = false;
			continue;
		}

		rem_instance = adv->instance;

		/* Stop the rotation timer if it targets this instance */
		if (hdev->cur_adv_instance == rem_instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, rem_instance);
		mgmt_advertising_removed(sk, hdev, rem_instance);
	}

	hci_dev_unlock(hdev);
}
8720
add_advertising_complete(struct hci_dev * hdev,void * data,int err)8721 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8722 {
8723 struct mgmt_pending_cmd *cmd = data;
8724 struct mgmt_cp_add_advertising *cp = cmd->param;
8725 struct mgmt_rp_add_advertising rp;
8726
8727 memset(&rp, 0, sizeof(rp));
8728
8729 rp.instance = cp->instance;
8730
8731 if (err)
8732 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
8733 mgmt_status(err));
8734 else
8735 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
8736 mgmt_status(err), &rp, sizeof(rp));
8737
8738 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8739
8740 mgmt_pending_free(cmd);
8741 }
8742
add_advertising_sync(struct hci_dev * hdev,void * data)8743 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8744 {
8745 struct mgmt_pending_cmd *cmd = data;
8746 struct mgmt_cp_add_advertising *cp = cmd->param;
8747
8748 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8749 }
8750
/* Handle MGMT_OP_ADD_ADVERTISING: register (or overwrite) an advertising
 * instance carrying both advertising and scan response data, and start or
 * reschedule advertising when that is possible right now.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Instance ids are 1-based and bounded by the controller's sets */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Variable-length command: the total size must match the declared
	 * advertising and scan response data lengths exactly.
	 */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs the rotation timer, which requires power */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Validate both TLV payloads: adv data first, then scan response */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Steer the deferred work at the instance actually being scheduled;
	 * the reply to userspace above already used the original value.
	 */
	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8885
/* Completion callback for Add Extended Advertising Parameters. On success
 * it reports the selected TX power and the remaining data capacity back to
 * userspace; on failure it tears the instance down again.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Instance may already be gone; nothing to report then */
	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	/* NOTE(review): cmd is freed while hdev->lock is still held;
	 * preserved as-is since reordering would change lock coverage.
	 */
	mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}
8935
add_ext_adv_params_sync(struct hci_dev * hdev,void * data)8936 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8937 {
8938 struct mgmt_pending_cmd *cmd = data;
8939 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8940
8941 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8942 }
8943
/* Handle MGMT_OP_ADD_EXT_ADV_PARAMS: create an advertising instance with
 * parameters only; the data is supplied later via Add Extended Advertising
 * Data. Splitting params and data keeps the interface extensible.
 */
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	/* Instance ids are 1-based and bounded by the controller's sets */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			/* Undo the instance created above */
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		/* Legacy controller: nothing to program yet, reply directly */
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9059
add_ext_adv_data_complete(struct hci_dev * hdev,void * data,int err)9060 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
9061 {
9062 struct mgmt_pending_cmd *cmd = data;
9063 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
9064 struct mgmt_rp_add_advertising rp;
9065
9066 add_adv_complete(hdev, cmd->sk, cp->instance, err);
9067
9068 memset(&rp, 0, sizeof(rp));
9069
9070 rp.instance = cp->instance;
9071
9072 if (err)
9073 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
9074 mgmt_status(err));
9075 else
9076 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
9077 mgmt_status(err), &rp, sizeof(rp));
9078
9079 mgmt_pending_free(cmd);
9080 }
9081
add_ext_adv_data_sync(struct hci_dev * hdev,void * data)9082 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
9083 {
9084 struct mgmt_pending_cmd *cmd = data;
9085 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
9086 int err;
9087
9088 if (ext_adv_capable(hdev)) {
9089 err = hci_update_adv_data_sync(hdev, cp->instance);
9090 if (err)
9091 return err;
9092
9093 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
9094 if (err)
9095 return err;
9096
9097 return hci_enable_ext_advertising_sync(hdev, cp->instance);
9098 }
9099
9100 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
9101 }
9102
/* Handle MGMT_OP_ADD_EXT_ADV_DATA: attach advertising and scan response
 * data to an instance previously created by Add Extended Advertising
 * Parameters, then schedule it. On failure the half-built instance is
 * removed again (clear_new_instance path).
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance must have been created by Add Ext Adv Params first */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		/* First successful data set commits a pending instance */
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	/* Roll back the instance created by Add Ext Adv Params */
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9221
remove_advertising_complete(struct hci_dev * hdev,void * data,int err)9222 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9223 int err)
9224 {
9225 struct mgmt_pending_cmd *cmd = data;
9226 struct mgmt_cp_remove_advertising *cp = cmd->param;
9227 struct mgmt_rp_remove_advertising rp;
9228
9229 bt_dev_dbg(hdev, "err %d", err);
9230
9231 memset(&rp, 0, sizeof(rp));
9232 rp.instance = cp->instance;
9233
9234 if (err)
9235 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
9236 mgmt_status(err));
9237 else
9238 mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
9239 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9240
9241 mgmt_pending_free(cmd);
9242 }
9243
remove_advertising_sync(struct hci_dev * hdev,void * data)9244 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9245 {
9246 struct mgmt_pending_cmd *cmd = data;
9247 struct mgmt_cp_remove_advertising *cp = cmd->param;
9248 int err;
9249
9250 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9251 if (err)
9252 return err;
9253
9254 if (list_empty(&hdev->adv_instances))
9255 err = hci_disable_advertising_sync(hdev);
9256
9257 return err;
9258 }
9259
/* Handle MGMT_OP_REMOVE_ADVERTISING: remove one instance (cp->instance)
 * or all instances (cp->instance == 0) via the deferred sync machinery.
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* A non-zero instance must actually exist; zero means "all" */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* A pending Set LE command blocks advertising changes */
	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing configured, nothing to remove */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9307
/* Handle MGMT_OP_GET_ADV_SIZE_INFO: report how much advertising and scan
 * response space remains for a given instance and flag combination.
 */
static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	/* Only a subset of the defined flags is implemented; reject others */
	if (flags & ~get_supported_adv_flags(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	rp.instance = cp->instance;
	rp.flags = cp->flags;
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}
9343
/* Command dispatch table: entries are indexed by mgmt opcode, so each
 * handler's position MUST equal its MGMT_OP_* value — never reorder.
 * The size field is the exact expected parameter length, or the minimum
 * length when HCI_MGMT_VAR_LEN is set.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,         MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,      MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,          MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,        MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,     MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
	{ mgmt_hci_cmd_sync,       MGMT_HCI_CMD_SYNC_SIZE, HCI_MGMT_VAR_LEN },
};
9478
mgmt_index_added(struct hci_dev * hdev)9479 void mgmt_index_added(struct hci_dev *hdev)
9480 {
9481 struct mgmt_ev_ext_index ev;
9482
9483 if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE))
9484 return;
9485
9486 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9487 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
9488 HCI_MGMT_UNCONF_INDEX_EVENTS);
9489 ev.type = 0x01;
9490 } else {
9491 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9492 HCI_MGMT_INDEX_EVENTS);
9493 ev.type = 0x00;
9494 }
9495
9496 ev.bus = hdev->bus;
9497
9498 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9499 HCI_MGMT_EXT_INDEX_EVENTS);
9500 }
9501
/* Announce controller removal: fail all pending commands with
 * INVALID_INDEX, emit the legacy and extended Index Removed events and
 * cancel any management-owned delayed work.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };

	/* Raw devices are invisible to the management interface */
	if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE))
		return;

	/* Complete every pending command (opcode 0 = all) before the
	 * index disappears from userspace's point of view.
	 */
	mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;
	} else {
		mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);

	/* Cancel any remaining timed work */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->service_cache);
	cancel_delayed_work_sync(&hdev->rpa_expired);
	cancel_delayed_work_sync(&hdev->mesh_send_done);
}
9535
/* Handle completion of powering on a controller.
 *
 * @err: 0 on success, negative errno otherwise. On success, stored LE
 * auto-connect actions and passive scanning are re-armed. In either
 * case all pending Set Powered commands are completed and a New
 * Settings event is emitted.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	/* settings_rsp also records the first originating socket in
	 * match.sk so it can be skipped when broadcasting New Settings.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
9559
/* Handle the controller having been powered off.
 *
 * Completes pending Set Powered commands, fails all other pending
 * commands with an appropriate status, announces a zero Class of
 * Device if one was set, and emits New Settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
			     &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		match.mgmt_status = MGMT_STATUS_INVALID_INDEX;
	else
		match.mgmt_status = MGMT_STATUS_NOT_POWERED;

	/* Opcode 0 matches all remaining pending commands */
	mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);

	/* Report the Class of Device as all-zero while powered off */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9594
mgmt_set_powered_failed(struct hci_dev * hdev,int err)9595 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9596 {
9597 struct mgmt_pending_cmd *cmd;
9598 u8 status;
9599
9600 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9601 if (!cmd)
9602 return;
9603
9604 if (err == -ERFKILL)
9605 status = MGMT_STATUS_RFKILLED;
9606 else
9607 status = MGMT_STATUS_FAILED;
9608
9609 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9610
9611 mgmt_pending_remove(cmd);
9612 }
9613
/* Emit a New Link Key event for a BR/EDR link key.
 *
 * @persistent: store hint telling user space whether the key should
 * survive a restart.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	ev.key.addr.type = BDADDR_BREDR;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.type = key->type;
	ev.key.pin_len = key->pin_len;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
9630
mgmt_ltk_type(struct smp_ltk * ltk)9631 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9632 {
9633 switch (ltk->type) {
9634 case SMP_LTK:
9635 case SMP_LTK_RESPONDER:
9636 if (ltk->authenticated)
9637 return MGMT_LTK_AUTHENTICATED;
9638 return MGMT_LTK_UNAUTHENTICATED;
9639 case SMP_LTK_P256:
9640 if (ltk->authenticated)
9641 return MGMT_LTK_P256_AUTH;
9642 return MGMT_LTK_P256_UNAUTH;
9643 case SMP_LTK_P256_DEBUG:
9644 return MGMT_LTK_P256_DEBUG;
9645 }
9646
9647 return MGMT_LTK_UNAUTHENTICATED;
9648 }
9649
/* Emit a New Long Term Key event for an SMP LTK. */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		/* b[5] is the MSB: both top bits set marks a static
		 * random address; anything else is not identity-stable.
		 */
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK means we were the pairing initiator */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
9692
/* Emit a New IRK event so user space can store the identity resolving
 * key together with the RPA it was distributed under.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.rpa, &irk->rpa);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
9708
/* Emit a New CSRK event for an SMP signature resolving key. */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		/* Top two bits of the address MSB not both set: not a
		 * static random address, so don't hint storage.
		 */
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
9738
/* Emit a New Connection Parameter event so user space can store the
 * negotiated LE connection parameters for this peer. Intervals,
 * latency and timeout are converted to little endian for the wire.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	/* Parameters are only worth storing for identity addresses */
	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
9759
/* Emit a Device Connected event for a newly established connection.
 *
 * @name/@name_len: optional remote name to embed as EIR data (BR/EDR
 * path only). Guarded by HCI_CONN_MGMT_CONNECTED so the event is sent
 * at most once per connection.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	/* Only notify once per connection */
	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

	/* allocate buff for LE or BR/EDR adv */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	/* Flag locally initiated connections */
	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);

		/* Only include the Class of Device if it is non-zero */
		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class, sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}
9812
/* Pending-command iterator callback: announce Device Unpaired for a
 * pending Unpair Device command and complete it with success.
 * @data: the hci_dev the iteration runs over.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
}
9822
mgmt_powering_down(struct hci_dev * hdev)9823 bool mgmt_powering_down(struct hci_dev *hdev)
9824 {
9825 struct mgmt_pending_cmd *cmd;
9826 struct mgmt_mode *cp;
9827
9828 if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
9829 return true;
9830
9831 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9832 if (!cmd)
9833 return false;
9834
9835 cp = cmd->param;
9836 if (!cp->val)
9837 return true;
9838
9839 return false;
9840 }
9841
/* Emit a Device Disconnected event.
 *
 * @mgmt_connected: whether a Device Connected event was previously
 * sent for this link; if not, no disconnect event is due either.
 * Only ACL, LE and BIS links are reported.
 *
 * Fix: the previous version declared a local socket pointer that was
 * initialized to NULL and never assigned, making its sock_put() branch
 * dead code. The event is broadcast with no socket skipped.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK &&
	    link_type != LE_LINK &&
	    link_type != BIS_LINK)
		return;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), NULL);
}
9870
/* Handle a failed HCI Disconnect.
 *
 * Pending Unpair Device commands are completed regardless (the unpair
 * itself proceeded); a pending Disconnect command is only completed if
 * its address and type match the failed link.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, true,
			     unpair_device_rsp, hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only answer the pending command that targeted this peer */
	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9896
/* Report a failed connection attempt.
 *
 * If a Device Connected event had already been sent for this conn,
 * user space expects a disconnect instead of a connect failure, so a
 * Device Disconnected event is emitted (with the raw HCI status in
 * the reason field). Otherwise a Connect Failed event is sent.
 */
void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		mgmt_device_disconnected(hdev, &conn->dst, conn->type,
					 conn->dst_type, status, true);
		return;
	}

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
9913
/* Forward a PIN code request from the controller to user space.
 * @secure: whether a 16-digit (secure) PIN is required.
 */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev = {
		.addr.type = BDADDR_BREDR,
		.secure = secure,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
9924
/* Complete a pending PIN Code Reply command with the HCI status. */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd = pending_find(MGMT_OP_PIN_CODE_REPLY,
						    hdev);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9937
/* Complete a pending PIN Code Negative Reply command with the HCI
 * status.
 */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY,
						    hdev);

	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9950
/* Ask user space to confirm a pairing numeric comparison value.
 * @confirm_hint: non-zero when only a yes/no confirmation (without a
 * displayed value) is expected.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9967
/* Ask user space to enter a passkey for pairing. */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9981
/* Common completion path for the four user pairing response commands.
 * Returns -ENOENT when no command with @opcode is pending, 0 otherwise.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd = pending_find(opcode, hdev);

	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}
9997
/* Complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
10004
/* Complete a pending User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
10012
/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
10019
/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
10027
/* Notify user space of the passkey to display to the user.
 * @entered: number of digits the remote side has typed so far.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	ev.entered = entered;
	ev.passkey = __cpu_to_le32(passkey);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	bacpy(&ev.addr.bdaddr, bdaddr);

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
10043
/* Report an authentication failure.
 *
 * Emits an Auth Failed event (skipping the socket of the pairing
 * originator, if any) and completes the pending Pair Device command
 * associated with this connection.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* The originator learns the failure via the command response,
	 * so skip its socket for the broadcast event.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
10064
/* Handle completion of enabling/disabling link-level authentication.
 *
 * On failure, all pending Set Link Security commands are failed with
 * the translated status. On success, the HCI_LINK_SECURITY flag is
 * synced to the HCI_AUTH state and New Settings is emitted only when
 * the flag actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's auth state into the mgmt flag and
	 * record whether anything changed.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
10091
sk_lookup(struct mgmt_pending_cmd * cmd,void * data)10092 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
10093 {
10094 struct cmd_lookup *match = data;
10095
10096 if (match->sk == NULL) {
10097 match->sk = cmd->sk;
10098 sock_hold(match->sk);
10099 }
10100 }
10101
/* Handle completion of a Class of Device update.
 *
 * Finds the socket of whichever command triggered the update (Set Dev
 * Class, Add UUID or Remove UUID) so it can be skipped when
 * broadcasting, and on success announces the new class.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Non-removing lookups: the commands stay pending, we only
	 * want the originating socket.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, false, sk_lookup,
			     &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, false, sk_lookup,
			     &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, false, sk_lookup,
			     &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
10123
/* Handle completion of a local name update.
 *
 * Emits a Local Name Changed event unless the change is part of
 * controller power-up/power-down processing rather than an explicit
 * Set Local Name command.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No pending command: the name came from elsewhere, so
		 * cache it on the hdev.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this name change is part of power-up or power-down
		 * processing of the controller don't send any mgmt
		 * signals.
		 */
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
			return;

		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
10154
has_uuid(u8 * uuid,u16 uuid_count,u8 (* uuids)[16])10155 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10156 {
10157 int i;
10158
10159 for (i = 0; i < uuid_count; i++) {
10160 if (!memcmp(uuid, uuids[i], 16))
10161 return true;
10162 }
10163
10164 return false;
10165 }
10166
/* Check whether any UUID advertised in EIR data matches one of the
 * filter UUIDs.
 *
 * Each EIR field is [len][type][data...], where len counts the type
 * byte plus the data. 16- and 32-bit UUIDs are expanded into full
 * 128-bit UUIDs using the Bluetooth base UUID before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero length field terminates the EIR data */
		if (field_len == 0)
			break;

		/* Stop on a field that would run past the buffer */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* Data starts at eir[2]; i + 3 <= field_len keeps
			 * both bytes of each 16-bit UUID in bounds.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* 128-bit UUIDs are compared verbatim */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length byte + contents) */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
10221
/* Decide whether a discovery result passes the active service
 * discovery filter (RSSI threshold and/or UUID list).
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !hci_test_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. Both the advertising data
		 * and the scan response are searched.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (hci_test_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER)) {
		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
10264
/* Notify user space that a device previously matched by the given
 * Advertisement Monitor is no longer being seen.
 */
void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev = {
		.monitor_handle = cpu_to_le16(handle),
		.addr.type = addr_type,
	};

	bacpy(&ev.addr.bdaddr, bdaddr);

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}
10277
/* Emit an Adv Monitor Device Found event built from an existing
 * Device Found skb.
 *
 * @skb: a fully populated DEVICE_FOUND event payload; it is copied,
 * not consumed. @handle: the matched monitor handle (0 = none).
 */
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	/* New event size = DEVICE_FOUND payload plus the extra fields
	 * (the monitor handle) that ADV_MONITOR_DEVICE_FOUND adds.
	 */
	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}
10307
/* Route a Device Found skb to the plain and/or Advertisement Monitor
 * event paths.
 *
 * Consumes @skb on every path: it is either sent via mgmt_event_skb()
 * or freed. @report_device indicates a regular DEVICE_FOUND event
 * should be sent (active discovery or passive scan reports).
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		/* Fast path: no monitor notifications pending */
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			/* Notify each monitored device at most once */
			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		/* Keep the pending flag set while any device still
		 * awaits its first notification.
		 */
		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
10371
/* Emit a Mesh Device Found event for an LE advertisement that carries
 * at least one of the AD types the mesh receiver registered interest
 * in (hdev->mesh_ad_types). An empty filter list accepts everything.
 */
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	/* No filter configured: accept all advertisements */
	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		/* eir[i] is the field length, eir[i + 1] the AD type */
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				/* A zero entry terminates the filter list */
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	/* No requested AD type present: drop silently */
	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}
10437
/* Process a discovered device (inquiry result or LE advertisement)
 * and emit the appropriate mgmt Device Found events.
 *
 * Applies the discovery filter (RSSI/UUIDs), the limited-discovery
 * check, feeds mesh receivers, and finally hands the built event to
 * the Advertisement Monitor routing logic.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	/* Mesh receivers get their own copy of LE advertisements */
	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			/* BR/EDR: bit 5 of the second CoD byte */
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			/* LE: look for the limited flag in the AD Flags field */
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Append the Class of Device as a synthesized EIR field unless
	 * the EIR data already contained one.
	 */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	/* Hands off the skb (sent or freed) to the advmon routing */
	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
10529
/* mgmt_remote_name - emit a Device Found event carrying a resolved remote
 * name, or flag the name request as failed when @name is NULL.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	struct sk_buff *skb;
	u16 name_eir_len = 0;
	u32 ev_flags = 0;
	size_t extra = name ? eir_precalc_len(name_len) : 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND, sizeof(*ev) + extra);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (!name)
		ev_flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
	else
		/* Append the complete name as an EIR field */
		name_eir_len = eir_skb_put_data(skb, EIR_NAME_COMPLETE,
						name, name_len);

	ev->eir_len = cpu_to_le16(name_eir_len);
	ev->flags = cpu_to_le32(ev_flags);

	mgmt_event_skb(skb, NULL);
}
10558
/* mgmt_discovering - notify userspace that discovery started or stopped. */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	/* Designated initializer zeroes any remaining bytes, matching a
	 * memset followed by field assignments.
	 */
	struct mgmt_ev_discovering ev = {
		.type = hdev->discovery.type,
		.discovering = discovering,
	};

	bt_dev_dbg(hdev, "discovering %u", discovering);

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
10571
/* mgmt_suspending - notify userspace that the controller is suspending,
 * reporting the suspend @state.
 */
void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev = {
		.suspend_state = state,
	};

	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}
10579
/* mgmt_resuming - notify userspace that the controller resumed.
 *
 * @reason: wake reason reported in the event
 * @bdaddr: device that caused the wake-up, or NULL (address left zeroed)
 */
void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	/* addr starts out zeroed; only filled in when a wake device exists */
	struct mgmt_ev_controller_resume ev = {
		.wake_reason = reason,
	};

	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}
10595
/* Management channel registered with the HCI socket layer: routes
 * HCI_CHANNEL_CONTROL messages to the mgmt_handlers table and runs
 * mgmt_init_hdev for each new controller.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
10602
/* mgmt_init - register the management control channel.
 *
 * Returns 0 on success or a negative error from
 * hci_mgmt_chan_register().
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
10607
/* mgmt_exit - unregister the management control channel on teardown. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
10612
mgmt_cleanup(struct sock * sk)10613 void mgmt_cleanup(struct sock *sk)
10614 {
10615 struct mgmt_mesh_tx *mesh_tx;
10616 struct hci_dev *hdev;
10617
10618 read_lock(&hci_dev_list_lock);
10619
10620 list_for_each_entry(hdev, &hci_dev_list, list) {
10621 do {
10622 mesh_tx = mgmt_mesh_next(hdev, sk);
10623
10624 if (mesh_tx)
10625 mesh_send_complete(hdev, mesh_tx, true);
10626 } while (mesh_tx);
10627 }
10628
10629 read_unlock(&hci_dev_list_lock);
10630 }
10631