Lines Matching +full:local +full:- +full:bd +full:- +full:address +full:- +full:broken (full-text search; the matched lines below come from the Linux Bluetooth core, net/bluetooth/hci_conn.c)

2    BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 Copyright 2023-2024 NXP
70 /* This function requires the caller holds hdev->lock */
74 struct hci_dev *hdev = conn->hdev; in hci_connect_le_scan_cleanup()
79 bdaddr = &conn->dst; in hci_connect_le_scan_cleanup()
80 bdaddr_type = conn->dst_type; in hci_connect_le_scan_cleanup()
82 /* Check if we need to convert to identity address */ in hci_connect_le_scan_cleanup()
85 bdaddr = &irk->bdaddr; in hci_connect_le_scan_cleanup()
86 bdaddr_type = irk->addr_type; in hci_connect_le_scan_cleanup()
89 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr, in hci_connect_le_scan_cleanup()
94 if (params->conn) { in hci_connect_le_scan_cleanup()
95 hci_conn_drop(params->conn); in hci_connect_le_scan_cleanup()
96 hci_conn_put(params->conn); in hci_connect_le_scan_cleanup()
97 params->conn = NULL; in hci_connect_le_scan_cleanup()
100 if (!params->explicit_connect) in hci_connect_le_scan_cleanup()
117 params->explicit_connect = false; in hci_connect_le_scan_cleanup()
121 switch (params->auto_connect) { in hci_connect_le_scan_cleanup()
128 hci_pend_le_list_add(params, &hdev->pend_le_conns); in hci_connect_le_scan_cleanup()
131 hci_pend_le_list_add(params, &hdev->pend_le_reports); in hci_connect_le_scan_cleanup()
142 struct hci_dev *hdev = conn->hdev; in hci_conn_cleanup()
144 if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags)) in hci_conn_cleanup()
145 hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type); in hci_conn_cleanup()
147 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) in hci_conn_cleanup()
148 hci_remove_link_key(hdev, &conn->dst); in hci_conn_cleanup()
154 if (HCI_CONN_HANDLE_UNSET(conn->handle)) in hci_conn_cleanup()
155 ida_free(&hdev->unset_handle_ida, conn->handle); in hci_conn_cleanup()
157 if (conn->cleanup) in hci_conn_cleanup()
158 conn->cleanup(conn); in hci_conn_cleanup()
160 if (conn->type == SCO_LINK || conn->type == ESCO_LINK) { in hci_conn_cleanup()
161 switch (conn->setting & SCO_AIRMODE_MASK) { in hci_conn_cleanup()
164 if (hdev->notify) in hci_conn_cleanup()
165 hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO); in hci_conn_cleanup()
169 if (hdev->notify) in hci_conn_cleanup()
170 hdev->notify(hdev, HCI_NOTIFY_CONN_DEL); in hci_conn_cleanup()
173 debugfs_remove_recursive(conn->debugfs); in hci_conn_cleanup()
189 if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER && in hci_disconnect()
190 (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) { in hci_disconnect()
191 struct hci_dev *hdev = conn->hdev; in hci_disconnect()
194 clkoff_cp.handle = cpu_to_le16(conn->handle); in hci_disconnect()
204 struct hci_dev *hdev = conn->hdev; in hci_add_sco()
209 conn->state = BT_CONNECT; in hci_add_sco()
210 conn->out = true; in hci_add_sco()
212 conn->attempt++; in hci_add_sco()
215 cp.pkt_type = cpu_to_le16(conn->pkt_type); in hci_add_sco()
223 if (!conn->parent) in find_next_esco_param()
226 for (; conn->attempt <= size; conn->attempt++) { in find_next_esco_param()
227 if (lmp_esco_2m_capable(conn->parent) || in find_next_esco_param()
228 (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3)) in find_next_esco_param()
231 conn, conn->attempt); in find_next_esco_param()
234 return conn->attempt <= size; in find_next_esco_param()
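The find_next_esco_param() lines above step conn->attempt through a table of (e)SCO parameter sets, skipping entries the link cannot use until a compatible one is found. Below is a minimal self-contained sketch of that retry-table pattern; the table contents, bit names and capability flag are invented for illustration and are not the kernel's esco_param_* tables.

#include <stdbool.h>
#include <stdio.h>

struct sco_param_ex {
	unsigned int pkt_type;	/* illustrative bitmask */
	unsigned int max_latency;
};

#define PKT_2EV3_NOT_ALLOWED 0x0001	/* assumed "do not use 2-EV3" bit */

/* Example table: the first entry relies on 2-EV3, the second is a fallback. */
static const struct sco_param_ex params[] = {
	{ 0x0000, 13 },			/* needs 2-EV3 support */
	{ PKT_2EV3_NOT_ALLOWED, 16 },	/* EV3-only fallback */
};

/* Advance *attempt (1-based) to the next entry usable on this link. */
static bool find_next_param(bool peer_2m_capable, int *attempt, int size)
{
	for (; *attempt <= size; (*attempt)++) {
		if (peer_2m_capable ||
		    (params[*attempt - 1].pkt_type & PKT_2EV3_NOT_ALLOWED))
			break;		/* entry is compatible, stop here */
		printf("skipping attempt %d, needs 2-EV3\n", *attempt);
	}
	return *attempt <= size;	/* false: table exhausted */
}

int main(void)
{
	int attempt = 1;

	if (find_next_param(false, &attempt, 2))
		printf("using parameter set %d\n", attempt);
	return 0;
}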
247 if (!codec->data_path || !hdev->get_codec_config_data) in configure_datapath_sync()
250 err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len, in configure_datapath_sync()
257 err = -ENOMEM; in configure_datapath_sync()
261 err = hdev->get_data_path_id(hdev, &cmd->data_path_id); in configure_datapath_sync()
265 cmd->vnd_len = vnd_len; in configure_datapath_sync()
266 memcpy(cmd->vnd_data, vnd_data, vnd_len); in configure_datapath_sync()
268 cmd->direction = 0x00; in configure_datapath_sync()
272 cmd->direction = 0x01; in configure_datapath_sync()
286 struct hci_conn *conn = conn_handle->conn; in hci_enhanced_setup_sync()
287 __u16 handle = conn_handle->handle; in hci_enhanced_setup_sync()
294 return -ECANCELED; in hci_enhanced_setup_sync()
298 configure_datapath_sync(hdev, &conn->codec); in hci_enhanced_setup_sync()
300 conn->state = BT_CONNECT; in hci_enhanced_setup_sync()
301 conn->out = true; in hci_enhanced_setup_sync()
303 conn->attempt++; in hci_enhanced_setup_sync()
312 switch (conn->codec.id) { in hci_enhanced_setup_sync()
316 return -EINVAL; in hci_enhanced_setup_sync()
318 param = &esco_param_msbc[conn->attempt - 1]; in hci_enhanced_setup_sync()
333 cp.in_data_path = conn->codec.data_path; in hci_enhanced_setup_sync()
334 cp.out_data_path = conn->codec.data_path; in hci_enhanced_setup_sync()
343 param = &esco_param_msbc[conn->attempt - 1]; in hci_enhanced_setup_sync()
358 cp.in_data_path = conn->codec.data_path; in hci_enhanced_setup_sync()
359 cp.out_data_path = conn->codec.data_path; in hci_enhanced_setup_sync()
365 if (conn->parent && lmp_esco_capable(conn->parent)) { in hci_enhanced_setup_sync()
368 return -EINVAL; in hci_enhanced_setup_sync()
369 param = &esco_param_cvsd[conn->attempt - 1]; in hci_enhanced_setup_sync()
371 if (conn->attempt > ARRAY_SIZE(sco_param_cvsd)) in hci_enhanced_setup_sync()
372 return -EINVAL; in hci_enhanced_setup_sync()
373 param = &sco_param_cvsd[conn->attempt - 1]; in hci_enhanced_setup_sync()
389 cp.in_data_path = conn->codec.data_path; in hci_enhanced_setup_sync()
390 cp.out_data_path = conn->codec.data_path; in hci_enhanced_setup_sync()
395 return -EINVAL; in hci_enhanced_setup_sync()
398 cp.retrans_effort = param->retrans_effort; in hci_enhanced_setup_sync()
399 cp.pkt_type = __cpu_to_le16(param->pkt_type); in hci_enhanced_setup_sync()
400 cp.max_latency = __cpu_to_le16(param->max_latency); in hci_enhanced_setup_sync()
403 return -EIO; in hci_enhanced_setup_sync()
410 struct hci_dev *hdev = conn->hdev; in hci_setup_sync_conn()
416 conn->state = BT_CONNECT; in hci_setup_sync_conn()
417 conn->out = true; in hci_setup_sync_conn()
419 conn->attempt++; in hci_setup_sync_conn()
425 cp.voice_setting = cpu_to_le16(conn->setting); in hci_setup_sync_conn()
427 switch (conn->setting & SCO_AIRMODE_MASK) { in hci_setup_sync_conn()
432 param = &esco_param_msbc[conn->attempt - 1]; in hci_setup_sync_conn()
435 if (conn->parent && lmp_esco_capable(conn->parent)) { in hci_setup_sync_conn()
439 param = &esco_param_cvsd[conn->attempt - 1]; in hci_setup_sync_conn()
441 if (conn->attempt > ARRAY_SIZE(sco_param_cvsd)) in hci_setup_sync_conn()
443 param = &sco_param_cvsd[conn->attempt - 1]; in hci_setup_sync_conn()
450 cp.retrans_effort = param->retrans_effort; in hci_setup_sync_conn()
451 cp.pkt_type = __cpu_to_le16(param->pkt_type); in hci_setup_sync_conn()
452 cp.max_latency = __cpu_to_le16(param->max_latency); in hci_setup_sync_conn()
465 if (enhanced_sync_conn_capable(conn->hdev)) { in hci_setup_sync()
471 conn_handle->conn = conn; in hci_setup_sync()
472 conn_handle->handle = handle; in hci_setup_sync()
473 result = hci_cmd_sync_queue(conn->hdev, hci_enhanced_setup_sync, in hci_setup_sync()
487 struct hci_dev *hdev = conn->hdev; in hci_le_conn_update()
493 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); in hci_le_conn_update()
495 params->conn_min_interval = min; in hci_le_conn_update()
496 params->conn_max_interval = max; in hci_le_conn_update()
497 params->conn_latency = latency; in hci_le_conn_update()
498 params->supervision_timeout = to_multiplier; in hci_le_conn_update()
504 cp.handle = cpu_to_le16(conn->handle); in hci_le_conn_update()
523 struct hci_dev *hdev = conn->hdev; in hci_le_start_enc()
530 cp.handle = cpu_to_le16(conn->handle); in hci_le_start_enc()
543 link = list_first_entry_or_null(&conn->link_list, struct hci_link, list); in hci_sco_setup()
544 if (!link || !link->conn) in hci_sco_setup()
550 if (lmp_esco_capable(conn->hdev)) in hci_sco_setup()
551 hci_setup_sync(link->conn, conn->handle); in hci_sco_setup()
553 hci_add_sco(link->conn, conn->handle); in hci_sco_setup()
555 hci_connect_cfm(link->conn, status); in hci_sco_setup()
556 hci_conn_del(link->conn); in hci_sco_setup()
564 int refcnt = atomic_read(&conn->refcnt); in hci_conn_timeout()
566 BT_DBG("hcon %p state %s", conn, state_to_string(conn->state)); in hci_conn_timeout()
588 struct hci_dev *hdev = conn->hdev; in hci_conn_idle()
590 BT_DBG("hcon %p mode %d", conn, conn->mode); in hci_conn_idle()
595 if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF)) in hci_conn_idle()
600 cp.handle = cpu_to_le16(conn->handle); in hci_conn_idle()
607 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) { in hci_conn_idle()
609 cp.handle = cpu_to_le16(conn->handle); in hci_conn_idle()
610 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval); in hci_conn_idle()
611 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval); in hci_conn_idle()
623 hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst), in hci_conn_auto_accept()
624 &conn->dst); in hci_conn_auto_accept()
648 struct hci_dev *hdev = conn->hdev; in le_conn_timeout()
654 * happen with broken hardware or if low duty cycle was used in le_conn_timeout()
657 if (conn->role == HCI_ROLE_SLAVE) { in le_conn_timeout()
689 /* Skip if not broadcast/ANY address */ in bis_list()
690 if (bacmp(&conn->dst, BDADDR_ANY)) in bis_list()
693 if (d->big != conn->iso_qos.bcast.big || d->bis == BT_ISO_QOS_BIS_UNSET || in bis_list()
694 d->bis != conn->iso_qos.bcast.bis) in bis_list()
697 d->count++; in bis_list()
704 bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis); in terminate_big_sync()
706 hci_disable_per_advertising_sync(hdev, d->bis); in terminate_big_sync()
707 hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL); in terminate_big_sync()
710 if (!d->big_term) in terminate_big_sync()
713 return hci_le_terminate_big_sync(hdev, d->big, in terminate_big_sync()
727 bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", conn->iso_qos.bcast.big, in hci_le_terminate_big()
728 conn->iso_qos.bcast.bis); in hci_le_terminate_big()
732 return -ENOMEM; in hci_le_terminate_big()
734 d->big = conn->iso_qos.bcast.big; in hci_le_terminate_big()
735 d->bis = conn->iso_qos.bcast.bis; in hci_le_terminate_big()
736 d->big_term = test_and_clear_bit(HCI_CONN_BIG_CREATED, &conn->flags); in hci_le_terminate_big()
750 bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big, in big_terminate_sync()
751 d->sync_handle); in big_terminate_sync()
753 if (d->big_sync_term) in big_terminate_sync()
754 hci_le_big_terminate_sync(hdev, d->big); in big_terminate_sync()
756 if (d->pa_sync_term) in big_terminate_sync()
757 return hci_le_pa_terminate_sync(hdev, d->sync_handle); in big_terminate_sync()
767 if (d->big != conn->iso_qos.bcast.big) in find_bis()
770 d->count++; in find_bis()
778 bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, conn->sync_handle); in hci_le_big_terminate()
782 return -ENOMEM; in hci_le_big_terminate()
784 d->big = big; in hci_le_big_terminate()
785 d->sync_handle = conn->sync_handle; in hci_le_big_terminate()
787 if (test_and_clear_bit(HCI_CONN_PA_SYNC, &conn->flags)) { in hci_le_big_terminate()
791 if (!d->count) in hci_le_big_terminate()
792 d->pa_sync_term = true; in hci_le_big_terminate()
794 d->count = 0; in hci_le_big_terminate()
797 if (test_and_clear_bit(HCI_CONN_BIG_SYNC, &conn->flags)) { in hci_le_big_terminate()
801 if (!d->count) in hci_le_big_terminate()
802 d->big_sync_term = true; in hci_le_big_terminate()
821 struct hci_dev *hdev = conn->hdev; in bis_cleanup()
826 if (conn->role == HCI_ROLE_MASTER) { in bis_cleanup()
827 if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags)) in bis_cleanup()
833 bis = hci_conn_hash_lookup_big(hdev, conn->iso_qos.bcast.big); in bis_cleanup()
839 hci_le_big_terminate(hdev, conn->iso_qos.bcast.big, in bis_cleanup()
864 if (!bacmp(&conn->dst, BDADDR_ANY) || d->cig != conn->iso_qos.ucast.cig) in find_cis()
867 d->count++; in find_cis()
876 struct hci_dev *hdev = conn->hdev; in cis_cleanup()
879 if (conn->iso_qos.ucast.cig == BT_ISO_QOS_CIG_UNSET) in cis_cleanup()
883 d.cig = conn->iso_qos.ucast.cig; in cis_cleanup()
894 hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig); in cis_cleanup()
899 return ida_alloc_range(&hdev->unset_handle_ida, HCI_CONN_HANDLE_MAX + 1, in hci_conn_hash_alloc_unset()
910 if (!hdev->acl_mtu) in __hci_conn_add()
911 return ERR_PTR(-ECONNREFUSED); in __hci_conn_add()
914 if (hdev->iso_mtu) in __hci_conn_add()
919 if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU) in __hci_conn_add()
920 return ERR_PTR(-ECONNREFUSED); in __hci_conn_add()
921 if (!hdev->le_mtu && hdev->acl_mtu < HCI_MIN_LE_MTU) in __hci_conn_add()
922 return ERR_PTR(-ECONNREFUSED); in __hci_conn_add()
926 if (!hdev->sco_pkts) in __hci_conn_add()
928 return ERR_PTR(-ECONNREFUSED); in __hci_conn_add()
931 return ERR_PTR(-ECONNREFUSED); in __hci_conn_add()
938 return ERR_PTR(-ENOMEM); in __hci_conn_add()
940 bacpy(&conn->dst, dst); in __hci_conn_add()
941 bacpy(&conn->src, &hdev->bdaddr); in __hci_conn_add()
942 conn->handle = handle; in __hci_conn_add()
943 conn->hdev = hdev; in __hci_conn_add()
944 conn->type = type; in __hci_conn_add()
945 conn->role = role; in __hci_conn_add()
946 conn->mode = HCI_CM_ACTIVE; in __hci_conn_add()
947 conn->state = BT_OPEN; in __hci_conn_add()
948 conn->auth_type = HCI_AT_GENERAL_BONDING; in __hci_conn_add()
949 conn->io_capability = hdev->io_capability; in __hci_conn_add()
950 conn->remote_auth = 0xff; in __hci_conn_add()
951 conn->key_type = 0xff; in __hci_conn_add()
952 conn->rssi = HCI_RSSI_INVALID; in __hci_conn_add()
953 conn->tx_power = HCI_TX_POWER_INVALID; in __hci_conn_add()
954 conn->max_tx_power = HCI_TX_POWER_INVALID; in __hci_conn_add()
955 conn->sync_handle = HCI_SYNC_HANDLE_INVALID; in __hci_conn_add()
956 conn->sid = HCI_SID_INVALID; in __hci_conn_add()
958 set_bit(HCI_CONN_POWER_SAVE, &conn->flags); in __hci_conn_add()
959 conn->disc_timeout = HCI_DISCONN_TIMEOUT; in __hci_conn_add()
962 conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT; in __hci_conn_add()
964 if (conn->role == HCI_ROLE_MASTER) in __hci_conn_add()
965 conn->out = true; in __hci_conn_add()
969 conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK; in __hci_conn_add()
970 conn->mtu = hdev->acl_mtu; in __hci_conn_add()
973 /* conn->src should reflect the local identity address */ in __hci_conn_add()
974 hci_copy_identity_address(hdev, &conn->src, &conn->src_type); in __hci_conn_add()
975 conn->mtu = hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu; in __hci_conn_add()
978 /* conn->src should reflect the local identity address */ in __hci_conn_add()
979 hci_copy_identity_address(hdev, &conn->src, &conn->src_type); in __hci_conn_add()
983 conn->cleanup = bis_cleanup; in __hci_conn_add()
984 else if (conn->role == HCI_ROLE_MASTER) in __hci_conn_add()
985 conn->cleanup = cis_cleanup; in __hci_conn_add()
987 conn->mtu = hdev->iso_mtu ? hdev->iso_mtu : in __hci_conn_add()
988 hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu; in __hci_conn_add()
992 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | in __hci_conn_add()
993 (hdev->esco_type & EDR_ESCO_MASK); in __hci_conn_add()
995 conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK; in __hci_conn_add()
997 conn->mtu = hdev->sco_mtu; in __hci_conn_add()
1000 conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK; in __hci_conn_add()
1001 conn->mtu = hdev->sco_mtu; in __hci_conn_add()
1005 skb_queue_head_init(&conn->data_q); in __hci_conn_add()
1006 skb_queue_head_init(&conn->tx_q.queue); in __hci_conn_add()
1008 INIT_LIST_HEAD(&conn->chan_list); in __hci_conn_add()
1009 INIT_LIST_HEAD(&conn->link_list); in __hci_conn_add()
1011 INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout); in __hci_conn_add()
1012 INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept); in __hci_conn_add()
1013 INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle); in __hci_conn_add()
1014 INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout); in __hci_conn_add()
1016 atomic_set(&conn->refcnt, 0); in __hci_conn_add()
1026 if (conn->type != SCO_LINK && conn->type != ESCO_LINK) { in __hci_conn_add()
1027 if (hdev->notify) in __hci_conn_add()
1028 hdev->notify(hdev, HCI_NOTIFY_CONN_ADD); in __hci_conn_add()
1045 return ERR_PTR(-ECONNREFUSED); in hci_conn_add_unset()
1054 return ERR_PTR(-EINVAL); in hci_conn_add()
1068 switch (conn->type) { in hci_conn_cleanup_child()
1071 if (HCI_CONN_HANDLE_UNSET(conn->handle)) in hci_conn_cleanup_child()
1075 if ((conn->state != BT_CONNECTED && in hci_conn_cleanup_child()
1076 !test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) || in hci_conn_cleanup_child()
1077 test_bit(HCI_CONN_BIG_CREATED, &conn->flags)) in hci_conn_cleanup_child()
1085 struct hci_dev *hdev = conn->hdev; in hci_conn_unlink()
1089 if (!conn->parent) { in hci_conn_unlink()
1092 list_for_each_entry_safe(link, t, &conn->link_list, list) { in hci_conn_unlink()
1093 struct hci_conn *child = link->conn; in hci_conn_unlink()
1102 if (!test_bit(HCI_UP, &hdev->flags)) in hci_conn_unlink()
1105 hci_conn_cleanup_child(child, conn->abort_reason); in hci_conn_unlink()
1111 if (!conn->link) in hci_conn_unlink()
1114 list_del_rcu(&conn->link->list); in hci_conn_unlink()
1117 hci_conn_drop(conn->parent); in hci_conn_unlink()
1118 hci_conn_put(conn->parent); in hci_conn_unlink()
1119 conn->parent = NULL; in hci_conn_unlink()
1121 kfree(conn->link); in hci_conn_unlink()
1122 conn->link = NULL; in hci_conn_unlink()
1127 struct hci_dev *hdev = conn->hdev; in hci_conn_del()
1129 BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle); in hci_conn_del()
1133 disable_delayed_work_sync(&conn->disc_work); in hci_conn_del()
1134 disable_delayed_work_sync(&conn->auto_accept_work); in hci_conn_del()
1135 disable_delayed_work_sync(&conn->idle_work); in hci_conn_del()
1137 if (conn->type == ACL_LINK) { in hci_conn_del()
1139 hdev->acl_cnt += conn->sent; in hci_conn_del()
1140 } else if (conn->type == LE_LINK) { in hci_conn_del()
1141 cancel_delayed_work(&conn->le_conn_timeout); in hci_conn_del()
1143 if (hdev->le_pkts) in hci_conn_del()
1144 hdev->le_cnt += conn->sent; in hci_conn_del()
1146 hdev->acl_cnt += conn->sent; in hci_conn_del()
1149 if (conn->type == ISO_LINK) { in hci_conn_del()
1150 if (hdev->iso_pkts) in hci_conn_del()
1151 hdev->iso_cnt += conn->sent; in hci_conn_del()
1152 else if (hdev->le_pkts) in hci_conn_del()
1153 hdev->le_cnt += conn->sent; in hci_conn_del()
1155 hdev->acl_cnt += conn->sent; in hci_conn_del()
1159 skb_queue_purge(&conn->data_q); in hci_conn_del()
1160 skb_queue_purge(&conn->tx_q.queue); in hci_conn_del()
1178 BT_DBG("%pMR -> %pMR", src, dst); in hci_get_route()
1183 if (!test_bit(HCI_UP, &d->flags) || in hci_get_route()
1188 * No source address - find interface with bdaddr != dst in hci_get_route()
1189 * Source address - find interface with bdaddr == src in hci_get_route()
1199 bacpy(&id_addr, &d->bdaddr); in hci_get_route()
1208 /* Convert from HCI to three-value type */ in hci_get_route()
1219 if (bacmp(&d->bdaddr, dst)) { in hci_get_route()
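The hci_get_route() fragments above encode the two routing rules from the matched comment: with no source address, pick any usable controller whose address differs from the destination; with a source address, pick the controller whose address matches it. The following is a self-contained sketch of just that decision, with plain structs and memcmp standing in for bdaddr_t/bacmp; identity-address conversion and locking from the real function are deliberately omitted.

#include <stddef.h>
#include <string.h>

struct bdaddr { unsigned char b[6]; };

struct dev { struct bdaddr addr; int up; };

#define BDADDR_ANY_INIT { { 0, 0, 0, 0, 0, 0 } }

/* src == ANY means "any up controller that is not dst",
 * otherwise require an exact match on the source address. */
struct dev *pick_route(struct dev *devs, size_t n,
		       const struct bdaddr *src, const struct bdaddr *dst)
{
	static const struct bdaddr any = BDADDR_ANY_INIT;
	int use_src = memcmp(src, &any, sizeof(any)) != 0;
	size_t i;

	for (i = 0; i < n; i++) {
		if (!devs[i].up)
			continue;
		if (use_src) {
			if (!memcmp(&devs[i].addr, src, sizeof(*src)))
				return &devs[i];
		} else {
			if (memcmp(&devs[i].addr, dst, sizeof(*dst)))
				return &devs[i];
		}
	}
	return NULL;
}

int main(void)
{
	struct dev devs[2] = {
		{ { { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 } }, 1 },
		{ { { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff } }, 1 },
	};
	struct bdaddr dst = { { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 } };
	struct bdaddr src = BDADDR_ANY_INIT;

	/* No source given: expect the second controller (addr != dst). */
	return pick_route(devs, 2, &src, &dst) == &devs[1] ? 0 : 1;
}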
1233 /* This function requires the caller holds hdev->lock */
1236 struct hci_dev *hdev = conn->hdev; in hci_le_conn_failed()
1246 /* This function requires the caller holds hdev->lock */
1249 struct hci_dev *hdev = conn->hdev; in hci_conn_failed()
1253 switch (conn->type) { in hci_conn_failed()
1265 test_and_clear_bit(HCI_CONN_BIG_SYNC_FAILED, &conn->flags); in hci_conn_failed()
1266 test_and_clear_bit(HCI_CONN_PA_SYNC_FAILED, &conn->flags); in hci_conn_failed()
1268 conn->state = BT_CLOSED; in hci_conn_failed()
1273 /* This function requires the caller holds hdev->lock */
1276 struct hci_dev *hdev = conn->hdev; in hci_conn_set_handle()
1280 if (conn->handle == handle) in hci_conn_set_handle()
1292 if (conn->abort_reason) in hci_conn_set_handle()
1293 return conn->abort_reason; in hci_conn_set_handle()
1295 if (HCI_CONN_HANDLE_UNSET(conn->handle)) in hci_conn_set_handle()
1296 ida_free(&hdev->unset_handle_ida, conn->handle); in hci_conn_set_handle()
1298 conn->handle = handle; in hci_conn_set_handle()
1314 return ERR_PTR(-ECONNREFUSED); in hci_connect_le()
1316 return ERR_PTR(-EOPNOTSUPP); in hci_connect_le()
1320 * time, we return -EBUSY if there is any connection attempt running. in hci_connect_le()
1323 return ERR_PTR(-EBUSY); in hci_connect_le()
1331 if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) { in hci_connect_le()
1332 return ERR_PTR(-EBUSY); in hci_connect_le()
1335 /* Check if the destination address has been resolved by the controller in hci_connect_le()
1336 * since if it did then the identity address shall be used. in hci_connect_le()
1339 /* When given an identity address with existing identity in hci_connect_le()
1341 * to a resolvable random address. in hci_connect_le()
1343 * Storing the resolvable random address is required here in hci_connect_le()
1344 * to handle connection failures. The address will later in hci_connect_le()
1345 * be resolved back into the original identity address in hci_connect_le()
1349 if (irk && bacmp(&irk->rpa, BDADDR_ANY)) { in hci_connect_le()
1350 dst = &irk->rpa; in hci_connect_le()
1356 bacpy(&conn->dst, dst); in hci_connect_le()
1362 conn->pending_sec_level = sec_level; in hci_connect_le()
1365 conn->dst_type = dst_type; in hci_connect_le()
1366 conn->sec_level = BT_SECURITY_LOW; in hci_connect_le()
1367 conn->conn_timeout = conn_timeout; in hci_connect_le()
1368 conn->le_adv_phy = phy; in hci_connect_le()
1369 conn->le_adv_sec_phy = sec_phy; in hci_connect_le()
1388 if (conn->state != BT_CONNECTED) in is_connected()
1394 /* This function requires the caller holds hdev->lock */
1401 return -EISCONN; in hci_explicit_conn_params_set()
1407 return -ENOMEM; in hci_explicit_conn_params_set()
1413 params->auto_connect = HCI_AUTO_CONN_EXPLICIT; in hci_explicit_conn_params_set()
1417 if (params->auto_connect == HCI_AUTO_CONN_DISABLED || in hci_explicit_conn_params_set()
1418 params->auto_connect == HCI_AUTO_CONN_REPORT || in hci_explicit_conn_params_set()
1419 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) { in hci_explicit_conn_params_set()
1421 hci_pend_le_list_add(params, &hdev->pend_le_conns); in hci_explicit_conn_params_set()
1424 params->explicit_connect = true; in hci_explicit_conn_params_set()
1427 params->auto_connect); in hci_explicit_conn_params_set()
1438 if (qos->bcast.big == BT_ISO_QOS_BIG_UNSET) { in qos_set_big()
1447 return -EADDRNOTAVAIL; in qos_set_big()
1450 qos->bcast.big = big; in qos_set_big()
1462 if (qos->bcast.bis == BT_ISO_QOS_BIS_UNSET) { in qos_set_bis()
1463 if (qos->bcast.big != BT_ISO_QOS_BIG_UNSET) { in qos_set_bis()
1464 conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big); in qos_set_bis()
1470 qos->bcast.bis = conn->iso_qos.bcast.bis; in qos_set_bis()
1478 for (bis = 0x01; bis < hdev->le_num_of_adv_sets; in qos_set_bis()
1486 if (bis == hdev->le_num_of_adv_sets) in qos_set_bis()
1487 return -EADDRNOTAVAIL; in qos_set_bis()
1490 qos->bcast.bis = bis; in qos_set_bis()
1496 /* This function requires the caller holds hdev->lock */
1507 return ERR_PTR(-ECONNREFUSED); in hci_add_bis()
1508 return ERR_PTR(-EOPNOTSUPP); in hci_add_bis()
1520 conn = hci_conn_hash_lookup_per_adv_bis(hdev, dst, qos->bcast.big, in hci_add_bis()
1521 qos->bcast.big); in hci_add_bis()
1523 return ERR_PTR(-EADDRINUSE); in hci_add_bis()
1528 conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big); in hci_add_bis()
1530 if (conn && (memcmp(qos, &conn->iso_qos, sizeof(*qos)) || in hci_add_bis()
1531 base_len != conn->le_per_adv_data_len || in hci_add_bis()
1532 memcmp(conn->le_per_adv_data, base, base_len))) in hci_add_bis()
1533 return ERR_PTR(-EADDRINUSE); in hci_add_bis()
1539 conn->state = BT_CONNECT; in hci_add_bis()
1545 /* This function requires the caller holds hdev->lock */
1556 return ERR_PTR(-ECONNREFUSED); in hci_connect_le_scan()
1558 return ERR_PTR(-EOPNOTSUPP); in hci_connect_le_scan()
1562 * established. To be able to handle these ATT messages, the user- in hci_connect_le_scan()
1572 if (conn->pending_sec_level < sec_level) in hci_connect_le_scan()
1573 conn->pending_sec_level = sec_level; in hci_connect_le_scan()
1585 return ERR_PTR(-EBUSY); in hci_connect_le_scan()
1588 conn->state = BT_CONNECT; in hci_connect_le_scan()
1589 set_bit(HCI_CONN_SCANNING, &conn->flags); in hci_connect_le_scan()
1590 conn->dst_type = dst_type; in hci_connect_le_scan()
1591 conn->sec_level = BT_SECURITY_LOW; in hci_connect_le_scan()
1592 conn->pending_sec_level = sec_level; in hci_connect_le_scan()
1593 conn->conn_timeout = conn_timeout; in hci_connect_le_scan()
1594 conn->conn_reason = conn_reason; in hci_connect_le_scan()
1611 return ERR_PTR(-ECONNREFUSED); in hci_connect_acl()
1613 return ERR_PTR(-EOPNOTSUPP); in hci_connect_acl()
1616 /* Reject outgoing connection to device with same BD ADDR against in hci_connect_acl()
1617 * CVE-2020-26555 in hci_connect_acl()
1619 if (!bacmp(&hdev->bdaddr, dst)) { in hci_connect_acl()
1622 return ERR_PTR(-ECONNREFUSED); in hci_connect_acl()
1634 acl->conn_reason = conn_reason; in hci_connect_acl()
1635 if (acl->state == BT_OPEN || acl->state == BT_CLOSED) { in hci_connect_acl()
1638 acl->sec_level = BT_SECURITY_LOW; in hci_connect_acl()
1639 acl->pending_sec_level = sec_level; in hci_connect_acl()
1640 acl->auth_type = auth_type; in hci_connect_acl()
1641 acl->conn_timeout = timeout; in hci_connect_acl()
1656 struct hci_dev *hdev = parent->hdev; in hci_conn_link()
1661 if (conn->link) in hci_conn_link()
1662 return conn->link; in hci_conn_link()
1664 if (conn->parent) in hci_conn_link()
1671 link->conn = hci_conn_hold(conn); in hci_conn_link()
1672 conn->link = link; in hci_conn_link()
1673 conn->parent = hci_conn_get(parent); in hci_conn_link()
1676 list_add_tail_rcu(&link->list, &parent->link_list); in hci_conn_link()
1707 return ERR_PTR(-ENOLINK); in hci_connect_sco()
1710 sco->setting = setting; in hci_connect_sco()
1711 sco->codec = *codec; in hci_connect_sco()
1713 if (acl->state == BT_CONNECTED && in hci_connect_sco()
1714 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) { in hci_connect_sco()
1715 set_bit(HCI_CONN_POWER_SAVE, &acl->flags); in hci_connect_sco()
1718 if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) { in hci_connect_sco()
1720 set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags); in hci_connect_sco()
1732 struct hci_dev *hdev = conn->hdev; in hci_le_create_big()
1738 data.big = qos->bcast.big; in hci_le_create_big()
1739 data.bis = qos->bcast.bis; in hci_le_create_big()
1746 cp.handle = qos->bcast.big; in hci_le_create_big()
1747 cp.adv_handle = qos->bcast.bis; in hci_le_create_big()
1749 hci_cpu_to_le24(qos->bcast.out.interval, cp.bis.sdu_interval); in hci_le_create_big()
1750 cp.bis.sdu = cpu_to_le16(qos->bcast.out.sdu); in hci_le_create_big()
1751 cp.bis.latency = cpu_to_le16(qos->bcast.out.latency); in hci_le_create_big()
1752 cp.bis.rtn = qos->bcast.out.rtn; in hci_le_create_big()
1753 cp.bis.phy = qos->bcast.out.phy; in hci_le_create_big()
1754 cp.bis.packing = qos->bcast.packing; in hci_le_create_big()
1755 cp.bis.framing = qos->bcast.framing; in hci_le_create_big()
1756 cp.bis.encryption = qos->bcast.encryption; in hci_le_create_big()
1757 memcpy(cp.bis.bcode, qos->bcast.bcode, sizeof(cp.bis.bcode)); in hci_le_create_big()
1775 qos = &conn->iso_qos; in set_cig_params_sync()
1776 pdu->cig_id = cig_id; in set_cig_params_sync()
1777 hci_cpu_to_le24(qos->ucast.out.interval, pdu->c_interval); in set_cig_params_sync()
1778 hci_cpu_to_le24(qos->ucast.in.interval, pdu->p_interval); in set_cig_params_sync()
1779 pdu->sca = qos->ucast.sca; in set_cig_params_sync()
1780 pdu->packing = qos->ucast.packing; in set_cig_params_sync()
1781 pdu->framing = qos->ucast.framing; in set_cig_params_sync()
1782 pdu->c_latency = cpu_to_le16(qos->ucast.out.latency); in set_cig_params_sync()
1783 pdu->p_latency = cpu_to_le16(qos->ucast.in.latency); in set_cig_params_sync()
1790 aux_num_cis < pdu->num_cis; cis_id++) { in set_cig_params_sync()
1797 qos = &conn->iso_qos; in set_cig_params_sync()
1799 cis = &pdu->cis[aux_num_cis++]; in set_cig_params_sync()
1800 cis->cis_id = cis_id; in set_cig_params_sync()
1801 cis->c_sdu = cpu_to_le16(conn->iso_qos.ucast.out.sdu); in set_cig_params_sync()
1802 cis->p_sdu = cpu_to_le16(conn->iso_qos.ucast.in.sdu); in set_cig_params_sync()
1803 cis->c_phy = qos->ucast.out.phy ? qos->ucast.out.phy : in set_cig_params_sync()
1804 qos->ucast.in.phy; in set_cig_params_sync()
1805 cis->p_phy = qos->ucast.in.phy ? qos->ucast.in.phy : in set_cig_params_sync()
1806 qos->ucast.out.phy; in set_cig_params_sync()
1807 cis->c_rtn = qos->ucast.out.rtn; in set_cig_params_sync()
1808 cis->p_rtn = qos->ucast.in.rtn; in set_cig_params_sync()
1810 pdu->num_cis = aux_num_cis; in set_cig_params_sync()
1812 if (!pdu->num_cis) in set_cig_params_sync()
1816 struct_size(pdu, cis, pdu->num_cis), in set_cig_params_sync()
1822 struct hci_dev *hdev = conn->hdev; in hci_le_set_cig_params()
1828 if (qos->ucast.cig == BT_ISO_QOS_CIG_UNSET) { in hci_le_set_cig_params()
1847 qos->ucast.cig = data.cig; in hci_le_set_cig_params()
1850 if (qos->ucast.cis != BT_ISO_QOS_CIS_UNSET) { in hci_le_set_cig_params()
1851 if (hci_conn_hash_lookup_cis(hdev, NULL, 0, qos->ucast.cig, in hci_le_set_cig_params()
1852 qos->ucast.cis)) in hci_le_set_cig_params()
1858 for (data.cig = qos->ucast.cig, data.cis = 0x00; data.cis < 0xf0; in hci_le_set_cig_params()
1863 qos->ucast.cis = data.cis; in hci_le_set_cig_params()
1868 if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET) in hci_le_set_cig_params()
1873 UINT_PTR(qos->ucast.cig), NULL) < 0) in hci_le_set_cig_params()
1884 cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig, in hci_bind_cis()
1885 qos->ucast.cis); in hci_bind_cis()
1890 cis->cleanup = cis_cleanup; in hci_bind_cis()
1891 cis->dst_type = dst_type; in hci_bind_cis()
1892 cis->iso_qos.ucast.cig = BT_ISO_QOS_CIG_UNSET; in hci_bind_cis()
1893 cis->iso_qos.ucast.cis = BT_ISO_QOS_CIS_UNSET; in hci_bind_cis()
1896 if (cis->state == BT_CONNECTED) in hci_bind_cis()
1900 if (cis->state == BT_BOUND && in hci_bind_cis()
1901 !memcmp(&cis->iso_qos, qos, sizeof(*qos))) in hci_bind_cis()
1905 cis->le_tx_phy = qos->ucast.out.phy; in hci_bind_cis()
1906 cis->le_rx_phy = qos->ucast.in.phy; in hci_bind_cis()
1911 if (!qos->ucast.out.interval) in hci_bind_cis()
1912 qos->ucast.out.interval = qos->ucast.in.interval; in hci_bind_cis()
1917 if (!qos->ucast.in.interval) in hci_bind_cis()
1918 qos->ucast.in.interval = qos->ucast.out.interval; in hci_bind_cis()
1923 if (!qos->ucast.out.latency) in hci_bind_cis()
1924 qos->ucast.out.latency = qos->ucast.in.latency; in hci_bind_cis()
1929 if (!qos->ucast.in.latency) in hci_bind_cis()
1930 qos->ucast.in.latency = qos->ucast.out.latency; in hci_bind_cis()
1934 return ERR_PTR(-EINVAL); in hci_bind_cis()
1939 cis->iso_qos = *qos; in hci_bind_cis()
1940 cis->state = BT_BOUND; in hci_bind_cis()
1947 struct hci_dev *hdev = conn->hdev; in hci_iso_setup_path()
1952 if (conn->iso_qos.ucast.out.sdu) { in hci_iso_setup_path()
1953 cmd.handle = cpu_to_le16(conn->handle); in hci_iso_setup_path()
1963 if (conn->iso_qos.ucast.in.sdu) { in hci_iso_setup_path()
1964 cmd.handle = cpu_to_le16(conn->handle); in hci_iso_setup_path()
1979 if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY)) in hci_conn_check_create_cis()
1980 return -EINVAL; in hci_conn_check_create_cis()
1982 if (!conn->parent || conn->parent->state != BT_CONNECTED || in hci_conn_check_create_cis()
1983 conn->state != BT_CONNECT || HCI_CONN_HANDLE_UNSET(conn->handle)) in hci_conn_check_create_cis()
2001 list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { in hci_le_create_cis_pending()
2002 if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) { in hci_le_create_cis_pending()
2004 return -EBUSY; in hci_le_create_cis_pending()
2024 if (!qos->sdu && qos->phy) in hci_iso_qos_setup()
2025 qos->sdu = conn->mtu; in hci_iso_qos_setup()
2028 if (qos->phy == BT_ISO_PHY_ANY) in hci_iso_qos_setup()
2029 qos->phy = phy; in hci_iso_qos_setup()
2032 if (!qos->interval) in hci_iso_qos_setup()
2034 qos->interval = conn->le_conn_interval * 1250; in hci_iso_qos_setup()
2037 if (!qos->latency) in hci_iso_qos_setup()
2038 qos->latency = conn->le_conn_latency; in hci_iso_qos_setup()
2044 struct bt_iso_qos *qos = &conn->iso_qos; in create_big_sync()
2049 if (qos->bcast.out.phy == 0x02) in create_big_sync()
2053 interval = (qos->bcast.out.interval / 1250) * qos->bcast.sync_factor; in create_big_sync()
2055 if (qos->bcast.bis) in create_big_sync()
2058 err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->le_per_adv_data_len, in create_big_sync()
2059 conn->le_per_adv_data, flags, interval, in create_big_sync()
2064 return hci_le_create_big(conn, &conn->iso_qos); in create_big_sync()
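The create_big_sync() line above derives the periodic-advertising interval from the broadcast SDU interval. Assuming out.interval is in microseconds and the result is in 1.25 ms units (which the division by 1250 suggests), a 10 ms SDU interval with sync_factor 24 gives (10000 / 1250) * 24 = 192 units, i.e. a 240 ms periodic-advertising interval. A small worked check of that arithmetic, with the input values assumed for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int sdu_interval_us = 10000;	/* 10 ms SDU interval (assumed) */
	unsigned int sync_factor = 24;		/* qos->bcast.sync_factor (assumed) */

	/* Same computation as the matched line: result in 1.25 ms units. */
	unsigned int pa_interval = (sdu_interval_us / 1250) * sync_factor;

	printf("PA interval: %u units = %u.%02u ms\n",
	       pa_interval, pa_interval * 125 / 100, pa_interval * 125 % 100);
	return 0;
}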
2077 conn->iso_qos = *qos; in hci_pa_create_sync()
2078 conn->dst_type = dst_type; in hci_pa_create_sync()
2079 conn->sid = sid; in hci_pa_create_sync()
2080 conn->state = BT_LISTEN; in hci_pa_create_sync()
2081 conn->conn_timeout = msecs_to_jiffies(qos->bcast.sync_timeout * 10); in hci_pa_create_sync()
2097 return -EINVAL; in hci_conn_big_create_sync()
2105 hcon->iso_qos = *qos; in hci_conn_big_create_sync()
2107 hcon->num_bis = num_bis; in hci_conn_big_create_sync()
2108 memcpy(hcon->bis, bis, num_bis); in hci_conn_big_create_sync()
2109 hcon->conn_timeout = msecs_to_jiffies(qos->bcast.timeout * 10); in hci_conn_big_create_sync()
2138 conn = hci_conn_hash_lookup_big_state(hdev, qos->bcast.big, BT_OPEN); in hci_bind_bis()
2140 memcpy(qos, &conn->iso_qos, sizeof(*qos)); in hci_bind_bis()
2141 conn->state = BT_CONNECTED; in hci_bind_bis()
2155 conn->le_tx_phy = qos->bcast.out.phy; in hci_bind_bis()
2156 conn->le_tx_phy = qos->bcast.out.phy; in hci_bind_bis()
2160 memcpy(conn->le_per_adv_data, eir, sizeof(eir)); in hci_bind_bis()
2161 conn->le_per_adv_data_len = base_len; in hci_bind_bis()
2164 hci_iso_qos_setup(hdev, conn, &qos->bcast.out, in hci_bind_bis()
2165 conn->le_tx_phy ? conn->le_tx_phy : in hci_bind_bis()
2166 hdev->le_tx_def_phys); in hci_bind_bis()
2168 conn->iso_qos = *qos; in hci_bind_bis()
2169 conn->state = BT_BOUND; in hci_bind_bis()
2173 conn->iso_qos.bcast.big); in hci_bind_bis()
2178 return ERR_PTR(-ENOLINK); in hci_bind_bis()
2188 /* Skip if not broadcast/ANY address */ in bis_mark_per_adv()
2189 if (bacmp(&conn->dst, BDADDR_ANY)) in bis_mark_per_adv()
2192 if (d->big != conn->iso_qos.bcast.big || in bis_mark_per_adv()
2193 d->bis == BT_ISO_QOS_BIS_UNSET || in bis_mark_per_adv()
2194 d->bis != conn->iso_qos.bcast.bis) in bis_mark_per_adv()
2197 set_bit(HCI_CONN_PER_ADV, &conn->flags); in bis_mark_per_adv()
2212 if (conn->state == BT_CONNECTED) in hci_connect_bis()
2215 data.big = qos->bcast.big; in hci_connect_bis()
2216 data.bis = qos->bcast.bis; in hci_connect_bis()
2256 hci_iso_qos_setup(hdev, le, &qos->ucast.out, in hci_connect_cis()
2257 le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys); in hci_connect_cis()
2258 hci_iso_qos_setup(hdev, le, &qos->ucast.in, in hci_connect_cis()
2259 le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys); in hci_connect_cis()
2271 return ERR_PTR(-ENOLINK); in hci_connect_cis()
2274 cis->state = BT_CONNECT; in hci_connect_cis()
2287 * Connections is used and the link is encrypted with AES-CCM in hci_conn_check_link_mode()
2288 * using a P-256 authenticated combination key. in hci_conn_check_link_mode()
2290 if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) { in hci_conn_check_link_mode()
2292 !test_bit(HCI_CONN_AES_CCM, &conn->flags) || in hci_conn_check_link_mode()
2293 conn->key_type != HCI_LK_AUTH_COMBINATION_P256) in hci_conn_check_link_mode()
2302 * 128-bit equivalent strength for link and encryption keys in hci_conn_check_link_mode()
2304 * SAFER+ not allowed, and P-192 not allowed; encryption key in hci_conn_check_link_mode()
2307 if (conn->sec_level == BT_SECURITY_FIPS && in hci_conn_check_link_mode()
2308 !test_bit(HCI_CONN_AES_CCM, &conn->flags)) { in hci_conn_check_link_mode()
2309 bt_dev_err(conn->hdev, in hci_conn_check_link_mode()
2310 "Invalid security: Missing AES-CCM usage"); in hci_conn_check_link_mode()
2315 !test_bit(HCI_CONN_ENCRYPT, &conn->flags)) in hci_conn_check_link_mode()
2326 if (conn->pending_sec_level > sec_level) in hci_conn_auth()
2327 sec_level = conn->pending_sec_level; in hci_conn_auth()
2329 if (sec_level > conn->sec_level) in hci_conn_auth()
2330 conn->pending_sec_level = sec_level; in hci_conn_auth()
2331 else if (test_bit(HCI_CONN_AUTH, &conn->flags)) in hci_conn_auth()
2335 auth_type |= (conn->auth_type & 0x01); in hci_conn_auth()
2337 conn->auth_type = auth_type; in hci_conn_auth()
2339 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { in hci_conn_auth()
2342 cp.handle = cpu_to_le16(conn->handle); in hci_conn_auth()
2343 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, in hci_conn_auth()
2349 if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags)) in hci_conn_auth()
2350 set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); in hci_conn_auth()
2361 if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) { in hci_conn_encrypt()
2363 cp.handle = cpu_to_le16(conn->handle); in hci_conn_encrypt()
2365 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), in hci_conn_encrypt()
2376 if (conn->type == LE_LINK) in hci_conn_security()
2389 if (!test_bit(HCI_CONN_AUTH, &conn->flags)) in hci_conn_security()
2392 switch (conn->key_type) { in hci_conn_security()
2421 if (sec_level <= BT_SECURITY_MEDIUM || conn->pin_length == 16) in hci_conn_security()
2429 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) in hci_conn_security()
2433 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags); in hci_conn_security()
2439 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) { in hci_conn_security()
2443 if (!conn->enc_key_size) in hci_conn_security()
2460 /* Accept if non-secure or higher security level is required */ in hci_conn_check_secure()
2465 if (conn->sec_level == BT_SECURITY_HIGH || in hci_conn_check_secure()
2466 conn->sec_level == BT_SECURITY_FIPS) in hci_conn_check_secure()
2479 if (role == conn->role) in hci_conn_switch_role()
2482 if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) { in hci_conn_switch_role()
2484 bacpy(&cp.bdaddr, &conn->dst); in hci_conn_switch_role()
2486 hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp); in hci_conn_switch_role()
2496 struct hci_dev *hdev = conn->hdev; in hci_conn_enter_active_mode()
2498 BT_DBG("hcon %p mode %d", conn, conn->mode); in hci_conn_enter_active_mode()
2500 if (conn->mode != HCI_CM_SNIFF) in hci_conn_enter_active_mode()
2503 if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active) in hci_conn_enter_active_mode()
2506 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) { in hci_conn_enter_active_mode()
2508 cp.handle = cpu_to_le16(conn->handle); in hci_conn_enter_active_mode()
2513 if (hdev->idle_timeout > 0) in hci_conn_enter_active_mode()
2514 queue_delayed_work(hdev->workqueue, &conn->idle_work, in hci_conn_enter_active_mode()
2515 msecs_to_jiffies(hdev->idle_timeout)); in hci_conn_enter_active_mode()
2521 struct list_head *head = &hdev->conn_hash.list; in hci_conn_hash_flush()
2524 BT_DBG("hdev %s", hdev->name); in hci_conn_hash_flush()
2533 conn->state = BT_CLOSED; in hci_conn_hash_flush()
2543 if (conn->role == HCI_ROLE_MASTER) in get_link_mode()
2546 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) in get_link_mode()
2549 if (test_bit(HCI_CONN_AUTH, &conn->flags)) in get_link_mode()
2552 if (test_bit(HCI_CONN_SECURE, &conn->flags)) in get_link_mode()
2555 if (test_bit(HCI_CONN_FIPS, &conn->flags)) in get_link_mode()
2570 return -EFAULT; in hci_get_conn_list()
2573 return -EINVAL; in hci_get_conn_list()
2579 return -ENOMEM; in hci_get_conn_list()
2584 return -ENODEV; in hci_get_conn_list()
2587 ci = cl->conn_info; in hci_get_conn_list()
2590 list_for_each_entry(c, &hdev->conn_hash.list, list) { in hci_get_conn_list()
2591 bacpy(&(ci + n)->bdaddr, &c->dst); in hci_get_conn_list()
2592 (ci + n)->handle = c->handle; in hci_get_conn_list()
2593 (ci + n)->type = c->type; in hci_get_conn_list()
2594 (ci + n)->out = c->out; in hci_get_conn_list()
2595 (ci + n)->state = c->state; in hci_get_conn_list()
2596 (ci + n)->link_mode = get_link_mode(c); in hci_get_conn_list()
2602 cl->dev_id = hdev->id; in hci_get_conn_list()
2603 cl->conn_num = n; in hci_get_conn_list()
2611 return err ? -EFAULT : 0; in hci_get_conn_list()
2622 return -EFAULT; in hci_get_conn_info()
2627 bacpy(&ci.bdaddr, &conn->dst); in hci_get_conn_info()
2628 ci.handle = conn->handle; in hci_get_conn_info()
2629 ci.type = conn->type; in hci_get_conn_info()
2630 ci.out = conn->out; in hci_get_conn_info()
2631 ci.state = conn->state; in hci_get_conn_info()
2637 return -ENOENT; in hci_get_conn_info()
2639 return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0; in hci_get_conn_info()
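hci_get_conn_info() above services the HCIGETCONNINFO ioctl issued on a raw HCI socket. A rough user-space sketch of driving it through the BlueZ library helpers is shown below (link with -lbluetooth; error handling trimmed). The struct layout follows the BlueZ headers, so treat field details as something to verify against your hci.h/hci_lib.h. Note that the user-space hci_get_route() used here is the BlueZ library helper returning a device id, not the kernel function of the same name in this listing.

#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>
#include <bluetooth/hci_lib.h>

int main(int argc, char *argv[])
{
	struct hci_conn_info_req *cr;
	bdaddr_t bdaddr;
	int dd;

	if (argc < 2)
		return 1;
	str2ba(argv[1], &bdaddr);	/* e.g. "AA:BB:CC:DD:EE:FF" */

	dd = hci_open_dev(hci_get_route(&bdaddr));
	if (dd < 0)
		return 1;

	cr = malloc(sizeof(*cr) + sizeof(struct hci_conn_info));
	if (!cr)
		return 1;
	bacpy(&cr->bdaddr, &bdaddr);
	cr->type = ACL_LINK;

	/* Lands in the kernel's hci_get_conn_info() shown above. */
	if (ioctl(dd, HCIGETCONNINFO, (unsigned long)cr) == 0)
		printf("handle %u state %u\n",
		       cr->conn_info->handle, cr->conn_info->state);

	free(cr);
	hci_close_dev(dd);
	return 0;
}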
2648 return -EFAULT; in hci_get_auth_info()
2653 req.type = conn->auth_type; in hci_get_auth_info()
2657 return -ENOENT; in hci_get_auth_info()
2659 return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0; in hci_get_auth_info()
2664 struct hci_dev *hdev = conn->hdev; in hci_chan_create()
2667 BT_DBG("%s hcon %p", hdev->name, conn); in hci_chan_create()
2669 if (test_bit(HCI_CONN_DROP, &conn->flags)) { in hci_chan_create()
2678 chan->conn = hci_conn_get(conn); in hci_chan_create()
2679 skb_queue_head_init(&chan->data_q); in hci_chan_create()
2680 chan->state = BT_CONNECTED; in hci_chan_create()
2682 list_add_rcu(&chan->list, &conn->chan_list); in hci_chan_create()
2689 struct hci_conn *conn = chan->conn; in hci_chan_del()
2690 struct hci_dev *hdev = conn->hdev; in hci_chan_del()
2692 BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan); in hci_chan_del()
2694 list_del_rcu(&chan->list); in hci_chan_del()
2699 set_bit(HCI_CONN_DROP, &conn->flags); in hci_chan_del()
2703 skb_queue_purge(&chan->data_q); in hci_chan_del()
2713 list_for_each_entry_safe(chan, n, &conn->chan_list, list) in hci_chan_list_flush()
2722 list_for_each_entry(hchan, &hcon->chan_list, list) { in __hci_chan_lookup_handle()
2723 if (hchan->handle == handle) in __hci_chan_lookup_handle()
2732 struct hci_conn_hash *h = &hdev->conn_hash; in hci_chan_lookup_handle()
2738 list_for_each_entry_rcu(hcon, &h->list, list) { in hci_chan_lookup_handle()
2757 switch (conn->type) { in hci_conn_get_phy()
2772 if (conn->pkt_type & (HCI_DM3 | HCI_DH3)) in hci_conn_get_phy()
2775 if (conn->pkt_type & (HCI_DM5 | HCI_DH5)) in hci_conn_get_phy()
2779 * 2-DH1, 2-DH3 and 2-DH5. in hci_conn_get_phy()
2781 if (!(conn->pkt_type & HCI_2DH1)) in hci_conn_get_phy()
2784 if (!(conn->pkt_type & HCI_2DH3)) in hci_conn_get_phy()
2787 if (!(conn->pkt_type & HCI_2DH5)) in hci_conn_get_phy()
2791 * 3-DH1, 3-DH3 and 3-DH5. in hci_conn_get_phy()
2793 if (!(conn->pkt_type & HCI_3DH1)) in hci_conn_get_phy()
2796 if (!(conn->pkt_type & HCI_3DH3)) in hci_conn_get_phy()
2799 if (!(conn->pkt_type & HCI_3DH5)) in hci_conn_get_phy()
2808 if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5))) in hci_conn_get_phy()
2811 /* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */ in hci_conn_get_phy()
2812 if (!(conn->pkt_type & ESCO_2EV3)) in hci_conn_get_phy()
2815 if (!(conn->pkt_type & ESCO_2EV5)) in hci_conn_get_phy()
2818 /* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */ in hci_conn_get_phy()
2819 if (!(conn->pkt_type & ESCO_3EV3)) in hci_conn_get_phy()
2822 if (!(conn->pkt_type & ESCO_3EV5)) in hci_conn_get_phy()
2828 if (conn->le_tx_phy & HCI_LE_SET_PHY_1M) in hci_conn_get_phy()
2831 if (conn->le_rx_phy & HCI_LE_SET_PHY_1M) in hci_conn_get_phy()
2834 if (conn->le_tx_phy & HCI_LE_SET_PHY_2M) in hci_conn_get_phy()
2837 if (conn->le_rx_phy & HCI_LE_SET_PHY_2M) in hci_conn_get_phy()
2840 if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED) in hci_conn_get_phy()
2843 if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED) in hci_conn_get_phy()
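In the hci_conn_get_phy() ACL and eSCO branches above, the 2M/3M checks are negated ("!(conn->pkt_type & HCI_2DH1)") because in the HCI packet-type bitmask those EDR bits mean "this packet type shall not be used"; a clear bit therefore indicates the rate is available. A tiny illustration of that inverted-mask convention, with bit values invented for the example rather than the real HCI constants:

#include <stdio.h>

/* Invented "shall NOT be used" bits, for illustration only. */
#define NO_2DH1 0x0002
#define NO_3DH1 0x0004

#define PHY_BR_1M  0x01
#define PHY_EDR_2M 0x02
#define PHY_EDR_3M 0x04

static unsigned int acl_phys(unsigned int pkt_type)
{
	unsigned int phys = PHY_BR_1M;	/* basic rate always possible */

	if (!(pkt_type & NO_2DH1))	/* bit clear => 2-DH1 allowed */
		phys |= PHY_EDR_2M;
	if (!(pkt_type & NO_3DH1))	/* bit clear => 3-DH1 allowed */
		phys |= PHY_EDR_3M;
	return phys;
}

int main(void)
{
	/* Forbid 3 Mb/s packets, keep 2 Mb/s ones. */
	printf("phys: 0x%x\n", acl_phys(NO_3DH1));	/* 0x3: BR + EDR 2M */
	return 0;
}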
2857 return -ECANCELED; in abort_conn_sync()
2859 return hci_abort_conn_sync(hdev, conn, conn->abort_reason); in abort_conn_sync()
2864 struct hci_dev *hdev = conn->hdev; in hci_abort_conn()
2869 if (conn->abort_reason) in hci_abort_conn()
2872 bt_dev_dbg(hdev, "handle 0x%2.2x reason 0x%2.2x", conn->handle, reason); in hci_abort_conn()
2874 conn->abort_reason = reason; in hci_abort_conn()
2883 if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) { in hci_abort_conn()
2884 switch (hci_skb_event(hdev->sent_cmd)) { in hci_abort_conn()
2907 struct sock *sk = skb ? skb->sk : NULL; in hci_setup_tx_timestamp()
2922 sock_tx_timestamp(sk, sockc, &skb_shinfo(skb)->tx_flags); in hci_setup_tx_timestamp()
2924 if (sk->sk_type == SOCK_STREAM) in hci_setup_tx_timestamp()
2925 key = atomic_add_return(key_offset, &sk->sk_tskey); in hci_setup_tx_timestamp()
2927 if (sockc->tsflags & SOF_TIMESTAMPING_OPT_ID && in hci_setup_tx_timestamp()
2928 sockc->tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK) { in hci_setup_tx_timestamp()
2929 if (sockc->tsflags & SOCKCM_FLAG_TS_OPT_ID) { in hci_setup_tx_timestamp()
2930 skb_shinfo(skb)->tskey = sockc->ts_opt_id; in hci_setup_tx_timestamp()
2932 if (sk->sk_type != SOCK_STREAM) in hci_setup_tx_timestamp()
2933 key = atomic_inc_return(&sk->sk_tskey); in hci_setup_tx_timestamp()
2934 skb_shinfo(skb)->tskey = key - 1; in hci_setup_tx_timestamp()
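hci_setup_tx_timestamp() above copies the per-socket timestamping state (SOF_TIMESTAMPING_OPT_ID, ts_opt_id, tskey) onto outgoing skbs. From user space this machinery is armed with the standard SO_TIMESTAMPING socket option; the minimal sketch below uses only the generic socket API, with completion events then read back from the socket error queue (not shown).

#include <sys/socket.h>
#include <linux/net_tstamp.h>

/* Enable software TX timestamps with per-packet ids on an existing socket. */
int enable_tx_timestamps(int fd)
{
	unsigned int val = SOF_TIMESTAMPING_SOFTWARE |
			   SOF_TIMESTAMPING_TX_SOFTWARE |
			   SOF_TIMESTAMPING_OPT_ID;

	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
}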
2941 struct tx_queue *comp = &conn->tx_q; in hci_conn_tx_queue()
2945 if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP) in hci_conn_tx_queue()
2946 __skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SND); in hci_conn_tx_queue()
2953 switch (conn->type) { in hci_conn_tx_queue()
2960 if (!hci_dev_test_flag(conn->hdev, HCI_SCO_FLOWCTL)) in hci_conn_tx_queue()
2967 if (skb->sk && (skb_shinfo(skb)->tx_flags & SKBTX_COMPLETION_TSTAMP)) in hci_conn_tx_queue()
2971 if (!track && !comp->tracked) { in hci_conn_tx_queue()
2972 comp->extra++; in hci_conn_tx_queue()
2981 comp->tracked++; in hci_conn_tx_queue()
2988 skb_queue_tail(&comp->queue, skb); in hci_conn_tx_queue()
2995 comp->tracked = 0; in hci_conn_tx_queue()
2996 comp->extra += skb_queue_len(&comp->queue) + 1; in hci_conn_tx_queue()
2997 skb_queue_purge(&comp->queue); in hci_conn_tx_queue()
3002 struct tx_queue *comp = &conn->tx_q; in hci_conn_tx_dequeue()
3009 if (comp->extra && (comp->tracked || skb_queue_empty(&comp->queue))) { in hci_conn_tx_dequeue()
3010 comp->extra--; in hci_conn_tx_dequeue()
3014 skb = skb_dequeue(&comp->queue); in hci_conn_tx_dequeue()
3018 if (skb->sk) { in hci_conn_tx_dequeue()
3019 comp->tracked--; in hci_conn_tx_dequeue()
3020 __skb_tstamp_tx(skb, NULL, NULL, skb->sk, in hci_conn_tx_dequeue()
3029 if (conn->type == ACL_LINK) { in hci_conn_key_enc_size()
3032 key = hci_find_link_key(conn->hdev, &conn->dst); in hci_conn_key_enc_size()
3036 return &key->pin_len; in hci_conn_key_enc_size()
3037 } else if (conn->type == LE_LINK) { in hci_conn_key_enc_size()
3040 ltk = hci_find_ltk(conn->hdev, &conn->dst, conn->dst_type, in hci_conn_key_enc_size()
3041 conn->role); in hci_conn_key_enc_size()
3045 return &ltk->enc_size; in hci_conn_key_enc_size()