xref: /linux/net/bluetooth/l2cap_core.c (revision 6315d93541f8a5f77c5ef5c4f25233e66d189603)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 
43 #define LE_FLOWCTL_MAX_CREDITS 65535
44 
45 bool disable_ertm;
46 bool enable_ecred = IS_ENABLED(CONFIG_BT_LE_L2CAP_ECRED);
47 
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49 
50 static LIST_HEAD(chan_list);
51 static DEFINE_RWLOCK(chan_list_lock);
52 
53 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
54 				       u8 code, u8 ident, u16 dlen, void *data);
55 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 			   void *data);
57 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
58 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59 
60 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
61 		     struct sk_buff_head *skbs, u8 event);
62 static void l2cap_retrans_timeout(struct work_struct *work);
63 static void l2cap_monitor_timeout(struct work_struct *work);
64 static void l2cap_ack_timeout(struct work_struct *work);
65 
66 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
67 {
68 	if (link_type == LE_LINK) {
69 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
70 			return BDADDR_LE_PUBLIC;
71 		else
72 			return BDADDR_LE_RANDOM;
73 	}
74 
75 	return BDADDR_BREDR;
76 }
77 
78 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
79 {
80 	return bdaddr_type(hcon->type, hcon->src_type);
81 }
82 
83 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
84 {
85 	return bdaddr_type(hcon->type, hcon->dst_type);
86 }
87 
88 /* ---- L2CAP channels ---- */
89 
90 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
91 						   u16 cid)
92 {
93 	struct l2cap_chan *c;
94 
95 	list_for_each_entry(c, &conn->chan_l, list) {
96 		if (c->dcid == cid)
97 			return c;
98 	}
99 	return NULL;
100 }
101 
102 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
103 						   u16 cid)
104 {
105 	struct l2cap_chan *c;
106 
107 	list_for_each_entry(c, &conn->chan_l, list) {
108 		if (c->scid == cid)
109 			return c;
110 	}
111 	return NULL;
112 }
113 
114 /* Find channel with given SCID.
115  * Returns a reference locked channel.
116  */
117 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
118 						 u16 cid)
119 {
120 	struct l2cap_chan *c;
121 
122 	c = __l2cap_get_chan_by_scid(conn, cid);
123 	if (c) {
124 		/* Only lock if chan reference is not 0 */
125 		c = l2cap_chan_hold_unless_zero(c);
126 		if (c)
127 			l2cap_chan_lock(c);
128 	}
129 
130 	return c;
131 }
132 
133 /* Find channel with given DCID.
134  * Returns a reference locked channel.
135  */
136 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
137 						 u16 cid)
138 {
139 	struct l2cap_chan *c;
140 
141 	c = __l2cap_get_chan_by_dcid(conn, cid);
142 	if (c) {
143 		/* Only lock if chan reference is not 0 */
144 		c = l2cap_chan_hold_unless_zero(c);
145 		if (c)
146 			l2cap_chan_lock(c);
147 	}
148 
149 	return c;
150 }
151 
152 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
153 						    u8 ident)
154 {
155 	struct l2cap_chan *c;
156 
157 	list_for_each_entry(c, &conn->chan_l, list) {
158 		if (c->ident == ident)
159 			return c;
160 	}
161 	return NULL;
162 }
163 
164 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
165 						      u8 src_type)
166 {
167 	struct l2cap_chan *c;
168 
169 	list_for_each_entry(c, &chan_list, global_l) {
170 		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
171 			continue;
172 
173 		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
174 			continue;
175 
176 		if (c->sport == psm && !bacmp(&c->src, src))
177 			return c;
178 	}
179 	return NULL;
180 }
181 
/* Bind @chan to PSM @psm on source address @src, or pick a free dynamic
 * PSM when @psm is 0.
 *
 * Returns 0 on success, -EADDRINUSE if the requested PSM is already
 * bound on @src, or -EINVAL when no free dynamic PSM is available.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		if (chan->src_type == BDADDR_BREDR) {
			/* Step by 2 so only PSMs with the same low bit as
			 * L2CAP_PSM_DYN_START (i.e. valid odd PSMs) are tried.
			 */
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		/* Scan the dynamic range for the first unused PSM */
		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
							 chan->src_type)) {
				chan->psm   = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
226 
227 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
228 {
229 	write_lock(&chan_list_lock);
230 
231 	/* Override the defaults (which are for conn-oriented) */
232 	chan->omtu = L2CAP_DEFAULT_MTU;
233 	chan->chan_type = L2CAP_CHAN_FIXED;
234 
235 	chan->scid = scid;
236 
237 	write_unlock(&chan_list_lock);
238 
239 	return 0;
240 }
241 
242 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
243 {
244 	u16 cid, dyn_end;
245 
246 	if (conn->hcon->type == LE_LINK)
247 		dyn_end = L2CAP_CID_LE_DYN_END;
248 	else
249 		dyn_end = L2CAP_CID_DYN_END;
250 
251 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
252 		if (!__l2cap_get_chan_by_scid(conn, cid))
253 			return cid;
254 	}
255 
256 	return 0;
257 }
258 
/* Transition @chan to @state and notify the channel owner (err = 0). */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
267 
268 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
269 						int state, int err)
270 {
271 	chan->state = state;
272 	chan->ops->state_change(chan, chan->state, err);
273 }
274 
/* Report error @err to the channel owner without changing the state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
279 
280 static void __set_retrans_timer(struct l2cap_chan *chan)
281 {
282 	if (!delayed_work_pending(&chan->monitor_timer) &&
283 	    chan->retrans_timeout) {
284 		l2cap_set_timer(chan, &chan->retrans_timer,
285 				secs_to_jiffies(chan->retrans_timeout));
286 	}
287 }
288 
289 static void __set_monitor_timer(struct l2cap_chan *chan)
290 {
291 	__clear_retrans_timer(chan);
292 	if (chan->monitor_timeout) {
293 		l2cap_set_timer(chan, &chan->monitor_timer,
294 				secs_to_jiffies(chan->monitor_timeout));
295 	}
296 }
297 
298 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
299 					       u16 seq)
300 {
301 	struct sk_buff *skb;
302 
303 	skb_queue_walk(head, skb) {
304 		if (bt_cb(skb)->l2cap.txseq == seq)
305 			return skb;
306 	}
307 
308 	return NULL;
309 }
310 
311 /* ---- L2CAP sequence number lists ---- */
312 
313 /* For ERTM, ordered lists of sequence numbers must be tracked for
314  * SREJ requests that are received and for frames that are to be
315  * retransmitted. These seq_list functions implement a singly-linked
316  * list in an array, where membership in the list can also be checked
317  * in constant time. Items can also be added to the tail of the list
318  * and removed from the head in constant time, without further memory
319  * allocs or frees.
320  */
321 
322 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
323 {
324 	size_t alloc_size, i;
325 
326 	/* Allocated size is a power of 2 to map sequence numbers
327 	 * (which may be up to 14 bits) in to a smaller array that is
328 	 * sized for the negotiated ERTM transmit windows.
329 	 */
330 	alloc_size = roundup_pow_of_two(size);
331 
332 	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
333 	if (!seq_list->list)
334 		return -ENOMEM;
335 
336 	seq_list->mask = alloc_size - 1;
337 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
338 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
339 	for (i = 0; i < alloc_size; i++)
340 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
341 
342 	return 0;
343 }
344 
/* Release the backing array allocated by l2cap_seq_list_init(). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
349 
/* Test whether @seq is currently a member of @seq_list. */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
356 
/* Remove and return the sequence number at the head of the list.
 * Caller must ensure the list is non-empty.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next linked entry and clear the old slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	/* The tail entry was popped: mark the list empty */
	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
372 
373 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
374 {
375 	u16 i;
376 
377 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
378 		return;
379 
380 	for (i = 0; i <= seq_list->mask; i++)
381 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
382 
383 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
384 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
385 }
386 
/* Append @seq to the tail of @seq_list (no-op if already a member). */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Each sequence number may appear at most once */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		/* List was empty: @seq becomes the head */
		seq_list->head = seq;
	else
		/* Link the previous tail entry to @seq */
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
404 
/* Delayed work run when the channel timer (armed via __set_chan_timer())
 * expires: close the channel with an error derived from its state and
 * drop the reference taken when the timer was scheduled.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	if (!conn)
		return;

	mutex_lock(&conn->lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	/* Map the state at expiry onto the error reported to the owner */
	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);	/* drop the timer's reference */

	mutex_unlock(&conn->lock);
}
440 
/* Allocate and initialise a new channel with a single reference.
 * The channel is added to the global channel list and starts in
 * BT_OPEN state.  Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	skb_queue_head_init(&chan->tx_q);
	skb_queue_head_init(&chan->srej_q);
	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	/* Available receive buffer space is initially unknown */
	chan->rx_avail = -1;

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
480 
/* kref release callback: unlink the channel from the global list and
 * free it.  Invoked when the last reference is dropped via
 * l2cap_chan_put().
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
493 
/* Take an additional reference on @c. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_get(&c->kref);
}
500 
501 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
502 {
503 	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
504 
505 	if (!kref_get_unless_zero(&c->kref))
506 		return NULL;
507 
508 	return c;
509 }
510 
/* Drop a reference on @c; frees it via l2cap_chan_destroy() when the
 * count reaches zero.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
518 
519 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
520 {
521 	chan->fcs  = L2CAP_FCS_CRC16;
522 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
523 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
524 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
525 	chan->remote_max_tx = chan->max_tx;
526 	chan->remote_tx_win = chan->tx_win;
527 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
528 	chan->sec_level = BT_SECURITY_LOW;
529 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
530 	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
531 	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
532 
533 	chan->conf_state = 0;
534 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
535 
536 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
537 }
538 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
539 
540 static __u16 l2cap_le_rx_credits(struct l2cap_chan *chan)
541 {
542 	size_t sdu_len = chan->sdu ? chan->sdu->len : 0;
543 
544 	if (chan->mps == 0)
545 		return 0;
546 
547 	/* If we don't know the available space in the receiver buffer, give
548 	 * enough credits for a full packet.
549 	 */
550 	if (chan->rx_avail == -1)
551 		return (chan->imtu / chan->mps) + 1;
552 
553 	/* If we know how much space is available in the receive buffer, give
554 	 * out as many credits as would fill the buffer.
555 	 */
556 	if (chan->rx_avail <= sdu_len)
557 		return 0;
558 
559 	return DIV_ROUND_UP(chan->rx_avail - sdu_len, chan->mps);
560 }
561 
/* Reset the LE credit-based flow-control state of @chan for a fresh
 * connection.  @tx_credits are the initial credits granted by the peer.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	/* Initial rx credits depend on the mps computed just above */
	chan->rx_credits = l2cap_le_rx_credits(chan);

	skb_queue_head_init(&chan->tx_q);
}
574 
575 static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
576 {
577 	l2cap_le_flowctl_init(chan, tx_credits);
578 
579 	/* L2CAP implementations shall support a minimum MPS of 64 octets */
580 	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
581 		chan->mps = L2CAP_ECRED_MIN_MPS;
582 		chan->rx_credits = l2cap_le_rx_credits(chan);
583 	}
584 }
585 
/* Attach @chan to @conn: assign CIDs/MTU according to the channel
 * type, set QoS defaults, take a channel reference and (usually) an
 * hci_conn hold, and link the channel into the connection's list.
 * Caller holds conn->lock (see l2cap_chan_add()).
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Best-effort QoS defaults */
	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	/* Append to the list since the order matters for ECRED */
	list_add_tail(&chan->list, &conn->chan_l);
}
638 
/* Add @chan to @conn while holding the connection lock. */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->lock);
}
645 
/* Detach @chan from its connection (if any) and tear it down,
 * reporting @err to the channel owner.  Undoes the reference and
 * hci_conn hold taken in __l2cap_chan_add() and purges any
 * mode-specific queued data once configuration had completed.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);
	}

	/* Nothing mode-specific to clean up before config completed */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		fallthrough;

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
703 
704 static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
705 				 l2cap_chan_func_t func, void *data)
706 {
707 	struct l2cap_chan *chan, *l;
708 
709 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
710 		if (chan->ident == id)
711 			func(chan, data);
712 	}
713 }
714 
715 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
716 			      void *data)
717 {
718 	struct l2cap_chan *chan;
719 
720 	list_for_each_entry(chan, &conn->chan_l, list) {
721 		func(chan, data);
722 	}
723 }
724 
725 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
726 		     void *data)
727 {
728 	if (!conn)
729 		return;
730 
731 	mutex_lock(&conn->lock);
732 	__l2cap_chan_list(conn, func, data);
733 	mutex_unlock(&conn->lock);
734 }
735 
736 EXPORT_SYMBOL_GPL(l2cap_chan_list);
737 
/* Delayed work: propagate the hci_conn's current destination address
 * and address type to every channel on the connection.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_timer.work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}
756 
757 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
758 {
759 	struct l2cap_conn *conn = chan->conn;
760 	struct l2cap_le_conn_rsp rsp;
761 	u16 result;
762 
763 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
764 		result = L2CAP_CR_LE_AUTHORIZATION;
765 	else
766 		result = L2CAP_CR_LE_BAD_PSM;
767 
768 	l2cap_state_change(chan, BT_DISCONN);
769 
770 	rsp.dcid    = cpu_to_le16(chan->scid);
771 	rsp.mtu     = cpu_to_le16(chan->imtu);
772 	rsp.mps     = cpu_to_le16(chan->mps);
773 	rsp.credits = cpu_to_le16(chan->rx_credits);
774 	rsp.result  = cpu_to_le16(result);
775 
776 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
777 		       &rsp);
778 }
779 
/* Reject a pending enhanced credit-based connection request: move the
 * channel to BT_DISCONN and send the deferred ECRED response.
 */
static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
{
	l2cap_state_change(chan, BT_DISCONN);

	__l2cap_ecred_conn_rsp_defer(chan);
}
786 
787 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
788 {
789 	struct l2cap_conn *conn = chan->conn;
790 	struct l2cap_conn_rsp rsp;
791 	u16 result;
792 
793 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
794 		result = L2CAP_CR_SEC_BLOCK;
795 	else
796 		result = L2CAP_CR_BAD_PSM;
797 
798 	l2cap_state_change(chan, BT_DISCONN);
799 
800 	rsp.scid   = cpu_to_le16(chan->dcid);
801 	rsp.dcid   = cpu_to_le16(chan->scid);
802 	rsp.result = cpu_to_le16(result);
803 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
804 
805 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
806 }
807 
/* Close @chan according to its current state, reporting @reason to the
 * owner.  Connected connection-oriented channels send a disconnect
 * request guarded by a timer; channels awaiting accept (BT_CONNECT2)
 * first send the appropriate reject response; all other states are
 * torn down directly.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Wait for the disconnect response, bounded by timer */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK) {
				switch (chan->mode) {
				case L2CAP_MODE_LE_FLOWCTL:
					l2cap_chan_le_connect_reject(chan);
					break;
				case L2CAP_MODE_EXT_FLOWCTL:
					l2cap_chan_ecred_connect_reject(chan);
					/* ECRED reject defers teardown */
					return;
				}
			}
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
858 
/* Derive the HCI authentication requirement (HCI_AT_*) for @chan from
 * its channel type, PSM and security level.  As a side effect, raises
 * BT_SECURITY_LOW to BT_SECURITY_SDP for the SDP and 3DSP PSMs.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* Non-SDP conn-oriented channels use the general rules */
		fallthrough;

	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
910 
911 /* Service level security */
912 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
913 {
914 	struct l2cap_conn *conn = chan->conn;
915 	__u8 auth_type;
916 
917 	if (conn->hcon->type == LE_LINK)
918 		return smp_conn_security(conn->hcon, chan->sec_level);
919 
920 	auth_type = l2cap_get_auth_type(chan);
921 
922 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
923 				 initiator);
924 }
925 
/* Allocate the next signalling command identifier for @conn,
 * wrapping within the kernel-owned 1-128 range.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	mutex_lock(&conn->ident_lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	mutex_unlock(&conn->ident_lock);

	return id;
}
947 
948 static void l2cap_send_acl(struct l2cap_conn *conn, struct sk_buff *skb,
949 			   u8 flags)
950 {
951 	/* Check if the hcon still valid before attempting to send */
952 	if (hci_conn_valid(conn->hcon->hdev, conn->hcon))
953 		hci_send_acl(conn->hchan, skb, flags);
954 	else
955 		kfree_skb(skb);
956 }
957 
958 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
959 			   void *data)
960 {
961 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
962 	u8 flags;
963 
964 	BT_DBG("code 0x%2.2x", code);
965 
966 	if (!skb)
967 		return;
968 
969 	/* Use NO_FLUSH if supported or we have an LE link (which does
970 	 * not support auto-flushing packets) */
971 	if (lmp_no_flush_capable(conn->hcon->hdev) ||
972 	    conn->hcon->type == LE_LINK)
973 		flags = ACL_START_NO_FLUSH;
974 	else
975 		flags = ACL_START;
976 
977 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
978 	skb->priority = HCI_PRIO_MAX;
979 
980 	l2cap_send_acl(conn, skb, flags);
981 }
982 
/* Transmit a single data PDU for @chan over the ACL link, selecting
 * flushable vs non-flushable packet boundary flags.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
1005 
1006 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
1007 {
1008 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
1009 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
1010 
1011 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
1012 		/* S-Frame */
1013 		control->sframe = 1;
1014 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
1015 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
1016 
1017 		control->sar = 0;
1018 		control->txseq = 0;
1019 	} else {
1020 		/* I-Frame */
1021 		control->sframe = 0;
1022 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
1023 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
1024 
1025 		control->poll = 0;
1026 		control->super = 0;
1027 	}
1028 }
1029 
1030 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
1031 {
1032 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1033 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
1034 
1035 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
1036 		/* S-Frame */
1037 		control->sframe = 1;
1038 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
1039 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
1040 
1041 		control->sar = 0;
1042 		control->txseq = 0;
1043 	} else {
1044 		/* I-Frame */
1045 		control->sframe = 0;
1046 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
1047 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1048 
1049 		control->poll = 0;
1050 		control->super = 0;
1051 	}
1052 }
1053 
1054 static inline void __unpack_control(struct l2cap_chan *chan,
1055 				    struct sk_buff *skb)
1056 {
1057 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1058 		__unpack_extended_control(get_unaligned_le32(skb->data),
1059 					  &bt_cb(skb)->l2cap);
1060 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1061 	} else {
1062 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
1063 					  &bt_cb(skb)->l2cap);
1064 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1065 	}
1066 }
1067 
1068 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1069 {
1070 	u32 packed;
1071 
1072 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1073 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1074 
1075 	if (control->sframe) {
1076 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1077 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1078 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1079 	} else {
1080 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1081 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1082 	}
1083 
1084 	return packed;
1085 }
1086 
1087 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1088 {
1089 	u16 packed;
1090 
1091 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1092 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1093 
1094 	if (control->sframe) {
1095 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1096 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1097 		packed |= L2CAP_CTRL_FRAME_TYPE;
1098 	} else {
1099 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1100 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1101 	}
1102 
1103 	return packed;
1104 }
1105 
1106 static inline void __pack_control(struct l2cap_chan *chan,
1107 				  struct l2cap_ctrl *control,
1108 				  struct sk_buff *skb)
1109 {
1110 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1111 		put_unaligned_le32(__pack_extended_control(control),
1112 				   skb->data + L2CAP_HDR_SIZE);
1113 	} else {
1114 		put_unaligned_le16(__pack_enhanced_control(control),
1115 				   skb->data + L2CAP_HDR_SIZE);
1116 	}
1117 }
1118 
1119 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1120 {
1121 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1122 		return L2CAP_EXT_HDR_SIZE;
1123 	else
1124 		return L2CAP_ENH_HDR_SIZE;
1125 }
1126 
/* Allocate and build a complete S-frame PDU carrying the already-packed
 * @control field.  Returns the skb ready for transmission, or an
 * ERR_PTR on allocation failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Basic L2CAP header: length excludes the header itself */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* FCS covers the header and control field written so far */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	/* S-frames carry flow-control state; send them ahead of data */
	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1159 
/* Build and transmit an S-frame described by @control, updating the
 * channel's ERTM bookkeeping (F-bit, RNR-sent state, last acked
 * sequence number) as a side effect.  Allocation failures are silently
 * dropped; ERTM recovers via its timers.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* A pending F-bit is carried on the next non-poll frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* Every S-frame except SREJ acknowledges up to reqseq */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1197 
/* Send a Receiver Ready or Receiver Not Ready S-frame acknowledging
 * the current buffer_seq.  RNR is chosen when the local side is busy;
 * @poll sets the P-bit to solicit a response from the peer.
 */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.poll = poll;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		control.super = L2CAP_SUPER_RNR;
	else
		control.super = L2CAP_SUPER_RR;

	control.reqseq = chan->buffer_seq;
	l2cap_send_sframe(chan, &control);
}
1216 
1217 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1218 {
1219 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1220 		return true;
1221 
1222 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1223 }
1224 
/* Send a classic (BR/EDR) L2CAP Connection Request for @chan and mark
 * the connect as pending.  The allocated ident is stored on the channel
 * so the response can be matched.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1239 
/* Transition @chan to BT_CONNECTED and notify its owner.  Idempotent:
 * does nothing if the channel is already connected.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* No TX credits yet: block sending until the peer grants some */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);
		break;
	}

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1265 
/* Send an LE Credit Based Connection Request for @chan.  Guarded by
 * FLAG_LE_CONN_REQ_SENT so the request goes out at most once.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	/* Default the incoming MTU to the link MTU if unset */
	if (!chan->imtu)
		chan->imtu = chan->conn->mtu;

	l2cap_le_flowctl_init(chan, 0);

	memset(&req, 0, sizeof(req));
	req.psm     = chan->psm;
	req.scid    = cpu_to_le16(chan->scid);
	req.mtu     = cpu_to_le16(chan->imtu);
	req.mps     = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1291 
/* Accumulator used while building a single Enhanced Credit Based
 * Connection Request that may cover several deferred channels.
 */
struct l2cap_ecred_conn_data {
	struct {
		struct l2cap_ecred_conn_req_hdr req;
		__le16 scid[5];	/* up to 5 source CIDs per request */
	} __packed pdu;
	struct l2cap_chan *chan;	/* channel initiating the request */
	struct pid *pid;		/* owner PID; only same-PID chans join */
	int count;			/* number of scid[] entries filled */
};
1301 
/* __l2cap_chan_list callback: fold matching deferred channels into the
 * ECRED connection request being assembled in @data.  Only channels
 * with the same PID/PSM as the initiating channel are included.
 */
static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_conn_data *conn = data;
	struct pid *pid;

	/* Skip the initiating channel itself */
	if (chan == conn->chan)
		return;

	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	pid = chan->ops->get_peer_pid(chan);

	/* Only add deferred channels with the same PID/PSM */
	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	/* Set the same ident so we can match on the rsp */
	chan->ident = conn->chan->ident;

	/* Include all channels deferred */
	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);

	conn->count++;
}
1333 
/* Send an Enhanced Credit Based Connection Request for @chan, batching
 * in any other deferred channels with the same PID/PSM.  Deferred
 * channels wait until a sibling triggers the request; the request is
 * sent at most once per channel (FLAG_ECRED_CONN_REQ_SENT).
 */
static void l2cap_ecred_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_conn_data data;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	memset(&data, 0, sizeof(data));
	data.pdu.req.psm     = chan->psm;
	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.req.mps     = cpu_to_le16(chan->mps);
	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.scid[0]     = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	data.count = 1;
	data.chan = chan;
	data.pid = chan->ops->get_peer_pid(chan);

	/* Collect additional deferred channels into the same PDU */
	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);

	/* PDU length scales with the number of CIDs included */
	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
		       &data.pdu);
}
1366 
/* Start an LE channel once link security is satisfied: fixed channels
 * (no PSM) become ready immediately, connection-oriented ones issue a
 * connect request in the mode-appropriate format.
 */
static void l2cap_le_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	/* Bail out while SMP is still raising the security level */
	if (!smp_conn_security(conn->hcon, chan->sec_level))
		return;

	if (!chan->psm) {
		l2cap_chan_ready(chan);
		return;
	}

	if (chan->state == BT_CONNECT) {
		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
			l2cap_ecred_connect(chan);
		else
			l2cap_le_connect(chan);
	}
}
1386 
1387 static void l2cap_start_connection(struct l2cap_chan *chan)
1388 {
1389 	if (chan->conn->hcon->type == LE_LINK) {
1390 		l2cap_le_start(chan);
1391 	} else {
1392 		l2cap_send_conn_req(chan);
1393 	}
1394 }
1395 
/* Send the L2CAP Information Request for the feature mask, once per
 * connection, and arm a timeout in case the peer never answers.
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	/* Timeout marks the procedure done so channels don't stall */
	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1413 
/* Return true when @hcon is either unencrypted or encrypted with a key
 * at least as large as the configured minimum (16 bytes on FIPS).
 */
static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
{
	/* The minimum encryption key size needs to be enforced by the
	 * host stack before establishing any L2CAP connections. The
	 * specification in theory allows a minimum of 1, but to align
	 * BR/EDR and LE transports, a minimum of 7 is chosen.
	 *
	 * This check might also be called for unencrypted connections
	 * that have no key size requirements. Ensure that the link is
	 * actually encrypted before enforcing a key size.
	 */
	int min_key_size = hcon->hdev->min_enc_key_size;

	/* On FIPS security level, key size must be 16 bytes */
	if (hcon->sec_level == BT_SECURITY_FIPS)
		min_key_size = 16;

	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
		hcon->enc_key_size >= min_key_size);
}
1434 
/* Drive connection establishment for @chan.  On BR/EDR the info
 * req/rsp procedure, security and encryption key size must all be
 * satisfied before the connect request goes out; LE links short-cut
 * straight to the LE start path.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	/* Feature mask not yet requested: kick off the info exchange;
	 * l2cap_conn_start() resumes us when it completes.
	 */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	/* Too-small encryption key: schedule disconnect instead */
	if (l2cap_check_enc_key_size(conn->hcon))
		l2cap_start_connection(chan);
	else
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
1461 
1462 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1463 {
1464 	u32 local_feat_mask = l2cap_feat_mask;
1465 	if (!disable_ertm)
1466 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1467 
1468 	switch (mode) {
1469 	case L2CAP_MODE_ERTM:
1470 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1471 	case L2CAP_MODE_STREAMING:
1472 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1473 	default:
1474 		return 0x00;
1475 	}
1476 }
1477 
/* Send an L2CAP Disconnection Request for @chan and move it to
 * BT_DISCONN with @err as the channel error.  ERTM timers are stopped
 * first so they cannot fire on a dying channel.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1499 
1500 /* ---- L2CAP connections ---- */
/* Walk all channels on @conn after the info req/rsp procedure has
 * finished and advance each one: connectionless channels become ready,
 * BT_CONNECT channels send their connect request, and BT_CONNECT2
 * (incoming) channels are answered with success/pending and, on
 * success, the first configure request.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	/* _safe: channels may be closed (and unlinked) while iterating */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Mode required by the channel but unsupported
			 * by the peer and no fallback allowed: give up.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Userspace must accept first */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Accepted: start configuration immediately */
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}
}
1576 
/* LE-specific post-connect work: kick off security for outgoing
 * pairing and, as peripheral, request a connection parameter update if
 * the current interval is outside the configured range.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1609 
/* Called when the underlying HCI link is up: start the BR/EDR info
 * exchange if needed, advance every existing channel, run LE-specific
 * setup and release any RX frames queued before the link was ready.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed channels wait for the feature mask */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process frames that arrived before the connection was ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1645 
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* Only channels that demanded reliable delivery get the error */
	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	}
}
1658 
/* Info req/rsp timeout: the peer never answered, so mark the feature
 * mask procedure done with defaults and let pending channels proceed.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	mutex_lock(&conn->lock);
	l2cap_conn_start(conn);
	mutex_unlock(&conn->lock);
}
1671 
1672 /*
1673  * l2cap_user
1674  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1675  * callback is called during registration. The ->remove callback is called
1676  * during unregistration.
1677  * An l2cap_user object can either be explicitly unregistered or when the
1678  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1679  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1680  * External modules must own a reference to the l2cap_conn object if they intend
1681  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1682  * any time if they don't.
1683  */
1684 
/* Register an external l2cap_user on @conn.  Returns 0 on success,
 * -EINVAL if @user is already registered and -ENODEV if the connection
 * has already been torn down; the user's ->probe() error is propagated
 * verbatim.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* A non-empty list node means this user is already registered */
	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1722 
/* Unregister @user from @conn and invoke its ->remove() callback.
 * Safe to call for a user that was never (or already un-) registered.
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	/* An empty list node means the user is not registered */
	if (list_empty(&user->list))
		goto out_unlock;

	list_del_init(&user->list);
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);
1739 
/* Detach and notify every registered l2cap_user on @conn.  A plain
 * while-loop (instead of a safe list iterator) is used deliberately:
 * ->remove() may unregister arbitrary other users, so the list head is
 * re-read on every iteration.
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del_init(&user->list);
		user->remove(conn, user);
	}
}
1750 
/* Tear down the L2CAP connection attached to @hcon: stop RX
 * processing, unregister users, close every channel with @err and drop
 * the conn's reference.  The ordering here is deliberate; see the
 * inline comments.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	mutex_lock(&conn->lock);

	/* Discard any partially reassembled frame */
	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	cancel_delayed_work_sync(&conn->id_addr_timer);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so ops->close() can't free it under us */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hci_chan_del(conn->hchan);
	/* NULL hchan marks the conn as dead for l2cap_register_user() */
	conn->hchan = NULL;

	hcon->l2cap_data = NULL;
	mutex_unlock(&conn->lock);
	l2cap_conn_put(conn);
}
1804 
/* kref release: drop the hci_conn reference held by the l2cap_conn and
 * free it.  Called when the last l2cap_conn_put() runs.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1812 
/* Take a reference on @conn; returns @conn for call chaining. */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
1819 
/* Drop a reference on @conn; frees it via l2cap_conn_free() at zero. */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1825 
1826 /* ---- Socket interface ---- */
1827 
1828 /* Find socket with psm and source / destination bdaddr.
1829  * Returns closest match.
1830  */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *tmp, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
		/* state == 0 means "any state" */
		if (state && c->state != state)
			continue;

		/* The channel's address type must match the link type */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				/* Channel may be dying; skip if we lost
				 * the refcount race.
				 */
				if (!l2cap_chan_hold_unless_zero(c))
					continue;

				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	/* Take the reference before dropping the lock */
	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1881 
/* ERTM monitor timer expiry: feed the monitor-timeout event into the
 * TX state machine.  The timer holds a channel reference, released
 * here; a channel already detached from its conn is ignored.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1902 
/* ERTM retransmission timer expiry: feed the retransmit-timeout event
 * into the TX state machine.  Mirrors l2cap_monitor_timeout(),
 * including dropping the reference the timer held on the channel.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1922 
/* Transmit all queued frames in streaming mode: number each I-frame,
 * pack its control field and optional FCS, and send it immediately.
 * Streaming mode keeps no unacked copy - frames are fire-and-forget.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Streaming mode never acknowledges received frames */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1958 
/* Transmit as many pending I-frames as the remote TX window allows in
 * ERTM mode.  Each frame is kept on tx_q for possible retransmission;
 * a clone is handed to the lower layer.  Returns the number of frames
 * sent, 0 if the peer is busy, or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Piggyback a pending F-bit on this I-frame */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* I-frames also acknowledge everything up to buffer_seq */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head; NULL when the queue is drained */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2025 
/* Retransmit every sequence number queued on the channel's
 * retrans_list.  Each frame's control field (reqseq/F-bit) and FCS are
 * refreshed before sending; exceeding max_tx retries tears the
 * connection down.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		/* Work on a local copy of the stored control block */
		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		/* max_tx == 0 means unlimited retries */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the ack and F-bit for the current state */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
						L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2103 
/* Retransmit the single frame identified by @control->reqseq
 * (response to an SREJ from the peer).
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2112 
/* Retransmit every unacknowledged frame starting at @control->reqseq
 * (response to a REJ or an RR with the P-bit set).  The previous
 * retransmit queue is discarded and rebuilt from tx_q.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll must be answered with the F-bit on the next frame */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame at or after reqseq that was sent */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything up to the first never-sent frame */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2146 
/* Acknowledge received I-frames.  If locally busy, send RNR; otherwise
 * try to piggyback the ack on outgoing I-frames and only emit an
 * explicit RR once the unacked window is about 3/4 full, else defer
 * via the ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Still unacked frames: ack later via the timer */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2196 
/* Copy @len bytes from the caller's msghdr iovec into @skb.
 *
 * The first @count bytes go into @skb's linear area (which the caller
 * sized for the L2CAP header plus the first fragment); any remainder
 * is split into conn->mtu sized skbs chained on @skb's frag_list.
 * Returns the number of bytes consumed or a negative errno; on error
 * the caller's kfree_skb(@skb) also releases any chained fragments.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		/* Link before copying so a copy failure is cleaned up by
		 * the caller freeing the parent skb.
		 */
		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
				   &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len  -= count;

		/* Keep the parent's length accounting in sync */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2240 
2241 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2242 						 struct msghdr *msg, size_t len)
2243 {
2244 	struct l2cap_conn *conn = chan->conn;
2245 	struct sk_buff *skb;
2246 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2247 	struct l2cap_hdr *lh;
2248 
2249 	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2250 	       __le16_to_cpu(chan->psm), len);
2251 
2252 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2253 
2254 	skb = chan->ops->alloc_skb(chan, hlen, count,
2255 				   msg->msg_flags & MSG_DONTWAIT);
2256 	if (IS_ERR(skb))
2257 		return skb;
2258 
2259 	/* Create L2CAP header */
2260 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2261 	lh->cid = cpu_to_le16(chan->dcid);
2262 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2263 	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2264 
2265 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2266 	if (unlikely(err < 0)) {
2267 		kfree_skb(skb);
2268 		return ERR_PTR(err);
2269 	}
2270 	return skb;
2271 }
2272 
2273 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2274 					      struct msghdr *msg, size_t len)
2275 {
2276 	struct l2cap_conn *conn = chan->conn;
2277 	struct sk_buff *skb;
2278 	int err, count;
2279 	struct l2cap_hdr *lh;
2280 
2281 	BT_DBG("chan %p len %zu", chan, len);
2282 
2283 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2284 
2285 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2286 				   msg->msg_flags & MSG_DONTWAIT);
2287 	if (IS_ERR(skb))
2288 		return skb;
2289 
2290 	/* Create L2CAP header */
2291 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2292 	lh->cid = cpu_to_le16(chan->dcid);
2293 	lh->len = cpu_to_le16(len);
2294 
2295 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2296 	if (unlikely(err < 0)) {
2297 		kfree_skb(skb);
2298 		return ERR_PTR(err);
2299 	}
2300 	return skb;
2301 }
2302 
/* Allocate and populate one ERTM/streaming I-frame PDU.
 *
 * The header is the basic L2CAP header plus an enhanced (16-bit) or
 * extended (32-bit) control field, an optional SDU length field
 * (present only when @sdulen != 0, i.e. the first PDU of a segmented
 * SDU), with room reserved for a trailing FCS when CRC16 is in use.
 * The control field is zeroed here and filled in by the transmit
 * state machine just before sending.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* PDU length covers control/SDU-len/FCS overhead plus payload */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2356 
/* Segment an SDU into ERTM/streaming PDUs queued on @seg_queue.
 *
 * The PDU size is bounded by the HCI MTU, the BR/EDR payload limit,
 * the ERTM header/FCS overhead and the remote's MPS.  Each PDU is
 * tagged with its SAR role (unsegmented, start, continue, end); only
 * the start PDU carries the total SDU length.  On allocation failure
 * the partially built queue is purged and an errno returned.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU: no SDU length field needed */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the first (start) PDU carries the SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2422 
2423 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2424 						   struct msghdr *msg,
2425 						   size_t len, u16 sdulen)
2426 {
2427 	struct l2cap_conn *conn = chan->conn;
2428 	struct sk_buff *skb;
2429 	int err, count, hlen;
2430 	struct l2cap_hdr *lh;
2431 
2432 	BT_DBG("chan %p len %zu", chan, len);
2433 
2434 	if (!conn)
2435 		return ERR_PTR(-ENOTCONN);
2436 
2437 	hlen = L2CAP_HDR_SIZE;
2438 
2439 	if (sdulen)
2440 		hlen += L2CAP_SDULEN_SIZE;
2441 
2442 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2443 
2444 	skb = chan->ops->alloc_skb(chan, hlen, count,
2445 				   msg->msg_flags & MSG_DONTWAIT);
2446 	if (IS_ERR(skb))
2447 		return skb;
2448 
2449 	/* Create L2CAP header */
2450 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2451 	lh->cid = cpu_to_le16(chan->dcid);
2452 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2453 
2454 	if (sdulen)
2455 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2456 
2457 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2458 	if (unlikely(err < 0)) {
2459 		kfree_skb(skb);
2460 		return ERR_PTR(err);
2461 	}
2462 
2463 	return skb;
2464 }
2465 
/* Segment an SDU into LE flow control PDUs queued on @seg_queue.
 *
 * The first PDU carries the total SDU length and therefore has
 * L2CAP_SDULEN_SIZE less payload room than subsequent PDUs.  On
 * allocation failure the partially built queue is purged and an
 * errno returned.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	/* First PDU payload is reduced by the SDU length field */
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			/* After the first PDU: no SDU length field, so the
			 * full MPS is available for payload again.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2501 
2502 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2503 {
2504 	int sent = 0;
2505 
2506 	BT_DBG("chan %p", chan);
2507 
2508 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2509 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2510 		chan->tx_credits--;
2511 		sent++;
2512 	}
2513 
2514 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2515 	       skb_queue_len(&chan->tx_q));
2516 }
2517 
2518 static void l2cap_tx_timestamp(struct sk_buff *skb,
2519 			       const struct sockcm_cookie *sockc,
2520 			       size_t len)
2521 {
2522 	struct sock *sk = skb ? skb->sk : NULL;
2523 
2524 	if (sk && sk->sk_type == SOCK_STREAM)
2525 		hci_setup_tx_timestamp(skb, len, sockc);
2526 	else
2527 		hci_setup_tx_timestamp(skb, 1, sockc);
2528 }
2529 
2530 static void l2cap_tx_timestamp_seg(struct sk_buff_head *queue,
2531 				   const struct sockcm_cookie *sockc,
2532 				   size_t len)
2533 {
2534 	struct sk_buff *skb = skb_peek(queue);
2535 	struct sock *sk = skb ? skb->sk : NULL;
2536 
2537 	if (sk && sk->sk_type == SOCK_STREAM)
2538 		l2cap_tx_timestamp(skb_peek_tail(queue), sockc, len);
2539 	else
2540 		l2cap_tx_timestamp(skb, sockc, len);
2541 }
2542 
/* Send an SDU on @chan according to the channel's mode.
 *
 * Connectionless channels emit a single PDU immediately.  LE/extended
 * flow control modes segment the SDU, queue the PDUs and transmit as
 * many as credits allow (suspending the channel when credits run
 * out).  Basic mode sends one PDU.  ERTM/streaming modes segment
 * first — allocation may block — then hand the fragments to the ERTM
 * state machine or stream them directly.
 *
 * Returns the number of bytes accepted or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    const struct sockcm_cookie *sockc)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_tx_timestamp(skb, sockc, len);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have been torn down while segmenting
		 * (allocation can block); drop the segments if so.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		l2cap_tx_timestamp_seg(&seg_queue, sockc, len);

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: stop accepting data until more arrive */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_tx_timestamp(skb, sockc, len);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM) {
			/* TODO: ERTM mode timestamping */
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		} else {
			l2cap_tx_timestamp_seg(&seg_queue, sockc, len);
			l2cap_streaming_send(chan, &seg_queue);
		}

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2656 
/* Send SREJ S-frames for every missing sequence number from
 * expected_tx_seq up to (but not including) @txseq, skipping frames
 * already buffered out of order in srej_q.  Each request is recorded
 * in srej_list, and expected_tx_seq is advanced past @txseq.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2679 
/* Re-send the SREJ for the most recently requested (tail) missing
 * frame, if any selective rejects are outstanding.
 */
static void l2cap_send_srej_tail(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	/* Nothing outstanding: list is empty */
	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
		return;

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;
	control.reqseq = chan->srej_list.tail;
	l2cap_send_sframe(chan, &control);
}
2695 
/* Re-send every outstanding SREJ except the one for @txseq.
 *
 * Each entry is popped, re-requested and appended again, so the list
 * rotates in place.  The loop ends after at most one full pass — when
 * the head returns to its initial value — or earlier if @txseq or an
 * empty list (L2CAP_SEQ_LIST_CLEAR) is reached; in those cases the
 * popped entry is intentionally dropped from the list.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2721 
/* Process an incoming acknowledgment (reqseq) on the transmit side.
 *
 * Frees every transmitted frame from expected_ack_seq up to (but not
 * including) @reqseq, updates the unacked-frame count and stops the
 * retransmission timer once nothing remains outstanding.  A reqseq
 * equal to expected_ack_seq (or no unacked frames at all) is a no-op.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		/* A frame may already be gone (e.g. acked earlier), so
		 * only drop it if it is still queued.
		 */
		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2753 
2754 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2755 {
2756 	BT_DBG("chan %p", chan);
2757 
2758 	chan->expected_tx_seq = chan->buffer_seq;
2759 	l2cap_seq_list_clear(&chan->srej_list);
2760 	skb_queue_purge(&chan->srej_q);
2761 	chan->rx_state = L2CAP_RX_STATE_RECV;
2762 }
2763 
/* ERTM transmit state machine: XMIT (normal transmission) state.
 *
 * Handles data requests, local busy transitions, incoming acks and
 * poll/retransmission timeouts.  Sending a poll (explicit or on
 * retransmission timeout) moves the channel to the WAIT_F state.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* l2cap_send_ack() sends an RNR while local busy is set */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* An RNR was sent while busy; poll the remote so it
			 * learns that receiving has resumed.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2835 
2836 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2837 				  struct l2cap_ctrl *control,
2838 				  struct sk_buff_head *skbs, u8 event)
2839 {
2840 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2841 	       event);
2842 
2843 	switch (event) {
2844 	case L2CAP_EV_DATA_REQUEST:
2845 		if (chan->tx_send_head == NULL)
2846 			chan->tx_send_head = skb_peek(skbs);
2847 		/* Queue data, but don't send. */
2848 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2849 		break;
2850 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2851 		BT_DBG("Enter LOCAL_BUSY");
2852 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2853 
2854 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2855 			/* The SREJ_SENT state must be aborted if we are to
2856 			 * enter the LOCAL_BUSY state.
2857 			 */
2858 			l2cap_abort_rx_srej_sent(chan);
2859 		}
2860 
2861 		l2cap_send_ack(chan);
2862 
2863 		break;
2864 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2865 		BT_DBG("Exit LOCAL_BUSY");
2866 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2867 
2868 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2869 			struct l2cap_ctrl local_control;
2870 			memset(&local_control, 0, sizeof(local_control));
2871 			local_control.sframe = 1;
2872 			local_control.super = L2CAP_SUPER_RR;
2873 			local_control.poll = 1;
2874 			local_control.reqseq = chan->buffer_seq;
2875 			l2cap_send_sframe(chan, &local_control);
2876 
2877 			chan->retry_count = 1;
2878 			__set_monitor_timer(chan);
2879 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2880 		}
2881 		break;
2882 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2883 		l2cap_process_reqseq(chan, control->reqseq);
2884 		fallthrough;
2885 
2886 	case L2CAP_EV_RECV_FBIT:
2887 		if (control && control->final) {
2888 			__clear_monitor_timer(chan);
2889 			if (chan->unacked_frames > 0)
2890 				__set_retrans_timer(chan);
2891 			chan->retry_count = 0;
2892 			chan->tx_state = L2CAP_TX_STATE_XMIT;
2893 			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2894 		}
2895 		break;
2896 	case L2CAP_EV_EXPLICIT_POLL:
2897 		/* Ignore */
2898 		break;
2899 	case L2CAP_EV_MONITOR_TO:
2900 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2901 			l2cap_send_rr_or_rnr(chan, 1);
2902 			__set_monitor_timer(chan);
2903 			chan->retry_count++;
2904 		} else {
2905 			l2cap_send_disconn_req(chan, ECONNABORTED);
2906 		}
2907 		break;
2908 	default:
2909 		break;
2910 	}
2911 }
2912 
2913 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2914 		     struct sk_buff_head *skbs, u8 event)
2915 {
2916 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2917 	       chan, control, skbs, event, chan->tx_state);
2918 
2919 	switch (chan->tx_state) {
2920 	case L2CAP_TX_STATE_XMIT:
2921 		l2cap_tx_state_xmit(chan, control, skbs, event);
2922 		break;
2923 	case L2CAP_TX_STATE_WAIT_F:
2924 		l2cap_tx_state_wait_f(chan, control, skbs, event);
2925 		break;
2926 	default:
2927 		/* Ignore event */
2928 		break;
2929 	}
2930 }
2931 
/* Forward a received reqseq + F-bit to the transmit state machine */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2938 
/* Forward only a received F-bit to the transmit state machine */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2945 
2946 /* Copy frame to all raw sockets on that connection */
2947 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2948 {
2949 	struct sk_buff *nskb;
2950 	struct l2cap_chan *chan;
2951 
2952 	BT_DBG("conn %p", conn);
2953 
2954 	list_for_each_entry(chan, &conn->chan_l, list) {
2955 		if (chan->chan_type != L2CAP_CHAN_RAW)
2956 			continue;
2957 
2958 		/* Don't send frame to the channel it came from */
2959 		if (bt_cb(skb)->l2cap.chan == chan)
2960 			continue;
2961 
2962 		nskb = skb_clone(skb, GFP_KERNEL);
2963 		if (!nskb)
2964 			continue;
2965 		if (chan->ops->recv(chan, nskb))
2966 			kfree_skb(nskb);
2967 	}
2968 }
2969 
2970 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb.
 *
 * The first skb carries the L2CAP header, the command header and as
 * much of @data as fits within conn->mtu; any remainder is chained as
 * raw fragments on frag_list.  The LE or BR/EDR signalling CID is
 * chosen by link type.  Returns NULL on allocation failure or when
 * the MTU cannot even hold the headers.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Copy whatever payload still fits in the first skb;
		 * count - headers cannot exceed dlen since count <= len.
		 */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		skb_put_data(skb, data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		skb_put_data(*frag, data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Freeing the head skb also frees any fragments chained so far */
	kfree_skb(skb);
	return NULL;
}
3036 
/* Parse one configuration option at *ptr, advancing *ptr past it.
 *
 * Options of length 1, 2 or 4 are returned by value in *val (little
 * endian on the wire); any other length is returned as a pointer to
 * the option payload.  Returns the total number of bytes consumed.
 *
 * NOTE(review): no bounds checking happens here — opt->len comes from
 * the remote.  Callers must verify the option stays within the
 * received packet before trusting *val; confirm all call sites do.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3070 
/* Append one configuration option at *ptr, advancing *ptr past it.
 *
 * Values of length 1, 2 or 4 are stored inline (little endian on the
 * wire); any other length treats @val as a pointer to @len bytes to
 * copy.  The option is silently dropped when fewer than @size bytes
 * of buffer space remain.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	/* Not enough room left in the response buffer: skip the option */
	if (size < L2CAP_CONF_OPT_SIZE + len)
		return;

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: @val is a pointer to the data */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3103 
/* Append an Extended Flow Specification option for the channel.
 *
 * ERTM mode uses the channel's local identifiers with the default
 * access latency and flush timeout; streaming mode uses best-effort
 * defaults.  Any other mode emits nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id		= chan->local_id;
		efs.stype	= chan->local_stype;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id		= 1;
		efs.stype	= L2CAP_SERV_BESTEFFORT;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= 0;
		efs.flush_to	= 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs, size);
}
3134 
/* Delayed-work handler for the ERTM ack timer.
 *
 * Sends an RR/RNR if any received frames remain unacknowledged.  The
 * trailing l2cap_chan_put() balances the reference held when the
 * timer was scheduled (presumably by __set_ack_timer — confirm) and
 * runs even when no acknowledgment was needed.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3154 
/* Initialize per-channel transmit/receive state after configuration.
 *
 * Resets all sequence counters and SDU reassembly state regardless of
 * mode; for ERTM additionally sets the initial rx/tx machine states
 * and allocates the SREJ and retransmission sequence lists, sized by
 * the local and remote tx windows respectively.  Returns 0 or a
 * negative errno; if the second allocation fails the first list is
 * freed before returning.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	/* No partially reassembled SDU yet */
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Streaming mode needs none of the ERTM state below */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3190 
3191 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3192 {
3193 	switch (mode) {
3194 	case L2CAP_MODE_STREAMING:
3195 	case L2CAP_MODE_ERTM:
3196 		if (l2cap_mode_supported(mode, remote_feat_mask))
3197 			return mode;
3198 		fallthrough;
3199 	default:
3200 		return L2CAP_MODE_BASIC;
3201 	}
3202 }
3203 
/* True if the remote advertised the Extended Window Size feature */
static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
{
	return (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW);
}
3208 
/* True if the remote advertised Extended Flow Specification support */
static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
{
	return (conn->feat_mask & L2CAP_FEAT_EXT_FLOW);
}
3213 
/* Fill in the default ERTM retransmission and monitor timeouts for an
 * outgoing RFC configuration option.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
	rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
}
3220 
3221 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3222 {
3223 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3224 	    __l2cap_ews_supported(chan->conn)) {
3225 		/* use extended control field */
3226 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3227 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3228 	} else {
3229 		chan->tx_win = min_t(u16, chan->tx_win,
3230 				     L2CAP_DEFAULT_TX_WINDOW);
3231 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3232 	}
3233 	chan->ack_win = chan->tx_win;
3234 }
3235 
3236 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3237 {
3238 	struct hci_conn *conn = chan->conn->hcon;
3239 
3240 	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3241 
3242 	/* The 2-DH1 packet has between 2 and 56 information bytes
3243 	 * (including the 2-byte payload header)
3244 	 */
3245 	if (!(conn->pkt_type & HCI_2DH1))
3246 		chan->imtu = 54;
3247 
3248 	/* The 3-DH1 packet has between 2 and 85 information bytes
3249 	 * (including the 2-byte payload header)
3250 	 */
3251 	if (!(conn->pkt_type & HCI_3DH1))
3252 		chan->imtu = 83;
3253 
3254 	/* The 2-DH3 packet has between 2 and 369 information bytes
3255 	 * (including the 2-byte payload header)
3256 	 */
3257 	if (!(conn->pkt_type & HCI_2DH3))
3258 		chan->imtu = 367;
3259 
3260 	/* The 3-DH3 packet has between 2 and 554 information bytes
3261 	 * (including the 2-byte payload header)
3262 	 */
3263 	if (!(conn->pkt_type & HCI_3DH3))
3264 		chan->imtu = 552;
3265 
3266 	/* The 2-DH5 packet has between 2 and 681 information bytes
3267 	 * (including the 2-byte payload header)
3268 	 */
3269 	if (!(conn->pkt_type & HCI_2DH5))
3270 		chan->imtu = 679;
3271 
3272 	/* The 3-DH5 packet has between 2 and 1023 information bytes
3273 	 * (including the 2-byte payload header)
3274 	 */
3275 	if (!(conn->pkt_type & HCI_3DH5))
3276 		chan->imtu = 1021;
3277 }
3278 
/* Build an L2CAP Configuration Request for @chan into @data (at most
 * @data_size bytes, beginning with a struct l2cap_conf_req header).
 *
 * On the very first configuration exchange the channel mode is
 * (re)selected against the remote feature mask; afterwards MTU, RFC,
 * EFS, EWS and FCS options are appended as applicable for the chosen
 * mode.  Returns the number of bytes written to @data.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens on the first Configure exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* A "state 2" device insists on its configured mode and
		 * skips renegotiation.
		 */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		fallthrough;
	default:
		/* Pick the best mode both sides support */
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU) {
		/* imtu == 0 requests an MTU derived from the ACL packet
		 * types (see l2cap_mtu_auto()).
		 */
		if (!chan->imtu)
			l2cap_mtu_auto(chan);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
				   endptr - ptr);
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only send an explicit Basic-mode RFC option when the
		 * remote advertises ERTM/streaming support at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* Cap the PDU size so a full frame (extended header, SDU
		 * length and FCS included) fits in the connection MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		/* Extended window sizes go in a separate EWS option */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		/* Offer to disable the FCS when allowed by both sides */
		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3404 
/* Parse the accumulated Configuration Request options (chan->conf_req,
 * chan->conf_len) for @chan and build the Configuration Response into
 * @data (at most @data_size bytes, beginning with a struct
 * l2cap_conf_rsp header).
 *
 * Returns the number of response bytes written, or -ECONNREFUSED when
 * the request is fundamentally unacceptable and the caller must
 * disconnect the channel.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *endptr = data + data_size;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: walk every option the remote sent.  Options with a
	 * wrong length are silently skipped (break without action).
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		if (len < 0)
			break;

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			/* QoS option is accepted but ignored */
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* Extended window sizes are not accepted here */
			return -ECONNREFUSED;

		default:
			/* Hint options may be ignored; anything else is
			 * reported back as unknown.
			 */
			if (hint)
				break;
			result = L2CAP_CONF_UNKNOWN;
			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
			break;
		}
	}

	/* Only renegotiate the mode on the first exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* State 2 devices cannot change mode */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Refuse outright if we already rejected the mode once */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);

		if (remote_efs) {
			/* EFS service types must be compatible unless one
			 * side declares "no traffic".
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* EWS (if received) overrides the RFC window size */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp remote MPS so a full frame fits our MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);

			if (remote_efs &&
			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3624 
/* Process a Configuration Response (@rsp, @len bytes of options) for
 * @chan and build a follow-up Configuration Request into @data (at
 * most @size bytes).
 *
 * *@result may be updated along the way (e.g. to UNACCEPT when the
 * proposed MTU is below the minimum).  Returns the number of request
 * bytes written, or -ECONNREFUSED when the remote's settings cannot be
 * accepted at all.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	/* Walk the responded options; wrong-length options are skipped */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			/* Enforce the minimum MTU; echo our value back */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			/* A "state 2" device cannot change its mode */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			/* Incompatible service type: refuse */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* Basic mode cannot be upgraded by the remote's response */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0);

	return ptr - data;
}
3742 
3743 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3744 				u16 result, u16 flags)
3745 {
3746 	struct l2cap_conf_rsp *rsp = data;
3747 	void *ptr = rsp->data;
3748 
3749 	BT_DBG("chan %p", chan);
3750 
3751 	rsp->scid   = cpu_to_le16(chan->dcid);
3752 	rsp->result = cpu_to_le16(result);
3753 	rsp->flags  = cpu_to_le16(flags);
3754 
3755 	return ptr - data;
3756 }
3757 
3758 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3759 {
3760 	struct l2cap_le_conn_rsp rsp;
3761 	struct l2cap_conn *conn = chan->conn;
3762 
3763 	BT_DBG("chan %p", chan);
3764 
3765 	rsp.dcid    = cpu_to_le16(chan->scid);
3766 	rsp.mtu     = cpu_to_le16(chan->imtu);
3767 	rsp.mps     = cpu_to_le16(chan->mps);
3768 	rsp.credits = cpu_to_le16(chan->rx_credits);
3769 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3770 
3771 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3772 		       &rsp);
3773 }
3774 
/* Per-channel callback used to check whether every ECRED channel that
 * shares an ident is ready: increments *@data for each channel still
 * pending accept, or sets it to -ECONNREFUSED if any channel ended up
 * neither connected nor pending.
 */
static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
{
	int *result = data;

	/* Stop early once refused; outgoing channels are not counted */
	if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	if (chan->state == BT_CONNECT2) {
		/* Channel is still waiting on accept: count it */
		(*result)++;
	} else if (chan->state != BT_CONNECTED) {
		/* Neither connected nor pending accept: it was refused */
		*result = -ECONNREFUSED;
	}
}
3795 
/* Scratch state used to build a single ECRED Connection Response that
 * covers every channel sharing the same signalling ident.
 */
struct l2cap_ecred_rsp_data {
	struct {
		struct l2cap_ecred_conn_rsp_hdr rsp;
		/* One DCID slot per channel that may be included */
		__le16 scid[L2CAP_ECRED_MAX_CID];
	} __packed pdu;
	int count;	/* number of DCIDs filled in so far */
};
3803 
/* Per-channel callback run while building a deferred ECRED Connection
 * Response: on success append the channel's CID to the PDU, otherwise
 * tear the channel down.
 */
static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_rsp_data *rsp = data;
	/* Flexible-array view over the packed PDU so dcid[] can be
	 * indexed without tripping bounds checking.
	 */
	struct l2cap_ecred_conn_rsp *rsp_flex =
		container_of(&rsp->pdu.rsp, struct l2cap_ecred_conn_rsp, hdr);

	/* Check if channel for outgoing connection or if it wasn't deferred
	 * since in those cases it must be skipped.
	 */
	if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags) ||
	    !test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	/* Reset ident so only one response is sent */
	chan->ident = 0;

	/* Include all channels pending with the same ident */
	if (!rsp->pdu.rsp.result)
		rsp_flex->dcid[rsp->count++] = cpu_to_le16(chan->scid);
	else
		l2cap_chan_del(chan, ECONNRESET);
}
3826 
/* Send the deferred ECRED Connection Response covering all channels
 * that were requested with the same signalling ident as @chan.  The
 * response is only sent once every such channel has either been
 * accepted or refused.
 */
void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_rsp_data data;
	u16 id = chan->ident;
	int result = 0;

	/* ident 0 means no response is pending for this channel */
	if (!id)
		return;

	BT_DBG("chan %p id %d", chan, id);

	memset(&data, 0, sizeof(data));

	data.pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.rsp.mps     = cpu_to_le16(chan->mps);
	data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);

	/* Verify that all channels are ready */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);

	/* At least one channel still pending accept: respond later */
	if (result > 0)
		return;

	/* A channel was refused: report authorization failure */
	if (result < 0)
		data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);

	/* Build response */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);

	l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
		       sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
		       &data.pdu);
}
3862 
3863 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3864 {
3865 	struct l2cap_conn_rsp rsp;
3866 	struct l2cap_conn *conn = chan->conn;
3867 	u8 buf[128];
3868 	u8 rsp_code;
3869 
3870 	rsp.scid   = cpu_to_le16(chan->dcid);
3871 	rsp.dcid   = cpu_to_le16(chan->scid);
3872 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3873 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3874 	rsp_code = L2CAP_CONN_RSP;
3875 
3876 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3877 
3878 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3879 
3880 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3881 		return;
3882 
3883 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3884 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3885 	chan->num_conf_req++;
3886 }
3887 
/* Extract the final RFC (and optional extended window size) settings
 * from a successful Configuration Response and apply them to @chan.
 * Only relevant for ERTM and streaming mode channels.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	/* Basic/LE modes carry no RFC parameters worth applying */
	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	/* Pick the RFC and EWS options out of the response, skipping any
	 * with an unexpected length.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* EWS takes precedence when extended control is in use */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
3943 
3944 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3945 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3946 				    u8 *data)
3947 {
3948 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3949 
3950 	if (cmd_len < sizeof(*rej))
3951 		return -EPROTO;
3952 
3953 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3954 		return 0;
3955 
3956 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3957 	    cmd->ident == conn->info_ident) {
3958 		cancel_delayed_work(&conn->info_timer);
3959 
3960 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3961 		conn->info_ident = 0;
3962 
3963 		l2cap_conn_start(conn);
3964 	}
3965 
3966 	return 0;
3967 }
3968 
/* Handle an incoming Connection Request: find the channel listening on
 * the requested PSM, run security and CID validity checks, create and
 * register the new channel, and send back a Connection Response with
 * @rsp_code.  May also trigger an Information Request and/or the first
 * Configuration Request.
 */
static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
			  u8 *data, u8 rsp_code)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    (!hci_conn_check_link_mode(conn->hcon) ||
	    !l2cap_check_enc_key_size(conn->hcon))) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			/* Security is OK; defer to userspace if requested,
			 * otherwise go straight to configuration.
			 */
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still in progress */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask not known yet: respond PENDING and fetch it */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Nothing more to do if no listener was found (pchan unlocked) */
	if (!pchan)
		return;

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* Immediate success: start configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);
}
4099 
/* Handle an incoming BR/EDR Connection Request PDU.  Rejects truncated
 * PDUs with -EPROTO, otherwise delegates to l2cap_connect().
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP);
	return 0;
}
4109 
/* Handle a Connection Response to one of our outgoing Connection
 * Requests: on success record the remote CID and start configuration,
 * on PEND mark the channel pending, and on any other result tear the
 * channel down.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	/* A successful response must carry a dynamic-range DCID */
	if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	/* Look the channel up by our CID, falling back to the signalling
	 * ident when no CID was echoed back.
	 */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EBADSLT;
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EBADSLT;
	}

	/* Take a reference unless the channel is already being freed */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Refuse a DCID that is already in use on this link */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the first Configuration Request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result code is a refusal */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return err;
}
4187 
4188 static inline void set_default_fcs(struct l2cap_chan *chan)
4189 {
4190 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4191 	 * sides request it.
4192 	 */
4193 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4194 		chan->fcs = L2CAP_FCS_NONE;
4195 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4196 		chan->fcs = L2CAP_FCS_CRC16;
4197 }
4198 
4199 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4200 				    u8 ident, u16 flags)
4201 {
4202 	struct l2cap_conn *conn = chan->conn;
4203 
4204 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4205 	       flags);
4206 
4207 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4208 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4209 
4210 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4211 		       l2cap_build_conf_rsp(chan, data,
4212 					    L2CAP_CONF_SUCCESS, flags), data);
4213 }
4214 
4215 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4216 				   u16 scid, u16 dcid)
4217 {
4218 	struct l2cap_cmd_rej_cid rej;
4219 
4220 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4221 	rej.scid = __cpu_to_le16(scid);
4222 	rej.dcid = __cpu_to_le16(dcid);
4223 
4224 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4225 }
4226 
/* Handle an incoming Configuration Request: accumulate (possibly
 * fragmented) options in chan->conf_req, and once the request is
 * complete, parse it and send a Configuration Response.  May also send
 * our own Configuration Request and, when both sides are done, bring
 * the channel up.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns the channel locked and with a reference held */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only valid in these states */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
	    chan->state != BT_CONNECTED) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
		chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our own Configuration Request has not been sent yet */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
	}

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4335 
/* Handle a Configuration Response to one of our Configuration
 * Requests.  On SUCCESS apply the negotiated RFC settings; on PENDING
 * continue the exchange; on UNKNOWN/UNACCEPT retry with an adjusted
 * request (up to L2CAP_CONF_MAX_CONF_RSP attempts); anything else
 * disconnects the channel.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns the channel locked and with a reference held */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		/* Apply the accepted RFC/EWS settings */
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0);
		}
		goto done;

	case L2CAP_CONF_UNKNOWN:
	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			/* Make sure the adjusted request fits our buffer */
			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		fallthrough;

	default:
		/* Retry budget exhausted or hard failure: disconnect */
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* More response fragments to come */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4442 
/* Handle an incoming L2CAP Disconnection Request.
 *
 * Acknowledges the request with a Disconnection Response echoing the
 * channel's CIDs, then shuts down and removes the channel.  An unknown
 * DCID is answered with a Command Reject (invalid CID).
 *
 * Returns 0, or -EPROTO on a malformed PDU.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's DCID is our SCID; lookup locks and references chan */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	/* Respond before tearing the channel down */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	l2cap_chan_del(chan, ECONNRESET);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4481 
4482 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4483 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4484 				       u8 *data)
4485 {
4486 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4487 	u16 dcid, scid;
4488 	struct l2cap_chan *chan;
4489 
4490 	if (cmd_len != sizeof(*rsp))
4491 		return -EPROTO;
4492 
4493 	scid = __le16_to_cpu(rsp->scid);
4494 	dcid = __le16_to_cpu(rsp->dcid);
4495 
4496 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4497 
4498 	chan = l2cap_get_chan_by_scid(conn, scid);
4499 	if (!chan) {
4500 		return 0;
4501 	}
4502 
4503 	if (chan->state != BT_DISCONN) {
4504 		l2cap_chan_unlock(chan);
4505 		l2cap_chan_put(chan);
4506 		return 0;
4507 	}
4508 
4509 	l2cap_chan_del(chan, 0);
4510 
4511 	chan->ops->close(chan);
4512 
4513 	l2cap_chan_unlock(chan);
4514 	l2cap_chan_put(chan);
4515 
4516 	return 0;
4517 }
4518 
/* Handle an incoming L2CAP Information Request.
 *
 * Answers feature-mask and fixed-channel queries; any other type gets an
 * L2CAP_IR_NOTSUPP response.  The response buffers are sized to hold the
 * l2cap_info_rsp header (4 bytes) plus the payload: a 4-byte feature
 * mask or an 8-byte fixed-channel bitmap.
 *
 * Returns 0, or -EPROTO on a malformed PDU.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise ERTM/streaming/FCS only when ERTM is enabled */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* First octet carries our fixed-channel mask; the
		 * remaining 7 octets of the bitmap are reserved (zero).
		 */
		rsp->data[0] = conn->local_fixed_chan;
		memset(rsp->data + 1, 0, 7);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4566 
/* Handle an incoming L2CAP Information Response.
 *
 * Completes the connection-level feature discovery started by our own
 * Information Request: stores the remote feature mask, optionally chains
 * a fixed-channel query, and finally marks discovery done and kicks off
 * any channels that were waiting via l2cap_conn_start().
 *
 * Returns 0, or -EPROTO on a truncated PDU.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	/* On failure, finish discovery anyway so pending channels can
	 * proceed without the optional features.
	 */
	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* If the peer supports fixed channels, query its bitmap
		 * before declaring discovery complete.
		 */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4629 
4630 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4631 					      struct l2cap_cmd_hdr *cmd,
4632 					      u16 cmd_len, u8 *data)
4633 {
4634 	struct hci_conn *hcon = conn->hcon;
4635 	struct l2cap_conn_param_update_req *req;
4636 	struct l2cap_conn_param_update_rsp rsp;
4637 	u16 min, max, latency, to_multiplier;
4638 	int err;
4639 
4640 	if (hcon->role != HCI_ROLE_MASTER)
4641 		return -EINVAL;
4642 
4643 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4644 		return -EPROTO;
4645 
4646 	req = (struct l2cap_conn_param_update_req *) data;
4647 	min		= __le16_to_cpu(req->min);
4648 	max		= __le16_to_cpu(req->max);
4649 	latency		= __le16_to_cpu(req->latency);
4650 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
4651 
4652 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4653 	       min, max, latency, to_multiplier);
4654 
4655 	memset(&rsp, 0, sizeof(rsp));
4656 
4657 	err = hci_check_conn_params(min, max, latency, to_multiplier);
4658 	if (err)
4659 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4660 	else
4661 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4662 
4663 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
4664 		       sizeof(rsp), &rsp);
4665 
4666 	if (!err) {
4667 		u8 store_hint;
4668 
4669 		store_hint = hci_le_conn_update(hcon, min, max, latency,
4670 						to_multiplier);
4671 		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
4672 				    store_hint, min, max, latency,
4673 				    to_multiplier);
4674 
4675 	}
4676 
4677 	return 0;
4678 }
4679 
/* Handle an LE Credit Based Connection Response.
 *
 * Matches the pending channel by the request ident.  On success the
 * channel parameters (DCID, MTU, MPS, initial credits) are recorded and
 * the channel becomes ready.  Authentication/encryption failures trigger
 * a security upgrade via SMP so a new Connect Request can be sent; any
 * other result deletes the channel.
 *
 * Returns 0, -EPROTO on a malformed PDU, or -EBADSLT when no matching
 * channel exists or the reported DCID is already in use.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid    = __le16_to_cpu(rsp->dcid);
	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	/* On success the spec requires MTU/MPS >= 23 and a DCID in the
	 * LE dynamic range.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Reject a DCID that is already bound to another channel */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Raise our requirement one step above the link's level */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

	return err;
}
4759 
/* Dispatch one BR/EDR signaling command to its handler.
 *
 * Errors from response handlers are deliberately discarded: a bad
 * response from the peer must not cause us to send a Command Reject
 * back.  Only request handlers propagate their error, which the caller
 * (l2cap_sig_channel) turns into a reject.
 *
 * Returns 0, a handler error, or -EINVAL for an unknown opcode.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
4818 
/* Handle an incoming LE Credit Based Connection Request.
 *
 * Validates the PSM, security level, and SCID; when a listening channel
 * exists for the PSM, a new channel is created, credit-based flow
 * control is initialised, and either a success response is sent or -
 * with FLAG_DEFER_SETUP - the response is deferred to userspace (the
 * channel's ident is kept so the later accept can answer).
 *
 * Returns 0 (protocol errors are reported in the response), or -EPROTO
 * on a malformed PDU.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);
	psm  = req->psm;
	/* dcid/credits stay zero on every error response path */
	dcid = 0;
	credits = 0;

	/* Spec minimum for LE credit based channels */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	/* Our allocated SCID and initial credits go in the response */
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Remember the request ident for a deferred response */
	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
4955 
/* Handle an incoming LE Flow Control Credit packet.
 *
 * Adds the peer's credits to the channel's TX budget and resumes any
 * transmission that was stalled waiting for credits.  A grant that would
 * push the total past LE_FLOWCTL_MAX_CREDITS (65535) is a protocol
 * violation and disconnects the channel.
 *
 * Returns 0 (including on credit overflow, to avoid an unnecessary
 * Command Reject), -EPROTO on a malformed PDU, or -EBADSLT for an
 * unknown CID.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid	= __le16_to_cpu(pkt->cid);
	credits	= __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	/* Lookup locks the channel and takes a reference */
	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		goto unlock;
	}

	chan->tx_credits += credits;

	/* Resume sending */
	l2cap_le_flowctl_send(chan);

	if (chan->tx_credits)
		chan->ops->resume(chan);

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5002 
/* Handle an incoming L2CAP_CREDIT_BASED_CONNECTION_REQ (enhanced credit
 * based flow control, up to L2CAP_ECRED_MAX_CID channels per request).
 *
 * Each requested SCID is validated and answered individually in the
 * response's dcid array: a 0x0000 entry marks a refused channel, and
 * 'result' reports the last failure seen.  With FLAG_DEFER_SETUP the
 * response is deferred to userspace instead of being sent here.
 *
 * Returns 0 (errors are reported in the response), or -EINVAL when
 * ECRED support is disabled.
 */
static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_req *req = (void *) data;
	DEFINE_RAW_FLEX(struct l2cap_ecred_conn_rsp, pdu, dcid, L2CAP_ECRED_MAX_CID);
	struct l2cap_chan *chan, *pchan;
	u16 mtu, mps;
	__le16 psm;
	u8 result, len = 0;
	int i, num_scid;
	bool defer = false;

	if (!enable_ecred)
		return -EINVAL;

	/* Payload is the fixed header plus a whole number of u16 SCIDs */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);

	if (num_scid > L2CAP_ECRED_MAX_CID) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);

	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
		goto response;
	}

	psm  = req->psm;

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);

	memset(pdu, 0, sizeof(*pdu));

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		goto unlock;
	}

	result = L2CAP_CR_LE_SUCCESS;

	for (i = 0; i < num_scid; i++) {
		u16 scid = __le16_to_cpu(req->scid[i]);

		BT_DBG("scid[%d] 0x%4.4x", i, scid);

		/* Default to "refused" for this slot; overwritten with
		 * our SCID below when the channel is accepted.
		 */
		pdu->dcid[i] = 0x0000;
		len += sizeof(*pdu->dcid);

		/* Check for valid dynamic CID range */
		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
			result = L2CAP_CR_LE_INVALID_SCID;
			continue;
		}

		/* Check if we already have channel with that dcid */
		if (__l2cap_get_chan_by_dcid(conn, scid)) {
			result = L2CAP_CR_LE_SCID_IN_USE;
			continue;
		}

		chan = pchan->ops->new_connection(pchan);
		if (!chan) {
			result = L2CAP_CR_LE_NO_MEM;
			continue;
		}

		bacpy(&chan->src, &conn->hcon->src);
		bacpy(&chan->dst, &conn->hcon->dst);
		chan->src_type = bdaddr_src_type(conn->hcon);
		chan->dst_type = bdaddr_dst_type(conn->hcon);
		chan->psm  = psm;
		chan->dcid = scid;
		chan->omtu = mtu;
		chan->remote_mps = mps;

		__l2cap_chan_add(conn, chan);

		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));

		/* Init response */
		if (!pdu->credits) {
			pdu->mtu = cpu_to_le16(chan->imtu);
			pdu->mps = cpu_to_le16(chan->mps);
			pdu->credits = cpu_to_le16(chan->rx_credits);
		}

		pdu->dcid[i] = cpu_to_le16(chan->scid);

		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

		/* Keep the ident so a deferred accept can respond later */
		chan->ident = cmd->ident;
		chan->mode = L2CAP_MODE_EXT_FLOWCTL;

		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
			l2cap_state_change(chan, BT_CONNECT2);
			defer = true;
			chan->ops->defer(chan);
		} else {
			l2cap_chan_ready(chan);
		}
	}

unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

response:
	pdu->result = cpu_to_le16(result);

	if (defer)
		return 0;

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
		       sizeof(*pdu) + len, pdu);

	return 0;
}
5153 
/* Handle an incoming L2CAP_CREDIT_BASED_CONNECTION_RSP.
 *
 * Walks every channel that is still pending on this request ident and
 * pairs it with the next DCID in the response.  Channels without a
 * corresponding DCID, or paired with a DCID already in use, are torn
 * down; security failures raise our requirement and retry via SMP;
 * otherwise the channel is completed and made ready.
 *
 * Returns 0, or -EPROTO on a truncated PDU.
 */
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 mtu, mps, credits, result;
	struct l2cap_chan *chan, *tmp;
	int err = 0, sec_level;
	int i = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
	       result);

	/* cmd_len now counts only the trailing dcid array */
	cmd_len -= sizeof(*rsp);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		u16 dcid;

		/* Only channels still pending on this request */
		if (chan->ident != cmd->ident ||
		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
		    chan->state == BT_CONNECTED)
			continue;

		l2cap_chan_lock(chan);

		/* Check that there is a dcid for each pending channel */
		if (cmd_len < sizeof(dcid)) {
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			continue;
		}

		dcid = __le16_to_cpu(rsp->dcid[i++]);
		cmd_len -= sizeof(u16);

		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);

		/* Check if dcid is already in use */
		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
			/* If a device receives a
			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
			 * already-assigned Destination CID, then both the
			 * original channel and the new channel shall be
			 * immediately discarded and not used.
			 */
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			chan = __l2cap_get_chan_by_dcid(conn, dcid);
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNRESET);
			l2cap_chan_unlock(chan);
			continue;
		}

		switch (result) {
		case L2CAP_CR_LE_AUTHENTICATION:
		case L2CAP_CR_LE_ENCRYPTION:
			/* If we already have MITM protection we can't do
			 * anything.
			 */
			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			/* Raise requirement one step above the link level */
			sec_level = hcon->sec_level + 1;
			if (chan->sec_level < sec_level)
				chan->sec_level = sec_level;

			/* We'll need to send a new Connect Request */
			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);

			smp_conn_security(hcon, chan->sec_level);
			break;

		case L2CAP_CR_LE_BAD_PSM:
			l2cap_chan_del(chan, ECONNREFUSED);
			break;

		default:
			/* If dcid was not set it means channels was refused */
			if (!dcid) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			chan->ident = 0;
			chan->dcid = dcid;
			chan->omtu = mtu;
			chan->remote_mps = mps;
			chan->tx_credits = credits;
			l2cap_chan_ready(chan);
			break;
		}

		l2cap_chan_unlock(chan);
	}

	return err;
}
5263 
5264 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
5265 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5266 					 u8 *data)
5267 {
5268 	struct l2cap_ecred_reconf_req *req = (void *) data;
5269 	struct l2cap_ecred_reconf_rsp rsp;
5270 	u16 mtu, mps, result;
5271 	struct l2cap_chan *chan;
5272 	int i, num_scid;
5273 
5274 	if (!enable_ecred)
5275 		return -EINVAL;
5276 
5277 	if (cmd_len < sizeof(*req) || cmd_len - sizeof(*req) % sizeof(u16)) {
5278 		result = L2CAP_CR_LE_INVALID_PARAMS;
5279 		goto respond;
5280 	}
5281 
5282 	mtu = __le16_to_cpu(req->mtu);
5283 	mps = __le16_to_cpu(req->mps);
5284 
5285 	BT_DBG("mtu %u mps %u", mtu, mps);
5286 
5287 	if (mtu < L2CAP_ECRED_MIN_MTU) {
5288 		result = L2CAP_RECONF_INVALID_MTU;
5289 		goto respond;
5290 	}
5291 
5292 	if (mps < L2CAP_ECRED_MIN_MPS) {
5293 		result = L2CAP_RECONF_INVALID_MPS;
5294 		goto respond;
5295 	}
5296 
5297 	cmd_len -= sizeof(*req);
5298 	num_scid = cmd_len / sizeof(u16);
5299 	result = L2CAP_RECONF_SUCCESS;
5300 
5301 	for (i = 0; i < num_scid; i++) {
5302 		u16 scid;
5303 
5304 		scid = __le16_to_cpu(req->scid[i]);
5305 		if (!scid)
5306 			return -EPROTO;
5307 
5308 		chan = __l2cap_get_chan_by_dcid(conn, scid);
5309 		if (!chan)
5310 			continue;
5311 
5312 		/* If the MTU value is decreased for any of the included
5313 		 * channels, then the receiver shall disconnect all
5314 		 * included channels.
5315 		 */
5316 		if (chan->omtu > mtu) {
5317 			BT_ERR("chan %p decreased MTU %u -> %u", chan,
5318 			       chan->omtu, mtu);
5319 			result = L2CAP_RECONF_INVALID_MTU;
5320 		}
5321 
5322 		chan->omtu = mtu;
5323 		chan->remote_mps = mps;
5324 	}
5325 
5326 respond:
5327 	rsp.result = cpu_to_le16(result);
5328 
5329 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
5330 		       &rsp);
5331 
5332 	return 0;
5333 }
5334 
5335 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
5336 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5337 					 u8 *data)
5338 {
5339 	struct l2cap_chan *chan, *tmp;
5340 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
5341 	u16 result;
5342 
5343 	if (cmd_len < sizeof(*rsp))
5344 		return -EPROTO;
5345 
5346 	result = __le16_to_cpu(rsp->result);
5347 
5348 	BT_DBG("result 0x%4.4x", rsp->result);
5349 
5350 	if (!result)
5351 		return 0;
5352 
5353 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
5354 		if (chan->ident != cmd->ident)
5355 			continue;
5356 
5357 		l2cap_chan_del(chan, ECONNRESET);
5358 	}
5359 
5360 	return 0;
5361 }
5362 
5363 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5364 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5365 				       u8 *data)
5366 {
5367 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5368 	struct l2cap_chan *chan;
5369 
5370 	if (cmd_len < sizeof(*rej))
5371 		return -EPROTO;
5372 
5373 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5374 	if (!chan)
5375 		goto done;
5376 
5377 	chan = l2cap_chan_hold_unless_zero(chan);
5378 	if (!chan)
5379 		goto done;
5380 
5381 	l2cap_chan_lock(chan);
5382 	l2cap_chan_del(chan, ECONNREFUSED);
5383 	l2cap_chan_unlock(chan);
5384 	l2cap_chan_put(chan);
5385 
5386 done:
5387 	return 0;
5388 }
5389 
/* Dispatch one LE signaling command to its handler.
 *
 * As on BR/EDR, errors from response handlers are deliberately dropped
 * so a malformed response never provokes a Command Reject from us.
 *
 * Returns 0, a handler error, or -EINVAL for an unknown opcode.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_REQ:
		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_RSP:
		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_REQ:
		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_RSP:
		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5452 
/* Process an skb received on the LE signaling channel.
 *
 * Unlike BR/EDR, an LE signaling PDU carries exactly one command, so
 * there is no loop here.  A handler error is answered with a single
 * "command not understood" reject.  The skb is always consumed.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* Exactly one command per PDU; ident 0 is reserved/invalid */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5493 
5494 static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
5495 {
5496 	struct l2cap_cmd_rej_unk rej;
5497 
5498 	rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5499 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
5500 }
5501 
/* Process an skb received on the BR/EDR signaling channel.
 *
 * A BR/EDR signaling PDU may pack several commands back to back; each
 * is dispatched in turn.  Corrupted commands (bad length or ident 0)
 * and handler errors are answered with a reject, and leftover trailing
 * bytes after the loop get one final reject.  The skb is consumed.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	int err;

	/* Feed raw sockets (e.g. monitors) before parsing */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
		u16 len;

		cmd = (void *) skb->data;
		skb_pull(skb, L2CAP_CMD_HDR_SIZE);

		len = le16_to_cpu(cmd->len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
		       cmd->ident);

		if (len > skb->len || !cmd->ident) {
			BT_DBG("corrupted command");
			l2cap_sig_send_rej(conn, cmd->ident);
			/* Skip what we can of the bogus payload and try
			 * to resynchronize on the next command.
			 */
			skb_pull(skb, len > skb->len ? skb->len : len);
			continue;
		}

		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
		if (err) {
			BT_ERR("Wrong link type (%d)", err);
			l2cap_sig_send_rej(conn, cmd->ident);
		}

		skb_pull(skb, len);
	}

	/* Trailing bytes too short for a command header */
	if (skb->len > 0) {
		BT_DBG("corrupted command");
		l2cap_sig_send_rej(conn, 0);
	}

drop:
	kfree_skb(skb);
}
5549 
5550 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
5551 {
5552 	u16 our_fcs, rcv_fcs;
5553 	int hdr_size;
5554 
5555 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5556 		hdr_size = L2CAP_EXT_HDR_SIZE;
5557 	else
5558 		hdr_size = L2CAP_ENH_HDR_SIZE;
5559 
5560 	if (chan->fcs == L2CAP_FCS_CRC16) {
5561 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5562 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5563 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5564 
5565 		if (our_fcs != rcv_fcs)
5566 			return -EBADMSG;
5567 	}
5568 	return 0;
5569 }
5570 
/* Acknowledge the peer's poll (P-bit) with a final (F-bit) frame: an
 * RNR if we are locally busy, otherwise any pending I-frames, falling
 * back to a plain RR if nothing else carried the F-bit.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Locally busy: tell the peer to stop sending I-frames */
	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just came out of busy with frames still unacked:
	 * restart the retransmission timer.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5604 
/* Chain @new_frag onto @skb's frag_list and update the head skb's
 * length/truesize accounting.  @last_frag tracks the current chain
 * tail (the head skb itself before the first fragment is added) and
 * is advanced to @new_frag.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5623 
/* Reassemble a (possibly segmented) SDU from an ERTM/streaming I-frame
 * according to its SAR bits.  Ownership of @skb is taken: it is either
 * delivered upward, stored in chan->sdu for later fragments, or freed.
 * Returns 0 on success, a negative errno on protocol error (in which
 * case any partial SDU is discarded).
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	/* Default to -EINVAL; each valid path below overwrites it */
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A complete SDU while reassembly is in progress is a
		 * protocol violation.
		 */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		/* A new SDU may not start while one is being rebuilt */
		if (chan->sdu)
			break;

		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		/* First fragment carries the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must not already contain the whole
		 * SDU (that would be SAR_UNSEGMENTED); -EINVAL applies.
		 */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb now owned by chan->sdu; don't free it below */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Continuation must leave the SDU still incomplete */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Final fragment must make the length match exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	/* On any failure drop both this fragment (if still owned) and
	 * any partially reassembled SDU.
	 */
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5708 
/* Re-segment queued outbound data after an MTU change.
 *
 * Not implemented yet; currently a no-op that always reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	return 0;
}
5714 
5715 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5716 {
5717 	u8 event;
5718 
5719 	if (chan->mode != L2CAP_MODE_ERTM)
5720 		return;
5721 
5722 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5723 	l2cap_tx(chan, NULL, NULL, event);
5724 }
5725 
/* Drain the SREJ queue: deliver frames that are now in sequence after
 * retransmissions arrived, stopping at the first gap, local-busy
 * condition, or reassembly error.  Once the queue is empty the channel
 * returns to the normal RECV state and the peer is acked.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap in the sequence - wait for more retransmissions */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5759 
/* Handle a received SREJ S-frame: retransmit the single frame the peer
 * selectively rejected, enforcing reqseq sanity and the max_tx retry
 * limit (violations tear the connection down).
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* Peer may not SREJ a frame we have not sent yet */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retries */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* P-bit set: retransmit and answer with an F-bit frame */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this final frame answers
			 * an SREJ we already acted on for the same seq.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5817 
/* Handle a received REJ S-frame: retransmit all unacked frames from
 * reqseq onward, enforcing reqseq sanity and the max_tx retry limit
 * (violations tear the connection down).
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* Peer may not REJ a frame we have not sent yet */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retries */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit if this REJ was not already acted on */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5854 
/* Classify the txseq of a received I-frame relative to the current
 * receive window and SREJ state, returning one of the L2CAP_TXSEQ_*
 * verdicts that drive the ERTM receive state machine (expected,
 * duplicate, unexpected/missing, or invalid).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
5940 
/* ERTM receive state machine handler for the normal RECV state.
 * Processes I-frames (delivering in-order data, queueing out-of-order
 * data and issuing SREJs) and RR/RNR/REJ/SREJ S-frames.  Takes
 * ownership of @skb: frames not queued or delivered are freed here.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	struct l2cap_ctrl local_control;
	int err = 0;
	/* Tracks whether ownership of skb was handed off (queued or
	 * consumed), so it is not freed at the end of this function.
	 */
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* l2cap_reassemble_sdu may free skb, hence invalidate
			 * control, so make a copy in advance to use it after
			 * l2cap_reassemble_sdu returns and to avoid the race
			 * condition, for example:
			 *
			 * The current thread calls:
			 *   l2cap_reassemble_sdu
			 *     chan->ops->recv == l2cap_sock_recv_cb
			 *       __sock_queue_rcv_skb
			 * Another thread calls:
			 *   bt_sock_recvmsg
			 *     skb_recv_datagram
			 *     skb_free_datagram
			 * Then the current thread tries to access control, but
			 * it was freed by skb_free_datagram.
			 */
			local_control = *control;
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (local_control.final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					local_control.final = 0;
					l2cap_retransmit_all(chan, &local_control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6092 
/* ERTM receive state machine handler for the SREJ_SENT state, entered
 * after a sequence gap was detected.  Incoming I-frames are queued on
 * srej_q until the missing frames arrive; additional SREJs are issued
 * for further gaps.  Takes ownership of @skb (frames not queued are
 * freed here).
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	/* Tracks whether ownership of skb was handed off (queued), so
	 * it is not freed at the end of this function.
	 */
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* Retransmission we asked for arrived; drop it
			 * from the outstanding SREJ list and try to
			 * deliver any frames that are now in sequence.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6235 
6236 static int l2cap_finish_move(struct l2cap_chan *chan)
6237 {
6238 	BT_DBG("chan %p", chan);
6239 
6240 	chan->rx_state = L2CAP_RX_STATE_RECV;
6241 	chan->conn->mtu = chan->conn->hcon->mtu;
6242 
6243 	return l2cap_resegment(chan);
6244 }
6245 
/* Receive handler for the WAIT_P state (channel move, waiting for the
 * peer's poll).  Only a frame with the P-bit set is acceptable; it
 * synchronizes the transmit side with the peer's reqseq, finishes the
 * move, answers with an F-bit frame, and re-dispatches S-frame events
 * to the normal RECV handler.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* An I-frame with the P-bit set is a protocol violation */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6283 
/* Receive handler for the WAIT_F state (channel move, waiting for the
 * peer's final response).  Only a frame with the F-bit set is
 * acceptable; it resynchronizes the transmit side with the peer's
 * reqseq, adopts the new link MTU, re-segments, and hands the frame to
 * the normal RECV handler.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;
	chan->conn->mtu = chan->conn->hcon->mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6317 
6318 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6319 {
6320 	/* Make sure reqseq is for a packet that has been sent but not acked */
6321 	u16 unacked;
6322 
6323 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6324 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6325 }
6326 
6327 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6328 		    struct sk_buff *skb, u8 event)
6329 {
6330 	int err = 0;
6331 
6332 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6333 	       control, skb, event, chan->rx_state);
6334 
6335 	if (__valid_reqseq(chan, control->reqseq)) {
6336 		switch (chan->rx_state) {
6337 		case L2CAP_RX_STATE_RECV:
6338 			err = l2cap_rx_state_recv(chan, control, skb, event);
6339 			break;
6340 		case L2CAP_RX_STATE_SREJ_SENT:
6341 			err = l2cap_rx_state_srej_sent(chan, control, skb,
6342 						       event);
6343 			break;
6344 		case L2CAP_RX_STATE_WAIT_P:
6345 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
6346 			break;
6347 		case L2CAP_RX_STATE_WAIT_F:
6348 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
6349 			break;
6350 		default:
6351 			/* shut it down */
6352 			break;
6353 		}
6354 	} else {
6355 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6356 		       control->reqseq, chan->next_tx_seq,
6357 		       chan->expected_ack_seq);
6358 		l2cap_send_disconn_req(chan, ECONNRESET);
6359 	}
6360 
6361 	return err;
6362 }
6363 
/* Streaming-mode receive path: deliver in-sequence I-frames, discard
 * everything else (streaming mode has no retransmission), and always
 * advance the expected sequence to just past the received txseq.
 * Always consumes @skb and returns 0.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
	 * the txseq field in advance to use it after l2cap_reassemble_sdu
	 * returns and to avoid the race condition, for example:
	 *
	 * The current thread calls:
	 *   l2cap_reassemble_sdu
	 *     chan->ops->recv == l2cap_sock_recv_cb
	 *       __sock_queue_rcv_skb
	 * Another thread calls:
	 *   bt_sock_recvmsg
	 *     skb_recv_datagram
	 *     skb_free_datagram
	 * Then the current thread tries to access control, but it was freed by
	 * skb_free_datagram.
	 */
	u16 txseq = control->txseq;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out of sequence: drop the frame and any partially
		 * reassembled SDU - a fragment is now missing.
		 */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize to just past the frame we received */
	chan->last_acked_seq = txseq;
	chan->expected_tx_seq = __next_seq(chan, txseq);

	return 0;
}
6415 
/* Validate and dispatch an ERTM/streaming data PDU: check FCS, payload
 * length against MPS, and F/P-bit legality, then route I-frames to
 * l2cap_rx()/l2cap_stream_rx() and S-frames to l2cap_rx() with the
 * corresponding event.  Always consumes @skb and returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Account only for payload when checking against MPS: strip
	 * the SDU-length field of a start fragment and the FCS.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))
			goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Maps L2CAP_SUPER_* values (0-3) to rx events */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6508 
6509 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6510 {
6511 	struct l2cap_conn *conn = chan->conn;
6512 	struct l2cap_le_credits pkt;
6513 	u16 return_credits = l2cap_le_rx_credits(chan);
6514 
6515 	if (chan->rx_credits >= return_credits)
6516 		return;
6517 
6518 	return_credits -= chan->rx_credits;
6519 
6520 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6521 
6522 	chan->rx_credits += return_credits;
6523 
6524 	pkt.cid     = cpu_to_le16(chan->scid);
6525 	pkt.credits = cpu_to_le16(return_credits);
6526 
6527 	chan->ident = l2cap_get_ident(conn);
6528 
6529 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6530 }
6531 
6532 void l2cap_chan_rx_avail(struct l2cap_chan *chan, ssize_t rx_avail)
6533 {
6534 	if (chan->rx_avail == rx_avail)
6535 		return;
6536 
6537 	BT_DBG("chan %p has %zd bytes avail for rx", chan, rx_avail);
6538 
6539 	chan->rx_avail = rx_avail;
6540 
6541 	if (chan->state == BT_CONNECTED)
6542 		l2cap_chan_le_send_credits(chan);
6543 }
6544 
6545 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
6546 {
6547 	int err;
6548 
6549 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
6550 
6551 	/* Wait recv to confirm reception before updating the credits */
6552 	err = chan->ops->recv(chan, skb);
6553 
6554 	if (err < 0 && chan->rx_avail != -1) {
6555 		BT_ERR("Queueing received LE L2CAP data failed");
6556 		l2cap_send_disconn_req(chan, ECONNRESET);
6557 		return err;
6558 	}
6559 
6560 	/* Update credits whenever an SDU is received */
6561 	l2cap_chan_le_send_credits(chan);
6562 
6563 	return err;
6564 }
6565 
/* Receive one LE/ECRED (credit-based flow control) PDU.  Consumes a
 * credit, reassembles multi-PDU SDUs via chan->sdu, and delivers
 * complete SDUs through l2cap_ecred_recv().  Frees the skb internally
 * on all reassembly paths and therefore always returns 0 from those
 * paths; non-zero is only returned before reassembly starts.
 */
static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	/* A PDU without a credit is a protocol violation */
	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("chan %p: rx_credits %u -> %u",
	       chan, chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		/* First PDU of an SDU: starts with the total SDU length */
		u16 sdu_len;

		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Whole SDU in one PDU: deliver immediately */
		if (skb->len == sdu_len)
			return l2cap_ecred_recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* skb ownership passes to the partial SDU */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_ecred_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6670 
/* Dispatch a PDU arriving on a channel CID to the owning channel
 * according to its operating mode. Takes ownership of @skb: it is
 * either consumed by the mode-specific receive path or freed here.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	/* On success the channel is returned locked with a reference held;
	 * both are released at "done" below.
	 */
	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return;
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Negative return means the skb was NOT consumed */
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		/* ops->recv() returning 0 means it consumed the skb */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
6736 
/* Deliver a connectionless (G-frame) PDU to a channel bound to @psm.
 * Only supported on BR/EDR (ACL) links. Consumes @skb on all paths.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	/* Lookup takes a reference on the returned channel, dropped at
	 * the exits below.
	 */
	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	l2cap_chan_lock(chan);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	/* Silently drop frames larger than the channel's incoming MTU */
	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	/* ops->recv() returning 0 means the skb was consumed */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
6777 
/* Entry point for a fully reassembled L2CAP frame: validate the basic
 * header and dispatch by CID to the signaling, connectionless or data
 * channel handlers. Consumes @skb (directly or via the handlers).
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	/* Until the link is fully up, park frames on pending_rx; they are
	 * replayed later by process_pending_rx().
	 */
	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	/* lh keeps pointing at the header: skb_pull() only advances the
	 * data pointer, it does not move memory.
	 */
	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* The header length must match the payload exactly */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames carry the PSM after the header */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6832 
6833 static void process_pending_rx(struct work_struct *work)
6834 {
6835 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6836 					       pending_rx_work);
6837 	struct sk_buff *skb;
6838 
6839 	BT_DBG("");
6840 
6841 	mutex_lock(&conn->lock);
6842 
6843 	while ((skb = skb_dequeue(&conn->pending_rx)))
6844 		l2cap_recv_frame(conn, skb);
6845 
6846 	mutex_unlock(&conn->lock);
6847 }
6848 
/* Return the l2cap_conn attached to @hcon, creating it on first use.
 *
 * On creation the conn holds a reference on @hcon (hci_conn_get()) and
 * its own HCI channel, and all locks, lists and deferred work items are
 * initialised. Returns NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Already set up: reuse the existing conn */
	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	conn->mtu = hcon->mtu;
	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	/* Offer the BR/EDR SMP fixed channel only when LE is enabled and
	 * Secure Connections (or the debug force flag) is available.
	 */
	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_DELAYED_WORK(&conn->id_addr_timer, l2cap_conn_update_id_addr);

	/* Default reason reported by l2cap_disconn_ind() */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
6900 
6901 static bool is_valid_psm(u16 psm, u8 dst_type)
6902 {
6903 	if (!psm)
6904 		return false;
6905 
6906 	if (bdaddr_type_is_le(dst_type))
6907 		return (psm <= 0x00ff);
6908 
6909 	/* PSM must be odd and lsb of upper byte must be 0 */
6910 	return ((psm & 0x0101) == 0x0001);
6911 }
6912 
/* Iteration context for l2cap_chan_by_pid(), used via l2cap_chan_list()
 * when counting deferred ECRED channels owned by the same process.
 */
struct l2cap_chan_data {
	struct l2cap_chan *chan;	/* channel being connected (excluded from count) */
	struct pid *pid;		/* owning process of @chan */
	int count;			/* matching deferred channels found so far */
};
6918 
6919 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
6920 {
6921 	struct l2cap_chan_data *d = data;
6922 	struct pid *pid;
6923 
6924 	if (chan == d->chan)
6925 		return;
6926 
6927 	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
6928 		return;
6929 
6930 	pid = chan->ops->get_peer_pid(chan);
6931 
6932 	/* Only count deferred channels with the same PID/PSM */
6933 	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
6934 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
6935 		return;
6936 
6937 	d->count++;
6938 }
6939 
/* Initiate an outgoing L2CAP channel connection to @dst.
 *
 * Validates PSM/CID against the channel type and mode, creates (or
 * reuses) the underlying ACL/LE link, attaches @chan to the conn and
 * starts the L2CAP connect procedure. Returns 0 on success (including
 * when a connection attempt is already in flight) or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type, u16 timeout)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
	       dst, dst_type, __le16_to_cpu(psm), chan->mode);

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* Raw channels may connect without a valid PSM or CID */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection oriented channels require a PSM */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	/* Fixed channels require a CID */
	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* Reject modes that are disabled by module parameters */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		fallthrough;
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising, connect directly as slave; otherwise
		 * the connection is established via passive scanning.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type, false,
					      chan->sec_level, timeout,
					      HCI_ROLE_SLAVE, 0, 0);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level, timeout,
						   CONN_REASON_L2CAP_CHAN);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
				       CONN_REASON_L2CAP_CHAN, timeout);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* ECRED limits how many channels one process may have connecting
	 * to the same PSM at once.
	 */
	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
		struct l2cap_chan_data data;

		data.chan = chan;
		data.pid = chan->ops->get_peer_pid(chan);
		data.count = 1;

		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);

		/* Check if there isn't too many channels being connected */
		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
			hci_conn_drop(hcon);
			err = -EPROTO;
			goto done;
		}
	}

	mutex_lock(&conn->lock);
	l2cap_chan_lock(chan);

	/* A fixed CID must not already be in use on this connection */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* If the link is already up, start the L2CAP procedure right away */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
7123 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7124 
7125 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
7126 {
7127 	struct l2cap_conn *conn = chan->conn;
7128 	DEFINE_RAW_FLEX(struct l2cap_ecred_reconf_req, pdu, scid, 1);
7129 
7130 	pdu->mtu = cpu_to_le16(chan->imtu);
7131 	pdu->mps = cpu_to_le16(chan->mps);
7132 	pdu->scid[0] = cpu_to_le16(chan->scid);
7133 
7134 	chan->ident = l2cap_get_ident(conn);
7135 
7136 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
7137 		       sizeof(pdu), &pdu);
7138 }
7139 
7140 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
7141 {
7142 	if (chan->imtu > mtu)
7143 		return -EINVAL;
7144 
7145 	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);
7146 
7147 	chan->imtu = mtu;
7148 
7149 	l2cap_ecred_reconfigure(chan);
7150 
7151 	return 0;
7152 }
7153 
7154 /* ---- L2CAP interface with lower layer (HCI) ---- */
7155 
7156 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7157 {
7158 	int exact = 0, lm1 = 0, lm2 = 0;
7159 	struct l2cap_chan *c;
7160 
7161 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7162 
7163 	/* Find listening sockets and check their link_mode */
7164 	read_lock(&chan_list_lock);
7165 	list_for_each_entry(c, &chan_list, global_l) {
7166 		if (c->state != BT_LISTEN)
7167 			continue;
7168 
7169 		if (!bacmp(&c->src, &hdev->bdaddr)) {
7170 			lm1 |= HCI_LM_ACCEPT;
7171 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7172 				lm1 |= HCI_LM_MASTER;
7173 			exact++;
7174 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
7175 			lm2 |= HCI_LM_ACCEPT;
7176 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7177 				lm2 |= HCI_LM_MASTER;
7178 		}
7179 	}
7180 	read_unlock(&chan_list_lock);
7181 
7182 	return exact ? lm1 : lm2;
7183 }
7184 
/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 *
 * A reference is taken on the returned channel; the caller must drop
 * it with l2cap_chan_put(). Returns NULL when no further match exists
 * (or, via l2cap_chan_hold_unless_zero(), when the matched channel's
 * refcount already hit zero).
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	/* Resume after @c, or start from the head of the global list */
	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Match the local address exactly or a wildcard listener */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
7220 
/* HCI connect-complete callback: set up (or tear down, on error) the
 * L2CAP state for the new link and spawn channels for all fixed-channel
 * listeners.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	/* L2CAP only runs on top of ACL and LE links */
	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		/* Clone a per-connection channel from the listener */
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Advance before dropping the reference on the current one */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7281 
7282 int l2cap_disconn_ind(struct hci_conn *hcon)
7283 {
7284 	struct l2cap_conn *conn = hcon->l2cap_data;
7285 
7286 	BT_DBG("hcon %p", hcon);
7287 
7288 	if (!conn)
7289 		return HCI_ERROR_REMOTE_USER_TERM;
7290 	return conn->disc_reason;
7291 }
7292 
7293 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7294 {
7295 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7296 		return;
7297 
7298 	BT_DBG("hcon %p reason %d", hcon, reason);
7299 
7300 	l2cap_conn_del(hcon, bt_to_errno(reason));
7301 }
7302 
7303 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7304 {
7305 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7306 		return;
7307 
7308 	if (encrypt == 0x00) {
7309 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
7310 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7311 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
7312 			   chan->sec_level == BT_SECURITY_FIPS)
7313 			l2cap_chan_close(chan, ECONNREFUSED);
7314 	} else {
7315 		if (chan->sec_level == BT_SECURITY_MEDIUM)
7316 			__clear_chan_timer(chan);
7317 	}
7318 }
7319 
/* HCI security (authentication/encryption) change callback: walk all
 * channels on the connection and advance each one's state machine
 * according to the outcome.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* Successful encryption upgrades the channel security level */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		/* Skip channels with an outstanding connect request */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Established channels just resume after a security change */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security done: send the deferred connect request;
			 * otherwise schedule a disconnect.
			 */
			if (!status && l2cap_check_enc_key_size(hcon))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			/* Incoming BR/EDR connection waiting on security:
			 * answer with the appropriate Connect Response.
			 */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On acceptance, kick off configuration right away */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}
7406 
7407 /* Append fragment into frame respecting the maximum len of rx_skb */
7408 static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
7409 			   u16 len)
7410 {
7411 	if (!conn->rx_skb) {
7412 		/* Allocate skb for the complete frame (with header) */
7413 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7414 		if (!conn->rx_skb)
7415 			return -ENOMEM;
7416 		/* Init rx_len */
7417 		conn->rx_len = len;
7418 	}
7419 
7420 	/* Copy as much as the rx_skb can hold */
7421 	len = min_t(u16, len, skb->len);
7422 	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
7423 	skb_pull(skb, len);
7424 	conn->rx_len -= len;
7425 
7426 	return len;
7427 }
7428 
/* Complete the 2-byte L2CAP length field of a frame being reassembled
 * in conn->rx_skb, then make sure the buffer is large enough for the
 * whole frame (reallocating it if necessary).
 *
 * Returns the number of bytes consumed from @skb, or a negative errno.
 */
static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *rx_skb;
	int len;

	/* Append just enough to complete the header */
	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);

	/* If header could not be read just continue */
	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
		return len;

	rx_skb = conn->rx_skb;
	len = get_unaligned_le16(rx_skb->data);

	/* Check if rx_skb has enough space to received all fragments */
	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
		/* Update expected len */
		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
		return L2CAP_LEN_SIZE;
	}

	/* Reset conn->rx_skb since it will need to be reallocated in order to
	 * fit all fragments.
	 */
	conn->rx_skb = NULL;

	/* Reallocates rx_skb using the exact expected length: the bytes
	 * gathered so far (held in the old rx_skb) are copied into the new
	 * buffer before the old one is freed below.
	 */
	len = l2cap_recv_frag(conn, rx_skb,
			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
	kfree_skb(rx_skb);

	return len;
}
7463 
/* Drop any partially reassembled frame and reset the reassembly state */
static void l2cap_recv_reset(struct l2cap_conn *conn)
{
	kfree_skb(conn->rx_skb);
	conn->rx_skb = NULL;
	conn->rx_len = 0;
}
7470 
7471 struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *c)
7472 {
7473 	if (!c)
7474 		return NULL;
7475 
7476 	BT_DBG("conn %p orig refcnt %u", c, kref_read(&c->ref));
7477 
7478 	if (!kref_get_unless_zero(&c->ref))
7479 		return NULL;
7480 
7481 	return c;
7482 }
7483 
/* Entry point for ACL data from the HCI core: reassemble HCI fragments
 * (ACL_START/ACL_CONT) into complete L2CAP frames and feed them to
 * l2cap_recv_frame(). Consumes @skb.
 */
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn;
	int len;

	/* Lock hdev to access l2cap_data to avoid race with l2cap_conn_del */
	hci_dev_lock(hcon->hdev);

	conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	/* Hold the conn so it can't be freed while we process the data */
	conn = l2cap_conn_hold_unless_zero(conn);

	hci_dev_unlock(hcon->hdev);

	if (!conn) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);

	mutex_lock(&conn->lock);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start frame while reassembling means the previous frame
		 * was truncated: reset and flag the link unreliable.
		 */
		if (conn->rx_skb) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment may not contain the L2CAP length so just
		 * copy the initial byte when that happens and use conn->mtu as
		 * expected length.
		 */
		if (skb->len < L2CAP_LEN_SIZE) {
			l2cap_recv_frag(conn, skb, conn->mtu);
			break;
		}

		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			goto unlock;
		}

		BT_DBG("Start: total len %d, frag len %u", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %u, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		if (l2cap_recv_frag(conn, skb, len) < 0)
			goto drop;

		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);

		/* Continuation without a start fragment is a protocol error */
		if (!conn->rx_skb) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Complete the L2CAP length if it has not been read */
		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
			if (l2cap_recv_len(conn, skb) < 0) {
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}

			/* Header still could not be read just continue */
			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
				break;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %u, expected %u)",
			       skb->len, conn->rx_len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		l2cap_recv_frag(conn, skb, skb->len);

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
unlock:
	mutex_unlock(&conn->lock);
	l2cap_conn_put(conn);
}
7602 
/* Callbacks registered with the HCI core so L2CAP is notified of
 * connection, disconnection and security (encryption) events.
 */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
7609 
7610 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7611 {
7612 	struct l2cap_chan *c;
7613 
7614 	read_lock(&chan_list_lock);
7615 
7616 	list_for_each_entry(c, &chan_list, global_l) {
7617 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7618 			   &c->src, c->src_type, &c->dst, c->dst_type,
7619 			   c->state, __le16_to_cpu(c->psm),
7620 			   c->scid, c->dcid, c->imtu, c->omtu,
7621 			   c->sec_level, c->mode);
7622 	}
7623 
7624 	read_unlock(&chan_list_lock);
7625 
7626 	return 0;
7627 }
7628 
7629 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
7630 
7631 static struct dentry *l2cap_debugfs;
7632 
7633 int __init l2cap_init(void)
7634 {
7635 	int err;
7636 
7637 	err = l2cap_init_sockets();
7638 	if (err < 0)
7639 		return err;
7640 
7641 	hci_register_cb(&l2cap_cb);
7642 
7643 	if (IS_ERR_OR_NULL(bt_debugfs))
7644 		return 0;
7645 
7646 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7647 					    NULL, &l2cap_debugfs_fops);
7648 
7649 	return 0;
7650 }
7651 
/* Module exit: tear down in reverse order of l2cap_init() */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
7658 
/* Runtime-tunable module parameters (also visible via sysfs, mode 0644) */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_ecred, bool, 0644);
MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
7664