xref: /linux/net/bluetooth/l2cap_core.c (revision 32e940f2bd3b16551f23ea44be47f6f5d1746d64)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 
43 #define LE_FLOWCTL_MAX_CREDITS 65535
44 
45 bool disable_ertm;
46 bool enable_ecred = IS_ENABLED(CONFIG_BT_LE_L2CAP_ECRED);
47 
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49 
50 static LIST_HEAD(chan_list);
51 static DEFINE_RWLOCK(chan_list_lock);
52 
53 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
54 				       u8 code, u8 ident, u16 dlen, void *data);
55 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 			   void *data);
57 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
58 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59 
60 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
61 		     struct sk_buff_head *skbs, u8 event);
62 static void l2cap_retrans_timeout(struct work_struct *work);
63 static void l2cap_monitor_timeout(struct work_struct *work);
64 static void l2cap_ack_timeout(struct work_struct *work);
65 
/* Map an HCI link type plus HCI address type to the exported BDADDR_*
 * address type used by the socket layer.
 */
static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
{
	if (link_type != LE_LINK)
		return BDADDR_BREDR;

	return bdaddr_type == ADDR_LE_DEV_PUBLIC ? BDADDR_LE_PUBLIC :
						   BDADDR_LE_RANDOM;
}
77 
/* Return the BDADDR_* type of @hcon's source (local) address. */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}
82 
/* Return the BDADDR_* type of @hcon's destination (remote) address. */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}
87 
88 /* ---- L2CAP channels ---- */
89 
/* Find a channel on @conn by destination CID.  No reference is taken;
 * callers are expected to serialize access to conn->chan_l.
 */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->dcid == cid)
			return chan;
	}

	return NULL;
}
101 
/* Find a channel on @conn by source CID.  No reference is taken;
 * callers are expected to serialize access to conn->chan_l.
 */
static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->scid == cid)
			return chan;
	}

	return NULL;
}
113 
/* Find channel with given SCID.
 * Returns a referenced, locked channel, or NULL.  The reference is only
 * taken if the channel's refcount has not already dropped to zero (i.e.
 * the channel is not mid-destruction).
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}

	return c;
}
132 
/* Find channel with given DCID.
 * Returns a referenced, locked channel, or NULL.  The reference is only
 * taken if the channel's refcount has not already dropped to zero (i.e.
 * the channel is not mid-destruction).
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}

	return c;
}
151 
/* Find a channel on @conn by the signalling command identifier it is
 * waiting on.  No reference is taken.
 */
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->ident == ident)
			return chan;
	}

	return NULL;
}
163 
/* Search the global channel list for one bound to source PSM @psm on
 * address @src, restricted to the same transport (BR/EDR vs LE) as
 * @src_type.  Caller holds chan_list_lock.
 */
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
						      u8 src_type)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		/* BR/EDR channels never match LE lookups and vice versa */
		if ((src_type == BDADDR_BREDR) != (c->src_type == BDADDR_BREDR))
			continue;

		if (c->sport == psm && !bacmp(&c->src, src))
			return c;
	}

	return NULL;
}
181 
/* Reserve a PSM for @chan on source address @src.
 *
 * If @psm is non-zero it is claimed as-is, failing with -EADDRINUSE when
 * another channel already binds it on the same address/transport.  If
 * @psm is zero a free dynamic PSM is picked: the BR/EDR dynamic range
 * stepping by 2, or the LE dynamic range stepping by 1.  Returns 0 on
 * success, -EINVAL if the whole dynamic range is exhausted.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	/* Refuse a PSM already bound on this source address */
	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		if (chan->src_type == BDADDR_BREDR) {
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		/* Stays -EINVAL if no free PSM is found below */
		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
							 chan->src_type)) {
				chan->psm   = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
226 
/* Bind @chan to fixed source CID @scid, switching it to the fixed
 * channel type and its default MTU.  Always succeeds.
 */
int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->chan_type = L2CAP_CHAN_FIXED;
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
241 
l2cap_alloc_cid(struct l2cap_conn * conn)242 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
243 {
244 	u16 cid, dyn_end;
245 
246 	if (conn->hcon->type == LE_LINK)
247 		dyn_end = L2CAP_CID_LE_DYN_END;
248 	else
249 		dyn_end = L2CAP_CID_DYN_END;
250 
251 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
252 		if (!__l2cap_get_chan_by_scid(conn, cid))
253 			return cid;
254 	}
255 
256 	return 0;
257 }
258 
/* Move @chan to @state and notify the channel owner via state_change(). */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
267 
/* Like l2cap_state_change() but also reports error @err to the owner
 * (and skips the debug log).
 */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
274 
/* Report error @err to the channel owner without changing the state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
279 
/* Arm the ERTM retransmission timer — but not while the monitor timer is
 * pending (the two are alternatives), and not if no retransmission
 * timeout has been configured.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
288 
/* Arm the ERTM monitor timer, cancelling any pending retransmission
 * timer first.  Skipped entirely if no monitor timeout is configured.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
297 
/* Return the skb in @head whose ERTM TxSeq equals @seq, or NULL if no
 * queued frame carries that sequence number.
 */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb)
		if (bt_cb(skb)->l2cap.txseq == seq)
			return skb;

	return NULL;
}
310 
311 /* ---- L2CAP sequence number lists ---- */
312 
313 /* For ERTM, ordered lists of sequence numbers must be tracked for
314  * SREJ requests that are received and for frames that are to be
315  * retransmitted. These seq_list functions implement a singly-linked
316  * list in an array, where membership in the list can also be checked
317  * in constant time. Items can also be added to the tail of the list
318  * and removed from the head in constant time, without further memory
319  * allocs or frees.
320  */
321 
/* Allocate and clear the backing array of a sequence-number list sized
 * for @size entries.  Returns 0 or -ENOMEM.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t nslots, i;

	/* Round the allocation up to a power of two so sequence numbers
	 * (which may be up to 14 bits) map into the smaller array with a
	 * simple mask.
	 */
	nslots = roundup_pow_of_two(size);

	seq_list->list = kmalloc_array(nslots, sizeof(u16), GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	seq_list->mask = nslots - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;

	for (i = 0; i < nslots; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
344 
/* Release the array allocated by l2cap_seq_list_init(). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
349 
/* Return true if @seq is currently a member of the list.  A slot holds
 * L2CAP_SEQ_LIST_CLEAR exactly when its sequence number is not queued.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
356 
/* Remove and return the sequence number at the head of the list.
 * NOTE(review): assumes the list is non-empty — popping when head is
 * L2CAP_SEQ_LIST_CLEAR would use that sentinel as an index; verify
 * callers check first.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next linked entry and clear this slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	/* The tail sentinel means we just popped the last entry */
	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
372 
l2cap_seq_list_clear(struct l2cap_seq_list * seq_list)373 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
374 {
375 	u16 i;
376 
377 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
378 		return;
379 
380 	for (i = 0; i <= seq_list->mask; i++)
381 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
382 
383 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
384 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
385 }
386 
/* Append @seq to the tail of the list in constant time.  Sequence
 * numbers already on the list are silently ignored.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Already a member: nothing to do */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;	/* first entry */
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
404 
/* Delayed work run when the channel timer (armed via __set_chan_timer())
 * expires: closes the channel with a reason derived from its state and
 * drops the reference taken when the timer was scheduled.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	if (!conn)
		return;

	mutex_lock(&conn->lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	/* Drop the reference taken when the timer was armed */
	l2cap_chan_put(chan);

	mutex_unlock(&conn->lock);
}
440 
/* Allocate and initialise a new channel in BT_OPEN state with a single
 * kref, add it to the global channel list, and set up its delayed-work
 * timers.  Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc_obj(*chan, GFP_ATOMIC);
	if (!chan)
		return NULL;

	skb_queue_head_init(&chan->tx_q);
	skb_queue_head_init(&chan->srej_q);
	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	/* Available receive buffer space is initially unknown */
	chan->rx_avail = -1;

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
480 
/* kref release callback: unlink @chan from the global channel list and
 * free it.  Invoked from l2cap_chan_put() when the last ref is dropped.
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
493 
/* Take an additional reference on @c.  The refcount must already be
 * non-zero (kref_get()).
 */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_get(&c->kref);
}
EXPORT_SYMBOL_GPL(l2cap_chan_hold);
501 
/* Take a reference on @c unless its refcount already hit zero (channel
 * being destroyed).  Returns @c on success, NULL otherwise.
 */
struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	if (!kref_get_unless_zero(&c->kref))
		return NULL;

	return c;
}
511 
/* Drop a reference on @c; frees it via l2cap_chan_destroy() when the
 * refcount reaches zero.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
519 
l2cap_chan_set_defaults(struct l2cap_chan * chan)520 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
521 {
522 	chan->fcs  = L2CAP_FCS_CRC16;
523 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
524 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
525 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
526 	chan->remote_max_tx = chan->max_tx;
527 	chan->remote_tx_win = chan->tx_win;
528 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
529 	chan->sec_level = BT_SECURITY_LOW;
530 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
531 	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
532 	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
533 
534 	chan->conf_state = 0;
535 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
536 
537 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
538 }
539 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
540 
/* Compute how many LE flow-control credits to grant the remote, based
 * on the space known to be available in the receive buffer.
 */
static __u16 l2cap_le_rx_credits(struct l2cap_chan *chan)
{
	/* Bytes of the in-progress SDU already buffered, if any */
	size_t sdu_len = chan->sdu ? chan->sdu->len : 0;

	/* Avoid dividing by zero below if MPS was never set */
	if (chan->mps == 0)
		return 0;

	/* If we don't know the available space in the receiver buffer, give
	 * enough credits for a full packet.
	 */
	if (chan->rx_avail == -1)
		return (chan->imtu / chan->mps) + 1;

	/* If we know how much space is available in the receive buffer, give
	 * out as many credits as would fill the buffer.
	 */
	if (chan->rx_avail <= sdu_len)
		return 0;

	return DIV_ROUND_UP(chan->rx_avail - sdu_len, chan->mps);
}
562 
/* Initialise the LE credit-based flow-control state of @chan, with
 * @tx_credits as the initial credits granted to us by the remote.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	chan->rx_credits = l2cap_le_rx_credits(chan);

	skb_queue_head_init(&chan->tx_q);
}
575 
/* Initialise an enhanced-credit (ECRED) channel: same as LE flow control
 * but enforcing the minimum MPS the mode requires.
 */
static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
{
	l2cap_le_flowctl_init(chan, tx_credits);

	/* L2CAP implementations shall support a minimum MPS of 64 octets */
	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
		chan->mps = L2CAP_ECRED_MIN_MPS;
		/* MPS changed, so the credit calculation must be redone */
		chan->rx_credits = l2cap_le_rx_credits(chan);
	}
}
586 
/* Attach @chan to @conn: assign CIDs and default MTU according to the
 * channel type, set best-effort QoS defaults, take a channel reference
 * plus (usually) an hci_conn reference, and append the channel to the
 * connection's list.  Caller holds conn->lock (see l2cap_chan_add()).
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Best-effort QoS defaults */
	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* Reference held by the conn list; dropped in l2cap_chan_del() */
	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	/* Append to the list since the order matters for ECRED */
	list_add_tail(&chan->list, &conn->chan_l);
}
639 
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->lock);
}
646 
/* Detach @chan from its connection and tear it down with error @err:
 * cancel timers, notify the owner, drop the references taken in
 * __l2cap_chan_add(), and purge any mode-specific queued frames.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference held by the conn list */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);
	}

	/* If configuration never completed there is no mode-specific
	 * state (queues, seq lists, ERTM timers) to clean up.
	 */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		fallthrough;

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
704 
/* Invoke @func(chan, @data) on every channel of @conn whose ident
 * matches @id.  Uses the _safe iterator so @func may remove the channel
 * from the list.
 */
static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
				 l2cap_chan_func_t func, void *data)
{
	struct l2cap_chan *chan, *tmp;

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list)
		if (chan->ident == id)
			func(chan, data);
}
715 
/* Invoke @func(chan, @data) on every channel of @conn.  Caller holds
 * conn->lock (see l2cap_chan_list()).
 */
static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
			      void *data)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list)
		func(chan, data);
}
725 
/* Run @func on every channel of @conn under the connection lock.
 * A NULL @conn is tolerated and treated as a no-op.
 */
void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
		     void *data)
{
	if (!conn)
		return;

	mutex_lock(&conn->lock);
	__l2cap_chan_list(conn, func, data);
	mutex_unlock(&conn->lock);
}

EXPORT_SYMBOL_GPL(l2cap_chan_list);
738 
/* Delayed work: copy the hci_conn's current destination address and
 * address type into every channel on the connection.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_timer.work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}
757 
/* Reject a pending LE connection request on @chan: deferred-setup
 * channels are refused with "authorization" and all others with
 * "bad PSM", then the channel is moved to BT_DISCONN.
 */
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_LE_AUTHORIZATION;
	else
		result = L2CAP_CR_LE_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.dcid    = cpu_to_le16(chan->scid);
	rsp.mtu     = cpu_to_le16(chan->imtu);
	rsp.mps     = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}
780 
/* Reject a pending ECRED connection: move to BT_DISCONN and send the
 * deferred connection response via __l2cap_ecred_conn_rsp_defer().
 */
static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
{
	l2cap_state_change(chan, BT_DISCONN);

	__l2cap_ecred_conn_rsp_defer(chan);
}
787 
/* Reject a pending BR/EDR connection request on @chan: deferred-setup
 * channels are refused with "security block" and all others with
 * "bad PSM", then the channel is moved to BT_DISCONN.
 */
static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_SEC_BLOCK;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}
808 
/* Close @chan with the given @reason, taking state-appropriate action:
 * connected connection-oriented channels first send a Disconnect
 * request; channels awaiting acceptance (BT_CONNECT2) are rejected;
 * everything else is torn down immediately.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Graceful: request disconnect and arm a timer to
			 * force teardown if no response arrives.
			 */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK) {
				switch (chan->mode) {
				case L2CAP_MODE_LE_FLOWCTL:
					l2cap_chan_le_connect_reject(chan);
					break;
				case L2CAP_MODE_EXT_FLOWCTL:
					l2cap_chan_ecred_connect_reject(chan);
					/* ECRED reject path does not fall
					 * through to l2cap_chan_del()
					 */
					return;
				}
			}
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
859 
l2cap_get_auth_type(struct l2cap_chan * chan)860 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
861 {
862 	switch (chan->chan_type) {
863 	case L2CAP_CHAN_RAW:
864 		switch (chan->sec_level) {
865 		case BT_SECURITY_HIGH:
866 		case BT_SECURITY_FIPS:
867 			return HCI_AT_DEDICATED_BONDING_MITM;
868 		case BT_SECURITY_MEDIUM:
869 			return HCI_AT_DEDICATED_BONDING;
870 		default:
871 			return HCI_AT_NO_BONDING;
872 		}
873 		break;
874 	case L2CAP_CHAN_CONN_LESS:
875 		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
876 			if (chan->sec_level == BT_SECURITY_LOW)
877 				chan->sec_level = BT_SECURITY_SDP;
878 		}
879 		if (chan->sec_level == BT_SECURITY_HIGH ||
880 		    chan->sec_level == BT_SECURITY_FIPS)
881 			return HCI_AT_NO_BONDING_MITM;
882 		else
883 			return HCI_AT_NO_BONDING;
884 		break;
885 	case L2CAP_CHAN_CONN_ORIENTED:
886 		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
887 			if (chan->sec_level == BT_SECURITY_LOW)
888 				chan->sec_level = BT_SECURITY_SDP;
889 
890 			if (chan->sec_level == BT_SECURITY_HIGH ||
891 			    chan->sec_level == BT_SECURITY_FIPS)
892 				return HCI_AT_NO_BONDING_MITM;
893 			else
894 				return HCI_AT_NO_BONDING;
895 		}
896 		fallthrough;
897 
898 	default:
899 		switch (chan->sec_level) {
900 		case BT_SECURITY_HIGH:
901 		case BT_SECURITY_FIPS:
902 			return HCI_AT_GENERAL_BONDING_MITM;
903 		case BT_SECURITY_MEDIUM:
904 			return HCI_AT_GENERAL_BONDING;
905 		default:
906 			return HCI_AT_NO_BONDING;
907 		}
908 		break;
909 	}
910 }
911 
/* Service level security */
/* Raise the link security to the channel's requirement: SMP pairing for
 * LE links, HCI authentication/encryption for BR/EDR.
 */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
926 
/* Allocate a signalling command identifier for @conn, cycling through
 * the valid range starting just past the last ident handed out.
 * Returns 0 only if every ident is currently in use, which would
 * indicate an ident leak.
 */
static int l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 max;
	int ident;

	/* LE link does not support tools like l2ping so use the full range */
	if (conn->hcon->type == LE_LINK)
		max = 255;
	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */
	else
		max = 128;

	/* Allocate ident using min as last used + 1 (cyclic) */
	ident = ida_alloc_range(&conn->tx_ida, READ_ONCE(conn->tx_ident) + 1,
				max, GFP_ATOMIC);
	/* Force min 1 to start over */
	if (ident <= 0) {
		ident = ida_alloc_range(&conn->tx_ida, 1, max, GFP_ATOMIC);
		if (ident <= 0) {
			/* If all idents are in use, log an error, this is
			 * extremely unlikely to happen and would indicate a bug
			 * in the code that idents are not being freed properly.
			 */
			BT_ERR("Unable to allocate ident: %d", ident);
			return 0;
		}
	}

	WRITE_ONCE(conn->tx_ident, ident);

	return ident;
}
963 
/* Hand @skb to the HCI layer, dropping it instead if the underlying
 * connection is no longer valid.
 */
static void l2cap_send_acl(struct l2cap_conn *conn, struct sk_buff *skb,
			   u8 flags)
{
	/* Check if the hcon still valid before attempting to send */
	if (!hci_conn_valid(conn->hcon->hdev, conn->hcon)) {
		kfree_skb(skb);
		return;
	}

	hci_send_acl(conn->hchan, skb, flags);
}
973 
/* Build and transmit an L2CAP signalling command on @conn.  The frame
 * is sent at maximum priority, marked force-active, and non-flushable
 * where the link allows it.  Silently does nothing if the command skb
 * cannot be built.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	l2cap_send_acl(conn, skb, flags);
}
998 
/* Transmit a single data frame for @chan over the ACL link, choosing
 * flush semantics from the link type and the channel's FLAG_FLUSHABLE.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
1021 
/* Decode a 16-bit enhanced control field into @control. Fields that do
 * not apply to the decoded frame type are zeroed.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	/* Fields shared by I- and S-frames */
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
	control->sframe = !!(enh & L2CAP_CTRL_FRAME_TYPE);

	if (control->sframe) {
		/* S-Frame: supervisory fields only */
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame: segmentation and TX sequence fields */
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
		control->poll = 0;
		control->super = 0;
	}
}
1045 
/* Decode a 32-bit extended control field into @control. Fields that do
 * not apply to the decoded frame type are zeroed.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	/* Fields shared by I- and S-frames */
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
	control->sframe = !!(ext & L2CAP_EXT_CTRL_FRAME_TYPE);

	if (control->sframe) {
		/* S-Frame: supervisory fields only */
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame: segmentation and TX sequence fields */
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
		control->poll = 0;
		control->super = 0;
	}
}
1069 
__unpack_control(struct l2cap_chan * chan,struct sk_buff * skb)1070 static inline void __unpack_control(struct l2cap_chan *chan,
1071 				    struct sk_buff *skb)
1072 {
1073 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1074 		__unpack_extended_control(get_unaligned_le32(skb->data),
1075 					  &bt_cb(skb)->l2cap);
1076 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1077 	} else {
1078 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
1079 					  &bt_cb(skb)->l2cap);
1080 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1081 	}
1082 }
1083 
__pack_extended_control(struct l2cap_ctrl * control)1084 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1085 {
1086 	u32 packed;
1087 
1088 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1089 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1090 
1091 	if (control->sframe) {
1092 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1093 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1094 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1095 	} else {
1096 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1097 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1098 	}
1099 
1100 	return packed;
1101 }
1102 
__pack_enhanced_control(struct l2cap_ctrl * control)1103 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1104 {
1105 	u16 packed;
1106 
1107 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1108 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1109 
1110 	if (control->sframe) {
1111 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1112 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1113 		packed |= L2CAP_CTRL_FRAME_TYPE;
1114 	} else {
1115 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1116 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1117 	}
1118 
1119 	return packed;
1120 }
1121 
__pack_control(struct l2cap_chan * chan,struct l2cap_ctrl * control,struct sk_buff * skb)1122 static inline void __pack_control(struct l2cap_chan *chan,
1123 				  struct l2cap_ctrl *control,
1124 				  struct sk_buff *skb)
1125 {
1126 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1127 		put_unaligned_le32(__pack_extended_control(control),
1128 				   skb->data + L2CAP_HDR_SIZE);
1129 	} else {
1130 		put_unaligned_le16(__pack_enhanced_control(control),
1131 				   skb->data + L2CAP_HDR_SIZE);
1132 	}
1133 }
1134 
__ertm_hdr_size(struct l2cap_chan * chan)1135 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1136 {
1137 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1138 		return L2CAP_EXT_HDR_SIZE;
1139 	else
1140 		return L2CAP_ENH_HDR_SIZE;
1141 }
1142 
/* Allocate and build an ERTM S-frame PDU carrying the already-packed
 * @control field, appending an FCS when the channel negotiated CRC16.
 * Returns the skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Basic L2CAP header: len excludes the header itself */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	/* Control field width depends on extended-control negotiation */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* FCS covers everything written so far */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1175 
/* Build and transmit a supervisory (S) frame described by @control,
 * updating the channel's F-bit, RNR and ack bookkeeping as side
 * effects. No-op if @control does not describe an S-frame.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* A pending F-bit is carried by the next non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	/* Track whether the peer has been told we are busy */
	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* Everything but SREJ acknowledges frames up to reqseq, so the
	 * ack timer can be stopped.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1213 
/* Send an RR (ready) or RNR (busy) supervisory frame, depending on the
 * channel's CONN_LOCAL_BUSY state, acknowledging up to buffer_seq.
 */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control = {
		.sframe = 1,
		.poll = poll,
	};

	BT_DBG("chan %p, poll %d", chan, poll);

	control.super = test_bit(CONN_LOCAL_BUSY, &chan->conn_state) ?
			L2CAP_SUPER_RNR : L2CAP_SUPER_RR;
	control.reqseq = chan->buffer_seq;

	l2cap_send_sframe(chan, &control);
}
1232 
__l2cap_no_conn_pending(struct l2cap_chan * chan)1233 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1234 {
1235 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1236 		return true;
1237 
1238 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1239 }
1240 
/* Issue an L2CAP Connection Request for @chan and mark the connect as
 * pending until the response arrives.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	/* Remember the ident so the response can be matched to us */
	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1255 
/* Move @chan to BT_CONNECTED and notify its owner via ->ready().
 * Idempotent: a second call on a connected channel is a no-op.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Credit-based modes cannot transmit without peer credits */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);
		break;
	}

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1281 
/* Send an LE credit-based connection request for @chan. Idempotent via
 * FLAG_LE_CONN_REQ_SENT.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	/* Fall back to the link MTU when the user did not set one */
	if (!chan->imtu)
		chan->imtu = chan->conn->mtu;

	l2cap_le_flowctl_init(chan, 0);

	memset(&req, 0, sizeof(req));
	req.psm     = chan->psm;
	req.scid    = cpu_to_le16(chan->scid);
	req.mtu     = cpu_to_le16(chan->imtu);
	req.mps     = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	/* Remember the ident so the response can be matched to us */
	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1307 
/* Scratch state used to aggregate deferred ECRED channels into a
 * single L2CAP_ECRED_CONN_REQ PDU.
 */
struct l2cap_ecred_conn_data {
	struct {
		struct l2cap_ecred_conn_req_hdr req;
		__le16 scid[5];		/* source CIDs carried by the request */
	} __packed pdu;
	struct l2cap_chan *chan;	/* channel that initiates the request */
	struct pid *pid;		/* owner PID; only matching channels join */
	int count;			/* number of scid[] entries filled */
};
1317 
l2cap_ecred_defer_connect(struct l2cap_chan * chan,void * data)1318 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1319 {
1320 	struct l2cap_ecred_conn_data *conn = data;
1321 	struct pid *pid;
1322 
1323 	if (chan == conn->chan)
1324 		return;
1325 
1326 	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1327 		return;
1328 
1329 	pid = chan->ops->get_peer_pid(chan);
1330 
1331 	/* Only add deferred channels with the same PID/PSM */
1332 	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1333 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1334 		return;
1335 
1336 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1337 		return;
1338 
1339 	l2cap_ecred_init(chan, 0);
1340 
1341 	/* Set the same ident so we can match on the rsp */
1342 	chan->ident = conn->chan->ident;
1343 
1344 	/* Include all channels deferred */
1345 	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
1346 
1347 	conn->count++;
1348 }
1349 
/* Send an Enhanced Credit Based connection request for @chan, batching
 * in any other deferred channels with the same PID/PSM so they share a
 * single request PDU. Idempotent via FLAG_ECRED_CONN_REQ_SENT.
 */
static void l2cap_ecred_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_conn_data data;

	/* Deferred channels are picked up by an initiating channel */
	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	memset(&data, 0, sizeof(data));
	data.pdu.req.psm     = chan->psm;
	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.req.mps     = cpu_to_le16(chan->mps);
	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.scid[0]     = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	data.count = 1;
	data.chan = chan;
	data.pid = chan->ops->get_peer_pid(chan);

	/* Collect additional deferred channels into data.pdu.scid[] */
	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);

	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
		       &data.pdu);
}
1382 
/* Drive an LE channel forward: enforce security first, then either
 * mark channels without a PSM ready or start the appropriate
 * LE/ECRED connect procedure for BT_CONNECT channels.
 */
static void l2cap_le_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (!smp_conn_security(conn->hcon, chan->sec_level))
		return;

	/* No PSM means there is no connect procedure to run */
	if (!chan->psm) {
		l2cap_chan_ready(chan);
		return;
	}

	if (chan->state == BT_CONNECT) {
		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
			l2cap_ecred_connect(chan);
		else
			l2cap_le_connect(chan);
	}
}
1402 
l2cap_start_connection(struct l2cap_chan * chan)1403 static void l2cap_start_connection(struct l2cap_chan *chan)
1404 {
1405 	if (chan->conn->hcon->type == LE_LINK) {
1406 		l2cap_le_start(chan);
1407 	} else {
1408 		l2cap_send_conn_req(chan);
1409 	}
1410 }
1411 
/* Kick off the feature-mask information request on @conn, once per
 * connection, arming the info timer as a response timeout.
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1429 
l2cap_check_enc_key_size(struct hci_conn * hcon,struct l2cap_chan * chan)1430 static bool l2cap_check_enc_key_size(struct hci_conn *hcon,
1431 				     struct l2cap_chan *chan)
1432 {
1433 	/* The minimum encryption key size needs to be enforced by the
1434 	 * host stack before establishing any L2CAP connections. The
1435 	 * specification in theory allows a minimum of 1, but to align
1436 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1437 	 *
1438 	 * This check might also be called for unencrypted connections
1439 	 * that have no key size requirements. Ensure that the link is
1440 	 * actually encrypted before enforcing a key size.
1441 	 */
1442 	int min_key_size = hcon->hdev->min_enc_key_size;
1443 
1444 	/* On FIPS security level, key size must be 16 bytes */
1445 	if (chan->sec_level == BT_SECURITY_FIPS)
1446 		min_key_size = 16;
1447 
1448 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1449 		hcon->enc_key_size >= min_key_size);
1450 }
1451 
/* Start channel establishment once all prerequisites hold: info
 * exchange complete (BR/EDR), security satisfied, no connect pending
 * and an acceptable encryption key size. Otherwise trigger the missing
 * step or arm the disconnect timer.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	/* BR/EDR needs the peer's feature mask first; the info
	 * response/timeout path re-runs channel startup.
	 */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	if (l2cap_check_enc_key_size(conn->hcon, chan))
		l2cap_start_connection(chan);
	else
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
1478 
/* Non-zero when both the peer's @feat_mask and our local feature mask
 * (extended with ERTM/streaming unless disable_ertm) support @mode.
 */
static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
{
	u32 local_feat_mask = l2cap_feat_mask;

	if (!disable_ertm)
		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;

	switch (mode) {
	case L2CAP_MODE_ERTM:
		return feat_mask & local_feat_mask & L2CAP_FEAT_ERTM;
	case L2CAP_MODE_STREAMING:
		return feat_mask & local_feat_mask & L2CAP_FEAT_STREAMING;
	default:
		return 0x00;
	}
}
1494 
/* Send an L2CAP Disconnection Request for @chan and move it to
 * BT_DISCONN, recording @err as the channel error for its owner.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* ERTM timers are pointless once we are tearing down */
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1516 
1517 /* ---- L2CAP connections ---- */
/* Walk all channels on @conn and push their state machines forward:
 * connection-less channels become ready, BT_CONNECT channels issue
 * their connect request, and BT_CONNECT2 channels answer the peer's
 * pending connect request. NOTE(review): the visible caller
 * (l2cap_info_timeout) holds conn->lock around this.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	/* _safe: channels may be closed (and unlinked) while iterating */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* State-2 devices close rather than fall back when
			 * the peer lacks the required mode.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon, chan))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let the channel owner decide;
					 * report authorization pending.
					 */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only a successful, not-yet-configured accept
			 * moves on to the configuration phase.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}
}
1593 
/* LE-specific connection-ready handling: kick off pending security for
 * outgoing pairing and, as peripheral, request a connection parameter
 * update when the current interval is outside the configured range.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1626 
/* The underlying link came up: start the BR/EDR info exchange, drive
 * every existing channel forward, run LE-specific setup and schedule
 * processing of any frames queued while the conn was being set up.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connection-less channels only wait for the
			 * feature-mask exchange to have finished.
			 */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process frames that arrived before the conn was fully ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1662 
/* Notify sockets that we cannot guarantee reliability anymore */
l2cap_conn_unreliable(struct l2cap_conn * conn,int err)1664 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1665 {
1666 	struct l2cap_chan *chan;
1667 
1668 	BT_DBG("conn %p", conn);
1669 
1670 	list_for_each_entry(chan, &conn->chan_l, list) {
1671 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1672 			l2cap_chan_set_err(chan, err);
1673 	}
1674 }
1675 
/* Info request timed out: mark the feature exchange as done (with no
 * features learned) and let pending channels proceed.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	mutex_lock(&conn->lock);
	l2cap_conn_start(conn);
	mutex_unlock(&conn->lock);
}
1688 
1689 /*
1690  * l2cap_user
1691  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1692  * callback is called during registration. The ->remove callback is called
1693  * during unregistration.
1694  * An l2cap_user object can either be explicitly unregistered or when the
1695  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1696  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1697  * External modules must own a reference to the l2cap_conn object if they intend
1698  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1699  * any time if they don't.
1700  */
1701 
/* Register an external l2cap_user on @conn. Returns -EINVAL if the
 * user is already linked, -ENODEV once l2cap_conn_del() has torn the
 * connection down, or the (non-zero) result of the user's ->probe()
 * callback, which runs under conn->lock and may veto registration.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects under conn->lock, and we use the same lock here
	 * to protect access to conn->users and conn->hchan.
	 */

	mutex_lock(&conn->lock);

	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	mutex_unlock(&conn->lock);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1737 
l2cap_unregister_user(struct l2cap_conn * conn,struct l2cap_user * user)1738 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1739 {
1740 	mutex_lock(&conn->lock);
1741 
1742 	if (list_empty(&user->list))
1743 		goto out_unlock;
1744 
1745 	list_del_init(&user->list);
1746 	user->remove(conn, user);
1747 
1748 out_unlock:
1749 	mutex_unlock(&conn->lock);
1750 }
1751 EXPORT_SYMBOL(l2cap_unregister_user);
1752 
l2cap_unregister_all_users(struct l2cap_conn * conn)1753 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1754 {
1755 	struct l2cap_user *user;
1756 
1757 	while (!list_empty(&conn->users)) {
1758 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1759 		list_del_init(&user->list);
1760 		user->remove(conn, user);
1761 	}
1762 }
1763 
/* Tear down the L2CAP state attached to @hcon: stop timers and pending
 * RX work, unregister users, close every channel with @err and finally
 * drop the conn reference. No-op when no l2cap_data is attached.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	disable_delayed_work_sync(&conn->info_timer);
	disable_delayed_work_sync(&conn->id_addr_timer);

	mutex_lock(&conn->lock);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	ida_destroy(&conn->tx_ida);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so it survives l2cap_chan_del() */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	hci_chan_del(conn->hchan);
	/* A NULL hchan marks the conn as dead for l2cap_register_user() */
	conn->hchan = NULL;

	hcon->l2cap_data = NULL;
	mutex_unlock(&conn->lock);
	l2cap_conn_put(conn);
}
1817 
/* kref release callback: drop the hcon reference and free the conn */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1825 
/* Take a reference on @conn; paired with l2cap_conn_put() */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
1832 
/* Drop a reference on @conn; released via l2cap_conn_free() at zero */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1838 
1839 /* ---- Socket interface ---- */
1840 
1841 /* Find socket with psm and source / destination bdaddr.
1842  * Returns closest match.
1843  */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *tmp, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
		/* state == 0 means "any state" */
		if (state && c->state != state)
			continue;

		/* Source address type must match the link type */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				/* Skip channels already being torn down */
				if (!l2cap_chan_hold_unless_zero(c))
					continue;

				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	/* Take a reference on the wildcard match, if any survived */
	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1894 
l2cap_monitor_timeout(struct work_struct * work)1895 static void l2cap_monitor_timeout(struct work_struct *work)
1896 {
1897 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1898 					       monitor_timer.work);
1899 
1900 	BT_DBG("chan %p", chan);
1901 
1902 	l2cap_chan_lock(chan);
1903 
1904 	if (!chan->conn) {
1905 		l2cap_chan_unlock(chan);
1906 		l2cap_chan_put(chan);
1907 		return;
1908 	}
1909 
1910 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1911 
1912 	l2cap_chan_unlock(chan);
1913 	l2cap_chan_put(chan);
1914 }
1915 
l2cap_retrans_timeout(struct work_struct * work)1916 static void l2cap_retrans_timeout(struct work_struct *work)
1917 {
1918 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1919 					       retrans_timer.work);
1920 
1921 	BT_DBG("chan %p", chan);
1922 
1923 	l2cap_chan_lock(chan);
1924 
1925 	if (!chan->conn) {
1926 		l2cap_chan_unlock(chan);
1927 		l2cap_chan_put(chan);
1928 		return;
1929 	}
1930 
1931 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1932 	l2cap_chan_unlock(chan);
1933 	l2cap_chan_put(chan);
1934 }
1935 
/* Transmit @skbs in streaming mode: number each frame, append the FCS
 * when negotiated and send immediately. No copy is kept for
 * retransmission.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Streaming I-frames carry no acknowledgement info */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1971 
/* Send as many queued I-frames as the ERTM TX window allows. Each
 * frame is cloned before transmission so the original stays in tx_q
 * for possible retransmission. Returns the number of frames sent, or
 * -ENOTCONN when the channel is no longer connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* The peer asked us to hold off (received RNR) */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Piggyback a pending F-bit on this I-frame */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head to the next unsent frame */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2038 
/* Retransmit every frame whose sequence number sits in retrans_list.
 * Disconnects the channel if a frame exceeds the max_tx retry limit.
 * No-op while the remote peer has signalled busy.
 */
l2cap_ertm_resend(struct l2cap_chan * chan)2039 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2040 {
2041 	struct l2cap_ctrl control;
2042 	struct sk_buff *skb;
2043 	struct sk_buff *tx_skb;
2044 	u16 seq;
2045 
2046 	BT_DBG("chan %p", chan);
2047 
2048 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2049 		return;
2050 
2051 	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2052 		seq = l2cap_seq_list_pop(&chan->retrans_list);
2053 
2054 		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2055 		if (!skb) {
2056 			BT_DBG("Error: Can't retransmit seq %d, frame missing",
2057 			       seq);
2058 			continue;
2059 		}
2060 
	/* Take a local copy of the control block; reqseq/final are
	 * rewritten below without touching the queued original.
	 */
2061 		bt_cb(skb)->l2cap.retries++;
2062 		control = bt_cb(skb)->l2cap;
2063 
2064 		if (chan->max_tx != 0 &&
2065 		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
2066 			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2067 			l2cap_send_disconn_req(chan, ECONNRESET);
2068 			l2cap_seq_list_clear(&chan->retrans_list);
2069 			break;
2070 		}
2071 
2072 		control.reqseq = chan->buffer_seq;
2073 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2074 			control.final = 1;
2075 		else
2076 			control.final = 0;
2077 
2078 		if (skb_cloned(skb)) {
2079 			/* Cloned sk_buffs are read-only, so we need a
2080 			 * writeable copy
2081 			 */
2082 			tx_skb = skb_copy(skb, GFP_KERNEL);
2083 		} else {
2084 			tx_skb = skb_clone(skb, GFP_KERNEL);
2085 		}
2086 
2087 		if (!tx_skb) {
2088 			l2cap_seq_list_clear(&chan->retrans_list);
2089 			break;
2090 		}
2091 
	/* Rewrite the control field in place with the fresh reqseq/final */
2092 		/* Update skb contents */
2093 		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2094 			put_unaligned_le32(__pack_extended_control(&control),
2095 					   tx_skb->data + L2CAP_HDR_SIZE);
2096 		} else {
2097 			put_unaligned_le16(__pack_enhanced_control(&control),
2098 					   tx_skb->data + L2CAP_HDR_SIZE);
2099 		}
2100 
	/* Recompute the FCS over everything except the trailing FCS
	 * field itself, which already occupies the skb tail.
	 */
2101 		/* Update FCS */
2102 		if (chan->fcs == L2CAP_FCS_CRC16) {
2103 			u16 fcs = crc16(0, (u8 *) tx_skb->data,
2104 					tx_skb->len - L2CAP_FCS_SIZE);
2105 			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2106 						L2CAP_FCS_SIZE);
2107 		}
2108 
2109 		l2cap_do_send(chan, tx_skb);
2110 
2111 		BT_DBG("Resent txseq %d", control.txseq);
2112 
2113 		chan->last_acked_seq = chan->buffer_seq;
2114 	}
2115 }
2116 
/* Retransmit the single frame the peer requested (control->reqseq),
 * typically in response to an SREJ S-frame.
 */
l2cap_retransmit(struct l2cap_chan * chan,struct l2cap_ctrl * control)2117 static void l2cap_retransmit(struct l2cap_chan *chan,
2118 			     struct l2cap_ctrl *control)
2119 {
2120 	BT_DBG("chan %p, control %p", chan, control);
2121 
2122 	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2123 	l2cap_ertm_resend(chan);
2124 }
2125 
/* Retransmit all unacked frames starting at control->reqseq, typically
 * in response to a REJ S-frame or a poll response.
 */
l2cap_retransmit_all(struct l2cap_chan * chan,struct l2cap_ctrl * control)2126 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2127 				 struct l2cap_ctrl *control)
2128 {
2129 	struct sk_buff *skb;
2130 
2131 	BT_DBG("chan %p, control %p", chan, control);
2132 
2133 	if (control->poll)
2134 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
2135 
2136 	l2cap_seq_list_clear(&chan->retrans_list);
2137 
2138 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2139 		return;
2140 
2141 	if (chan->unacked_frames) {
	/* First walk: locate the frame carrying reqseq (or stop at
	 * tx_send_head if reqseq refers to a not-yet-sent frame).
	 */
2142 		skb_queue_walk(&chan->tx_q, skb) {
2143 			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2144 			    skb == chan->tx_send_head)
2145 				break;
2146 		}
2147 
	/* Second walk: queue every already-sent frame from that point
	 * up to (excluding) tx_send_head for retransmission.
	 */
2148 		skb_queue_walk_from(&chan->tx_q, skb) {
2149 			if (skb == chan->tx_send_head)
2150 				break;
2151 
2152 			l2cap_seq_list_append(&chan->retrans_list,
2153 					      bt_cb(skb)->l2cap.txseq);
2154 		}
2155 
2156 		l2cap_ertm_resend(chan);
2157 	}
2158 }
2159 
/* Acknowledge received I-frames.  Sends an RNR when locally busy,
 * otherwise tries to piggyback the ack on pending I-frames; an
 * explicit RR is only sent once ~3/4 of the ack window is consumed,
 * else the ack timer is (re)armed.
 */
l2cap_send_ack(struct l2cap_chan * chan)2160 static void l2cap_send_ack(struct l2cap_chan *chan)
2161 {
2162 	struct l2cap_ctrl control;
2163 	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2164 					 chan->last_acked_seq);
2165 	int threshold;
2166 
2167 	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2168 	       chan, chan->last_acked_seq, chan->buffer_seq);
2169 
2170 	memset(&control, 0, sizeof(control));
2171 	control.sframe = 1;
2172 
2173 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2174 	    chan->rx_state == L2CAP_RX_STATE_RECV) {
2175 		__clear_ack_timer(chan);
2176 		control.super = L2CAP_SUPER_RNR;
2177 		control.reqseq = chan->buffer_seq;
2178 		l2cap_send_sframe(chan, &control);
2179 	} else {
2180 		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2181 			l2cap_ertm_send(chan);
2182 			/* If any i-frames were sent, they included an ack */
2183 			if (chan->buffer_seq == chan->last_acked_seq)
2184 				frames_to_ack = 0;
2185 		}
2186 
2187 		/* Ack now if the window is 3/4ths full.
2188 		 * Calculate without mul or div
2189 		 */
	/* threshold = ack_win * 3 / 4, via shift-and-add */
2190 		threshold = chan->ack_win;
2191 		threshold += threshold << 1;
2192 		threshold >>= 2;
2193 
2194 		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2195 		       threshold);
2196 
2197 		if (frames_to_ack >= threshold) {
2198 			__clear_ack_timer(chan);
2199 			control.super = L2CAP_SUPER_RR;
2200 			control.reqseq = chan->buffer_seq;
2201 			l2cap_send_sframe(chan, &control);
2202 			frames_to_ack = 0;
2203 		}
2204 
2205 		if (frames_to_ack)
2206 			__set_ack_timer(chan);
2207 	}
2208 }
2209 
/* Copy @len bytes of user data from @msg into @skb: @count bytes into
 * the head, the remainder into frag_list continuation skbs of at most
 * conn->mtu bytes each.  Returns bytes copied, -EFAULT on a failed
 * copy, or the alloc_skb error.  On error the partially-filled @skb
 * (including any attached fragments) is freed by the caller.
 */
l2cap_skbuff_fromiovec(struct l2cap_chan * chan,struct msghdr * msg,int len,int count,struct sk_buff * skb)2210 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2211 					 struct msghdr *msg, int len,
2212 					 int count, struct sk_buff *skb)
2213 {
2214 	struct l2cap_conn *conn = chan->conn;
2215 	struct sk_buff **frag;
2216 	int sent = 0;
2217 
2218 	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2219 		return -EFAULT;
2220 
2221 	sent += count;
2222 	len  -= count;
2223 
2224 	/* Continuation fragments (no L2CAP header) */
2225 	frag = &skb_shinfo(skb)->frag_list;
2226 	while (len) {
2227 		struct sk_buff *tmp;
2228 
2229 		count = min_t(unsigned int, conn->mtu, len);
2230 
2231 		tmp = chan->ops->alloc_skb(chan, 0, count,
2232 					   msg->msg_flags & MSG_DONTWAIT);
2233 		if (IS_ERR(tmp))
2234 			return PTR_ERR(tmp);
2235 
	/* Link the fragment before copying so the caller's kfree_skb()
	 * releases it even if the copy below fails.
	 */
2236 		*frag = tmp;
2237 
2238 		if (!copy_from_iter_full(skb_put(*frag, count), count,
2239 				   &msg->msg_iter))
2240 			return -EFAULT;
2241 
2242 		sent += count;
2243 		len  -= count;
2244 
	/* Account the fragment's bytes in the head skb totals */
2245 		skb->len += (*frag)->len;
2246 		skb->data_len += (*frag)->len;
2247 
2248 		frag = &(*frag)->next;
2249 	}
2250 
2251 	return sent;
2252 }
2253 
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus the
 * PSM, followed by the user payload.  Returns the skb or an ERR_PTR.
 */
l2cap_create_connless_pdu(struct l2cap_chan * chan,struct msghdr * msg,size_t len)2254 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2255 						 struct msghdr *msg, size_t len)
2256 {
2257 	struct l2cap_conn *conn = chan->conn;
2258 	struct sk_buff *skb;
2259 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2260 	struct l2cap_hdr *lh;
2261 
2262 	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2263 	       __le16_to_cpu(chan->psm), len);
2264 
	/* Head skb carries at most one MTU minus the header overhead;
	 * the rest goes into continuation fragments.
	 */
2265 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2266 
2267 	skb = chan->ops->alloc_skb(chan, hlen, count,
2268 				   msg->msg_flags & MSG_DONTWAIT);
2269 	if (IS_ERR(skb))
2270 		return skb;
2271 
2272 	/* Create L2CAP header */
2273 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2274 	lh->cid = cpu_to_le16(chan->dcid);
2275 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2276 	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2277 
2278 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2279 	if (unlikely(err < 0)) {
2280 		kfree_skb(skb);
2281 		return ERR_PTR(err);
2282 	}
2283 	return skb;
2284 }
2285 
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * user payload.  Returns the skb or an ERR_PTR.
 */
l2cap_create_basic_pdu(struct l2cap_chan * chan,struct msghdr * msg,size_t len)2286 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2287 					      struct msghdr *msg, size_t len)
2288 {
2289 	struct l2cap_conn *conn = chan->conn;
2290 	struct sk_buff *skb;
2291 	int err, count;
2292 	struct l2cap_hdr *lh;
2293 
2294 	BT_DBG("chan %p len %zu", chan, len);
2295 
2296 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2297 
2298 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2299 				   msg->msg_flags & MSG_DONTWAIT);
2300 	if (IS_ERR(skb))
2301 		return skb;
2302 
2303 	/* Create L2CAP header */
2304 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2305 	lh->cid = cpu_to_le16(chan->dcid);
2306 	lh->len = cpu_to_le16(len);
2307 
2308 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2309 	if (unlikely(err < 0)) {
2310 		kfree_skb(skb);
2311 		return ERR_PTR(err);
2312 	}
2313 	return skb;
2314 }
2315 
/* Build an ERTM/streaming I-frame PDU.  The control field is zeroed
 * here and filled in at transmit time; @sdulen non-zero marks the
 * first (SAR start) segment and adds the SDU-length field.
 * Returns the skb or an ERR_PTR.
 */
l2cap_create_iframe_pdu(struct l2cap_chan * chan,struct msghdr * msg,size_t len,u16 sdulen)2316 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2317 					       struct msghdr *msg, size_t len,
2318 					       u16 sdulen)
2319 {
2320 	struct l2cap_conn *conn = chan->conn;
2321 	struct sk_buff *skb;
2322 	int err, count, hlen;
2323 	struct l2cap_hdr *lh;
2324 
2325 	BT_DBG("chan %p len %zu", chan, len);
2326 
2327 	if (!conn)
2328 		return ERR_PTR(-ENOTCONN);
2329 
	/* Header size depends on enhanced vs extended control field */
2330 	hlen = __ertm_hdr_size(chan);
2331 
2332 	if (sdulen)
2333 		hlen += L2CAP_SDULEN_SIZE;
2334 
2335 	if (chan->fcs == L2CAP_FCS_CRC16)
2336 		hlen += L2CAP_FCS_SIZE;
2337 
2338 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2339 
2340 	skb = chan->ops->alloc_skb(chan, hlen, count,
2341 				   msg->msg_flags & MSG_DONTWAIT);
2342 	if (IS_ERR(skb))
2343 		return skb;
2344 
2345 	/* Create L2CAP header */
2346 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2347 	lh->cid = cpu_to_le16(chan->dcid);
2348 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2349 
2350 	/* Control header is populated later */
2351 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2352 		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2353 	else
2354 		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2355 
2356 	if (sdulen)
2357 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2358 
2359 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2360 	if (unlikely(err < 0)) {
2361 		kfree_skb(skb);
2362 		return ERR_PTR(err);
2363 	}
2364 
2365 	bt_cb(skb)->l2cap.fcs = chan->fcs;
2366 	bt_cb(skb)->l2cap.retries = 0;
2367 	return skb;
2368 }
2369 
/* Segment an outgoing SDU into ERTM/streaming I-frame PDUs, queuing
 * them on @seg_queue with SAR markings (UNSEGMENTED, or START /
 * CONTINUE / END).  Returns 0 on success or a negative error, in
 * which case @seg_queue has been purged.
 */
l2cap_segment_sdu(struct l2cap_chan * chan,struct sk_buff_head * seg_queue,struct msghdr * msg,size_t len)2370 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2371 			     struct sk_buff_head *seg_queue,
2372 			     struct msghdr *msg, size_t len)
2373 {
2374 	struct sk_buff *skb;
2375 	u16 sdu_len;
2376 	size_t pdu_len;
2377 	u8 sar;
2378 
2379 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2380 
2381 	/* It is critical that ERTM PDUs fit in a single HCI fragment,
2382 	 * so fragmented skbs are not used.  The HCI layer's handling
2383 	 * of fragmented skbs is not compatible with ERTM's queueing.
2384 	 */
2385 
2386 	/* PDU size is derived from the HCI MTU */
2387 	pdu_len = chan->conn->mtu;
2388 
2389 	/* Constrain PDU size for BR/EDR connections */
2390 	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2391 
2392 	/* Adjust for largest possible L2CAP overhead. */
2393 	if (chan->fcs)
2394 		pdu_len -= L2CAP_FCS_SIZE;
2395 
2396 	pdu_len -= __ertm_hdr_size(chan);
2397 
2398 	/* Remote device may have requested smaller PDUs */
2399 	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2400 
2401 	if (!pdu_len)
2402 		return -EINVAL;
2403 
	/* Single-PDU SDUs carry no SDU-length field */
2404 	if (len <= pdu_len) {
2405 		sar = L2CAP_SAR_UNSEGMENTED;
2406 		sdu_len = 0;
2407 		pdu_len = len;
2408 	} else {
2409 		sar = L2CAP_SAR_START;
2410 		sdu_len = len;
2411 	}
2412 
2413 	while (len > 0) {
2414 		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2415 
2416 		if (IS_ERR(skb)) {
2417 			__skb_queue_purge(seg_queue);
2418 			return PTR_ERR(skb);
2419 		}
2420 
2421 		bt_cb(skb)->l2cap.sar = sar;
2422 		__skb_queue_tail(seg_queue, skb);
2423 
2424 		len -= pdu_len;
	/* Only the first segment carries the SDU length */
2425 		if (sdu_len)
2426 			sdu_len = 0;
2427 
2428 		if (len <= pdu_len) {
2429 			sar = L2CAP_SAR_END;
2430 			pdu_len = len;
2431 		} else {
2432 			sar = L2CAP_SAR_CONTINUE;
2433 		}
2434 	}
2435 
2436 	return 0;
2437 }
2438 
/* Build an LE flow-control (K-frame) PDU; @sdulen non-zero marks the
 * first segment of an SDU and adds the SDU-length field.  Returns the
 * skb or an ERR_PTR.
 */
l2cap_create_le_flowctl_pdu(struct l2cap_chan * chan,struct msghdr * msg,size_t len,u16 sdulen)2439 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2440 						   struct msghdr *msg,
2441 						   size_t len, u16 sdulen)
2442 {
2443 	struct l2cap_conn *conn = chan->conn;
2444 	struct sk_buff *skb;
2445 	int err, count, hlen;
2446 	struct l2cap_hdr *lh;
2447 
2448 	BT_DBG("chan %p len %zu", chan, len);
2449 
2450 	if (!conn)
2451 		return ERR_PTR(-ENOTCONN);
2452 
2453 	hlen = L2CAP_HDR_SIZE;
2454 
2455 	if (sdulen)
2456 		hlen += L2CAP_SDULEN_SIZE;
2457 
2458 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2459 
2460 	skb = chan->ops->alloc_skb(chan, hlen, count,
2461 				   msg->msg_flags & MSG_DONTWAIT);
2462 	if (IS_ERR(skb))
2463 		return skb;
2464 
2465 	/* Create L2CAP header */
2466 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2467 	lh->cid = cpu_to_le16(chan->dcid);
2468 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2469 
2470 	if (sdulen)
2471 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2472 
2473 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2474 	if (unlikely(err < 0)) {
2475 		kfree_skb(skb);
2476 		return ERR_PTR(err);
2477 	}
2478 
2479 	return skb;
2480 }
2481 
/* Segment an outgoing SDU into LE flow-control PDUs on @seg_queue.
 * The first PDU carries the SDU length; later PDUs reclaim that space
 * for payload.  Returns 0 or a negative error (queue purged on error).
 */
l2cap_segment_le_sdu(struct l2cap_chan * chan,struct sk_buff_head * seg_queue,struct msghdr * msg,size_t len)2482 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2483 				struct sk_buff_head *seg_queue,
2484 				struct msghdr *msg, size_t len)
2485 {
2486 	struct sk_buff *skb;
2487 	size_t pdu_len;
2488 	u16 sdu_len;
2489 
2490 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2491 
2492 	sdu_len = len;
2493 	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2494 
2495 	while (len > 0) {
2496 		if (len <= pdu_len)
2497 			pdu_len = len;
2498 
2499 		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2500 		if (IS_ERR(skb)) {
2501 			__skb_queue_purge(seg_queue);
2502 			return PTR_ERR(skb);
2503 		}
2504 
2505 		__skb_queue_tail(seg_queue, skb);
2506 
2507 		len -= pdu_len;
2508 
	/* After the first segment the SDU-length field is gone, so
	 * each subsequent PDU can carry L2CAP_SDULEN_SIZE more bytes.
	 */
2509 		if (sdu_len) {
2510 			sdu_len = 0;
2511 			pdu_len += L2CAP_SDULEN_SIZE;
2512 		}
2513 	}
2514 
2515 	return 0;
2516 }
2517 
/* Drain the TX queue of an LE flow-control channel, sending one frame
 * per available credit until credits or queued frames run out.
 */
l2cap_le_flowctl_send(struct l2cap_chan * chan)2518 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2519 {
2520 	int sent = 0;
2521 
2522 	BT_DBG("chan %p", chan);
2523 
2524 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2525 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2526 		chan->tx_credits--;
2527 		sent++;
2528 	}
2529 
2530 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2531 	       skb_queue_len(&chan->tx_q));
2532 }
2533 
/* Attach TX timestamping info to @skb.  For SOCK_STREAM sockets the
 * full byte count is passed (byte-stream accounting); otherwise a
 * count of 1 is used — presumably one completion per datagram, but
 * confirm against hci_setup_tx_timestamp().
 */
l2cap_tx_timestamp(struct sk_buff * skb,const struct sockcm_cookie * sockc,size_t len)2534 static void l2cap_tx_timestamp(struct sk_buff *skb,
2535 			       const struct sockcm_cookie *sockc,
2536 			       size_t len)
2537 {
2538 	struct sock *sk = skb ? skb->sk : NULL;
2539 
2540 	if (sk && sk->sk_type == SOCK_STREAM)
2541 		hci_setup_tx_timestamp(skb, len, sockc);
2542 	else
2543 		hci_setup_tx_timestamp(skb, 1, sockc);
2544 }
2545 
/* Timestamp a segmented SDU: for SOCK_STREAM the last segment is
 * stamped (completion fires when the whole SDU is out); otherwise the
 * first segment is stamped.
 */
l2cap_tx_timestamp_seg(struct sk_buff_head * queue,const struct sockcm_cookie * sockc,size_t len)2546 static void l2cap_tx_timestamp_seg(struct sk_buff_head *queue,
2547 				   const struct sockcm_cookie *sockc,
2548 				   size_t len)
2549 {
2550 	struct sk_buff *skb = skb_peek(queue);
2551 	struct sock *sk = skb ? skb->sk : NULL;
2552 
2553 	if (sk && sk->sk_type == SOCK_STREAM)
2554 		l2cap_tx_timestamp(skb_peek_tail(queue), sockc, len);
2555 	else
2556 		l2cap_tx_timestamp(skb, sockc, len);
2557 }
2558 
/* Entry point for sending user data on an L2CAP channel.  Dispatches
 * on channel type/mode: connectionless, LE/extended flow control,
 * basic, or ERTM/streaming.  Returns the number of bytes accepted or
 * a negative error (-ENOTCONN, -EMSGSIZE, -EBADFD, or a PDU-build
 * error).
 */
l2cap_chan_send(struct l2cap_chan * chan,struct msghdr * msg,size_t len,const struct sockcm_cookie * sockc)2559 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2560 		    const struct sockcm_cookie *sockc)
2561 {
2562 	struct sk_buff *skb;
2563 	int err;
2564 	struct sk_buff_head seg_queue;
2565 
2566 	if (!chan->conn)
2567 		return -ENOTCONN;
2568 
2569 	/* Connectionless channel */
2570 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2571 		skb = l2cap_create_connless_pdu(chan, msg, len);
2572 		if (IS_ERR(skb))
2573 			return PTR_ERR(skb);
2574 
2575 		l2cap_tx_timestamp(skb, sockc, len);
2576 
2577 		l2cap_do_send(chan, skb);
2578 		return len;
2579 	}
2580 
2581 	switch (chan->mode) {
2582 	case L2CAP_MODE_LE_FLOWCTL:
2583 	case L2CAP_MODE_EXT_FLOWCTL:
2584 		/* Check outgoing MTU */
2585 		if (len > chan->omtu)
2586 			return -EMSGSIZE;
2587 
2588 		__skb_queue_head_init(&seg_queue);
2589 
2590 		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2591 
	/* Channel may have gone down while segmenting (alloc can block) */
2592 		if (chan->state != BT_CONNECTED) {
2593 			__skb_queue_purge(&seg_queue);
2594 			err = -ENOTCONN;
2595 		}
2596 
2597 		if (err)
2598 			return err;
2599 
2600 		l2cap_tx_timestamp_seg(&seg_queue, sockc, len);
2601 
2602 		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2603 
2604 		l2cap_le_flowctl_send(chan);
2605 
	/* Out of credits: ask the channel owner to stop sending */
2606 		if (!chan->tx_credits)
2607 			chan->ops->suspend(chan);
2608 
2609 		err = len;
2610 
2611 		break;
2612 
2613 	case L2CAP_MODE_BASIC:
2614 		/* Check outgoing MTU */
2615 		if (len > chan->omtu)
2616 			return -EMSGSIZE;
2617 
2618 		/* Create a basic PDU */
2619 		skb = l2cap_create_basic_pdu(chan, msg, len);
2620 		if (IS_ERR(skb))
2621 			return PTR_ERR(skb);
2622 
2623 		l2cap_tx_timestamp(skb, sockc, len);
2624 
2625 		l2cap_do_send(chan, skb);
2626 		err = len;
2627 		break;
2628 
2629 	case L2CAP_MODE_ERTM:
2630 	case L2CAP_MODE_STREAMING:
2631 		/* Check outgoing MTU */
2632 		if (len > chan->omtu) {
2633 			err = -EMSGSIZE;
2634 			break;
2635 		}
2636 
2637 		__skb_queue_head_init(&seg_queue);
2638 
2639 		/* Do segmentation before calling in to the state machine,
2640 		 * since it's possible to block while waiting for memory
2641 		 * allocation.
2642 		 */
2643 		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2644 
2645 		if (err)
2646 			break;
2647 
2648 		if (chan->mode == L2CAP_MODE_ERTM) {
2649 			/* TODO: ERTM mode timestamping */
2650 			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2651 		} else {
2652 			l2cap_tx_timestamp_seg(&seg_queue, sockc, len);
2653 			l2cap_streaming_send(chan, &seg_queue);
2654 		}
2655 
2656 		err = len;
2657 
2658 		/* If the skbs were not queued for sending, they'll still be in
2659 		 * seg_queue and need to be purged.
2660 		 */
2661 		__skb_queue_purge(&seg_queue);
2662 		break;
2663 
2664 	default:
2665 		BT_DBG("bad state %1.1x", chan->mode);
2666 		err = -EBADFD;
2667 	}
2668 
2669 	return err;
2670 }
2671 EXPORT_SYMBOL_GPL(l2cap_chan_send);
2672 
/* Send SREJ S-frames for every missing sequence number between
 * expected_tx_seq and @txseq (exclusive), skipping frames already
 * held in the SREJ queue, and record each request in srej_list.
 */
l2cap_send_srej(struct l2cap_chan * chan,u16 txseq)2673 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2674 {
2675 	struct l2cap_ctrl control;
2676 	u16 seq;
2677 
2678 	BT_DBG("chan %p, txseq %u", chan, txseq);
2679 
2680 	memset(&control, 0, sizeof(control));
2681 	control.sframe = 1;
2682 	control.super = L2CAP_SUPER_SREJ;
2683 
2684 	for (seq = chan->expected_tx_seq; seq != txseq;
2685 	     seq = __next_seq(chan, seq)) {
2686 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2687 			control.reqseq = seq;
2688 			l2cap_send_sframe(chan, &control);
2689 			l2cap_seq_list_append(&chan->srej_list, seq);
2690 		}
2691 	}
2692 
2693 	chan->expected_tx_seq = __next_seq(chan, txseq);
2694 }
2695 
/* Re-send an SREJ for the most recently requested (tail) missing
 * frame; no-op if the SREJ list is empty.
 */
l2cap_send_srej_tail(struct l2cap_chan * chan)2696 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2697 {
2698 	struct l2cap_ctrl control;
2699 
2700 	BT_DBG("chan %p", chan);
2701 
2702 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2703 		return;
2704 
2705 	memset(&control, 0, sizeof(control));
2706 	control.sframe = 1;
2707 	control.super = L2CAP_SUPER_SREJ;
2708 	control.reqseq = chan->srej_list.tail;
2709 	l2cap_send_sframe(chan, &control);
2710 }
2711 
/* Re-send SREJs for all outstanding missing frames except @txseq,
 * rotating each entry back onto srej_list.  A single pass is made:
 * the initial head is remembered so re-appended entries are not
 * processed twice.
 */
l2cap_send_srej_list(struct l2cap_chan * chan,u16 txseq)2712 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2713 {
2714 	struct l2cap_ctrl control;
2715 	u16 initial_head;
2716 	u16 seq;
2717 
2718 	BT_DBG("chan %p, txseq %u", chan, txseq);
2719 
2720 	memset(&control, 0, sizeof(control));
2721 	control.sframe = 1;
2722 	control.super = L2CAP_SUPER_SREJ;
2723 
2724 	/* Capture initial list head to allow only one pass through the list. */
2725 	initial_head = chan->srej_list.head;
2726 
2727 	do {
	/* Stop at the frame that just arrived or when the list empties */
2728 		seq = l2cap_seq_list_pop(&chan->srej_list);
2729 		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2730 			break;
2731 
2732 		control.reqseq = seq;
2733 		l2cap_send_sframe(chan, &control);
2734 		l2cap_seq_list_append(&chan->srej_list, seq);
2735 	} while (chan->srej_list.head != initial_head);
2736 }
2737 
/* Process an acknowledgment: free every TX-queued frame with a
 * sequence number before @reqseq, update expected_ack_seq, and stop
 * the retransmission timer once nothing remains unacked.
 */
l2cap_process_reqseq(struct l2cap_chan * chan,u16 reqseq)2738 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2739 {
2740 	struct sk_buff *acked_skb;
2741 	u16 ackseq;
2742 
2743 	BT_DBG("chan %p, reqseq %u", chan, reqseq);
2744 
	/* Nothing outstanding, or nothing new acknowledged */
2745 	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2746 		return;
2747 
2748 	BT_DBG("expected_ack_seq %u, unacked_frames %u",
2749 	       chan->expected_ack_seq, chan->unacked_frames);
2750 
2751 	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2752 	     ackseq = __next_seq(chan, ackseq)) {
2753 
2754 		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2755 		if (acked_skb) {
2756 			skb_unlink(acked_skb, &chan->tx_q);
2757 			kfree_skb(acked_skb);
2758 			chan->unacked_frames--;
2759 		}
2760 	}
2761 
2762 	chan->expected_ack_seq = reqseq;
2763 
2764 	if (chan->unacked_frames == 0)
2765 		__clear_retrans_timer(chan);
2766 
2767 	BT_DBG("unacked_frames %u", chan->unacked_frames);
2768 }
2769 
/* Abort SREJ recovery: discard the out-of-order RX buffer and pending
 * SREJ requests, and fall back to the plain RECV state.
 */
l2cap_abort_rx_srej_sent(struct l2cap_chan * chan)2770 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2771 {
2772 	BT_DBG("chan %p", chan);
2773 
2774 	chan->expected_tx_seq = chan->buffer_seq;
2775 	l2cap_seq_list_clear(&chan->srej_list);
2776 	skb_queue_purge(&chan->srej_q);
2777 	chan->rx_state = L2CAP_RX_STATE_RECV;
2778 }
2779 
/* ERTM TX state machine, XMIT state: normal transmission is allowed.
 * Handles data requests, local-busy transitions, received acks, and
 * poll/retransmission-timeout events (the latter move to WAIT_F).
 */
l2cap_tx_state_xmit(struct l2cap_chan * chan,struct l2cap_ctrl * control,struct sk_buff_head * skbs,u8 event)2780 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2781 				struct l2cap_ctrl *control,
2782 				struct sk_buff_head *skbs, u8 event)
2783 {
2784 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2785 	       event);
2786 
2787 	switch (event) {
2788 	case L2CAP_EV_DATA_REQUEST:
2789 		if (chan->tx_send_head == NULL)
2790 			chan->tx_send_head = skb_peek(skbs);
2791 
	/* Queue the new segments and transmit what the window allows */
2792 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2793 		l2cap_ertm_send(chan);
2794 		break;
2795 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2796 		BT_DBG("Enter LOCAL_BUSY");
2797 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2798 
2799 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2800 			/* The SREJ_SENT state must be aborted if we are to
2801 			 * enter the LOCAL_BUSY state.
2802 			 */
2803 			l2cap_abort_rx_srej_sent(chan);
2804 		}
2805 
2806 		l2cap_send_ack(chan);
2807 
2808 		break;
2809 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2810 		BT_DBG("Exit LOCAL_BUSY");
2811 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2812 
	/* If an RNR was sent, poll the peer with RR(P=1) and wait for
	 * the final bit in WAIT_F state.
	 */
2813 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2814 			struct l2cap_ctrl local_control;
2815 
2816 			memset(&local_control, 0, sizeof(local_control));
2817 			local_control.sframe = 1;
2818 			local_control.super = L2CAP_SUPER_RR;
2819 			local_control.poll = 1;
2820 			local_control.reqseq = chan->buffer_seq;
2821 			l2cap_send_sframe(chan, &local_control);
2822 
2823 			chan->retry_count = 1;
2824 			__set_monitor_timer(chan);
2825 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2826 		}
2827 		break;
2828 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2829 		l2cap_process_reqseq(chan, control->reqseq);
2830 		break;
2831 	case L2CAP_EV_EXPLICIT_POLL:
2832 		l2cap_send_rr_or_rnr(chan, 1);
2833 		chan->retry_count = 1;
2834 		__set_monitor_timer(chan);
2835 		__clear_ack_timer(chan);
2836 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2837 		break;
2838 	case L2CAP_EV_RETRANS_TO:
2839 		l2cap_send_rr_or_rnr(chan, 1);
2840 		chan->retry_count = 1;
2841 		__set_monitor_timer(chan);
2842 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2843 		break;
2844 	case L2CAP_EV_RECV_FBIT:
2845 		/* Nothing to process */
2846 		break;
2847 	default:
2848 		break;
2849 	}
2850 }
2851 
/* ERTM TX state machine, WAIT_F state: a poll (P=1) was sent and we
 * are waiting for the peer's final (F=1) response.  New data is
 * queued but not transmitted; receiving the F-bit returns the channel
 * to XMIT; repeated monitor timeouts beyond max_tx disconnect.
 */
l2cap_tx_state_wait_f(struct l2cap_chan * chan,struct l2cap_ctrl * control,struct sk_buff_head * skbs,u8 event)2852 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2853 				  struct l2cap_ctrl *control,
2854 				  struct sk_buff_head *skbs, u8 event)
2855 {
2856 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2857 	       event);
2858 
2859 	switch (event) {
2860 	case L2CAP_EV_DATA_REQUEST:
2861 		if (chan->tx_send_head == NULL)
2862 			chan->tx_send_head = skb_peek(skbs);
2863 		/* Queue data, but don't send. */
2864 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2865 		break;
2866 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2867 		BT_DBG("Enter LOCAL_BUSY");
2868 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2869 
2870 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2871 			/* The SREJ_SENT state must be aborted if we are to
2872 			 * enter the LOCAL_BUSY state.
2873 			 */
2874 			l2cap_abort_rx_srej_sent(chan);
2875 		}
2876 
2877 		l2cap_send_ack(chan);
2878 
2879 		break;
2880 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2881 		BT_DBG("Exit LOCAL_BUSY");
2882 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2883 
	/* If an RNR was sent, poll the peer again with RR(P=1) */
2884 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2885 			struct l2cap_ctrl local_control;
2886 			memset(&local_control, 0, sizeof(local_control));
2887 			local_control.sframe = 1;
2888 			local_control.super = L2CAP_SUPER_RR;
2889 			local_control.poll = 1;
2890 			local_control.reqseq = chan->buffer_seq;
2891 			l2cap_send_sframe(chan, &local_control);
2892 
2893 			chan->retry_count = 1;
2894 			__set_monitor_timer(chan);
2895 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2896 		}
2897 		break;
2898 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2899 		l2cap_process_reqseq(chan, control->reqseq);
2900 		fallthrough;
2901 
2902 	case L2CAP_EV_RECV_FBIT:
	/* Final bit received: poll answered, back to normal XMIT */
2903 		if (control && control->final) {
2904 			__clear_monitor_timer(chan);
2905 			if (chan->unacked_frames > 0)
2906 				__set_retrans_timer(chan);
2907 			chan->retry_count = 0;
2908 			chan->tx_state = L2CAP_TX_STATE_XMIT;
	/* Fixed malformed format: was "0x2.2%x", which printed a
	 * literal "0x2.2" followed by unpadded hex.
	 */
2909 			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2910 		}
2911 		break;
2912 	case L2CAP_EV_EXPLICIT_POLL:
2913 		/* Ignore */
2914 		break;
2915 	case L2CAP_EV_MONITOR_TO:
2916 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2917 			l2cap_send_rr_or_rnr(chan, 1);
2918 			__set_monitor_timer(chan);
2919 			chan->retry_count++;
2920 		} else {
2921 			l2cap_send_disconn_req(chan, ECONNABORTED);
2922 		}
2923 		break;
2924 	default:
2925 		break;
2926 	}
2927 }
2928 
/* Dispatch a TX event to the handler for the current ERTM TX state
 * (XMIT or WAIT_F); events in other states are ignored.
 */
l2cap_tx(struct l2cap_chan * chan,struct l2cap_ctrl * control,struct sk_buff_head * skbs,u8 event)2929 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2930 		     struct sk_buff_head *skbs, u8 event)
2931 {
2932 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2933 	       chan, control, skbs, event, chan->tx_state);
2934 
2935 	switch (chan->tx_state) {
2936 	case L2CAP_TX_STATE_XMIT:
2937 		l2cap_tx_state_xmit(chan, control, skbs, event);
2938 		break;
2939 	case L2CAP_TX_STATE_WAIT_F:
2940 		l2cap_tx_state_wait_f(chan, control, skbs, event);
2941 		break;
2942 	default:
2943 		/* Ignore event */
2944 		break;
2945 	}
2946 }
2947 
/* Forward a received reqseq + F-bit to the TX state machine */
l2cap_pass_to_tx(struct l2cap_chan * chan,struct l2cap_ctrl * control)2948 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2949 			     struct l2cap_ctrl *control)
2950 {
2951 	BT_DBG("chan %p, control %p", chan, control);
2952 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2953 }
2954 
/* Forward a received F-bit (without reqseq) to the TX state machine */
l2cap_pass_to_tx_fbit(struct l2cap_chan * chan,struct l2cap_ctrl * control)2955 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2956 				  struct l2cap_ctrl *control)
2957 {
2958 	BT_DBG("chan %p, control %p", chan, control);
2959 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2960 }
2961 
2962 /* Copy frame to all raw sockets on that connection */
l2cap_raw_recv(struct l2cap_conn * conn,struct sk_buff * skb)2963 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2964 {
2965 	struct sk_buff *nskb;
2966 	struct l2cap_chan *chan;
2967 
2968 	BT_DBG("conn %p", conn);
2969 
2970 	list_for_each_entry(chan, &conn->chan_l, list) {
2971 		if (chan->chan_type != L2CAP_CHAN_RAW)
2972 			continue;
2973 
2974 		/* Don't send frame to the channel it came from */
2975 		if (bt_cb(skb)->l2cap.chan == chan)
2976 			continue;
2977 
	/* Clone per channel; skip a channel silently if cloning fails */
2978 		nskb = skb_clone(skb, GFP_KERNEL);
2979 		if (!nskb)
2980 			continue;
	/* The recv callback takes ownership on success; free on failure */
2981 		if (chan->ops->recv(chan, nskb))
2982 			kfree_skb(nskb);
2983 	}
2984 }
2985 
2986 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: L2CAP header (BR/EDR or LE
 * signalling CID), command header, and @dlen bytes of @data —
 * overflowing into frag_list fragments of at most conn->mtu bytes.
 * Returns NULL on allocation failure or if the MTU cannot hold the
 * headers.
 */
l2cap_build_cmd(struct l2cap_conn * conn,u8 code,u8 ident,u16 dlen,void * data)2987 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2988 				       u8 ident, u16 dlen, void *data)
2989 {
2990 	struct sk_buff *skb, **frag;
2991 	struct l2cap_cmd_hdr *cmd;
2992 	struct l2cap_hdr *lh;
2993 	int len, count;
2994 
2995 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2996 	       conn, code, ident, dlen);
2997 
2998 	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2999 		return NULL;
3000 
3001 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
3002 	count = min_t(unsigned int, conn->mtu, len);
3003 
3004 	skb = bt_skb_alloc(count, GFP_KERNEL);
3005 	if (!skb)
3006 		return NULL;
3007 
3008 	lh = skb_put(skb, L2CAP_HDR_SIZE);
3009 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
3010 
3011 	if (conn->hcon->type == LE_LINK)
3012 		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
3013 	else
3014 		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
3015 
3016 	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
3017 	cmd->code  = code;
3018 	cmd->ident = ident;
3019 	cmd->len   = cpu_to_le16(dlen);
3020 
	/* Fit as much payload as remains in the head skb */
3021 	if (dlen) {
3022 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
3023 		skb_put_data(skb, data, count);
3024 		data += count;
3025 	}
3026 
3027 	len -= skb->len;
3028 
3029 	/* Continuation fragments (no L2CAP header) */
3030 	frag = &skb_shinfo(skb)->frag_list;
3031 	while (len) {
3032 		count = min_t(unsigned int, conn->mtu, len);
3033 
3034 		*frag = bt_skb_alloc(count, GFP_KERNEL);
3035 		if (!*frag)
3036 			goto fail;
3037 
3038 		skb_put_data(*frag, data, count);
3039 
3040 		len  -= count;
3041 		data += count;
3042 
3043 		frag = &(*frag)->next;
3044 	}
3045 
3046 	return skb;
3047 
3048 fail:
	/* Frees the head skb and any fragments already chained */
3049 	kfree_skb(skb);
3050 	return NULL;
3051 }
3052 
/* Parse one configuration option at *ptr, returning its type, length,
 * and value (widths 1/2/4 are decoded little-endian; anything else is
 * returned as a pointer to the raw bytes).  Advances *ptr past the
 * option and returns the number of bytes consumed.
 * NOTE(review): opt->len is taken from the wire unvalidated here —
 * callers are responsible for bounds-checking against the remaining
 * buffer.
 */
l2cap_get_conf_opt(void ** ptr,int * type,int * olen,unsigned long * val)3053 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3054 				     unsigned long *val)
3055 {
3056 	struct l2cap_conf_opt *opt = *ptr;
3057 	int len;
3058 
3059 	len = L2CAP_CONF_OPT_SIZE + opt->len;
3060 	*ptr += len;
3061 
3062 	*type = opt->type;
3063 	*olen = opt->len;
3064 
3065 	switch (opt->len) {
3066 	case 1:
3067 		*val = *((u8 *) opt->val);
3068 		break;
3069 
3070 	case 2:
3071 		*val = get_unaligned_le16(opt->val);
3072 		break;
3073 
3074 	case 4:
3075 		*val = get_unaligned_le32(opt->val);
3076 		break;
3077 
3078 	default:
	/* Variable-length option: hand back a pointer to the bytes */
3079 		*val = (unsigned long) opt->val;
3080 		break;
3081 	}
3082 
3083 	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
3084 	return len;
3085 }
3086 
/* Append one configuration option to an outgoing Configure PDU.
 *
 * @ptr:  in/out cursor into the output buffer; advanced past the option
 *        when it is written
 * @type: option type byte
 * @len:  length of the option value
 * @val:  value to encode; for lengths other than 1/2/4 it is treated as
 *        a pointer to @len raw bytes
 * @size: bytes still available at *ptr; an option that does not fit is
 *        silently dropped and *ptr is left untouched
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	/* Not enough room left: drop the option rather than overflow */
	if (size < L2CAP_CONF_OPT_SIZE + len)
		return;

	opt->type = type;
	opt->len  = len;

	if (len == 1)
		*((u8 *) opt->val) = val;
	else if (len == 2)
		put_unaligned_le16(val, opt->val);
	else if (len == 4)
		put_unaligned_le32(val, opt->val);
	else
		memcpy(opt->val, (void *) val, len);

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3119 
/* Append an Extended Flow Specification option describing the local
 * channel requirements.  Only ERTM and Streaming modes carry an EFS;
 * for any other mode nothing is written.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
	struct l2cap_conf_efs efs;

	if (chan->mode == L2CAP_MODE_ERTM) {
		efs.id		= chan->local_id;
		efs.stype	= chan->local_stype;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
	} else if (chan->mode == L2CAP_MODE_STREAMING) {
		/* Streaming mode advertises a fixed best-effort service */
		efs.id		= 1;
		efs.stype	= L2CAP_SERV_BESTEFFORT;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= 0;
		efs.flush_to	= 0;
	} else {
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs, size);
}
3150 
/* Delayed-work handler for the ERTM acknowledgment timer.
 *
 * If received I-frames are still unacknowledged (buffer_seq has moved
 * past last_acked_seq) send an RR/RNR S-frame to acknowledge them.
 *
 * The work owns a channel reference taken when the timer was armed;
 * l2cap_chan_put() at the end drops it.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Frames received but not yet acknowledged */
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3170 
l2cap_ertm_init(struct l2cap_chan * chan)3171 int l2cap_ertm_init(struct l2cap_chan *chan)
3172 {
3173 	int err;
3174 
3175 	chan->next_tx_seq = 0;
3176 	chan->expected_tx_seq = 0;
3177 	chan->expected_ack_seq = 0;
3178 	chan->unacked_frames = 0;
3179 	chan->buffer_seq = 0;
3180 	chan->frames_sent = 0;
3181 	chan->last_acked_seq = 0;
3182 	chan->sdu = NULL;
3183 	chan->sdu_last_frag = NULL;
3184 	chan->sdu_len = 0;
3185 
3186 	skb_queue_head_init(&chan->tx_q);
3187 
3188 	if (chan->mode != L2CAP_MODE_ERTM)
3189 		return 0;
3190 
3191 	chan->rx_state = L2CAP_RX_STATE_RECV;
3192 	chan->tx_state = L2CAP_TX_STATE_XMIT;
3193 
3194 	skb_queue_head_init(&chan->srej_q);
3195 
3196 	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3197 	if (err < 0)
3198 		return err;
3199 
3200 	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3201 	if (err < 0)
3202 		l2cap_seq_list_free(&chan->srej_list);
3203 
3204 	return err;
3205 }
3206 
/* Pick the channel mode: keep the requested ERTM/Streaming mode if the
 * remote's feature mask advertises support for it, otherwise fall back
 * to Basic mode.
 */
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
{
	if ((mode == L2CAP_MODE_STREAMING || mode == L2CAP_MODE_ERTM) &&
	    l2cap_mode_supported(mode, remote_feat_mask))
		return mode;

	return L2CAP_MODE_BASIC;
}
3219 
__l2cap_ews_supported(struct l2cap_conn * conn)3220 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3221 {
3222 	return (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW);
3223 }
3224 
__l2cap_efs_supported(struct l2cap_conn * conn)3225 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3226 {
3227 	return (conn->feat_mask & L2CAP_FEAT_EXT_FLOW);
3228 }
3229 
__l2cap_set_ertm_timeouts(struct l2cap_chan * chan,struct l2cap_conf_rfc * rfc)3230 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3231 				      struct l2cap_conf_rfc *rfc)
3232 {
3233 	rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3234 	rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3235 }
3236 
l2cap_txwin_setup(struct l2cap_chan * chan)3237 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3238 {
3239 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3240 	    __l2cap_ews_supported(chan->conn)) {
3241 		/* use extended control field */
3242 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3243 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3244 	} else {
3245 		chan->tx_win = min_t(u16, chan->tx_win,
3246 				     L2CAP_DEFAULT_TX_WINDOW);
3247 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3248 	}
3249 	chan->ack_win = chan->tx_win;
3250 }
3251 
l2cap_mtu_auto(struct l2cap_chan * chan)3252 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3253 {
3254 	struct hci_conn *conn = chan->conn->hcon;
3255 
3256 	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3257 
3258 	/* The 2-DH1 packet has between 2 and 56 information bytes
3259 	 * (including the 2-byte payload header)
3260 	 */
3261 	if (!(conn->pkt_type & HCI_2DH1))
3262 		chan->imtu = 54;
3263 
3264 	/* The 3-DH1 packet has between 2 and 85 information bytes
3265 	 * (including the 2-byte payload header)
3266 	 */
3267 	if (!(conn->pkt_type & HCI_3DH1))
3268 		chan->imtu = 83;
3269 
3270 	/* The 2-DH3 packet has between 2 and 369 information bytes
3271 	 * (including the 2-byte payload header)
3272 	 */
3273 	if (!(conn->pkt_type & HCI_2DH3))
3274 		chan->imtu = 367;
3275 
3276 	/* The 3-DH3 packet has between 2 and 554 information bytes
3277 	 * (including the 2-byte payload header)
3278 	 */
3279 	if (!(conn->pkt_type & HCI_3DH3))
3280 		chan->imtu = 552;
3281 
3282 	/* The 2-DH5 packet has between 2 and 681 information bytes
3283 	 * (including the 2-byte payload header)
3284 	 */
3285 	if (!(conn->pkt_type & HCI_2DH5))
3286 		chan->imtu = 679;
3287 
3288 	/* The 3-DH5 packet has between 2 and 1023 information bytes
3289 	 * (including the 2-byte payload header)
3290 	 */
3291 	if (!(conn->pkt_type & HCI_3DH5))
3292 		chan->imtu = 1021;
3293 }
3294 
/* Build our Configure Request for @chan into @data (at most @data_size
 * bytes) and return its length.
 *
 * On the first request (no prior req/rsp exchanged) this may also
 * downgrade chan->mode via l2cap_select_mode() when the remote lacks
 * ERTM/Streaming support, and enables EFS when the remote supports it.
 * Options that do not fit in the buffer are silently dropped by
 * l2cap_add_conf_opt().
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Only (re)select the mode on the very first exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* "State 2" devices keep their configured mode as-is */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		fallthrough;
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only send an MTU option when it differs from the default;
	 * imtu == 0 means "pick one automatically from the link".
	 */
	if (chan->imtu != L2CAP_DEFAULT_MTU) {
		if (!chan->imtu)
			l2cap_mtu_auto(chan);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
				   endptr - ptr);
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only advertise Basic explicitly when the remote knows
		 * about other modes at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* Largest PDU that still fits the link MTU with extended
		 * header, SDU length field and FCS accounted for.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		/* Full window goes in the EWS option when extended
		 * control is in use.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3420 
/* Parse the remote's Configure Request (stored in chan->conf_req /
 * chan->conf_len) and build our Configure Response into @data (at most
 * @data_size bytes).
 *
 * Returns the response length, or -ECONNREFUSED when the negotiation
 * cannot proceed (remote sent EWS, requires EFS we don't support,
 * insists on an incompatible mode, or retries are exhausted).
 *
 * The response echoes back our adjusted view of each negotiable option
 * with result SUCCESS, UNACCEPT, UNKNOWN or PENDING.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *endptr = data + data_size;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = 0;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: decode every option the remote sent.  Options with
	 * an unexpected length are silently ignored.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		if (len < 0)
			break;

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			/* QoS option is accepted but ignored */
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* Extended window sizes are not accepted here */
			return -ECONNREFUSED;

		default:
			/* Unknown non-hint options must be reported back */
			if (hint)
				break;
			result = L2CAP_CONF_UNKNOWN;
			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
			break;
		}
	}

	/* Mode selection only happens on the first exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* State 2 devices refuse any mode other than their own */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Second-time rejection of the mode ends the negotiation */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		/* If MTU is not provided in configure request, try adjusting it
		 * to the current output MTU if it has been set
		 *
		 * Bluetooth Core 6.1, Vol 3, Part A, Section 4.5
		 *
		 * Each configuration parameter value (if any is present) in an
		 * L2CAP_CONFIGURATION_RSP packet reflects an ‘adjustment’ to a
		 * configuration parameter value that has been sent (or, in case
		 * of default values, implied) in the corresponding
		 * L2CAP_CONFIGURATION_REQ packet.
		 */
		if (!mtu) {
			/* Only adjust for ERTM channels as for older modes the
			 * remote stack may not be able to detect that the
			 * adjustment causing it to silently drop packets.
			 */
			if (chan->mode == L2CAP_MODE_ERTM &&
			    chan->omtu && chan->omtu != L2CAP_DEFAULT_MTU)
				mtu = chan->omtu;
			else
				mtu = L2CAP_DEFAULT_MTU;
		}

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);

		if (remote_efs) {
			/* Service types must match unless one side is
			 * "no traffic".
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* EWS (if received) supersedes the RFC window */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp remote PDU size to what fits our link MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);

			if (remote_efs &&
			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3663 
/* Parse the remote's Configure Response in @rsp (@len bytes) and build
 * the follow-up Configure Request into @data (at most @size bytes),
 * echoing back the adjusted options we accept.
 *
 * @result: in/out response result; may be downgraded to UNACCEPT when
 *          the remote proposed an MTU below the minimum.
 *
 * Returns the length of the new request, or -ECONNREFUSED when the
 * adjusted parameters are unacceptable (mode change on a state 2
 * device, incompatible EFS service type, or mode forced away from
 * Basic).
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	/* Walk the adjusted options; bad lengths are silently skipped */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			/* State 2 devices never change mode */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* Ack window may only shrink */
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			/* Service types must be compatible */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* Basic mode cannot be negotiated away */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	/* Latch the agreed ERTM/Streaming parameters */
	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			/* NOTE(review): efs is only initialized when the
			 * remote echoed an EFS option; FLAG_EFS_ENABLE
			 * implies it was negotiated — confirm the flag and
			 * option always travel together.
			 */
			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0);

	return ptr - data;
}
3781 
/* Build a minimal Configure Response carrying only result and flags
 * (no options).  Returns the number of bytes written, i.e. just the
 * response header.
 */
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
				u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;

	BT_DBG("chan %p", chan);

	rsp->flags  = cpu_to_le16(flags);
	rsp->result = cpu_to_le16(result);
	rsp->scid   = cpu_to_le16(chan->dcid);

	return ptr - data;
}
3796 
__l2cap_le_connect_rsp_defer(struct l2cap_chan * chan)3797 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3798 {
3799 	struct l2cap_le_conn_rsp rsp;
3800 	struct l2cap_conn *conn = chan->conn;
3801 
3802 	BT_DBG("chan %p", chan);
3803 
3804 	rsp.dcid    = cpu_to_le16(chan->scid);
3805 	rsp.mtu     = cpu_to_le16(chan->imtu);
3806 	rsp.mps     = cpu_to_le16(chan->mps);
3807 	rsp.credits = cpu_to_le16(chan->rx_credits);
3808 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3809 
3810 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3811 		       &rsp);
3812 }
3813 
/* Iterator callback: classify a channel sharing the same ident.
 *
 * *result counts channels still waiting for user-space accept and is
 * set to -ECONNREFUSED if any channel ended up neither connected nor
 * pending.  Channels belonging to an outgoing request are skipped.
 */
static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
{
	int *result = data;

	/* Stop once a refusal was recorded; ignore outgoing channels */
	if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	if (chan->state == BT_CONNECT2) {
		/* Channel still pending accept: add it to the count */
		(*result)++;
	} else if (chan->state != BT_CONNECTED) {
		/* Not connected or pending accept: it has been refused */
		*result = -ECONNREFUSED;
	}
}
3834 
/* Scratch buffer for building a deferred Enhanced Credit Based
 * Connection Response: the fixed response header followed by one DCID
 * slot per channel the request may carry.
 */
struct l2cap_ecred_rsp_data {
	struct {
		struct l2cap_ecred_conn_rsp_hdr rsp;
		__le16 scid[L2CAP_ECRED_MAX_CID];	/* DCIDs appended so far */
	} __packed pdu;
	int count;	/* number of scid[] entries populated */
};
3842 
/* Iterator callback: fold one deferred channel into the Enhanced Credit
 * Based Connection Response being assembled in @data.
 *
 * rsp_flex recovers the flexible-array view of the response from the
 * fixed header stored in the scratch buffer so dcid[] entries can be
 * appended after it.
 */
static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_rsp_data *rsp = data;
	struct l2cap_ecred_conn_rsp *rsp_flex =
		container_of(&rsp->pdu.rsp, struct l2cap_ecred_conn_rsp, hdr);

	/* Check if channel for outgoing connection or if it wasn't deferred
	 * since in those cases it must be skipped.
	 */
	if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags) ||
	    !test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	/* Reset ident so only one response is sent */
	chan->ident = 0;

	/* Include all channels pending with the same ident */
	if (!rsp->pdu.rsp.result)
		rsp_flex->dcid[rsp->count++] = cpu_to_le16(chan->scid);
	else
		/* Overall failure: tear the channel down instead */
		l2cap_chan_del(chan, ECONNRESET);
}
3865 
/* Send the deferred Enhanced Credit Based Connection Response covering
 * every channel created by the request identified by chan->ident.
 *
 * Nothing is sent while any sibling channel is still awaiting
 * user-space accept; if any sibling was refused the whole response
 * carries L2CAP_CR_LE_AUTHORIZATION.
 */
void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_rsp_data data;
	u16 id = chan->ident;
	int result = 0;

	/* ident == 0 means the response was already sent */
	if (!id)
		return;

	BT_DBG("chan %p id %d", chan, id);

	memset(&data, 0, sizeof(data));

	data.pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.rsp.mps     = cpu_to_le16(chan->mps);
	data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);

	/* Verify that all channels are ready */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);

	/* result > 0: some channel is still pending accept — wait */
	if (result > 0)
		return;

	if (result < 0)
		data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);

	/* Build response */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);

	l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
		       sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
		       &data.pdu);
}
3901 
/* Complete a BR/EDR connection that was deferred to user space: send
 * the success Connection Response and, unless already done, kick off
 * configuration by sending our Configure Request.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];
	u8 rsp_code;

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
	rsp_code = L2CAP_CONN_RSP;

	BT_DBG("chan %p rsp_code %u", chan, rsp_code);

	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);

	/* test_and_set guarantees the Configure Request goes out once */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
	chan->num_conf_req++;
}
3926 
/* Extract the final RFC (and extended window) parameters from a
 * successful Configure Response so the channel's timeouts, MPS and ack
 * window match what was actually agreed.  Only meaningful for ERTM and
 * Streaming channels.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	/* Pull only the options relevant here; bad lengths are ignored */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* Extended control uses the EWS window, else the RFC one */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
3982 
/* Handle an incoming Command Reject.  If it rejects our outstanding
 * Information Request (the feature-mask probe), stop waiting for the
 * answer and start the connection with the information we have.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
		return 0;

	/* Only interested in a reject of our pending info request */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) ||
	    cmd->ident != conn->info_ident)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);

	return 0;
}
4007 
/* Handle an incoming Connection Request.
 *
 * Looks up a listening channel for the requested PSM, performs the
 * security and CID validity checks, creates the new channel and replies
 * with @rsp_code carrying the result/status.  Depending on security and
 * FLAG_DEFER_SETUP the channel ends up in BT_CONFIG or BT_CONNECT2.
 * May additionally fire an Information Request (feature mask) and/or
 * our first Configure Request.
 */
static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
			  u8 *data, u8 rsp_code)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    (!hci_conn_check_link_mode(conn->hcon) ||
	    !l2cap_check_enc_key_size(conn->hcon, pchan))) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Wait for user space to accept */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still running */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask not known yet: answer pending */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* No listener found: nothing else to do */
	if (!pchan)
		return;

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* Connection accepted immediately: start configuration */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);
}
4138 
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	/* A Connection Request must carry at least PSM + source CID;
	 * anything shorter is a protocol violation.
	 */
	if (cmd_len >= sizeof(struct l2cap_conn_req)) {
		l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP);
		return 0;
	}

	return -EPROTO;
}
4148 
/* Handle an incoming Connection Response (or Create Channel Response)
 * for a BR/EDR connection we initiated.
 *
 * Returns 0 on success, -EPROTO on a malformed packet and -EBADSLT when
 * no matching local channel exists or the remote's DCID collides with
 * an existing channel.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	/* On success the remote must hand us a CID from the dynamic range */
	if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	/* Locate our channel: by our source CID if the remote echoed it,
	 * otherwise by the signalling identifier of the original request.
	 */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EBADSLT;
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EBADSLT;
	}

	/* Take a reference unless the channel is already being freed */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Reject a DCID already in use by another channel */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Send the initial Configuration Request only once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result code is a refusal: tear the channel down */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return err;
}
4226 
set_default_fcs(struct l2cap_chan * chan)4227 static inline void set_default_fcs(struct l2cap_chan *chan)
4228 {
4229 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4230 	 * sides request it.
4231 	 */
4232 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4233 		chan->fcs = L2CAP_FCS_NONE;
4234 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4235 		chan->fcs = L2CAP_FCS_CRC16;
4236 }
4237 
/* Send a successful Configuration Response with the given flags and mark
 * the local side of the configuration exchange as finished.
 */
static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
				    u8 ident, u16 flags)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
	       flags);

	/* Local config no longer pending; our output side is done */
	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
		       l2cap_build_conf_rsp(chan, data,
					    L2CAP_CONF_SUCCESS, flags), data);
}
4253 
/* Send a Command Reject with reason Invalid CID, echoing the offending
 * source/destination CID pair back to the remote.
 */
static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
				   u16 scid, u16 dcid)
{
	struct l2cap_cmd_rej_cid rej;

	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
	/* Use cpu_to_le16 consistently rather than the internal
	 * double-underscore variant used for reason above.
	 */
	rej.scid = cpu_to_le16(scid);
	rej.dcid = cpu_to_le16(dcid);

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
4265 
/* Handle an incoming Configuration Request for a BR/EDR channel.
 *
 * Configuration options may arrive split over several requests using the
 * continuation flag; fragments are accumulated in chan->conf_req and only
 * parsed once a request without the continuation flag arrives.
 *
 * Returns 0 or a negative errno (-EPROTO on a malformed packet, or the
 * error propagated from l2cap_ertm_init()).
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Lookup returns the channel locked and with a reference held */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only legal in these states */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
	    chan->state != BT_CONNECTED) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
		chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->state != BT_CONNECTED) {
			if (chan->mode == L2CAP_MODE_ERTM ||
			    chan->mode == L2CAP_MODE_STREAMING)
				err = l2cap_ertm_init(chan);

			if (err < 0)
				l2cap_send_disconn_req(chan, -err);
			else
				l2cap_chan_ready(chan);
		}

		goto unlock;
	}

	/* We haven't sent our own Configuration Request yet: do it now */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
	}

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4376 
/* Handle an incoming Configuration Response for a BR/EDR channel.
 *
 * Depending on the result code this either completes our side of the
 * configuration, records a pending state, retries with adjusted options
 * (UNKNOWN/UNACCEPT), or tears the channel down.
 *
 * Returns 0 or a negative errno (-EPROTO on a malformed packet, or the
 * error propagated from l2cap_ertm_init()).
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	/* Option payload length; only used after the size check below */
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Lookup returns the channel locked and with a reference held */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		/* If we were also pending locally, we can now answer */
		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0);
		}
		goto done;

	case L2CAP_CONF_UNKNOWN:
	case L2CAP_CONF_UNACCEPT:
		/* Retry with adjusted options, bounded by the max number
		 * of configuration rounds.
		 */
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		fallthrough;

	default:
		/* Unrecoverable: fail the channel and request disconnect */
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* More option fragments are coming; wait for the final response */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4483 
/* Handle an incoming Disconnection Request: acknowledge it with a
 * Disconnection Response and tear down the matching channel.
 *
 * Returns 0, or -EPROTO if the packet has the wrong size.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The remote's DCID is our SCID; lookup returns the channel locked
	 * and with a reference held.
	 */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	/* Echo the CID pair back from our perspective */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	l2cap_chan_del(chan, ECONNRESET);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4522 
/* Handle an incoming Disconnection Response: complete the teardown of a
 * channel for which we previously sent a Disconnection Request.
 *
 * Returns 0, or -EPROTO if the packet has the wrong size.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* Lookup returns the channel locked and with a reference held.
	 * Drop the redundant braces to match kernel coding style and the
	 * sibling handlers in this file.
	 */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	/* Ignore the response unless we actually initiated a disconnect */
	if (chan->state != BT_DISCONN) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return 0;
	}

	l2cap_chan_del(chan, 0);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4559 
/* Handle an incoming Information Request by replying with either the
 * supported feature mask, the fixed channel map, or "not supported".
 *
 * Returns 0, or -EPROTO if the packet has the wrong size.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	switch (type) {
	case L2CAP_IT_FEAT_MASK: {
		u8 buf[8];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		u32 feat_mask = l2cap_feat_mask;

		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
		break;
	}
	case L2CAP_IT_FIXED_CHAN: {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		rsp->data[0] = conn->local_fixed_chan;
		/* Remaining 7 octets of the fixed channel map are zero */
		memset(rsp->data + 1, 0, 7);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
		break;
	}
	default: {
		struct l2cap_info_rsp rsp;

		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
		break;
	}
	}

	return 0;
}
4607 
/* Handle an incoming Information Response, driving the two-step feature
 * discovery (feature mask first, then fixed channels) that gates starting
 * queued channels via l2cap_conn_start().
 *
 * Returns 0, or -EPROTO if the packet is too short.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	/* On failure, mark discovery done and start pending channels */
	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		if (cmd_len >= sizeof(*rsp) + sizeof(u32))
			conn->feat_mask = get_unaligned_le32(rsp->data);

		/* If fixed channels are supported, query them next;
		 * otherwise discovery is complete.
		 */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		if (cmd_len >= sizeof(*rsp) + sizeof(rsp->data[0]))
			conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4672 
/* Handle an LE Connection Parameter Update Request from the peripheral.
 *
 * Only valid when we are the central (master); the parameters are
 * validated, a response is sent, and accepted parameters are pushed to
 * the controller and reported to the management interface.
 *
 * Returns 0, -EINVAL if we are not the central, or -EPROTO on a
 * malformed packet.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min		= __le16_to_cpu(req->min);
	max		= __le16_to_cpu(req->max);
	latency		= __le16_to_cpu(req->latency);
	to_multiplier	= __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	/* Accept only parameters that pass the HCI range checks */
	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		/* Apply the new parameters and let mgmt decide whether
		 * userspace should store them.
		 */
		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);

	}

	return 0;
}
4722 
/* Handle an LE Credit Based Connection Response for a channel we
 * initiated.
 *
 * On success the channel is made ready; on an authentication/encryption
 * failure the security level is elevated and pairing is (re)started so a
 * new Connect Request can be sent; any other result tears the channel
 * down.
 *
 * Returns 0, -EPROTO on a malformed packet, or -EBADSLT when no matching
 * channel exists or the DCID collides with an existing channel.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid    = __le16_to_cpu(rsp->dcid);
	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	/* On success the MTU/MPS must meet the LE minimum (23) and the
	 * DCID must fall in the LE dynamic range.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Reject a DCID already in use by another channel */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Bump the required security level one step above the
		 * current link level before retrying.
		 */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

	return err;
}
4802 
/* Release a signalling identifier back to the connection's tx IDA when a
 * response-type command arrives, completing the request/response pair.
 * Request-type codes fall through the switch and release nothing.
 */
static void l2cap_put_ident(struct l2cap_conn *conn, u8 code, u8 id)
{
	switch (code) {
	case L2CAP_COMMAND_REJ:
	case L2CAP_CONN_RSP:
	case L2CAP_CONF_RSP:
	case L2CAP_DISCONN_RSP:
	case L2CAP_ECHO_RSP:
	case L2CAP_INFO_RSP:
	case L2CAP_CONN_PARAM_UPDATE_RSP:
	case L2CAP_ECRED_CONN_RSP:
	case L2CAP_ECRED_RECONF_RSP:
		/* First do a lookup since the remote may send bogus ids that
		 * would make ida_free to generate warnings.
		 */
		if (ida_find_first_range(&conn->tx_ida, id, id) >= 0)
			ida_free(&conn->tx_ida, id);
	}
}
4822 
/* Dispatch a single BR/EDR signalling command to its handler.
 *
 * Only the request handlers' errors are propagated (so malformed
 * requests trigger a Command Reject in the caller); errors from
 * response handlers are deliberately ignored.
 *
 * Returns 0, a negative errno from a request handler, or -EINVAL for an
 * unknown command code.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	/* A response completes a request/response pair: recycle its ident */
	l2cap_put_ident(conn, cmd->code, cmd->ident);

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
4883 
/* Handle an incoming LE Credit Based Connection Request: validate the
 * PSM, security level, key size and CID, then either accept the channel,
 * defer it to userspace, or reject it with an appropriate result code.
 *
 * Returns 0, or -EPROTO on a malformed packet.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);
	psm  = req->psm;
	/* Defaults reported back on rejection */
	dcid = 0;
	credits = 0;

	/* 23 is the minimum LE MTU/MPS */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* The link security must satisfy the listener's requirement */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = pchan->sec_level == BT_SECURITY_MEDIUM ?
			L2CAP_CR_LE_ENCRYPTION : L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if Key Size is sufficient for the security level */
	if (!l2cap_check_enc_key_size(conn->hcon, pchan)) {
		result = L2CAP_CR_LE_BAD_KEY_SIZE;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	/* Values reported back in the successful response */
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

	/* Deferred setup: the response is sent later by userspace */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5028 
/* Handle an incoming LE Flow Control Credit packet: add the granted
 * credits to the channel's tx budget and resume transmission.
 *
 * Returns 0, -EPROTO on a malformed packet, or -EBADSLT when no channel
 * matches the CID. A credit overflow disconnects the channel but still
 * returns 0 to avoid an extra Command Reject.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid	= __le16_to_cpu(pkt->cid);
	credits	= __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	/* Lookup returns the channel locked and with a reference held */
	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	/* The total credit count must never exceed the protocol maximum */
	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		goto unlock;
	}

	chan->tx_credits += credits;

	/* Resume sending */
	l2cap_le_flowctl_send(chan);

	if (chan->tx_credits)
		chan->ops->resume(chan);

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5075 
l2cap_ecred_conn_req(struct l2cap_conn * conn,struct l2cap_cmd_hdr * cmd,u16 cmd_len,u8 * data)5076 static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
5077 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5078 				       u8 *data)
5079 {
5080 	struct l2cap_ecred_conn_req *req = (void *) data;
5081 	DEFINE_RAW_FLEX(struct l2cap_ecred_conn_rsp, pdu, dcid, L2CAP_ECRED_MAX_CID);
5082 	struct l2cap_chan *chan, *pchan;
5083 	u16 mtu, mps;
5084 	__le16 psm;
5085 	u8 result, rsp_len = 0;
5086 	int i, num_scid = 0;
5087 	bool defer = false;
5088 
5089 	if (!enable_ecred)
5090 		return -EINVAL;
5091 
5092 	memset(pdu, 0, sizeof(*pdu));
5093 
5094 	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
5095 		result = L2CAP_CR_LE_INVALID_PARAMS;
5096 		goto response;
5097 	}
5098 
5099 	/* Check if there are no pending channels with the same ident */
5100 	__l2cap_chan_list_id(conn, cmd->ident, l2cap_ecred_list_defer,
5101 			     &num_scid);
5102 	if (num_scid) {
5103 		result = L2CAP_CR_LE_INVALID_PARAMS;
5104 		goto response;
5105 	}
5106 
5107 	cmd_len -= sizeof(*req);
5108 	num_scid = cmd_len / sizeof(u16);
5109 
5110 	if (num_scid > L2CAP_ECRED_MAX_CID) {
5111 		result = L2CAP_CR_LE_INVALID_PARAMS;
5112 		goto response;
5113 	}
5114 
5115 	/* Always respond with the same number of scids as in the request */
5116 	rsp_len = cmd_len;
5117 
5118 	mtu  = __le16_to_cpu(req->mtu);
5119 	mps  = __le16_to_cpu(req->mps);
5120 
5121 	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
5122 		result = L2CAP_CR_LE_INVALID_PARAMS;
5123 		goto response;
5124 	}
5125 
5126 	psm  = req->psm;
5127 
5128 	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
5129 	 * page 1059:
5130 	 *
5131 	 * Valid range: 0x0001-0x00ff
5132 	 *
5133 	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
5134 	 */
5135 	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
5136 		result = L2CAP_CR_LE_BAD_PSM;
5137 		goto response;
5138 	}
5139 
5140 	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);
5141 
5142 	/* Check if we have socket listening on psm */
5143 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5144 					 &conn->hcon->dst, LE_LINK);
5145 	if (!pchan) {
5146 		result = L2CAP_CR_LE_BAD_PSM;
5147 		goto response;
5148 	}
5149 
5150 	l2cap_chan_lock(pchan);
5151 
5152 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5153 				     SMP_ALLOW_STK)) {
5154 		result = pchan->sec_level == BT_SECURITY_MEDIUM ?
5155 			L2CAP_CR_LE_ENCRYPTION : L2CAP_CR_LE_AUTHENTICATION;
5156 		goto unlock;
5157 	}
5158 
5159 	/* Check if the listening channel has set an output MTU then the
5160 	 * requested MTU shall be less than or equal to that value.
5161 	 */
5162 	if (pchan->omtu && mtu < pchan->omtu) {
5163 		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
5164 		goto unlock;
5165 	}
5166 
5167 	result = L2CAP_CR_LE_SUCCESS;
5168 
5169 	for (i = 0; i < num_scid; i++) {
5170 		u16 scid = __le16_to_cpu(req->scid[i]);
5171 
5172 		BT_DBG("scid[%d] 0x%4.4x", i, scid);
5173 
5174 		pdu->dcid[i] = 0x0000;
5175 
5176 		/* Check for valid dynamic CID range */
5177 		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5178 			result = L2CAP_CR_LE_INVALID_SCID;
5179 			continue;
5180 		}
5181 
5182 		/* Check if we already have channel with that dcid */
5183 		if (__l2cap_get_chan_by_dcid(conn, scid)) {
5184 			result = L2CAP_CR_LE_SCID_IN_USE;
5185 			continue;
5186 		}
5187 
5188 		chan = pchan->ops->new_connection(pchan);
5189 		if (!chan) {
5190 			result = L2CAP_CR_LE_NO_MEM;
5191 			continue;
5192 		}
5193 
5194 		bacpy(&chan->src, &conn->hcon->src);
5195 		bacpy(&chan->dst, &conn->hcon->dst);
5196 		chan->src_type = bdaddr_src_type(conn->hcon);
5197 		chan->dst_type = bdaddr_dst_type(conn->hcon);
5198 		chan->psm  = psm;
5199 		chan->dcid = scid;
5200 		chan->omtu = mtu;
5201 		chan->remote_mps = mps;
5202 
5203 		__l2cap_chan_add(conn, chan);
5204 
5205 		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));
5206 
5207 		/* Init response */
5208 		if (!pdu->credits) {
5209 			pdu->mtu = cpu_to_le16(chan->imtu);
5210 			pdu->mps = cpu_to_le16(chan->mps);
5211 			pdu->credits = cpu_to_le16(chan->rx_credits);
5212 		}
5213 
5214 		pdu->dcid[i] = cpu_to_le16(chan->scid);
5215 
5216 		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5217 
5218 		chan->ident = cmd->ident;
5219 		chan->mode = L2CAP_MODE_EXT_FLOWCTL;
5220 
5221 		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5222 			l2cap_state_change(chan, BT_CONNECT2);
5223 			defer = true;
5224 			chan->ops->defer(chan);
5225 		} else {
5226 			l2cap_chan_ready(chan);
5227 		}
5228 	}
5229 
5230 unlock:
5231 	l2cap_chan_unlock(pchan);
5232 	l2cap_chan_put(pchan);
5233 
5234 response:
5235 	pdu->result = cpu_to_le16(result);
5236 
5237 	if (defer)
5238 		return 0;
5239 
5240 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
5241 		       sizeof(*pdu) + rsp_len, pdu);
5242 
5243 	return 0;
5244 }
5245 
/* Handle L2CAP_CREDIT_BASED_CONNECTION_RSP: complete (or abort) every
 * local channel that is still waiting on this command ident.
 *
 * The response carries one Destination CID per requested channel plus a
 * single MTU/MPS/credits triple shared by all of them.  A missing or
 * zero dcid, or an error result, deletes the corresponding channel; a
 * dcid that is already assigned discards both the old and the new
 * channel, as the spec requires.
 *
 * Returns 0, or -EPROTO if the packet is shorter than the fixed part of
 * the response.
 */
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 mtu, mps, credits, result;
	struct l2cap_chan *chan, *tmp;
	int err = 0, sec_level;
	int i = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
	       result);

	/* The remaining bytes are the list of allocated dcids */
	cmd_len -= sizeof(*rsp);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		u16 dcid;

		/* Only channels created by the matching request and not yet
		 * connected are of interest here.
		 */
		if (chan->ident != cmd->ident ||
		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
		    chan->state == BT_CONNECTED)
			continue;

		l2cap_chan_lock(chan);

		/* Check that there is a dcid for each pending channel */
		if (cmd_len < sizeof(dcid)) {
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			continue;
		}

		dcid = __le16_to_cpu(rsp->dcid[i++]);
		cmd_len -= sizeof(u16);

		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);

		/* Check if dcid is already in use */
		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
			/* If a device receives a
			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
			 * already-assigned Destination CID, then both the
			 * original channel and the new channel shall be
			 * immediately discarded and not used.
			 */
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			chan = __l2cap_get_chan_by_dcid(conn, dcid);
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNRESET);
			l2cap_chan_unlock(chan);
			continue;
		}

		switch (result) {
		case L2CAP_CR_LE_AUTHENTICATION:
		case L2CAP_CR_LE_ENCRYPTION:
			/* If we already have MITM protection we can't do
			 * anything.
			 */
			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			/* Raise security by one level; the connect request is
			 * retried once the link security has been upgraded.
			 */
			sec_level = hcon->sec_level + 1;
			if (chan->sec_level < sec_level)
				chan->sec_level = sec_level;

			/* We'll need to send a new Connect Request */
			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);

			smp_conn_security(hcon, chan->sec_level);
			break;

		case L2CAP_CR_LE_BAD_PSM:
			l2cap_chan_del(chan, ECONNREFUSED);
			break;

		default:
			/* If dcid was not set it means channels was refused */
			if (!dcid) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			/* Connection accepted: record the peer's parameters
			 * and move the channel to the connected state.
			 */
			chan->ident = 0;
			chan->dcid = dcid;
			chan->omtu = mtu;
			chan->remote_mps = mps;
			chan->tx_credits = credits;
			l2cap_chan_ready(chan);
			break;
		}

		l2cap_chan_unlock(chan);
	}

	return err;
}
5355 
/* Handle L2CAP_CREDIT_BASED_RECONFIGURE_REQ: the peer asks to change
 * the MTU and/or MPS of up to L2CAP_ECRED_MAX_CID channels at once.
 *
 * All SCIDs and the new parameters are validated first; the new values
 * are committed only after every channel has passed the checks, so a
 * partially-valid request changes nothing.  A response carrying the
 * result code is always sent.
 *
 * Returns 0, or -EINVAL when ECRED support is disabled.
 */
static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					 u8 *data)
{
	struct l2cap_ecred_reconf_req *req = (void *) data;
	struct l2cap_ecred_reconf_rsp rsp;
	u16 mtu, mps, result;
	struct l2cap_chan *chan[L2CAP_ECRED_MAX_CID] = {};
	int i, num_scid;

	if (!enable_ecred)
		return -EINVAL;

	/* The request must contain the fixed header plus a whole number
	 * of 16-bit SCIDs.
	 */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_RECONF_INVALID_CID;
		goto respond;
	}

	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);

	BT_DBG("mtu %u mps %u", mtu, mps);

	if (mtu < L2CAP_ECRED_MIN_MTU) {
		result = L2CAP_RECONF_INVALID_PARAMS;
		goto respond;
	}

	if (mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_RECONF_INVALID_PARAMS;
		goto respond;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);

	if (num_scid > L2CAP_ECRED_MAX_CID) {
		result = L2CAP_RECONF_INVALID_PARAMS;
		goto respond;
	}

	result = L2CAP_RECONF_SUCCESS;

	/* Check if each SCID, MTU and MPS are valid */
	for (i = 0; i < num_scid; i++) {
		u16 scid;

		scid = __le16_to_cpu(req->scid[i]);
		if (!scid) {
			result = L2CAP_RECONF_INVALID_CID;
			goto respond;
		}

		chan[i] = __l2cap_get_chan_by_dcid(conn, scid);
		if (!chan[i]) {
			result = L2CAP_RECONF_INVALID_CID;
			goto respond;
		}

		/* The MTU field shall be greater than or equal to the greatest
		 * current MTU size of these channels.
		 */
		if (chan[i]->omtu > mtu) {
			BT_ERR("chan %p decreased MTU %u -> %u", chan[i],
			       chan[i]->omtu, mtu);
			result = L2CAP_RECONF_INVALID_MTU;
			goto respond;
		}

		/* If more than one channel is being configured, the MPS field
		 * shall be greater than or equal to the current MPS size of
		 * each of these channels. If only one channel is being
		 * configured, the MPS field may be less than the current MPS
		 * of that channel.
		 *
		 * Only reject an actual decrease (remote_mps > mps): an MPS
		 * equal to the current value is explicitly valid.
		 */
		if (chan[i]->remote_mps > mps && i) {
			BT_ERR("chan %p decreased MPS %u -> %u", chan[i],
			       chan[i]->remote_mps, mps);
			result = L2CAP_RECONF_INVALID_MPS;
			goto respond;
		}
	}

	/* Commit the new MTU and MPS values after checking they are valid */
	for (i = 0; i < num_scid; i++) {
		chan[i]->omtu = mtu;
		chan[i]->remote_mps = mps;
	}

respond:
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
		       &rsp);

	return 0;
}
5453 
/* Handle L2CAP_CREDIT_BASED_RECONFIGURE_RSP: a non-zero result means
 * the peer rejected our reconfiguration request, in which case every
 * channel still waiting on this command ident is torn down.
 *
 * Returns 0, or -EPROTO if the packet is too short.
 */
static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					 u8 *data)
{
	struct l2cap_chan *chan, *tmp;
	struct l2cap_ecred_reconf_rsp *rsp = (void *)data;
	u16 result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	result = __le16_to_cpu(rsp->result);

	BT_DBG("result 0x%4.4x", result);

	/* Success requires no further action */
	if (!result)
		return 0;

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		if (chan->ident != cmd->ident)
			continue;

		/* Hold a reference so the channel outlives the delete */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, ECONNRESET);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	return 0;
}
5487 
/* Handle an L2CAP_COMMAND_REJ received on the LE signaling channel:
 * the peer did not understand one of our requests, so abort the
 * channel (if any) that is still waiting on that command ident.
 *
 * Returns 0, or -EPROTO if the packet is too short.
 */
static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
	struct l2cap_chan *chan;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		goto done;

	/* Take a reference unless the channel is already being freed */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan)
		goto done;

	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, ECONNREFUSED);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

done:
	return 0;
}
5514 
/* Dispatch a single LE signaling command to its handler.
 *
 * The (code, ident) pair is released from the pending-ident tracking
 * first, then the command is routed by opcode.  Handlers whose errors
 * matter propagate them; an unknown opcode yields -EINVAL so the caller
 * sends an L2CAP_COMMAND_REJ.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	l2cap_put_ident(conn, cmd->code, cmd->ident);

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		/* Nothing to do: the update was initiated by us */
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_REQ:
		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_RSP:
		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_REQ:
		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_RSP:
		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5579 
/* Receive path for the LE signaling channel: validate the command
 * header, dispatch the single command the PDU carries, and answer with
 * L2CAP_COMMAND_REJ if the handler failed.  Always consumes the skb.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* An LE signaling PDU contains exactly one command, so the header
	 * length must match the remaining payload, and ident 0 is reserved.
	 */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5620 
/* Send an L2CAP_COMMAND_REJ (command not understood) for the given
 * command identifier on the signaling channel.
 */
static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
{
	struct l2cap_cmd_rej_unk rej = {
		.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD),
	};

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
5628 
/* Receive path for the BR/EDR signaling channel: a single PDU may
 * carry several commands back to back.  Each command is dispatched in
 * turn; malformed or rejected commands are answered with
 * L2CAP_COMMAND_REJ and parsing continues with the next one.  Always
 * consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
		u16 len;

		cmd = (void *) skb->data;
		skb_pull(skb, L2CAP_CMD_HDR_SIZE);

		len = le16_to_cpu(cmd->len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
		       cmd->ident);

		/* Reject commands whose declared length overruns the PDU or
		 * that use the reserved ident 0, then skip past their data.
		 */
		if (len > skb->len || !cmd->ident) {
			BT_DBG("corrupted command");
			l2cap_sig_send_rej(conn, cmd->ident);
			skb_pull(skb, len > skb->len ? skb->len : len);
			continue;
		}

		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
		if (err) {
			BT_ERR("Wrong link type (%d)", err);
			l2cap_sig_send_rej(conn, cmd->ident);
		}

		skb_pull(skb, len);
	}

	/* Trailing bytes too short to form a command header */
	if (skb->len > 0) {
		BT_DBG("corrupted command");
		l2cap_sig_send_rej(conn, 0);
	}

drop:
	kfree_skb(skb);
}
5676 
/* Verify the per-frame FCS (CRC-16) of an ERTM/streaming-mode frame.
 *
 * The CRC covers the L2CAP header — which still sits in front of
 * skb->data — plus the payload.  The FCS field is trimmed off the skb
 * first; its bytes remain readable just past the new tail, which is
 * where rcv_fcs is taken from.
 *
 * Returns 0 when the FCS matches or FCS is not in use, -EBADMSG on
 * mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
5697 
/* Answer a poll (P-bit) from the peer: send a frame carrying the F-bit.
 *
 * If we are locally busy an RNR is sent; otherwise pending I-frames are
 * flushed (one of them may carry the F-bit), and if the F-bit still has
 * not gone out, a final RR is sent so the peer's poll is acknowledged.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Peer is no longer busy: restart retransmissions if frames are
	 * still unacknowledged.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5731 
/* Append @new_frag to @skb's frag_list, using *@last_frag as a cached
 * tail pointer so appends are O(1), and update the parent skb's length
 * accounting.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5750 
/* Reassemble an SDU from segmented I-frame payloads according to the
 * frame's SAR bits.  Takes ownership of @skb: it is either delivered
 * upward via chan->ops->recv, stored as the partial SDU, or freed on
 * error.  Any in-progress partial SDU is discarded on error as well.
 *
 * Returns 0 on success (including "more fragments expected"),
 * -EMSGSIZE when the announced SDU exceeds the channel MTU, or -EINVAL
 * for SAR sequence violations and length mismatches.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	/* err stays -EINVAL for any SAR state violation below */
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A reassembly must not already be in progress */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* The first fragment carries the 16-bit total SDU length */
		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must be shorter than the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb is now owned by chan->sdu; don't free it below */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation must leave room for the end fragment */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Total length must match what the start fragment announced */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop both the offending fragment and any partial SDU */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5835 
/* Resegment queued data after a parameter change.  Not implemented:
 * currently a no-op that always reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5841 
/* Report a local-busy transition for @chan to the ERTM state machine.
 *
 * @busy: non-zero when the receiver can no longer accept data, zero
 *        when it has recovered.  Only meaningful in ERTM mode; other
 *        modes are ignored.
 */
void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
{
	u8 event;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
	l2cap_tx(chan, NULL, NULL, event);
}
5852 
/* Drain the SREJ queue: deliver consecutively-sequenced stored frames
 * to reassembly until a gap (or local busy) stops us.  When the queue
 * empties, SREJ recovery is over: return to the RECV state and ack.
 *
 * Returns 0 or the first error from l2cap_reassemble_sdu().
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap: the next in-order frame has not arrived yet */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5886 
/* Handle a received SREJ S-frame: the peer selectively requests the
 * retransmission of the single I-frame with sequence control->reqseq.
 * Invalid sequence numbers or exceeding the retry limit disconnect the
 * channel.  The P/F bits are tracked so a retransmission triggered by a
 * poll is not repeated when the matching final arrives (CONN_SREJ_ACT).
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq would name a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* The poll must be answered with the F-bit set */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this final answers the SREJ
			 * we already acted on while polling.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5944 
/* Handle a received REJ S-frame: the peer requests retransmission of
 * all I-frames starting at control->reqseq.  Invalid sequence numbers
 * or exceeding the retry limit disconnect the channel; CONN_REJ_ACT
 * prevents retransmitting twice for the same poll/final exchange.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq would name a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit if this final wasn't already handled */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5981 
/* Classify a received I-frame sequence number relative to the current
 * receive state.  The result (L2CAP_TXSEQ_*) tells the ERTM receive
 * state machine whether the frame is the expected next frame, a
 * duplicate, the start of a gap, an awaited SREJ retransmission, or
 * invalid — and whether an invalid frame may be silently ignored.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		/* While SREJ recovery is in progress, frames outside the tx
		 * window are invalid, but see below for when they can be
		 * safely ignored.
		 */
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq before expected_tx_seq (modulo the sequence space) means
	 * we've already seen this frame.
	 */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6067 
/* ERTM receive state machine, RECV state: handle an incoming I- or
 * S-frame event while no SREJ recovery is in progress.
 *
 * Takes ownership of @skb: it is freed at the end unless it was queued
 * on the SREJ queue or consumed by reassembly (tracked via skb_in_use).
 * Returns 0 or a negative errno propagated from SDU reassembly.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	struct l2cap_ctrl local_control;
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* l2cap_reassemble_sdu may free skb, hence invalidate
			 * control, so make a copy in advance to use it after
			 * l2cap_reassemble_sdu returns and to avoid the race
			 * condition, for example:
			 *
			 * The current thread calls:
			 *   l2cap_reassemble_sdu
			 *     chan->ops->recv == l2cap_sock_recv_cb
			 *       __sock_queue_rcv_skb
			 * Another thread calls:
			 *   bt_sock_recvmsg
			 *     skb_recv_datagram
			 *     skb_free_datagram
			 * Then the current thread tries to access control, but
			 * it was freed by skb_free_datagram.
			 */
			local_control = *control;
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (local_control.final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					local_control.final = 0;
					l2cap_retransmit_all(chan, &local_control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received: only the ack info is useful */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Retransmit unless this final was already handled */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Plain RR: peer recovered from busy; resume
			 * retransmission timing if frames are outstanding.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		/* Peer is busy: stop retransmitting until it recovers */
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6219 
/* ERTM receive state machine, SREJ_SENT state: one or more SREJ
 * S-frames are outstanding and retransmission of the missing I-frames
 * is awaited; out-of-sequence I-frames are buffered on srej_q
 * meanwhile.
 *
 * Returns 0 on success or a negative error from reassembly; the
 * caller disconnects the channel on error.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	/* Set once @skb is queued on srej_q so it is not freed below */
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The retransmission asked for next arrived: drop
			 * it from the SREJ list, queue it and try to flush
			 * the reassembly queue.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			/* Sequence number outside any valid window */
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Retransmit everything unless a REJ already
			 * triggered retransmission (the F-bit answers it).
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll with the tail of the SREJ list */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		/* Peer's receiver is busy: remember that state */
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			/* Acknowledge the RNR with a plain RR */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	/* Frames that were not queued for reassembly are consumed here */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6362 
l2cap_finish_move(struct l2cap_chan * chan)6363 static int l2cap_finish_move(struct l2cap_chan *chan)
6364 {
6365 	BT_DBG("chan %p", chan);
6366 
6367 	chan->rx_state = L2CAP_RX_STATE_RECV;
6368 	chan->conn->mtu = chan->conn->hcon->mtu;
6369 
6370 	return l2cap_resegment(chan);
6371 }
6372 
/* ERTM receive state machine, WAIT_P state: a poll (P=1) frame from
 * the peer is required before reception can resume after a move.
 *
 * Returns 0 on success or a negative error (-EPROTO for frames not
 * allowed in this state).
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	/* Only a frame with the P-bit set may end the wait */
	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	/* Restart transmission from the head of the tx queue */
	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* Re-enter RECV state and resegment for the current link MTU */
	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* An I-frame with the poll bit set is a protocol error; S-frame
	 * events are handed on to the regular RECV state handler.
	 */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6410 
/* ERTM receive state machine, WAIT_F state: a final (F=1) frame from
 * the peer is required.  On receipt, rewind the transmit state to what
 * the receiver expects, resegment for the current link MTU and process
 * the frame in the normal RECV state.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	/* Only a frame carrying the F-bit is acceptable here */
	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
	chan->rx_state = L2CAP_RX_STATE_RECV;

	l2cap_process_reqseq(chan, control->reqseq);

	/* Restart transmission from the head of the tx queue */
	chan->tx_send_head = skb_queue_empty(&chan->tx_q) ?
				NULL : skb_peek(&chan->tx_q);

	/* Rewind next_tx_seq to the point expected by the receiver */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;
	chan->conn->mtu = chan->conn->hcon->mtu;

	err = l2cap_resegment(chan);
	if (err)
		return err;

	return l2cap_rx_state_recv(chan, control, skb, event);
}
6444 
__valid_reqseq(struct l2cap_chan * chan,u16 reqseq)6445 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6446 {
6447 	/* Make sure reqseq is for a packet that has been sent but not acked */
6448 	u16 unacked;
6449 
6450 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6451 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6452 }
6453 
/* Dispatch a received ERTM frame event to the handler for the current
 * receive state.  A reqseq outside the sent-but-unacked window tears
 * the connection down.
 */
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff *skb, u8 event)
{
	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
	       control, skb, event, chan->rx_state);

	if (!__valid_reqseq(chan, control->reqseq)) {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
		       control->reqseq, chan->next_tx_seq,
		       chan->expected_ack_seq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return 0;
	}

	switch (chan->rx_state) {
	case L2CAP_RX_STATE_RECV:
		return l2cap_rx_state_recv(chan, control, skb, event);
	case L2CAP_RX_STATE_SREJ_SENT:
		return l2cap_rx_state_srej_sent(chan, control, skb, event);
	case L2CAP_RX_STATE_WAIT_P:
		return l2cap_rx_state_wait_p(chan, control, skb, event);
	case L2CAP_RX_STATE_WAIT_F:
		return l2cap_rx_state_wait_f(chan, control, skb, event);
	default:
		/* shut it down */
		return 0;
	}
}
6490 
/* Receive path for streaming mode: there are no retransmissions, so
 * an out-of-sequence frame simply aborts the SDU under reassembly and
 * is dropped.  Always returns 0; @skb is consumed on all paths.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
	 * the txseq field in advance to use it after l2cap_reassemble_sdu
	 * returns and to avoid the race condition, for example:
	 *
	 * The current thread calls:
	 *   l2cap_reassemble_sdu
	 *     chan->ops->recv == l2cap_sock_recv_cb
	 *       __sock_queue_rcv_skb
	 * Another thread calls:
	 *   bt_sock_recvmsg
	 *     skb_recv_datagram
	 *     skb_free_datagram
	 * Then the current thread tries to access control, but it was freed by
	 * skb_free_datagram.
	 */
	u16 txseq = control->txseq;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out of sequence: discard any partially reassembled SDU
		 * along with this frame.
		 */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize the expected sequence on what was received */
	chan->last_acked_seq = txseq;
	chan->expected_tx_seq = __next_seq(chan, txseq);

	return 0;
}
6542 
/* Entry point for ERTM and streaming mode PDUs on a data channel.
 * Validates FCS, length and control-field invariants, then feeds the
 * frame into the appropriate receive machinery.  @skb is consumed on
 * all paths; always returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* The SDU length header is only present in the first segment */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	/* Payload must fit within the negotiated maximum PDU size */
	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	/* Give the channel owner a chance to reject the frame */
	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))
			goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the S-frame function field to a state machine event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6635 
l2cap_chan_le_send_credits(struct l2cap_chan * chan)6636 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6637 {
6638 	struct l2cap_conn *conn = chan->conn;
6639 	struct l2cap_le_credits pkt;
6640 	u16 return_credits = l2cap_le_rx_credits(chan);
6641 
6642 	if (chan->mode != L2CAP_MODE_LE_FLOWCTL &&
6643 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL)
6644 		return;
6645 
6646 	if (chan->rx_credits >= return_credits)
6647 		return;
6648 
6649 	return_credits -= chan->rx_credits;
6650 
6651 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6652 
6653 	chan->rx_credits += return_credits;
6654 
6655 	pkt.cid     = cpu_to_le16(chan->scid);
6656 	pkt.credits = cpu_to_le16(return_credits);
6657 
6658 	chan->ident = l2cap_get_ident(conn);
6659 
6660 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6661 }
6662 
/* Record the channel owner's available receive buffer space and, on a
 * connected channel, hand out credits accordingly.
 */
void l2cap_chan_rx_avail(struct l2cap_chan *chan, ssize_t rx_avail)
{
	/* Nothing changed, nothing to do */
	if (rx_avail == chan->rx_avail)
		return;

	BT_DBG("chan %p has %zd bytes avail for rx", chan, rx_avail);

	chan->rx_avail = rx_avail;

	/* Credits can only be returned once the channel is connected */
	if (chan->state == BT_CONNECTED)
		l2cap_chan_le_send_credits(chan);
}
6675 
l2cap_ecred_recv(struct l2cap_chan * chan,struct sk_buff * skb)6676 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
6677 {
6678 	int err;
6679 
6680 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
6681 
6682 	/* Wait recv to confirm reception before updating the credits */
6683 	err = chan->ops->recv(chan, skb);
6684 
6685 	if (err < 0 && chan->rx_avail != -1) {
6686 		BT_ERR("Queueing received LE L2CAP data failed");
6687 		l2cap_send_disconn_req(chan, ECONNRESET);
6688 		return err;
6689 	}
6690 
6691 	/* Update credits whenever an SDU is received */
6692 	l2cap_chan_le_send_credits(chan);
6693 
6694 	return err;
6695 }
6696 
/* Receive path for LE / enhanced credit based flow control channels:
 * accounts the sender's credits, validates PDU and SDU sizes and
 * reassembles SDUs that span several PDUs.
 *
 * Returns a negative error only on the early checks, before @skb has
 * been consumed (the caller frees it); once reassembly has started,
 * @skb is freed or queued internally and 0 is returned so the caller
 * does not double-free it.
 */
static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	/* A peer sending without credits violates the protocol */
	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (skb->len > chan->imtu) {
		BT_ERR("Too big LE L2CAP PDU: len %u > %u", skb->len,
		       chan->imtu);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (skb->len > chan->mps) {
		BT_ERR("Too big LE L2CAP MPS: len %u > %u", skb->len,
		       chan->mps);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	/* Each PDU consumes one credit from the sender's allowance */
	chan->rx_credits--;
	BT_DBG("chan %p: rx_credits %u -> %u",
	       chan, chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU carries a 2-byte SDU length header */
		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE)) {
			err = -EINVAL;
			goto failed;
		}

		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length: len %u > %u",
			       sdu_len, chan->imtu);
			l2cap_send_disconn_req(chan, ECONNRESET);
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Whole SDU fits in this one PDU: deliver it right away */
		if (skb->len == sdu_len)
			return l2cap_ecred_recv(chan, skb);

		/* Start reassembly; further PDUs are appended below */
		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		l2cap_send_disconn_req(chan, ECONNRESET);
		err = -EINVAL;
		goto failed;
	}

	/* skb ownership moves into the SDU under reassembly */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_ecred_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6818 
/* Deliver a PDU received on a data channel identified by @cid.
 * l2cap_get_chan_by_scid() returns the channel with a reference held
 * and locked (the unlock/put at "done" releases both); @skb is
 * consumed on all paths.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return;
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		/* Non-zero recv result means the skb was not taken: drop */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv always consumes the skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
6884 
/* Deliver a connectionless PDU to a listener matching @psm.  Only
 * supported on BR/EDR ACL links; @skb is consumed on all paths
 * (either handed to the channel's recv callback or freed here).
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	/* Returns a referenced channel; released below */
	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	l2cap_chan_lock(chan);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	/* recv returning 0 means the skb was taken by the channel */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
6925 
/* Main L2CAP receive entry point: parse the basic header and
 * demultiplex the PDU to the signaling, connectionless or data channel
 * handlers.  Frames arriving before the HCI connection is fully
 * established are parked on pending_rx and replayed later by
 * process_pending_rx().
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	/* Capture the header before the pull below; skb_pull only
	 * advances skb->data, the header bytes stay accessible via lh.
	 */
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload length */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless payload starts with the destination PSM */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6980 
process_pending_rx(struct work_struct * work)6981 static void process_pending_rx(struct work_struct *work)
6982 {
6983 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6984 					       pending_rx_work);
6985 	struct sk_buff *skb;
6986 
6987 	BT_DBG("");
6988 
6989 	mutex_lock(&conn->lock);
6990 
6991 	while ((skb = skb_dequeue(&conn->pending_rx)))
6992 		l2cap_recv_frame(conn, skb);
6993 
6994 	mutex_unlock(&conn->lock);
6995 }
6996 
/* Allocate and initialise the L2CAP layer state for an HCI connection,
 * or return the l2cap_conn already attached to it.  Returns NULL when
 * the HCI channel or the l2cap_conn cannot be allocated.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Already set up for this hcon: reuse it */
	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc_obj(*conn);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	/* Hold a reference on the hcon for the l2cap_conn's lifetime */
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	conn->mtu = hcon->mtu;
	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	/* SMP over BR/EDR is only advertised with LE enabled and either
	 * Secure Connections support or the force-SMP debug flag.
	 */
	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
	ida_init(&conn->tx_ida);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_DELAYED_WORK(&conn->id_addr_timer, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
7048 
is_valid_psm(u16 psm,u8 dst_type)7049 static bool is_valid_psm(u16 psm, u8 dst_type)
7050 {
7051 	if (!psm)
7052 		return false;
7053 
7054 	if (bdaddr_type_is_le(dst_type))
7055 		return (psm <= 0x00ff);
7056 
7057 	/* PSM must be odd and lsb of upper byte must be 0 */
7058 	return ((psm & 0x0101) == 0x0001);
7059 }
7060 
/* Iteration context for l2cap_chan_by_pid(): counts deferred
 * EXT_FLOWCTL channels sharing the reference channel's peer PID and
 * PSM (the reference channel itself is excluded from the count).
 */
struct l2cap_chan_data {
	struct l2cap_chan *chan;	/* reference channel; not counted */
	struct pid *pid;		/* peer pid to match against */
	int count;			/* number of matching channels */
};
7066 
l2cap_chan_by_pid(struct l2cap_chan * chan,void * data)7067 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
7068 {
7069 	struct l2cap_chan_data *d = data;
7070 	struct pid *pid;
7071 
7072 	if (chan == d->chan)
7073 		return;
7074 
7075 	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
7076 		return;
7077 
7078 	pid = chan->ops->get_peer_pid(chan);
7079 
7080 	/* Only count deferred channels with the same PID/PSM */
7081 	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
7082 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
7083 		return;
7084 
7085 	d->count++;
7086 }
7087 
/* Initiate an outgoing L2CAP connection on @chan towards @dst.
 *
 * Validates the PSM/CID against the channel type and mode, creates
 * (or reuses) the underlying HCI connection, attaches the channel to
 * the l2cap_conn and kicks the channel state machine.  Returns 0 on
 * success (including "already connecting") or a negative error.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type, u16 timeout)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
	       dst, dst_type, __le16_to_cpu(psm), chan->mode);

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* Raw channels are exempt from PSM validation */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection oriented channels require a PSM ... */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	/* ... and fixed channels require a CID */
	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* Reject modes disabled by module parameters */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		fallthrough;
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* When advertising, connect directly as slave; otherwise
		 * go through the passive-scan based connect.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type, false,
					      chan->sec_level, timeout,
					      HCI_ROLE_SLAVE, 0, 0);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level, timeout,
						   CONN_REASON_L2CAP_CHAN);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
				       CONN_REASON_L2CAP_CHAN, timeout);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
		struct l2cap_chan_data data;

		data.chan = chan;
		data.pid = chan->ops->get_peer_pid(chan);
		data.count = 1;

		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);

		/* Check if there isn't too many channels being connected */
		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
			hci_conn_drop(hcon);
			err = -EPROTO;
			goto done;
		}
	}

	mutex_lock(&conn->lock);
	l2cap_chan_lock(chan);

	/* The requested destination CID must not already be in use */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* If the link is already up, continue the channel setup now */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
7271 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7272 
l2cap_ecred_reconfigure(struct l2cap_chan * chan)7273 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
7274 {
7275 	struct l2cap_conn *conn = chan->conn;
7276 	DEFINE_RAW_FLEX(struct l2cap_ecred_reconf_req, pdu, scid, 1);
7277 
7278 	pdu->mtu = cpu_to_le16(chan->imtu);
7279 	pdu->mps = cpu_to_le16(chan->mps);
7280 	pdu->scid[0] = cpu_to_le16(chan->scid);
7281 
7282 	chan->ident = l2cap_get_ident(conn);
7283 
7284 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
7285 		       sizeof(pdu), &pdu);
7286 }
7287 
/* Raise the incoming MTU of a channel and notify the peer via an
 * ECRED reconfigure request.  Shrinking the MTU is rejected with
 * -EINVAL; an unchanged MTU is accepted.
 */
int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
{
	/* The incoming MTU may only grow or stay the same */
	if (mtu < chan->imtu)
		return -EINVAL;

	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);

	chan->imtu = mtu;
	l2cap_ecred_reconfigure(chan);

	return 0;
}
7301 
7302 /* ---- L2CAP interface with lower layer (HCI) ---- */
7303 
/* Decide whether an incoming ACL connection should be accepted, based
 * on listening channels bound to this adapter.  Returns HCI_LM_* bits;
 * channels bound to the adapter address take precedence over wildcard
 * (BDADDR_ANY) binds.
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		int *lm;

		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&c->src, &hdev->bdaddr)) {
			lm = &lm1;
			exact++;
		} else if (!bacmp(&c->src, BDADDR_ANY)) {
			lm = &lm2;
		} else {
			continue;
		}

		*lm |= HCI_LM_ACCEPT;
		if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
			*lm |= HCI_LM_MASTER;
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}
7332 
/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 *
 * Returns the matching channel with an elevated refcount (taken via
 * l2cap_chan_hold_unless_zero()); the caller must drop it with
 * l2cap_chan_put().  Returns NULL when no further channel matches, or
 * when the candidate's refcount already hit zero (channel being freed).
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	/* Resume after @c, or start from the head of the global list */
	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Source address must match the link's, or be a wildcard */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		/* May yield NULL if the channel is concurrently going away */
		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
7368 
/* HCI callback: an ACL/LE connection attempt completed.  On failure
 * (@status != 0) any existing L2CAP connection state is torn down; on
 * success an l2cap_conn is set up and every listening fixed channel is
 * offered the new link.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	/* Only ACL and LE links carry L2CAP traffic */
	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		/* new_connection() yields a fresh channel instance for this
		 * link, or NULL when the owner refuses one.
		 */
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Look up the successor before dropping the reference the
		 * previous lookup took on pchan.
		 */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7429 
l2cap_disconn_ind(struct hci_conn * hcon)7430 int l2cap_disconn_ind(struct hci_conn *hcon)
7431 {
7432 	struct l2cap_conn *conn = hcon->l2cap_data;
7433 
7434 	BT_DBG("hcon %p", hcon);
7435 
7436 	if (!conn)
7437 		return HCI_ERROR_REMOTE_USER_TERM;
7438 	return conn->disc_reason;
7439 }
7440 
/* HCI callback: the link was disconnected; tear down all L2CAP state
 * for it.  Ignored for link types that do not carry L2CAP.
 */
static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	if (hcon->type == ACL_LINK || hcon->type == LE_LINK) {
		BT_DBG("hcon %p reason %d", hcon, reason);

		l2cap_conn_del(hcon, bt_to_errno(reason));
	}
}
7450 
/* React to an encryption change on a connection-oriented channel.
 *
 * Encryption dropped: a medium-security channel gets a grace timer
 * (L2CAP_ENC_TIMEOUT) while high/FIPS channels are closed immediately.
 * Encryption (re)established: cancel the grace timer for a
 * medium-security channel.
 */
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return;

	if (encrypt) {
		if (chan->sec_level == BT_SECURITY_MEDIUM)
			__clear_chan_timer(chan);
		return;
	}

	switch (chan->sec_level) {
	case BT_SECURITY_MEDIUM:
		__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
		break;
	case BT_SECURITY_HIGH:
	case BT_SECURITY_FIPS:
		l2cap_chan_close(chan, ECONNREFUSED);
		break;
	default:
		break;
	}
}
7467 
/* HCI callback: the authentication/encryption procedure on the link
 * finished.  @status is the HCI status (0 on success), @encrypt the new
 * encryption state (0x00 = off).  Walks every channel on the connection
 * and advances or aborts its connection state machine accordingly.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* Successful encryption raises the channel to the link's
		 * security level.
		 */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		/* Channels with a connect request still outstanding are
		 * handled when the response arrives, not here.
		 */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Established channels just resume; l2cap_check_encryption()
		 * may arm a timer or close them based on the new state.
		 */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Outgoing connection waiting on security: proceed
			 * only on success with a sufficient encryption key
			 * size, otherwise arm the disconnect timer.
			 */
			if (!status && l2cap_check_enc_key_size(hcon, chan))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			/* Incoming BR/EDR connection that waited on security:
			 * answer the pending L2CAP_CONN_REQ now.
			 */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon, chan)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Defer to the owner for authorization
					 * before completing the setup.
					 */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed or key too short: refuse and
				 * schedule the disconnect.
				 */
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On success immediately follow up with our own
			 * configuration request (sent at most once).
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}
7554 
/* Append fragment into frame respecting the maximum len of rx_skb.
 *
 * On the first fragment this allocates conn->rx_skb with room for @len
 * bytes and records @len as the total expected length in conn->rx_len.
 *
 * Returns the number of bytes consumed from @skb — min(@len, skb->len),
 * which also decrements conn->rx_len — or -ENOMEM when the reassembly
 * buffer cannot be allocated.
 */
static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
			   u16 len)
{
	if (!conn->rx_skb) {
		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			return -ENOMEM;
		/* Init rx_len */
		conn->rx_len = len;

		/* Carry the first fragment's timestamp over to the frame */
		skb_set_delivery_time(conn->rx_skb, skb->tstamp,
				      skb->tstamp_type);
	}

	/* Copy no more than the fragment actually contains */
	len = min_t(u16, len, skb->len);
	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
	skb_pull(skb, len);
	conn->rx_len -= len;

	return len;
}
7579 
/* Complete the 2-byte L2CAP length field of a partially received frame
 * and size conn->rx_skb to fit the announced PDU.
 *
 * Returns the number of bytes consumed from @skb, or a negative error.
 * If the current rx_skb is too small for the full PDU it is reallocated
 * with the exact expected length and the bytes read so far are copied
 * across.
 */
static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *rx_skb;
	int len;

	/* Append just enough to complete the header */
	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);

	/* If header could not be read just continue */
	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
		return len;

	rx_skb = conn->rx_skb;
	/* PDU payload length as announced by the L2CAP basic header */
	len = get_unaligned_le16(rx_skb->data);

	/* Check if rx_skb has enough space to received all fragments */
	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
		/* Update expected len */
		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
		return L2CAP_LEN_SIZE;
	}

	/* Reset conn->rx_skb since it will need to be reallocated in order to
	 * fit all fragments.
	 */
	conn->rx_skb = NULL;

	/* Reallocates rx_skb using the exact expected length; passing the old
	 * rx_skb as the source copies the bytes received so far into it.
	 */
	len = l2cap_recv_frag(conn, rx_skb,
			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
	kfree_skb(rx_skb);

	return len;
}
7614 
l2cap_recv_reset(struct l2cap_conn * conn)7615 static void l2cap_recv_reset(struct l2cap_conn *conn)
7616 {
7617 	kfree_skb(conn->rx_skb);
7618 	conn->rx_skb = NULL;
7619 	conn->rx_len = 0;
7620 }
7621 
l2cap_conn_hold_unless_zero(struct l2cap_conn * c)7622 struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *c)
7623 {
7624 	if (!c)
7625 		return NULL;
7626 
7627 	BT_DBG("conn %p orig refcnt %u", c, kref_read(&c->ref));
7628 
7629 	if (!kref_get_unless_zero(&c->ref))
7630 		return NULL;
7631 
7632 	return c;
7633 }
7634 
/* HCI entry point for incoming ACL data.  Reassembles fragmented HCI
 * packets into complete L2CAP frames and hands them to
 * l2cap_recv_frame().  @skb is always consumed (freed here, unless
 * ownership is transferred to l2cap_recv_frame()).
 */
int l2cap_recv_acldata(struct hci_dev *hdev, u16 handle,
		       struct sk_buff *skb, u16 flags)
{
	struct hci_conn *hcon;
	struct l2cap_conn *conn;
	int len;

	/* Lock hdev for hci_conn, and race on l2cap_data vs. l2cap_conn_del */
	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon) {
		hci_dev_unlock(hdev);
		kfree_skb(skb);
		return -ENOENT;
	}

	hci_conn_enter_active_mode(hcon, BT_POWER_FORCE_ACTIVE_OFF);

	conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	/* Pin conn so it survives past the hdev unlock; NULL if it is
	 * already being torn down.
	 */
	conn = l2cap_conn_hold_unless_zero(conn);
	/* hcon must not be touched once the hdev lock is dropped */
	hcon = NULL;

	hci_dev_unlock(hdev);

	if (!conn) {
		kfree_skb(skb);
		return -EINVAL;
	}

	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);

	mutex_lock(&conn->lock);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start frame while reassembly is in progress means the
		 * previous frame was never completed: drop it.
		 */
		if (conn->rx_skb) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment may not contain the L2CAP length so just
		 * copy the initial byte when that happens and use conn->mtu as
		 * expected length.
		 */
		if (skb->len < L2CAP_LEN_SIZE) {
			l2cap_recv_frag(conn, skb, conn->mtu);
			break;
		}

		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received.  l2cap_recv_frame() takes
			 * ownership of skb, so skip the kfree_skb at drop.
			 */
			l2cap_recv_frame(conn, skb);
			goto unlock;
		}

		BT_DBG("Start: total len %d, frag len %u", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %u, expected len %d)",
			       skb->len, len);
			/* PTS test cases L2CAP/COS/CED/BI-14-C and BI-15-C
			 * (Multiple Signaling Command in one PDU, Data
			 * Truncated, BR/EDR) send a C-frame to the IUT with
			 * PDU Length set to 8 and Channel ID set to the
			 * correct signaling channel for the logical link.
			 * The Information payload contains one L2CAP_ECHO_REQ
			 * packet with Data Length set to 0 with 0 octets of
			 * echo data and one invalid command packet due to
			 * data truncated in PDU but present in HCI packet.
			 *
			 * Shorter the socket buffer to the PDU length to
			 * allow to process valid commands from the PDU before
			 * setting the socket unreliable.
			 */
			skb->len = len;
			l2cap_recv_frame(conn, skb);
			l2cap_conn_unreliable(conn, ECOMM);
			goto unlock;
		}

		/* Append fragment into frame (with header) */
		if (l2cap_recv_frag(conn, skb, len) < 0)
			goto drop;

		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);

		if (!conn->rx_skb) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Complete the L2CAP length if it has not been read */
		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
			if (l2cap_recv_len(conn, skb) < 0) {
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}

			/* Header still could not be read just continue */
			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
				break;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %u, expected %u)",
			       skb->len, conn->rx_len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		l2cap_recv_frag(conn, skb, skb->len);

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

	/* Paths that did not transfer skb ownership fall through here */
drop:
	kfree_skb(skb);
unlock:
	mutex_unlock(&conn->lock);
	l2cap_conn_put(conn);
	return 0;
}
7782 
/* Callbacks registered with the HCI core for connect, disconnect and
 * security (authentication/encryption) events on ACL/LE links.
 */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
7789 
l2cap_debugfs_show(struct seq_file * f,void * p)7790 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7791 {
7792 	struct l2cap_chan *c;
7793 
7794 	read_lock(&chan_list_lock);
7795 
7796 	list_for_each_entry(c, &chan_list, global_l) {
7797 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7798 			   &c->src, c->src_type, &c->dst, c->dst_type,
7799 			   c->state, __le16_to_cpu(c->psm),
7800 			   c->scid, c->dcid, c->imtu, c->omtu,
7801 			   c->sec_level, c->mode);
7802 	}
7803 
7804 	read_unlock(&chan_list_lock);
7805 
7806 	return 0;
7807 }
7808 
7809 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
7810 
7811 static struct dentry *l2cap_debugfs;
7812 
l2cap_init(void)7813 int __init l2cap_init(void)
7814 {
7815 	int err;
7816 
7817 	err = l2cap_init_sockets();
7818 	if (err < 0)
7819 		return err;
7820 
7821 	hci_register_cb(&l2cap_cb);
7822 
7823 	if (IS_ERR_OR_NULL(bt_debugfs))
7824 		return 0;
7825 
7826 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7827 					    NULL, &l2cap_debugfs_fops);
7828 
7829 	return 0;
7830 }
7831 
/* Module teardown: undo l2cap_init() in reverse order */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
7838 
/* Runtime-tunable module parameters (mode 0644: writable via sysfs) */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_ecred, bool, 0644);
MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
7844