2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
43 #define LE_FLOWCTL_MAX_CREDITS 65535
46 bool enable_ecred = IS_ENABLED(CONFIG_BT_LE_L2CAP_ECRED);
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
50 static LIST_HEAD(chan_list);
51 static DEFINE_RWLOCK(chan_list_lock);
53 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
54 u8 code, u8 ident, u16 dlen, void *data);
55 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
57 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
58 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
60 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
61 struct sk_buff_head *skbs, u8 event);
62 static void l2cap_retrans_timeout(struct work_struct *work);
63 static void l2cap_monitor_timeout(struct work_struct *work);
64 static void l2cap_ack_timeout(struct work_struct *work);
66 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
68 if (link_type == LE_LINK) {
69 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
70 return BDADDR_LE_PUBLIC;
72 return BDADDR_LE_RANDOM;
78 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
80 return bdaddr_type(hcon->type, hcon->src_type);
83 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
85 return bdaddr_type(hcon->type, hcon->dst_type);
88 /* ---- L2CAP channels ---- */
90 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
95 list_for_each_entry(c, &conn->chan_l, list) {
102 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
105 struct l2cap_chan *c;
107 list_for_each_entry(c, &conn->chan_l, list) {
114 /* Find channel with given SCID.
115 * Returns a reference locked channel.
117 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
120 struct l2cap_chan *c;
122 mutex_lock(&conn->chan_lock);
123 c = __l2cap_get_chan_by_scid(conn, cid);
125 /* Only lock if chan reference is not 0 */
126 c = l2cap_chan_hold_unless_zero(c);
130 mutex_unlock(&conn->chan_lock);
135 /* Find channel with given DCID.
136 * Returns a reference locked channel.
138 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
141 struct l2cap_chan *c;
143 mutex_lock(&conn->chan_lock);
144 c = __l2cap_get_chan_by_dcid(conn, cid);
146 /* Only lock if chan reference is not 0 */
147 c = l2cap_chan_hold_unless_zero(c);
151 mutex_unlock(&conn->chan_lock);
156 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
159 struct l2cap_chan *c;
161 list_for_each_entry(c, &conn->chan_l, list) {
162 if (c->ident == ident)
168 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
171 struct l2cap_chan *c;
173 list_for_each_entry(c, &chan_list, global_l) {
174 if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
177 if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
180 if (c->sport == psm && !bacmp(&c->src, src))
186 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
190 write_lock(&chan_list_lock);
192 if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
202 u16 p, start, end, incr;
204 if (chan->src_type == BDADDR_BREDR) {
205 start = L2CAP_PSM_DYN_START;
206 end = L2CAP_PSM_AUTO_END;
209 start = L2CAP_PSM_LE_DYN_START;
210 end = L2CAP_PSM_LE_DYN_END;
215 for (p = start; p <= end; p += incr)
216 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
218 chan->psm = cpu_to_le16(p);
219 chan->sport = cpu_to_le16(p);
226 write_unlock(&chan_list_lock);
229 EXPORT_SYMBOL_GPL(l2cap_add_psm);
231 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
233 write_lock(&chan_list_lock);
235 /* Override the defaults (which are for conn-oriented) */
236 chan->omtu = L2CAP_DEFAULT_MTU;
237 chan->chan_type = L2CAP_CHAN_FIXED;
241 write_unlock(&chan_list_lock);
246 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
250 if (conn->hcon->type == LE_LINK)
251 dyn_end = L2CAP_CID_LE_DYN_END;
253 dyn_end = L2CAP_CID_DYN_END;
255 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
256 if (!__l2cap_get_chan_by_scid(conn, cid))
263 static void l2cap_state_change(struct l2cap_chan *chan, int state)
265 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
266 state_to_string(state));
269 chan->ops->state_change(chan, state, 0);
272 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
276 chan->ops->state_change(chan, chan->state, err);
279 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
281 chan->ops->state_change(chan, chan->state, err);
284 static void __set_retrans_timer(struct l2cap_chan *chan)
286 if (!delayed_work_pending(&chan->monitor_timer) &&
287 chan->retrans_timeout) {
288 l2cap_set_timer(chan, &chan->retrans_timer,
289 msecs_to_jiffies(chan->retrans_timeout));
293 static void __set_monitor_timer(struct l2cap_chan *chan)
295 __clear_retrans_timer(chan);
296 if (chan->monitor_timeout) {
297 l2cap_set_timer(chan, &chan->monitor_timer,
298 msecs_to_jiffies(chan->monitor_timeout));
302 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
307 skb_queue_walk(head, skb) {
308 if (bt_cb(skb)->l2cap.txseq == seq)
315 /* ---- L2CAP sequence number lists ---- */
317 /* For ERTM, ordered lists of sequence numbers must be tracked for
318 * SREJ requests that are received and for frames that are to be
319 * retransmitted. These seq_list functions implement a singly-linked
320 * list in an array, where membership in the list can also be checked
321 * in constant time. Items can also be added to the tail of the list
322 * and removed from the head in constant time, without further memory
326 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
328 size_t alloc_size, i;
330 /* Allocated size is a power of 2 to map sequence numbers
331 * (which may be up to 14 bits) in to a smaller array that is
332 * sized for the negotiated ERTM transmit windows.
334 alloc_size = roundup_pow_of_two(size);
336 seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
340 seq_list->mask = alloc_size - 1;
341 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
342 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
343 for (i = 0; i < alloc_size; i++)
344 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
349 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
351 kfree(seq_list->list);
354 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
357 /* Constant-time check for list membership */
358 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
361 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
363 u16 seq = seq_list->head;
364 u16 mask = seq_list->mask;
366 seq_list->head = seq_list->list[seq & mask];
367 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
369 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
370 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
371 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
377 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
381 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
384 for (i = 0; i <= seq_list->mask; i++)
385 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
387 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
388 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
391 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
393 u16 mask = seq_list->mask;
395 /* All appends happen in constant time */
397 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
400 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
401 seq_list->head = seq;
403 seq_list->list[seq_list->tail & mask] = seq;
405 seq_list->tail = seq;
406 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Channel timer expiry: close the channel with a reason derived from its
 * state. The reference taken by __set_chan_timer() is dropped here.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	if (!conn)
		return;

	mutex_lock(&conn->chan_lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);
}
445 struct l2cap_chan *l2cap_chan_create(void)
447 struct l2cap_chan *chan;
449 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
453 skb_queue_head_init(&chan->tx_q);
454 skb_queue_head_init(&chan->srej_q);
455 mutex_init(&chan->lock);
457 /* Set default lock nesting level */
458 atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
460 write_lock(&chan_list_lock);
461 list_add(&chan->global_l, &chan_list);
462 write_unlock(&chan_list_lock);
464 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
465 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
466 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
467 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
469 chan->state = BT_OPEN;
471 kref_init(&chan->kref);
473 /* This flag is cleared in l2cap_chan_ready() */
474 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
476 BT_DBG("chan %p", chan);
480 EXPORT_SYMBOL_GPL(l2cap_chan_create);
482 static void l2cap_chan_destroy(struct kref *kref)
484 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
486 BT_DBG("chan %p", chan);
488 write_lock(&chan_list_lock);
489 list_del(&chan->global_l);
490 write_unlock(&chan_list_lock);
495 void l2cap_chan_hold(struct l2cap_chan *c)
497 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
502 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
504 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
506 if (!kref_get_unless_zero(&c->kref))
512 void l2cap_chan_put(struct l2cap_chan *c)
514 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
516 kref_put(&c->kref, l2cap_chan_destroy);
518 EXPORT_SYMBOL_GPL(l2cap_chan_put);
520 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
522 chan->fcs = L2CAP_FCS_CRC16;
523 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
524 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
525 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
526 chan->remote_max_tx = chan->max_tx;
527 chan->remote_tx_win = chan->tx_win;
528 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
529 chan->sec_level = BT_SECURITY_LOW;
530 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
531 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
532 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
534 chan->conf_state = 0;
535 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
537 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
539 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
541 static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
544 chan->sdu_last_frag = NULL;
546 chan->tx_credits = tx_credits;
547 /* Derive MPS from connection MTU to stop HCI fragmentation */
548 chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
549 /* Give enough credits for a full packet */
550 chan->rx_credits = (chan->imtu / chan->mps) + 1;
552 skb_queue_head_init(&chan->tx_q);
555 static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
557 l2cap_le_flowctl_init(chan, tx_credits);
559 /* L2CAP implementations shall support a minimum MPS of 64 octets */
560 if (chan->mps < L2CAP_ECRED_MIN_MPS) {
561 chan->mps = L2CAP_ECRED_MIN_MPS;
562 chan->rx_credits = (chan->imtu / chan->mps) + 1;
566 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
568 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
569 __le16_to_cpu(chan->psm), chan->dcid);
571 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
575 switch (chan->chan_type) {
576 case L2CAP_CHAN_CONN_ORIENTED:
577 /* Alloc CID for connection-oriented socket */
578 chan->scid = l2cap_alloc_cid(conn);
579 if (conn->hcon->type == ACL_LINK)
580 chan->omtu = L2CAP_DEFAULT_MTU;
583 case L2CAP_CHAN_CONN_LESS:
584 /* Connectionless socket */
585 chan->scid = L2CAP_CID_CONN_LESS;
586 chan->dcid = L2CAP_CID_CONN_LESS;
587 chan->omtu = L2CAP_DEFAULT_MTU;
590 case L2CAP_CHAN_FIXED:
591 /* Caller will set CID and CID specific MTU values */
595 /* Raw socket can send/recv signalling messages only */
596 chan->scid = L2CAP_CID_SIGNALING;
597 chan->dcid = L2CAP_CID_SIGNALING;
598 chan->omtu = L2CAP_DEFAULT_MTU;
601 chan->local_id = L2CAP_BESTEFFORT_ID;
602 chan->local_stype = L2CAP_SERV_BESTEFFORT;
603 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
604 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
605 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
606 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
608 l2cap_chan_hold(chan);
610 /* Only keep a reference for fixed channels if they requested it */
611 if (chan->chan_type != L2CAP_CHAN_FIXED ||
612 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
613 hci_conn_hold(conn->hcon);
615 list_add(&chan->list, &conn->chan_l);
618 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
620 mutex_lock(&conn->chan_lock);
621 __l2cap_chan_add(conn, chan);
622 mutex_unlock(&conn->chan_lock);
625 void l2cap_chan_del(struct l2cap_chan *chan, int err)
627 struct l2cap_conn *conn = chan->conn;
629 __clear_chan_timer(chan);
631 BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
632 state_to_string(chan->state));
634 chan->ops->teardown(chan, err);
637 /* Delete from channel list */
638 list_del(&chan->list);
640 l2cap_chan_put(chan);
644 /* Reference was only held for non-fixed channels or
645 * fixed channels that explicitly requested it using the
646 * FLAG_HOLD_HCI_CONN flag.
648 if (chan->chan_type != L2CAP_CHAN_FIXED ||
649 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
650 hci_conn_drop(conn->hcon);
653 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
656 switch (chan->mode) {
657 case L2CAP_MODE_BASIC:
660 case L2CAP_MODE_LE_FLOWCTL:
661 case L2CAP_MODE_EXT_FLOWCTL:
662 skb_queue_purge(&chan->tx_q);
665 case L2CAP_MODE_ERTM:
666 __clear_retrans_timer(chan);
667 __clear_monitor_timer(chan);
668 __clear_ack_timer(chan);
670 skb_queue_purge(&chan->srej_q);
672 l2cap_seq_list_free(&chan->srej_list);
673 l2cap_seq_list_free(&chan->retrans_list);
676 case L2CAP_MODE_STREAMING:
677 skb_queue_purge(&chan->tx_q);
681 EXPORT_SYMBOL_GPL(l2cap_chan_del);
683 static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
684 l2cap_chan_func_t func, void *data)
686 struct l2cap_chan *chan, *l;
688 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
689 if (chan->ident == id)
694 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
697 struct l2cap_chan *chan;
699 list_for_each_entry(chan, &conn->chan_l, list) {
704 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
710 mutex_lock(&conn->chan_lock);
711 __l2cap_chan_list(conn, func, data);
712 mutex_unlock(&conn->chan_lock);
715 EXPORT_SYMBOL_GPL(l2cap_chan_list);
717 static void l2cap_conn_update_id_addr(struct work_struct *work)
719 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
721 struct hci_conn *hcon = conn->hcon;
722 struct l2cap_chan *chan;
724 mutex_lock(&conn->chan_lock);
726 list_for_each_entry(chan, &conn->chan_l, list) {
727 l2cap_chan_lock(chan);
728 bacpy(&chan->dst, &hcon->dst);
729 chan->dst_type = bdaddr_dst_type(hcon);
730 l2cap_chan_unlock(chan);
733 mutex_unlock(&conn->chan_lock);
736 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
738 struct l2cap_conn *conn = chan->conn;
739 struct l2cap_le_conn_rsp rsp;
742 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
743 result = L2CAP_CR_LE_AUTHORIZATION;
745 result = L2CAP_CR_LE_BAD_PSM;
747 l2cap_state_change(chan, BT_DISCONN);
749 rsp.dcid = cpu_to_le16(chan->scid);
750 rsp.mtu = cpu_to_le16(chan->imtu);
751 rsp.mps = cpu_to_le16(chan->mps);
752 rsp.credits = cpu_to_le16(chan->rx_credits);
753 rsp.result = cpu_to_le16(result);
755 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
759 static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
761 l2cap_state_change(chan, BT_DISCONN);
763 __l2cap_ecred_conn_rsp_defer(chan);
766 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
768 struct l2cap_conn *conn = chan->conn;
769 struct l2cap_conn_rsp rsp;
772 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
773 result = L2CAP_CR_SEC_BLOCK;
775 result = L2CAP_CR_BAD_PSM;
777 l2cap_state_change(chan, BT_DISCONN);
779 rsp.scid = cpu_to_le16(chan->dcid);
780 rsp.dcid = cpu_to_le16(chan->scid);
781 rsp.result = cpu_to_le16(result);
782 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
784 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
787 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
789 struct l2cap_conn *conn = chan->conn;
791 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
793 switch (chan->state) {
795 chan->ops->teardown(chan, 0);
800 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
801 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
802 l2cap_send_disconn_req(chan, reason);
804 l2cap_chan_del(chan, reason);
808 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
809 if (conn->hcon->type == ACL_LINK)
810 l2cap_chan_connect_reject(chan);
811 else if (conn->hcon->type == LE_LINK) {
812 switch (chan->mode) {
813 case L2CAP_MODE_LE_FLOWCTL:
814 l2cap_chan_le_connect_reject(chan);
816 case L2CAP_MODE_EXT_FLOWCTL:
817 l2cap_chan_ecred_connect_reject(chan);
823 l2cap_chan_del(chan, reason);
828 l2cap_chan_del(chan, reason);
832 chan->ops->teardown(chan, 0);
836 EXPORT_SYMBOL(l2cap_chan_close);
838 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
840 switch (chan->chan_type) {
842 switch (chan->sec_level) {
843 case BT_SECURITY_HIGH:
844 case BT_SECURITY_FIPS:
845 return HCI_AT_DEDICATED_BONDING_MITM;
846 case BT_SECURITY_MEDIUM:
847 return HCI_AT_DEDICATED_BONDING;
849 return HCI_AT_NO_BONDING;
852 case L2CAP_CHAN_CONN_LESS:
853 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
854 if (chan->sec_level == BT_SECURITY_LOW)
855 chan->sec_level = BT_SECURITY_SDP;
857 if (chan->sec_level == BT_SECURITY_HIGH ||
858 chan->sec_level == BT_SECURITY_FIPS)
859 return HCI_AT_NO_BONDING_MITM;
861 return HCI_AT_NO_BONDING;
863 case L2CAP_CHAN_CONN_ORIENTED:
864 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
865 if (chan->sec_level == BT_SECURITY_LOW)
866 chan->sec_level = BT_SECURITY_SDP;
868 if (chan->sec_level == BT_SECURITY_HIGH ||
869 chan->sec_level == BT_SECURITY_FIPS)
870 return HCI_AT_NO_BONDING_MITM;
872 return HCI_AT_NO_BONDING;
877 switch (chan->sec_level) {
878 case BT_SECURITY_HIGH:
879 case BT_SECURITY_FIPS:
880 return HCI_AT_GENERAL_BONDING_MITM;
881 case BT_SECURITY_MEDIUM:
882 return HCI_AT_GENERAL_BONDING;
884 return HCI_AT_NO_BONDING;
890 /* Service level security */
891 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
893 struct l2cap_conn *conn = chan->conn;
896 if (conn->hcon->type == LE_LINK)
897 return smp_conn_security(conn->hcon, chan->sec_level);
899 auth_type = l2cap_get_auth_type(chan);
901 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
905 static u8 l2cap_get_ident(struct l2cap_conn *conn)
909 /* Get next available identificator.
910 * 1 - 128 are used by kernel.
911 * 129 - 199 are reserved.
912 * 200 - 254 are used by utilities like l2ping, etc.
915 mutex_lock(&conn->ident_lock);
917 if (++conn->tx_ident > 128)
922 mutex_unlock(&conn->ident_lock);
927 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
930 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
933 BT_DBG("code 0x%2.2x", code);
938 /* Use NO_FLUSH if supported or we have an LE link (which does
939 * not support auto-flushing packets) */
940 if (lmp_no_flush_capable(conn->hcon->hdev) ||
941 conn->hcon->type == LE_LINK)
942 flags = ACL_START_NO_FLUSH;
946 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
947 skb->priority = HCI_PRIO_MAX;
949 hci_send_acl(conn->hchan, skb, flags);
952 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
954 struct hci_conn *hcon = chan->conn->hcon;
957 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
960 /* Use NO_FLUSH for LE links (where this is the only option) or
961 * if the BR/EDR link supports it and flushing has not been
962 * explicitly requested (through FLAG_FLUSHABLE).
964 if (hcon->type == LE_LINK ||
965 (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
966 lmp_no_flush_capable(hcon->hdev)))
967 flags = ACL_START_NO_FLUSH;
971 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
972 hci_send_acl(chan->conn->hchan, skb, flags);
975 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
977 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
978 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
980 if (enh & L2CAP_CTRL_FRAME_TYPE) {
983 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
984 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
991 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
992 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
999 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
1001 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1002 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
1004 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
1006 control->sframe = 1;
1007 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
1008 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
1014 control->sframe = 0;
1015 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
1016 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1023 static inline void __unpack_control(struct l2cap_chan *chan,
1024 struct sk_buff *skb)
1026 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1027 __unpack_extended_control(get_unaligned_le32(skb->data),
1028 &bt_cb(skb)->l2cap);
1029 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1031 __unpack_enhanced_control(get_unaligned_le16(skb->data),
1032 &bt_cb(skb)->l2cap);
1033 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1037 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1041 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1042 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1044 if (control->sframe) {
1045 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1046 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1047 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1049 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1050 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1056 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1060 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1061 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1063 if (control->sframe) {
1064 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1065 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1066 packed |= L2CAP_CTRL_FRAME_TYPE;
1068 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1069 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1075 static inline void __pack_control(struct l2cap_chan *chan,
1076 struct l2cap_ctrl *control,
1077 struct sk_buff *skb)
1079 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1080 put_unaligned_le32(__pack_extended_control(control),
1081 skb->data + L2CAP_HDR_SIZE);
1083 put_unaligned_le16(__pack_enhanced_control(control),
1084 skb->data + L2CAP_HDR_SIZE);
1088 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1090 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1091 return L2CAP_EXT_HDR_SIZE;
1093 return L2CAP_ENH_HDR_SIZE;
1096 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1099 struct sk_buff *skb;
1100 struct l2cap_hdr *lh;
1101 int hlen = __ertm_hdr_size(chan);
1103 if (chan->fcs == L2CAP_FCS_CRC16)
1104 hlen += L2CAP_FCS_SIZE;
1106 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1109 return ERR_PTR(-ENOMEM);
1111 lh = skb_put(skb, L2CAP_HDR_SIZE);
1112 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1113 lh->cid = cpu_to_le16(chan->dcid);
1115 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1116 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1118 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1120 if (chan->fcs == L2CAP_FCS_CRC16) {
1121 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1122 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1125 skb->priority = HCI_PRIO_MAX;
1129 static void l2cap_send_sframe(struct l2cap_chan *chan,
1130 struct l2cap_ctrl *control)
1132 struct sk_buff *skb;
1135 BT_DBG("chan %p, control %p", chan, control);
1137 if (!control->sframe)
1140 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1144 if (control->super == L2CAP_SUPER_RR)
1145 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1146 else if (control->super == L2CAP_SUPER_RNR)
1147 set_bit(CONN_RNR_SENT, &chan->conn_state);
1149 if (control->super != L2CAP_SUPER_SREJ) {
1150 chan->last_acked_seq = control->reqseq;
1151 __clear_ack_timer(chan);
1154 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1155 control->final, control->poll, control->super);
1157 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1158 control_field = __pack_extended_control(control);
1160 control_field = __pack_enhanced_control(control);
1162 skb = l2cap_create_sframe_pdu(chan, control_field);
1164 l2cap_do_send(chan, skb);
1167 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1169 struct l2cap_ctrl control;
1171 BT_DBG("chan %p, poll %d", chan, poll);
1173 memset(&control, 0, sizeof(control));
1175 control.poll = poll;
1177 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1178 control.super = L2CAP_SUPER_RNR;
1180 control.super = L2CAP_SUPER_RR;
1182 control.reqseq = chan->buffer_seq;
1183 l2cap_send_sframe(chan, &control);
1186 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1188 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1191 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1194 void l2cap_send_conn_req(struct l2cap_chan *chan)
1196 struct l2cap_conn *conn = chan->conn;
1197 struct l2cap_conn_req req;
1199 req.scid = cpu_to_le16(chan->scid);
1200 req.psm = chan->psm;
1202 chan->ident = l2cap_get_ident(conn);
1204 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1206 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1209 static void l2cap_chan_ready(struct l2cap_chan *chan)
1211 /* The channel may have already been flagged as connected in
1212 * case of receiving data before the L2CAP info req/rsp
1213 * procedure is complete.
1215 if (chan->state == BT_CONNECTED)
1218 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1219 chan->conf_state = 0;
1220 __clear_chan_timer(chan);
1222 switch (chan->mode) {
1223 case L2CAP_MODE_LE_FLOWCTL:
1224 case L2CAP_MODE_EXT_FLOWCTL:
1225 if (!chan->tx_credits)
1226 chan->ops->suspend(chan);
1230 chan->state = BT_CONNECTED;
1232 chan->ops->ready(chan);
1235 static void l2cap_le_connect(struct l2cap_chan *chan)
1237 struct l2cap_conn *conn = chan->conn;
1238 struct l2cap_le_conn_req req;
1240 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1244 chan->imtu = chan->conn->mtu;
1246 l2cap_le_flowctl_init(chan, 0);
1248 memset(&req, 0, sizeof(req));
1249 req.psm = chan->psm;
1250 req.scid = cpu_to_le16(chan->scid);
1251 req.mtu = cpu_to_le16(chan->imtu);
1252 req.mps = cpu_to_le16(chan->mps);
1253 req.credits = cpu_to_le16(chan->rx_credits);
1255 chan->ident = l2cap_get_ident(conn);
1257 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1261 struct l2cap_ecred_conn_data {
1263 struct l2cap_ecred_conn_req req;
1266 struct l2cap_chan *chan;
1271 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1273 struct l2cap_ecred_conn_data *conn = data;
1276 if (chan == conn->chan)
1279 if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1282 pid = chan->ops->get_peer_pid(chan);
1284 /* Only add deferred channels with the same PID/PSM */
1285 if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1286 chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1289 if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1292 l2cap_ecred_init(chan, 0);
1294 /* Set the same ident so we can match on the rsp */
1295 chan->ident = conn->chan->ident;
1297 /* Include all channels deferred */
1298 conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
1303 static void l2cap_ecred_connect(struct l2cap_chan *chan)
1305 struct l2cap_conn *conn = chan->conn;
1306 struct l2cap_ecred_conn_data data;
1308 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
1311 if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1314 l2cap_ecred_init(chan, 0);
1316 memset(&data, 0, sizeof(data));
1317 data.pdu.req.psm = chan->psm;
1318 data.pdu.req.mtu = cpu_to_le16(chan->imtu);
1319 data.pdu.req.mps = cpu_to_le16(chan->mps);
1320 data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
1321 data.pdu.scid[0] = cpu_to_le16(chan->scid);
1323 chan->ident = l2cap_get_ident(conn);
1327 data.pid = chan->ops->get_peer_pid(chan);
1329 __l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);
1331 l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
1332 sizeof(data.pdu.req) + data.count * sizeof(__le16),
1336 static void l2cap_le_start(struct l2cap_chan *chan)
1338 struct l2cap_conn *conn = chan->conn;
1340 if (!smp_conn_security(conn->hcon, chan->sec_level))
1344 l2cap_chan_ready(chan);
1348 if (chan->state == BT_CONNECT) {
1349 if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1350 l2cap_ecred_connect(chan);
1352 l2cap_le_connect(chan);
1356 static void l2cap_start_connection(struct l2cap_chan *chan)
1358 if (chan->conn->hcon->type == LE_LINK) {
1359 l2cap_le_start(chan);
1361 l2cap_send_conn_req(chan);
1365 static void l2cap_request_info(struct l2cap_conn *conn)
1367 struct l2cap_info_req req;
1369 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1372 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1374 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1375 conn->info_ident = l2cap_get_ident(conn);
1377 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1379 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Return true when the link's encryption key size is acceptable, or when the
 * link is not encrypted at all (no key-size requirement applies then).
 */
1383 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1385 /* The minimum encryption key size needs to be enforced by the
1386 * host stack before establishing any L2CAP connections. The
1387 * specification in theory allows a minimum of 1, but to align
1388 * BR/EDR and LE transports, a minimum of 7 is chosen.
1390 * This check might also be called for unencrypted connections
1391 * that have no key size requirements. Ensure that the link is
1392 * actually encrypted before enforcing a key size.
1394 int min_key_size = hcon->hdev->min_enc_key_size;
1396 /* On FIPS security level, key size must be 16 bytes */
1397 if (hcon->sec_level == BT_SECURITY_FIPS)
1400 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1401 hcon->enc_key_size >= min_key_size);
/* Drive channel setup on an existing link.  LE links are delegated to
 * l2cap_le_start(); BR/EDR links first require the remote feature mask,
 * then security, then the key-size check before the connect request goes
 * out.  (Early-return lines appear elided in this listing.)
 */
1404 static void l2cap_do_start(struct l2cap_chan *chan)
1406 struct l2cap_conn *conn = chan->conn;
1408 if (conn->hcon->type == LE_LINK) {
1409 l2cap_le_start(chan);
/* Feature-mask exchange must complete before any BR/EDR connect. */
1413 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1414 l2cap_request_info(conn);
1418 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1421 if (!l2cap_chan_check_security(chan, true) ||
1422 !__l2cap_no_conn_pending(chan))
/* Weak encryption key: do not connect; arm the disconnect timer instead. */
1425 if (l2cap_check_enc_key_size(conn->hcon))
1426 l2cap_start_connection(chan);
1428 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Test whether @mode is supported by both the local feature mask and the
 * remote @feat_mask.  ERTM/streaming bits are forced on locally here.
 * Returns the matching feature bit, i.e. non-zero when supported.
 */
1431 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1433 u32 local_feat_mask = l2cap_feat_mask;
1435 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1438 case L2CAP_MODE_ERTM:
1439 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1440 case L2CAP_MODE_STREAMING:
1441 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send an L2CAP Disconnect Request for @chan and move it to BT_DISCONN with
 * @err recorded.  For a connected ERTM channel all ERTM timers are stopped
 * first so nothing retransmits into the closing channel.
 */
1447 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1449 struct l2cap_conn *conn = chan->conn;
1450 struct l2cap_disconn_req req;
1455 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1456 __clear_retrans_timer(chan);
1457 __clear_monitor_timer(chan);
1458 __clear_ack_timer(chan);
1461 req.dcid = cpu_to_le16(chan->dcid);
1462 req.scid = cpu_to_le16(chan->scid);
1463 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1466 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1469 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and advance its state machine: mark
 * non-connection-oriented channels ready, (re)issue connect requests for
 * BT_CONNECT channels, and answer pending incoming requests for BT_CONNECT2
 * channels.  Runs under conn->chan_lock with each channel locked in turn.
 * (continue/brace lines appear elided in this listing.)
 */
1470 static void l2cap_conn_start(struct l2cap_conn *conn)
1472 struct l2cap_chan *chan, *tmp;
1474 BT_DBG("conn %p", conn);
1476 mutex_lock(&conn->chan_lock);
1478 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1479 l2cap_chan_lock(chan);
/* Fixed/connectionless channels need no setup handshake. */
1481 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1482 l2cap_chan_ready(chan);
1483 l2cap_chan_unlock(chan);
1487 if (chan->state == BT_CONNECT) {
1488 if (!l2cap_chan_check_security(chan, true) ||
1489 !__l2cap_no_conn_pending(chan)) {
1490 l2cap_chan_unlock(chan);
/* Close channels whose configured mode the remote cannot do. */
1494 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1495 && test_bit(CONF_STATE2_DEVICE,
1496 &chan->conf_state)) {
1497 l2cap_chan_close(chan, ECONNRESET);
1498 l2cap_chan_unlock(chan);
1502 if (l2cap_check_enc_key_size(conn->hcon))
1503 l2cap_start_connection(chan);
1505 l2cap_chan_close(chan, ECONNREFUSED);
/* BT_CONNECT2: a remote connect request is waiting for our response. */
1507 } else if (chan->state == BT_CONNECT2) {
1508 struct l2cap_conn_rsp rsp;
1510 rsp.scid = cpu_to_le16(chan->dcid);
1511 rsp.dcid = cpu_to_le16(chan->scid);
1513 if (l2cap_chan_check_security(chan, false)) {
1514 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1515 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1516 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1517 chan->ops->defer(chan);
1520 l2cap_state_change(chan, BT_CONFIG);
1521 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1522 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1525 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1526 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1529 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only a successful response not yet configured triggers a config req. */
1532 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1533 rsp.result != L2CAP_CR_SUCCESS) {
1534 l2cap_chan_unlock(chan);
1538 set_bit(CONF_REQ_SENT, &chan->conf_state);
1539 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1540 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1541 chan->num_conf_req++;
1544 l2cap_chan_unlock(chan);
1547 mutex_unlock(&conn->chan_lock);
/* LE link came up: ensure pending pairing security is applied and, as
 * peripheral, request a connection parameter update if the current interval
 * is outside the configured min/max window.
 */
1550 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1552 struct hci_conn *hcon = conn->hcon;
1553 struct hci_dev *hdev = hcon->hdev;
1555 BT_DBG("%s conn %p", hdev->name, conn);
1557 /* For outgoing pairing which doesn't necessarily have an
1558 * associated socket (e.g. mgmt_pair_device).
1561 smp_conn_security(hcon, hcon->pending_sec_level);
1563 /* For LE peripheral connections, make sure the connection interval
1564 * is in the range of the minimum and maximum interval that has
1565 * been configured for this connection. If not, then trigger
1566 * the connection update procedure.
1568 if (hcon->role == HCI_ROLE_SLAVE &&
1569 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1570 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1571 struct l2cap_conn_param_update_req req;
1573 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1574 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1575 req.latency = cpu_to_le16(hcon->le_conn_latency);
1576 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1578 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1579 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
/* Link-layer connection is ready: request the BR/EDR feature mask, then walk
 * all channels and start or complete their setup per transport and state.
 * Finally run LE-specific post-connect work and kick the pending-RX worker.
 */
1583 static void l2cap_conn_ready(struct l2cap_conn *conn)
1585 struct l2cap_chan *chan;
1586 struct hci_conn *hcon = conn->hcon;
1588 BT_DBG("conn %p", conn);
1590 if (hcon->type == ACL_LINK)
1591 l2cap_request_info(conn);
1593 mutex_lock(&conn->chan_lock);
1595 list_for_each_entry(chan, &conn->chan_l, list) {
1597 l2cap_chan_lock(chan);
1599 if (hcon->type == LE_LINK) {
1600 l2cap_le_start(chan);
/* Fixed channels become ready once the feature exchange completed. */
1601 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1602 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1603 l2cap_chan_ready(chan);
1604 } else if (chan->state == BT_CONNECT) {
1605 l2cap_do_start(chan);
1608 l2cap_chan_unlock(chan);
1611 mutex_unlock(&conn->chan_lock);
1613 if (hcon->type == LE_LINK)
1614 l2cap_le_conn_ready(conn);
/* Process any frames that were queued while the link was coming up. */
1616 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1619 /* Notify sockets that we cannot guarantee reliability anymore */
/* Propagate @err to every channel on @conn that demanded reliability
 * (FLAG_FORCE_RELIABLE), informing its socket that delivery guarantees
 * are no longer met.
 */
1620 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1622 struct l2cap_chan *chan;
1624 BT_DBG("conn %p", conn);
1626 mutex_lock(&conn->chan_lock);
1628 list_for_each_entry(chan, &conn->chan_l, list) {
1629 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1630 l2cap_chan_set_err(chan, err);
1633 mutex_unlock(&conn->chan_lock);
/* Info-request timer expired without a response: treat the feature-mask
 * exchange as done and proceed with channel setup anyway.
 */
1636 static void l2cap_info_timeout(struct work_struct *work)
1638 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1641 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1642 conn->info_ident = 0;
1644 l2cap_conn_start(conn);
1649 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1650 * callback is called during registration. The ->remove callback is called
1651 * during unregistration.
1652 * An l2cap_user object can either be explicitly unregistered or when the
1653 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1654 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1655 * External modules must own a reference to the l2cap_conn object if they intend
1656 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1657 * any time if they don't.
/* Register an external l2cap_user on @conn under the hci_dev lock.
 * Fails if the user is already registered, if the conn has been deleted
 * (conn->hchan == NULL), or if the user's ->probe() callback rejects it.
 * Returns 0 on success, negative errno otherwise (error paths elided in
 * this listing).
 */
1660 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1662 struct hci_dev *hdev = conn->hcon->hdev;
1665 /* We need to check whether l2cap_conn is registered. If it is not, we
1666 * must not register the l2cap_user. l2cap_conn_del() unregisters
1667 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1668 * relies on the parent hci_conn object to be locked. This itself relies
1669 * on the hci_dev object to be locked. So we must lock the hci device
1674 if (!list_empty(&user->list)) {
1679 /* conn->hchan is NULL after l2cap_conn_del() was called */
1685 ret = user->probe(conn, user);
1689 list_add(&user->list, &conn->users);
1693 hci_dev_unlock(hdev);
/* Explicitly unregister an l2cap_user from @conn: unlink it and invoke its
 * ->remove() callback, all under the hci_dev lock.  A user that is not on
 * any list (already removed) is ignored.
 */
1698 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1700 struct hci_dev *hdev = conn->hcon->hdev;
1704 if (list_empty(&user->list))
1707 list_del_init(&user->list);
1708 user->remove(conn, user);
1711 hci_dev_unlock(hdev);
/* Remove every registered l2cap_user from @conn, calling each ->remove()
 * callback.  Used from the connection teardown path.
 */
1715 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1717 struct l2cap_user *user;
1719 while (!list_empty(&conn->users)) {
1720 user = list_first_entry(&conn->users, struct l2cap_user, list);
1721 list_del_init(&user->list);
1722 user->remove(conn, user);
/* Tear down the L2CAP connection attached to @hcon: flush pending RX, stop
 * timers, unregister users, close every channel with @err, detach the HCI
 * channel and drop the conn reference.  Caller holds the hci_conn/hci_dev
 * locking this path relies on (see l2cap_register_user).
 */
1726 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1728 struct l2cap_conn *conn = hcon->l2cap_data;
1729 struct l2cap_chan *chan, *l;
1734 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Drop any partially reassembled frame and all queued-but-unprocessed RX. */
1736 kfree_skb(conn->rx_skb);
1738 skb_queue_purge(&conn->pending_rx);
1740 /* We can not call flush_work(&conn->pending_rx_work) here since we
1741 * might block if we are running on a worker from the same workqueue
1742 * pending_rx_work is waiting on.
1744 if (work_pending(&conn->pending_rx_work))
1745 cancel_work_sync(&conn->pending_rx_work);
1747 cancel_delayed_work_sync(&conn->id_addr_timer);
1749 l2cap_unregister_all_users(conn);
1751 /* Force the connection to be immediately dropped */
1752 hcon->disc_timeout = 0;
1754 mutex_lock(&conn->chan_lock);
1757 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
/* Hold a ref across del/close so the channel cannot vanish underneath. */
1758 l2cap_chan_hold(chan);
1759 l2cap_chan_lock(chan);
1761 l2cap_chan_del(chan, err);
1763 chan->ops->close(chan);
1765 l2cap_chan_unlock(chan);
1766 l2cap_chan_put(chan);
1769 mutex_unlock(&conn->chan_lock);
1771 hci_chan_del(conn->hchan);
1773 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1774 cancel_delayed_work_sync(&conn->info_timer);
1776 hcon->l2cap_data = NULL;
1778 l2cap_conn_put(conn);
/* kref release callback: drop the hci_conn reference held by the l2cap_conn
 * (and free the conn itself — freeing line elided in this listing).
 */
1781 static void l2cap_conn_free(struct kref *ref)
1783 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1785 hci_conn_put(conn->hcon);
/* Take a reference on @conn and return it (kref-based refcounting). */
1789 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1791 kref_get(&conn->ref);
/* Drop a reference on @conn; l2cap_conn_free() runs when it hits zero. */
1796 void l2cap_conn_put(struct l2cap_conn *conn)
1798 kref_put(&conn->ref, l2cap_conn_free);
1802 /* ---- Socket interface ---- */
1804 /* Find socket with psm and source / destination bdaddr.
1805 * Returns closest match.
/* Look up a global channel by PSM and source/destination address for the
 * given link type.  An exact src+dst match wins immediately; otherwise the
 * closest wildcard (BDADDR_ANY) match found is returned.  The returned
 * channel carries a reference (hold_unless_zero) or is NULL.
 */
1807 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1812 struct l2cap_chan *c, *tmp, *c1 = NULL;
1814 read_lock(&chan_list_lock);
1816 list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
1817 if (state && c->state != state)
/* Filter out channels whose source address type mismatches the link. */
1820 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1823 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1826 if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
1827 int src_match, dst_match;
1828 int src_any, dst_any;
1831 src_match = !bacmp(&c->src, src);
1832 dst_match = !bacmp(&c->dst, dst);
1833 if (src_match && dst_match) {
/* Exact match: take a ref and return it right away. */
1834 if (!l2cap_chan_hold_unless_zero(c))
1837 read_unlock(&chan_list_lock);
1842 src_any = !bacmp(&c->src, BDADDR_ANY);
1843 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1844 if ((src_match && dst_any) || (src_any && dst_match) ||
1845 (src_any && dst_any))
1851 c1 = l2cap_chan_hold_unless_zero(c1);
1853 read_unlock(&chan_list_lock);
/* ERTM monitor timer work: feed a MONITOR_TO event into the TX state
 * machine.  The early unlock/put pair handles a channel that is no longer
 * eligible (guard condition elided in this listing); the trailing pair
 * balances the reference taken when the timer was armed.
 */
1858 static void l2cap_monitor_timeout(struct work_struct *work)
1860 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1861 monitor_timer.work);
1863 BT_DBG("chan %p", chan);
1865 l2cap_chan_lock(chan);
1868 l2cap_chan_unlock(chan);
1869 l2cap_chan_put(chan);
1873 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1875 l2cap_chan_unlock(chan);
1876 l2cap_chan_put(chan);
/* ERTM retransmission timer work: feed a RETRANS_TO event into the TX state
 * machine.  Mirrors l2cap_monitor_timeout(); the put balances the reference
 * taken when the timer was armed.
 */
1879 static void l2cap_retrans_timeout(struct work_struct *work)
1881 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1882 retrans_timer.work);
1884 BT_DBG("chan %p", chan);
1886 l2cap_chan_lock(chan);
1889 l2cap_chan_unlock(chan);
1890 l2cap_chan_put(chan);
1894 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1895 l2cap_chan_unlock(chan);
1896 l2cap_chan_put(chan);
/* Streaming-mode transmit: append @skbs to the TX queue and drain it,
 * stamping each frame's control field with the next TX sequence number and
 * appending an FCS when CRC16 is configured.  No retransmission state is
 * kept — streaming mode is fire-and-forget.
 */
1899 static void l2cap_streaming_send(struct l2cap_chan *chan,
1900 struct sk_buff_head *skbs)
1902 struct sk_buff *skb;
1903 struct l2cap_ctrl *control;
1905 BT_DBG("chan %p, skbs %p", chan, skbs);
1907 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1909 while (!skb_queue_empty(&chan->tx_q)) {
1911 skb = skb_dequeue(&chan->tx_q);
1913 bt_cb(skb)->l2cap.retries = 1;
1914 control = &bt_cb(skb)->l2cap;
1916 control->reqseq = 0;
1917 control->txseq = chan->next_tx_seq;
1919 __pack_control(chan, control, skb);
1921 if (chan->fcs == L2CAP_FCS_CRC16) {
1922 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1923 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1926 l2cap_do_send(chan, skb);
1928 BT_DBG("Sent txseq %u", control->txseq);
1930 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1931 chan->frames_sent++;
/* ERTM transmit: send queued I-frames while the remote TX window has room
 * and the TX state machine is in XMIT.  Each frame is cloned before sending
 * so the original stays queued for possible retransmission.  Returns the
 * number of frames sent (early-exit lines elided in this listing).
 */
1935 static int l2cap_ertm_send(struct l2cap_chan *chan)
1937 struct sk_buff *skb, *tx_skb;
1938 struct l2cap_ctrl *control;
1941 BT_DBG("chan %p", chan);
1943 if (chan->state != BT_CONNECTED)
1946 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1949 while (chan->tx_send_head &&
1950 chan->unacked_frames < chan->remote_tx_win &&
1951 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1953 skb = chan->tx_send_head;
1955 bt_cb(skb)->l2cap.retries = 1;
1956 control = &bt_cb(skb)->l2cap;
/* Piggyback the F-bit if one is pending. */
1958 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Each I-frame also acknowledges everything up to buffer_seq. */
1961 control->reqseq = chan->buffer_seq;
1962 chan->last_acked_seq = chan->buffer_seq;
1963 control->txseq = chan->next_tx_seq;
1965 __pack_control(chan, control, skb);
1967 if (chan->fcs == L2CAP_FCS_CRC16) {
1968 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1969 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1972 /* Clone after data has been modified. Data is assumed to be
1973 read-only (for locking purposes) on cloned sk_buffs.
1975 tx_skb = skb_clone(skb, GFP_KERNEL);
1980 __set_retrans_timer(chan);
1982 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1983 chan->unacked_frames++;
1984 chan->frames_sent++;
1987 if (skb_queue_is_last(&chan->tx_q, skb))
1988 chan->tx_send_head = NULL;
1990 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1992 l2cap_do_send(chan, tx_skb);
1993 BT_DBG("Sent txseq %u", control->txseq);
1996 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1997 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on chan->retrans_list.  Frames
 * that exceeded max_tx trigger a disconnect; cloned skbs are deep-copied
 * before their control/FCS fields are rewritten, since clone data is
 * treated as read-only.
 */
2002 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2004 struct l2cap_ctrl control;
2005 struct sk_buff *skb;
2006 struct sk_buff *tx_skb;
2009 BT_DBG("chan %p", chan);
2011 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2014 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2015 seq = l2cap_seq_list_pop(&chan->retrans_list);
2017 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2019 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2024 bt_cb(skb)->l2cap.retries++;
2025 control = bt_cb(skb)->l2cap;
/* Give up and tear the channel down once the retry budget is spent. */
2027 if (chan->max_tx != 0 &&
2028 bt_cb(skb)->l2cap.retries > chan->max_tx) {
2029 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2030 l2cap_send_disconn_req(chan, ECONNRESET);
2031 l2cap_seq_list_clear(&chan->retrans_list);
2035 control.reqseq = chan->buffer_seq;
2036 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2041 if (skb_cloned(skb)) {
2042 /* Cloned sk_buffs are read-only, so we need a
2045 tx_skb = skb_copy(skb, GFP_KERNEL);
2047 tx_skb = skb_clone(skb, GFP_KERNEL);
2051 l2cap_seq_list_clear(&chan->retrans_list);
2055 /* Update skb contents */
2056 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2057 put_unaligned_le32(__pack_extended_control(&control),
2058 tx_skb->data + L2CAP_HDR_SIZE);
2060 put_unaligned_le16(__pack_enhanced_control(&control),
2061 tx_skb->data + L2CAP_HDR_SIZE);
/* Recompute the FCS over the rewritten header + payload. */
2065 if (chan->fcs == L2CAP_FCS_CRC16) {
2066 u16 fcs = crc16(0, (u8 *) tx_skb->data,
2067 tx_skb->len - L2CAP_FCS_SIZE);
2068 put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2072 l2cap_do_send(chan, tx_skb);
2074 BT_DBG("Resent txseq %d", control.txseq);
2076 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame identified by control->reqseq. */
2080 static void l2cap_retransmit(struct l2cap_chan *chan,
2081 struct l2cap_ctrl *control)
2083 BT_DBG("chan %p, control %p", chan, control);
2085 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2086 l2cap_ertm_resend(chan);
/* Retransmit every unacknowledged frame starting at control->reqseq: walk
 * the TX queue to the first matching frame, then queue all frames up to
 * tx_send_head for resend.
 */
2089 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2090 struct l2cap_ctrl *control)
2092 struct sk_buff *skb;
2094 BT_DBG("chan %p, control %p", chan, control);
2097 set_bit(CONN_SEND_FBIT, &chan->conn_state);
/* Start from a clean retransmit list. */
2099 l2cap_seq_list_clear(&chan->retrans_list);
2101 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2104 if (chan->unacked_frames) {
2105 skb_queue_walk(&chan->tx_q, skb) {
2106 if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2107 skb == chan->tx_send_head)
2111 skb_queue_walk_from(&chan->tx_q, skb) {
2112 if (skb == chan->tx_send_head)
2115 l2cap_seq_list_append(&chan->retrans_list,
2116 bt_cb(skb)->l2cap.txseq);
2119 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames.  When locally busy send RNR; otherwise try
 * to piggyback the ack on outgoing I-frames, send an explicit RR once 3/4
 * of the ack window is pending, or just (re)arm the ack timer.
 */
2123 static void l2cap_send_ack(struct l2cap_chan *chan)
2125 struct l2cap_ctrl control;
2126 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2127 chan->last_acked_seq);
2130 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2131 chan, chan->last_acked_seq, chan->buffer_seq);
2133 memset(&control, 0, sizeof(control));
2136 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2137 chan->rx_state == L2CAP_RX_STATE_RECV) {
2138 __clear_ack_timer(chan);
2139 control.super = L2CAP_SUPER_RNR;
2140 control.reqseq = chan->buffer_seq;
2141 l2cap_send_sframe(chan, &control);
2143 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2144 l2cap_ertm_send(chan);
2145 /* If any i-frames were sent, they included an ack */
2146 if (chan->buffer_seq == chan->last_acked_seq)
2150 /* Ack now if the window is 3/4ths full.
2151 * Calculate without mul or div
/* threshold = ack_win * 3 (compared against 4 * frames), via shift+add. */
2153 threshold = chan->ack_win;
2154 threshold += threshold << 1;
2157 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2160 if (frames_to_ack >= threshold) {
2161 __clear_ack_timer(chan);
2162 control.super = L2CAP_SUPER_RR;
2163 control.reqseq = chan->buffer_seq;
2164 l2cap_send_sframe(chan, &control);
2169 __set_ack_timer(chan);
/* Copy @len bytes of user data from @msg into @skb; the first @count bytes
 * go into skb's linear area and the remainder is split into frag_list
 * continuation skbs of at most conn->mtu each.  Returns 0 on success or a
 * negative errno (some error/loop lines elided in this listing).
 */
2173 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2174 struct msghdr *msg, int len,
2175 int count, struct sk_buff *skb)
2177 struct l2cap_conn *conn = chan->conn;
2178 struct sk_buff **frag;
2181 if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2187 /* Continuation fragments (no L2CAP header) */
2188 frag = &skb_shinfo(skb)->frag_list;
2190 struct sk_buff *tmp;
2192 count = min_t(unsigned int, conn->mtu, len);
2194 tmp = chan->ops->alloc_skb(chan, 0, count,
2195 msg->msg_flags & MSG_DONTWAIT);
2197 return PTR_ERR(tmp);
2201 if (!copy_from_iter_full(skb_put(*frag, count), count,
/* Account fragment bytes in the head skb's totals. */
2208 skb->len += (*frag)->len;
2209 skb->data_len += (*frag)->len;
2211 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header + 2-byte PSM
 * followed by @len bytes from @msg.  Returns the skb or an ERR_PTR.
 */
2217 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2218 struct msghdr *msg, size_t len)
2220 struct l2cap_conn *conn = chan->conn;
2221 struct sk_buff *skb;
2222 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2223 struct l2cap_hdr *lh;
2225 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2226 __le16_to_cpu(chan->psm), len);
/* First fragment is bounded by the HCI MTU minus our header. */
2228 count = min_t(unsigned int, (conn->mtu - hlen), len);
2230 skb = chan->ops->alloc_skb(chan, hlen, count,
2231 msg->msg_flags & MSG_DONTWAIT);
2235 /* Create L2CAP header */
2236 lh = skb_put(skb, L2CAP_HDR_SIZE);
2237 lh->cid = cpu_to_le16(chan->dcid);
2238 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2239 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2241 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2242 if (unlikely(err < 0)) {
2244 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by @len
 * bytes of user data from @msg.  Returns the skb or an ERR_PTR.
 */
2249 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2250 struct msghdr *msg, size_t len)
2252 struct l2cap_conn *conn = chan->conn;
2253 struct sk_buff *skb;
2255 struct l2cap_hdr *lh;
2257 BT_DBG("chan %p len %zu", chan, len);
2259 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2261 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2262 msg->msg_flags & MSG_DONTWAIT);
2266 /* Create L2CAP header */
2267 lh = skb_put(skb, L2CAP_HDR_SIZE);
2268 lh->cid = cpu_to_le16(chan->dcid);
2269 lh->len = cpu_to_le16(len);
2271 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2272 if (unlikely(err < 0)) {
2274 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, zeroed control field
 * (filled in at transmit time), optional SDU-length field for the first
 * segment, payload, and room reserved for an FCS.  Returns the skb or an
 * ERR_PTR.
 */
2279 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2280 struct msghdr *msg, size_t len,
2283 struct l2cap_conn *conn = chan->conn;
2284 struct sk_buff *skb;
2285 int err, count, hlen;
2286 struct l2cap_hdr *lh;
2288 BT_DBG("chan %p len %zu", chan, len);
2291 return ERR_PTR(-ENOTCONN);
/* Header size depends on enhanced vs extended control field. */
2293 hlen = __ertm_hdr_size(chan);
2296 hlen += L2CAP_SDULEN_SIZE;
2298 if (chan->fcs == L2CAP_FCS_CRC16)
2299 hlen += L2CAP_FCS_SIZE;
2301 count = min_t(unsigned int, (conn->mtu - hlen), len);
2303 skb = chan->ops->alloc_skb(chan, hlen, count,
2304 msg->msg_flags & MSG_DONTWAIT);
2308 /* Create L2CAP header */
2309 lh = skb_put(skb, L2CAP_HDR_SIZE);
2310 lh->cid = cpu_to_le16(chan->dcid);
2311 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2313 /* Control header is populated later */
2314 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2315 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2317 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2320 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2322 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2323 if (unlikely(err < 0)) {
2325 return ERR_PTR(err);
2328 bt_cb(skb)->l2cap.fcs = chan->fcs;
2329 bt_cb(skb)->l2cap.retries = 0;
/* Segment an SDU from @msg into ERTM/streaming I-frame PDUs queued on
 * @seg_queue, computing the maximum PDU size from the HCI MTU, header
 * overhead and the remote MPS, and tagging each segment with the proper
 * SAR value (UNSEGMENTED, or START/CONTINUE/END).
 */
2333 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2334 struct sk_buff_head *seg_queue,
2335 struct msghdr *msg, size_t len)
2337 struct sk_buff *skb;
2342 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2344 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2345 * so fragmented skbs are not used. The HCI layer's handling
2346 * of fragmented skbs is not compatible with ERTM's queueing.
2349 /* PDU size is derived from the HCI MTU */
2350 pdu_len = chan->conn->mtu;
2352 /* Constrain PDU size for BR/EDR connections */
2353 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2355 /* Adjust for largest possible L2CAP overhead. */
2357 pdu_len -= L2CAP_FCS_SIZE;
2359 pdu_len -= __ertm_hdr_size(chan);
2361 /* Remote device may have requested smaller PDUs */
2362 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2364 if (len <= pdu_len) {
2365 sar = L2CAP_SAR_UNSEGMENTED;
2369 sar = L2CAP_SAR_START;
2374 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2377 __skb_queue_purge(seg_queue);
2378 return PTR_ERR(skb);
2381 bt_cb(skb)->l2cap.sar = sar;
2382 __skb_queue_tail(seg_queue, skb);
/* Last remaining chunk becomes the END segment. */
2388 if (len <= pdu_len) {
2389 sar = L2CAP_SAR_END;
2392 sar = L2CAP_SAR_CONTINUE;
/* Build an LE credit-based flow-control PDU (K-frame): L2CAP header,
 * optional SDU length on the first segment, then @len payload bytes from
 * @msg.  Returns the skb or an ERR_PTR.
 */
2399 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2401 size_t len, u16 sdulen)
2403 struct l2cap_conn *conn = chan->conn;
2404 struct sk_buff *skb;
2405 int err, count, hlen;
2406 struct l2cap_hdr *lh;
2408 BT_DBG("chan %p len %zu", chan, len);
2411 return ERR_PTR(-ENOTCONN);
2413 hlen = L2CAP_HDR_SIZE;
2416 hlen += L2CAP_SDULEN_SIZE;
2418 count = min_t(unsigned int, (conn->mtu - hlen), len);
2420 skb = chan->ops->alloc_skb(chan, hlen, count,
2421 msg->msg_flags & MSG_DONTWAIT);
2425 /* Create L2CAP header */
2426 lh = skb_put(skb, L2CAP_HDR_SIZE);
2427 lh->cid = cpu_to_le16(chan->dcid);
2428 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2431 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2433 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2434 if (unlikely(err < 0)) {
2436 return ERR_PTR(err);
/* Segment an SDU into LE flow-control PDUs on @seg_queue.  Only the first
 * segment carries the SDU length field, so subsequent segments gain
 * L2CAP_SDULEN_SIZE bytes of payload capacity.
 */
2442 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2443 struct sk_buff_head *seg_queue,
2444 struct msghdr *msg, size_t len)
2446 struct sk_buff *skb;
2450 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
/* First segment: reserve room for the 2-byte SDU length field. */
2453 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2459 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2461 __skb_queue_purge(seg_queue);
2462 return PTR_ERR(skb);
2465 __skb_queue_tail(seg_queue, skb);
/* Later segments carry no SDU length field. */
2471 pdu_len += L2CAP_SDULEN_SIZE;
/* Drain the TX queue while credits remain, one queued PDU per credit. */
2478 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2482 BT_DBG("chan %p", chan);
2484 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2485 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2490 BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2491 skb_queue_len(&chan->tx_q));
/* Send @len bytes from @msg on @chan, dispatching on channel type and mode:
 * connectionless, LE/extended flow control (credit based), basic, or
 * ERTM/streaming.  Returns the number of bytes accepted or a negative
 * errno.  (Return/err lines appear elided in this listing.)
 */
2494 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2496 struct sk_buff *skb;
2498 struct sk_buff_head seg_queue;
2503 /* Connectionless channel */
2504 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2505 skb = l2cap_create_connless_pdu(chan, msg, len);
2507 return PTR_ERR(skb);
2509 l2cap_do_send(chan, skb);
2513 switch (chan->mode) {
2514 case L2CAP_MODE_LE_FLOWCTL:
2515 case L2CAP_MODE_EXT_FLOWCTL:
2516 /* Check outgoing MTU */
2517 if (len > chan->omtu)
2520 __skb_queue_head_init(&seg_queue);
2522 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
/* Channel may have gone down while segmenting (alloc can sleep). */
2524 if (chan->state != BT_CONNECTED) {
2525 __skb_queue_purge(&seg_queue);
2532 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2534 l2cap_le_flowctl_send(chan);
/* Out of credits: ask the socket layer to stop feeding us data. */
2536 if (!chan->tx_credits)
2537 chan->ops->suspend(chan);
2543 case L2CAP_MODE_BASIC:
2544 /* Check outgoing MTU */
2545 if (len > chan->omtu)
2548 /* Create a basic PDU */
2549 skb = l2cap_create_basic_pdu(chan, msg, len);
2551 return PTR_ERR(skb);
2553 l2cap_do_send(chan, skb);
2557 case L2CAP_MODE_ERTM:
2558 case L2CAP_MODE_STREAMING:
2559 /* Check outgoing MTU */
2560 if (len > chan->omtu) {
2565 __skb_queue_head_init(&seg_queue);
2567 /* Do segmentation before calling in to the state machine,
2568 * since it's possible to block while waiting for memory
2571 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2576 if (chan->mode == L2CAP_MODE_ERTM)
2577 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2579 l2cap_streaming_send(chan, &seg_queue);
2583 /* If the skbs were not queued for sending, they'll still be in
2584 * seg_queue and need to be purged.
2586 __skb_queue_purge(&seg_queue);
/* Fix: this switch is over chan->mode, so label the value as a mode. */
2590 BT_DBG("bad mode %1.1x", chan->mode);
/* Send SREJ S-frames for every sequence number between expected_tx_seq and
 * @txseq that is not already buffered in srej_q, recording each on
 * srej_list, then advance expected_tx_seq past @txseq.
 */
2598 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2600 struct l2cap_ctrl control;
2603 BT_DBG("chan %p, txseq %u", chan, txseq);
2605 memset(&control, 0, sizeof(control));
2607 control.super = L2CAP_SUPER_SREJ;
2609 for (seq = chan->expected_tx_seq; seq != txseq;
2610 seq = __next_seq(chan, seq)) {
2611 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2612 control.reqseq = seq;
2613 l2cap_send_sframe(chan, &control);
2614 l2cap_seq_list_append(&chan->srej_list, seq);
2618 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the last (tail) entry on srej_list, if any. */
2621 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2623 struct l2cap_ctrl control;
2625 BT_DBG("chan %p", chan);
2627 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2630 memset(&control, 0, sizeof(control));
2632 control.super = L2CAP_SUPER_SREJ;
2633 control.reqseq = chan->srej_list.tail;
2634 l2cap_send_sframe(chan, &control);
/* Re-send SREJs for every outstanding entry on srej_list except @txseq,
 * rotating entries back onto the list; the captured initial head bounds the
 * loop to a single pass.
 */
2637 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2639 struct l2cap_ctrl control;
2643 BT_DBG("chan %p, txseq %u", chan, txseq);
2645 memset(&control, 0, sizeof(control));
2647 control.super = L2CAP_SUPER_SREJ;
2649 /* Capture initial list head to allow only one pass through the list. */
2650 initial_head = chan->srej_list.head;
2653 seq = l2cap_seq_list_pop(&chan->srej_list);
/* @txseq was just received, so it no longer needs an SREJ. */
2654 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2657 control.reqseq = seq;
2658 l2cap_send_sframe(chan, &control);
2659 l2cap_seq_list_append(&chan->srej_list, seq);
2660 } while (chan->srej_list.head != initial_head);
/* Process an incoming acknowledgement: free every TX-queued frame with a
 * sequence number before @reqseq, update expected_ack_seq, and stop the
 * retransmission timer once nothing is left unacked.
 */
2663 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2665 struct sk_buff *acked_skb;
2668 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2670 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2673 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2674 chan->expected_ack_seq, chan->unacked_frames);
2676 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2677 ackseq = __next_seq(chan, ackseq)) {
2679 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2681 skb_unlink(acked_skb, &chan->tx_q);
2682 kfree_skb(acked_skb);
2683 chan->unacked_frames--;
2687 chan->expected_ack_seq = reqseq;
2689 if (chan->unacked_frames == 0)
2690 __clear_retrans_timer(chan);
2692 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abandon the SREJ_SENT receive state: reset the expected sequence number,
 * drop all SREJ bookkeeping and buffered out-of-order frames, and return
 * the RX state machine to RECV.
 */
2695 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2697 BT_DBG("chan %p", chan);
2699 chan->expected_tx_seq = chan->buffer_seq;
2700 l2cap_seq_list_clear(&chan->srej_list);
2701 skb_queue_purge(&chan->srej_q);
2702 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM TX state machine, XMIT state: handle data requests, local busy
 * transitions, incoming acks, explicit polls and retransmission timeouts.
 * Poll events switch the machine to WAIT_F.  (break lines are elided in
 * this listing.)
 */
2705 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2706 struct l2cap_ctrl *control,
2707 struct sk_buff_head *skbs, u8 event)
2709 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2713 case L2CAP_EV_DATA_REQUEST:
2714 if (chan->tx_send_head == NULL)
2715 chan->tx_send_head = skb_peek(skbs);
2717 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2718 l2cap_ertm_send(chan);
2720 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2721 BT_DBG("Enter LOCAL_BUSY");
2722 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2724 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2725 /* The SREJ_SENT state must be aborted if we are to
2726 * enter the LOCAL_BUSY state.
2728 l2cap_abort_rx_srej_sent(chan);
/* l2cap_send_ack() emits the RNR while locally busy. */
2731 l2cap_send_ack(chan);
2734 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2735 BT_DBG("Exit LOCAL_BUSY");
2736 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* If we sent RNR earlier, poll the peer with RR(P=1) and await F. */
2738 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2739 struct l2cap_ctrl local_control;
2741 memset(&local_control, 0, sizeof(local_control));
2742 local_control.sframe = 1;
2743 local_control.super = L2CAP_SUPER_RR;
2744 local_control.poll = 1;
2745 local_control.reqseq = chan->buffer_seq;
2746 l2cap_send_sframe(chan, &local_control);
2748 chan->retry_count = 1;
2749 __set_monitor_timer(chan);
2750 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2753 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2754 l2cap_process_reqseq(chan, control->reqseq);
2756 case L2CAP_EV_EXPLICIT_POLL:
2757 l2cap_send_rr_or_rnr(chan, 1);
2758 chan->retry_count = 1;
2759 __set_monitor_timer(chan);
2760 __clear_ack_timer(chan);
2761 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2763 case L2CAP_EV_RETRANS_TO:
2764 l2cap_send_rr_or_rnr(chan, 1);
2765 chan->retry_count = 1;
2766 __set_monitor_timer(chan);
2767 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2769 case L2CAP_EV_RECV_FBIT:
2770 /* Nothing to process */
/* ERTM TX state machine, WAIT_F state: data is queued but not sent while we
 * wait for an F-bit response to our poll.  Receiving the F-bit returns the
 * machine to XMIT; monitor timeouts re-poll until max_tx is exhausted, then
 * disconnect.  (break lines are elided in this listing.)
 */
2777 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2778 struct l2cap_ctrl *control,
2779 struct sk_buff_head *skbs, u8 event)
2781 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2785 case L2CAP_EV_DATA_REQUEST:
2786 if (chan->tx_send_head == NULL)
2787 chan->tx_send_head = skb_peek(skbs);
2788 /* Queue data, but don't send. */
2789 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2791 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2792 BT_DBG("Enter LOCAL_BUSY");
2793 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2795 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2796 /* The SREJ_SENT state must be aborted if we are to
2797 * enter the LOCAL_BUSY state.
2799 l2cap_abort_rx_srej_sent(chan);
2802 l2cap_send_ack(chan);
2805 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2806 BT_DBG("Exit LOCAL_BUSY");
2807 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2809 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2810 struct l2cap_ctrl local_control;
2811 memset(&local_control, 0, sizeof(local_control));
2812 local_control.sframe = 1;
2813 local_control.super = L2CAP_SUPER_RR;
2814 local_control.poll = 1;
2815 local_control.reqseq = chan->buffer_seq;
2816 l2cap_send_sframe(chan, &local_control);
2818 chan->retry_count = 1;
2819 __set_monitor_timer(chan);
2820 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2823 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2824 l2cap_process_reqseq(chan, control->reqseq);
2827 case L2CAP_EV_RECV_FBIT:
/* Poll answered: stop monitoring, resume retransmission timing, go XMIT. */
2828 if (control && control->final) {
2829 __clear_monitor_timer(chan);
2830 if (chan->unacked_frames > 0)
2831 __set_retrans_timer(chan);
2832 chan->retry_count = 0;
2833 chan->tx_state = L2CAP_TX_STATE_XMIT;
/* Fix: format was "0x2.2%x", printing literal "0x2.2" then hex;
 * "0x%2.2x" prints the state as two hex digits as intended.
 */
2834 BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2837 case L2CAP_EV_EXPLICIT_POLL:
2840 case L2CAP_EV_MONITOR_TO:
2841 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2842 l2cap_send_rr_or_rnr(chan, 1);
2843 __set_monitor_timer(chan);
2844 chan->retry_count++;
2846 l2cap_send_disconn_req(chan, ECONNABORTED);
/* Entry point of the ERTM transmit state machine: dispatch the event
 * to the handler for the channel's current tx_state.  Only two states
 * exist: XMIT (normal transmission) and WAIT_F (poll outstanding).
 */
2854 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2855 struct sk_buff_head *skbs, u8 event)
2857 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2858 chan, control, skbs, event, chan->tx_state);
2860 switch (chan->tx_state) {
2861 case L2CAP_TX_STATE_XMIT:
2862 l2cap_tx_state_xmit(chan, control, skbs, event);
2864 case L2CAP_TX_STATE_WAIT_F:
2865 l2cap_tx_state_wait_f(chan, control, skbs, event);
2873 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2874 struct l2cap_ctrl *control)
2876 BT_DBG("chan %p, control %p", chan, control);
2877 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2880 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2881 struct l2cap_ctrl *control)
2883 BT_DBG("chan %p, control %p", chan, control);
2884 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2887 /* Copy frame to all raw sockets on that connection */
/* Each matching raw channel receives its own clone of the skb; the
 * original skb is not consumed here.  Channels are walked under
 * conn->chan_lock.  (continue/error lines are elided in this extract.)
 */
2888 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2890 struct sk_buff *nskb;
2891 struct l2cap_chan *chan;
2893 BT_DBG("conn %p", conn);
2895 mutex_lock(&conn->chan_lock);
2897 list_for_each_entry(chan, &conn->chan_l, list) {
/* Only raw (e.g. monitor/HCI-raw style) channels are interested. */
2898 if (chan->chan_type != L2CAP_CHAN_RAW)
2901 /* Don't send frame to the channel it came from */
2902 if (bt_cb(skb)->l2cap.chan == chan)
2905 nskb = skb_clone(skb, GFP_KERNEL);
2908 if (chan->ops->recv(chan, nskb))
2912 mutex_unlock(&conn->chan_lock);
2915 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill an skb carrying one signalling command:
 * basic L2CAP header + command header + dlen bytes of payload.
 * Payload that does not fit within conn->mtu is carried in
 * continuation fragments chained on the skb's frag_list.
 * Returns the skb; allocation-failure/early-return lines are elided
 * in this extract.
 */
2916 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2917 u8 ident, u16 dlen, void *data)
2919 struct sk_buff *skb, **frag;
2920 struct l2cap_cmd_hdr *cmd;
2921 struct l2cap_hdr *lh;
2924 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2925 conn, code, ident, dlen);
/* The connection MTU must at least hold both headers. */
2927 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2930 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2931 count = min_t(unsigned int, conn->mtu, len);
2933 skb = bt_skb_alloc(count, GFP_KERNEL);
2937 lh = skb_put(skb, L2CAP_HDR_SIZE);
2938 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* The signalling CID differs between LE and BR/EDR links. */
2940 if (conn->hcon->type == LE_LINK)
2941 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2943 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2945 cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
2948 cmd->len = cpu_to_le16(dlen);
/* Copy as much payload as fits in the first skb. */
2951 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2952 skb_put_data(skb, data, count);
2958 /* Continuation fragments (no L2CAP header) */
2959 frag = &skb_shinfo(skb)->frag_list;
2961 count = min_t(unsigned int, conn->mtu, len);
2963 *frag = bt_skb_alloc(count, GFP_KERNEL);
2967 skb_put_data(*frag, data, count);
2972 frag = &(*frag)->next;
/* Decode one configuration option at *ptr into *type/*olen/*val and
 * advance *ptr past it.  1/2/4-byte option values are read inline
 * (unaligned-safe); for any other length *val is a pointer to the raw
 * option bytes.  Returns the number of bytes consumed.
 * (switch/advance lines are elided in this extract.)
 */
2982 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2985 struct l2cap_conf_opt *opt = *ptr;
2988 len = L2CAP_CONF_OPT_SIZE + opt->len;
2996 *val = *((u8 *) opt->val);
3000 *val = get_unaligned_le16(opt->val);
3004 *val = get_unaligned_le32(opt->val);
/* Variable-length option: hand back a pointer, not a copy. */
3008 *val = (unsigned long) opt->val;
3012 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option to the buffer at *ptr and advance
 * the pointer.  Writes nothing when the remaining buffer (size) cannot
 * hold the option - callers pass "endptr - ptr" so overlong responses
 * are silently truncated rather than overflowed.
 */
3016 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3018 struct l2cap_conf_opt *opt = *ptr;
3020 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
/* Refuse to overflow the destination buffer. */
3022 if (size < L2CAP_CONF_OPT_SIZE + len)
3030 *((u8 *) opt->val) = val;
3034 put_unaligned_le16(val, opt->val);
3038 put_unaligned_le32(val, opt->val);
/* For longer options 'val' carries a pointer to the payload. */
3042 memcpy(opt->val, (void *) val, len);
3046 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build an Extended Flow Specification (EFS) option from the channel's
 * local QoS parameters and append it via l2cap_add_conf_opt().  ERTM
 * advertises the negotiated local service type; streaming mode always
 * advertises best-effort.  (default/elided branches not shown.)
 */
3049 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3051 struct l2cap_conf_efs efs;
3053 switch (chan->mode) {
3054 case L2CAP_MODE_ERTM:
3055 efs.id = chan->local_id;
3056 efs.stype = chan->local_stype;
3057 efs.msdu = cpu_to_le16(chan->local_msdu);
3058 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
/* Access latency and flush timeout use spec defaults for ERTM. */
3059 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3060 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3063 case L2CAP_MODE_STREAMING:
3065 efs.stype = L2CAP_SERV_BESTEFFORT;
3066 efs.msdu = cpu_to_le16(chan->local_msdu);
3067 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3076 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3077 (unsigned long) &efs, size);
/* Deferred-acknowledgement work handler: if frames were received but
 * not yet acknowledged, send an RR (or RNR when locally busy) now.
 * Drops the channel reference taken when the work was scheduled.
 */
3080 static void l2cap_ack_timeout(struct work_struct *work)
3082 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3086 BT_DBG("chan %p", chan);
3088 l2cap_chan_lock(chan);
/* Frames received since the last acknowledgement we sent. */
3090 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3091 chan->last_acked_seq);
3094 l2cap_send_rr_or_rnr(chan, 0);
3096 l2cap_chan_unlock(chan);
3097 l2cap_chan_put(chan);
/* Reset all ERTM/streaming sequence state when a channel becomes
 * connected.  For ERTM proper, also initialise the SREJ queue and the
 * srej/retrans sequence lists.  Returns 0 on success or the -errno
 * from l2cap_seq_list_init() (early-return lines are elided here).
 */
3100 int l2cap_ertm_init(struct l2cap_chan *chan)
3104 chan->next_tx_seq = 0;
3105 chan->expected_tx_seq = 0;
3106 chan->expected_ack_seq = 0;
3107 chan->unacked_frames = 0;
3108 chan->buffer_seq = 0;
3109 chan->frames_sent = 0;
3110 chan->last_acked_seq = 0;
3112 chan->sdu_last_frag = NULL;
3115 skb_queue_head_init(&chan->tx_q);
/* Streaming mode needs only the tx queue initialised above. */
3117 if (chan->mode != L2CAP_MODE_ERTM)
3120 chan->rx_state = L2CAP_RX_STATE_RECV;
3121 chan->tx_state = L2CAP_TX_STATE_XMIT;
3123 skb_queue_head_init(&chan->srej_q);
3125 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
/* If the second list fails, free the first to avoid leaking it. */
3129 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3131 l2cap_seq_list_free(&chan->srej_list);
/* Choose the channel mode to request: keep ERTM/streaming only when
 * the remote feature mask advertises support for it, otherwise fall
 * back to basic mode.  (return/default lines are elided here.)
 */
3136 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3139 case L2CAP_MODE_STREAMING:
3140 case L2CAP_MODE_ERTM:
3141 if (l2cap_mode_supported(mode, remote_feat_mask))
3145 return L2CAP_MODE_BASIC;
3149 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3151 return (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW);
3154 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3156 return (conn->feat_mask & L2CAP_FEAT_EXT_FLOW);
3159 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3160 struct l2cap_conf_rfc *rfc)
3162 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3163 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3166 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3168 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3169 __l2cap_ews_supported(chan->conn)) {
3170 /* use extended control field */
3171 set_bit(FLAG_EXT_CTRL, &chan->flags);
3172 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3174 chan->tx_win = min_t(u16, chan->tx_win,
3175 L2CAP_DEFAULT_TX_WINDOW);
3176 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3178 chan->ack_win = chan->tx_win;
/* Derive a BR/EDR receive MTU from the ACL packet types available on
 * the link: start from the minimum MTU and step it up for each larger
 * EDR packet size that is usable.  The assignments that raise
 * chan->imtu between the checks are elided from this extract.
 */
3181 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3183 struct hci_conn *conn = chan->conn->hcon;
3185 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3187 /* The 2-DH1 packet has between 2 and 56 information bytes
3188 * (including the 2-byte payload header)
3190 if (!(conn->pkt_type & HCI_2DH1))
3193 /* The 3-DH1 packet has between 2 and 85 information bytes
3194 * (including the 2-byte payload header)
3196 if (!(conn->pkt_type & HCI_3DH1))
3199 /* The 2-DH3 packet has between 2 and 369 information bytes
3200 * (including the 2-byte payload header)
3202 if (!(conn->pkt_type & HCI_2DH3))
3205 /* The 3-DH3 packet has between 2 and 554 information bytes
3206 * (including the 2-byte payload header)
3208 if (!(conn->pkt_type & HCI_3DH3))
3211 /* The 2-DH5 packet has between 2 and 681 information bytes
3212 * (including the 2-byte payload header)
3214 if (!(conn->pkt_type & HCI_2DH5))
3217 /* The 3-DH5 packet has between 2 and 1023 information bytes
3218 * (including the 2-byte payload header)
3220 if (!(conn->pkt_type & HCI_3DH5))
/* Build an outgoing Configuration Request for a BR/EDR channel into
 * 'data' (at most data_size bytes): MTU option, then per-mode RFC,
 * EFS, EWS and FCS options.  Returns the total request length
 * (the final return is elided from this extract).
 */
3224 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3226 struct l2cap_conf_req *req = data;
3227 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3228 void *ptr = req->data;
/* endptr bounds every l2cap_add_conf_opt() below via endptr - ptr. */
3229 void *endptr = data + data_size;
3232 BT_DBG("chan %p", chan);
/* Mode selection is done only on the very first config exchange. */
3234 if (chan->num_conf_req || chan->num_conf_rsp)
3237 switch (chan->mode) {
3238 case L2CAP_MODE_STREAMING:
3239 case L2CAP_MODE_ERTM:
3240 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3243 if (__l2cap_efs_supported(chan->conn))
3244 set_bit(FLAG_EFS_ENABLE, &chan->flags);
/* Fall back to a mode the remote actually supports. */
3248 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* Only send an MTU option when it differs from the default. */
3253 if (chan->imtu != L2CAP_DEFAULT_MTU) {
3255 l2cap_mtu_auto(chan);
3256 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3260 switch (chan->mode) {
3261 case L2CAP_MODE_BASIC:
/* Basic mode: RFC option only needed if the peer knows ERTM or
 * streaming at all.
 */
3265 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3266 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3269 rfc.mode = L2CAP_MODE_BASIC;
3271 rfc.max_transmit = 0;
3272 rfc.retrans_timeout = 0;
3273 rfc.monitor_timeout = 0;
3274 rfc.max_pdu_size = 0;
3276 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3277 (unsigned long) &rfc, endptr - ptr);
3280 case L2CAP_MODE_ERTM:
3281 rfc.mode = L2CAP_MODE_ERTM;
3282 rfc.max_transmit = chan->max_tx;
3284 __l2cap_set_ertm_timeouts(chan, &rfc);
/* PDU size is capped by both the spec default and the link MTU
 * minus the extended header/SDU-length/FCS overhead.
 */
3286 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3287 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3289 rfc.max_pdu_size = cpu_to_le16(size);
3291 l2cap_txwin_setup(chan);
3293 rfc.txwin_size = min_t(u16, chan->tx_win,
3294 L2CAP_DEFAULT_TX_WINDOW);
3296 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3297 (unsigned long) &rfc, endptr - ptr);
3299 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3300 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
/* Extended window size travels in its own EWS option. */
3302 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3303 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3304 chan->tx_win, endptr - ptr);
3306 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3307 if (chan->fcs == L2CAP_FCS_NONE ||
3308 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3309 chan->fcs = L2CAP_FCS_NONE;
3310 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3311 chan->fcs, endptr - ptr);
3315 case L2CAP_MODE_STREAMING:
3316 l2cap_txwin_setup(chan);
3317 rfc.mode = L2CAP_MODE_STREAMING;
/* Streaming mode uses no retransmissions or timers. */
3319 rfc.max_transmit = 0;
3320 rfc.retrans_timeout = 0;
3321 rfc.monitor_timeout = 0;
3323 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3324 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3326 rfc.max_pdu_size = cpu_to_le16(size);
3328 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3329 (unsigned long) &rfc, endptr - ptr);
3331 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3332 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3334 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3335 if (chan->fcs == L2CAP_FCS_NONE ||
3336 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3337 chan->fcs = L2CAP_FCS_NONE;
3338 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3339 chan->fcs, endptr - ptr);
3344 req->dcid = cpu_to_le16(chan->dcid);
3345 req->flags = cpu_to_le16(0);
/* Parse the accumulated Configuration Request in chan->conf_req and
 * build the Configuration Response into 'data' (bounded by data_size).
 * Returns the response length, or -ECONNREFUSED when the requested
 * mode/EFS parameters cannot be negotiated.  Many break/brace lines
 * are elided from this extract.
 */
3352 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3353 struct l2cap_conf_rsp *rsp = data;
3354 void *ptr = rsp->data;
3355 void *endptr = data + data_size;
3356 void *req = chan->conf_req;
3357 int len = chan->conf_len;
3358 int type, hint, olen;
3360 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3361 struct l2cap_conf_efs efs;
3362 u16 mtu = L2CAP_DEFAULT_MTU;
3363 u16 result = L2CAP_CONF_SUCCESS;
3366 BT_DBG("chan %p", chan);
/* First pass: walk every option in the request. */
3368 while (len >= L2CAP_CONF_OPT_SIZE) {
3369 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be silently ignored; non-hints must be answered. */
3373 hint = type & L2CAP_CONF_HINT;
3374 type &= L2CAP_CONF_MASK;
3377 case L2CAP_CONF_MTU:
3383 case L2CAP_CONF_FLUSH_TO:
3386 chan->flush_to = val;
3389 case L2CAP_CONF_QOS:
3392 case L2CAP_CONF_RFC:
3393 if (olen != sizeof(rfc))
3395 memcpy(&rfc, (void *) val, olen);
3398 case L2CAP_CONF_FCS:
3401 if (val == L2CAP_FCS_NONE)
3402 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3405 case L2CAP_CONF_EFS:
3406 if (olen != sizeof(efs))
3409 memcpy(&efs, (void *) val, olen);
/* EWS from a peer that may not use it refuses the connection. */
3412 case L2CAP_CONF_EWS:
3415 return -ECONNREFUSED;
/* Unknown non-hint option: echo it back as UNKNOWN. */
3420 result = L2CAP_CONF_UNKNOWN;
3421 l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
/* Mode renegotiation is only allowed on the first exchange. */
3426 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3429 switch (chan->mode) {
3430 case L2CAP_MODE_STREAMING:
3431 case L2CAP_MODE_ERTM:
3432 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3433 chan->mode = l2cap_select_mode(rfc.mode,
3434 chan->conn->feat_mask);
3439 if (__l2cap_efs_supported(chan->conn))
3440 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3442 return -ECONNREFUSED;
3445 if (chan->mode != rfc.mode)
3446 return -ECONNREFUSED;
/* Mode mismatch: reject once, refuse on the second attempt. */
3452 if (chan->mode != rfc.mode) {
3453 result = L2CAP_CONF_UNACCEPT;
3454 rfc.mode = chan->mode;
3456 if (chan->num_conf_rsp == 1)
3457 return -ECONNREFUSED;
3459 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3460 (unsigned long) &rfc, endptr - ptr);
3463 if (result == L2CAP_CONF_SUCCESS) {
3464 /* Configure output options and let the other side know
3465 * which ones we don't like. */
/* The peer's MTU becomes our outgoing MTU if it is acceptable. */
3467 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3468 result = L2CAP_CONF_UNACCEPT;
3471 set_bit(CONF_MTU_DONE, &chan->conf_state);
3473 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
/* EFS service type must be compatible with our local type. */
3476 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3477 efs.stype != L2CAP_SERV_NOTRAFIC &&
3478 efs.stype != chan->local_stype) {
3480 result = L2CAP_CONF_UNACCEPT;
3482 if (chan->num_conf_req >= 1)
3483 return -ECONNREFUSED;
3485 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3487 (unsigned long) &efs, endptr - ptr);
3489 /* Send PENDING Conf Rsp */
3490 result = L2CAP_CONF_PENDING;
3491 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3496 case L2CAP_MODE_BASIC:
3497 chan->fcs = L2CAP_FCS_NONE;
3498 set_bit(CONF_MODE_DONE, &chan->conf_state);
3501 case L2CAP_MODE_ERTM:
/* Remote tx window: honour the EWS value if one was received. */
3502 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3503 chan->remote_tx_win = rfc.txwin_size;
3505 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3507 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the peer's PDU size to what the link MTU can carry. */
3509 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3510 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3511 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3512 rfc.max_pdu_size = cpu_to_le16(size);
3513 chan->remote_mps = size;
3515 __l2cap_set_ertm_timeouts(chan, &rfc);
3517 set_bit(CONF_MODE_DONE, &chan->conf_state);
3519 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3520 sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3523 test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
/* Record the peer's flow-spec parameters for this direction. */
3524 chan->remote_id = efs.id;
3525 chan->remote_stype = efs.stype;
3526 chan->remote_msdu = le16_to_cpu(efs.msdu);
3527 chan->remote_flush_to =
3528 le32_to_cpu(efs.flush_to);
3529 chan->remote_acc_lat =
3530 le32_to_cpu(efs.acc_lat);
3531 chan->remote_sdu_itime =
3532 le32_to_cpu(efs.sdu_itime);
3533 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3535 (unsigned long) &efs, endptr - ptr);
3539 case L2CAP_MODE_STREAMING:
3540 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3541 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3542 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3543 rfc.max_pdu_size = cpu_to_le16(size);
3544 chan->remote_mps = size;
3546 set_bit(CONF_MODE_DONE, &chan->conf_state);
3548 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3549 (unsigned long) &rfc, endptr - ptr);
/* Unsupported mode: reject with a basic-mode RFC echo. */
3554 result = L2CAP_CONF_UNACCEPT;
3556 memset(&rfc, 0, sizeof(rfc));
3557 rfc.mode = chan->mode;
3560 if (result == L2CAP_CONF_SUCCESS)
3561 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3563 rsp->scid = cpu_to_le16(chan->dcid);
3564 rsp->result = cpu_to_le16(result);
3565 rsp->flags = cpu_to_le16(0);
/* Parse a Configuration Response from the peer and build the follow-up
 * Configuration Request into 'data' (bounded by size).  *result may be
 * updated (e.g. to UNACCEPT for a too-small MTU).  Returns the request
 * length or -ECONNREFUSED on an unnegotiable response.  break/brace
 * lines are elided from this extract.
 */
3570 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3571 void *data, size_t size, u16 *result)
3573 struct l2cap_conf_req *req = data;
3574 void *ptr = req->data;
3575 void *endptr = data + size;
3578 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3579 struct l2cap_conf_efs efs;
3581 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3583 while (len >= L2CAP_CONF_OPT_SIZE) {
3584 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3589 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the minimum: mark unacceptable and
 * counter-propose the smallest legal value.
 */
3592 if (val < L2CAP_DEFAULT_MIN_MTU) {
3593 *result = L2CAP_CONF_UNACCEPT;
3594 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3597 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3601 case L2CAP_CONF_FLUSH_TO:
3604 chan->flush_to = val;
3605 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3606 chan->flush_to, endptr - ptr);
3609 case L2CAP_CONF_RFC:
3610 if (olen != sizeof(rfc))
3612 memcpy(&rfc, (void *)val, olen);
/* A state-2 device must not have its mode changed by the peer. */
3613 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3614 rfc.mode != chan->mode)
3615 return -ECONNREFUSED;
3617 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3618 (unsigned long) &rfc, endptr - ptr);
3621 case L2CAP_CONF_EWS:
/* ack_win can only shrink to what the peer accepted. */
3624 chan->ack_win = min_t(u16, val, chan->ack_win);
3625 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3626 chan->tx_win, endptr - ptr);
3629 case L2CAP_CONF_EFS:
3630 if (olen != sizeof(efs))
3632 memcpy(&efs, (void *)val, olen);
3633 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3634 efs.stype != L2CAP_SERV_NOTRAFIC &&
3635 efs.stype != chan->local_stype)
3636 return -ECONNREFUSED;
3637 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3638 (unsigned long) &efs, endptr - ptr);
3641 case L2CAP_CONF_FCS:
3644 if (*result == L2CAP_CONF_PENDING)
3645 if (val == L2CAP_FCS_NONE)
3646 set_bit(CONF_RECV_NO_FCS,
/* Basic mode cannot be renegotiated to something else. */
3652 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3653 return -ECONNREFUSED;
3655 chan->mode = rfc.mode;
/* Commit negotiated timers/PDU size once the exchange succeeded
 * (or is pending).
 */
3657 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3659 case L2CAP_MODE_ERTM:
3660 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3661 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3662 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3663 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3664 chan->ack_win = min_t(u16, chan->ack_win,
3667 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3668 chan->local_msdu = le16_to_cpu(efs.msdu);
3669 chan->local_sdu_itime =
3670 le32_to_cpu(efs.sdu_itime);
3671 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3672 chan->local_flush_to =
3673 le32_to_cpu(efs.flush_to);
3677 case L2CAP_MODE_STREAMING:
3678 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3682 req->dcid = cpu_to_le16(chan->dcid);
3683 req->flags = cpu_to_le16(0);
/* Build a minimal Configuration Response (header only, no options)
 * with the given result and flags.  Returns the response length
 * (the return statement is elided from this extract).
 */
3688 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3689 u16 result, u16 flags)
3691 struct l2cap_conf_rsp *rsp = data;
3692 void *ptr = rsp->data;
3694 BT_DBG("chan %p", chan);
/* The response's scid field carries OUR view of the peer's CID. */
3696 rsp->scid = cpu_to_le16(chan->dcid);
3697 rsp->result = cpu_to_le16(result);
3698 rsp->flags = cpu_to_le16(flags);
/* Send the deferred LE Credit Based Connection Response for a channel
 * whose acceptance was postponed (e.g. by BT_DEFER_SETUP), using the
 * ident saved from the original request.
 */
3703 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3705 struct l2cap_le_conn_rsp rsp;
3706 struct l2cap_conn *conn = chan->conn;
3708 BT_DBG("chan %p", chan);
3710 rsp.dcid = cpu_to_le16(chan->scid);
3711 rsp.mtu = cpu_to_le16(chan->imtu);
3712 rsp.mps = cpu_to_le16(chan->mps);
/* Initial credits granted to the peer for this channel. */
3713 rsp.credits = cpu_to_le16(chan->rx_credits);
3714 rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3716 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Per-channel callback used while verifying a deferred enhanced-credit
 * connection: flags refusal via the int pointed to by 'data'
 * (presumably "int *result = data;" on an elided line - confirm against
 * full source).  Already-failed results are left untouched.
 */
3720 static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
3724 if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
3727 switch (chan->state) {
3729 /* If channel still pending accept add to result */
3735 /* If not connected or pending accept it has been refused */
3736 *result = -ECONNREFUSED;
/* Scratch buffer for assembling one Enhanced Credit Based Connection
 * Response covering several channels: the fixed response header
 * followed by up to L2CAP_ECRED_MAX_CID destination CIDs.
 * (The pdu wrapper/count members are elided from this extract.)
 */
3741 struct l2cap_ecred_rsp_data {
3743 struct l2cap_ecred_conn_rsp rsp;
3744 __le16 scid[L2CAP_ECRED_MAX_CID];
/* Per-channel callback that appends a channel's CID to the combined
 * enhanced-credit connection response being built in 'data'.  Channels
 * are dropped (ECONNRESET) when the overall result is a failure.
 */
3749 static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
3751 struct l2cap_ecred_rsp_data *rsp = data;
3753 if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
3756 /* Reset ident so only one response is sent */
3759 /* Include all channels pending with the same ident */
3760 if (!rsp->pdu.rsp.result)
3761 rsp->pdu.rsp.dcid[rsp->count++] = cpu_to_le16(chan->scid);
3763 l2cap_chan_del(chan, ECONNRESET);
/* Send the deferred Enhanced Credit Based Connection Response: all
 * channels created from the same request (same ident) are verified and
 * answered in a single response PDU.
 */
3766 void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
3768 struct l2cap_conn *conn = chan->conn;
3769 struct l2cap_ecred_rsp_data data;
3770 u16 id = chan->ident;
3776 BT_DBG("chan %p id %d", chan, id);
3778 memset(&data, 0, sizeof(data));
3780 data.pdu.rsp.mtu = cpu_to_le16(chan->imtu);
3781 data.pdu.rsp.mps = cpu_to_le16(chan->mps);
3782 data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
3783 data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3785 /* Verify that all channels are ready */
3786 __l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);
/* Any refused channel downgrades the whole response. */
3792 data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);
3794 /* Build response */
3795 __l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);
/* Response length = fixed header + one __le16 CID per channel. */
3797 l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
3798 sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
/* Send the deferred BR/EDR Connection Response (success) for a channel
 * whose acceptance was postponed, then kick off configuration by
 * sending the first Configuration Request if not already sent.
 */
3802 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3804 struct l2cap_conn_rsp rsp;
3805 struct l2cap_conn *conn = chan->conn;
3809 rsp.scid = cpu_to_le16(chan->dcid);
3810 rsp.dcid = cpu_to_le16(chan->scid);
3811 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3812 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3813 rsp_code = L2CAP_CONN_RSP;
3815 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3817 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* Only the first caller sends the config request. */
3819 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3822 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3823 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3824 chan->num_conf_req++;
/* Extract the RFC (and EWS) options from a successful Configuration
 * Response and commit the negotiated ERTM/streaming parameters to the
 * channel.  Sane defaults cover peers that omitted the options.
 */
3827 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3831 /* Use sane default values in case a misbehaving remote device
3832 * did not send an RFC or extended window size option.
3834 u16 txwin_ext = chan->ack_win;
3835 struct l2cap_conf_rfc rfc = {
3837 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3838 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3839 .max_pdu_size = cpu_to_le16(chan->imtu),
3840 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3843 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM and streaming mode carry RFC parameters worth reading. */
3845 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3848 while (len >= L2CAP_CONF_OPT_SIZE) {
3849 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3854 case L2CAP_CONF_RFC:
3855 if (olen != sizeof(rfc))
3857 memcpy(&rfc, (void *)val, olen);
3859 case L2CAP_CONF_EWS:
3868 case L2CAP_MODE_ERTM:
3869 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3870 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3871 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Extended control field: window came from the EWS option. */
3872 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3873 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3875 chan->ack_win = min_t(u16, chan->ack_win,
3878 case L2CAP_MODE_STREAMING:
3879 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it answers our outstanding
 * Information Request (command-not-understood from a legacy stack),
 * mark the feature exchange done and start pending channels anyway.
 */
3883 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3884 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3887 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
/* Truncated PDU: ignore. */
3889 if (cmd_len < sizeof(*rej))
3892 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3895 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3896 cmd->ident == conn->info_ident) {
3897 cancel_delayed_work(&conn->info_timer);
3899 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3900 conn->info_ident = 0;
3902 l2cap_conn_start(conn);
/* Handle an incoming BR/EDR Connection Request: look up a listening
 * channel for the PSM, apply security and CID-validity checks, create
 * the new channel, and send a Connection Response (success, pending or
 * an error).  May also trigger the information-request exchange and
 * the first Configuration Request.  goto labels and several error
 * paths are elided from this extract.
 */
3908 static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
3909 u8 *data, u8 rsp_code, u8 amp_id)
3911 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3912 struct l2cap_conn_rsp rsp;
3913 struct l2cap_chan *chan = NULL, *pchan = NULL;
3914 int result, status = L2CAP_CS_NO_INFO;
3916 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3917 __le16 psm = req->psm;
3919 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3921 /* Check if we have socket listening on psm */
3922 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3923 &conn->hcon->dst, ACL_LINK);
3925 result = L2CAP_CR_BAD_PSM;
3929 mutex_lock(&conn->chan_lock);
3930 l2cap_chan_lock(pchan);
3932 /* Check if the ACL is secure enough (if not SDP) */
3933 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3934 !hci_conn_check_link_mode(conn->hcon)) {
3935 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3936 result = L2CAP_CR_SEC_BLOCK;
3940 result = L2CAP_CR_NO_MEM;
3942 /* Check for valid dynamic CID range (as per Erratum 3253) */
3943 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
3944 result = L2CAP_CR_INVALID_SCID;
3948 /* Check if we already have channel with that dcid */
3949 if (__l2cap_get_chan_by_dcid(conn, scid)) {
3950 result = L2CAP_CR_SCID_IN_USE;
3954 chan = pchan->ops->new_connection(pchan);
3958 /* For certain devices (ex: HID mouse), support for authentication,
3959 * pairing and bonding is optional. For such devices, inorder to avoid
3960 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3961 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3963 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3965 bacpy(&chan->src, &conn->hcon->src);
3966 bacpy(&chan->dst, &conn->hcon->dst);
3967 chan->src_type = bdaddr_src_type(conn->hcon);
3968 chan->dst_type = bdaddr_dst_type(conn->hcon);
3972 __l2cap_chan_add(conn, chan);
3976 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
/* Remember the request ident for the (possibly deferred) response. */
3978 chan->ident = cmd->ident;
3980 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3981 if (l2cap_chan_check_security(chan, false)) {
/* Userspace asked to defer: answer PENDING and wait for accept(). */
3982 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3983 l2cap_state_change(chan, BT_CONNECT2);
3984 result = L2CAP_CR_PEND;
3985 status = L2CAP_CS_AUTHOR_PEND;
3986 chan->ops->defer(chan);
3988 /* Force pending result for AMP controllers.
3989 * The connection will succeed after the
3990 * physical link is up.
3992 if (amp_id == AMP_ID_BREDR) {
3993 l2cap_state_change(chan, BT_CONFIG);
3994 result = L2CAP_CR_SUCCESS;
3996 l2cap_state_change(chan, BT_CONNECT2);
3997 result = L2CAP_CR_PEND;
3999 status = L2CAP_CS_NO_INFO;
/* Security procedure still running. */
4002 l2cap_state_change(chan, BT_CONNECT2);
4003 result = L2CAP_CR_PEND;
4004 status = L2CAP_CS_AUTHEN_PEND;
/* Feature exchange not finished yet: respond PENDING. */
4007 l2cap_state_change(chan, BT_CONNECT2);
4008 result = L2CAP_CR_PEND;
4009 status = L2CAP_CS_NO_INFO;
4013 rsp.scid = cpu_to_le16(scid);
4014 rsp.dcid = cpu_to_le16(dcid);
4015 rsp.result = cpu_to_le16(result);
4016 rsp.status = cpu_to_le16(status);
4017 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* Kick off the feature-mask information exchange if needed. */
4022 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
4023 struct l2cap_info_req info;
4024 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4026 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4027 conn->info_ident = l2cap_get_ident(conn);
4029 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
4031 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
4032 sizeof(info), &info);
/* Immediate success: start configuration right away. */
4035 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
4036 result == L2CAP_CR_SUCCESS) {
4038 set_bit(CONF_REQ_SENT, &chan->conf_state);
4039 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4040 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4041 chan->num_conf_req++;
4044 l2cap_chan_unlock(pchan);
4045 mutex_unlock(&conn->chan_lock);
4046 l2cap_chan_put(pchan);
/* Signalling dispatcher entry for a Connection Request PDU: validate
 * the length, notify the management interface of the device, then
 * delegate to l2cap_connect() with the BR/EDR response code.
 */
4049 static int l2cap_connect_req(struct l2cap_conn *conn,
4050 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4052 struct hci_dev *hdev = conn->hcon->hdev;
4053 struct hci_conn *hcon = conn->hcon;
/* Reject truncated PDUs before touching the payload. */
4055 if (cmd_len < sizeof(struct l2cap_conn_req))
4059 if (hci_dev_test_flag(hdev, HCI_MGMT))
4060 mgmt_device_connected(hdev, hcon, NULL, 0);
4061 hci_dev_unlock(hdev);
4063 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection Response to our earlier Connection Request.  On
 * success, validate the returned DCID, move the channel to BT_CONFIG
 * and send the first Configuration Request; on refusal, delete the
 * channel.  goto labels/error returns are elided from this extract.
 */
4067 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
4068 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4071 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4072 u16 scid, dcid, result, status;
4073 struct l2cap_chan *chan;
4077 if (cmd_len < sizeof(*rsp))
4080 scid = __le16_to_cpu(rsp->scid);
4081 dcid = __le16_to_cpu(rsp->dcid);
4082 result = __le16_to_cpu(rsp->result);
4083 status = __le16_to_cpu(rsp->status);
/* A successful response must return a CID in the dynamic range. */
4085 if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
4086 dcid > L2CAP_CID_DYN_END))
4089 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4090 dcid, scid, result, status);
4092 mutex_lock(&conn->chan_lock);
/* Find the channel either by its source CID or, if the peer echoed
 * scid 0, by the signalling ident we used.
 */
4095 chan = __l2cap_get_chan_by_scid(conn, scid);
4101 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
/* Guard against the channel dying concurrently. */
4108 chan = l2cap_chan_hold_unless_zero(chan);
4116 l2cap_chan_lock(chan);
4119 case L2CAP_CR_SUCCESS:
/* Duplicate DCID from the peer is a protocol violation. */
4120 if (__l2cap_get_chan_by_dcid(conn, dcid)) {
4125 l2cap_state_change(chan, BT_CONFIG);
4128 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4130 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4133 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4134 l2cap_build_conf_req(chan, req, sizeof(req)), req);
4135 chan->num_conf_req++;
/* PENDING: remember we are still waiting for the final response. */
4139 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any other result refuses the connection. */
4143 l2cap_chan_del(chan, ECONNREFUSED);
4147 l2cap_chan_unlock(chan);
4148 l2cap_chan_put(chan);
4151 mutex_unlock(&conn->chan_lock);
/* Apply the negotiated FCS setting: CRC16 for ERTM/streaming unless
 * the peer asked for no FCS, none for every other mode.
 */
4156 static inline void set_default_fcs(struct l2cap_chan *chan)
4158 /* FCS is enabled only in ERTM or streaming mode, if one or both
4161 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4162 chan->fcs = L2CAP_FCS_NONE;
4163 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4164 chan->fcs = L2CAP_FCS_CRC16;
/* Send the final successful Configuration Response after an EFS
 * PENDING exchange: clear the local-pending flag, mark output
 * configuration done and transmit the response built into 'data'.
 */
4167 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4168 u8 ident, u16 flags)
4170 struct l2cap_conn *conn = chan->conn;
4172 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4175 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4176 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4178 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4179 l2cap_build_conf_rsp(chan, data,
4180 L2CAP_CONF_SUCCESS, flags), data);
/* Send a Command Reject with reason "invalid CID", echoing the
 * offending source/destination CIDs (the scid/dcid parameter line of
 * the signature is elided from this extract).
 */
4183 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4186 struct l2cap_cmd_rej_cid rej;
4188 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4189 rej.scid = __cpu_to_le16(scid);
4190 rej.dcid = __cpu_to_le16(dcid);
4192 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle an incoming Configuration Request: accumulate (possibly
 * fragmented) options in chan->conf_req, and once complete parse them
 * with l2cap_parse_conf_req() and send the response.  When both sides
 * are configured, initialise ERTM state and mark the channel ready.
 * goto labels and several early returns are elided from this extract.
 */
4195 static inline int l2cap_config_req(struct l2cap_conn *conn,
4196 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4199 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4202 struct l2cap_chan *chan;
4205 if (cmd_len < sizeof(*req))
4208 dcid = __le16_to_cpu(req->dcid);
4209 flags = __le16_to_cpu(req->flags);
4211 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
/* Unknown CID gets a Command Reject rather than silence. */
4213 chan = l2cap_get_chan_by_scid(conn, dcid);
4215 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
/* Config only makes sense while connecting/configuring. */
4219 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
4220 chan->state != BT_CONNECTED) {
4221 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4226 /* Reject if config buffer is too small. */
4227 len = cmd_len - sizeof(*req);
4228 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4229 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4230 l2cap_build_conf_rsp(chan, rsp,
4231 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment to the accumulated request. */
4236 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4237 chan->conf_len += len;
4239 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4240 /* Incomplete config. Send empty response. */
4241 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4242 l2cap_build_conf_rsp(chan, rsp,
4243 L2CAP_CONF_SUCCESS, flags), rsp);
4247 /* Complete config. */
4248 len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
/* Unnegotiable request: tear the channel down. */
4250 l2cap_send_disconn_req(chan, ECONNRESET);
4254 chan->ident = cmd->ident;
4255 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4256 if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
4257 chan->num_conf_rsp++;
4259 /* Reset config buffer. */
4262 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: finish channel setup. */
4265 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4266 set_default_fcs(chan);
4268 if (chan->mode == L2CAP_MODE_ERTM ||
4269 chan->mode == L2CAP_MODE_STREAMING)
4270 err = l2cap_ertm_init(chan);
4273 l2cap_send_disconn_req(chan, -err);
4275 l2cap_chan_ready(chan);
/* We have not sent our own config request yet: do it now. */
4280 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4282 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4283 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4284 chan->num_conf_req++;
4287 /* Got Conf Rsp PENDING from remote side and assume we sent
4288 Conf Rsp PENDING in the code above */
4289 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4290 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4292 /* check compatibility */
4294 /* Send rsp for BR/EDR channel */
4295 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4299 l2cap_chan_unlock(chan);
4300 l2cap_chan_put(chan);
/* Handle an incoming BR/EDR Configure Response.
 *
 * SUCCESS records the remote's RFC option; PENDING may trigger an EFS
 * response when we are also locally pending; UNKNOWN/UNACCEPT re-issues
 * a Configure Request with adjusted options (bounded by
 * L2CAP_CONF_MAX_CONF_RSP retries); any other result tears the channel
 * down.  When both input and output configuration are done the channel
 * finalizes FCS/ERTM state and becomes ready.
 */
4304 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4305 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4308 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4309 u16 scid, flags, result;
4310 struct l2cap_chan *chan;
4311 int len = cmd_len - sizeof(*rsp);
4314 if (cmd_len < sizeof(*rsp))
4317 scid = __le16_to_cpu(rsp->scid);
4318 flags = __le16_to_cpu(rsp->flags);
4319 result = __le16_to_cpu(rsp->result);
4321 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4324 chan = l2cap_get_chan_by_scid(conn, scid);
4329 case L2CAP_CONF_SUCCESS:
4330 l2cap_conf_rfc_get(chan, rsp->data, len);
4331 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4334 case L2CAP_CONF_PENDING:
4335 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4337 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4340 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4341 buf, sizeof(buf), &result);
4343 l2cap_send_disconn_req(chan, ECONNRESET);
4347 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0);
4351 case L2CAP_CONF_UNKNOWN:
4352 case L2CAP_CONF_UNACCEPT:
4353 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4356 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4357 l2cap_send_disconn_req(chan, ECONNRESET);
4361 /* throw out any old stored conf requests */
4362 result = L2CAP_CONF_SUCCESS;
4363 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4364 req, sizeof(req), &result);
4366 l2cap_send_disconn_req(chan, ECONNRESET);
4370 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4371 L2CAP_CONF_REQ, len, req);
4372 chan->num_conf_req++;
4373 if (result != L2CAP_CONF_SUCCESS)
/* Unrecoverable result: flag the error and disconnect after a
 * short grace timer.
 */
4380 l2cap_chan_set_err(chan, ECONNRESET);
4382 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4383 l2cap_send_disconn_req(chan, ECONNRESET);
4387 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4390 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4392 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4393 set_default_fcs(chan);
4395 if (chan->mode == L2CAP_MODE_ERTM ||
4396 chan->mode == L2CAP_MODE_STREAMING)
4397 err = l2cap_ertm_init(chan);
4400 l2cap_send_disconn_req(chan, -err);
4402 l2cap_chan_ready(chan);
4406 l2cap_chan_unlock(chan);
4407 l2cap_chan_put(chan);
/* Handle a Disconnection Request: confirm with a Disconnection Response
 * and delete the channel.
 *
 * Note the lock-ordering dance before l2cap_chan_del(): conn->chan_lock
 * must be taken before an individual channel lock, so the channel lock
 * (held since l2cap_get_chan_by_scid()) is dropped, conn->chan_lock is
 * acquired, and the channel lock is re-taken.
 */
4411 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4412 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4415 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4416 struct l2cap_disconn_rsp rsp;
4418 struct l2cap_chan *chan;
4420 if (cmd_len != sizeof(*req))
4423 scid = __le16_to_cpu(req->scid);
4424 dcid = __le16_to_cpu(req->dcid);
4426 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4428 chan = l2cap_get_chan_by_scid(conn, dcid);
4430 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4434 rsp.dcid = cpu_to_le16(chan->scid);
4435 rsp.scid = cpu_to_le16(chan->dcid);
4436 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4438 chan->ops->set_shutdown(chan);
4440 l2cap_chan_unlock(chan);
4441 mutex_lock(&conn->chan_lock);
4442 l2cap_chan_lock(chan);
4443 l2cap_chan_del(chan, ECONNRESET);
4444 mutex_unlock(&conn->chan_lock);
4446 chan->ops->close(chan);
4448 l2cap_chan_unlock(chan);
4449 l2cap_chan_put(chan);
/* Handle a Disconnection Response to a request we sent: if the channel
 * is indeed in BT_DISCONN, delete and close it.  Uses the same
 * unlock -> conn->chan_lock -> relock ordering as l2cap_disconnect_req()
 * before calling l2cap_chan_del().
 */
4454 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4455 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4458 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4460 struct l2cap_chan *chan;
4462 if (cmd_len != sizeof(*rsp))
4465 scid = __le16_to_cpu(rsp->scid);
4466 dcid = __le16_to_cpu(rsp->dcid);
4468 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4470 chan = l2cap_get_chan_by_scid(conn, scid);
/* Stale or unexpected response: drop it without touching the channel. */
4475 if (chan->state != BT_DISCONN) {
4476 l2cap_chan_unlock(chan);
4477 l2cap_chan_put(chan);
4481 l2cap_chan_unlock(chan);
4482 mutex_lock(&conn->chan_lock);
4483 l2cap_chan_lock(chan);
4484 l2cap_chan_del(chan, 0);
4485 mutex_unlock(&conn->chan_lock);
4487 chan->ops->close(chan);
4489 l2cap_chan_unlock(chan);
4490 l2cap_chan_put(chan);
/* Answer an Information Request.  Supported types are the extended
 * feature mask (static mask plus ERTM/streaming bits) and the fixed
 * channel map (local_fixed_chan in octet 0, remaining octets zeroed);
 * anything else is answered with L2CAP_IR_NOTSUPP.
 */
4495 static inline int l2cap_information_req(struct l2cap_conn *conn,
4496 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4499 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4502 if (cmd_len != sizeof(*req))
4505 type = __le16_to_cpu(req->type);
4507 BT_DBG("type 0x%4.4x", type);
4509 if (type == L2CAP_IT_FEAT_MASK) {
4511 u32 feat_mask = l2cap_feat_mask;
4512 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4513 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4514 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4516 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4519 put_unaligned_le32(feat_mask, rsp->data);
4520 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4522 } else if (type == L2CAP_IT_FIXED_CHAN) {
4524 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4526 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4527 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4528 rsp->data[0] = conn->local_fixed_chan;
4529 memset(rsp->data + 1, 0, 7);
4530 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4533 struct l2cap_info_rsp rsp;
4534 rsp.type = cpu_to_le16(type);
4535 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4536 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Process an Information Response that completes our own info exchange
 * during connection setup.  Guarded against spoofed/duplicate responses
 * by requiring the ident to match conn->info_ident and the exchange to
 * still be pending.  After recording the feature mask, a fixed-channel
 * request is chained if the peer advertised support; once the exchange
 * concludes l2cap_conn_start() kicks pending channels.
 */
4543 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4544 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4547 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4550 if (cmd_len < sizeof(*rsp))
4553 type = __le16_to_cpu(rsp->type);
4554 result = __le16_to_cpu(rsp->result);
4556 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4558 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4559 if (cmd->ident != conn->info_ident ||
4560 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4563 cancel_delayed_work(&conn->info_timer);
4565 if (result != L2CAP_IR_SUCCESS) {
4566 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4567 conn->info_ident = 0;
4569 l2cap_conn_start(conn);
4575 case L2CAP_IT_FEAT_MASK:
4576 conn->feat_mask = get_unaligned_le32(rsp->data);
4578 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4579 struct l2cap_info_req req;
4580 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4582 conn->info_ident = l2cap_get_ident(conn);
4584 l2cap_send_cmd(conn, conn->info_ident,
4585 L2CAP_INFO_REQ, sizeof(req), &req);
4587 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4588 conn->info_ident = 0;
4590 l2cap_conn_start(conn);
4594 case L2CAP_IT_FIXED_CHAN:
4595 conn->remote_fixed_chan = rsp->data[0];
4596 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4597 conn->info_ident = 0;
4599 l2cap_conn_start(conn);
/* Handle an LE Connection Parameter Update Request.  Only legal when we
 * are central (HCI_ROLE_MASTER); the proposed interval/latency/timeout
 * is validated with hci_check_conn_params(), the peer is answered with
 * accepted/rejected, and accepted parameters are programmed into the
 * controller and reported to the management interface.
 */
4606 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4607 struct l2cap_cmd_hdr *cmd,
4608 u16 cmd_len, u8 *data)
4610 struct hci_conn *hcon = conn->hcon;
4611 struct l2cap_conn_param_update_req *req;
4612 struct l2cap_conn_param_update_rsp rsp;
4613 u16 min, max, latency, to_multiplier;
4616 if (hcon->role != HCI_ROLE_MASTER)
4619 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4622 req = (struct l2cap_conn_param_update_req *) data;
4623 min = __le16_to_cpu(req->min);
4624 max = __le16_to_cpu(req->max);
4625 latency = __le16_to_cpu(req->latency);
4626 to_multiplier = __le16_to_cpu(req->to_multiplier);
4628 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4629 min, max, latency, to_multiplier);
4631 memset(&rsp, 0, sizeof(rsp));
4633 if (max > hcon->le_conn_max_interval) {
4634 BT_DBG("requested connection interval exceeds current bounds.");
4637 err = hci_check_conn_params(min, max, latency, to_multiplier);
4641 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4643 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4645 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
4651 store_hint = hci_le_conn_update(hcon, min, max, latency,
4653 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
4654 store_hint, min, max, latency,
/* Handle an LE Credit Based Connection Response.
 *
 * Sanity-checks MTU/MPS (>= 23) and the destination CID range on
 * success results, then locates the pending channel by the command
 * ident under conn->chan_lock.  On success the channel is completed;
 * on authentication/encryption failure a retry with elevated security
 * is set up (unless MITM is already active); any other result deletes
 * the channel.
 */
4662 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
4663 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4666 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
4667 struct hci_conn *hcon = conn->hcon;
4668 u16 dcid, mtu, mps, credits, result;
4669 struct l2cap_chan *chan;
4672 if (cmd_len < sizeof(*rsp))
4675 dcid = __le16_to_cpu(rsp->dcid);
4676 mtu = __le16_to_cpu(rsp->mtu);
4677 mps = __le16_to_cpu(rsp->mps);
4678 credits = __le16_to_cpu(rsp->credits);
4679 result = __le16_to_cpu(rsp->result);
/* 23 is the spec minimum for both MTU and MPS on LE CoC. */
4681 if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
4682 dcid < L2CAP_CID_DYN_START ||
4683 dcid > L2CAP_CID_LE_DYN_END))
4686 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
4687 dcid, mtu, mps, credits, result);
4689 mutex_lock(&conn->chan_lock);
4691 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4699 l2cap_chan_lock(chan);
4702 case L2CAP_CR_LE_SUCCESS:
4703 if (__l2cap_get_chan_by_dcid(conn, dcid)) {
4711 chan->remote_mps = mps;
4712 chan->tx_credits = credits;
4713 l2cap_chan_ready(chan);
4716 case L2CAP_CR_LE_AUTHENTICATION:
4717 case L2CAP_CR_LE_ENCRYPTION:
4718 /* If we already have MITM protection we can't do
4721 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
4722 l2cap_chan_del(chan, ECONNREFUSED);
4726 sec_level = hcon->sec_level + 1;
4727 if (chan->sec_level < sec_level)
4728 chan->sec_level = sec_level;
4730 /* We'll need to send a new Connect Request */
4731 clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
4733 smp_conn_security(hcon, chan->sec_level);
4737 l2cap_chan_del(chan, ECONNREFUSED);
4741 l2cap_chan_unlock(chan);
4744 mutex_unlock(&conn->chan_lock);
/* Dispatch a single BR/EDR signaling command to its handler.  Echo
 * requests are answered inline by mirroring the payload; unknown codes
 * are logged and fall through to the rejection path in the caller.
 */
4749 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4750 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4755 switch (cmd->code) {
4756 case L2CAP_COMMAND_REJ:
4757 l2cap_command_rej(conn, cmd, cmd_len, data);
4760 case L2CAP_CONN_REQ:
4761 err = l2cap_connect_req(conn, cmd, cmd_len, data);
4764 case L2CAP_CONN_RSP:
4765 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
4768 case L2CAP_CONF_REQ:
4769 err = l2cap_config_req(conn, cmd, cmd_len, data);
4772 case L2CAP_CONF_RSP:
4773 l2cap_config_rsp(conn, cmd, cmd_len, data);
4776 case L2CAP_DISCONN_REQ:
4777 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
4780 case L2CAP_DISCONN_RSP:
4781 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
4784 case L2CAP_ECHO_REQ:
4785 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4788 case L2CAP_ECHO_RSP:
4791 case L2CAP_INFO_REQ:
4792 err = l2cap_information_req(conn, cmd, cmd_len, data);
4795 case L2CAP_INFO_RSP:
4796 l2cap_information_rsp(conn, cmd, cmd_len, data);
4800 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Handle an LE Credit Based Connection Request.
 *
 * Validates the SPSM range and minimum MTU/MPS, finds a listening
 * channel for the PSM, checks link security, and rejects requests with
 * an out-of-range or already-used source CID.  On success a new child
 * channel is created, registered on the connection, and either answered
 * immediately or deferred to userspace (FLAG_DEFER_SETUP, signalled via
 * the locally-reused L2CAP_CR_PEND result that suppresses the response
 * here).
 */
4808 static int l2cap_le_connect_req(struct l2cap_conn *conn,
4809 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4812 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
4813 struct l2cap_le_conn_rsp rsp;
4814 struct l2cap_chan *chan, *pchan;
4815 u16 dcid, scid, credits, mtu, mps;
4819 if (cmd_len != sizeof(*req))
4822 scid = __le16_to_cpu(req->scid);
4823 mtu = __le16_to_cpu(req->mtu);
4824 mps = __le16_to_cpu(req->mps);
4829 if (mtu < 23 || mps < 23)
4832 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
4835 /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
4838 * Valid range: 0x0001-0x00ff
4840 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
4842 if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
4843 result = L2CAP_CR_LE_BAD_PSM;
4848 /* Check if we have socket listening on psm */
4849 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
4850 &conn->hcon->dst, LE_LINK);
4852 result = L2CAP_CR_LE_BAD_PSM;
4857 mutex_lock(&conn->chan_lock);
4858 l2cap_chan_lock(pchan);
4860 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
4862 result = L2CAP_CR_LE_AUTHENTICATION;
4864 goto response_unlock;
4867 /* Check for valid dynamic CID range */
4868 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
4869 result = L2CAP_CR_LE_INVALID_SCID;
4871 goto response_unlock;
4874 /* Check if we already have channel with that dcid */
4875 if (__l2cap_get_chan_by_dcid(conn, scid)) {
4876 result = L2CAP_CR_LE_SCID_IN_USE;
4878 goto response_unlock;
4881 chan = pchan->ops->new_connection(pchan);
4883 result = L2CAP_CR_LE_NO_MEM;
4884 goto response_unlock;
4887 bacpy(&chan->src, &conn->hcon->src);
4888 bacpy(&chan->dst, &conn->hcon->dst);
4889 chan->src_type = bdaddr_src_type(conn->hcon);
4890 chan->dst_type = bdaddr_dst_type(conn->hcon);
4894 chan->remote_mps = mps;
4896 __l2cap_chan_add(conn, chan);
4898 l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));
4901 credits = chan->rx_credits;
4903 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
4905 chan->ident = cmd->ident;
4907 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
4908 l2cap_state_change(chan, BT_CONNECT2);
4909 /* The following result value is actually not defined
4910 * for LE CoC but we use it to let the function know
4911 * that it should bail out after doing its cleanup
4912 * instead of sending a response.
4914 result = L2CAP_CR_PEND;
4915 chan->ops->defer(chan);
4917 l2cap_chan_ready(chan);
4918 result = L2CAP_CR_LE_SUCCESS;
4922 l2cap_chan_unlock(pchan);
4923 mutex_unlock(&conn->chan_lock);
4924 l2cap_chan_put(pchan);
4926 if (result == L2CAP_CR_PEND)
4931 rsp.mtu = cpu_to_le16(chan->imtu);
4932 rsp.mps = cpu_to_le16(chan->mps);
4938 rsp.dcid = cpu_to_le16(dcid);
4939 rsp.credits = cpu_to_le16(credits);
4940 rsp.result = cpu_to_le16(result);
4942 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/* Handle an LE Flow Control Credit packet: add the peer-granted credits
 * to the channel's transmit budget and resume sending.  Credits that
 * would overflow LE_FLOWCTL_MAX_CREDITS are a protocol violation and
 * trigger a disconnect (but still return 0 to avoid also sending a
 * command reject).
 */
4947 static inline int l2cap_le_credits(struct l2cap_conn *conn,
4948 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4951 struct l2cap_le_credits *pkt;
4952 struct l2cap_chan *chan;
4953 u16 cid, credits, max_credits;
4955 if (cmd_len != sizeof(*pkt))
4958 pkt = (struct l2cap_le_credits *) data;
4959 cid = __le16_to_cpu(pkt->cid);
4960 credits = __le16_to_cpu(pkt->credits);
4962 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
4964 chan = l2cap_get_chan_by_dcid(conn, cid);
4968 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
4969 if (credits > max_credits) {
4970 BT_ERR("LE credits overflow");
4971 l2cap_send_disconn_req(chan, ECONNRESET);
4973 /* Return 0 so that we don't trigger an unnecessary
4974 * command reject packet.
4979 chan->tx_credits += credits;
4981 /* Resume sending */
4982 l2cap_le_flowctl_send(chan);
4984 if (chan->tx_credits)
4985 chan->ops->resume(chan);
4988 l2cap_chan_unlock(chan);
4989 l2cap_chan_put(chan);
/* Handle an Enhanced Credit Based Connection Request, which may open up
 * to L2CAP_ECRED_MAX_CID channels at once.
 *
 * Validates the variable-length SCID list (whole number of u16 entries,
 * bounded by pdu.dcid), the minimum MTU/MPS, and the SPSM range, then
 * checks security on the listening channel.  For each requested SCID a
 * child channel is created where possible; per-channel failures leave a
 * zero DCID slot in the response while a partial result code is kept.
 * The aggregated response is sent at the end unless setup was deferred.
 */
4994 static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
4995 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4998 struct l2cap_ecred_conn_req *req = (void *) data;
5000 struct l2cap_ecred_conn_rsp rsp;
5001 __le16 dcid[L2CAP_ECRED_MAX_CID];
5003 struct l2cap_chan *chan, *pchan;
5013 if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
5014 result = L2CAP_CR_LE_INVALID_PARAMS;
5018 cmd_len -= sizeof(*req);
5019 num_scid = cmd_len / sizeof(u16);
5021 if (num_scid > ARRAY_SIZE(pdu.dcid)) {
5022 result = L2CAP_CR_LE_INVALID_PARAMS;
5026 mtu = __le16_to_cpu(req->mtu);
5027 mps = __le16_to_cpu(req->mps);
5029 if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
5030 result = L2CAP_CR_LE_UNACCEPT_PARAMS;
5036 /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
5039 * Valid range: 0x0001-0x00ff
5041 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
5043 if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
5044 result = L2CAP_CR_LE_BAD_PSM;
5048 BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);
5050 memset(&pdu, 0, sizeof(pdu));
5052 /* Check if we have socket listening on psm */
5053 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5054 &conn->hcon->dst, LE_LINK);
5056 result = L2CAP_CR_LE_BAD_PSM;
5060 mutex_lock(&conn->chan_lock);
5061 l2cap_chan_lock(pchan);
5063 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5065 result = L2CAP_CR_LE_AUTHENTICATION;
5069 result = L2CAP_CR_LE_SUCCESS;
5071 for (i = 0; i < num_scid; i++) {
5072 u16 scid = __le16_to_cpu(req->scid[i]);
5074 BT_DBG("scid[%d] 0x%4.4x", i, scid);
/* Default the response slot to 0 (refused) until the channel is
 * actually created.
 */
5076 pdu.dcid[i] = 0x0000;
5077 len += sizeof(*pdu.dcid);
5079 /* Check for valid dynamic CID range */
5080 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5081 result = L2CAP_CR_LE_INVALID_SCID;
5085 /* Check if we already have channel with that dcid */
5086 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5087 result = L2CAP_CR_LE_SCID_IN_USE;
5091 chan = pchan->ops->new_connection(pchan);
5093 result = L2CAP_CR_LE_NO_MEM;
5097 bacpy(&chan->src, &conn->hcon->src);
5098 bacpy(&chan->dst, &conn->hcon->dst);
5099 chan->src_type = bdaddr_src_type(conn->hcon);
5100 chan->dst_type = bdaddr_dst_type(conn->hcon);
5104 chan->remote_mps = mps;
5106 __l2cap_chan_add(conn, chan);
5108 l2cap_ecred_init(chan, __le16_to_cpu(req->credits));
/* Fill the common MTU/MPS/credits fields once, from the first
 * successfully created channel.
 */
5111 if (!pdu.rsp.credits) {
5112 pdu.rsp.mtu = cpu_to_le16(chan->imtu);
5113 pdu.rsp.mps = cpu_to_le16(chan->mps);
5114 pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
5117 pdu.dcid[i] = cpu_to_le16(chan->scid);
5119 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5121 chan->ident = cmd->ident;
5122 chan->mode = L2CAP_MODE_EXT_FLOWCTL;
5124 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5125 l2cap_state_change(chan, BT_CONNECT2);
5127 chan->ops->defer(chan);
5129 l2cap_chan_ready(chan);
5134 l2cap_chan_unlock(pchan);
5135 mutex_unlock(&conn->chan_lock);
5136 l2cap_chan_put(pchan);
5139 pdu.rsp.result = cpu_to_le16(result);
5144 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
5145 sizeof(pdu.rsp) + len, &pdu);
/* Handle an Enhanced Credit Based Connection Response.
 *
 * Walks every channel on the connection that is pending under this
 * command ident in EXT_FLOWCTL mode, consuming one DCID from the
 * response per channel.  Channels with a missing or duplicate DCID are
 * deleted (a duplicate also kills the original holder of that DCID, per
 * spec); security-related failure results set up a retry at a higher
 * security level; otherwise a nonzero DCID completes the channel.
 */
5150 static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
5151 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5154 struct l2cap_ecred_conn_rsp *rsp = (void *) data;
5155 struct hci_conn *hcon = conn->hcon;
5156 u16 mtu, mps, credits, result;
5157 struct l2cap_chan *chan, *tmp;
5158 int err = 0, sec_level;
5161 if (cmd_len < sizeof(*rsp))
5164 mtu = __le16_to_cpu(rsp->mtu);
5165 mps = __le16_to_cpu(rsp->mps);
5166 credits = __le16_to_cpu(rsp->credits);
5167 result = __le16_to_cpu(rsp->result);
5169 BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
5172 mutex_lock(&conn->chan_lock);
5174 cmd_len -= sizeof(*rsp);
5176 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
5179 if (chan->ident != cmd->ident ||
5180 chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
5181 chan->state == BT_CONNECTED)
5184 l2cap_chan_lock(chan);
5186 /* Check that there is a dcid for each pending channel */
5187 if (cmd_len < sizeof(dcid)) {
5188 l2cap_chan_del(chan, ECONNREFUSED);
5189 l2cap_chan_unlock(chan);
5193 dcid = __le16_to_cpu(rsp->dcid[i++]);
5194 cmd_len -= sizeof(u16);
5196 BT_DBG("dcid[%d] 0x%4.4x", i, dcid);
5198 /* Check if dcid is already in use */
5199 if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
5200 /* If a device receives a
5201 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
5202 * already-assigned Destination CID, then both the
5203 * original channel and the new channel shall be
5204 * immediately discarded and not used.
5206 l2cap_chan_del(chan, ECONNREFUSED);
5207 l2cap_chan_unlock(chan);
5208 chan = __l2cap_get_chan_by_dcid(conn, dcid);
5209 l2cap_chan_lock(chan);
5210 l2cap_chan_del(chan, ECONNRESET);
5211 l2cap_chan_unlock(chan);
5216 case L2CAP_CR_LE_AUTHENTICATION:
5217 case L2CAP_CR_LE_ENCRYPTION:
5218 /* If we already have MITM protection we can't do
5221 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5222 l2cap_chan_del(chan, ECONNREFUSED);
5226 sec_level = hcon->sec_level + 1;
5227 if (chan->sec_level < sec_level)
5228 chan->sec_level = sec_level;
5230 /* We'll need to send a new Connect Request */
5231 clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);
5233 smp_conn_security(hcon, chan->sec_level);
5236 case L2CAP_CR_LE_BAD_PSM:
5237 l2cap_chan_del(chan, ECONNREFUSED);
5241 /* If dcid was not set it means channels was refused */
5243 l2cap_chan_del(chan, ECONNREFUSED);
5250 chan->remote_mps = mps;
5251 chan->tx_credits = credits;
5252 l2cap_chan_ready(chan);
5256 l2cap_chan_unlock(chan);
5259 mutex_unlock(&conn->chan_lock);
/* Handle an Enhanced Credit Based Reconfigure Request.
 *
 * Validates the header length and that the trailing SCID list is a
 * whole number of u16 entries, enforces the minimum MTU/MPS, then for
 * each listed channel rejects MTU decreases (the spec requires the
 * receiver to refuse them) and otherwise records the new remote MPS.
 * Finishes by sending a Reconfigure Response carrying the result.
 */
5264 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
5265 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5268 struct l2cap_ecred_reconf_req *req = (void *) data;
5269 struct l2cap_ecred_reconf_rsp rsp;
5270 u16 mtu, mps, result;
5271 struct l2cap_chan *chan;
/* Fix: parenthesize the modulo so the check validates that the SCID
 * list length is a multiple of sizeof(u16), matching the equivalent
 * check in l2cap_ecred_conn_req().  Without the parentheses the
 * expression evaluated to cmd_len - (sizeof(*req) % sizeof(u16)) ==
 * cmd_len, which is nonzero for every request, so the guard fired
 * unconditionally.
 */
5277 if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
5278 result = L2CAP_CR_LE_INVALID_PARAMS;
5282 mtu = __le16_to_cpu(req->mtu);
5283 mps = __le16_to_cpu(req->mps);
5285 BT_DBG("mtu %u mps %u", mtu, mps);
5287 if (mtu < L2CAP_ECRED_MIN_MTU) {
5288 result = L2CAP_RECONF_INVALID_MTU;
5292 if (mps < L2CAP_ECRED_MIN_MPS) {
5293 result = L2CAP_RECONF_INVALID_MPS;
5297 cmd_len -= sizeof(*req);
5298 num_scid = cmd_len / sizeof(u16);
5299 result = L2CAP_RECONF_SUCCESS;
5301 for (i = 0; i < num_scid; i++) {
5304 scid = __le16_to_cpu(req->scid[i]);
5308 chan = __l2cap_get_chan_by_dcid(conn, scid);
5312 /* If the MTU value is decreased for any of the included
5313 * channels, then the receiver shall disconnect all
5314 * included channels.
5316 if (chan->omtu > mtu) {
5317 BT_ERR("chan %p decreased MTU %u -> %u", chan,
5319 result = L2CAP_RECONF_INVALID_MTU;
5323 chan->remote_mps = mps;
5327 rsp.result = cpu_to_le16(result);
5329 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
/* Handle an Enhanced Credit Based Reconfigure Response: on a failure
 * result, delete every channel still pending under this command ident.
 */
5335 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
5336 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5339 struct l2cap_chan *chan, *tmp;
5340 struct l2cap_ecred_conn_rsp *rsp = (void *) data;
5343 if (cmd_len < sizeof(*rsp))
5346 result = __le16_to_cpu(rsp->result);
/* NOTE(review): this BT_DBG prints the raw little-endian rsp->result
 * rather than the converted 'result' -- harmless on LE hosts but
 * inconsistent with the conversion above; confirm intent.
 */
5348 BT_DBG("result 0x%4.4x", rsp->result);
5353 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
5354 if (chan->ident != cmd->ident)
5357 l2cap_chan_del(chan, ECONNRESET);
/* Handle an LE Command Reject: the peer refused a command we sent, so
 * find the channel pending under that ident (taking a reference via
 * l2cap_chan_hold_unless_zero() to guard against concurrent teardown)
 * and delete it as connection-refused.
 */
5363 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5364 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5367 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5368 struct l2cap_chan *chan;
5370 if (cmd_len < sizeof(*rej))
5373 mutex_lock(&conn->chan_lock);
5375 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5379 chan = l2cap_chan_hold_unless_zero(chan);
5383 l2cap_chan_lock(chan);
5384 l2cap_chan_del(chan, ECONNREFUSED);
5385 l2cap_chan_unlock(chan);
5386 l2cap_chan_put(chan);
5389 mutex_unlock(&conn->chan_lock);
/* Dispatch a single LE signaling command to its handler.  Unknown codes
 * are logged and handled by the caller's rejection path.
 */
5393 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5394 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5399 switch (cmd->code) {
5400 case L2CAP_COMMAND_REJ:
5401 l2cap_le_command_rej(conn, cmd, cmd_len, data);
5404 case L2CAP_CONN_PARAM_UPDATE_REQ:
5405 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5408 case L2CAP_CONN_PARAM_UPDATE_RSP:
5411 case L2CAP_LE_CONN_RSP:
5412 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5415 case L2CAP_LE_CONN_REQ:
5416 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5419 case L2CAP_LE_CREDITS:
5420 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5423 case L2CAP_ECRED_CONN_REQ:
5424 err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
5427 case L2CAP_ECRED_CONN_RSP:
5428 err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
5431 case L2CAP_ECRED_RECONF_REQ:
5432 err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
5435 case L2CAP_ECRED_RECONF_RSP:
5436 err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
5439 case L2CAP_DISCONN_REQ:
5440 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5443 case L2CAP_DISCONN_RSP:
5444 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5448 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process one skb from the LE signaling channel.  LE carries exactly one
 * command per PDU (unlike BR/EDR): validate the link type and header,
 * check the advertised length against the remaining payload, dispatch
 * via l2cap_le_sig_cmd(), and send a "not understood" Command Reject on
 * handler error.
 */
5456 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5457 struct sk_buff *skb)
5459 struct hci_conn *hcon = conn->hcon;
5460 struct l2cap_cmd_hdr *cmd;
5464 if (hcon->type != LE_LINK)
5467 if (skb->len < L2CAP_CMD_HDR_SIZE)
5470 cmd = (void *) skb->data;
5471 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5473 len = le16_to_cpu(cmd->len);
5475 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* ident 0 is reserved and a length mismatch means a malformed PDU. */
5477 if (len != skb->len || !cmd->ident) {
5478 BT_DBG("corrupted command");
5482 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5484 struct l2cap_cmd_rej_unk rej;
5486 BT_ERR("Wrong link type (%d)", err);
5488 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5489 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Send a "command not understood" Command Reject under the given ident
 * (0 when the offending command's ident could not be parsed).
 */
5497 static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
5499 struct l2cap_cmd_rej_unk rej;
5501 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5502 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Process one skb from the BR/EDR signaling channel, which may carry
 * multiple commands back-to-back.  Each command header is validated
 * (length within remaining payload, nonzero ident) and dispatched via
 * l2cap_bredr_sig_cmd(); malformed or failed commands are answered with
 * a "not understood" Command Reject.  The raw PDU is also mirrored to
 * raw sockets via l2cap_raw_recv() first.
 */
5505 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5506 struct sk_buff *skb)
5508 struct hci_conn *hcon = conn->hcon;
5509 struct l2cap_cmd_hdr *cmd;
5512 l2cap_raw_recv(conn, skb);
5514 if (hcon->type != ACL_LINK)
5517 while (skb->len >= L2CAP_CMD_HDR_SIZE) {
5520 cmd = (void *) skb->data;
5521 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5523 len = le16_to_cpu(cmd->len);
5525 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
5528 if (len > skb->len || !cmd->ident) {
5529 BT_DBG("corrupted command");
5530 l2cap_sig_send_rej(conn, cmd->ident);
/* Skip past what we can of the bad command and keep parsing. */
5531 skb_pull(skb, len > skb->len ? skb->len : len);
5535 err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
5537 BT_ERR("Wrong link type (%d)", err);
5538 l2cap_sig_send_rej(conn, cmd->ident);
5545 BT_DBG("corrupted command");
5546 l2cap_sig_send_rej(conn, 0);
/* Verify the CRC16 FCS trailer on a received ERTM/streaming frame.
 *
 * skb_trim() only shrinks skb->len, so after trimming off the 2-byte
 * FCS the trailer bytes are still present at skb->data + skb->len and
 * can be read there.  The CRC is computed over the L2CAP header (which
 * sits hdr_size bytes *before* skb->data at this point) plus the
 * remaining payload.
 */
5553 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5555 u16 our_fcs, rcv_fcs;
5558 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5559 hdr_size = L2CAP_EXT_HDR_SIZE;
5561 hdr_size = L2CAP_ENH_HDR_SIZE;
5563 if (chan->fcs == L2CAP_FCS_CRC16) {
5564 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5565 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5566 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5568 if (our_fcs != rcv_fcs)
/* Acknowledge the peer's poll with the F-bit set: send RNR when locally
 * busy, otherwise flush pending I-frames (restarting the retransmission
 * timer if the peer had been busy), and fall back to an RR S-frame if
 * no frame carried the F-bit.
 */
5574 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5576 struct l2cap_ctrl control;
5578 BT_DBG("chan %p", chan);
5580 memset(&control, 0, sizeof(control));
5583 control.reqseq = chan->buffer_seq;
5584 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5586 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5587 control.super = L2CAP_SUPER_RNR;
5588 l2cap_send_sframe(chan, &control);
5591 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5592 chan->unacked_frames > 0)
5593 __set_retrans_timer(chan);
5595 /* Send pending iframes */
5596 l2cap_ertm_send(chan);
5598 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5599 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5600 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5603 control.super = L2CAP_SUPER_RR;
5604 l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's frag_list, tracking the list tail through
 * *last_frag to avoid walking the list, and account its length and
 * truesize into the head skb.
 */
5608 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5609 struct sk_buff **last_frag)
5611 /* skb->len reflects data in skb as well as all fragments
5612 * skb->data_len reflects only data in fragments
5614 if (!skb_has_frag_list(skb))
5615 skb_shinfo(skb)->frag_list = new_frag;
5617 new_frag->next = NULL;
5619 (*last_frag)->next = new_frag;
5620 *last_frag = new_frag;
5622 skb->len += new_frag->len;
5623 skb->data_len += new_frag->len;
5624 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from segmented I-frames according to the SAR bits.
 *
 * UNSEGMENTED frames are delivered directly.  START pulls the 16-bit
 * SDU length, validates it against the channel MTU, and begins a
 * fragment chain in chan->sdu; CONTINUE/END append via append_skb_frag()
 * with length checks, and END delivers the completed SDU to the channel
 * ops.  On any error the partial SDU is freed and reassembly state is
 * reset.
 */
5627 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5628 struct l2cap_ctrl *control)
5632 switch (control->sar) {
5633 case L2CAP_SAR_UNSEGMENTED:
5637 err = chan->ops->recv(chan, skb);
5640 case L2CAP_SAR_START:
5644 if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
5647 chan->sdu_len = get_unaligned_le16(skb->data);
5648 skb_pull(skb, L2CAP_SDULEN_SIZE);
5650 if (chan->sdu_len > chan->imtu) {
5655 if (skb->len >= chan->sdu_len)
5659 chan->sdu_last_frag = skb;
5665 case L2CAP_SAR_CONTINUE:
5669 append_skb_frag(chan->sdu, skb,
5670 &chan->sdu_last_frag);
5673 if (chan->sdu->len >= chan->sdu_len)
5683 append_skb_frag(chan->sdu, skb,
5684 &chan->sdu_last_frag);
5687 if (chan->sdu->len != chan->sdu_len)
5690 err = chan->ops->recv(chan, chan->sdu);
5693 /* Reassembly complete */
5695 chan->sdu_last_frag = NULL;
5703 kfree_skb(chan->sdu);
5705 chan->sdu_last_frag = NULL;
/* Placeholder for re-segmenting queued frames after an MPS change;
 * currently a stub (body not implemented beyond a trivial return).
 */
5712 static int l2cap_resegment(struct l2cap_chan *chan)
/* Feed a local-busy state change into the ERTM transmit state machine;
 * a no-op for channels not in ERTM mode.
 */
5718 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5722 if (chan->mode != L2CAP_MODE_ERTM)
5725 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5726 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue: deliver consecutively sequenced frames starting
 * at buffer_seq to l2cap_reassemble_sdu() until a gap is found or the
 * channel goes locally busy; once the queue empties, return to the
 * normal RECV state and acknowledge.
 */
5729 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5732 /* Pass sequential frames to l2cap_reassemble_sdu()
5733 * until a gap is encountered.
5736 BT_DBG("chan %p", chan);
5738 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5739 struct sk_buff *skb;
5740 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5741 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5743 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5748 skb_unlink(skb, &chan->srej_q);
5749 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5750 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
5755 if (skb_queue_empty(&chan->srej_q)) {
5756 chan->rx_state = L2CAP_RX_STATE_RECV;
5757 l2cap_send_ack(chan);
/* Handle an incoming SREJ S-frame: retransmit the single requested
 * I-frame.
 *
 * A reqseq equal to next_tx_seq is invalid (that frame was never sent)
 * and tears the connection down, as does exceeding max_tx retries for
 * the requested frame.  The P/F bits determine whether the F-bit is
 * echoed and whether CONN_SREJ_ACT bookkeeping (srej_save_reqseq) is
 * needed to suppress a duplicate retransmission while in WAIT_F.
 */
5763 static void l2cap_handle_srej(struct l2cap_chan *chan,
5764 struct l2cap_ctrl *control)
5766 struct sk_buff *skb;
5768 BT_DBG("chan %p, control %p", chan, control);
5770 if (control->reqseq == chan->next_tx_seq) {
5771 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5772 l2cap_send_disconn_req(chan, ECONNRESET);
5776 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5779 BT_DBG("Seq %d not available for retransmission",
5784 if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
5785 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5786 l2cap_send_disconn_req(chan, ECONNRESET);
5790 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5792 if (control->poll) {
5793 l2cap_pass_to_tx(chan, control);
5795 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5796 l2cap_retransmit(chan, control);
5797 l2cap_ertm_send(chan);
5799 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5800 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5801 chan->srej_save_reqseq = control->reqseq;
5804 l2cap_pass_to_tx_fbit(chan, control);
5806 if (control->final) {
/* Only skip the retransmit if this F-bit answers the SREJ we
 * already acted on (same saved reqseq).
 */
5807 if (chan->srej_save_reqseq != control->reqseq ||
5808 !test_and_clear_bit(CONN_SREJ_ACT,
5810 l2cap_retransmit(chan, control);
5812 l2cap_retransmit(chan, control);
5813 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5814 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5815 chan->srej_save_reqseq = control->reqseq;
/* Handle an incoming REJ S-frame: retransmit everything from reqseq
 * onward.
 *
 * As with SREJ, a reqseq equal to next_tx_seq or a frame past its
 * max_tx retry budget disconnects the channel.  CONN_REJ_ACT prevents
 * retransmitting twice for the same REJ when the matching F-bit
 * arrives later.
 */
5821 static void l2cap_handle_rej(struct l2cap_chan *chan,
5822 struct l2cap_ctrl *control)
5824 struct sk_buff *skb;
5826 BT_DBG("chan %p, control %p", chan, control);
5828 if (control->reqseq == chan->next_tx_seq) {
5829 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5830 l2cap_send_disconn_req(chan, ECONNRESET);
5834 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5836 if (chan->max_tx && skb &&
5837 bt_cb(skb)->l2cap.retries >= chan->max_tx) {
5838 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5839 l2cap_send_disconn_req(chan, ECONNRESET);
5843 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5845 l2cap_pass_to_tx(chan, control);
5847 if (control->final) {
5848 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5849 l2cap_retransmit_all(chan, control);
5851 l2cap_retransmit_all(chan, control);
5852 l2cap_ertm_send(chan);
5853 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5854 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify an incoming I-frame's txseq relative to the RX window and
 * the SREJ bookkeeping, returning one of the L2CAP_TXSEQ_* codes that
 * drive the RX state machines (expected, duplicate, unexpected gap,
 * invalid, or the SREJ-specific variants).
 */
5858 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5860 BT_DBG("chan %p, txseq %d", chan, txseq);
5862 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5863 chan->expected_tx_seq);
5865 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5866 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5868 /* See notes below regarding "double poll" and
5871 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5872 BT_DBG("Invalid/Ignore - after SREJ");
5873 return L2CAP_TXSEQ_INVALID_IGNORE;
5875 BT_DBG("Invalid - in window after SREJ sent");
5876 return L2CAP_TXSEQ_INVALID;
/* Head of the SREJ list is the retransmission we are waiting for */
5880 if (chan->srej_list.head == txseq) {
5881 BT_DBG("Expected SREJ");
5882 return L2CAP_TXSEQ_EXPECTED_SREJ;
5885 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
5886 BT_DBG("Duplicate SREJ - txseq already stored");
5887 return L2CAP_TXSEQ_DUPLICATE_SREJ;
5890 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
5891 BT_DBG("Unexpected SREJ - not requested");
5892 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
5896 if (chan->expected_tx_seq == txseq) {
5897 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5899 BT_DBG("Invalid - txseq outside tx window");
5900 return L2CAP_TXSEQ_INVALID;
5903 return L2CAP_TXSEQ_EXPECTED;
5907 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
5908 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
5909 BT_DBG("Duplicate - expected_tx_seq later than txseq");
5910 return L2CAP_TXSEQ_DUPLICATE;
5913 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
5914 /* A source of invalid packets is a "double poll" condition,
5915 * where delays cause us to send multiple poll packets. If
5916 * the remote stack receives and processes both polls,
5917 * sequence numbers can wrap around in such a way that a
5918 * resent frame has a sequence number that looks like new data
5919 * with a sequence gap. This would trigger an erroneous SREJ
5922 * Fortunately, this is impossible with a tx window that's
5923 * less than half of the maximum sequence number, which allows
5924 * invalid frames to be safely ignored.
5926 * With tx window sizes greater than half of the tx window
5927 * maximum, the frame is invalid and cannot be ignored. This
5928 * causes a disconnect.
5931 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5932 BT_DBG("Invalid/Ignore - txseq outside tx window");
5933 return L2CAP_TXSEQ_INVALID_IGNORE;
5935 BT_DBG("Invalid - txseq outside tx window");
5936 return L2CAP_TXSEQ_INVALID;
5939 BT_DBG("Unexpected - txseq indicates missing frames");
5940 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM RX state machine, RECV state: dispatch an incoming I-frame or
 * S-frame event. Expected I-frames are reassembled and acked; a gap in
 * txseq queues the frame and moves to SREJ_SENT; RR/RNR/REJ/SREJ events
 * update the TX side. Frames not consumed (skb_in_use unset) are freed
 * by the elided tail of the function.
 */
5944 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
5945 struct l2cap_ctrl *control,
5946 struct sk_buff *skb, u8 event)
5948 struct l2cap_ctrl local_control;
5950 bool skb_in_use = false;
5952 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5956 case L2CAP_EV_RECV_IFRAME:
5957 switch (l2cap_classify_txseq(chan, control->txseq)) {
5958 case L2CAP_TXSEQ_EXPECTED:
5959 l2cap_pass_to_tx(chan, control);
5961 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5962 BT_DBG("Busy, discarding expected seq %d",
5967 chan->expected_tx_seq = __next_seq(chan,
5970 chan->buffer_seq = chan->expected_tx_seq;
5973 /* l2cap_reassemble_sdu may free skb, hence invalidate
5974 * control, so make a copy in advance to use it after
5975 * l2cap_reassemble_sdu returns and to avoid the race
5976 * condition, for example:
5978 * The current thread calls:
5979 * l2cap_reassemble_sdu
5980 * chan->ops->recv == l2cap_sock_recv_cb
5981 * __sock_queue_rcv_skb
5982 * Another thread calls:
5986 * Then the current thread tries to access control, but
5987 * it was freed by skb_free_datagram.
5989 local_control = *control;
5990 err = l2cap_reassemble_sdu(chan, skb, control);
/* After this point only local_control may be used; control may dangle */
5994 if (local_control.final) {
5995 if (!test_and_clear_bit(CONN_REJ_ACT,
5996 &chan->conn_state)) {
5997 local_control.final = 0;
5998 l2cap_retransmit_all(chan, &local_control);
5999 l2cap_ertm_send(chan);
6003 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6004 l2cap_send_ack(chan);
6006 case L2CAP_TXSEQ_UNEXPECTED:
6007 l2cap_pass_to_tx(chan, control);
6009 /* Can't issue SREJ frames in the local busy state.
6010 * Drop this frame, it will be seen as missing
6011 * when local busy is exited.
6013 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6014 BT_DBG("Busy, discarding unexpected seq %d",
6019 /* There was a gap in the sequence, so an SREJ
6020 * must be sent for each missing frame. The
6021 * current frame is stored for later use.
6023 skb_queue_tail(&chan->srej_q, skb);
6025 BT_DBG("Queued %p (queue len %d)", skb,
6026 skb_queue_len(&chan->srej_q));
6028 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6029 l2cap_seq_list_clear(&chan->srej_list);
6030 l2cap_send_srej(chan, control->txseq);
6032 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6034 case L2CAP_TXSEQ_DUPLICATE:
6035 l2cap_pass_to_tx(chan, control);
6037 case L2CAP_TXSEQ_INVALID_IGNORE:
6039 case L2CAP_TXSEQ_INVALID:
6041 l2cap_send_disconn_req(chan, ECONNRESET);
6045 case L2CAP_EV_RECV_RR:
6046 l2cap_pass_to_tx(chan, control);
6047 if (control->final) {
6048 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6050 if (!test_and_clear_bit(CONN_REJ_ACT,
6051 &chan->conn_state)) {
6053 l2cap_retransmit_all(chan, control);
6056 l2cap_ertm_send(chan);
6057 } else if (control->poll) {
6058 l2cap_send_i_or_rr_or_rnr(chan);
6060 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6061 &chan->conn_state) &&
6062 chan->unacked_frames)
6063 __set_retrans_timer(chan);
6065 l2cap_ertm_send(chan);
6068 case L2CAP_EV_RECV_RNR:
6069 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6070 l2cap_pass_to_tx(chan, control);
6071 if (control && control->poll) {
6072 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6073 l2cap_send_rr_or_rnr(chan, 0);
6075 __clear_retrans_timer(chan);
6076 l2cap_seq_list_clear(&chan->retrans_list);
6078 case L2CAP_EV_RECV_REJ:
6079 l2cap_handle_rej(chan, control);
6081 case L2CAP_EV_RECV_SREJ:
6082 l2cap_handle_srej(chan, control);
6088 if (skb && !skb_in_use) {
6089 BT_DBG("Freeing %p", skb);
/* ERTM RX state machine, SREJ_SENT state: we have outstanding SREJs.
 * Arriving I-frames are queued in srej_q until the gaps are filled
 * (l2cap_rx_queued_iframes drains them), additional gaps trigger more
 * SREJs, and RR/RNR/REJ/SREJ events are handled with SREJ-aware logic.
 */
6096 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6097 struct l2cap_ctrl *control,
6098 struct sk_buff *skb, u8 event)
6101 u16 txseq = control->txseq;
6102 bool skb_in_use = false;
6104 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6108 case L2CAP_EV_RECV_IFRAME:
6109 switch (l2cap_classify_txseq(chan, txseq)) {
6110 case L2CAP_TXSEQ_EXPECTED:
6111 /* Keep frame for reassembly later */
6112 l2cap_pass_to_tx(chan, control);
6113 skb_queue_tail(&chan->srej_q, skb);
6115 BT_DBG("Queued %p (queue len %d)", skb,
6116 skb_queue_len(&chan->srej_q));
6118 chan->expected_tx_seq = __next_seq(chan, txseq);
6120 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* This is the retransmission we asked for: drop it from the SREJ list */
6121 l2cap_seq_list_pop(&chan->srej_list);
6123 l2cap_pass_to_tx(chan, control);
6124 skb_queue_tail(&chan->srej_q, skb);
6126 BT_DBG("Queued %p (queue len %d)", skb,
6127 skb_queue_len(&chan->srej_q));
/* Try to flush now-contiguous frames up to the application */
6129 err = l2cap_rx_queued_iframes(chan);
6134 case L2CAP_TXSEQ_UNEXPECTED:
6135 /* Got a frame that can't be reassembled yet.
6136 * Save it for later, and send SREJs to cover
6137 * the missing frames.
6139 skb_queue_tail(&chan->srej_q, skb);
6141 BT_DBG("Queued %p (queue len %d)", skb,
6142 skb_queue_len(&chan->srej_q));
6144 l2cap_pass_to_tx(chan, control);
6145 l2cap_send_srej(chan, control->txseq);
6147 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6148 /* This frame was requested with an SREJ, but
6149 * some expected retransmitted frames are
6150 * missing. Request retransmission of missing
6153 skb_queue_tail(&chan->srej_q, skb);
6155 BT_DBG("Queued %p (queue len %d)", skb,
6156 skb_queue_len(&chan->srej_q));
6158 l2cap_pass_to_tx(chan, control);
6159 l2cap_send_srej_list(chan, control->txseq);
6161 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6162 /* We've already queued this frame. Drop this copy. */
6163 l2cap_pass_to_tx(chan, control);
6165 case L2CAP_TXSEQ_DUPLICATE:
6166 /* Expecting a later sequence number, so this frame
6167 * was already received. Ignore it completely.
6170 case L2CAP_TXSEQ_INVALID_IGNORE:
6172 case L2CAP_TXSEQ_INVALID:
6174 l2cap_send_disconn_req(chan, ECONNRESET);
6178 case L2CAP_EV_RECV_RR:
6179 l2cap_pass_to_tx(chan, control);
6180 if (control->final) {
6181 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6183 if (!test_and_clear_bit(CONN_REJ_ACT,
6184 &chan->conn_state)) {
6186 l2cap_retransmit_all(chan, control);
6189 l2cap_ertm_send(chan);
6190 } else if (control->poll) {
6191 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6192 &chan->conn_state) &&
6193 chan->unacked_frames) {
6194 __set_retrans_timer(chan);
6197 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6198 l2cap_send_srej_tail(chan);
6200 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6201 &chan->conn_state) &&
6202 chan->unacked_frames)
6203 __set_retrans_timer(chan);
6205 l2cap_send_ack(chan);
6208 case L2CAP_EV_RECV_RNR:
6209 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6210 l2cap_pass_to_tx(chan, control);
6211 if (control->poll) {
6212 l2cap_send_srej_tail(chan);
/* Non-poll RNR: answer with a plain RR carrying our buffer_seq */
6214 struct l2cap_ctrl rr_control;
6215 memset(&rr_control, 0, sizeof(rr_control));
6216 rr_control.sframe = 1;
6217 rr_control.super = L2CAP_SUPER_RR;
6218 rr_control.reqseq = chan->buffer_seq;
6219 l2cap_send_sframe(chan, &rr_control);
6223 case L2CAP_EV_RECV_REJ:
6224 l2cap_handle_rej(chan, control);
6226 case L2CAP_EV_RECV_SREJ:
6227 l2cap_handle_srej(chan, control);
6231 if (skb && !skb_in_use) {
6232 BT_DBG("Freeing %p", skb);
/* Finish an AMP/channel move: return to the RECV state, refresh the
 * connection MTU from the underlying HCI connection, and resegment
 * any pending outbound data for the new MTU.
 */
6239 static int l2cap_finish_move(struct l2cap_chan *chan)
6241 BT_DBG("chan %p", chan);
6243 chan->rx_state = L2CAP_RX_STATE_RECV;
6244 chan->conn->mtu = chan->conn->hcon->mtu;
6246 return l2cap_resegment(chan);
/* ERTM RX state machine, WAIT_P state (after a channel move): once a
 * poll arrives, rewind the TX side to the peer's reqseq, finish the
 * move, and replay the triggering event through the RECV handler.
 */
6249 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6250 struct l2cap_ctrl *control,
6251 struct sk_buff *skb, u8 event)
6255 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6261 l2cap_process_reqseq(chan, control->reqseq);
6263 if (!skb_queue_empty(&chan->tx_q))
6264 chan->tx_send_head = skb_peek(&chan->tx_q);
6266 chan->tx_send_head = NULL;
6268 /* Rewind next_tx_seq to the point expected
6271 chan->next_tx_seq = control->reqseq;
6272 chan->unacked_frames = 0;
6274 err = l2cap_finish_move(chan);
6278 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6279 l2cap_send_i_or_rr_or_rnr(chan);
/* S-frame events are re-dispatched without the skb */
6281 if (event == L2CAP_EV_RECV_IFRAME)
6284 return l2cap_rx_state_recv(chan, control, NULL, event);
/* ERTM RX state machine, WAIT_F state (after a channel move): wait for
 * a frame with F=1, then rewind the TX side to the peer's reqseq,
 * resegment for the new MTU, and re-dispatch the event in RECV state.
 */
6287 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6288 struct l2cap_ctrl *control,
6289 struct sk_buff *skb, u8 event)
6293 if (!control->final)
6296 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6298 chan->rx_state = L2CAP_RX_STATE_RECV;
6299 l2cap_process_reqseq(chan, control->reqseq);
6301 if (!skb_queue_empty(&chan->tx_q))
6302 chan->tx_send_head = skb_peek(&chan->tx_q);
6304 chan->tx_send_head = NULL;
6306 /* Rewind next_tx_seq to the point expected
6309 chan->next_tx_seq = control->reqseq;
6310 chan->unacked_frames = 0;
6311 chan->conn->mtu = chan->conn->hcon->mtu;
6313 err = l2cap_resegment(chan);
6316 err = l2cap_rx_state_recv(chan, control, skb, event);
/* Return true if reqseq acknowledges a frame that is currently
 * outstanding (sent but not yet acked), using modular sequence math.
 */
6321 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6323 /* Make sure reqseq is for a packet that has been sent but not acked */
6326 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6327 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM RX dispatcher: validate the frame's reqseq, then hand
 * the event to the handler for the channel's current rx_state. An
 * invalid reqseq is a protocol violation and disconnects the channel.
 */
6330 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6331 struct sk_buff *skb, u8 event)
6335 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6336 control, skb, event, chan->rx_state);
6338 if (__valid_reqseq(chan, control->reqseq)) {
6339 switch (chan->rx_state) {
6340 case L2CAP_RX_STATE_RECV:
6341 err = l2cap_rx_state_recv(chan, control, skb, event);
6343 case L2CAP_RX_STATE_SREJ_SENT:
6344 err = l2cap_rx_state_srej_sent(chan, control, skb,
6347 case L2CAP_RX_STATE_WAIT_P:
6348 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6350 case L2CAP_RX_STATE_WAIT_F:
6351 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6358 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6359 control->reqseq, chan->next_tx_seq,
6360 chan->expected_ack_seq);
6361 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode RX: accept only the expected txseq, reassemble the
 * SDU, and on reassembly failure discard the partial SDU. Out-of-order
 * frames are simply dropped; the sequence counters always advance to
 * follow the received txseq (no retransmission in streaming mode).
 */
6367 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6368 struct sk_buff *skb)
6370 /* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
6371 * the txseq field in advance to use it after l2cap_reassemble_sdu
6372 * returns and to avoid the race condition, for example:
6374 * The current thread calls:
6375 * l2cap_reassemble_sdu
6376 * chan->ops->recv == l2cap_sock_recv_cb
6377 * __sock_queue_rcv_skb
6378 * Another thread calls:
6382 * Then the current thread tries to access control, but it was freed by
6383 * skb_free_datagram.
6385 u16 txseq = control->txseq;
6387 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6390 if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
6391 l2cap_pass_to_tx(chan, control);
6393 BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
6394 __next_seq(chan, chan->buffer_seq));
6396 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6398 l2cap_reassemble_sdu(chan, skb, control);
/* Reassembly error path (elided): drop any partially built SDU */
6401 kfree_skb(chan->sdu);
6404 chan->sdu_last_frag = NULL;
6408 BT_DBG("Freeing %p", skb);
/* Always resync sequence state to the frame just seen */
6413 chan->last_acked_seq = txseq;
6414 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Entry point for ERTM/streaming data frames: unpack and FCS-check the
 * control field, validate lengths against MPS, apply the channel's
 * optional filter, then route I-frames to l2cap_rx()/l2cap_stream_rx()
 * and S-frames to l2cap_rx() via the super-field event table. Any
 * validation failure disconnects the channel.
 */
6419 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6421 struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
6425 __unpack_control(chan, skb);
6430 * We can just drop the corrupted I-frame here.
6431 * Receiver will miss it and start proper recovery
6432 * procedures and ask for retransmission.
6434 if (l2cap_check_fcs(chan, skb))
/* Exclude SDU-length and FCS trailer from the payload length check */
6437 if (!control->sframe && control->sar == L2CAP_SAR_START)
6438 len -= L2CAP_SDULEN_SIZE;
6440 if (chan->fcs == L2CAP_FCS_CRC16)
6441 len -= L2CAP_FCS_SIZE;
6443 if (len > chan->mps) {
6444 l2cap_send_disconn_req(chan, ECONNRESET);
6448 if (chan->ops->filter) {
6449 if (chan->ops->filter(chan, skb))
6453 if (!control->sframe) {
6456 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6457 control->sar, control->reqseq, control->final,
6460 /* Validate F-bit - F=0 always valid, F=1 only
6461 * valid in TX WAIT_F
6463 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6466 if (chan->mode != L2CAP_MODE_STREAMING) {
6467 event = L2CAP_EV_RECV_IFRAME;
6468 err = l2cap_rx(chan, control, skb, event);
6470 err = l2cap_stream_rx(chan, control, skb);
6474 l2cap_send_disconn_req(chan, ECONNRESET);
/* Map the 2-bit S-frame super field to an RX state-machine event */
6476 const u8 rx_func_to_event[4] = {
6477 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6478 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6481 /* Only I-frames are expected in streaming mode */
6482 if (chan->mode == L2CAP_MODE_STREAMING)
6485 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6486 control->reqseq, control->final, control->poll,
6490 BT_ERR("Trailing bytes: %d in sframe", len);
6491 l2cap_send_disconn_req(chan, ECONNRESET);
6495 /* Validate F and P bits */
6496 if (control->final && (control->poll ||
6497 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6500 event = rx_func_to_event[control->super];
6501 if (l2cap_rx(chan, control, skb, event))
6502 l2cap_send_disconn_req(chan, ECONNRESET);
/* Return receive credits to the remote side of an LE/ECRED channel,
 * topping rx_credits back up to roughly imtu/mps + 1 via an
 * L2CAP_LE_CREDITS signalling packet. No-op if credits are sufficient.
 */
6512 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6514 struct l2cap_conn *conn = chan->conn;
6515 struct l2cap_le_credits pkt;
6518 return_credits = (chan->imtu / chan->mps) + 1;
6520 if (chan->rx_credits >= return_credits)
6523 return_credits -= chan->rx_credits;
6525 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6527 chan->rx_credits += return_credits;
6529 pkt.cid = cpu_to_le16(chan->scid);
6530 pkt.credits = cpu_to_le16(return_credits);
6532 chan->ident = l2cap_get_ident(conn);
6534 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* Deliver a fully reassembled LE/ECRED SDU to the channel owner, then
 * replenish the sender's credits. Returns the recv callback's result.
 */
6537 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
6541 BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
6543 /* Wait recv to confirm reception before updating the credits */
6544 err = chan->ops->recv(chan, skb);
6546 /* Update credits whenever an SDU is received */
6547 l2cap_chan_le_send_credits(chan);
/* Receive one LE/ECRED PDU: enforce credit accounting and MTU/SDU
 * length limits, start or continue SDU reassembly (chan->sdu), and
 * deliver complete SDUs via l2cap_ecred_recv(). Errors free the skb
 * internally, so 0 is returned to avoid a caller double-free.
 */
6552 static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6556 if (!chan->rx_credits) {
6557 BT_ERR("No credits to receive LE L2CAP data");
6558 l2cap_send_disconn_req(chan, ECONNRESET);
6562 if (chan->imtu < skb->len) {
6563 BT_ERR("Too big LE L2CAP PDU");
6568 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6570 /* Update if remote had run out of credits, this should only happens
6571 * if the remote is not using the entire MPS.
6573 if (!chan->rx_credits)
6574 l2cap_chan_le_send_credits(chan);
/* First fragment of a new SDU carries a 16-bit SDU length header */
6581 sdu_len = get_unaligned_le16(skb->data);
6582 skb_pull(skb, L2CAP_SDULEN_SIZE);
6584 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6585 sdu_len, skb->len, chan->imtu);
6587 if (sdu_len > chan->imtu) {
6588 BT_ERR("Too big LE L2CAP SDU length received");
6593 if (skb->len > sdu_len) {
6594 BT_ERR("Too much LE L2CAP data received");
6599 if (skb->len == sdu_len)
6600 return l2cap_ecred_recv(chan, skb);
6603 chan->sdu_len = sdu_len;
6604 chan->sdu_last_frag = skb;
6606 /* Detect if remote is not able to use the selected MPS */
6607 if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
6608 u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
6610 /* Adjust the number of credits */
6611 BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
6612 chan->mps = mps_len;
6613 l2cap_chan_le_send_credits(chan);
6619 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6620 chan->sdu->len, skb->len, chan->sdu_len);
6622 if (chan->sdu->len + skb->len > chan->sdu_len) {
6623 BT_ERR("Too much LE L2CAP data received");
6628 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6631 if (chan->sdu->len == chan->sdu_len) {
6632 err = l2cap_ecred_recv(chan, chan->sdu);
6635 chan->sdu_last_frag = NULL;
/* Error path (elided labels): discard the partial SDU */
6643 kfree_skb(chan->sdu);
6645 chan->sdu_last_frag = NULL;
6649 /* We can't return an error here since we took care of the skb
6650 * freeing internally. An error return would cause the caller to
6651 * do a double-free of the skb.
/* Route a data frame to the channel owning the given source CID and
 * dispatch by channel mode (LE/ECRED credit flow, basic, ERTM or
 * streaming). Unknown CIDs, disconnected channels and bad modes drop
 * the frame. The channel lock/ref taken by the scid lookup is released
 * at the elided tail.
 */
6656 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6657 struct sk_buff *skb)
6659 struct l2cap_chan *chan;
6661 chan = l2cap_get_chan_by_scid(conn, cid);
6663 BT_DBG("unknown cid 0x%4.4x", cid);
6664 /* Drop packet and return */
6669 BT_DBG("chan %p, len %d", chan, skb->len);
6671 /* If we receive data on a fixed channel before the info req/rsp
6672 * procedure is done simply assume that the channel is supported
6673 * and mark it as ready.
6675 if (chan->chan_type == L2CAP_CHAN_FIXED)
6676 l2cap_chan_ready(chan);
6678 if (chan->state != BT_CONNECTED)
6681 switch (chan->mode) {
6682 case L2CAP_MODE_LE_FLOWCTL:
6683 case L2CAP_MODE_EXT_FLOWCTL:
6684 if (l2cap_ecred_data_rcv(chan, skb) < 0)
6689 case L2CAP_MODE_BASIC:
6690 /* If socket recv buffers overflows we drop data here
6691 * which is *bad* because L2CAP has to be reliable.
6692 * But we don't have any other choice. L2CAP doesn't
6693 * provide flow control mechanism. */
6695 if (chan->imtu < skb->len) {
6696 BT_ERR("Dropping L2CAP data: receive buffer overflow");
6700 if (!chan->ops->recv(chan, skb))
6704 case L2CAP_MODE_ERTM:
6705 case L2CAP_MODE_STREAMING:
6706 l2cap_data_rcv(chan, skb);
6710 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6718 l2cap_chan_unlock(chan);
6719 l2cap_chan_put(chan);
/* Deliver a connectionless (UCD) frame: only valid on ACL links; look
 * up a global channel by PSM, attach the remote address/PSM to the skb
 * for msg_name, and hand it to the channel's recv callback.
 */
6722 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6723 struct sk_buff *skb)
6725 struct hci_conn *hcon = conn->hcon;
6726 struct l2cap_chan *chan;
6728 if (hcon->type != ACL_LINK)
6731 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6736 BT_DBG("chan %p, len %d", chan, skb->len);
6738 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6741 if (chan->imtu < skb->len)
6744 /* Store remote BD_ADDR and PSM for msg_name */
6745 bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
6746 bt_cb(skb)->l2cap.psm = psm;
6748 if (!chan->ops->recv(chan, skb)) {
6749 l2cap_chan_put(chan);
6754 l2cap_chan_put(chan);
/* Demultiplex one complete L2CAP frame from HCI: queue it if the link
 * is not yet BT_CONNECTED, validate the basic header length, ignore
 * data from reject-listed LE peers, and route by CID to the signalling,
 * connectionless or data-channel handlers.
 */
6759 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6761 struct l2cap_hdr *lh = (void *) skb->data;
6762 struct hci_conn *hcon = conn->hcon;
6766 if (hcon->state != BT_CONNECTED) {
6767 BT_DBG("queueing pending rx skb");
6768 skb_queue_tail(&conn->pending_rx, skb);
6772 skb_pull(skb, L2CAP_HDR_SIZE);
6773 cid = __le16_to_cpu(lh->cid);
6774 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload exactly */
6776 if (len != skb->len) {
6781 /* Since we can't actively block incoming LE connections we must
6782 * at least ensure that we ignore incoming data from them.
6784 if (hcon->type == LE_LINK &&
6785 hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
6786 bdaddr_dst_type(hcon))) {
6791 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6794 case L2CAP_CID_SIGNALING:
6795 l2cap_sig_channel(conn, skb);
6798 case L2CAP_CID_CONN_LESS:
6799 psm = get_unaligned((__le16 *) skb->data);
6800 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6801 l2cap_conless_channel(conn, psm, skb);
6804 case L2CAP_CID_LE_SIGNALING:
6805 l2cap_le_sig_channel(conn, skb);
6809 l2cap_data_channel(conn, cid, skb);
/* Workqueue handler: drain frames queued on conn->pending_rx while the
 * link was still connecting and feed them through l2cap_recv_frame().
 */
6814 static void process_pending_rx(struct work_struct *work)
6816 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6818 struct sk_buff *skb;
6822 while ((skb = skb_dequeue(&conn->pending_rx)))
6823 l2cap_recv_frame(conn, skb);
/* Create (or return the existing) l2cap_conn for an HCI connection:
 * allocate the structure and an HCI channel, initialise locks, lists,
 * work items and fixed-channel capabilities, and attach it to hcon.
 */
6826 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
6828 struct l2cap_conn *conn = hcon->l2cap_data;
6829 struct hci_chan *hchan;
6834 hchan = hci_chan_create(hcon);
6838 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
/* Allocation failed: undo the hci_chan before bailing out */
6840 hci_chan_del(hchan);
6844 kref_init(&conn->ref);
6845 hcon->l2cap_data = conn;
6846 conn->hcon = hci_conn_get(hcon);
6847 conn->hchan = hchan;
6849 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
6851 conn->mtu = hcon->mtu;
6852 conn->feat_mask = 0;
6854 conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
/* Advertise SMP over BR/EDR only when Secure Connections (or the
 * debug force flag) makes it usable */
6856 if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
6857 (bredr_sc_enabled(hcon->hdev) ||
6858 hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
6859 conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
6861 mutex_init(&conn->ident_lock);
6862 mutex_init(&conn->chan_lock);
6864 INIT_LIST_HEAD(&conn->chan_l);
6865 INIT_LIST_HEAD(&conn->users);
6867 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
6869 skb_queue_head_init(&conn->pending_rx);
6870 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
6871 INIT_DELAYED_WORK(&conn->id_addr_timer, l2cap_conn_update_id_addr);
6873 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* Validate a PSM for the destination address type: LE PSMs fit in one
 * byte; BR/EDR PSMs must be odd with bit 8 clear (per the spec).
 */
6878 static bool is_valid_psm(u16 psm, u8 dst_type)
6883 if (bdaddr_type_is_le(dst_type))
6884 return (psm <= 0x00ff);
6886 /* PSM must be odd and lsb of upper byte must be 0 */
6887 return ((psm & 0x0101) == 0x0001);
/* Iteration context for l2cap_chan_list() callbacks; further fields
 * (pid, count) are elided in this extraction — see l2cap_chan_by_pid.
 */
6890 struct l2cap_chan_data {
6891 struct l2cap_chan *chan;
/* l2cap_chan_list() callback: count other deferred ECRED channels that
 * share the reference channel's PID and PSM and are still connecting
 * (used to cap simultaneous ECRED connection attempts).
 */
6896 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
6898 struct l2cap_chan_data *d = data;
6901 if (chan == d->chan)
6904 if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
6907 pid = chan->ops->get_peer_pid(chan);
6909 /* Only count deferred channels with the same PID/PSM */
6910 if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
6911 chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
/* Initiate an outgoing L2CAP channel connection: validate PSM/CID and
 * mode for the channel type, establish (or reuse) the underlying ACL
 * or LE link, enforce the ECRED per-PID connection cap, add the channel
 * to the connection, and kick off the connect/security sequence.
 * Returns 0 on success or a negative errno (error paths elided).
 */
6917 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
6918 bdaddr_t *dst, u8 dst_type, u16 timeout)
6920 struct l2cap_conn *conn;
6921 struct hci_conn *hcon;
6922 struct hci_dev *hdev;
6925 BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
6926 dst, dst_type, __le16_to_cpu(psm), chan->mode);
6928 hdev = hci_get_route(dst, &chan->src, chan->src_type);
6930 return -EHOSTUNREACH;
6934 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
6935 chan->chan_type != L2CAP_CHAN_RAW) {
6940 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
6945 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
6950 switch (chan->mode) {
6951 case L2CAP_MODE_BASIC:
6953 case L2CAP_MODE_LE_FLOWCTL:
6955 case L2CAP_MODE_EXT_FLOWCTL:
/* ECRED mode is compile/module-time gated */
6956 if (!enable_ecred) {
6961 case L2CAP_MODE_ERTM:
6962 case L2CAP_MODE_STREAMING:
6971 switch (chan->state) {
6975 /* Already connecting */
6980 /* Already connected */
6994 /* Set destination address and psm */
6995 bacpy(&chan->dst, dst);
6996 chan->dst_type = dst_type;
7001 if (bdaddr_type_is_le(dst_type)) {
7002 /* Convert from L2CAP channel address type to HCI address type
7004 if (dst_type == BDADDR_LE_PUBLIC)
7005 dst_type = ADDR_LE_DEV_PUBLIC;
7007 dst_type = ADDR_LE_DEV_RANDOM;
/* When advertising, connect directly as slave; otherwise go
 * through the connect-by-scan path */
7009 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7010 hcon = hci_connect_le(hdev, dst, dst_type, false,
7011 chan->sec_level, timeout,
7012 HCI_ROLE_SLAVE, 0, 0);
7014 hcon = hci_connect_le_scan(hdev, dst, dst_type,
7015 chan->sec_level, timeout,
7016 CONN_REASON_L2CAP_CHAN);
7019 u8 auth_type = l2cap_get_auth_type(chan);
7020 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
7021 CONN_REASON_L2CAP_CHAN, timeout);
7025 err = PTR_ERR(hcon);
7029 conn = l2cap_conn_add(hcon);
7031 hci_conn_drop(hcon);
7036 if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
7037 struct l2cap_chan_data data;
7040 data.pid = chan->ops->get_peer_pid(chan);
7043 l2cap_chan_list(conn, l2cap_chan_by_pid, &data);
7045 /* Check if there isn't too many channels being connected */
7046 if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
7047 hci_conn_drop(hcon);
7053 mutex_lock(&conn->chan_lock);
7054 l2cap_chan_lock(chan);
7056 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7057 hci_conn_drop(hcon);
7062 /* Update source addr of the socket */
7063 bacpy(&chan->src, &hcon->src);
7064 chan->src_type = bdaddr_src_type(hcon);
7066 __l2cap_chan_add(conn, chan);
7068 /* l2cap_chan_add takes its own ref so we can drop this one */
7069 hci_conn_drop(hcon);
7071 l2cap_state_change(chan, BT_CONNECT);
7072 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7074 /* Release chan->sport so that it can be reused by other
7075 * sockets (as it's only used for listening sockets).
7077 write_lock(&chan_list_lock);
7079 write_unlock(&chan_list_lock);
7081 if (hcon->state == BT_CONNECTED) {
7082 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7083 __clear_chan_timer(chan);
7084 if (l2cap_chan_check_security(chan, true))
7085 l2cap_state_change(chan, BT_CONNECTED);
7087 l2cap_do_start(chan);
7093 l2cap_chan_unlock(chan);
7094 mutex_unlock(&conn->chan_lock);
7096 hci_dev_unlock(hdev);
7100 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
/* Send an ECRED reconfigure request advertising the channel's current
 * MTU and MPS for its single SCID.
 */
7102 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
7104 struct l2cap_conn *conn = chan->conn;
7106 struct l2cap_ecred_reconf_req req;
7110 pdu.req.mtu = cpu_to_le16(chan->imtu);
7111 pdu.req.mps = cpu_to_le16(chan->mps);
7112 pdu.scid = cpu_to_le16(chan->scid);
7114 chan->ident = l2cap_get_ident(conn);
7116 l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
/* Public entry to grow a channel's MTU; shrinking is rejected (elided
 * return path) and the change is propagated via an ECRED reconfigure.
 */
7120 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
7122 if (chan->imtu > mtu)
7125 BT_DBG("chan %p mtu 0x%4.4x", chan, mtu)
7129 l2cap_ecred_reconfigure(chan);
7134 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI connect indication: scan listening channels and build the link
 * mode (accept/role-switch) to report, preferring an exact local
 * address match over BDADDR_ANY wildcards.
 */
7136 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7138 int exact = 0, lm1 = 0, lm2 = 0;
7139 struct l2cap_chan *c;
7141 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7143 /* Find listening sockets and check their link_mode */
7144 read_lock(&chan_list_lock);
7145 list_for_each_entry(c, &chan_list, global_l) {
7146 if (c->state != BT_LISTEN)
7149 if (!bacmp(&c->src, &hdev->bdaddr)) {
7150 lm1 |= HCI_LM_ACCEPT;
7151 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7152 lm1 |= HCI_LM_MASTER;
7154 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7155 lm2 |= HCI_LM_ACCEPT;
7156 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7157 lm2 |= HCI_LM_MASTER;
7160 read_unlock(&chan_list_lock);
7162 return exact ? lm1 : lm2;
7165 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7166 * from an existing channel in the list or from the beginning of the
7167 * global list (by passing NULL as first parameter).
7169 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
7170 struct hci_conn *hcon)
7172 u8 src_type = bdaddr_src_type(hcon);
7174 read_lock(&chan_list_lock);
7177 c = list_next_entry(c, global_l);
7179 c = list_entry(chan_list.next, typeof(*c), global_l);
7181 list_for_each_entry_from(c, &chan_list, global_l) {
7182 if (c->chan_type != L2CAP_CHAN_FIXED)
7184 if (c->state != BT_LISTEN)
7186 if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
7188 if (src_type != c->src_type)
/* Take a ref under the list lock; skip channels already dying */
7191 c = l2cap_chan_hold_unless_zero(c);
7192 read_unlock(&chan_list_lock);
7196 read_unlock(&chan_list_lock);
/* HCI connect-complete callback: on failure tear the connection down;
 * on success create the l2cap_conn, skip reject-listed peers, spawn a
 * channel instance for every listening fixed channel, then mark the
 * connection ready.
 */
7201 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7203 struct hci_dev *hdev = hcon->hdev;
7204 struct l2cap_conn *conn;
7205 struct l2cap_chan *pchan;
7208 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7211 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7214 l2cap_conn_del(hcon, bt_to_errno(status));
7218 conn = l2cap_conn_add(hcon);
7222 dst_type = bdaddr_dst_type(hcon);
7224 /* If device is blocked, do not create channels for it */
7225 if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
7228 /* Find fixed channels and notify them of the new connection. We
7229 * use multiple individual lookups, continuing each time where
7230 * we left off, because the list lock would prevent calling the
7231 * potentially sleeping l2cap_chan_lock() function.
7233 pchan = l2cap_global_fixed_chan(NULL, hcon);
7235 struct l2cap_chan *chan, *next;
7237 /* Client fixed channels should override server ones */
7238 if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7241 l2cap_chan_lock(pchan);
7242 chan = pchan->ops->new_connection(pchan);
7244 bacpy(&chan->src, &hcon->src);
7245 bacpy(&chan->dst, &hcon->dst);
7246 chan->src_type = bdaddr_src_type(hcon);
7247 chan->dst_type = dst_type;
7249 __l2cap_chan_add(conn, chan);
7252 l2cap_chan_unlock(pchan);
/* Advance before dropping our ref on the current listener */
7254 next = l2cap_global_fixed_chan(pchan, hcon);
7255 l2cap_chan_put(pchan);
7259 l2cap_conn_ready(conn);
/* HCI disconnect indication: report the stored disconnect reason for
 * this link, or a default remote-terminated error if no l2cap_conn.
 */
7262 int l2cap_disconn_ind(struct hci_conn *hcon)
7264 struct l2cap_conn *conn = hcon->l2cap_data;
7266 BT_DBG("hcon %p", hcon);
7269 return HCI_ERROR_REMOTE_USER_TERM;
7270 return conn->disc_reason;
/* HCI disconnect-complete callback: tear down the L2CAP connection on
 * ACL/LE links, translating the HCI reason to an errno.
 */
7273 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7275 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7278 BT_DBG("hcon %p reason %d", hcon, reason);
7280 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption starts a grace timer at MEDIUM security or closes
 * the channel at HIGH/FIPS; regaining it clears the MEDIUM timer.
 */
7283 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7285 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7288 if (encrypt == 0x00) {
7289 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7290 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7291 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7292 chan->sec_level == BT_SECURITY_FIPS)
7293 l2cap_chan_close(chan, ECONNREFUSED);
7295 if (chan->sec_level == BT_SECURITY_MEDIUM)
7296 __clear_chan_timer(chan);
/* HCI security (authentication/encryption) result callback: walk every
 * channel on the connection and advance it according to its state —
 * resume established channels, start connection for BT_CONNECT ones,
 * or answer a pending incoming connect (BT_CONNECT2) with success,
 * authorization-pending or security-block, also enforcing the minimum
 * encryption key size.
 */
7300 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7302 struct l2cap_conn *conn = hcon->l2cap_data;
7303 struct l2cap_chan *chan;
7308 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7310 mutex_lock(&conn->chan_lock);
7312 list_for_each_entry(chan, &conn->chan_l, list) {
7313 l2cap_chan_lock(chan);
7315 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7316 state_to_string(chan->state));
7318 if (!status && encrypt)
7319 chan->sec_level = hcon->sec_level;
7321 if (!__l2cap_no_conn_pending(chan)) {
7322 l2cap_chan_unlock(chan);
7326 if (!status && (chan->state == BT_CONNECTED ||
7327 chan->state == BT_CONFIG)) {
7328 chan->ops->resume(chan);
7329 l2cap_check_encryption(chan, encrypt);
7330 l2cap_chan_unlock(chan);
7334 if (chan->state == BT_CONNECT) {
7335 if (!status && l2cap_check_enc_key_size(hcon))
7336 l2cap_start_connection(chan);
7338 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7339 } else if (chan->state == BT_CONNECT2 &&
7340 !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
7341 chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
7342 struct l2cap_conn_rsp rsp;
7345 if (!status && l2cap_check_enc_key_size(hcon)) {
7346 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7347 res = L2CAP_CR_PEND;
7348 stat = L2CAP_CS_AUTHOR_PEND;
7349 chan->ops->defer(chan);
7351 l2cap_state_change(chan, BT_CONFIG);
7352 res = L2CAP_CR_SUCCESS;
7353 stat = L2CAP_CS_NO_INFO;
7356 l2cap_state_change(chan, BT_DISCONN);
7357 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7358 res = L2CAP_CR_SEC_BLOCK;
7359 stat = L2CAP_CS_NO_INFO;
7362 rsp.scid = cpu_to_le16(chan->dcid);
7363 rsp.dcid = cpu_to_le16(chan->scid);
7364 rsp.result = cpu_to_le16(res);
7365 rsp.status = cpu_to_le16(stat);
7366 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* On acceptance, kick off configuration if not already started */
7369 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7370 res == L2CAP_CR_SUCCESS) {
7372 set_bit(CONF_REQ_SENT, &chan->conf_state);
7373 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7375 l2cap_build_conf_req(chan, buf, sizeof(buf)),
7377 chan->num_conf_req++;
7381 l2cap_chan_unlock(chan);
7384 mutex_unlock(&conn->chan_lock);
7387 /* Append fragment into frame respecting the maximum len of rx_skb */
7388 static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
7391 if (!conn->rx_skb) {
7392 /* Allocate skb for the complete frame (with header) */
7393 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7400 /* Copy as much as the rx_skb can hold */
7401 len = min_t(u16, len, skb->len);
7402 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
7404 conn->rx_len -= len;
7409 static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
7411 struct sk_buff *rx_skb;
7414 /* Append just enough to complete the header */
7415 len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);
7417 /* If header could not be read just continue */
7418 if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
7421 rx_skb = conn->rx_skb;
7422 len = get_unaligned_le16(rx_skb->data);
7424 /* Check if rx_skb has enough space to received all fragments */
7425 if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
7426 /* Update expected len */
7427 conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
7428 return L2CAP_LEN_SIZE;
7431 /* Reset conn->rx_skb since it will need to be reallocated in order to
7432 * fit all fragments.
7434 conn->rx_skb = NULL;
7436 /* Reallocates rx_skb using the exact expected length */
7437 len = l2cap_recv_frag(conn, rx_skb,
7438 len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
7444 static void l2cap_recv_reset(struct l2cap_conn *conn)
7446 kfree_skb(conn->rx_skb);
7447 conn->rx_skb = NULL;
7451 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7453 struct l2cap_conn *conn = hcon->l2cap_data;
7456 /* For AMP controller do not create l2cap conn */
7457 if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
7461 conn = l2cap_conn_add(hcon);
7466 BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);
7470 case ACL_START_NO_FLUSH:
7473 BT_ERR("Unexpected start frame (len %d)", skb->len);
7474 l2cap_recv_reset(conn);
7475 l2cap_conn_unreliable(conn, ECOMM);
7478 /* Start fragment may not contain the L2CAP length so just
7479 * copy the initial byte when that happens and use conn->mtu as
7482 if (skb->len < L2CAP_LEN_SIZE) {
7483 l2cap_recv_frag(conn, skb, conn->mtu);
7487 len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;
7489 if (len == skb->len) {
7490 /* Complete frame received */
7491 l2cap_recv_frame(conn, skb);
7495 BT_DBG("Start: total len %d, frag len %u", len, skb->len);
7497 if (skb->len > len) {
7498 BT_ERR("Frame is too long (len %u, expected len %d)",
7500 l2cap_conn_unreliable(conn, ECOMM);
7504 /* Append fragment into frame (with header) */
7505 if (l2cap_recv_frag(conn, skb, len) < 0)
7511 BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);
7513 if (!conn->rx_skb) {
7514 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7515 l2cap_conn_unreliable(conn, ECOMM);
7519 /* Complete the L2CAP length if it has not been read */
7520 if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
7521 if (l2cap_recv_len(conn, skb) < 0) {
7522 l2cap_conn_unreliable(conn, ECOMM);
7526 /* Header still could not be read just continue */
7527 if (conn->rx_skb->len < L2CAP_LEN_SIZE)
7531 if (skb->len > conn->rx_len) {
7532 BT_ERR("Fragment is too long (len %u, expected %u)",
7533 skb->len, conn->rx_len);
7534 l2cap_recv_reset(conn);
7535 l2cap_conn_unreliable(conn, ECOMM);
7539 /* Append fragment into frame (with header) */
7540 l2cap_recv_frag(conn, skb, skb->len);
7542 if (!conn->rx_len) {
7543 /* Complete frame received. l2cap_recv_frame
7544 * takes ownership of the skb so set the global
7545 * rx_skb pointer to NULL first.
7547 struct sk_buff *rx_skb = conn->rx_skb;
7548 conn->rx_skb = NULL;
7549 l2cap_recv_frame(conn, rx_skb);
7558 static struct hci_cb l2cap_cb = {
7560 .connect_cfm = l2cap_connect_cfm,
7561 .disconn_cfm = l2cap_disconn_cfm,
7562 .security_cfm = l2cap_security_cfm,
7565 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7567 struct l2cap_chan *c;
7569 read_lock(&chan_list_lock);
7571 list_for_each_entry(c, &chan_list, global_l) {
7572 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7573 &c->src, c->src_type, &c->dst, c->dst_type,
7574 c->state, __le16_to_cpu(c->psm),
7575 c->scid, c->dcid, c->imtu, c->omtu,
7576 c->sec_level, c->mode);
7579 read_unlock(&chan_list_lock);
7584 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
7586 static struct dentry *l2cap_debugfs;
7588 int __init l2cap_init(void)
7592 err = l2cap_init_sockets();
7596 hci_register_cb(&l2cap_cb);
7598 if (IS_ERR_OR_NULL(bt_debugfs))
7601 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7602 NULL, &l2cap_debugfs_fops);
7607 void l2cap_exit(void)
7609 debugfs_remove(l2cap_debugfs);
7610 hci_unregister_cb(&l2cap_cb);
7611 l2cap_cleanup_sockets();
/* Runtime-tunable module parameters (visible under /sys/module) */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
module_param(enable_ecred, bool, 0644);
MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");