2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
/* Upper bound on LE flow-control credits (fits in a 16-bit PDU field). */
45 #define LE_FLOWCTL_MAX_CREDITS 65535
/* Locally supported L2CAP feature mask (fixed channels + unicast
 * connectionless data). NOTE(review): only the initialiser is visible in
 * this chunk - confirm its consumers elsewhere in the file. */
50 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
/* Global list of every L2CAP channel, guarded by chan_list_lock. */
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for helpers defined later in this file. */
55 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
56 u8 code, u8 ident, u16 dlen, void *data);
57 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
59 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
60 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
62 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
63 struct sk_buff_head *skbs, u8 event);
64 static void l2cap_retrans_timeout(struct work_struct *work);
65 static void l2cap_monitor_timeout(struct work_struct *work);
66 static void l2cap_ack_timeout(struct work_struct *work);
/* Map an HCI link type plus HCI address type to the socket-level
 * BDADDR_* constant. LE public vs random is distinguished here; the
 * non-LE fallthrough is not visible in this chunk (presumably returns
 * BDADDR_BREDR - confirm against the full source). */
68 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
70 if (link_type == LE_LINK) {
71 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
72 return BDADDR_LE_PUBLIC;
74 return BDADDR_LE_RANDOM;
/* Convenience wrappers: translate an hci_conn's source / destination
 * address type through bdaddr_type() above. */
80 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
82 return bdaddr_type(hcon->type, hcon->src_type);
85 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
87 return bdaddr_type(hcon->type, hcon->dst_type);
90 /* ---- L2CAP channels ---- */
/* Walk conn->chan_l for a channel matching the destination CID.
 * Lock-free walker: caller must hold conn->chan_lock. */
92 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
97 list_for_each_entry(c, &conn->chan_l, list) {
/* Same as above but matches on the source CID; caller must hold
 * conn->chan_lock. */
104 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
107 struct l2cap_chan *c;
109 list_for_each_entry(c, &conn->chan_l, list) {
116 /* Find channel with given SCID.
117 * Returns a reference locked channel.
119 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
122 struct l2cap_chan *c;
124 mutex_lock(&conn->chan_lock);
125 c = __l2cap_get_chan_by_scid(conn, cid);
127 /* Only lock if chan reference is not 0 */
128 c = l2cap_chan_hold_unless_zero(c);
132 mutex_unlock(&conn->chan_lock);
137 /* Find channel with given DCID.
138 * Returns a reference locked channel.
140 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
143 struct l2cap_chan *c;
145 mutex_lock(&conn->chan_lock);
146 c = __l2cap_get_chan_by_dcid(conn, cid);
148 /* Only lock if chan reference is not 0 */
149 c = l2cap_chan_hold_unless_zero(c);
153 mutex_unlock(&conn->chan_lock);
/* Lookup by signalling-command identifier; caller holds conn->chan_lock. */
158 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
161 struct l2cap_chan *c;
163 list_for_each_entry(c, &conn->chan_l, list) {
164 if (c->ident == ident)
/* Locked ident lookup; takes a reference via hold_unless_zero, like the
 * SCID/DCID variants above. */
170 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
173 struct l2cap_chan *c;
175 mutex_lock(&conn->chan_lock);
176 c = __l2cap_get_chan_by_ident(conn, ident);
178 /* Only lock if chan reference is not 0 */
179 c = l2cap_chan_hold_unless_zero(c);
183 mutex_unlock(&conn->chan_lock);
/* Search the global channel list for a channel bound to the given source
 * PSM and address. BR/EDR and LE source address types never match each
 * other. Caller must hold chan_list_lock. */
188 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
191 struct l2cap_chan *c;
193 list_for_each_entry(c, &chan_list, global_l) {
194 if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
197 if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
200 if (c->sport == psm && !bacmp(&c->src, src))
/* Bind a channel to a PSM. A caller-supplied non-zero psm is rejected
 * when another channel already owns it for this source address; the
 * auto-allocation branch below otherwise claims a free dynamic PSM,
 * scanning the BR/EDR or LE dynamic range per chan->src_type. */
206 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
210 write_lock(&chan_list_lock);
212 if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
222 u16 p, start, end, incr;
224 if (chan->src_type == BDADDR_BREDR) {
225 start = L2CAP_PSM_DYN_START;
226 end = L2CAP_PSM_AUTO_END;
229 start = L2CAP_PSM_LE_DYN_START;
230 end = L2CAP_PSM_LE_DYN_END;
/* Claim the first dynamic PSM not already bound for this address. */
235 for (p = start; p <= end; p += incr)
236 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
238 chan->psm = cpu_to_le16(p);
239 chan->sport = cpu_to_le16(p);
246 write_unlock(&chan_list_lock);
249 EXPORT_SYMBOL_GPL(l2cap_add_psm);
/* Register a fixed channel for the given source CID, overriding the
 * connection-oriented defaults the channel was created with. */
251 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
253 write_lock(&chan_list_lock);
255 /* Override the defaults (which are for conn-oriented) */
256 chan->omtu = L2CAP_DEFAULT_MTU;
257 chan->chan_type = L2CAP_CHAN_FIXED;
261 write_unlock(&chan_list_lock);
/* Return the first unused dynamic source CID on this connection; the
 * top of the dynamic range differs between LE and BR/EDR links. */
266 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
270 if (conn->hcon->type == LE_LINK)
271 dyn_end = L2CAP_CID_LE_DYN_END;
273 dyn_end = L2CAP_CID_DYN_END;
275 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
276 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Move the channel to a new state and notify the owner through the
 * state_change callback (no error). */
283 static void l2cap_state_change(struct l2cap_chan *chan, int state)
285 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
286 state_to_string(state));
289 chan->ops->state_change(chan, state, 0);
/* Report a state change together with an error code. */
292 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
296 chan->ops->state_change(chan, chan->state, err);
/* Deliver an error to the channel owner without changing state. */
299 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
301 chan->ops->state_change(chan, chan->state, err);
/* (Re)arm the ERTM retransmission timer - but never while the monitor
 * timer is already pending, since the two are mutually exclusive. */
304 static void __set_retrans_timer(struct l2cap_chan *chan)
306 if (!delayed_work_pending(&chan->monitor_timer) &&
307 chan->retrans_timeout) {
308 l2cap_set_timer(chan, &chan->retrans_timer,
309 msecs_to_jiffies(chan->retrans_timeout));
/* Arm the ERTM monitor timer; the retrans timer is cancelled first. */
313 static void __set_monitor_timer(struct l2cap_chan *chan)
315 __clear_retrans_timer(chan);
316 if (chan->monitor_timeout) {
317 l2cap_set_timer(chan, &chan->monitor_timer,
318 msecs_to_jiffies(chan->monitor_timeout));
/* Linear scan of a transmit queue for the skb whose txseq == seq. */
322 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
327 skb_queue_walk(head, skb) {
328 if (bt_cb(skb)->l2cap.txseq == seq)
335 /* ---- L2CAP sequence number lists ---- */
337 /* For ERTM, ordered lists of sequence numbers must be tracked for
338 * SREJ requests that are received and for frames that are to be
339 * retransmitted. These seq_list functions implement a singly-linked
340 * list in an array, where membership in the list can also be checked
341 * in constant time. Items can also be added to the tail of the list
342 * and removed from the head in constant time, without further memory
/* Allocate the backing array for a sequence list. The array is sized to
 * the next power of two so "seq & mask" maps any (up to 14-bit) sequence
 * number to a slot in constant time; all slots start CLEAR. */
346 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
348 size_t alloc_size, i;
350 /* Allocated size is a power of 2 to map sequence numbers
351 * (which may be up to 14 bits) in to a smaller array that is
352 * sized for the negotiated ERTM transmit windows.
354 alloc_size = roundup_pow_of_two(size);
356 seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
360 seq_list->mask = alloc_size - 1;
361 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
362 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
363 for (i = 0; i < alloc_size; i++)
364 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the backing array. */
369 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
371 kfree(seq_list->list);
374 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
377 /* Constant-time check for list membership */
378 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Pop the head sequence number: the vacated slot becomes CLEAR and
 * head/tail reset when the last element (marked L2CAP_SEQ_LIST_TAIL)
 * has been consumed. */
381 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
383 u16 seq = seq_list->head;
384 u16 mask = seq_list->mask;
386 seq_list->head = seq_list->list[seq & mask];
387 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
389 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
390 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
391 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Empty the list; no-op when already empty (head == CLEAR). */
397 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
401 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
404 for (i = 0; i <= seq_list->mask; i++)
405 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
407 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
408 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append seq at the tail; a slot that is already non-CLEAR means the
 * sequence number is present, so duplicates are refused. */
411 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
413 u16 mask = seq_list->mask;
415 /* All appends happen in constant time */
417 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
420 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
421 seq_list->head = seq;
423 seq_list->list[seq_list->tail & mask] = seq;
425 seq_list->tail = seq;
426 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for chan->chan_timer: choose an errno from the
 * channel state (connect/config phases time out as ECONNREFUSED), close
 * the channel, then drop the reference taken when the timer was armed. */
429 static void l2cap_chan_timeout(struct work_struct *work)
431 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
433 struct l2cap_conn *conn = chan->conn;
436 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
441 mutex_lock(&conn->chan_lock);
442 /* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
443 * this work. No need to call l2cap_chan_hold(chan) here again.
445 l2cap_chan_lock(chan);
447 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
448 reason = ECONNREFUSED;
449 else if (chan->state == BT_CONNECT &&
450 chan->sec_level != BT_SECURITY_SDP)
451 reason = ECONNREFUSED;
455 l2cap_chan_close(chan, reason);
457 chan->ops->close(chan);
459 l2cap_chan_unlock(chan);
460 l2cap_chan_put(chan);
462 mutex_unlock(&conn->chan_lock);
/* Allocate and initialise a new channel: queues, lock, delayed work
 * timers, refcount, and membership in the global channel list. Starts
 * in BT_OPEN with CONF_NOT_COMPLETE set (cleared by l2cap_chan_ready()). */
465 struct l2cap_chan *l2cap_chan_create(void)
467 struct l2cap_chan *chan;
469 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
473 skb_queue_head_init(&chan->tx_q);
474 skb_queue_head_init(&chan->srej_q);
475 mutex_init(&chan->lock);
477 /* Set default lock nesting level */
478 atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
480 write_lock(&chan_list_lock);
481 list_add(&chan->global_l, &chan_list);
482 write_unlock(&chan_list_lock);
484 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
485 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
486 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
487 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
489 chan->state = BT_OPEN;
491 kref_init(&chan->kref);
493 /* This flag is cleared in l2cap_chan_ready() */
494 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
496 BT_DBG("chan %p", chan);
500 EXPORT_SYMBOL_GPL(l2cap_chan_create);
/* kref release callback: unlink from the global list and free. */
502 static void l2cap_chan_destroy(struct kref *kref)
504 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
506 BT_DBG("chan %p", chan);
508 write_lock(&chan_list_lock);
509 list_del(&chan->global_l);
510 write_unlock(&chan_list_lock);
/* Take an unconditional channel reference. */
515 void l2cap_chan_hold(struct l2cap_chan *c)
517 BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
/* Take a reference only while the channel is still alive (refcount > 0);
 * the failure/success returns are outside this view. */
522 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
524 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
526 if (!kref_get_unless_zero(&c->kref))
/* Drop a reference; l2cap_chan_destroy() runs on the final put. */
532 void l2cap_chan_put(struct l2cap_chan *c)
534 BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
536 kref_put(&c->kref, l2cap_chan_destroy);
538 EXPORT_SYMBOL_GPL(l2cap_chan_put);
/* Reset a channel to default ERTM/security parameters and mark its
 * configuration as not yet complete. */
540 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
542 chan->fcs = L2CAP_FCS_CRC16;
543 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
544 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
545 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
546 chan->remote_max_tx = chan->max_tx;
547 chan->remote_tx_win = chan->tx_win;
548 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
549 chan->sec_level = BT_SECURITY_LOW;
550 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
551 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
552 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
554 chan->conf_state = 0;
555 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
557 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
559 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
/* Initialise LE credit-based flow control: MPS is capped by the HCI
 * connection MTU so a single PDU never needs HCI fragmentation, and the
 * peer gets enough rx credits for one full SDU of size imtu. */
561 static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
564 chan->sdu_last_frag = NULL;
566 chan->tx_credits = tx_credits;
567 /* Derive MPS from connection MTU to stop HCI fragmentation */
568 chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
569 /* Give enough credits for a full packet */
570 chan->rx_credits = (chan->imtu / chan->mps) + 1;
572 skb_queue_head_init(&chan->tx_q);
/* Enhanced-credit (ECRED) setup on top of the LE flow-control init:
 * raise MPS to the spec minimum of 64 octets when needed and recompute
 * the rx credits to match. */
575 static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
577 l2cap_le_flowctl_init(chan, tx_credits);
579 /* L2CAP implementations shall support a minimum MPS of 64 octets */
580 if (chan->mps < L2CAP_ECRED_MIN_MPS) {
581 chan->mps = L2CAP_ECRED_MIN_MPS;
582 chan->rx_credits = (chan->imtu / chan->mps) + 1;
/* Attach a channel to a connection: assign CIDs/MTU per channel type,
 * take a channel reference plus (conditionally) an hci_conn reference,
 * and link into conn->chan_l. Caller holds conn->chan_lock. */
586 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
588 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
589 __le16_to_cpu(chan->psm), chan->dcid);
591 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
595 switch (chan->chan_type) {
596 case L2CAP_CHAN_CONN_ORIENTED:
597 /* Alloc CID for connection-oriented socket */
598 chan->scid = l2cap_alloc_cid(conn);
599 if (conn->hcon->type == ACL_LINK)
600 chan->omtu = L2CAP_DEFAULT_MTU;
603 case L2CAP_CHAN_CONN_LESS:
604 /* Connectionless socket */
605 chan->scid = L2CAP_CID_CONN_LESS;
606 chan->dcid = L2CAP_CID_CONN_LESS;
607 chan->omtu = L2CAP_DEFAULT_MTU;
610 case L2CAP_CHAN_FIXED:
611 /* Caller will set CID and CID specific MTU values */
615 /* Raw socket can send/recv signalling messages only */
616 chan->scid = L2CAP_CID_SIGNALING;
617 chan->dcid = L2CAP_CID_SIGNALING;
618 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default extended-flow-spec parameters: best effort service. */
621 chan->local_id = L2CAP_BESTEFFORT_ID;
622 chan->local_stype = L2CAP_SERV_BESTEFFORT;
623 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
624 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
625 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
626 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
628 l2cap_chan_hold(chan);
630 /* Only keep a reference for fixed channels if they requested it */
631 if (chan->chan_type != L2CAP_CHAN_FIXED ||
632 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
633 hci_conn_hold(conn->hcon);
635 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add(). */
638 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
640 mutex_lock(&conn->chan_lock);
641 __l2cap_chan_add(conn, chan);
642 mutex_unlock(&conn->chan_lock);
/* Detach a channel from its connection: stop the channel timer, notify
 * teardown with err, unlink, drop the references taken in
 * __l2cap_chan_add() (mirroring its conditional hci_conn_hold), detach
 * AMP resources, and purge mode-specific queues/timers. */
645 void l2cap_chan_del(struct l2cap_chan *chan, int err)
647 struct l2cap_conn *conn = chan->conn;
649 __clear_chan_timer(chan);
651 BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
652 state_to_string(chan->state));
654 chan->ops->teardown(chan, err);
657 struct amp_mgr *mgr = conn->hcon->amp_mgr;
658 /* Delete from channel list */
659 list_del(&chan->list);
661 l2cap_chan_put(chan);
665 /* Reference was only held for non-fixed channels or
666 * fixed channels that explicitly requested it using the
667 * FLAG_HOLD_HCI_CONN flag.
669 if (chan->chan_type != L2CAP_CHAN_FIXED ||
670 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
671 hci_conn_drop(conn->hcon);
673 if (mgr && mgr->bredr_chan == chan)
674 mgr->bredr_chan = NULL;
677 if (chan->hs_hchan) {
678 struct hci_chan *hs_hchan = chan->hs_hchan;
680 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
681 amp_disconnect_logical_link(hs_hchan);
684 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
/* Per-mode cleanup of queued data and ERTM state. */
688 case L2CAP_MODE_BASIC:
691 case L2CAP_MODE_LE_FLOWCTL:
692 case L2CAP_MODE_EXT_FLOWCTL:
693 skb_queue_purge(&chan->tx_q);
696 case L2CAP_MODE_ERTM:
697 __clear_retrans_timer(chan);
698 __clear_monitor_timer(chan);
699 __clear_ack_timer(chan);
701 skb_queue_purge(&chan->srej_q);
703 l2cap_seq_list_free(&chan->srej_list);
704 l2cap_seq_list_free(&chan->retrans_list);
707 case L2CAP_MODE_STREAMING:
708 skb_queue_purge(&chan->tx_q);
714 EXPORT_SYMBOL_GPL(l2cap_chan_del);
/* Call func on every channel whose ident matches id; _safe iteration so
 * func may remove entries. Caller holds conn->chan_lock. */
716 static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
717 l2cap_chan_func_t func, void *data)
719 struct l2cap_chan *chan, *l;
721 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
722 if (chan->ident == id)
/* Call func on every channel of the connection; caller holds
 * conn->chan_lock. */
727 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
730 struct l2cap_chan *chan;
732 list_for_each_entry(chan, &conn->chan_l, list) {
/* Public, locked variant of __l2cap_chan_list(). */
737 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
743 mutex_lock(&conn->chan_lock);
744 __l2cap_chan_list(conn, func, data);
745 mutex_unlock(&conn->chan_lock);
748 EXPORT_SYMBOL_GPL(l2cap_chan_list);
/* Work handler copying the (possibly updated) hci_conn destination
 * address and type into every channel of the connection - e.g. after the
 * peer's identity address becomes known. */
750 static void l2cap_conn_update_id_addr(struct work_struct *work)
752 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
753 id_addr_update_work);
754 struct hci_conn *hcon = conn->hcon;
755 struct l2cap_chan *chan;
757 mutex_lock(&conn->chan_lock);
759 list_for_each_entry(chan, &conn->chan_l, list) {
760 l2cap_chan_lock(chan);
761 bacpy(&chan->dst, &hcon->dst);
762 chan->dst_type = bdaddr_dst_type(hcon);
763 l2cap_chan_unlock(chan);
766 mutex_unlock(&conn->chan_lock);
/* Send a negative LE connect response: "authorization" when setup was
 * deferred to userspace, otherwise "bad PSM"; the channel moves to
 * BT_DISCONN. */
769 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
771 struct l2cap_conn *conn = chan->conn;
772 struct l2cap_le_conn_rsp rsp;
775 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
776 result = L2CAP_CR_LE_AUTHORIZATION;
778 result = L2CAP_CR_LE_BAD_PSM;
780 l2cap_state_change(chan, BT_DISCONN);
782 rsp.dcid = cpu_to_le16(chan->scid);
783 rsp.mtu = cpu_to_le16(chan->imtu);
784 rsp.mps = cpu_to_le16(chan->mps);
785 rsp.credits = cpu_to_le16(chan->rx_credits);
786 rsp.result = cpu_to_le16(result);
788 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* ECRED variant of the rejection above; the deferred response helper
 * builds and sends the actual PDU. */
792 static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
794 l2cap_state_change(chan, BT_DISCONN);
796 __l2cap_ecred_conn_rsp_defer(chan);
/* BR/EDR negative connect response: security block for deferred setup,
 * otherwise bad PSM. */
799 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
801 struct l2cap_conn *conn = chan->conn;
802 struct l2cap_conn_rsp rsp;
805 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
806 result = L2CAP_CR_SEC_BLOCK;
808 result = L2CAP_CR_BAD_PSM;
810 l2cap_state_change(chan, BT_DISCONN);
812 rsp.scid = cpu_to_le16(chan->dcid);
813 rsp.dcid = cpu_to_le16(chan->scid);
814 rsp.result = cpu_to_le16(result);
815 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
817 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Close a channel according to its current state: established
 * conn-oriented channels send a disconnect request (with a timer for
 * the response); incoming connections still being set up are rejected
 * with the transport-appropriate response; otherwise just delete. */
820 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
822 struct l2cap_conn *conn = chan->conn;
824 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
826 switch (chan->state) {
828 chan->ops->teardown(chan, 0);
833 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
834 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
835 l2cap_send_disconn_req(chan, reason);
837 l2cap_chan_del(chan, reason);
841 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
842 if (conn->hcon->type == ACL_LINK)
843 l2cap_chan_connect_reject(chan);
844 else if (conn->hcon->type == LE_LINK) {
845 switch (chan->mode) {
846 case L2CAP_MODE_LE_FLOWCTL:
847 l2cap_chan_le_connect_reject(chan);
849 case L2CAP_MODE_EXT_FLOWCTL:
850 l2cap_chan_ecred_connect_reject(chan);
856 l2cap_chan_del(chan, reason);
861 l2cap_chan_del(chan, reason);
865 chan->ops->teardown(chan, 0);
869 EXPORT_SYMBOL(l2cap_chan_close);
/* Translate channel type, PSM and requested security level into the HCI
 * authentication requirement for the link. SDP (and 3DSP connection-
 * less) traffic is special-cased down to BT_SECURITY_SDP. */
871 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
873 switch (chan->chan_type) {
875 switch (chan->sec_level) {
876 case BT_SECURITY_HIGH:
877 case BT_SECURITY_FIPS:
878 return HCI_AT_DEDICATED_BONDING_MITM;
879 case BT_SECURITY_MEDIUM:
880 return HCI_AT_DEDICATED_BONDING;
882 return HCI_AT_NO_BONDING;
885 case L2CAP_CHAN_CONN_LESS:
886 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
887 if (chan->sec_level == BT_SECURITY_LOW)
888 chan->sec_level = BT_SECURITY_SDP;
890 if (chan->sec_level == BT_SECURITY_HIGH ||
891 chan->sec_level == BT_SECURITY_FIPS)
892 return HCI_AT_NO_BONDING_MITM;
894 return HCI_AT_NO_BONDING;
896 case L2CAP_CHAN_CONN_ORIENTED:
897 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
898 if (chan->sec_level == BT_SECURITY_LOW)
899 chan->sec_level = BT_SECURITY_SDP;
901 if (chan->sec_level == BT_SECURITY_HIGH ||
902 chan->sec_level == BT_SECURITY_FIPS)
903 return HCI_AT_NO_BONDING_MITM;
905 return HCI_AT_NO_BONDING;
/* Fallback: general bonding, MITM-protected for high/FIPS levels. */
910 switch (chan->sec_level) {
911 case BT_SECURITY_HIGH:
912 case BT_SECURITY_FIPS:
913 return HCI_AT_GENERAL_BONDING_MITM;
914 case BT_SECURITY_MEDIUM:
915 return HCI_AT_GENERAL_BONDING;
917 return HCI_AT_NO_BONDING;
923 /* Service level security */
924 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
926 struct l2cap_conn *conn = chan->conn;
/* LE links are secured through SMP; BR/EDR via HCI authentication. */
929 if (conn->hcon->type == LE_LINK)
930 return smp_conn_security(conn->hcon, chan->sec_level);
932 auth_type = l2cap_get_auth_type(chan);
934 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
/* Allocate the next signalling identifier for this connection,
 * wrapping within the kernel-reserved 1-128 range. */
938 static u8 l2cap_get_ident(struct l2cap_conn *conn)
942 /* Get next available identificator.
943 * 1 - 128 are used by kernel.
944 * 129 - 199 are reserved.
945 * 200 - 254 are used by utilities like l2ping, etc.
948 mutex_lock(&conn->ident_lock);
950 if (++conn->tx_ident > 128)
955 mutex_unlock(&conn->ident_lock);
/* Build a signalling command and transmit it on the connection's HCI
 * channel, using NO_FLUSH when the controller supports it (or on LE,
 * which never auto-flushes). Signalling gets maximum priority. */
960 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
963 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
966 BT_DBG("code 0x%2.2x", code);
971 /* Use NO_FLUSH if supported or we have an LE link (which does
972 * not support auto-flushing packets) */
973 if (lmp_no_flush_capable(conn->hcon->hdev) ||
974 conn->hcon->type == LE_LINK)
975 flags = ACL_START_NO_FLUSH;
979 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
980 skb->priority = HCI_PRIO_MAX;
982 hci_send_acl(conn->hchan, skb, flags);
/* True while an AMP channel move is in progress (any move_state other
 * than STABLE or WAIT_PREPARE). */
985 static bool __chan_is_moving(struct l2cap_chan *chan)
987 return chan->move_state != L2CAP_MOVE_STABLE &&
988 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit a data skb for the channel: over the AMP high-speed link if
 * one is active and no move is in progress, otherwise over ACL with
 * flush behaviour derived from link type and FLAG_FLUSHABLE. */
991 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
993 struct hci_conn *hcon = chan->conn->hcon;
996 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
999 if (chan->hs_hcon && !__chan_is_moving(chan)) {
1001 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
1008 /* Use NO_FLUSH for LE links (where this is the only option) or
1009 * if the BR/EDR link supports it and flushing has not been
1010 * explicitly requested (through FLAG_FLUSHABLE).
1012 if (hcon->type == LE_LINK ||
1013 (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
1014 lmp_no_flush_capable(hcon->hdev)))
1015 flags = ACL_START_NO_FLUSH;
1019 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
1020 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced ERTM control field into l2cap_ctrl: common
 * reqseq/final bits, then either S-frame (poll/super) or I-frame
 * (sar/txseq) fields depending on the frame-type bit. */
1023 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
1025 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
1026 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
1028 if (enh & L2CAP_CTRL_FRAME_TYPE) {
1030 control->sframe = 1;
1031 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
1032 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
1038 control->sframe = 0;
1039 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
1040 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Same decoding for the 32-bit extended control field. */
1047 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
1049 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1050 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
1052 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
1054 control->sframe = 1;
1055 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
1056 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
1062 control->sframe = 0;
1063 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
1064 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Pull the (enhanced or extended, per FLAG_EXT_CTRL) control field off
 * the front of the skb and decode it into the skb's control block. */
1071 static inline void __unpack_control(struct l2cap_chan *chan,
1072 struct sk_buff *skb)
1074 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1075 __unpack_extended_control(get_unaligned_le32(skb->data),
1076 &bt_cb(skb)->l2cap);
1077 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1079 __unpack_enhanced_control(get_unaligned_le16(skb->data),
1080 &bt_cb(skb)->l2cap);
1081 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Encode l2cap_ctrl into a 32-bit extended control word (inverse of
 * __unpack_extended_control). */
1085 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1089 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1090 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1092 if (control->sframe) {
1093 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1094 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1095 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1097 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1098 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Encode l2cap_ctrl into a 16-bit enhanced control word. */
1104 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1108 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1109 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1111 if (control->sframe) {
1112 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1113 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1114 packed |= L2CAP_CTRL_FRAME_TYPE;
1116 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1117 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the packed control field into the skb just after the basic
 * L2CAP header, in whichever width the channel negotiated. */
1123 static inline void __pack_control(struct l2cap_chan *chan,
1124 struct l2cap_ctrl *control,
1125 struct sk_buff *skb)
1127 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1128 put_unaligned_le32(__pack_extended_control(control),
1129 skb->data + L2CAP_HDR_SIZE);
1131 put_unaligned_le16(__pack_enhanced_control(control),
1132 skb->data + L2CAP_HDR_SIZE);
/* ERTM header size: extended vs enhanced control width. */
1136 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1138 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1139 return L2CAP_EXT_HDR_SIZE;
1141 return L2CAP_ENH_HDR_SIZE;
/* Allocate and fill an S-frame PDU: basic header, control field in the
 * channel's negotiated width, and a trailing CRC16 FCS when enabled. */
1144 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1147 struct sk_buff *skb;
1148 struct l2cap_hdr *lh;
1149 int hlen = __ertm_hdr_size(chan);
1151 if (chan->fcs == L2CAP_FCS_CRC16)
1152 hlen += L2CAP_FCS_SIZE;
1154 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1157 return ERR_PTR(-ENOMEM);
1159 lh = skb_put(skb, L2CAP_HDR_SIZE);
1160 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1161 lh->cid = cpu_to_le16(chan->dcid);
1163 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1164 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1166 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1168 if (chan->fcs == L2CAP_FCS_CRC16) {
1169 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1170 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1173 skb->priority = HCI_PRIO_MAX;
/* Send a supervisory frame: update RNR-sent bookkeeping, record the
 * acked sequence (except for SREJ) and cancel the ack timer, then pack
 * the control word and transmit via l2cap_do_send(). Skipped entirely
 * while an AMP move is in progress. */
1177 static void l2cap_send_sframe(struct l2cap_chan *chan,
1178 struct l2cap_ctrl *control)
1180 struct sk_buff *skb;
1183 BT_DBG("chan %p, control %p", chan, control);
1185 if (!control->sframe)
1188 if (__chan_is_moving(chan))
1191 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1195 if (control->super == L2CAP_SUPER_RR)
1196 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1197 else if (control->super == L2CAP_SUPER_RNR)
1198 set_bit(CONN_RNR_SENT, &chan->conn_state);
1200 if (control->super != L2CAP_SUPER_SREJ) {
1201 chan->last_acked_seq = control->reqseq;
1202 __clear_ack_timer(chan);
1205 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1206 control->final, control->poll, control->super);
1208 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1209 control_field = __pack_extended_control(control);
1211 control_field = __pack_enhanced_control(control);
1213 skb = l2cap_create_sframe_pdu(chan, control_field);
1215 l2cap_do_send(chan, skb);
/* Send an RR - or RNR when the local side is busy - acknowledging
 * buffer_seq, optionally with the poll bit set. */
1218 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1220 struct l2cap_ctrl control;
1222 BT_DBG("chan %p, poll %d", chan, poll);
1224 memset(&control, 0, sizeof(control));
1226 control.poll = poll;
1228 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1229 control.super = L2CAP_SUPER_RNR;
1231 control.super = L2CAP_SUPER_RR;
1233 control.reqseq = chan->buffer_seq;
1234 l2cap_send_sframe(chan, &control);
/* True when no connect request is outstanding (only meaningful for
 * connection-oriented channels). */
1237 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1239 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1242 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* A channel may use an AMP controller only when both sides advertise
 * A2MP support and at least one non-BR/EDR AMP controller is powered
 * up; the final answer also factors in chan_policy. */
1245 static bool __amp_capable(struct l2cap_chan *chan)
1247 struct l2cap_conn *conn = chan->conn;
1248 struct hci_dev *hdev;
1249 bool amp_available = false;
1251 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1254 if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1257 read_lock(&hci_dev_list_lock);
1258 list_for_each_entry(hdev, &hci_dev_list, list) {
1259 if (hdev->amp_type != AMP_TYPE_BREDR &&
1260 test_bit(HCI_UP, &hdev->flags)) {
1261 amp_available = true;
1265 read_unlock(&hci_dev_list_lock);
1267 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1268 return amp_available;
/* Validate extended flow spec parameters (body not visible in this
 * chunk). */
1273 static bool l2cap_check_efs(struct l2cap_chan *chan)
1275 /* Check EFS parameters */
/* Send an L2CAP connect request for this channel and mark the connect
 * as pending until the response arrives. */
1279 void l2cap_send_conn_req(struct l2cap_chan *chan)
1281 struct l2cap_conn *conn = chan->conn;
1282 struct l2cap_conn_req req;
1284 req.scid = cpu_to_le16(chan->scid);
1285 req.psm = chan->psm;
1287 chan->ident = l2cap_get_ident(conn);
1289 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1291 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Send an AMP "create channel" request targeting the given AMP
 * controller id. */
1294 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1296 struct l2cap_create_chan_req req;
1297 req.scid = cpu_to_le16(chan->scid);
1298 req.psm = chan->psm;
1299 req.amp_id = amp_id;
1301 chan->ident = l2cap_get_ident(chan->conn);
1303 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
/* Prepare an ERTM channel for an AMP move: stop all timers, reset retry
 * state so queued frames are retransmitted once on the new link, clear
 * SREJ bookkeeping, and park rx/tx state machines in MOVE/XMIT. */
1307 static void l2cap_move_setup(struct l2cap_chan *chan)
1309 struct sk_buff *skb;
1311 BT_DBG("chan %p", chan);
1313 if (chan->mode != L2CAP_MODE_ERTM)
1316 __clear_retrans_timer(chan);
1317 __clear_monitor_timer(chan);
1318 __clear_ack_timer(chan);
1320 chan->retry_count = 0;
1321 skb_queue_walk(&chan->tx_q, skb) {
1322 if (bt_cb(skb)->l2cap.retries)
1323 bt_cb(skb)->l2cap.retries = 1;
1328 chan->expected_tx_seq = chan->buffer_seq;
1330 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1331 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1332 l2cap_seq_list_clear(&chan->retrans_list);
1333 l2cap_seq_list_clear(&chan->srej_list);
1334 skb_queue_purge(&chan->srej_q);
1336 chan->tx_state = L2CAP_TX_STATE_XMIT;
1337 chan->rx_state = L2CAP_RX_STATE_MOVE;
1339 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish an AMP move: back to STABLE; ERTM channels resume via an
 * explicit poll (initiator) or by waiting for the peer's poll
 * (responder). */
1342 static void l2cap_move_done(struct l2cap_chan *chan)
1344 u8 move_role = chan->move_role;
1345 BT_DBG("chan %p", chan);
1347 chan->move_state = L2CAP_MOVE_STABLE;
1348 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1350 if (chan->mode != L2CAP_MODE_ERTM)
1353 switch (move_role) {
1354 case L2CAP_MOVE_ROLE_INITIATOR:
1355 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1356 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1358 case L2CAP_MOVE_ROLE_RESPONDER:
1359 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
/* Transition a fully configured channel to BT_CONNECTED and notify its
 * owner; credit-based channels with no tx credits start suspended. */
1364 static void l2cap_chan_ready(struct l2cap_chan *chan)
1366 /* The channel may have already been flagged as connected in
1367 * case of receiving data before the L2CAP info req/rsp
1368 * procedure is complete.
1370 if (chan->state == BT_CONNECTED)
1373 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1374 chan->conf_state = 0;
1375 __clear_chan_timer(chan);
1377 switch (chan->mode) {
1378 case L2CAP_MODE_LE_FLOWCTL:
1379 case L2CAP_MODE_EXT_FLOWCTL:
1380 if (!chan->tx_credits)
1381 chan->ops->suspend(chan);
1385 chan->state = BT_CONNECTED;
1387 chan->ops->ready(chan);
/* Initiate an LE credit-based connection: imtu is taken from the
 * connection MTU (the guard around this assignment is not visible in
 * this chunk - confirm), flow control is initialised, and the LE
 * connect request is sent; the SENT flag ensures this happens once. */
1390 static void l2cap_le_connect(struct l2cap_chan *chan)
1392 struct l2cap_conn *conn = chan->conn;
1393 struct l2cap_le_conn_req req;
1395 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1399 chan->imtu = chan->conn->mtu;
1401 l2cap_le_flowctl_init(chan, 0);
1403 req.psm = chan->psm;
1404 req.scid = cpu_to_le16(chan->scid);
1405 req.mtu = cpu_to_le16(chan->imtu);
1406 req.mps = cpu_to_le16(chan->mps);
1407 req.credits = cpu_to_le16(chan->rx_credits);
1409 chan->ident = l2cap_get_ident(conn);
1411 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
/* Context passed to l2cap_ecred_defer_connect() while building a
 * single enhanced-credit-based connect request that can carry several
 * deferred channels (same PID/PSM) in one PDU.
 */
1415 struct l2cap_ecred_conn_data {
1417 struct l2cap_ecred_conn_req req;
/* Channel that initiated the request; deferred siblings reuse its ident */
1420 struct l2cap_chan *chan;
/* Per-channel callback used while building an ECRED connect request:
 * folds a deferred channel (same peer PID and PSM as the initiating
 * channel) into the pending request's scid list.
 */
1425 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1427 struct l2cap_ecred_conn_data *conn = data;
/* Skip the initiating channel itself */
1430 if (chan == conn->chan)
1433 if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1436 pid = chan->ops->get_peer_pid(chan);
1438 /* Only add deferred channels with the same PID/PSM */
1439 if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1440 chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1443 if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1446 l2cap_ecred_init(chan, 0);
1448 /* Set the same ident so we can match on the rsp */
1449 chan->ident = conn->chan->ident;
1451 /* Include all channels deferred */
1452 conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
/* Send an enhanced credit-based (ECRED) connection request for this
 * channel, bundling in any sibling channels that deferred their setup
 * with the same peer PID/PSM.  Guarded by FLAG_ECRED_CONN_REQ_SENT.
 */
1457 static void l2cap_ecred_connect(struct l2cap_chan *chan)
1459 struct l2cap_conn *conn = chan->conn;
1460 struct l2cap_ecred_conn_data data;
/* Deferred channels are picked up later by the initiating channel */
1462 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
1465 if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1468 l2cap_ecred_init(chan, 0);
1470 memset(&data, 0, sizeof(data));
1471 data.pdu.req.psm = chan->psm;
1472 data.pdu.req.mtu = cpu_to_le16(chan->imtu);
1473 data.pdu.req.mps = cpu_to_le16(chan->mps);
1474 data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
1475 data.pdu.scid[0] = cpu_to_le16(chan->scid);
1477 chan->ident = l2cap_get_ident(conn);
1478 data.pid = chan->ops->get_peer_pid(chan);
/* NOTE(review): data.pid appears to be assigned twice (lines 1478 and
 * 1482 of the original) — the second assignment looks redundant;
 * verify against the full source before changing.
 */
1482 data.pid = chan->ops->get_peer_pid(chan);
/* Let every deferred sibling channel add its scid to the PDU */
1484 __l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);
1486 l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
1487 sizeof(data.pdu.req) + data.count * sizeof(__le16),
/* Start an LE channel: first ensure the required security level via
 * SMP; once secure, either mark the channel ready or, for outgoing
 * channels in BT_CONNECT, issue the appropriate (ECRED or LE
 * credit-based) connection request.
 */
1491 static void l2cap_le_start(struct l2cap_chan *chan)
1493 struct l2cap_conn *conn = chan->conn;
/* Bail out until the link reaches the channel's security level */
1495 if (!smp_conn_security(conn->hcon, chan->sec_level))
1499 l2cap_chan_ready(chan);
1503 if (chan->state == BT_CONNECT) {
1504 if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1505 l2cap_ecred_connect(chan);
1507 l2cap_le_connect(chan);
/* Kick off the channel connection using the transport-appropriate
 * path: AMP discovery for AMP-capable channels, LE start for LE
 * links, plain L2CAP connect request for BR/EDR.
 */
1511 static void l2cap_start_connection(struct l2cap_chan *chan)
1513 if (__amp_capable(chan)) {
1514 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1515 a2mp_discover_amp(chan);
1516 } else if (chan->conn->hcon->type == LE_LINK) {
1517 l2cap_le_start(chan);
1519 l2cap_send_conn_req(chan);
/* Send an L2CAP information request (feature mask) on this connection
 * once; arms the info timer so the exchange can't stall forever.
 */
1523 static void l2cap_request_info(struct l2cap_conn *conn)
1525 struct l2cap_info_req req;
/* Only one feature-mask request per connection */
1527 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1530 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1532 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1533 conn->info_ident = l2cap_get_ident(conn);
1535 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1537 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Return true when the link either is not encrypted (no key-size
 * requirement applies) or its encryption key meets the controller's
 * configured minimum key size.
 */
1541 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1543 /* The minimum encryption key size needs to be enforced by the
1544 * host stack before establishing any L2CAP connections. The
1545 * specification in theory allows a minimum of 1, but to align
1546 * BR/EDR and LE transports, a minimum of 7 is chosen.
1548 * This check might also be called for unencrypted connections
1549 * that have no key size requirements. Ensure that the link is
1550 * actually encrypted before enforcing a key size.
1552 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1553 hcon->enc_key_size >= hcon->hdev->min_enc_key_size);
/* Begin connection setup for a channel.  LE links go straight to the
 * LE path; on BR/EDR the feature-mask info exchange must complete and
 * security/key-size checks must pass before sending the connect
 * request, otherwise the channel is torn down via the disc timer.
 */
1556 static void l2cap_do_start(struct l2cap_chan *chan)
1558 struct l2cap_conn *conn = chan->conn;
1560 if (conn->hcon->type == LE_LINK) {
1561 l2cap_le_start(chan);
1565 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1566 l2cap_request_info(conn);
/* Wait for the info response before proceeding */
1570 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1573 if (!l2cap_chan_check_security(chan, true) ||
1574 !__l2cap_no_conn_pending(chan))
1577 if (l2cap_check_enc_key_size(conn->hcon))
1578 l2cap_start_connection(chan);
/* Weak encryption key: let the disconnect timer close the channel */
1580 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Return non-zero when the given channel mode is supported by both
 * the local feature mask (ERTM/streaming are always enabled locally
 * here) and the remote feature mask supplied by the caller.
 */
1583 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1585 u32 local_feat_mask = l2cap_feat_mask;
1587 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1590 case L2CAP_MODE_ERTM:
1591 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1592 case L2CAP_MODE_STREAMING:
1593 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send an L2CAP disconnect request for the channel and move it to
 * BT_DISCONN with @err recorded.  ERTM timers are stopped first so no
 * retransmissions race the teardown; A2MP channels skip the request
 * since they have no classic dcid/scid pair.
 */
1599 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1601 struct l2cap_conn *conn = chan->conn;
1602 struct l2cap_disconn_req req;
1607 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1608 __clear_retrans_timer(chan);
1609 __clear_monitor_timer(chan);
1610 __clear_ack_timer(chan);
1613 if (chan->scid == L2CAP_CID_A2MP) {
1614 l2cap_state_change(chan, BT_DISCONN);
1618 req.dcid = cpu_to_le16(chan->dcid);
1619 req.scid = cpu_to_le16(chan->scid);
1620 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1623 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1626 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection (under chan_lock) and advance
 * its state machine: connectionless channels become ready, BT_CONNECT
 * channels (re)issue their connect request once security permits, and
 * BT_CONNECT2 channels answer the peer's pending connect request.
 */
1627 static void l2cap_conn_start(struct l2cap_conn *conn)
1629 struct l2cap_chan *chan, *tmp;
1631 BT_DBG("conn %p", conn);
1633 mutex_lock(&conn->chan_lock);
1635 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1636 l2cap_chan_lock(chan);
1638 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1639 l2cap_chan_ready(chan);
1640 l2cap_chan_unlock(chan);
1644 if (chan->state == BT_CONNECT) {
1645 if (!l2cap_chan_check_security(chan, true) ||
1646 !__l2cap_no_conn_pending(chan)) {
1647 l2cap_chan_unlock(chan);
/* Close channels whose required mode the remote does not support and
 * that are not allowed to fall back to another mode.
 */
1651 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1652 && test_bit(CONF_STATE2_DEVICE,
1653 &chan->conf_state)) {
1654 l2cap_chan_close(chan, ECONNRESET);
1655 l2cap_chan_unlock(chan);
1659 if (l2cap_check_enc_key_size(conn->hcon))
1660 l2cap_start_connection(chan);
1662 l2cap_chan_close(chan, ECONNREFUSED);
1664 } else if (chan->state == BT_CONNECT2) {
1665 struct l2cap_conn_rsp rsp;
/* Note the swap: our dcid is the peer's scid and vice versa */
1667 rsp.scid = cpu_to_le16(chan->dcid);
1668 rsp.dcid = cpu_to_le16(chan->scid);
1670 if (l2cap_chan_check_security(chan, false)) {
1671 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
/* Owner asked to authorize: answer "pending" and defer to it */
1672 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1673 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1674 chan->ops->defer(chan);
1677 l2cap_state_change(chan, BT_CONFIG);
1678 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1679 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security not yet satisfied: report authentication pending */
1682 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1683 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1686 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only start configuration after a successful response, and only once */
1689 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1690 rsp.result != L2CAP_CR_SUCCESS) {
1691 l2cap_chan_unlock(chan);
1695 set_bit(CONF_REQ_SENT, &chan->conf_state);
1696 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1697 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1698 chan->num_conf_req++;
1701 l2cap_chan_unlock(chan);
1704 mutex_unlock(&conn->chan_lock);
/* Per-connection work run when an LE link comes up: trigger any
 * pending SMP security procedure and, as peripheral, request a
 * connection-parameter update if the current interval is outside the
 * configured min/max range.
 */
1707 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1709 struct hci_conn *hcon = conn->hcon;
1710 struct hci_dev *hdev = hcon->hdev;
1712 BT_DBG("%s conn %p", hdev->name, conn);
1714 /* For outgoing pairing which doesn't necessarily have an
1715 * associated socket (e.g. mgmt_pair_device).
1718 smp_conn_security(hcon, hcon->pending_sec_level);
1720 /* For LE peripheral connections, make sure the connection interval
1721 * is in the range of the minimum and maximum interval that has
1722 * been configured for this connection. If not, then trigger
1723 * the connection update procedure.
1725 if (hcon->role == HCI_ROLE_SLAVE &&
1726 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1727 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1728 struct l2cap_conn_param_update_req req;
1730 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1731 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1732 req.latency = cpu_to_le16(hcon->le_conn_latency);
1733 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1735 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1736 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
/* Called when the underlying link is fully up: request the feature
 * mask on ACL links, then walk all channels and start/ready each one
 * according to link type and channel state.  Finally drain any RX
 * frames that arrived before the connection was ready.
 */
1740 static void l2cap_conn_ready(struct l2cap_conn *conn)
1742 struct l2cap_chan *chan;
1743 struct hci_conn *hcon = conn->hcon;
1745 BT_DBG("conn %p", conn);
1747 if (hcon->type == ACL_LINK)
1748 l2cap_request_info(conn);
1750 mutex_lock(&conn->chan_lock);
1752 list_for_each_entry(chan, &conn->chan_l, list) {
1754 l2cap_chan_lock(chan);
/* A2MP channels are managed separately; skip them here */
1756 if (chan->scid == L2CAP_CID_A2MP) {
1757 l2cap_chan_unlock(chan);
1761 if (hcon->type == LE_LINK) {
1762 l2cap_le_start(chan);
1763 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1764 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1765 l2cap_chan_ready(chan);
1766 } else if (chan->state == BT_CONNECT) {
1767 l2cap_do_start(chan);
1770 l2cap_chan_unlock(chan);
1773 mutex_unlock(&conn->chan_lock);
1775 if (hcon->type == LE_LINK)
1776 l2cap_le_conn_ready(conn);
/* Process frames queued while the connection was still coming up */
1778 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1781 /* Notify sockets that we cannot guarantee reliability anymore */
/* Propagate @err to every channel on the connection that demanded a
 * reliable link (FLAG_FORCE_RELIABLE), e.g. after a baseband error.
 */
1782 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1784 struct l2cap_chan *chan;
1786 BT_DBG("conn %p", conn);
1788 mutex_lock(&conn->chan_lock);
1790 list_for_each_entry(chan, &conn->chan_l, list) {
1791 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1792 l2cap_chan_set_err(chan, err);
1795 mutex_unlock(&conn->chan_lock);
/* Info-request timer expiry: give up waiting for the feature-mask
 * response, mark the exchange done and resume channel setup anyway.
 */
1798 static void l2cap_info_timeout(struct work_struct *work)
1800 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1803 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1804 conn->info_ident = 0;
1806 l2cap_conn_start(conn);
1811 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1812 * callback is called during registration. The ->remove callback is called
1813 * during unregistration.
1814 * An l2cap_user object can either be explicitly unregistered or when the
1815 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1816 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1817 * External modules must own a reference to the l2cap_conn object if they intend
1818 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1819 * any time if they don't.
/* Register an external l2cap_user on the connection.  Runs the user's
 * ->probe() callback under the hci_dev lock; fails if the user is
 * already registered or the connection has been deleted.
 */
1822 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1824 struct hci_dev *hdev = conn->hcon->hdev;
1827 /* We need to check whether l2cap_conn is registered. If it is not, we
1828 * must not register the l2cap_user. l2cap_conn_del() unregisters
1829 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1830 * relies on the parent hci_conn object to be locked. This itself relies
1831 * on the hci_dev object to be locked. So we must lock the hci device
/* Non-empty list means this user is already registered somewhere */
1836 if (!list_empty(&user->list)) {
1841 /* conn->hchan is NULL after l2cap_conn_del() was called */
1847 ret = user->probe(conn, user);
1851 list_add(&user->list, &conn->users);
1855 hci_dev_unlock(hdev);
1858 EXPORT_SYMBOL(l2cap_register_user);
/* Unregister an l2cap_user and invoke its ->remove() callback, all
 * under the hci_dev lock.  No-op if the user was never registered.
 */
1860 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1862 struct hci_dev *hdev = conn->hcon->hdev;
1866 if (list_empty(&user->list))
/* list_del_init so a later unregister/register sees a clean node */
1869 list_del_init(&user->list);
1870 user->remove(conn, user);
1873 hci_dev_unlock(hdev);
1875 EXPORT_SYMBOL(l2cap_unregister_user);
/* Detach every registered l2cap_user from the connection, calling
 * each one's ->remove() callback.  Used during connection teardown.
 */
1877 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1879 struct l2cap_user *user;
1881 while (!list_empty(&conn->users)) {
1882 user = list_first_entry(&conn->users, struct l2cap_user, list);
1883 list_del_init(&user->list);
1884 user->remove(conn, user);
/* Tear down the L2CAP connection attached to @hcon: cancel pending RX
 * work, detach users, close every channel with @err, release the HCI
 * channel and drop the connection reference.  Caller holds the
 * hci_conn/hci_dev locks (see l2cap_register_user()'s comment).
 */
1888 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1890 struct l2cap_conn *conn = hcon->l2cap_data;
1891 struct l2cap_chan *chan, *l;
1896 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1898 kfree_skb(conn->rx_skb);
1900 skb_queue_purge(&conn->pending_rx);
1902 /* We can not call flush_work(&conn->pending_rx_work) here since we
1903 * might block if we are running on a worker from the same workqueue
1904 * pending_rx_work is waiting on.
1906 if (work_pending(&conn->pending_rx_work))
1907 cancel_work_sync(&conn->pending_rx_work);
1909 if (work_pending(&conn->id_addr_update_work))
1910 cancel_work_sync(&conn->id_addr_update_work);
1912 l2cap_unregister_all_users(conn);
1914 /* Force the connection to be immediately dropped */
1915 hcon->disc_timeout = 0;
1917 mutex_lock(&conn->chan_lock);
/* Hold each channel across close so it can't vanish mid-iteration */
1920 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1921 l2cap_chan_hold(chan);
1922 l2cap_chan_lock(chan);
1924 l2cap_chan_del(chan, err);
1926 chan->ops->close(chan);
1928 l2cap_chan_unlock(chan);
1929 l2cap_chan_put(chan);
1932 mutex_unlock(&conn->chan_lock);
1934 hci_chan_del(conn->hchan);
/* Stop the info timer only if the request was actually sent */
1936 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1937 cancel_delayed_work_sync(&conn->info_timer);
1939 hcon->l2cap_data = NULL;
1941 l2cap_conn_put(conn);
/* kref release callback: drop the hci_conn reference held by the
 * l2cap_conn (the conn itself is freed in unsampled lines nearby).
 */
1944 static void l2cap_conn_free(struct kref *ref)
1946 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1948 hci_conn_put(conn->hcon);
/* Take a reference on the connection; returns @conn for chaining. */
1952 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1954 kref_get(&conn->ref);
1957 EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a connection reference; frees via l2cap_conn_free() at zero. */
1959 void l2cap_conn_put(struct l2cap_conn *conn)
1961 kref_put(&conn->ref, l2cap_conn_free);
1963 EXPORT_SYMBOL(l2cap_conn_put);
1965 /* ---- Socket interface ---- */
1967 /* Find socket with psm and source / destination bdaddr.
1968 * Returns closest match.
/* Look up a global (listening) channel by PSM and src/dst addresses,
 * honouring the link type.  An exact src+dst match wins immediately;
 * otherwise the closest wildcard (BDADDR_ANY) match is returned.
 * The returned channel carries a reference taken under chan_list_lock.
 */
1970 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1975 struct l2cap_chan *c, *tmp, *c1 = NULL;
1977 read_lock(&chan_list_lock);
1979 list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
1980 if (state && c->state != state)
/* BR/EDR sockets only match ACL links, LE sockets only LE links */
1983 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1986 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1989 if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
1990 int src_match, dst_match;
1991 int src_any, dst_any;
1994 src_match = !bacmp(&c->src, src);
1995 dst_match = !bacmp(&c->dst, dst);
1996 if (src_match && dst_match) {
/* Skip channels already being torn down (refcount at zero) */
1997 if (!l2cap_chan_hold_unless_zero(c))
2000 read_unlock(&chan_list_lock);
/* Remember the best wildcard candidate in case no exact match exists */
2005 src_any = !bacmp(&c->src, BDADDR_ANY);
2006 dst_any = !bacmp(&c->dst, BDADDR_ANY);
2007 if ((src_match && dst_any) || (src_any && dst_match) ||
2008 (src_any && dst_any))
2014 c1 = l2cap_chan_hold_unless_zero(c1);
2016 read_unlock(&chan_list_lock);
/* ERTM monitor timer expiry: feed a MONITOR_TO event into the tx
 * state machine.  Drops the timer's channel reference on exit; the
 * early unlock/put path handles a channel that is no longer valid.
 */
2021 static void l2cap_monitor_timeout(struct work_struct *work)
2023 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2024 monitor_timer.work);
2026 BT_DBG("chan %p", chan);
2028 l2cap_chan_lock(chan);
2031 l2cap_chan_unlock(chan);
2032 l2cap_chan_put(chan);
2036 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2038 l2cap_chan_unlock(chan);
2039 l2cap_chan_put(chan);
/* ERTM retransmission timer expiry: feed a RETRANS_TO event into the
 * tx state machine.  Mirrors l2cap_monitor_timeout()'s locking and
 * reference handling.
 */
2042 static void l2cap_retrans_timeout(struct work_struct *work)
2044 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2045 retrans_timer.work);
2047 BT_DBG("chan %p", chan);
2049 l2cap_chan_lock(chan);
2052 l2cap_chan_unlock(chan);
2053 l2cap_chan_put(chan);
2057 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2058 l2cap_chan_unlock(chan);
2059 l2cap_chan_put(chan);
/* Streaming-mode transmit: append @skbs to the tx queue and send
 * everything immediately, stamping each frame with the next tx
 * sequence number and an FCS when CRC16 is enabled.  No frames are
 * retained for retransmission (streaming mode is unacknowledged).
 */
2062 static void l2cap_streaming_send(struct l2cap_chan *chan,
2063 struct sk_buff_head *skbs)
2065 struct sk_buff *skb;
2066 struct l2cap_ctrl *control;
2068 BT_DBG("chan %p, skbs %p", chan, skbs);
/* Hold off while a channel move is in progress */
2070 if (__chan_is_moving(chan))
2073 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2075 while (!skb_queue_empty(&chan->tx_q)) {
2077 skb = skb_dequeue(&chan->tx_q);
2079 bt_cb(skb)->l2cap.retries = 1;
2080 control = &bt_cb(skb)->l2cap;
2082 control->reqseq = 0;
2083 control->txseq = chan->next_tx_seq;
2085 __pack_control(chan, control, skb);
2087 if (chan->fcs == L2CAP_FCS_CRC16) {
2088 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2089 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2092 l2cap_do_send(chan, skb);
2094 BT_DBG("Sent txseq %u", control->txseq);
2096 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2097 chan->frames_sent++;
/* ERTM transmit: send queued I-frames while the remote tx window has
 * room and the tx state machine is in XMIT.  Frames stay on tx_q for
 * possible retransmission; only clones are handed to the HCI layer.
 * Returns the number of frames sent (return lines not sampled here).
 */
2101 static int l2cap_ertm_send(struct l2cap_chan *chan)
2103 struct sk_buff *skb, *tx_skb;
2104 struct l2cap_ctrl *control;
2107 BT_DBG("chan %p", chan);
2109 if (chan->state != BT_CONNECTED)
2112 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2115 if (__chan_is_moving(chan))
2118 while (chan->tx_send_head &&
2119 chan->unacked_frames < chan->remote_tx_win &&
2120 chan->tx_state == L2CAP_TX_STATE_XMIT) {
2122 skb = chan->tx_send_head;
2124 bt_cb(skb)->l2cap.retries = 1;
2125 control = &bt_cb(skb)->l2cap;
2127 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Each I-frame piggybacks an ack up to buffer_seq */
2130 control->reqseq = chan->buffer_seq;
2131 chan->last_acked_seq = chan->buffer_seq;
2132 control->txseq = chan->next_tx_seq;
2134 __pack_control(chan, control, skb);
2136 if (chan->fcs == L2CAP_FCS_CRC16) {
2137 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2138 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2141 /* Clone after data has been modified. Data is assumed to be
2142 read-only (for locking purposes) on cloned sk_buffs.
2144 tx_skb = skb_clone(skb, GFP_KERNEL);
2149 __set_retrans_timer(chan);
2151 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2152 chan->unacked_frames++;
2153 chan->frames_sent++;
/* Advance tx_send_head, or clear it at the end of the queue */
2156 if (skb_queue_is_last(&chan->tx_q, skb))
2157 chan->tx_send_head = NULL;
2159 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2161 l2cap_do_send(chan, tx_skb);
2162 BT_DBG("Sent txseq %u", control->txseq);
2165 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2166 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number currently on retrans_list.  Each
 * frame's retry counter is bumped and checked against max_tx (0 means
 * unlimited); exceeding it disconnects the channel.  Control fields
 * and FCS are rewritten in place on a fresh copy/clone before sending.
 */
2171 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2173 struct l2cap_ctrl control;
2174 struct sk_buff *skb;
2175 struct sk_buff *tx_skb;
2178 BT_DBG("chan %p", chan);
2180 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2183 if (__chan_is_moving(chan))
2186 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2187 seq = l2cap_seq_list_pop(&chan->retrans_list);
2189 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2191 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2196 bt_cb(skb)->l2cap.retries++;
2197 control = bt_cb(skb)->l2cap;
2199 if (chan->max_tx != 0 &&
2200 bt_cb(skb)->l2cap.retries > chan->max_tx) {
2201 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2202 l2cap_send_disconn_req(chan, ECONNRESET);
2203 l2cap_seq_list_clear(&chan->retrans_list);
/* Refresh the piggybacked ack and F-bit on the retransmission */
2207 control.reqseq = chan->buffer_seq;
2208 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2213 if (skb_cloned(skb)) {
2214 /* Cloned sk_buffs are read-only, so we need a
2217 tx_skb = skb_copy(skb, GFP_KERNEL);
2219 tx_skb = skb_clone(skb, GFP_KERNEL);
/* Allocation failure: abandon this resend pass */
2223 l2cap_seq_list_clear(&chan->retrans_list);
2227 /* Update skb contents */
2228 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2229 put_unaligned_le32(__pack_extended_control(&control),
2230 tx_skb->data + L2CAP_HDR_SIZE);
2232 put_unaligned_le16(__pack_enhanced_control(&control),
2233 tx_skb->data + L2CAP_HDR_SIZE);
/* Recompute the FCS over the rewritten frame, excluding the FCS field */
2237 if (chan->fcs == L2CAP_FCS_CRC16) {
2238 u16 fcs = crc16(0, (u8 *) tx_skb->data,
2239 tx_skb->len - L2CAP_FCS_SIZE);
2240 put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2244 l2cap_do_send(chan, tx_skb);
2246 BT_DBG("Resent txseq %d", control.txseq);
2248 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame named by control->reqseq: queue it on
 * retrans_list and run the resend machinery.
 */
2252 static void l2cap_retransmit(struct l2cap_chan *chan,
2253 struct l2cap_ctrl *control)
2255 BT_DBG("chan %p, control %p", chan, control);
2257 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2258 l2cap_ertm_resend(chan);
/* Retransmit every unacked frame starting at control->reqseq (REJ
 * handling): rebuild retrans_list from the tx queue between reqseq
 * and tx_send_head, then resend.
 */
2261 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2262 struct l2cap_ctrl *control)
2264 struct sk_buff *skb;
2266 BT_DBG("chan %p, control %p", chan, control);
/* NOTE(review): the F-bit is presumably set only when the peer polled;
 * the guarding condition is in unsampled lines — confirm.
 */
2269 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2271 l2cap_seq_list_clear(&chan->retrans_list);
2273 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2276 if (chan->unacked_frames) {
/* Find the first frame at reqseq (or the current send head) ... */
2277 skb_queue_walk(&chan->tx_q, skb) {
2278 if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2279 skb == chan->tx_send_head)
/* ... then queue everything from there up to the send head */
2283 skb_queue_walk_from(&chan->tx_q, skb) {
2284 if (skb == chan->tx_send_head)
2287 l2cap_seq_list_append(&chan->retrans_list,
2288 bt_cb(skb)->l2cap.txseq);
2291 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames.  Sends an RNR when locally busy,
 * otherwise tries to piggyback the ack on pending I-frames; failing
 * that, sends an explicit RR once ~3/4 of the ack window is pending,
 * or (re)arms the ack timer to batch the ack.
 */
2295 static void l2cap_send_ack(struct l2cap_chan *chan)
2297 struct l2cap_ctrl control;
2298 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2299 chan->last_acked_seq);
2302 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2303 chan, chan->last_acked_seq, chan->buffer_seq);
2305 memset(&control, 0, sizeof(control));
2308 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2309 chan->rx_state == L2CAP_RX_STATE_RECV) {
2310 __clear_ack_timer(chan);
2311 control.super = L2CAP_SUPER_RNR;
2312 control.reqseq = chan->buffer_seq;
2313 l2cap_send_sframe(chan, &control);
2315 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2316 l2cap_ertm_send(chan);
2317 /* If any i-frames were sent, they included an ack */
2318 if (chan->buffer_seq == chan->last_acked_seq)
2322 /* Ack now if the window is 3/4ths full.
2323 * Calculate without mul or div
2325 threshold = chan->ack_win;
/* threshold = ack_win * 3 (shifted), i.e. 3/4 after the later >>2 —
 * NOTE(review): the shift lines are unsampled; confirm against source.
 */
2326 threshold += threshold << 1;
2329 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2332 if (frames_to_ack >= threshold) {
2333 __clear_ack_timer(chan);
2334 control.super = L2CAP_SUPER_RR;
2335 control.reqseq = chan->buffer_seq;
2336 l2cap_send_sframe(chan, &control);
2341 __set_ack_timer(chan);
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into the skb's linear area, the remainder into a chain of
 * frag_list skbs each at most conn->mtu bytes.  Returns 0 or a
 * negative error (error-return lines not sampled here).
 */
2345 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2346 struct msghdr *msg, int len,
2347 int count, struct sk_buff *skb)
2349 struct l2cap_conn *conn = chan->conn;
2350 struct sk_buff **frag;
2353 if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2359 /* Continuation fragments (no L2CAP header) */
2360 frag = &skb_shinfo(skb)->frag_list;
2362 struct sk_buff *tmp;
2364 count = min_t(unsigned int, conn->mtu, len);
2366 tmp = chan->ops->alloc_skb(chan, 0, count,
2367 msg->msg_flags & MSG_DONTWAIT);
2369 return PTR_ERR(tmp);
2373 if (!copy_from_iter_full(skb_put(*frag, count), count,
/* Keep the parent skb's accounting in sync with its fragments */
2380 skb->len += (*frag)->len;
2381 skb->data_len += (*frag)->len;
2383 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM
 * followed by the user payload from @msg.  Returns the skb or an
 * ERR_PTR on allocation/copy failure.
 */
2389 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2390 struct msghdr *msg, size_t len)
2392 struct l2cap_conn *conn = chan->conn;
2393 struct sk_buff *skb;
2394 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2395 struct l2cap_hdr *lh;
2397 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2398 __le16_to_cpu(chan->psm), len);
/* Linear part limited by link MTU; rest goes to frag_list */
2400 count = min_t(unsigned int, (conn->mtu - hlen), len);
2402 skb = chan->ops->alloc_skb(chan, hlen, count,
2403 msg->msg_flags & MSG_DONTWAIT);
2407 /* Create L2CAP header */
2408 lh = skb_put(skb, L2CAP_HDR_SIZE);
2409 lh->cid = cpu_to_le16(chan->dcid);
2410 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2411 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2413 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2414 if (unlikely(err < 0)) {
2416 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by
 * the user payload.  Returns the skb or an ERR_PTR on failure.
 */
2421 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2422 struct msghdr *msg, size_t len)
2424 struct l2cap_conn *conn = chan->conn;
2425 struct sk_buff *skb;
2427 struct l2cap_hdr *lh;
2429 BT_DBG("chan %p len %zu", chan, len);
2431 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2433 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2434 msg->msg_flags & MSG_DONTWAIT);
2438 /* Create L2CAP header */
2439 lh = skb_put(skb, L2CAP_HDR_SIZE);
2440 lh->cid = cpu_to_le16(chan->dcid);
2441 lh->len = cpu_to_le16(len);
2443 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2444 if (unlikely(err < 0)) {
2446 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, a zeroed control
 * field (filled in at transmit time), an optional SDU-length field
 * for the first segment, the payload, and room for an FCS when CRC16
 * is enabled.  Returns the skb or an ERR_PTR.
 */
2451 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2452 struct msghdr *msg, size_t len,
2455 struct l2cap_conn *conn = chan->conn;
2456 struct sk_buff *skb;
2457 int err, count, hlen;
2458 struct l2cap_hdr *lh;
2460 BT_DBG("chan %p len %zu", chan, len);
2463 return ERR_PTR(-ENOTCONN);
/* Control-field size depends on enhanced vs extended control */
2465 hlen = __ertm_hdr_size(chan);
/* sdulen != 0 marks a SAR start segment carrying the SDU length */
2468 hlen += L2CAP_SDULEN_SIZE;
2470 if (chan->fcs == L2CAP_FCS_CRC16)
2471 hlen += L2CAP_FCS_SIZE;
2473 count = min_t(unsigned int, (conn->mtu - hlen), len);
2475 skb = chan->ops->alloc_skb(chan, hlen, count,
2476 msg->msg_flags & MSG_DONTWAIT);
2480 /* Create L2CAP header */
2481 lh = skb_put(skb, L2CAP_HDR_SIZE);
2482 lh->cid = cpu_to_le16(chan->dcid);
2483 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2485 /* Control header is populated later */
2486 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2487 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2489 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2492 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2494 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2495 if (unlikely(err < 0)) {
2497 return ERR_PTR(err);
2500 bt_cb(skb)->l2cap.fcs = chan->fcs;
2501 bt_cb(skb)->l2cap.retries = 0;
/* Segment an outgoing SDU into I-frame PDUs sized to fit a single HCI
 * fragment and the remote MPS, tagging each with the proper SAR value
 * (UNSEGMENTED, or START/CONTINUE/END).  Segments are collected on
 * @seg_queue; on failure the queue is purged and an error returned.
 */
2505 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2506 struct sk_buff_head *seg_queue,
2507 struct msghdr *msg, size_t len)
2509 struct sk_buff *skb;
2514 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2516 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2517 * so fragmented skbs are not used. The HCI layer's handling
2518 * of fragmented skbs is not compatible with ERTM's queueing.
2521 /* PDU size is derived from the HCI MTU */
2522 pdu_len = chan->conn->mtu;
2524 /* Constrain PDU size for BR/EDR connections */
2526 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2528 /* Adjust for largest possible L2CAP overhead. */
2530 pdu_len -= L2CAP_FCS_SIZE;
2532 pdu_len -= __ertm_hdr_size(chan);
2534 /* Remote device may have requested smaller PDUs */
2535 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2537 if (len <= pdu_len) {
2538 sar = L2CAP_SAR_UNSEGMENTED;
2542 sar = L2CAP_SAR_START;
/* Loop body (partially sampled): create one PDU per segment */
2547 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2550 __skb_queue_purge(seg_queue);
2551 return PTR_ERR(skb);
2554 bt_cb(skb)->l2cap.sar = sar;
2555 __skb_queue_tail(seg_queue, skb);
/* Last remaining chunk becomes the END segment */
2561 if (len <= pdu_len) {
2562 sar = L2CAP_SAR_END;
2565 sar = L2CAP_SAR_CONTINUE;
/* Build an LE credit-based-mode PDU: L2CAP header plus an optional
 * SDU-length field (first segment only) and the payload.  Returns the
 * skb or an ERR_PTR on failure.
 */
2572 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2574 size_t len, u16 sdulen)
2576 struct l2cap_conn *conn = chan->conn;
2577 struct sk_buff *skb;
2578 int err, count, hlen;
2579 struct l2cap_hdr *lh;
2581 BT_DBG("chan %p len %zu", chan, len);
2584 return ERR_PTR(-ENOTCONN);
2586 hlen = L2CAP_HDR_SIZE;
/* sdulen != 0 marks the first segment carrying the SDU length */
2589 hlen += L2CAP_SDULEN_SIZE;
2591 count = min_t(unsigned int, (conn->mtu - hlen), len);
2593 skb = chan->ops->alloc_skb(chan, hlen, count,
2594 msg->msg_flags & MSG_DONTWAIT);
2598 /* Create L2CAP header */
2599 lh = skb_put(skb, L2CAP_HDR_SIZE);
2600 lh->cid = cpu_to_le16(chan->dcid);
2601 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2604 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2606 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2607 if (unlikely(err < 0)) {
2609 return ERR_PTR(err);
/* Segment an SDU for LE credit-based flow control: the first PDU
 * reserves room for (and carries) the 2-byte SDU length, subsequent
 * PDUs use the full remote MPS.  Segments accumulate on @seg_queue;
 * the queue is purged on failure.
 */
2615 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2616 struct sk_buff_head *seg_queue,
2617 struct msghdr *msg, size_t len)
2619 struct sk_buff *skb;
2623 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
/* First segment is smaller: it carries the SDU length field */
2626 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2632 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2634 __skb_queue_purge(seg_queue);
2635 return PTR_ERR(skb);
2638 __skb_queue_tail(seg_queue, skb);
/* Later segments reclaim the SDU-length bytes */
2644 pdu_len += L2CAP_SDULEN_SIZE;
/* Drain the tx queue while transmit credits remain, sending one PDU
 * per credit (credit decrement is in unsampled lines).
 */
2651 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2655 BT_DBG("chan %p", chan);
2657 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2658 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2663 BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2664 skb_queue_len(&chan->tx_q));
/* Public entry point for sending user data on a channel.  Dispatches
 * on channel type/mode: connectionless, LE/extended credit-based,
 * basic, or ERTM/streaming (with segmentation).  Returns @len on
 * success or a negative error (return lines partially sampled).
 */
2667 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2669 struct sk_buff *skb;
2671 struct sk_buff_head seg_queue;
2676 /* Connectionless channel */
2677 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2678 skb = l2cap_create_connless_pdu(chan, msg, len);
2680 return PTR_ERR(skb);
2682 l2cap_do_send(chan, skb);
2686 switch (chan->mode) {
2687 case L2CAP_MODE_LE_FLOWCTL:
2688 case L2CAP_MODE_EXT_FLOWCTL:
2689 /* Check outgoing MTU */
2690 if (len > chan->omtu)
2693 __skb_queue_head_init(&seg_queue);
2695 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
/* Channel may have gone down while segmenting (can block on memory) */
2697 if (chan->state != BT_CONNECTED) {
2698 __skb_queue_purge(&seg_queue);
2705 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2707 l2cap_le_flowctl_send(chan);
/* Out of credits: tell the owner to stop producing data */
2709 if (!chan->tx_credits)
2710 chan->ops->suspend(chan);
2716 case L2CAP_MODE_BASIC:
2717 /* Check outgoing MTU */
2718 if (len > chan->omtu)
2721 /* Create a basic PDU */
2722 skb = l2cap_create_basic_pdu(chan, msg, len);
2724 return PTR_ERR(skb);
2726 l2cap_do_send(chan, skb);
2730 case L2CAP_MODE_ERTM:
2731 case L2CAP_MODE_STREAMING:
2732 /* Check outgoing MTU */
2733 if (len > chan->omtu) {
2738 __skb_queue_head_init(&seg_queue);
2740 /* Do segmentation before calling in to the state machine,
2741 * since it's possible to block while waiting for memory
2744 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2749 if (chan->mode == L2CAP_MODE_ERTM)
2750 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2752 l2cap_streaming_send(chan, &seg_queue);
2756 /* If the skbs were not queued for sending, they'll still be in
2757 * seg_queue and need to be purged.
2759 __skb_queue_purge(&seg_queue);
2763 BT_DBG("bad state %1.1x", chan->mode);
/* Send SREJ S-frames for every missing sequence number between
 * expected_tx_seq and @txseq that is not already buffered in srej_q,
 * recording each on srej_list; then advance expected_tx_seq past
 * @txseq.
 */
2771 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2773 struct l2cap_ctrl control;
2776 BT_DBG("chan %p, txseq %u", chan, txseq);
2778 memset(&control, 0, sizeof(control));
2780 control.super = L2CAP_SUPER_SREJ;
2782 for (seq = chan->expected_tx_seq; seq != txseq;
2783 seq = __next_seq(chan, seq)) {
2784 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2785 control.reqseq = seq;
2786 l2cap_send_sframe(chan, &control);
2787 l2cap_seq_list_append(&chan->srej_list, seq);
2791 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recent entry on srej_list (the tail);
 * no-op if the list is empty.
 */
2794 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2796 struct l2cap_ctrl control;
2798 BT_DBG("chan %p", chan);
2800 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2803 memset(&control, 0, sizeof(control));
2805 control.super = L2CAP_SUPER_SREJ;
2806 control.reqseq = chan->srej_list.tail;
2807 l2cap_send_sframe(chan, &control);
/* Re-send SREJs for every outstanding entry on srej_list except
 * @txseq, rotating each entry back onto the list.  The initial head
 * is captured so the list is traversed exactly once.
 */
2810 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2812 struct l2cap_ctrl control;
2816 BT_DBG("chan %p, txseq %u", chan, txseq);
2818 memset(&control, 0, sizeof(control));
2820 control.super = L2CAP_SUPER_SREJ;
2822 /* Capture initial list head to allow only one pass through the list. */
2823 initial_head = chan->srej_list.head;
2826 seq = l2cap_seq_list_pop(&chan->srej_list);
/* @txseq has arrived, so it is dropped rather than re-requested */
2827 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2830 control.reqseq = seq;
2831 l2cap_send_sframe(chan, &control);
2832 l2cap_seq_list_append(&chan->srej_list, seq);
2833 } while (chan->srej_list.head != initial_head);
/* Process an incoming ack (reqseq): free every tx-queue frame from
 * expected_ack_seq up to (but excluding) @reqseq, update the expected
 * ack sequence, and stop the retransmission timer once nothing
 * remains unacked.
 */
2836 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2838 struct sk_buff *acked_skb;
2841 BT_DBG("chan %p, reqseq %u", chan, reqseq);
/* Nothing new acknowledged: nothing to do */
2843 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2846 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2847 chan->expected_ack_seq, chan->unacked_frames);
2849 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2850 ackseq = __next_seq(chan, ackseq)) {
2852 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2854 skb_unlink(acked_skb, &chan->tx_q);
2855 kfree_skb(acked_skb);
2856 chan->unacked_frames--;
2860 chan->expected_ack_seq = reqseq;
2862 if (chan->unacked_frames == 0)
2863 __clear_retrans_timer(chan);
2865 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abandon the SREJ_SENT receive state: rewind expected_tx_seq to the
 * buffer position, drop all pending SREJ bookkeeping and buffered
 * out-of-order frames, and fall back to the plain RECV state.
 */
2868 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2870 BT_DBG("chan %p", chan);
2872 chan->expected_tx_seq = chan->buffer_seq;
2873 l2cap_seq_list_clear(&chan->srej_list);
2874 skb_queue_purge(&chan->srej_q);
2875 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM transmit state machine, XMIT state: handle an incoming event.
 * Data requests are queued and sent immediately; local-busy detection
 * and polling events transition the channel to WAIT_F and start the
 * monitor timer.
 * NOTE(review): the "switch (event) {" header, "break;" statements and
 * closing braces are missing from this extract.
 */
2878 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2879 struct l2cap_ctrl *control,
2880 struct sk_buff_head *skbs, u8 event)
2882 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
/* New data from the caller: append to tx_q and transmit right away. */
2886 case L2CAP_EV_DATA_REQUEST:
2887 if (chan->tx_send_head == NULL)
2888 chan->tx_send_head = skb_peek(skbs);
2890 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2891 l2cap_ertm_send(chan);
2893 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2894 BT_DBG("Enter LOCAL_BUSY");
2895 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2897 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2898 /* The SREJ_SENT state must be aborted if we are to
2899 * enter the LOCAL_BUSY state.
 */
2901 l2cap_abort_rx_srej_sent(chan);
2904 l2cap_send_ack(chan);
2907 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2908 BT_DBG("Exit LOCAL_BUSY");
2909 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* If we previously told the peer we were busy (RNR), poll it with an
 * RR(P=1) so it resumes sending, and wait for the final bit.
 */
2911 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2912 struct l2cap_ctrl local_control;
2914 memset(&local_control, 0, sizeof(local_control));
2915 local_control.sframe = 1;
2916 local_control.super = L2CAP_SUPER_RR;
2917 local_control.poll = 1;
2918 local_control.reqseq = chan->buffer_seq;
2919 l2cap_send_sframe(chan, &local_control);
2921 chan->retry_count = 1;
2922 __set_monitor_timer(chan);
2923 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2926 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2927 l2cap_process_reqseq(chan, control->reqseq);
2929 case L2CAP_EV_EXPLICIT_POLL:
2930 l2cap_send_rr_or_rnr(chan, 1);
2931 chan->retry_count = 1;
2932 __set_monitor_timer(chan);
2933 __clear_ack_timer(chan);
2934 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
/* Retransmission timeout: poll the peer and await the F-bit. */
2936 case L2CAP_EV_RETRANS_TO:
2937 l2cap_send_rr_or_rnr(chan, 1);
2938 chan->retry_count = 1;
2939 __set_monitor_timer(chan);
2940 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2942 case L2CAP_EV_RECV_FBIT:
2943 /* Nothing to process */
/* ERTM transmit state machine, WAIT_F state: the channel has polled the
 * peer and is waiting for an S/I-frame with the Final bit.  Data is
 * queued but not sent; a received F-bit returns the channel to XMIT;
 * monitor timeouts re-poll up to max_tx times before disconnecting.
 * NOTE(review): the "switch (event) {" header, "break;" statements and
 * closing braces are missing from this extract.
 */
2950 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2951 struct l2cap_ctrl *control,
2952 struct sk_buff_head *skbs, u8 event)
2954 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2958 case L2CAP_EV_DATA_REQUEST:
2959 if (chan->tx_send_head == NULL)
2960 chan->tx_send_head = skb_peek(skbs);
2961 /* Queue data, but don't send. */
2962 skb_queue_splice_tail_init(skbs, &chan->tx_q)&#59;
2964 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2965 BT_DBG("Enter LOCAL_BUSY");
2966 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2968 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2969 /* The SREJ_SENT state must be aborted if we are to
2970 * enter the LOCAL_BUSY state.
 */
2972 l2cap_abort_rx_srej_sent(chan);
2975 l2cap_send_ack(chan);
2978 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2979 BT_DBG("Exit LOCAL_BUSY");
2980 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* Re-poll with RR(P=1) after having sent RNR while busy. */
2982 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2983 struct l2cap_ctrl local_control;
2984 memset(&local_control, 0, sizeof(local_control));
2985 local_control.sframe = 1;
2986 local_control.super = L2CAP_SUPER_RR;
2987 local_control.poll = 1;
2988 local_control.reqseq = chan->buffer_seq;
2989 l2cap_send_sframe(chan, &local_control);
2991 chan->retry_count = 1;
2992 __set_monitor_timer(chan);
2993 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2996 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2997 l2cap_process_reqseq(chan, control->reqseq);
/* Final bit received: the poll exchange is complete, go back to XMIT
 * and restart the retransmission timer if frames are still unacked.
 */
3000 case L2CAP_EV_RECV_FBIT:
3001 if (control && control->final) {
3002 __clear_monitor_timer(chan);
3003 if (chan->unacked_frames > 0)
3004 __set_retrans_timer(chan);
3005 chan->retry_count = 0;
3006 chan->tx_state = L2CAP_TX_STATE_XMIT;
3007 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
/* Already polling; an explicit poll request is ignored here. */
3010 case L2CAP_EV_EXPLICIT_POLL:
3013 case L2CAP_EV_MONITOR_TO:
3014 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3015 l2cap_send_rr_or_rnr(chan, 1);
3016 __set_monitor_timer(chan);
3017 chan->retry_count++;
3019 l2cap_send_disconn_req(chan, ECONNABORTED);
/* Dispatch a transmit-side event to the handler for the channel's
 * current TX state (XMIT or WAIT_F).
 * NOTE(review): "break;" statements and closing braces are missing
 * from this extract.
 */
3027 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3028 struct sk_buff_head *skbs, u8 event)
3030 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3031 chan, control, skbs, event, chan->tx_state);
3033 switch (chan->tx_state) {
3034 case L2CAP_TX_STATE_XMIT:
3035 l2cap_tx_state_xmit(chan, control, skbs, event);
3037 case L2CAP_TX_STATE_WAIT_F:
3038 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Feed a received frame's ReqSeq/F-bit information into the TX state
 * machine (no data skbs to queue, hence the NULL).
 */
3046 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
3047 struct l2cap_ctrl *control)
3049 BT_DBG("chan %p, control %p", chan, control);
3050 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Feed only the Final-bit of a received frame into the TX state
 * machine (no data skbs to queue, hence the NULL).
 */
3053 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
3054 struct l2cap_ctrl *control)
3056 BT_DBG("chan %p, control %p", chan, control);
3057 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
3060 /* Copy frame to all raw sockets on that connection */
/* Clone the skb once per raw channel under chan_lock and hand each
 * clone to the channel's recv op.
 * NOTE(review): "continue;" statements for the skipped channels, the
 * NULL check of skb_clone(), and the clone-free path on recv failure
 * are missing from this extract.
 */
3061 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3063 struct sk_buff *nskb;
3064 struct l2cap_chan *chan;
3066 BT_DBG("conn %p", conn);
3068 mutex_lock(&conn->chan_lock);
3070 list_for_each_entry(chan, &conn->chan_l, list) {
3071 if (chan->chan_type != L2CAP_CHAN_RAW)
3074 /* Don't send frame to the channel it came from */
3075 if (bt_cb(skb)->l2cap.chan == chan)
3078 nskb = skb_clone(skb, GFP_KERNEL);
3081 if (chan->ops->recv(chan, nskb))
3085 mutex_unlock(&conn->chan_lock);
3088 /* ---- L2CAP signalling commands ---- */
/* Allocate and build a signalling PDU: L2CAP header + command header +
 * dlen bytes of payload, directed at the LE or BR/EDR signalling CID
 * depending on link type.  Payload exceeding conn->mtu is carried in
 * continuation fragments chained on frag_list.
 * NOTE(review): local declarations ("count", "len"), allocation-failure
 * handling, the cmd->code/cmd->ident assignments, the per-fragment
 * len/data bookkeeping and the return statement are missing from this
 * extract.
 */
3089 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
3090 u8 ident, u16 dlen, void *data)
3092 struct sk_buff *skb, **frag;
3093 struct l2cap_cmd_hdr *cmd;
3094 struct l2cap_hdr *lh;
3097 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
3098 conn, code, ident, dlen);
/* MTU must at least fit the two headers. */
3100 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
3103 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
3104 count = min_t(unsigned int, conn->mtu, len);
3106 skb = bt_skb_alloc(count, GFP_KERNEL);
3110 lh = skb_put(skb, L2CAP_HDR_SIZE);
3111 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
3113 if (conn->hcon->type == LE_LINK)
3114 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
3116 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
3118 cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
3121 cmd->len = cpu_to_le16(dlen);
/* Copy as much payload as fits the first skb. */
3124 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
3125 skb_put_data(skb, data, count);
3131 /* Continuation fragments (no L2CAP header) */
3132 frag = &skb_shinfo(skb)->frag_list;
3134 count = min_t(unsigned int, conn->mtu, len);
3136 *frag = bt_skb_alloc(count, GFP_KERNEL);
3140 skb_put_data(*frag, data, count);
3145 frag = &(*frag)->next;
/* Decode one configuration option (type/len/value TLV) at *ptr, storing
 * the value by size; larger payloads are returned as a pointer cast to
 * unsigned long.  Returns the consumed length.
 * NOTE(review): the "switch (opt->len) {" header, pointer advancement
 * and return statement are missing from this extract.
 */
3155 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3158 struct l2cap_conf_opt *opt = *ptr;
3161 len = L2CAP_CONF_OPT_SIZE + opt->len;
3169 *val = *((u8 *) opt->val);
3173 *val = get_unaligned_le16(opt->val);
3177 *val = get_unaligned_le32(opt->val);
3181 *val = (unsigned long) opt->val;
3185 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option TLV at *ptr, encoding the value by
 * its length (1/2/4 bytes little-endian, otherwise a memcpy from the
 * pointer passed in "val"); *ptr is advanced past the option.  Nothing
 * is written if fewer than L2CAP_CONF_OPT_SIZE + len bytes remain.
 * NOTE(review): the opt->type/opt->len assignments and the "switch
 * (len) {" header are missing from this extract.
 */
3189 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3191 struct l2cap_conf_opt *opt = *ptr;
3193 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3195 if (size < L2CAP_CONF_OPT_SIZE + len)
3203 *((u8 *) opt->val) = val;
3207 put_unaligned_le16(val, opt->val);
3211 put_unaligned_le32(val, opt->val);
3215 memcpy(opt->val, (void *) val, len);
3219 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build an Extended Flow Specification from the channel's local QoS
 * parameters (full set for ERTM, best-effort defaults for streaming)
 * and append it as an EFS config option.
 * NOTE(review): the streaming-case efs.id assignment, the default-case
 * handling and "break;" statements are missing from this extract.
 */
3222 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3224 struct l2cap_conf_efs efs;
3226 switch (chan->mode) {
3227 case L2CAP_MODE_ERTM:
3228 efs.id = chan->local_id;
3229 efs.stype = chan->local_stype;
3230 efs.msdu = cpu_to_le16(chan->local_msdu);
3231 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3232 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3233 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3236 case L2CAP_MODE_STREAMING:
3238 efs.stype = L2CAP_SERV_BESTEFFORT;
3239 efs.msdu = cpu_to_le16(chan->local_msdu);
3240 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3249 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3250 (unsigned long) &efs, size);
/* Delayed-work handler for the ERTM ack timer: if any received frames
 * are still unacknowledged, send an RR/RNR to ack them; drops the
 * channel reference taken when the work was scheduled.
 * NOTE(review): the "frames_to_ack" declaration and the "if
 * (frames_to_ack)" guard around the send are missing from this
 * extract.
 */
3253 static void l2cap_ack_timeout(struct work_struct *work)
3255 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3259 BT_DBG("chan %p", chan);
3261 l2cap_chan_lock(chan);
3263 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3264 chan->last_acked_seq);
3267 l2cap_send_rr_or_rnr(chan, 0);
3269 l2cap_chan_unlock(chan);
3270 l2cap_chan_put(chan);
/* Reset a channel's sequence-number and queue state for (re)configured
 * operation; for ERTM mode additionally initialise the SREJ queue and
 * the srej/retrans sequence lists sized by the negotiated windows.
 * Returns 0 or a negative errno from the list allocations.
 * NOTE(review): the "int err;" declaration, early "return 0;" for
 * non-ERTM modes, error-path returns and "return err;" are missing
 * from this extract.
 */
3273 int l2cap_ertm_init(struct l2cap_chan *chan)
3277 chan->next_tx_seq = 0;
3278 chan->expected_tx_seq = 0;
3279 chan->expected_ack_seq = 0;
3280 chan->unacked_frames = 0;
3281 chan->buffer_seq = 0;
3282 chan->frames_sent = 0;
3283 chan->last_acked_seq = 0;
3285 chan->sdu_last_frag = NULL;
3288 skb_queue_head_init(&chan->tx_q);
3290 chan->local_amp_id = AMP_ID_BREDR;
3291 chan->move_id = AMP_ID_BREDR;
3292 chan->move_state = L2CAP_MOVE_STABLE;
3293 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* Everything below only applies to ERTM channels. */
3295 if (chan->mode != L2CAP_MODE_ERTM)
3298 chan->rx_state = L2CAP_RX_STATE_RECV;
3299 chan->tx_state = L2CAP_TX_STATE_XMIT;
3301 skb_queue_head_init(&chan->srej_q);
3303 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3307 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* Unwind the srej_list if the retrans_list allocation failed. */
3309 l2cap_seq_list_free(&chan->srej_list);
/* Keep the requested ERTM/streaming mode if the remote supports it,
 * otherwise fall back to basic mode.
 * NOTE(review): the "switch (mode) {" header and "return mode;" on the
 * supported path are missing from this extract.
 */
3314 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3317 case L2CAP_MODE_STREAMING:
3318 case L2CAP_MODE_ERTM:
3319 if (l2cap_mode_supported(mode, remote_feat_mask))
3323 return L2CAP_MODE_BASIC;
/* Extended window size is usable only when the local A2MP fixed channel
 * is available and the remote advertised the extended-window feature.
 */
3327 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3329 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3330 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
/* Extended flow spec is usable only when the local A2MP fixed channel
 * is available and the remote advertised the extended-flow feature.
 */
3333 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3335 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3336 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
/* Fill in the RFC option's retransmission and monitor timeouts.  On an
 * AMP link they are derived from the controller's best-effort flush
 * timeout (converted to ms, scaled, and clamped to 16 bits); otherwise
 * the spec default values are used.
 * NOTE(review): the clamp assignment under "if (ertm_to > 0xffff)" and
 * the "else" between the two branches are missing from this extract.
 */
3339 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3340 struct l2cap_conf_rfc *rfc)
3342 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3343 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3345 /* Class 1 devices have must have ERTM timeouts
3346 * exceeding the Link Supervision Timeout. The
3347 * default Link Supervision Timeout for AMP
3348 * controllers is 10 seconds.
3350 * Class 1 devices use 0xffffffff for their
3351 * best-effort flush timeout, so the clamping logic
3352 * will result in a timeout that meets the above
3353 * requirement. ERTM timeouts are 16-bit values, so
3354 * the maximum timeout is 65.535 seconds.
 */
3357 /* Convert timeout to milliseconds and round */
3358 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3360 /* This is the recommended formula for class 2 devices
3361 * that start ERTM timers when packets are sent to the
 * controller.
 */
3364 ertm_to = 3 * ertm_to + 500;
3366 if (ertm_to > 0xffff)
3369 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3370 rfc->monitor_timeout = rfc->retrans_timeout;
3372 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3373 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Choose the TX window: enable the extended control field when the
 * requested window exceeds the default and the link supports extended
 * windows; otherwise clamp to the spec default.  ack_win mirrors the
 * resulting tx_win.
 * NOTE(review): the "} else {" between the branches is missing from
 * this extract.
 */
3377 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3379 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3380 __l2cap_ews_supported(chan->conn)) {
3381 /* use extended control field */
3382 set_bit(FLAG_EXT_CTRL, &chan->flags);
3383 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3385 chan->tx_win = min_t(u16, chan->tx_win,
3386 L2CAP_DEFAULT_TX_WINDOW);
3387 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3389 chan->ack_win = chan->tx_win;
/* Derive a receive MTU from the largest EDR packet type the ACL link
 * allows.  The pkt_type bits are "packet type NOT allowed" flags, so
 * each check returns early at the first disallowed size; the
 * assignments raising chan->imtu between the checks are missing from
 * this extract (NOTE(review): presumably imtu is set to each packet
 * payload capacity in turn -- confirm against the full source).
 */
3392 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3394 struct hci_conn *conn = chan->conn->hcon;
3396 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3398 /* The 2-DH1 packet has between 2 and 56 information bytes
3399 * (including the 2-byte payload header)
 */
3401 if (!(conn->pkt_type & HCI_2DH1))
3404 /* The 3-DH1 packet has between 2 and 85 information bytes
3405 * (including the 2-byte payload header)
 */
3407 if (!(conn->pkt_type & HCI_3DH1))
3410 /* The 2-DH3 packet has between 2 and 369 information bytes
3411 * (including the 2-byte payload header)
 */
3413 if (!(conn->pkt_type & HCI_2DH3))
3416 /* The 3-DH3 packet has between 2 and 554 information bytes
3417 * (including the 2-byte payload header)
 */
3419 if (!(conn->pkt_type & HCI_3DH3))
3422 /* The 2-DH5 packet has between 2 and 681 information bytes
3423 * (including the 2-byte payload header)
 */
3425 if (!(conn->pkt_type & HCI_2DH5))
3428 /* The 3-DH5 packet has between 2 and 1023 information bytes
3429 * (including the 2-byte payload header)
 */
3431 if (!(conn->pkt_type & HCI_3DH5))
/* Build an outgoing Configure Request into "data" (at most data_size
 * bytes): MTU option when non-default, then mode-specific options
 * (RFC, optionally EFS, EWS and FCS for ERTM/streaming).  Returns the
 * number of bytes written (the final return and several "break;"/"goto
 * done" statements are missing from this extract).
 * NOTE(review): local declarations ("u16 size;"), the CONF_STATE2
 * "goto done" path and closing braces are also missing here.
 */
3435 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3437 struct l2cap_conf_req *req = data;
3438 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3439 void *ptr = req->data;
3440 void *endptr = data + data_size;
3443 BT_DBG("chan %p", chan);
/* Only pick a mode on the first request of a negotiation. */
3445 if (chan->num_conf_req || chan->num_conf_rsp)
3448 switch (chan->mode) {
3449 case L2CAP_MODE_STREAMING:
3450 case L2CAP_MODE_ERTM:
3451 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3454 if (__l2cap_efs_supported(chan->conn))
3455 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3459 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3464 if (chan->imtu != L2CAP_DEFAULT_MTU) {
3466 l2cap_mtu_auto(chan);
3467 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
 endptr - ptr);
3471 switch (chan->mode) {
3472 case L2CAP_MODE_BASIC:
/* Only send an explicit basic-mode RFC when the peer knows about
 * retransmission/streaming modes at all.
 */
3476 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3477 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3480 rfc.mode = L2CAP_MODE_BASIC;
3482 rfc.max_transmit = 0;
3483 rfc.retrans_timeout = 0;
3484 rfc.monitor_timeout = 0;
3485 rfc.max_pdu_size = 0;
3487 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3488 (unsigned long) &rfc, endptr - ptr);
3491 case L2CAP_MODE_ERTM:
3492 rfc.mode = L2CAP_MODE_ERTM;
3493 rfc.max_transmit = chan->max_tx;
3495 __l2cap_set_ertm_timeouts(chan, &rfc);
/* PDU must fit the link MTU after extended header, SDU length and FCS
 * overhead.
 */
3497 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3498 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
 L2CAP_FCS_SIZE);
3500 rfc.max_pdu_size = cpu_to_le16(size);
3502 l2cap_txwin_setup(chan);
3504 rfc.txwin_size = min_t(u16, chan->tx_win,
3505 L2CAP_DEFAULT_TX_WINDOW);
3507 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3508 (unsigned long) &rfc, endptr - ptr);
3510 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3511 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3513 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3514 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3515 chan->tx_win, endptr - ptr);
3517 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3518 if (chan->fcs == L2CAP_FCS_NONE ||
3519 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3520 chan->fcs = L2CAP_FCS_NONE;
3521 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3522 chan->fcs, endptr - ptr);
3526 case L2CAP_MODE_STREAMING:
3527 l2cap_txwin_setup(chan);
3528 rfc.mode = L2CAP_MODE_STREAMING;
3530 rfc.max_transmit = 0;
3531 rfc.retrans_timeout = 0;
3532 rfc.monitor_timeout = 0;
3534 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3535 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
 L2CAP_FCS_SIZE);
3537 rfc.max_pdu_size = cpu_to_le16(size);
3539 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3540 (unsigned long) &rfc, endptr - ptr);
3542 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3543 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3545 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3546 if (chan->fcs == L2CAP_FCS_NONE ||
3547 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3548 chan->fcs = L2CAP_FCS_NONE;
3549 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3550 chan->fcs, endptr - ptr);
3555 req->dcid = cpu_to_le16(chan->dcid);
3556 req->flags = cpu_to_le16(0);
/* Parse the peer's buffered Configure Request (chan->conf_req/conf_len)
 * and build the Configure Response into "data".  Walks the option TLVs,
 * validates mode/MTU/EFS/EWS, then fills in the negotiated output
 * options and result (SUCCESS, UNACCEPT, UNKNOWN or PENDING).  Returns
 * the response length (the final "return ptr - data;" is missing from
 * this extract, as are several "break;"/"goto done" statements, local
 * declarations such as "remote_efs"/"size"/"val" usage guards, and
 * closing braces).
 */
3561 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3563 struct l2cap_conf_rsp *rsp = data;
3564 void *ptr = rsp->data;
3565 void *endptr = data + data_size;
3566 void *req = chan->conf_req;
3567 int len = chan->conf_len;
3568 int type, hint, olen;
3570 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3571 struct l2cap_conf_efs efs;
3573 u16 mtu = L2CAP_DEFAULT_MTU;
3574 u16 result = L2CAP_CONF_SUCCESS;
3577 BT_DBG("chan %p", chan);
/* First pass: decode every option the peer sent. */
3579 while (len >= L2CAP_CONF_OPT_SIZE) {
3580 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* The hint bit marks options that may be ignored if unknown. */
3584 hint = type & L2CAP_CONF_HINT;
3585 type &= L2CAP_CONF_MASK;
3588 case L2CAP_CONF_MTU:
3594 case L2CAP_CONF_FLUSH_TO:
3597 chan->flush_to = val;
3600 case L2CAP_CONF_QOS:
3603 case L2CAP_CONF_RFC:
3604 if (olen != sizeof(rfc))
3606 memcpy(&rfc, (void *) val, olen);
3609 case L2CAP_CONF_FCS:
3612 if (val == L2CAP_FCS_NONE)
3613 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3616 case L2CAP_CONF_EFS:
3617 if (olen != sizeof(efs))
3620 memcpy(&efs, (void *) val, olen);
3623 case L2CAP_CONF_EWS:
/* Extended windows require local A2MP support. */
3626 if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3627 return -ECONNREFUSED;
3628 set_bit(FLAG_EXT_CTRL, &chan->flags);
3629 set_bit(CONF_EWS_RECV, &chan->conf_state);
3630 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3631 chan->remote_tx_win = val;
/* Non-hint unknown option: reject it by echoing its type. */
3637 result = L2CAP_CONF_UNKNOWN;
3638 *((u8 *) ptr++) = type;
/* Mode selection happens only on the first req/rsp exchange. */
3643 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3646 switch (chan->mode) {
3647 case L2CAP_MODE_STREAMING:
3648 case L2CAP_MODE_ERTM:
3649 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3650 chan->mode = l2cap_select_mode(rfc.mode,
3651 chan->conn->feat_mask);
3656 if (__l2cap_efs_supported(chan->conn))
3657 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3659 return -ECONNREFUSED;
3662 if (chan->mode != rfc.mode)
3663 return -ECONNREFUSED;
/* Mode mismatch after the first round: counter-propose ours once,
 * refuse on repeat.
 */
3669 if (chan->mode != rfc.mode) {
3670 result = L2CAP_CONF_UNACCEPT;
3671 rfc.mode = chan->mode;
3673 if (chan->num_conf_rsp == 1)
3674 return -ECONNREFUSED;
3676 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3677 (unsigned long) &rfc, endptr - ptr);
3680 if (result == L2CAP_CONF_SUCCESS) {
3681 /* Configure output options and let the other side know
3682 * which ones we don't like. */
3684 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3685 result = L2CAP_CONF_UNACCEPT;
3688 set_bit(CONF_MTU_DONE, &chan->conf_state);
3690 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
/* Validate the remote EFS service type against our local one. */
3693 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3694 efs.stype != L2CAP_SERV_NOTRAFIC &&
3695 efs.stype != chan->local_stype) {
3697 result = L2CAP_CONF_UNACCEPT;
3699 if (chan->num_conf_req >= 1)
3700 return -ECONNREFUSED;
3702 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
 sizeof(efs),
3704 (unsigned long) &efs, endptr - ptr);
3706 /* Send PENDING Conf Rsp */
3707 result = L2CAP_CONF_PENDING;
3708 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3713 case L2CAP_MODE_BASIC:
3714 chan->fcs = L2CAP_FCS_NONE;
3715 set_bit(CONF_MODE_DONE, &chan->conf_state);
3718 case L2CAP_MODE_ERTM:
3719 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3720 chan->remote_tx_win = rfc.txwin_size;
3722 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3724 chan->remote_max_tx = rfc.max_transmit;
3726 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3727 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3728 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3729 rfc.max_pdu_size = cpu_to_le16(size);
3730 chan->remote_mps = size;
3732 __l2cap_set_ertm_timeouts(chan, &rfc);
3734 set_bit(CONF_MODE_DONE, &chan->conf_state);
3736 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3737 sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3740 test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3741 chan->remote_id = efs.id;
3742 chan->remote_stype = efs.stype;
3743 chan->remote_msdu = le16_to_cpu(efs.msdu);
3744 chan->remote_flush_to =
3745 le32_to_cpu(efs.flush_to);
3746 chan->remote_acc_lat =
3747 le32_to_cpu(efs.acc_lat);
3748 chan->remote_sdu_itime =
3749 le32_to_cpu(efs.sdu_itime);
3750 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
 sizeof(efs),
3752 (unsigned long) &efs, endptr - ptr);
3756 case L2CAP_MODE_STREAMING:
3757 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3758 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3759 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3760 rfc.max_pdu_size = cpu_to_le16(size);
3761 chan->remote_mps = size;
3763 set_bit(CONF_MODE_DONE, &chan->conf_state);
3765 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3766 (unsigned long) &rfc, endptr - ptr);
/* Unsupported mode: reject with a zeroed RFC carrying our mode. */
3771 result = L2CAP_CONF_UNACCEPT;
3773 memset(&rfc, 0, sizeof(rfc));
3774 rfc.mode = chan->mode;
3777 if (result == L2CAP_CONF_SUCCESS)
3778 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3780 rsp->scid = cpu_to_le16(chan->dcid);
3781 rsp->result = cpu_to_le16(result);
3782 rsp->flags = cpu_to_le16(0);
/* Parse the peer's Configure Response and build our follow-up Configure
 * Request into "data", echoing back adjusted options (MTU, flush
 * timeout, RFC, EWS, EFS, FCS).  On success/pending, commit the
 * negotiated ERTM/streaming parameters to the channel.  Returns the new
 * request length via "ptr - data" (the final return, local "type/olen/
 * val" declarations, several "break;" statements and closing braces are
 * missing from this extract).
 */
3787 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3788 void *data, size_t size, u16 *result)
3790 struct l2cap_conf_req *req = data;
3791 void *ptr = req->data;
3792 void *endptr = data + size;
3795 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3796 struct l2cap_conf_efs efs;
3798 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3800 while (len >= L2CAP_CONF_OPT_SIZE) {
3801 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3806 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the minimum: counter with the minimum. */
3809 if (val < L2CAP_DEFAULT_MIN_MTU) {
3810 *result = L2CAP_CONF_UNACCEPT;
3811 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3814 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
 endptr - ptr);
3818 case L2CAP_CONF_FLUSH_TO:
3821 chan->flush_to = val;
3822 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3823 chan->flush_to, endptr - ptr);
3826 case L2CAP_CONF_RFC:
3827 if (olen != sizeof(rfc))
3829 memcpy(&rfc, (void *)val, olen);
/* A state-2 device must not have its mode changed by the peer. */
3830 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3831 rfc.mode != chan->mode)
3832 return -ECONNREFUSED;
3834 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3835 (unsigned long) &rfc, endptr - ptr);
3838 case L2CAP_CONF_EWS:
3841 chan->ack_win = min_t(u16, val, chan->ack_win);
3842 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3843 chan->tx_win, endptr - ptr);
3846 case L2CAP_CONF_EFS:
3847 if (olen != sizeof(efs))
3849 memcpy(&efs, (void *)val, olen);
3850 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3851 efs.stype != L2CAP_SERV_NOTRAFIC &&
3852 efs.stype != chan->local_stype)
3853 return -ECONNREFUSED;
3854 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3855 (unsigned long) &efs, endptr - ptr);
3858 case L2CAP_CONF_FCS:
3861 if (*result == L2CAP_CONF_PENDING)
3862 if (val == L2CAP_FCS_NONE)
3863 set_bit(CONF_RECV_NO_FCS,
 &chan->conf_state);
/* A basic-mode channel cannot be renegotiated to another mode. */
3869 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3870 return -ECONNREFUSED;
3872 chan->mode = rfc.mode;
3874 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3876 case L2CAP_MODE_ERTM:
3877 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3878 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3879 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3880 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3881 chan->ack_win = min_t(u16, chan->ack_win,
 rfc.txwin_size);
3884 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3885 chan->local_msdu = le16_to_cpu(efs.msdu);
3886 chan->local_sdu_itime =
3887 le32_to_cpu(efs.sdu_itime);
3888 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3889 chan->local_flush_to =
3890 le32_to_cpu(efs.flush_to);
3894 case L2CAP_MODE_STREAMING:
3895 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3899 req->dcid = cpu_to_le16(chan->dcid);
3900 req->flags = cpu_to_le16(0);
/* Build a bare Configure Response header (scid, result, flags) with no
 * options; the length returned is presumably "ptr - data" (the return
 * statement is missing from this extract).
 */
3905 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3906 u16 result, u16 flags)
3908 struct l2cap_conf_rsp *rsp = data;
3909 void *ptr = rsp->data;
3911 BT_DBG("chan %p", chan);
3913 rsp->scid = cpu_to_le16(chan->dcid);
3914 rsp->result = cpu_to_le16(result);
3915 rsp->flags = cpu_to_le16(flags);
/* Send the deferred LE Credit Based Connection Response for a channel
 * whose acceptance was postponed (DEFER_SETUP), using the channel's
 * stored ident and negotiated mtu/mps/credits.
 */
3920 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3922 struct l2cap_le_conn_rsp rsp;
3923 struct l2cap_conn *conn = chan->conn;
3925 BT_DBG("chan %p", chan);
3927 rsp.dcid = cpu_to_le16(chan->scid);
3928 rsp.mtu = cpu_to_le16(chan->imtu);
3929 rsp.mps = cpu_to_le16(chan->mps);
3930 rsp.credits = cpu_to_le16(chan->rx_credits);
3931 rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3933 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
 &rsp);
/* Per-channel callback used while verifying an enhanced-credit (ECRED)
 * connect group: flags refusal in *result for any channel that is
 * neither connected nor still pending accept.
 * NOTE(review): the "int *result = data;" declaration and the
 * BT_CONNECT2/BT_CONNECTED case labels are missing from this extract.
 */
3937 static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
3941 if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
3944 switch (chan->state) {
3946 /* If channel still pending accept add to result */
3952 /* If not connected or pending accept it has been refused */
3953 *result = -ECONNREFUSED;
/* Aggregated ECRED connection response: the fixed response header plus
 * up to L2CAP_ECRED_MAX_CID destination CIDs.
 * NOTE(review): this extract appears to be missing members (e.g. a
 * count field and any wrapping pdu struct) and the closing brace.
 */
3958 struct l2cap_ecred_rsp_data {
3960 struct l2cap_ecred_conn_rsp rsp;
3961 __le16 scid[L2CAP_ECRED_MAX_CID];
/* Per-channel callback that accumulates one channel into the aggregated
 * ECRED response: on overall success the channel's scid is appended to
 * the dcid list; otherwise the channel is torn down.
 * NOTE(review): the "chan->ident = 0;" reset, the "else" before the
 * l2cap_chan_del() path and closing braces are missing from this
 * extract.
 */
3966 static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
3968 struct l2cap_ecred_rsp_data *rsp = data;
3970 if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
3973 /* Reset ident so only one response is sent */
3976 /* Include all channels pending with the same ident */
3977 if (!rsp->pdu.rsp.result)
3978 rsp->pdu.rsp.dcid[rsp->count++] = cpu_to_le16(chan->scid);
3980 l2cap_chan_del(chan, ECONNRESET);
/* Send the deferred aggregated response for an ECRED connection request
 * identified by chan->ident: verify every channel sharing the ident is
 * ready, downgrade the result to AUTHORIZATION failure otherwise, then
 * build the dcid list and transmit one response covering them all.
 * NOTE(review): the "int result;" declaration and the "if (result <
 * 0)" guard before the AUTHORIZATION downgrade are missing from this
 * extract.
 */
3983 void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
3985 struct l2cap_conn *conn = chan->conn;
3986 struct l2cap_ecred_rsp_data data;
3987 u16 id = chan->ident;
3993 BT_DBG("chan %p id %d", chan, id);
3995 memset(&data, 0, sizeof(data));
3997 data.pdu.rsp.mtu = cpu_to_le16(chan->imtu);
3998 data.pdu.rsp.mps = cpu_to_le16(chan->mps);
3999 data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
4000 data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
4002 /* Verify that all channels are ready */
4003 __l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);
4009 data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);
4011 /* Build response */
4012 __l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);
4014 l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
4015 sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
 &data.pdu);
/* Send the deferred BR/EDR Connection Response (or Create Channel
 * Response, by rsp_code) for an accepted channel, then kick off
 * configuration with our first Configure Request if one has not been
 * sent yet.
 * NOTE(review): the "u8 buf[128];"/"u8 rsp_code;" declarations and the
 * condition selecting L2CAP_CREATE_CHAN_RSP vs L2CAP_CONN_RSP are
 * missing from this extract.
 */
4019 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
4021 struct l2cap_conn_rsp rsp;
4022 struct l2cap_conn *conn = chan->conn;
4026 rsp.scid = cpu_to_le16(chan->dcid);
4027 rsp.dcid = cpu_to_le16(chan->scid);
4028 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4029 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4032 rsp_code = L2CAP_CREATE_CHAN_RSP;
4034 rsp_code = L2CAP_CONN_RSP;
4036 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
4038 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* Only the first caller gets to send the initial config request. */
4040 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4043 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4044 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4045 chan->num_conf_req++;
/* Extract RFC (and extended window) parameters from a Configure
 * Response and commit them to the channel; sane spec defaults are used
 * when the remote omitted the options.  Only meaningful for ERTM and
 * streaming channels.
 * NOTE(review): the "int type, olen; unsigned long val;" declarations,
 * the EWS value capture into txwin_ext, "break;"/"return" statements,
 * the trailing "switch (rfc.mode)" header and closing braces are
 * missing from this extract.
 */
4048 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
4052 /* Use sane default values in case a misbehaving remote device
4053 * did not send an RFC or extended window size option.
 */
4055 u16 txwin_ext = chan->ack_win;
4056 struct l2cap_conf_rfc rfc = {
 .mode = chan->mode,
4058 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
4059 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
4060 .max_pdu_size = cpu_to_le16(chan->imtu),
4061 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
 };
4064 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
4066 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
4069 while (len >= L2CAP_CONF_OPT_SIZE) {
4070 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
4075 case L2CAP_CONF_RFC:
4076 if (olen != sizeof(rfc))
4078 memcpy(&rfc, (void *)val, olen);
4080 case L2CAP_CONF_EWS:
4089 case L2CAP_MODE_ERTM:
4090 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
4091 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
4092 chan->mps = le16_to_cpu(rfc.max_pdu_size);
4093 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4094 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
4096 chan->ack_win = min_t(u16, chan->ack_win,
 rfc.txwin_size);
4099 case L2CAP_MODE_STREAMING:
4100 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject for our outstanding Information Request: stop
 * the info timer, mark feature discovery done, and resume pending
 * channel setup.
 * NOTE(review): early "return 0;" statements and the final return are
 * missing from this extract.
 */
4104 static inline int l2cap_command_rej(struct l2cap_conn *conn,
4105 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
 u8 *data)
4108 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
4110 if (cmd_len < sizeof(*rej))
4113 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
4116 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
4117 cmd->ident == conn->info_ident) {
4118 cancel_delayed_work(&conn->info_timer);
4120 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4121 conn->info_ident = 0;
4123 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: look up the listening channel
 * for the PSM, validate link security and the requested source CID,
 * create and register the new channel, pick the result/status
 * (SUCCESS, PEND with AUTHOR/AUTHEN pending, or an error), send the
 * Connection Response, optionally start feature-mask discovery, and
 * fire the first Configure Request on immediate success.  Returns the
 * new channel (or NULL).
 * NOTE(review): several "goto response"/"goto sendresp" jumps, label
 * lines, the dcid assignment from the new channel's scid, security
 * checks around AUTHEN_PEND, the final "return chan;" and closing
 * braces are missing from this extract.
 */
4129 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
4130 struct l2cap_cmd_hdr *cmd,
4131 u8 *data, u8 rsp_code, u8 amp_id)
4133 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
4134 struct l2cap_conn_rsp rsp;
4135 struct l2cap_chan *chan = NULL, *pchan;
4136 int result, status = L2CAP_CS_NO_INFO;
4138 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
4139 __le16 psm = req->psm;
4141 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
4143 /* Check if we have socket listening on psm */
4144 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
4145 &conn->hcon->dst, ACL_LINK);
4147 result = L2CAP_CR_BAD_PSM;
4151 mutex_lock(&conn->chan_lock);
4152 l2cap_chan_lock(pchan);
4154 /* Check if the ACL is secure enough (if not SDP) */
4155 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
4156 !hci_conn_check_link_mode(conn->hcon)) {
4157 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
4158 result = L2CAP_CR_SEC_BLOCK;
/* Default for the remaining failure paths below. */
4162 result = L2CAP_CR_NO_MEM;
4164 /* Check for valid dynamic CID range (as per Erratum 3253) */
4165 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
4166 result = L2CAP_CR_INVALID_SCID;
4170 /* Check if we already have channel with that dcid */
4171 if (__l2cap_get_chan_by_dcid(conn, scid)) {
4172 result = L2CAP_CR_SCID_IN_USE;
4176 chan = pchan->ops->new_connection(pchan);
4180 /* For certain devices (ex: HID mouse), support for authentication,
4181 * pairing and bonding is optional. For such devices, inorder to avoid
4182 * the ACL alive for too long after L2CAP disconnection, reset the ACL
4183 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
 */
4185 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4187 bacpy(&chan->src, &conn->hcon->src);
4188 bacpy(&chan->dst, &conn->hcon->dst);
4189 chan->src_type = bdaddr_src_type(conn->hcon);
4190 chan->dst_type = bdaddr_dst_type(conn->hcon);
4193 chan->local_amp_id = amp_id;
4195 __l2cap_chan_add(conn, chan);
4199 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
4201 chan->ident = cmd->ident;
4203 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
4204 if (l2cap_chan_check_security(chan, false)) {
4205 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
4206 l2cap_state_change(chan, BT_CONNECT2);
4207 result = L2CAP_CR_PEND;
4208 status = L2CAP_CS_AUTHOR_PEND;
4209 chan->ops->defer(chan);
4211 /* Force pending result for AMP controllers.
4212 * The connection will succeed after the
4213 * physical link is up.
 */
4215 if (amp_id == AMP_ID_BREDR) {
4216 l2cap_state_change(chan, BT_CONFIG);
4217 result = L2CAP_CR_SUCCESS;
4219 l2cap_state_change(chan, BT_CONNECT2);
4220 result = L2CAP_CR_PEND;
4222 status = L2CAP_CS_NO_INFO;
4225 l2cap_state_change(chan, BT_CONNECT2);
4226 result = L2CAP_CR_PEND;
4227 status = L2CAP_CS_AUTHEN_PEND;
/* Feature mask exchange not finished yet: hold in CONNECT2. */
4230 l2cap_state_change(chan, BT_CONNECT2);
4231 result = L2CAP_CR_PEND;
4232 status = L2CAP_CS_NO_INFO;
4236 l2cap_chan_unlock(pchan);
4237 mutex_unlock(&conn->chan_lock);
4238 l2cap_chan_put(pchan);
4241 rsp.scid = cpu_to_le16(scid);
4242 rsp.dcid = cpu_to_le16(dcid);
4243 rsp.result = cpu_to_le16(result);
4244 rsp.status = cpu_to_le16(status);
4245 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* Kick off feature-mask discovery if we haven't queried this peer. */
4247 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
4248 struct l2cap_info_req info;
4249 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4251 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4252 conn->info_ident = l2cap_get_ident(conn);
4254 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
4256 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
4257 sizeof(info), &info);
4260 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
4261 result == L2CAP_CR_SUCCESS) {
4263 set_bit(CONF_REQ_SENT, &chan->conf_state);
4264 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4265 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4266 chan->num_conf_req++;
/* Handle an incoming L2CAP Connection Request (BR/EDR).
 * Validates the command length, notifies the management interface of the
 * connected device exactly once per ACL (guarded by the
 * HCI_CONN_MGMT_CONNECTED bit), then delegates channel creation to
 * l2cap_connect() with an L2CAP_CONN_RSP response code.
 * NOTE(review): the leading "NNNN" tokens on each line are extraction
 * artifacts, and several original lines are missing from this view (e.g.
 * braces, the error return for a short command, and the hci_dev_lock()
 * that pairs with the hci_dev_unlock() below) — do not treat this text
 * as compilable as-is.
 */
4272 static int l2cap_connect_req(struct l2cap_conn *conn,
4273 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4275 struct hci_dev *hdev = conn->hcon->hdev;
4276 struct hci_conn *hcon = conn->hcon;
/* Reject malformed (truncated) requests. */
4278 if (cmd_len < sizeof(struct l2cap_conn_req))
/* Report the device as connected to userspace (mgmt) only the first
 * time; test_and_set_bit makes this a one-shot per connection.
 */
4282 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
4283 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4284 mgmt_device_connected(hdev, hcon, 0, NULL, 0);
4285 hci_dev_unlock(hdev);
/* Actual PSM lookup, channel allocation and response are in
 * l2cap_connect(); amp_id 0 means a plain BR/EDR channel.
 */
4287 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle an L2CAP Connection Response / Create Channel Response.
 * Looks up the local channel by source CID (or by command ident if the
 * response carried no valid scid yet), then acts on the result code:
 * success moves the channel to BT_CONFIG and kicks off configuration,
 * pending just marks CONF_CONNECT_PEND, anything else tears the
 * channel down with ECONNREFUSED.
 * (Extract note: braces, switch header, goto labels and some error
 * paths are elided; leading numbers are artifacts.)
 */
4291 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
4292 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4295 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4296 u16 scid, dcid, result, status;
4297 struct l2cap_chan *chan;
4301 if (cmd_len < sizeof(*rsp))
4304 scid = __le16_to_cpu(rsp->scid);
4305 dcid = __le16_to_cpu(rsp->dcid);
4306 result = __le16_to_cpu(rsp->result);
4307 status = __le16_to_cpu(rsp->status);
/* A successful response must carry a destination CID from the
 * dynamically-allocated range; anything else is protocol-invalid.
 */
4309 if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
4310 dcid > L2CAP_CID_DYN_END))
4313 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4314 dcid, scid, result, status);
4316 mutex_lock(&conn->chan_lock);
/* Prefer lookup by our scid; fall back to the signalling ident used
 * when the request was sent.
 */
4319 chan = __l2cap_get_chan_by_scid(conn, scid);
4325 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
/* Take a reference only if the channel is not already being freed. */
4332 chan = l2cap_chan_hold_unless_zero(chan);
4340 l2cap_chan_lock(chan);
4343 case L2CAP_CR_SUCCESS:
/* Duplicate dcid on this connection — refuse it. */
4344 if (__l2cap_get_chan_by_dcid(conn, dcid)) {
4349 l2cap_state_change(chan, BT_CONFIG);
4352 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Only send the first configure request once. */
4354 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4357 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4358 l2cap_build_conf_req(chan, req, sizeof(req)), req);
4359 chan->num_conf_req++;
/* L2CAP_CR_PEND case: remember we are still waiting. */
4363 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any other result: connection refused. */
4367 l2cap_chan_del(chan, ECONNREFUSED);
4371 l2cap_chan_unlock(chan);
4372 l2cap_chan_put(chan);
4375 mutex_unlock(&conn->chan_lock);
4380 static inline void set_default_fcs(struct l2cap_chan *chan)
4382 /* FCS is enabled only in ERTM or streaming mode, if one or both
4385 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4386 chan->fcs = L2CAP_FCS_NONE;
4387 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4388 chan->fcs = L2CAP_FCS_CRC16;
4391 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4392 u8 ident, u16 flags)
4394 struct l2cap_conn *conn = chan->conn;
4396 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4399 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4400 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4402 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4403 l2cap_build_conf_rsp(chan, data,
4404 L2CAP_CONF_SUCCESS, flags), data);
4407 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4410 struct l2cap_cmd_rej_cid rej;
4412 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4413 rej.scid = __cpu_to_le16(scid);
4414 rej.dcid = __cpu_to_le16(dcid);
4416 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle an incoming L2CAP Configure Request.
 * Accumulates (possibly multi-fragment) configuration options into
 * chan->conf_req, and once the final fragment arrives parses them,
 * answers with a Configure Response, and — when both directions of
 * configuration are done — initializes ERTM state and marks the
 * channel ready. Also triggers our own Configure Request if we have
 * not sent one yet.
 * (Extract note: leading numbers are artifacts; braces, gotos and some
 * statements are elided from this view.)
 */
4419 static inline int l2cap_config_req(struct l2cap_conn *conn,
4420 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4423 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4426 struct l2cap_chan *chan;
4429 if (cmd_len < sizeof(*req))
4432 dcid = __le16_to_cpu(req->dcid);
4433 flags = __le16_to_cpu(req->flags);
4435 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
/* The peer's dcid is our scid; unknown CID gets a Command Reject. */
4437 chan = l2cap_get_chan_by_scid(conn, dcid);
4439 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
/* Configuration is only legal in these states. */
4443 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
4444 chan->state != BT_CONNECTED) {
4445 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4450 /* Reject if config buffer is too small. */
4451 len = cmd_len - sizeof(*req);
4452 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4453 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4454 l2cap_build_conf_rsp(chan, rsp,
4455 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment to the accumulated request. */
4460 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4461 chan->conf_len += len;
4463 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4464 /* Incomplete config. Send empty response. */
4465 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4466 l2cap_build_conf_rsp(chan, rsp,
4467 L2CAP_CONF_SUCCESS, flags), rsp);
4471 /* Complete config. */
4472 len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
/* Negative len from the parser means unacceptable options. */
4474 l2cap_send_disconn_req(chan, ECONNRESET);
4478 chan->ident = cmd->ident;
4479 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4480 if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
4481 chan->num_conf_rsp++;
4483 /* Reset config buffer. */
4486 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both sides configured: finalize FCS/ERTM and go ready. */
4489 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4490 set_default_fcs(chan);
4492 if (chan->mode == L2CAP_MODE_ERTM ||
4493 chan->mode == L2CAP_MODE_STREAMING)
4494 err = l2cap_ertm_init(chan);
4497 l2cap_send_disconn_req(chan, -err);
4499 l2cap_chan_ready(chan);
/* We have not sent our own Configure Request yet — do it now. */
4504 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4506 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4507 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4508 chan->num_conf_req++;
4511 /* Got Conf Rsp PENDING from remote side and assume we sent
4512 Conf Rsp PENDING in the code above */
4513 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4514 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4516 /* check compatibility */
4518 /* Send rsp for BR/EDR channel */
4520 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4522 chan->ident = cmd->ident;
4526 l2cap_chan_unlock(chan);
4527 l2cap_chan_put(chan);
/* Handle an incoming L2CAP Configure Response.
 * Dispatches on the peer's result: SUCCESS records the negotiated RFC
 * options; PENDING may trigger an EFS response or AMP logical-link
 * setup; UNACCEPT re-parses the rejected options and retries with a
 * new Configure Request (bounded by L2CAP_CONF_MAX_CONF_RSP); any
 * other result disconnects. When both directions finish, ERTM is
 * initialized and the channel becomes ready.
 * (Extract note: leading numbers are artifacts; braces/gotos elided.)
 */
4531 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4532 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4535 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4536 u16 scid, flags, result;
4537 struct l2cap_chan *chan;
4538 int len = cmd_len - sizeof(*rsp);
4541 if (cmd_len < sizeof(*rsp))
4544 scid = __le16_to_cpu(rsp->scid);
4545 flags = __le16_to_cpu(rsp->flags);
4546 result = __le16_to_cpu(rsp->result);
4548 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4551 chan = l2cap_get_chan_by_scid(conn, scid);
4556 case L2CAP_CONF_SUCCESS:
/* Peer accepted: extract the agreed RFC (mode) option. */
4557 l2cap_conf_rfc_get(chan, rsp->data, len);
4558 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4561 case L2CAP_CONF_PENDING:
4562 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4564 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4567 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4568 buf, sizeof(buf), &result);
4570 l2cap_send_disconn_req(chan, ECONNRESET);
/* Without a High-Speed ACL we can answer immediately; with EFS
 * over AMP we must first bring up the logical link.
 */
4574 if (!chan->hs_hcon) {
4575 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4578 if (l2cap_check_efs(chan)) {
4579 amp_create_logical_link(chan);
4580 chan->ident = cmd->ident;
4586 case L2CAP_CONF_UNACCEPT:
4587 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Peer's counter-proposal must fit our request buffer. */
4590 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4591 l2cap_send_disconn_req(chan, ECONNRESET);
4595 /* throw out any old stored conf requests */
4596 result = L2CAP_CONF_SUCCESS;
4597 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4598 req, sizeof(req), &result);
4600 l2cap_send_disconn_req(chan, ECONNRESET);
/* Retry configuration with adjusted options. */
4604 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4605 L2CAP_CONF_REQ, len, req);
4606 chan->num_conf_req++;
4607 if (result != L2CAP_CONF_SUCCESS)
/* default: unrecoverable result — tear the channel down. */
4614 l2cap_chan_set_err(chan, ECONNRESET);
4616 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4617 l2cap_send_disconn_req(chan, ECONNRESET);
/* More fragments to come: wait for the final response. */
4621 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4624 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4626 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4627 set_default_fcs(chan);
4629 if (chan->mode == L2CAP_MODE_ERTM ||
4630 chan->mode == L2CAP_MODE_STREAMING)
4631 err = l2cap_ertm_init(chan);
4634 l2cap_send_disconn_req(chan, -err);
4636 l2cap_chan_ready(chan);
4640 l2cap_chan_unlock(chan);
4641 l2cap_chan_put(chan);
/* Handle an incoming L2CAP Disconnection Request.
 * Acknowledges with a Disconnection Response (CIDs swapped back to the
 * peer's view), then removes the channel with ECONNRESET. Note the
 * unlock/relock dance: chan_lock must be taken under conn->chan_lock
 * in that order, so the channel lock is dropped and re-acquired.
 * (Extract note: leading numbers are artifacts; braces/returns elided.)
 */
4645 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4646 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4649 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4650 struct l2cap_disconn_rsp rsp;
4652 struct l2cap_chan *chan;
/* Exact-length check: this PDU has a fixed size. */
4654 if (cmd_len != sizeof(*req))
4657 scid = __le16_to_cpu(req->scid);
4658 dcid = __le16_to_cpu(req->dcid);
4660 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4662 chan = l2cap_get_chan_by_scid(conn, dcid);
4664 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
/* Respond with the CID pair as the peer expects to see it. */
4668 rsp.dcid = cpu_to_le16(chan->scid);
4669 rsp.scid = cpu_to_le16(chan->dcid);
4670 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4672 chan->ops->set_shutdown(chan);
/* Re-acquire locks in the required order before deleting. */
4674 l2cap_chan_unlock(chan);
4675 mutex_lock(&conn->chan_lock);
4676 l2cap_chan_lock(chan);
4677 l2cap_chan_del(chan, ECONNRESET);
4678 mutex_unlock(&conn->chan_lock);
4680 chan->ops->close(chan);
4682 l2cap_chan_unlock(chan);
4683 l2cap_chan_put(chan);
/* Handle an incoming L2CAP Disconnection Response.
 * Completes a disconnect we initiated: the channel must already be in
 * BT_DISCONN, otherwise the response is ignored. Deletion follows the
 * same unlock/relock ordering as l2cap_disconnect_req().
 * (Extract note: leading numbers are artifacts; braces/returns elided.)
 */
4688 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4689 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4692 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4694 struct l2cap_chan *chan;
4696 if (cmd_len != sizeof(*rsp))
4699 scid = __le16_to_cpu(rsp->scid);
4700 dcid = __le16_to_cpu(rsp->dcid);
4702 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4704 chan = l2cap_get_chan_by_scid(conn, scid);
/* Unsolicited response (we never sent a Disconnect Request): drop. */
4709 if (chan->state != BT_DISCONN) {
4710 l2cap_chan_unlock(chan);
4711 l2cap_chan_put(chan);
/* Take conn->chan_lock before chan lock, then delete cleanly. */
4715 l2cap_chan_unlock(chan);
4716 mutex_lock(&conn->chan_lock);
4717 l2cap_chan_lock(chan);
4718 l2cap_chan_del(chan, 0);
4719 mutex_unlock(&conn->chan_lock);
4721 chan->ops->close(chan);
4723 l2cap_chan_unlock(chan);
4724 l2cap_chan_put(chan);
/* Handle an incoming L2CAP Information Request.
 * Supports two query types: the extended feature mask (our static
 * l2cap_feat_mask plus ERTM/streaming and, with A2MP, extended
 * flow/window bits) and the fixed-channel bitmap. Any other type is
 * answered with L2CAP_IR_NOTSUPP.
 * (Extract note: leading numbers are artifacts; buffer declarations
 * and some braces are elided from this view.)
 */
4729 static inline int l2cap_information_req(struct l2cap_conn *conn,
4730 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4733 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4736 if (cmd_len != sizeof(*req))
4739 type = __le16_to_cpu(req->type);
4741 BT_DBG("type 0x%4.4x", type);
4743 if (type == L2CAP_IT_FEAT_MASK) {
4745 u32 feat_mask = l2cap_feat_mask;
4746 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4747 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4748 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Always advertise ERTM/streaming; A2MP adds ext flow/window. */
4750 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4752 if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4753 feat_mask |= L2CAP_FEAT_EXT_FLOW
4754 | L2CAP_FEAT_EXT_WINDOW;
/* Feature mask is a little-endian u32 in the response payload. */
4756 put_unaligned_le32(feat_mask, rsp->data);
4757 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4759 } else if (type == L2CAP_IT_FIXED_CHAN) {
4761 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4763 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4764 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* First octet is our fixed-channel bitmap; remaining 7 are zero. */
4765 rsp->data[0] = conn->local_fixed_chan;
4766 memset(rsp->data + 1, 0, 7);
4767 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4770 struct l2cap_info_rsp rsp;
4771 rsp.type = cpu_to_le16(type);
4772 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4773 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an incoming L2CAP Information Response.
 * Info req/rsp exchanges are connection-scoped (not tied to a channel),
 * so the ident and a "not already done" check gate processing. A
 * feature-mask reply may chain into a fixed-channel query; once the
 * exchange completes (or fails) the pending-channel machinery is
 * restarted via l2cap_conn_start().
 * (Extract note: leading numbers are artifacts; switch header, braces
 * and returns are elided from this view.)
 */
4780 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4781 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4784 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4787 if (cmd_len < sizeof(*rsp))
4790 type = __le16_to_cpu(rsp->type);
4791 result = __le16_to_cpu(rsp->result);
4793 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4795 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4796 if (cmd->ident != conn->info_ident ||
4797 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4800 cancel_delayed_work(&conn->info_timer);
/* Peer refused the query: mark done and resume channel setup. */
4802 if (result != L2CAP_IR_SUCCESS) {
4803 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4804 conn->info_ident = 0;
4806 l2cap_conn_start(conn);
4812 case L2CAP_IT_FEAT_MASK:
4813 conn->feat_mask = get_unaligned_le32(rsp->data);
/* If the peer supports fixed channels, ask which ones next. */
4815 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4816 struct l2cap_info_req req;
4817 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4819 conn->info_ident = l2cap_get_ident(conn);
4821 l2cap_send_cmd(conn, conn->info_ident,
4822 L2CAP_INFO_REQ, sizeof(req), &req);
/* else: exchange complete without fixed-channel info. */
4824 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4825 conn->info_ident = 0;
4827 l2cap_conn_start(conn);
4831 case L2CAP_IT_FIXED_CHAN:
4832 conn->remote_fixed_chan = rsp->data[0];
4833 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4834 conn->info_ident = 0;
4836 l2cap_conn_start(conn);
/* Handle an incoming L2CAP Create Channel Request (AMP / High Speed).
 * amp_id == AMP_ID_BREDR falls back to a normal BR/EDR connect. For a
 * real AMP controller id, the controller must exist, be of type
 * HCI_AMP and be up; otherwise the request is answered with
 * L2CAP_CR_BAD_AMP. On success, the created BR/EDR channel is linked
 * to the AMP manager and the existing high-speed ACL (hs_hcon).
 * (Extract note: leading numbers are artifacts; error paths, gotos and
 * hci_dev_put() calls are elided from this view.)
 */
4843 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4844 struct l2cap_cmd_hdr *cmd,
4845 u16 cmd_len, void *data)
4847 struct l2cap_create_chan_req *req = data;
4848 struct l2cap_create_chan_rsp rsp;
4849 struct l2cap_chan *chan;
4850 struct hci_dev *hdev;
4853 if (cmd_len != sizeof(*req))
/* Only meaningful when we advertise the A2MP fixed channel. */
4856 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4859 psm = le16_to_cpu(req->psm);
4860 scid = le16_to_cpu(req->scid);
4862 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4864 /* For controller id 0 make BR/EDR connection */
4865 if (req->amp_id == AMP_ID_BREDR) {
4866 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4871 /* Validate AMP controller id */
4872 hdev = hci_dev_get(req->amp_id);
4876 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4881 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4884 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4885 struct hci_conn *hs_hcon;
/* The AMP (high-speed) ACL to the same peer must already exist. */
4887 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4891 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4896 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4898 mgr->bredr_chan = chan;
4899 chan->hs_hcon = hs_hcon;
4900 chan->fcs = L2CAP_FCS_NONE;
/* AMP data path uses the controller's block MTU. */
4901 conn->mtu = hdev->block_mtu;
/* Error reply path: reject with BAD_AMP. */
4910 rsp.scid = cpu_to_le16(scid);
4911 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4912 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4914 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4920 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4922 struct l2cap_move_chan_req req;
4925 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4927 ident = l2cap_get_ident(chan->conn);
4928 chan->ident = ident;
4930 req.icid = cpu_to_le16(chan->scid);
4931 req.dest_amp_id = dest_amp_id;
4933 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4936 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4939 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4941 struct l2cap_move_chan_rsp rsp;
4943 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4945 rsp.icid = cpu_to_le16(chan->dcid);
4946 rsp.result = cpu_to_le16(result);
4948 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4952 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4954 struct l2cap_move_chan_cfm cfm;
4956 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4958 chan->ident = l2cap_get_ident(chan->conn);
4960 cfm.icid = cpu_to_le16(chan->scid);
4961 cfm.result = cpu_to_le16(result);
4963 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4966 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4969 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4971 struct l2cap_move_chan_cfm cfm;
4973 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4975 cfm.icid = cpu_to_le16(icid);
4976 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4978 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4982 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4985 struct l2cap_move_chan_cfm_rsp rsp;
4987 BT_DBG("icid 0x%4.4x", icid);
4989 rsp.icid = cpu_to_le16(icid);
4990 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4993 static void __release_logical_link(struct l2cap_chan *chan)
4995 chan->hs_hchan = NULL;
4996 chan->hs_hcon = NULL;
4998 /* Placeholder - release the logical link */
/* Handle failure of AMP logical-link setup for @chan.
 * If the channel never reached BT_CONNECTED, the channel creation
 * itself failed and the channel is disconnected. Otherwise this was a
 * channel *move*: the responder reports "not supported", while the
 * initiator cleans up and sends an UNCONFIRMED confirmation if the
 * peer had already answered pending/success.
 * (Extract note: leading numbers are artifacts; braces/returns elided.)
 */
5001 static void l2cap_logical_fail(struct l2cap_chan *chan)
5003 /* Logical link setup failed */
5004 if (chan->state != BT_CONNECTED) {
5005 /* Create channel failure, disconnect */
5006 l2cap_send_disconn_req(chan, ECONNRESET);
5010 switch (chan->move_role) {
5011 case L2CAP_MOVE_ROLE_RESPONDER:
5012 l2cap_move_done(chan);
5013 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
5015 case L2CAP_MOVE_ROLE_INITIATOR:
5016 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
5017 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
5018 /* Remote has only sent pending or
5019 * success responses, clean up
5021 l2cap_move_done(chan);
5024 /* Other amp move states imply that the move
5025 * has already aborted
5027 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Complete channel creation once the AMP logical link is up.
 * Binds the hci_chan to the channel, sends the deferred EFS Configure
 * Response, and — if the peer's configuration is already done — runs
 * ERTM init and marks the channel ready.
 * (Extract note: leading numbers are artifacts; braces elided.)
 */
5032 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
5033 struct hci_chan *hchan)
5035 struct l2cap_conf_rsp rsp;
5037 chan->hs_hchan = hchan;
5038 chan->hs_hcon->l2cap_data = chan->conn;
/* chan->ident was saved when the response was deferred. */
5040 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
5042 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
5045 set_default_fcs(chan);
5047 err = l2cap_ertm_init(chan);
5049 l2cap_send_disconn_req(chan, -err);
5051 l2cap_chan_ready(chan);
/* Advance the channel-move state machine once the AMP logical link is
 * ready. Depending on the current move_state this either waits for the
 * peer's success response, defers for local busy, or sends the
 * confirmation (initiator) / success response (responder). Any
 * unexpected state aborts: the logical link is released and the move
 * state reset to STABLE.
 * (Extract note: leading numbers are artifacts; break statements and
 * braces are elided from this view.)
 */
5055 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
5056 struct hci_chan *hchan)
5058 chan->hs_hcon = hchan->conn;
5059 chan->hs_hcon->l2cap_data = chan->conn;
5061 BT_DBG("move_state %d", chan->move_state);
5063 switch (chan->move_state) {
5064 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5065 /* Move confirm will be sent after a success
5066 * response is received
5068 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5070 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
5071 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5072 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5073 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5074 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5075 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5076 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5077 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5078 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5082 /* Move was not in expected state, free the channel */
5083 __release_logical_link(chan);
5085 chan->move_state = L2CAP_MOVE_STABLE;
/* Call with chan locked */
/* Logical-link confirmation callback from the AMP layer.
 * Non-zero status means setup failed: run the failure path and drop
 * the link references. On success, a channel that is not yet
 * BT_CONNECTED finishes *creation* (unless it is a plain BR/EDR
 * channel, where the logical link is irrelevant); a connected channel
 * finishes a *move*.
 * (Extract note: leading numbers are artifacts; braces and the status
 * test line are elided from this view.)
 */
5090 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
5093 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
5096 l2cap_logical_fail(chan);
5097 __release_logical_link(chan);
5101 if (chan->state != BT_CONNECTED) {
5102 /* Ignore logical link if channel is on BR/EDR */
5103 if (chan->local_amp_id != AMP_ID_BREDR)
5104 l2cap_logical_finish_create(chan, hchan);
5106 l2cap_logical_finish_move(chan, hchan);
/* Begin moving @chan between controllers as the initiator.
 * From BR/EDR: only proceeds when the channel policy prefers AMP, and
 * waits for physical-link preparation. From an AMP controller: moves
 * straight back to BR/EDR (dest id 0) and sends the Move Channel
 * Request immediately.
 * (Extract note: leading numbers are artifacts; braces/else elided.)
 */
5110 void l2cap_move_start(struct l2cap_chan *chan)
5112 BT_DBG("chan %p", chan);
5114 if (chan->local_amp_id == AMP_ID_BREDR) {
/* Respect channel policy: no move unless AMP is preferred. */
5115 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
5117 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5118 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5119 /* Placeholder - start physical link setup */
5121 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5122 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5124 l2cap_move_setup(chan);
/* Destination 0 == BR/EDR controller. */
5125 l2cap_send_move_chan_req(chan, 0);
/* Continue channel creation after physical-link setup on an AMP.
 * Outgoing channel (BT_CONNECT): on success send a Create Channel
 * Request toward the remote AMP; on failure fall back to a plain
 * BR/EDR Connection Request. Incoming channel: answer the pending
 * Create Channel Request with success or "no resources", and on
 * success start configuration.
 * (Extract note: leading numbers are artifacts; braces, returns and
 * the config buffer declaration are elided from this view.)
 */
5129 static void l2cap_do_create(struct l2cap_chan *chan, int result,
5130 u8 local_amp_id, u8 remote_amp_id)
5132 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
5133 local_amp_id, remote_amp_id);
/* AMP channels never use FCS. */
5135 chan->fcs = L2CAP_FCS_NONE;
5137 /* Outgoing channel on AMP */
5138 if (chan->state == BT_CONNECT) {
5139 if (result == L2CAP_CR_SUCCESS) {
5140 chan->local_amp_id = local_amp_id;
5141 l2cap_send_create_chan_req(chan, remote_amp_id);
5143 /* Revert to BR/EDR connect */
5144 l2cap_send_conn_req(chan);
5150 /* Incoming channel on AMP */
5151 if (__l2cap_no_conn_pending(chan)) {
5152 struct l2cap_conn_rsp rsp;
5154 rsp.scid = cpu_to_le16(chan->dcid);
5155 rsp.dcid = cpu_to_le16(chan->scid);
5157 if (result == L2CAP_CR_SUCCESS) {
5158 /* Send successful response */
5159 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
5160 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5162 /* Send negative response */
5163 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
5164 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5167 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
/* On success, enter BT_CONFIG and fire the first Configure Request. */
5170 if (result == L2CAP_CR_SUCCESS) {
5171 l2cap_state_change(chan, BT_CONFIG);
5172 set_bit(CONF_REQ_SENT, &chan->conf_state);
5173 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
5175 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
5176 chan->num_conf_req++;
5181 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
5184 l2cap_move_setup(chan);
5185 chan->move_id = local_amp_id;
5186 chan->move_state = L2CAP_MOVE_WAIT_RSP;
5188 l2cap_send_move_chan_req(chan, remote_amp_id);
/* Responder side: react to physical-link readiness for a move.
 * With a connected logical link, binds it to the channel, answers the
 * move with SUCCESS and confirms via l2cap_logical_cfm(); otherwise
 * waits for the logical link, or rejects the move with NOT_ALLOWED.
 * (Extract note: leading numbers are artifacts; the hchan acquisition
 * and surrounding braces are elided — hchan is NULL-placeholder here.)
 */
5191 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
5193 struct hci_chan *hchan = NULL;
5195 /* Placeholder - get hci_chan for logical link */
5198 if (hchan->state == BT_CONNECTED) {
5199 /* Logical link is ready to go */
5200 chan->hs_hcon = hchan->conn;
5201 chan->hs_hcon->l2cap_data = chan->conn;
5202 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5203 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5205 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5207 /* Wait for logical link to be ready */
5208 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5211 /* Logical link not available */
5212 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort an in-progress channel move.
 * A responder still owes the peer a Move Channel Response: BAD_ID for
 * -EINVAL, NOT_ALLOWED otherwise. Move bookkeeping is reset to
 * ROLE_NONE/STABLE and any ERTM traffic stalled by the move is
 * restarted.
 * (Extract note: leading numbers are artifacts; braces and the
 * rsp_result declaration are elided from this view.)
 */
5216 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5218 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5220 if (result == -EINVAL)
5221 rsp_result = L2CAP_MR_BAD_ID;
5223 rsp_result = L2CAP_MR_NOT_ALLOWED;
5225 l2cap_send_move_chan_rsp(chan, rsp_result);
5228 chan->move_role = L2CAP_MOVE_ROLE_NONE;
5229 chan->move_state = L2CAP_MOVE_STABLE;
5231 /* Restart data transmission */
5232 l2cap_ertm_send(chan);
/* Invoke with locked chan */
/* Physical-link confirmation callback from the AMP layer.
 * Ignored for dying channels. A channel not yet connected continues
 * creation; a failed result on a connected channel cancels the move;
 * otherwise the move proceeds per our role (initiator or responder),
 * with unknown roles cancelling defensively.
 * (Extract note: leading numbers are artifacts; braces, breaks and the
 * default label are elided from this view.)
 */
5236 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
5238 u8 local_amp_id = chan->local_amp_id;
5239 u8 remote_amp_id = chan->remote_amp_id;
5241 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
5242 chan, result, local_amp_id, remote_amp_id);
/* Channel already going away — nothing to do. */
5244 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
5247 if (chan->state != BT_CONNECTED) {
5248 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
5249 } else if (result != L2CAP_MR_SUCCESS) {
5250 l2cap_do_move_cancel(chan, result);
5252 switch (chan->move_role) {
5253 case L2CAP_MOVE_ROLE_INITIATOR:
5254 l2cap_do_move_initiate(chan, local_amp_id,
5257 case L2CAP_MOVE_ROLE_RESPONDER:
5258 l2cap_do_move_respond(chan, result);
5261 l2cap_do_move_cancel(chan, result);
/* Handle an incoming L2CAP Move Channel Request (responder side).
 * Validates that the channel exists, is a dynamic ERTM/streaming
 * channel not pinned to BR/EDR by policy, that the destination AMP id
 * is valid and different from the current controller, and that no move
 * collision is lost (larger bd_addr wins a simultaneous move). Then
 * either completes the move to BR/EDR directly or waits for
 * physical-link preparation on the target AMP.
 * (Extract note: leading numbers are artifacts; braces, hci_dev_put()
 * and goto labels are elided from this view.)
 */
5267 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
5268 struct l2cap_cmd_hdr *cmd,
5269 u16 cmd_len, void *data)
5271 struct l2cap_move_chan_req *req = data;
5272 struct l2cap_move_chan_rsp rsp;
5273 struct l2cap_chan *chan;
5275 u16 result = L2CAP_MR_NOT_ALLOWED;
5277 if (cmd_len != sizeof(*req))
5280 icid = le16_to_cpu(req->icid);
5282 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
/* Moves only make sense when we advertise A2MP support. */
5284 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
5287 chan = l2cap_get_chan_by_dcid(conn, icid);
/* Unknown icid: respond NOT_ALLOWED without a channel object. */
5289 rsp.icid = cpu_to_le16(icid);
5290 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
5291 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
5296 chan->ident = cmd->ident;
/* Only dynamic ERTM/streaming channels that are not policy-pinned
 * to BR/EDR may move.
 */
5298 if (chan->scid < L2CAP_CID_DYN_START ||
5299 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
5300 (chan->mode != L2CAP_MODE_ERTM &&
5301 chan->mode != L2CAP_MODE_STREAMING)) {
5302 result = L2CAP_MR_NOT_ALLOWED;
5303 goto send_move_response;
5306 if (chan->local_amp_id == req->dest_amp_id) {
5307 result = L2CAP_MR_SAME_ID;
5308 goto send_move_response;
5311 if (req->dest_amp_id != AMP_ID_BREDR) {
5312 struct hci_dev *hdev;
5313 hdev = hci_dev_get(req->dest_amp_id);
5314 if (!hdev || hdev->dev_type != HCI_AMP ||
5315 !test_bit(HCI_UP, &hdev->flags)) {
5319 result = L2CAP_MR_BAD_ID;
5320 goto send_move_response;
5325 /* Detect a move collision. Only send a collision response
5326 * if this side has "lost", otherwise proceed with the move.
5327 * The winner has the larger bd_addr.
5329 if ((__chan_is_moving(chan) ||
5330 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5331 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5332 result = L2CAP_MR_COLLISION;
5333 goto send_move_response;
5336 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5337 l2cap_move_setup(chan);
5338 chan->move_id = req->dest_amp_id;
5340 if (req->dest_amp_id == AMP_ID_BREDR) {
5341 /* Moving to BR/EDR */
5342 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5343 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5344 result = L2CAP_MR_PEND;
5346 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5347 result = L2CAP_MR_SUCCESS;
5350 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5351 /* Placeholder - uncomment when amp functions are available */
5352 /*amp_accept_physical(chan, req->dest_amp_id);*/
5353 result = L2CAP_MR_PEND;
5357 l2cap_send_move_chan_rsp(chan, result);
5359 l2cap_chan_unlock(chan);
5360 l2cap_chan_put(chan);
/* Continue a move we initiated after a SUCCESS/PEND Move Channel
 * Response. Advances the initiator state machine: either defers until
 * the logical link or the peer's final success arrives, or — when the
 * logical link is already connected — binds it and sends the Move
 * Channel Confirmation. Unknown states abort the move with an
 * UNCONFIRMED confirmation. An unknown icid is confirmed UNCONFIRMED
 * directly, as the spec requires a confirmation regardless.
 * (Extract note: leading numbers are artifacts; braces, breaks, and
 * the hchan acquisition are elided — hchan is NULL-placeholder here.)
 */
5365 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5367 struct l2cap_chan *chan;
5368 struct hci_chan *hchan = NULL;
5370 chan = l2cap_get_chan_by_scid(conn, icid);
5372 l2cap_send_move_chan_cfm_icid(conn, icid);
/* PEND extends the deadline with the extended response timer. */
5376 __clear_chan_timer(chan);
5377 if (result == L2CAP_MR_PEND)
5378 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5380 switch (chan->move_state) {
5381 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5382 /* Move confirm will be sent when logical link
5385 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5387 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5388 if (result == L2CAP_MR_PEND) {
5390 } else if (test_bit(CONN_LOCAL_BUSY,
5391 &chan->conn_state)) {
5392 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5394 /* Logical link is up or moving to BR/EDR,
5397 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5398 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5401 case L2CAP_MOVE_WAIT_RSP:
5403 if (result == L2CAP_MR_SUCCESS) {
5404 /* Remote is ready, send confirm immediately
5405 * after logical link is ready
5407 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5409 /* Both logical link and move success
5410 * are required to confirm
5412 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5415 /* Placeholder - get hci_chan for logical link */
5417 /* Logical link not available */
5418 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5422 /* If the logical link is not yet connected, do not
5423 * send confirmation.
5425 if (hchan->state != BT_CONNECTED)
5428 /* Logical link is already ready to go */
5430 chan->hs_hcon = hchan->conn;
5431 chan->hs_hcon->l2cap_data = chan->conn;
5433 if (result == L2CAP_MR_SUCCESS) {
5434 /* Can confirm now */
5435 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5437 /* Now only need move success
5440 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5443 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5446 /* Any other amp move state means the move failed. */
5447 chan->move_id = chan->local_amp_id;
5448 l2cap_move_done(chan);
5449 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5452 l2cap_chan_unlock(chan);
5453 l2cap_chan_put(chan);
/* Handle a failed Move Channel Response.
 * Looks the channel up by signalling ident (falling back to a bare
 * icid confirmation if not found). An initiator that lost a collision
 * switches to the responder role; any other failure cancels the move.
 * Either way an UNCONFIRMED confirmation is sent.
 * (Extract note: leading numbers are artifacts; braces/returns elided.)
 */
5456 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5459 struct l2cap_chan *chan;
5461 chan = l2cap_get_chan_by_ident(conn, ident);
5463 /* Could not locate channel, icid is best guess */
5464 l2cap_send_move_chan_cfm_icid(conn, icid);
5468 __clear_chan_timer(chan);
5470 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5471 if (result == L2CAP_MR_COLLISION) {
/* We lost the collision: let the peer drive the move. */
5472 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5474 /* Cleanup - cancel move */
5475 chan->move_id = chan->local_amp_id;
5476 l2cap_move_done(chan);
5480 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5482 l2cap_chan_unlock(chan);
5483 l2cap_chan_put(chan);
/* Handle an incoming L2CAP Move Channel Response.
 * Dispatches: SUCCESS/PEND continues the move via
 * l2cap_move_continue(), anything else goes to l2cap_move_fail().
 * (Extract note: leading numbers are artifacts; variable declarations,
 * braces and the return are elided from this view.)
 */
5486 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5487 struct l2cap_cmd_hdr *cmd,
5488 u16 cmd_len, void *data)
5490 struct l2cap_move_chan_rsp *rsp = data;
5493 if (cmd_len != sizeof(*rsp))
5496 icid = le16_to_cpu(rsp->icid);
5497 result = le16_to_cpu(rsp->result);
5499 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5501 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5502 l2cap_move_continue(conn, icid, result);
5504 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle an incoming L2CAP Move Channel Confirmation (responder side).
 * In WAIT_CONFIRM state, a CONFIRMED result commits the channel to the
 * new controller (releasing the logical link when landing on BR/EDR),
 * an UNCONFIRMED result rolls move_id back, and either way the move is
 * finished. A confirmation response is always sent — even for an
 * unknown icid, as the spec requires.
 * (Extract note: leading numbers are artifacts; braces/else elided.)
 */
5509 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5510 struct l2cap_cmd_hdr *cmd,
5511 u16 cmd_len, void *data)
5513 struct l2cap_move_chan_cfm *cfm = data;
5514 struct l2cap_chan *chan;
5517 if (cmd_len != sizeof(*cfm))
5520 icid = le16_to_cpu(cfm->icid);
5521 result = le16_to_cpu(cfm->result);
5523 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5525 chan = l2cap_get_chan_by_dcid(conn, icid);
5527 /* Spec requires a response even if the icid was not found */
5528 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5532 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5533 if (result == L2CAP_MC_CONFIRMED) {
5534 chan->local_amp_id = chan->move_id;
5535 if (chan->local_amp_id == AMP_ID_BREDR)
5536 __release_logical_link(chan);
/* UNCONFIRMED: stay on the current controller. */
5538 chan->move_id = chan->local_amp_id;
5541 l2cap_move_done(chan);
5544 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5546 l2cap_chan_unlock(chan);
5547 l2cap_chan_put(chan);
/* Handle an incoming L2CAP Move Channel Confirmation Response
 * (initiator side). Stops the move timer; if we were waiting for this
 * acknowledgement, commits local_amp_id to the move target, releases
 * the logical link when the channel ends up on BR/EDR, and finishes
 * the move.
 * (Extract note: leading numbers are artifacts; braces/returns elided.)
 */
5552 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5553 struct l2cap_cmd_hdr *cmd,
5554 u16 cmd_len, void *data)
5556 struct l2cap_move_chan_cfm_rsp *rsp = data;
5557 struct l2cap_chan *chan;
5560 if (cmd_len != sizeof(*rsp))
5563 icid = le16_to_cpu(rsp->icid);
5565 BT_DBG("icid 0x%4.4x", icid);
5567 chan = l2cap_get_chan_by_scid(conn, icid);
5571 __clear_chan_timer(chan);
5573 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5574 chan->local_amp_id = chan->move_id;
5576 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5577 __release_logical_link(chan);
5579 l2cap_move_done(chan);
5582 l2cap_chan_unlock(chan);
5583 l2cap_chan_put(chan);
/* Handle an LE Connection Parameter Update Request.
 * Only legal when we are the LE central (HCI_ROLE_MASTER). The
 * requested interval range is validated against the current connection
 * maximum and hci_check_conn_params(); the response carries
 * accepted/rejected accordingly, and on acceptance the controller is
 * asked to update the link and userspace is notified of the new
 * parameters.
 * (Extract note: leading numbers are artifacts; braces, returns, and
 * the accept-branch guard around the final update are elided.)
 */
5588 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5589 struct l2cap_cmd_hdr *cmd,
5590 u16 cmd_len, u8 *data)
5592 struct hci_conn *hcon = conn->hcon;
5593 struct l2cap_conn_param_update_req *req;
5594 struct l2cap_conn_param_update_rsp rsp;
5595 u16 min, max, latency, to_multiplier;
/* Peripherals request updates; only the central may grant them. */
5598 if (hcon->role != HCI_ROLE_MASTER)
5601 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5604 req = (struct l2cap_conn_param_update_req *) data;
5605 min = __le16_to_cpu(req->min);
5606 max = __le16_to_cpu(req->max);
5607 latency = __le16_to_cpu(req->latency);
5608 to_multiplier = __le16_to_cpu(req->to_multiplier);
5610 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5611 min, max, latency, to_multiplier);
5613 memset(&rsp, 0, sizeof(rsp));
/* Never widen the interval beyond what the connection allows. */
5615 if (max > hcon->le_conn_max_interval) {
5616 BT_DBG("requested connection interval exceeds current bounds.");
5619 err = hci_check_conn_params(min, max, latency, to_multiplier);
5623 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5625 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5627 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: push the update to the controller and report the new
 * parameters (with the controller's store hint) to userspace.
 */
5633 store_hint = hci_le_conn_update(hcon, min, max, latency,
5635 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5636 store_hint, min, max, latency,
/* Handle an LE Credit-Based Connection Response.
 * Sanity-checks the response (MTU/MPS >= 23 and a dcid in the LE
 * dynamic range on success), locates the channel by the request's
 * ident, then: success adopts the peer's MTU/MPS/credits and readies
 * the channel; an authentication/encryption failure retries at a
 * higher security level via SMP (unless MITM security is already in
 * place, which makes the failure final); anything else refuses the
 * connection.
 * (Extract note: leading numbers are artifacts; braces, gotos and a
 * few assignments, e.g. chan->dcid/remote_mtu, are elided.)
 */
5644 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5645 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5648 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5649 struct hci_conn *hcon = conn->hcon;
5650 u16 dcid, mtu, mps, credits, result;
5651 struct l2cap_chan *chan;
5654 if (cmd_len < sizeof(*rsp))
5657 dcid = __le16_to_cpu(rsp->dcid);
5658 mtu = __le16_to_cpu(rsp->mtu);
5659 mps = __le16_to_cpu(rsp->mps);
5660 credits = __le16_to_cpu(rsp->credits);
5661 result = __le16_to_cpu(rsp->result);
/* Spec minimums: MTU and MPS must each be at least 23 octets, and a
 * successful dcid must come from the LE dynamic CID range.
 */
5663 if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
5664 dcid < L2CAP_CID_DYN_START ||
5665 dcid > L2CAP_CID_LE_DYN_END))
5668 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5669 dcid, mtu, mps, credits, result);
5671 mutex_lock(&conn->chan_lock);
5673 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5681 l2cap_chan_lock(chan);
5684 case L2CAP_CR_LE_SUCCESS:
/* Reject a dcid already in use on this connection. */
5685 if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5693 chan->remote_mps = mps;
5694 chan->tx_credits = credits;
5695 l2cap_chan_ready(chan);
5698 case L2CAP_CR_LE_AUTHENTICATION:
5699 case L2CAP_CR_LE_ENCRYPTION:
5700 /* If we already have MITM protection we can't do
5703 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5704 l2cap_chan_del(chan, ECONNREFUSED);
/* Bump security one level and let SMP re-establish it. */
5708 sec_level = hcon->sec_level + 1;
5709 if (chan->sec_level < sec_level)
5710 chan->sec_level = sec_level;
5712 /* We'll need to send a new Connect Request */
5713 clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5715 smp_conn_security(hcon, chan->sec_level);
/* default: any other result refuses the connection. */
5719 l2cap_chan_del(chan, ECONNREFUSED);
5723 l2cap_chan_unlock(chan);
5726 mutex_unlock(&conn->chan_lock);
/* Dispatch a single BR/EDR signaling command (CID 0x0001) to its
 * handler.  Request handlers return an error code so the caller can
 * generate a Command Reject; response handlers return void since a
 * reject must not be sent in reply to a response.
 */
5731 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5732 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5737 switch (cmd->code) {
5738 case L2CAP_COMMAND_REJ:
5739 l2cap_command_rej(conn, cmd, cmd_len, data);
5742 case L2CAP_CONN_REQ:
5743 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5746 case L2CAP_CONN_RSP:
5747 case L2CAP_CREATE_CHAN_RSP:
5748 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5751 case L2CAP_CONF_REQ:
5752 err = l2cap_config_req(conn, cmd, cmd_len, data);
5755 case L2CAP_CONF_RSP:
5756 l2cap_config_rsp(conn, cmd, cmd_len, data);
5759 case L2CAP_DISCONN_REQ:
5760 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5763 case L2CAP_DISCONN_RSP:
5764 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5767 case L2CAP_ECHO_REQ:
/* Echo: reflect the payload straight back with the same ident */
5768 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5771 case L2CAP_ECHO_RSP:
5774 case L2CAP_INFO_REQ:
5775 err = l2cap_information_req(conn, cmd, cmd_len, data);
5778 case L2CAP_INFO_RSP:
5779 l2cap_information_rsp(conn, cmd, cmd_len, data);
/* AMP channel-move signaling (create/move/confirm) */
5782 case L2CAP_CREATE_CHAN_REQ:
5783 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5786 case L2CAP_MOVE_CHAN_REQ:
5787 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5790 case L2CAP_MOVE_CHAN_RSP:
5791 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5794 case L2CAP_MOVE_CHAN_CFM:
5795 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5798 case L2CAP_MOVE_CHAN_CFM_RSP:
5799 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5803 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Handle an incoming LE Credit Based Connection Request.  Validates
 * MTU/MPS minimums, the SPSM range and the source CID, finds a listening
 * socket for the PSM, checks SMP security, creates the child channel and
 * either defers (BT_CONNECT2, response sent later) or responds
 * immediately with success.  Failure paths respond with the appropriate
 * L2CAP_CR_LE_* result.  (Several original lines are elided.)
 */
5811 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5812 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5815 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5816 struct l2cap_le_conn_rsp rsp;
5817 struct l2cap_chan *chan, *pchan;
5818 u16 dcid, scid, credits, mtu, mps;
5822 if (cmd_len != sizeof(*req))
5825 scid = __le16_to_cpu(req->scid);
5826 mtu = __le16_to_cpu(req->mtu);
5827 mps = __le16_to_cpu(req->mps);
/* 23 is the spec minimum MTU/MPS for LE credit-based channels */
5832 if (mtu < 23 || mps < 23)
5835 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5838 /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
 *
5841 * Valid range: 0x0001-0x00ff
 *
5843 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
 */
5845 if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
5846 result = L2CAP_CR_LE_BAD_PSM;
5851 /* Check if we have socket listening on psm */
5852 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5853 &conn->hcon->dst, LE_LINK);
5855 result = L2CAP_CR_LE_BAD_PSM;
5860 mutex_lock(&conn->chan_lock);
5861 l2cap_chan_lock(pchan);
/* Refuse if the link security does not meet the listener's needs */
5863 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5865 result = L2CAP_CR_LE_AUTHENTICATION;
5867 goto response_unlock;
5870 /* Check for valid dynamic CID range */
5871 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5872 result = L2CAP_CR_LE_INVALID_SCID;
5874 goto response_unlock;
5877 /* Check if we already have channel with that dcid */
5878 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5879 result = L2CAP_CR_LE_SCID_IN_USE;
5881 goto response_unlock;
/* Spawn the child channel from the listening (parent) channel */
5884 chan = pchan->ops->new_connection(pchan);
5886 result = L2CAP_CR_LE_NO_MEM;
5887 goto response_unlock;
5890 bacpy(&chan->src, &conn->hcon->src);
5891 bacpy(&chan->dst, &conn->hcon->dst);
5892 chan->src_type = bdaddr_src_type(conn->hcon);
5893 chan->dst_type = bdaddr_dst_type(conn->hcon);
5897 chan->remote_mps = mps;
5899 __l2cap_chan_add(conn, chan);
5901 l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));
5904 credits = chan->rx_credits;
5906 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5908 chan->ident = cmd->ident;
5910 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5911 l2cap_state_change(chan, BT_CONNECT2);
5912 /* The following result value is actually not defined
5913 * for LE CoC but we use it to let the function know
5914 * that it should bail out after doing its cleanup
5915 * instead of sending a response.
 */
5917 result = L2CAP_CR_PEND;
5918 chan->ops->defer(chan);
5920 l2cap_chan_ready(chan);
5921 result = L2CAP_CR_LE_SUCCESS;
5925 l2cap_chan_unlock(pchan);
5926 mutex_unlock(&conn->chan_lock);
5927 l2cap_chan_put(pchan);
/* Deferred setup: the response will be sent later by userspace */
5929 if (result == L2CAP_CR_PEND)
5934 rsp.mtu = cpu_to_le16(chan->imtu);
5935 rsp.mps = cpu_to_le16(chan->mps);
5941 rsp.dcid = cpu_to_le16(dcid);
5942 rsp.credits = cpu_to_le16(credits);
5943 rsp.result = cpu_to_le16(result);
5945 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/* Handle an LE Flow Control Credit packet: add the peer-granted credits
 * to the channel's TX budget and resume transmission.  A grant that
 * would push the total above LE_FLOWCTL_MAX_CREDITS (65535) is a
 * protocol violation and disconnects the channel.
 */
5950 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5951 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5954 struct l2cap_le_credits *pkt;
5955 struct l2cap_chan *chan;
5956 u16 cid, credits, max_credits;
5958 if (cmd_len != sizeof(*pkt))
5961 pkt = (struct l2cap_le_credits *) data;
5962 cid = __le16_to_cpu(pkt->cid);
5963 credits = __le16_to_cpu(pkt->credits);
5965 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5967 chan = l2cap_get_chan_by_dcid(conn, cid);
/* Room left before the 16-bit credit counter would overflow */
5971 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5972 if (credits > max_credits) {
5973 BT_ERR("LE credits overflow");
5974 l2cap_send_disconn_req(chan, ECONNRESET);
5976 /* Return 0 so that we don't trigger an unnecessary
5977 * command reject packet.
 */
5982 chan->tx_credits += credits;
5984 /* Resume sending */
5985 l2cap_le_flowctl_send(chan);
5987 if (chan->tx_credits)
5988 chan->ops->resume(chan);
5991 l2cap_chan_unlock(chan);
5992 l2cap_chan_put(chan);
/* Handle an Enhanced Credit Based (ECRED) Connection Request, which can
 * open up to L2CAP_ECRED_MAX_CID channels at once.  After validating the
 * payload length, per-request MTU/MPS minimums and the SPSM, each
 * requested SCID is checked and a child channel created; the response
 * carries one DCID slot per requested SCID (0x0000 for refused entries).
 * (Several original lines are elided in this excerpt.)
 */
5997 static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
5998 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6001 struct l2cap_ecred_conn_req *req = (void *) data;
6003 struct l2cap_ecred_conn_rsp rsp;
6004 __le16 dcid[L2CAP_ECRED_MAX_CID];
6006 struct l2cap_chan *chan, *pchan;
/* Payload must be the fixed header plus a whole number of u16 SCIDs */
6016 if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
6017 result = L2CAP_CR_LE_INVALID_PARAMS;
6021 cmd_len -= sizeof(*req);
6022 num_scid = cmd_len / sizeof(u16);
6024 if (num_scid > ARRAY_SIZE(pdu.dcid)) {
6025 result = L2CAP_CR_LE_INVALID_PARAMS;
6029 mtu = __le16_to_cpu(req->mtu);
6030 mps = __le16_to_cpu(req->mps);
6032 if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
6033 result = L2CAP_CR_LE_UNACCEPT_PARAMS;
6039 /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
 *
6042 * Valid range: 0x0001-0x00ff
 *
6044 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
 */
6046 if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
6047 result = L2CAP_CR_LE_BAD_PSM;
6051 BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);
6053 memset(&pdu, 0, sizeof(pdu));
6055 /* Check if we have socket listening on psm */
6056 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
6057 &conn->hcon->dst, LE_LINK);
6059 result = L2CAP_CR_LE_BAD_PSM;
6063 mutex_lock(&conn->chan_lock);
6064 l2cap_chan_lock(pchan);
6066 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
6068 result = L2CAP_CR_LE_AUTHENTICATION;
6072 result = L2CAP_CR_LE_SUCCESS;
/* Process every requested SCID; a failed entry leaves its dcid 0 */
6074 for (i = 0; i < num_scid; i++) {
6075 u16 scid = __le16_to_cpu(req->scid[i]);
6077 BT_DBG("scid[%d] 0x%4.4x", i, scid);
6079 pdu.dcid[i] = 0x0000;
6080 len += sizeof(*pdu.dcid);
6082 /* Check for valid dynamic CID range */
6083 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
6084 result = L2CAP_CR_LE_INVALID_SCID;
6088 /* Check if we already have channel with that dcid */
6089 if (__l2cap_get_chan_by_dcid(conn, scid)) {
6090 result = L2CAP_CR_LE_SCID_IN_USE;
6094 chan = pchan->ops->new_connection(pchan);
6096 result = L2CAP_CR_LE_NO_MEM;
6100 bacpy(&chan->src, &conn->hcon->src);
6101 bacpy(&chan->dst, &conn->hcon->dst);
6102 chan->src_type = bdaddr_src_type(conn->hcon);
6103 chan->dst_type = bdaddr_dst_type(conn->hcon);
6107 chan->remote_mps = mps;
6109 __l2cap_chan_add(conn, chan);
6111 l2cap_ecred_init(chan, __le16_to_cpu(req->credits));
/* Fill the shared rsp fields only once (first accepted channel) */
6114 if (!pdu.rsp.credits) {
6115 pdu.rsp.mtu = cpu_to_le16(chan->imtu);
6116 pdu.rsp.mps = cpu_to_le16(chan->mps);
6117 pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
6120 pdu.dcid[i] = cpu_to_le16(chan->scid);
6122 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
6124 chan->ident = cmd->ident;
6125 chan->mode = L2CAP_MODE_EXT_FLOWCTL;
6127 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6128 l2cap_state_change(chan, BT_CONNECT2);
6130 chan->ops->defer(chan);
6132 l2cap_chan_ready(chan);
6137 l2cap_chan_unlock(pchan);
6138 mutex_unlock(&conn->chan_lock);
6139 l2cap_chan_put(pchan);
6142 pdu.rsp.result = cpu_to_le16(result);
/* Response size = fixed header + one u16 DCID per processed SCID */
6147 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
6148 sizeof(pdu.rsp) + len, &pdu);
/* Handle an ECRED Connection Response.  Walks every local channel that
 * is pending on this cmd ident, consumes one DCID from the response per
 * channel, and either brings the channel up (success), retries with a
 * higher security level (auth/encryption failure), or deletes it.
 * (Several original lines are elided in this excerpt.)
 */
6153 static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
6154 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6157 struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6158 struct hci_conn *hcon = conn->hcon;
6159 u16 mtu, mps, credits, result;
6160 struct l2cap_chan *chan, *tmp;
6161 int err = 0, sec_level;
6164 if (cmd_len < sizeof(*rsp))
6167 mtu = __le16_to_cpu(rsp->mtu);
6168 mps = __le16_to_cpu(rsp->mps);
6169 credits = __le16_to_cpu(rsp->credits);
6170 result = __le16_to_cpu(rsp->result);
6172 BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
6175 mutex_lock(&conn->chan_lock);
/* cmd_len now counts only the trailing DCID array */
6177 cmd_len -= sizeof(*rsp);
6179 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
/* Only channels still pending on this request's ident */
6182 if (chan->ident != cmd->ident ||
6183 chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
6184 chan->state == BT_CONNECTED)
6187 l2cap_chan_lock(chan);
6189 /* Check that there is a dcid for each pending channel */
6190 if (cmd_len < sizeof(dcid)) {
6191 l2cap_chan_del(chan, ECONNREFUSED);
6192 l2cap_chan_unlock(chan);
6196 dcid = __le16_to_cpu(rsp->dcid[i++]);
6197 cmd_len -= sizeof(u16);
6199 BT_DBG("dcid[%d] 0x%4.4x", i, dcid);
6201 /* Check if dcid is already in use */
6202 if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
6203 /* If a device receives a
6204 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
6205 * already-assigned Destination CID, then both the
6206 * original channel and the new channel shall be
6207 * immediately discarded and not used.
 */
6209 l2cap_chan_del(chan, ECONNREFUSED);
6210 l2cap_chan_unlock(chan);
6211 chan = __l2cap_get_chan_by_dcid(conn, dcid);
6212 l2cap_chan_lock(chan);
6213 l2cap_chan_del(chan, ECONNRESET);
6214 l2cap_chan_unlock(chan);
6219 case L2CAP_CR_LE_AUTHENTICATION:
6220 case L2CAP_CR_LE_ENCRYPTION:
6221 /* If we already have MITM protection we can't do
 * anything more -- give up.  (continuation elided)
 */
6224 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
6225 l2cap_chan_del(chan, ECONNREFUSED);
6229 sec_level = hcon->sec_level + 1;
6230 if (chan->sec_level < sec_level)
6231 chan->sec_level = sec_level;
6233 /* We'll need to send a new Connect Request */
6234 clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);
6236 smp_conn_security(hcon, chan->sec_level);
6239 case L2CAP_CR_LE_BAD_PSM:
6240 l2cap_chan_del(chan, ECONNREFUSED);
6244 /* If dcid was not set it means channels was refused */
6246 l2cap_chan_del(chan, ECONNREFUSED);
/* Success: record peer parameters and bring the channel up */
6253 chan->remote_mps = mps;
6254 chan->tx_credits = credits;
6255 l2cap_chan_ready(chan);
6259 l2cap_chan_unlock(chan);
6262 mutex_unlock(&conn->chan_lock);
/* Handle an ECRED Reconfigure Request: the peer proposes new MTU/MPS for
 * a list of its channels (identified here by our DCIDs).  An MTU
 * decrease on any listed channel is rejected per spec.
 * (Several original lines are elided in this excerpt.)
 */
6267 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
6268 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6271 struct l2cap_ecred_reconf_req *req = (void *) data;
6272 struct l2cap_ecred_reconf_rsp rsp;
6273 u16 mtu, mps, result;
6274 struct l2cap_chan *chan;
/* NOTE(review): `%` binds tighter than `-`, so this evaluates as
 * cmd_len - (sizeof(*req) % sizeof(u16)), NOT the whole-number-of-SCIDs
 * check the parenthesized form in l2cap_ecred_conn_req performs.
 * Likely should read (cmd_len - sizeof(*req)) % sizeof(u16) -- confirm
 * against upstream before changing.
 */
6280 if (cmd_len < sizeof(*req) || cmd_len - sizeof(*req) % sizeof(u16)) {
6281 result = L2CAP_CR_LE_INVALID_PARAMS;
6285 mtu = __le16_to_cpu(req->mtu);
6286 mps = __le16_to_cpu(req->mps);
6288 BT_DBG("mtu %u mps %u", mtu, mps);
6290 if (mtu < L2CAP_ECRED_MIN_MTU) {
6291 result = L2CAP_RECONF_INVALID_MTU;
6295 if (mps < L2CAP_ECRED_MIN_MPS) {
6296 result = L2CAP_RECONF_INVALID_MPS;
6300 cmd_len -= sizeof(*req);
6301 num_scid = cmd_len / sizeof(u16);
6302 result = L2CAP_RECONF_SUCCESS;
6304 for (i = 0; i < num_scid; i++) {
6307 scid = __le16_to_cpu(req->scid[i]);
/* The peer's SCID is our DCID on this connection */
6311 chan = __l2cap_get_chan_by_dcid(conn, scid);
6315 /* If the MTU value is decreased for any of the included
6316 * channels, then the receiver shall disconnect all
6317 * included channels.
 */
6319 if (chan->omtu > mtu) {
6320 BT_ERR("chan %p decreased MTU %u -> %u", chan,
6322 result = L2CAP_RECONF_INVALID_MTU;
6326 chan->remote_mps = mps;
6330 rsp.result = cpu_to_le16(result);
6332 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
/* Handle an ECRED Reconfigure Response.  On a non-success result every
 * channel pending on this cmd ident is torn down; the response body is
 * read through the conn-rsp layout since only `result` is needed.
 */
6338 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
6339 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6342 struct l2cap_chan *chan, *tmp;
6343 struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6346 if (cmd_len < sizeof(*rsp))
6349 result = __le16_to_cpu(rsp->result);
/* NOTE(review): this prints the raw __le16 rsp->result rather than the
 * converted `result` computed above -- wrong on big-endian; confirm.
 */
6351 BT_DBG("result 0x%4.4x", rsp->result);
6356 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6357 if (chan->ident != cmd->ident)
6360 l2cap_chan_del(chan, ECONNRESET);
/* Handle a Command Reject on the LE signaling channel: the peer refused
 * a request we sent, so find the channel waiting on that ident and tear
 * it down with ECONNREFUSED.
 */
6366 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
6367 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6370 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
6371 struct l2cap_chan *chan;
6373 if (cmd_len < sizeof(*rej))
6376 mutex_lock(&conn->chan_lock);
6378 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
/* Take a reference only if the channel is not already being freed */
6382 chan = l2cap_chan_hold_unless_zero(chan);
6386 l2cap_chan_lock(chan);
6387 l2cap_chan_del(chan, ECONNREFUSED);
6388 l2cap_chan_unlock(chan);
6389 l2cap_chan_put(chan);
6392 mutex_unlock(&conn->chan_lock);
/* Dispatch a single LE signaling command (CID 0x0005) to its handler.
 * Mirrors l2cap_bredr_sig_cmd: request handlers return an error so the
 * caller can send a Command Reject; responses return void.
 */
6396 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
6397 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6402 switch (cmd->code) {
6403 case L2CAP_COMMAND_REJ:
6404 l2cap_le_command_rej(conn, cmd, cmd_len, data);
6407 case L2CAP_CONN_PARAM_UPDATE_REQ:
6408 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
6411 case L2CAP_CONN_PARAM_UPDATE_RSP:
6414 case L2CAP_LE_CONN_RSP:
6415 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
6418 case L2CAP_LE_CONN_REQ:
6419 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
6422 case L2CAP_LE_CREDITS:
6423 err = l2cap_le_credits(conn, cmd, cmd_len, data);
6426 case L2CAP_ECRED_CONN_REQ:
6427 err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
6430 case L2CAP_ECRED_CONN_RSP:
6431 err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
6434 case L2CAP_ECRED_RECONF_REQ:
6435 err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
6438 case L2CAP_ECRED_RECONF_RSP:
6439 err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
6442 case L2CAP_DISCONN_REQ:
6443 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
6446 case L2CAP_DISCONN_RSP:
6447 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
6451 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Parse the LE signaling channel payload.  LE carries exactly one
 * command per PDU (unlike BR/EDR); validate the header, hand the command
 * to l2cap_le_sig_cmd() and send a Command Reject if the handler failed.
 */
6459 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
6460 struct sk_buff *skb)
6462 struct hci_conn *hcon = conn->hcon;
6463 struct l2cap_cmd_hdr *cmd;
/* This CID is only valid on an LE link */
6467 if (hcon->type != LE_LINK)
6470 if (skb->len < L2CAP_CMD_HDR_SIZE)
6473 cmd = (void *) skb->data;
6474 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6476 len = le16_to_cpu(cmd->len);
6478 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* Exactly one command per LE PDU, and ident 0 is reserved */
6480 if (len != skb->len || !cmd->ident) {
6481 BT_DBG("corrupted command");
6485 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
6487 struct l2cap_cmd_rej_unk rej;
6489 BT_ERR("Wrong link type (%d)", err);
6491 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6492 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Send a "command not understood" Command Reject for the given ident. */
6500 static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
6502 struct l2cap_cmd_rej_unk rej;
6504 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6505 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Parse the BR/EDR signaling channel payload.  A single PDU may carry
 * multiple commands back to back; iterate over them, rejecting malformed
 * or unhandled ones, and feed the raw PDU to raw sockets first.
 */
6508 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
6509 struct sk_buff *skb)
6511 struct hci_conn *hcon = conn->hcon;
6512 struct l2cap_cmd_hdr *cmd;
/* Deliver the raw signaling PDU to any raw-mode listeners */
6515 l2cap_raw_recv(conn, skb);
6517 if (hcon->type != ACL_LINK)
6520 while (skb->len >= L2CAP_CMD_HDR_SIZE) {
6523 cmd = (void *) skb->data;
6524 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6526 len = le16_to_cpu(cmd->len);
6528 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
/* Truncated command or reserved ident 0: reject and skip it */
6531 if (len > skb->len || !cmd->ident) {
6532 BT_DBG("corrupted command");
6533 l2cap_sig_send_rej(conn, cmd->ident);
6534 skb_pull(skb, len > skb->len ? skb->len : len);
6538 err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
6540 BT_ERR("Wrong link type (%d)", err);
6541 l2cap_sig_send_rej(conn, cmd->ident);
/* Trailing bytes too short for a command header */
6548 BT_DBG("corrupted command");
6549 l2cap_sig_send_rej(conn, 0);
/* Verify the CRC16 FCS trailer of an ERTM/streaming frame.  The skb is
 * trimmed so the FCS bytes sit just past skb->len (still in the buffer),
 * then the CRC is computed over the L2CAP header (which precedes
 * skb->data by hdr_size) plus the remaining payload.
 */
6556 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
6558 u16 our_fcs, rcv_fcs;
/* Extended control field implies the larger frame header */
6561 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
6562 hdr_size = L2CAP_EXT_HDR_SIZE;
6564 hdr_size = L2CAP_ENH_HDR_SIZE;
6566 if (chan->fcs == L2CAP_FCS_CRC16) {
6567 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
6568 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
6569 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
6571 if (our_fcs != rcv_fcs)
/* Answer a poll (P-bit) from the peer: send RNR if we are locally busy,
 * otherwise flush pending I-frames, and if none of those carried the
 * required F-bit, send an RR S-frame so the poll is acknowledged.
 */
6577 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
6579 struct l2cap_ctrl control;
6581 BT_DBG("chan %p", chan);
6583 memset(&control, 0, sizeof(control));
6586 control.reqseq = chan->buffer_seq;
/* The response to a poll must carry the F-bit */
6587 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6589 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6590 control.super = L2CAP_SUPER_RNR;
6591 l2cap_send_sframe(chan, &control);
6594 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
6595 chan->unacked_frames > 0)
6596 __set_retrans_timer(chan);
6598 /* Send pending iframes */
6599 l2cap_ertm_send(chan);
6601 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
6602 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
6603 /* F-bit wasn't sent in an s-frame or i-frame yet, so
 * send an RR to carry it.  (continuation elided)
 */
6606 control.super = L2CAP_SUPER_RR;
6607 l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's frag_list, tracking the tail in *last_frag so
 * appends stay O(1), and account its length into the parent skb.
 */
6611 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
6612 struct sk_buff **last_frag)
6614 /* skb->len reflects data in skb as well as all fragments
6615 * skb->data_len reflects only data in fragments
 */
6617 if (!skb_has_frag_list(skb))
6618 skb_shinfo(skb)->frag_list = new_frag;
6620 new_frag->next = NULL;
6622 (*last_frag)->next = new_frag;
6623 *last_frag = new_frag;
/* Keep the parent skb's byte accounting in sync with the new tail */
6625 skb->len += new_frag->len;
6626 skb->data_len += new_frag->len;
6627 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from ERTM I-frames according to the SAR bits.
 * Unsegmented frames go straight to the channel's recv op; START frames
 * record the announced SDU length, CONTINUE/END frames are chained onto
 * chan->sdu via append_skb_frag() and the complete SDU is delivered when
 * its length matches.  (Several original lines are elided.)
 */
6630 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
6631 struct l2cap_ctrl *control)
6635 switch (control->sar) {
6636 case L2CAP_SAR_UNSEGMENTED:
6640 err = chan->ops->recv(chan, skb);
6643 case L2CAP_SAR_START:
/* The first fragment begins with a 16-bit total SDU length */
6647 if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
6650 chan->sdu_len = get_unaligned_le16(skb->data);
6651 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* Announced SDU larger than our MTU is a protocol error */
6653 if (chan->sdu_len > chan->imtu) {
6658 if (skb->len >= chan->sdu_len)
6662 chan->sdu_last_frag = skb;
6668 case L2CAP_SAR_CONTINUE:
6672 append_skb_frag(chan->sdu, skb,
6673 &chan->sdu_last_frag);
/* A CONTINUE fragment must not complete or overflow the SDU */
6676 if (chan->sdu->len >= chan->sdu_len)
6686 append_skb_frag(chan->sdu, skb,
6687 &chan->sdu_last_frag);
/* END fragment: total must match the announced length exactly */
6690 if (chan->sdu->len != chan->sdu_len)
6693 err = chan->ops->recv(chan, chan->sdu);
6696 /* Reassembly complete */
6698 chan->sdu_last_frag = NULL;
/* Error path: drop the partial SDU and reset reassembly state */
6706 kfree_skb(chan->sdu);
6708 chan->sdu_last_frag = NULL;
/* Re-segment outgoing data after a channel move changed the usable MTU
 * (body elided in this excerpt).
 */
6715 static int l2cap_resegment(struct l2cap_chan *chan)
/* Feed a local-busy transition into the ERTM TX state machine.
 * @busy: nonzero when the receive side can no longer accept data.
 * No-op for channels not in ERTM mode.
 */
6721 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6725 if (chan->mode != L2CAP_MODE_ERTM)
6728 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6729 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ hold queue in sequence order, reassembling each frame,
 * until a gap (missing txseq) is hit or local-busy is raised.  When the
 * queue empties we leave SREJ_SENT state and ack the peer.
 */
6732 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6735 /* Pass sequential frames to l2cap_reassemble_sdu()
6736 * until a gap is encountered.
 */
6739 BT_DBG("chan %p", chan);
6741 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6742 struct sk_buff *skb;
6743 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6744 chan->buffer_seq, skb_queue_len(&chan->srej_q));
6746 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6751 skb_unlink(skb, &chan->srej_q);
6752 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6753 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
/* All retransmissions received: back to normal receive state */
6758 if (skb_queue_empty(&chan->srej_q)) {
6759 chan->rx_state = L2CAP_RX_STATE_RECV;
6760 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: retransmit the single frame the peer
 * asks for.  A reqseq equal to next_tx_seq (nothing outstanding) or a
 * frame past its retry limit disconnects the channel.  P/F bit handling
 * guards against retransmitting the same frame twice for one SREJ.
 */
6766 static void l2cap_handle_srej(struct l2cap_chan *chan,
6767 struct l2cap_ctrl *control)
6769 struct sk_buff *skb;
6771 BT_DBG("chan %p, control %p", chan, control);
6773 if (control->reqseq == chan->next_tx_seq) {
6774 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6775 l2cap_send_disconn_req(chan, ECONNRESET);
6779 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6782 BT_DBG("Seq %d not available for retransmission",
6787 if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6788 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6789 l2cap_send_disconn_req(chan, ECONNRESET);
6793 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6795 if (control->poll) {
6796 l2cap_pass_to_tx(chan, control);
/* A poll demands an F-bit response; retransmit then flush */
6798 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6799 l2cap_retransmit(chan, control);
6800 l2cap_ertm_send(chan);
6802 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6803 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6804 chan->srej_save_reqseq = control->reqseq;
6807 l2cap_pass_to_tx_fbit(chan, control);
6809 if (control->final) {
/* Skip the retransmit if this SREJ answers our own poll for
 * the same sequence (already retransmitted once).
 */
6810 if (chan->srej_save_reqseq != control->reqseq ||
6811 !test_and_clear_bit(CONN_SREJ_ACT,
6813 l2cap_retransmit(chan, control);
6815 l2cap_retransmit(chan, control);
6816 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6817 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6818 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: retransmit everything from reqseq
 * onward.  Same sanity checks as SREJ (reqseq must reference an
 * outstanding frame, retry limit enforced); CONN_REJ_ACT prevents a
 * duplicate retransmit-all when the REJ answers our own poll.
 */
6824 static void l2cap_handle_rej(struct l2cap_chan *chan,
6825 struct l2cap_ctrl *control)
6827 struct sk_buff *skb;
6829 BT_DBG("chan %p, control %p", chan, control);
6831 if (control->reqseq == chan->next_tx_seq) {
6832 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6833 l2cap_send_disconn_req(chan, ECONNRESET);
6837 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6839 if (chan->max_tx && skb &&
6840 bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6841 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6842 l2cap_send_disconn_req(chan, ECONNRESET);
6846 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6848 l2cap_pass_to_tx(chan, control);
6850 if (control->final) {
6851 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6852 l2cap_retransmit_all(chan, control);
6854 l2cap_retransmit_all(chan, control);
6855 l2cap_ertm_send(chan);
/* Remember we acted on this REJ in case our poll's F-bit echoes it */
6856 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6857 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify an incoming I-frame's txseq relative to the receive window.
 * Returns one of the L2CAP_TXSEQ_* classes (expected, duplicate,
 * unexpected/gap, invalid, or their SREJ-state variants) which drives
 * the RX state machines in l2cap_rx_state_recv()/_srej_sent().
 */
6861 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6863 BT_DBG("chan %p, txseq %d", chan, txseq);
6865 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6866 chan->expected_tx_seq);
/* While SREJs are outstanding, window checks use last_acked_seq */
6868 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6869 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6871 /* See notes below regarding "double poll" and
 * invalid packets.  (continuation elided)
 */
6874 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6875 BT_DBG("Invalid/Ignore - after SREJ");
6876 return L2CAP_TXSEQ_INVALID_IGNORE;
6878 BT_DBG("Invalid - in window after SREJ sent");
6879 return L2CAP_TXSEQ_INVALID;
6883 if (chan->srej_list.head == txseq) {
6884 BT_DBG("Expected SREJ");
6885 return L2CAP_TXSEQ_EXPECTED_SREJ;
6888 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6889 BT_DBG("Duplicate SREJ - txseq already stored");
6890 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6893 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6894 BT_DBG("Unexpected SREJ - not requested");
6895 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6899 if (chan->expected_tx_seq == txseq) {
6900 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6902 BT_DBG("Invalid - txseq outside tx window");
6903 return L2CAP_TXSEQ_INVALID;
6906 return L2CAP_TXSEQ_EXPECTED;
/* Earlier than expected within the window: a retransmit we have */
6910 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6911 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6912 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6913 return L2CAP_TXSEQ_DUPLICATE;
6916 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6917 /* A source of invalid packets is a "double poll" condition,
6918 * where delays cause us to send multiple poll packets. If
6919 * the remote stack receives and processes both polls,
6920 * sequence numbers can wrap around in such a way that a
6921 * resent frame has a sequence number that looks like new data
6922 * with a sequence gap. This would trigger an erroneous SREJ
 * request.
 *
6925 * Fortunately, this is impossible with a tx window that's
6926 * less than half of the maximum sequence number, which allows
6927 * invalid frames to be safely ignored.
 *
6929 * With tx window sizes greater than half of the tx window
6930 * maximum, the frame is invalid and cannot be ignored. This
6931 * causes a disconnect.
 */
6934 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6935 BT_DBG("Invalid/Ignore - txseq outside tx window");
6936 return L2CAP_TXSEQ_INVALID_IGNORE;
6938 BT_DBG("Invalid - txseq outside tx window");
6939 return L2CAP_TXSEQ_INVALID;
6942 BT_DBG("Unexpected - txseq indicates missing frames");
6943 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM RX state machine, normal RECV state.  Handles incoming I-frames
 * (expected frames are reassembled, gaps trigger SREJ and a switch to
 * SREJ_SENT state) and S-frames (RR/RNR/REJ/SREJ).  Frames not consumed
 * by the state machine (skb_in_use stays false) are freed at the end.
 * (Several original lines are elided in this excerpt.)
 */
6947 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6948 struct l2cap_ctrl *control,
6949 struct sk_buff *skb, u8 event)
6951 struct l2cap_ctrl local_control;
6953 bool skb_in_use = false;
6955 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6959 case L2CAP_EV_RECV_IFRAME:
6960 switch (l2cap_classify_txseq(chan, control->txseq)) {
6961 case L2CAP_TXSEQ_EXPECTED:
6962 l2cap_pass_to_tx(chan, control);
6964 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6965 BT_DBG("Busy, discarding expected seq %d",
6970 chan->expected_tx_seq = __next_seq(chan,
6973 chan->buffer_seq = chan->expected_tx_seq;
6976 /* l2cap_reassemble_sdu may free skb, hence invalidate
6977 * control, so make a copy in advance to use it after
6978 * l2cap_reassemble_sdu returns and to avoid the race
6979 * condition, for example:
 *
6981 * The current thread calls:
6982 * l2cap_reassemble_sdu
6983 * chan->ops->recv == l2cap_sock_recv_cb
6984 * __sock_queue_rcv_skb
6985 * Another thread calls:
 * skb_free_datagram
 *
6989 * Then the current thread tries to access control, but
6990 * it was freed by skb_free_datagram.
 */
6992 local_control = *control;
6993 err = l2cap_reassemble_sdu(chan, skb, control);
6997 if (local_control.final) {
6998 if (!test_and_clear_bit(CONN_REJ_ACT,
6999 &chan->conn_state)) {
7000 local_control.final = 0;
7001 l2cap_retransmit_all(chan, &local_control);
7002 l2cap_ertm_send(chan);
7006 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
7007 l2cap_send_ack(chan);
7009 case L2CAP_TXSEQ_UNEXPECTED:
7010 l2cap_pass_to_tx(chan, control);
7012 /* Can't issue SREJ frames in the local busy state.
7013 * Drop this frame, it will be seen as missing
7014 * when local busy is exited.
 */
7016 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
7017 BT_DBG("Busy, discarding unexpected seq %d",
7022 /* There was a gap in the sequence, so an SREJ
7023 * must be sent for each missing frame. The
7024 * current frame is stored for later use.
 */
7026 skb_queue_tail(&chan->srej_q, skb);
7028 BT_DBG("Queued %p (queue len %d)", skb,
7029 skb_queue_len(&chan->srej_q));
7031 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
7032 l2cap_seq_list_clear(&chan->srej_list);
7033 l2cap_send_srej(chan, control->txseq);
7035 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
7037 case L2CAP_TXSEQ_DUPLICATE:
7038 l2cap_pass_to_tx(chan, control);
7040 case L2CAP_TXSEQ_INVALID_IGNORE:
7042 case L2CAP_TXSEQ_INVALID:
7044 l2cap_send_disconn_req(chan, ECONNRESET);
7048 case L2CAP_EV_RECV_RR:
7049 l2cap_pass_to_tx(chan, control);
7050 if (control->final) {
7051 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Don't retransmit during an AMP channel move */
7053 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
7054 !__chan_is_moving(chan)) {
7056 l2cap_retransmit_all(chan, control);
7059 l2cap_ertm_send(chan);
7060 } else if (control->poll) {
7061 l2cap_send_i_or_rr_or_rnr(chan);
7063 if (test_and_clear_bit(CONN_REMOTE_BUSY,
7064 &chan->conn_state) &&
7065 chan->unacked_frames)
7066 __set_retrans_timer(chan);
7068 l2cap_ertm_send(chan);
7071 case L2CAP_EV_RECV_RNR:
7072 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7073 l2cap_pass_to_tx(chan, control);
7074 if (control && control->poll) {
7075 set_bit(CONN_SEND_FBIT, &chan->conn_state);
7076 l2cap_send_rr_or_rnr(chan, 0);
7078 __clear_retrans_timer(chan);
7079 l2cap_seq_list_clear(&chan->retrans_list);
7081 case L2CAP_EV_RECV_REJ:
7082 l2cap_handle_rej(chan, control);
7084 case L2CAP_EV_RECV_SREJ:
7085 l2cap_handle_srej(chan, control);
/* Frames not queued/consumed above are dropped here */
7091 if (skb && !skb_in_use) {
7092 BT_DBG("Freeing %p", skb);
/* ERTM RX state machine, SREJ_SENT state (one or more SREJs are
 * outstanding).  Incoming I-frames are held in srej_q; a frame matching
 * the head of the SREJ list lets l2cap_rx_queued_iframes() drain the
 * queue in order.  S-frames are handled much like in RECV state, with
 * SREJ-tail resends on polls.  (Several original lines are elided.)
 */
7099 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
7100 struct l2cap_ctrl *control,
7101 struct sk_buff *skb, u8 event)
7104 u16 txseq = control->txseq;
7105 bool skb_in_use = false;
7107 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
7111 case L2CAP_EV_RECV_IFRAME:
7112 switch (l2cap_classify_txseq(chan, txseq)) {
7113 case L2CAP_TXSEQ_EXPECTED:
7114 /* Keep frame for reassembly later */
7115 l2cap_pass_to_tx(chan, control);
7116 skb_queue_tail(&chan->srej_q, skb);
7118 BT_DBG("Queued %p (queue len %d)", skb,
7119 skb_queue_len(&chan->srej_q));
7121 chan->expected_tx_seq = __next_seq(chan, txseq);
7123 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* This frame answers the SREJ at the list head */
7124 l2cap_seq_list_pop(&chan->srej_list);
7126 l2cap_pass_to_tx(chan, control);
7127 skb_queue_tail(&chan->srej_q, skb);
7129 BT_DBG("Queued %p (queue len %d)", skb,
7130 skb_queue_len(&chan->srej_q));
7132 err = l2cap_rx_queued_iframes(chan);
7137 case L2CAP_TXSEQ_UNEXPECTED:
7138 /* Got a frame that can't be reassembled yet.
7139 * Save it for later, and send SREJs to cover
7140 * the missing frames.
 */
7142 skb_queue_tail(&chan->srej_q, skb);
7144 BT_DBG("Queued %p (queue len %d)", skb,
7145 skb_queue_len(&chan->srej_q));
7147 l2cap_pass_to_tx(chan, control);
7148 l2cap_send_srej(chan, control->txseq);
7150 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
7151 /* This frame was requested with an SREJ, but
7152 * some expected retransmitted frames are
7153 * missing. Request retransmission of missing
 * SREJ'd frames.
 */
7156 skb_queue_tail(&chan->srej_q, skb);
7158 BT_DBG("Queued %p (queue len %d)", skb,
7159 skb_queue_len(&chan->srej_q));
7161 l2cap_pass_to_tx(chan, control);
7162 l2cap_send_srej_list(chan, control->txseq);
7164 case L2CAP_TXSEQ_DUPLICATE_SREJ:
7165 /* We've already queued this frame. Drop this copy. */
7166 l2cap_pass_to_tx(chan, control);
7168 case L2CAP_TXSEQ_DUPLICATE:
7169 /* Expecting a later sequence number, so this frame
7170 * was already received. Ignore it completely.
 */
7173 case L2CAP_TXSEQ_INVALID_IGNORE:
7175 case L2CAP_TXSEQ_INVALID:
7177 l2cap_send_disconn_req(chan, ECONNRESET);
7181 case L2CAP_EV_RECV_RR:
7182 l2cap_pass_to_tx(chan, control);
7183 if (control->final) {
7184 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7186 if (!test_and_clear_bit(CONN_REJ_ACT,
7187 &chan->conn_state)) {
7189 l2cap_retransmit_all(chan, control);
7192 l2cap_ertm_send(chan);
7193 } else if (control->poll) {
7194 if (test_and_clear_bit(CONN_REMOTE_BUSY,
7195 &chan->conn_state) &&
7196 chan->unacked_frames) {
7197 __set_retrans_timer(chan);
/* Answer the poll by repeating the most recent SREJ */
7200 set_bit(CONN_SEND_FBIT, &chan->conn_state);
7201 l2cap_send_srej_tail(chan);
7203 if (test_and_clear_bit(CONN_REMOTE_BUSY,
7204 &chan->conn_state) &&
7205 chan->unacked_frames)
7206 __set_retrans_timer(chan);
7208 l2cap_send_ack(chan);
7211 case L2CAP_EV_RECV_RNR:
7212 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7213 l2cap_pass_to_tx(chan, control);
7214 if (control->poll) {
7215 l2cap_send_srej_tail(chan);
7217 struct l2cap_ctrl rr_control;
7218 memset(&rr_control, 0, sizeof(rr_control));
7219 rr_control.sframe = 1;
7220 rr_control.super = L2CAP_SUPER_RR;
7221 rr_control.reqseq = chan->buffer_seq;
7222 l2cap_send_sframe(chan, &rr_control);
7226 case L2CAP_EV_RECV_REJ:
7227 l2cap_handle_rej(chan, control);
7229 case L2CAP_EV_RECV_SREJ:
7230 l2cap_handle_srej(chan, control);
/* Frames not queued above are dropped here */
7234 if (skb && !skb_in_use) {
7235 BT_DBG("Freeing %p", skb);
/* Complete a channel move (AMP): drop back to the normal RECV state,
 * re-select the link MTU for the connection, and resegment any queued
 * outbound data to the (possibly smaller) new MTU.
 * Returns the result of l2cap_resegment().
 */
7242 static int l2cap_finish_move(struct l2cap_chan *chan)
7244 	BT_DBG("chan %p", chan);
7246 	chan->rx_state = L2CAP_RX_STATE_RECV;
/* Use the AMP controller's block MTU when the channel moved to a
 * high-speed link... (branch condition elided here — presumably tests
 * chan->hs_hcon; verify against full source)
 */
7249 	chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
/* ...otherwise fall back to the BR/EDR ACL MTU. */
7251 	chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7253 	return l2cap_resegment(chan);
/* ERTM RX state machine: WAIT_P state (waiting for a P-bit frame during
 * a channel move). When the poll arrives, acknowledge outstanding TX
 * frames, rewind the transmit sequence to the peer's reqseq, finish the
 * move and respond with the F-bit set.
 */
7256 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
7257 				 struct l2cap_ctrl *control,
7258 				 struct sk_buff *skb, u8 event)
7262 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
/* Release frames the peer has acknowledged via reqseq. */
7268 	l2cap_process_reqseq(chan, control->reqseq);
7270 	if (!skb_queue_empty(&chan->tx_q))
7271 		chan->tx_send_head = skb_peek(&chan->tx_q);
7273 		chan->tx_send_head = NULL;
7275 	/* Rewind next_tx_seq to the point expected
/* Peer expects retransmission from reqseq; nothing is considered
 * unacked any more after the rewind.
 */
7278 	chan->next_tx_seq = control->reqseq;
7279 	chan->unacked_frames = 0;
7281 	err = l2cap_finish_move(chan);
/* Answer the poll with a frame carrying the F-bit. */
7285 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
7286 	l2cap_send_i_or_rr_or_rnr(chan);
/* I-frames received while waiting are re-run through the normal RECV
 * state handler (with skb consumed/NULL as appropriate).
 */
7288 	if (event == L2CAP_EV_RECV_IFRAME)
7291 	return l2cap_rx_state_recv(chan, control, NULL, event);
/* ERTM RX state machine: WAIT_F state (waiting for an F-bit frame after
 * sending a poll during a channel move). On the final response, clear
 * remote-busy, rewind TX state to the peer's reqseq, re-select the link
 * MTU, resegment queued data and process the frame in RECV state.
 */
7294 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
7295 				 struct l2cap_ctrl *control,
7296 				 struct sk_buff *skb, u8 event)
/* Ignore anything without the F-bit while in this state. */
7300 	if (!control->final)
7303 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7305 	chan->rx_state = L2CAP_RX_STATE_RECV;
7306 	l2cap_process_reqseq(chan, control->reqseq);
7308 	if (!skb_queue_empty(&chan->tx_q))
7309 		chan->tx_send_head = skb_peek(&chan->tx_q);
7311 		chan->tx_send_head = NULL;
7313 	/* Rewind next_tx_seq to the point expected
7316 	chan->next_tx_seq = control->reqseq;
7317 	chan->unacked_frames = 0;
/* MTU re-selection: block MTU for a high-speed link, ACL MTU otherwise
 * (condition elided here — presumably tests chan->hs_hcon).
 */
7320 	chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7322 	chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7324 	err = l2cap_resegment(chan);
7327 		err = l2cap_rx_state_recv(chan, control, skb, event);
/* Validate a received reqseq: it must acknowledge a frame that has been
 * sent but not yet acked, i.e. lie within the current unacked window
 * [expected_ack_seq, next_tx_seq] (modulo the sequence space).
 */
7332 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
7334 	/* Make sure reqseq is for a packet that has been sent but not acked */
7337 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
7338 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* ERTM receive entry point: validate the frame's reqseq, then dispatch
 * to the handler for the channel's current RX state. An out-of-window
 * reqseq is a protocol violation and tears the channel down.
 * Returns 0 on success or a negative error from the state handler.
 */
7341 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7342 		    struct sk_buff *skb, u8 event)
7346 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
7347 	       control, skb, event, chan->rx_state);
7349 	if (__valid_reqseq(chan, control->reqseq)) {
7350 		switch (chan->rx_state) {
7351 		case L2CAP_RX_STATE_RECV:
7352 			err = l2cap_rx_state_recv(chan, control, skb, event);
7354 		case L2CAP_RX_STATE_SREJ_SENT:
7355 			err = l2cap_rx_state_srej_sent(chan, control, skb,
7358 		case L2CAP_RX_STATE_WAIT_P:
7359 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
7361 		case L2CAP_RX_STATE_WAIT_F:
7362 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
/* reqseq outside the unacked window: unrecoverable, disconnect. */
7369 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
7370 		       control->reqseq, chan->next_tx_seq,
7371 		       chan->expected_ack_seq);
7372 		l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode receive: only the in-sequence txseq is reassembled;
 * everything else is dropped (streaming mode has no retransmission).
 * txseq is cached up front because reassembly may free skb — and with
 * it the control block embedded in its cb — see the race described
 * below.
 */
7378 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7379 			   struct sk_buff *skb)
7381 	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
7382 	 * the txseq field in advance to use it after l2cap_reassemble_sdu
7383 	 * returns and to avoid the race condition, for example:
7385 	 * The current thread calls:
7386 	 *   l2cap_reassemble_sdu
7387 	 *     chan->ops->recv == l2cap_sock_recv_cb
7388 	 *       __sock_queue_rcv_skb
7389 	 * Another thread calls:
7393 	 * Then the current thread tries to access control, but it was freed by
7394 	 * skb_free_datagram.
7396 	u16 txseq = control->txseq;
7398 	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
7401 	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
7402 		l2cap_pass_to_tx(chan, control);
7404 		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
7405 		       __next_seq(chan, chan->buffer_seq));
7407 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
7409 		l2cap_reassemble_sdu(chan, skb, control);
/* Unexpected txseq: discard any partial SDU and this frame. */
7412 		kfree_skb(chan->sdu);
7415 		chan->sdu_last_frag = NULL;
7419 		BT_DBG("Freeing %p", skb);
/* Always advance the RX window past the received frame. */
7424 	chan->last_acked_seq = txseq;
7425 	chan->expected_tx_seq = __next_seq(chan, txseq);
/* Parse and validate an incoming ERTM/streaming PDU: check the FCS,
 * enforce the MPS, validate the F/P bits against the TX state, then
 * hand I-frames to l2cap_rx()/l2cap_stream_rx() and map S-frames to the
 * corresponding RX events.
 */
7430 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7432 	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
7436 	__unpack_control(chan, skb);
7441 	 * We can just drop the corrupted I-frame here.
7442 	 * Receiver will miss it and start proper recovery
7443 	 * procedures and ask for retransmission.
7445 	if (l2cap_check_fcs(chan, skb))
/* Compute the payload length: strip the SDU-length field on a START
 * fragment and the FCS when in use.
 */
7448 	if (!control->sframe && control->sar == L2CAP_SAR_START)
7449 		len -= L2CAP_SDULEN_SIZE;
7451 	if (chan->fcs == L2CAP_FCS_CRC16)
7452 		len -= L2CAP_FCS_SIZE;
/* Payload larger than the negotiated MPS is a protocol violation. */
7454 	if (len > chan->mps) {
7455 		l2cap_send_disconn_req(chan, ECONNRESET);
/* Optional per-channel filter hook may consume/reject the skb. */
7459 	if (chan->ops->filter) {
7460 		if (chan->ops->filter(chan, skb))
7464 	if (!control->sframe) {
7467 		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
7468 		       control->sar, control->reqseq, control->final,
7471 		/* Validate F-bit - F=0 always valid, F=1 only
7472 		 * valid in TX WAIT_F
7474 		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
7477 		if (chan->mode != L2CAP_MODE_STREAMING) {
7478 			event = L2CAP_EV_RECV_IFRAME;
7479 			err = l2cap_rx(chan, control, skb, event);
7481 			err = l2cap_stream_rx(chan, control, skb);
7485 			l2cap_send_disconn_req(chan, ECONNRESET);
/* Map the 2-bit S-frame "super" field to an RX event (RR/REJ/RNR/SREJ
 * in field order).
 */
7487 		const u8 rx_func_to_event[4] = {
7488 			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
7489 			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
7492 		/* Only I-frames are expected in streaming mode */
7493 		if (chan->mode == L2CAP_MODE_STREAMING)
7496 		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
7497 		       control->reqseq, control->final, control->poll,
/* S-frames carry no payload; trailing bytes are a violation. */
7501 			BT_ERR("Trailing bytes: %d in sframe", len);
7502 			l2cap_send_disconn_req(chan, ECONNRESET);
7506 		/* Validate F and P bits */
7507 		if (control->final && (control->poll ||
7508 				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
7511 		event = rx_func_to_event[control->super];
7512 		if (l2cap_rx(chan, control, skb, event))
7513 			l2cap_send_disconn_req(chan, ECONNRESET);
/* Top up the peer's LE flow-control credits. The target credit count is
 * enough for one full SDU (imtu/mps + 1 PDUs); if the peer already has
 * at least that many, nothing is sent. Otherwise the difference is
 * granted via an L2CAP_LE_CREDITS signalling packet.
 */
7523 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
7525 	struct l2cap_conn *conn = chan->conn;
7526 	struct l2cap_le_credits pkt;
7529 	return_credits = (chan->imtu / chan->mps) + 1;
7531 	if (chan->rx_credits >= return_credits)
/* Grant only the shortfall, and account for it locally first. */
7534 	return_credits -= chan->rx_credits;
7536 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
7538 	chan->rx_credits += return_credits;
7540 	pkt.cid     = cpu_to_le16(chan->scid);
7541 	pkt.credits = cpu_to_le16(return_credits);
7543 	chan->ident = l2cap_get_ident(conn);
7545 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* Deliver a fully reassembled LE/ECRED SDU to the channel owner, and
 * only then replenish the sender's credits — so credits are not handed
 * out for data the upper layer has not yet accepted.
 */
7548 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
7552 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
7554 	/* Wait recv to confirm reception before updating the credits */
7555 	err = chan->ops->recv(chan, skb);
7557 	/* Update credits whenever an SDU is received */
7558 	l2cap_chan_le_send_credits(chan);
/* Receive one LE/ECRED flow-control PDU: consume a credit, validate the
 * SDU-length header, and either deliver a single-PDU SDU immediately or
 * accumulate fragments in chan->sdu until the declared length is met.
 * Errors that free the skb internally must not be propagated (see the
 * note at the end) to avoid a double free in the caller.
 */
7563 static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
/* A PDU without an available credit is a protocol violation. */
7567 	if (!chan->rx_credits) {
7568 		BT_ERR("No credits to receive LE L2CAP data");
7569 		l2cap_send_disconn_req(chan, ECONNRESET);
7573 	if (chan->imtu < skb->len) {
7574 		BT_ERR("Too big LE L2CAP PDU");
7579 	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
7581 	/* Update if remote had run out of credits, this should only happens
7582 	 * if the remote is not using the entire MPS.
7584 	if (!chan->rx_credits)
7585 		l2cap_chan_le_send_credits(chan);
/* First PDU of an SDU: the payload starts with a 16-bit SDU length. */
7592 		sdu_len = get_unaligned_le16(skb->data);
7593 		skb_pull(skb, L2CAP_SDULEN_SIZE);
7595 		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
7596 		       sdu_len, skb->len, chan->imtu);
7598 		if (sdu_len > chan->imtu) {
7599 			BT_ERR("Too big LE L2CAP SDU length received");
7604 		if (skb->len > sdu_len) {
7605 			BT_ERR("Too much LE L2CAP data received");
/* SDU fits in a single PDU: deliver it right away. */
7610 		if (skb->len == sdu_len)
7611 			return l2cap_ecred_recv(chan, skb);
7614 		chan->sdu_len = sdu_len;
7615 		chan->sdu_last_frag = skb;
7617 		/* Detect if remote is not able to use the selected MPS */
7618 		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
7619 			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
7621 			/* Adjust the number of credits */
7622 			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
7623 			chan->mps = mps_len;
7624 			l2cap_chan_le_send_credits(chan);
/* Continuation fragment: append to the partial SDU. */
7630 	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
7631 	       chan->sdu->len, skb->len, chan->sdu_len);
7633 	if (chan->sdu->len + skb->len > chan->sdu_len) {
7634 		BT_ERR("Too much LE L2CAP data received");
7639 	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
7642 	if (chan->sdu->len == chan->sdu_len) {
7643 		err = l2cap_ecred_recv(chan, chan->sdu);
7646 			chan->sdu_last_frag = NULL;
/* Error path: drop the partial SDU and reset reassembly state. */
7654 	kfree_skb(chan->sdu);
7656 	chan->sdu_last_frag = NULL;
7660 	/* We can't return an error here since we took care of the skb
7661 	 * freeing internally. An error return would cause the caller to
7662 	 * do a double-free of the skb.
/* Route an incoming data PDU to the channel identified by its CID, then
 * dispatch by channel mode (LE/ECRED flow control, basic, ERTM or
 * streaming). Unknown CIDs are dropped. Holds a channel reference and
 * the channel lock across delivery.
 */
7667 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
7668 			       struct sk_buff *skb)
7670 	struct l2cap_chan *chan;
7672 	chan = l2cap_get_chan_by_scid(conn, cid);
/* No channel found (lookup-failure path elided here): an A2MP CID may
 * lazily create its channel; verify against full source.
 */
7674 		if (cid == L2CAP_CID_A2MP) {
7675 			chan = a2mp_channel_create(conn, skb);
7681 			l2cap_chan_hold(chan);
7682 			l2cap_chan_lock(chan);
7684 			BT_DBG("unknown cid 0x%4.4x", cid);
7685 			/* Drop packet and return */
7691 	BT_DBG("chan %p, len %d", chan, skb->len);
7693 	/* If we receive data on a fixed channel before the info req/rsp
7694 	 * procedure is done simply assume that the channel is supported
7695 	 * and mark it as ready.
7697 	if (chan->chan_type == L2CAP_CHAN_FIXED)
7698 		l2cap_chan_ready(chan);
7700 	if (chan->state != BT_CONNECTED)
7703 	switch (chan->mode) {
7704 	case L2CAP_MODE_LE_FLOWCTL:
7705 	case L2CAP_MODE_EXT_FLOWCTL:
7706 		if (l2cap_ecred_data_rcv(chan, skb) < 0)
7711 	case L2CAP_MODE_BASIC:
7712 		/* If socket recv buffers overflows we drop data here
7713 		 * which is *bad* because L2CAP has to be reliable.
7714 		 * But we don't have any other choice. L2CAP doesn't
7715 		 * provide flow control mechanism. */
7717 		if (chan->imtu < skb->len) {
7718 			BT_ERR("Dropping L2CAP data: receive buffer overflow");
7722 		if (!chan->ops->recv(chan, skb))
7726 	case L2CAP_MODE_ERTM:
7727 	case L2CAP_MODE_STREAMING:
7728 		l2cap_data_rcv(chan, skb);
7732 		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Release the lock and reference taken by the lookup above. */
7740 	l2cap_chan_unlock(chan);
7741 	l2cap_chan_put(chan);
/* Deliver a connectionless (UCD) PDU to a channel listening on the PSM.
 * BR/EDR (ACL) links only. The remote address and PSM are stashed in
 * the skb control block so the socket layer can fill in msg_name.
 */
7744 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
7745 				  struct sk_buff *skb)
7747 	struct hci_conn *hcon = conn->hcon;
7748 	struct l2cap_chan *chan;
7750 	if (hcon->type != ACL_LINK)
7753 	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
7758 	BT_DBG("chan %p, len %d", chan, skb->len);
7760 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
7763 	if (chan->imtu < skb->len)
7766 	/* Store remote BD_ADDR and PSM for msg_name */
7767 	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
7768 	bt_cb(skb)->l2cap.psm = psm;
/* ops->recv returning 0 means the skb was consumed; drop our channel
 * reference and stop.
 */
7770 	if (!chan->ops->recv(chan, skb)) {
7771 		l2cap_chan_put(chan);
7776 	l2cap_chan_put(chan);
/* Top-level dispatcher for a complete L2CAP frame: strip the basic
 * header and route by CID (signalling, connectionless, LE signalling,
 * or a data channel). Frames that arrive before the HCI connection is
 * fully up are parked on conn->pending_rx for later processing.
 */
7781 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7783 	struct l2cap_hdr *lh = (void *) skb->data;
7784 	struct hci_conn *hcon = conn->hcon;
7788 	if (hcon->state != BT_CONNECTED) {
7789 		BT_DBG("queueing pending rx skb");
7790 		skb_queue_tail(&conn->pending_rx, skb);
7794 	skb_pull(skb, L2CAP_HDR_SIZE);
7795 	cid = __le16_to_cpu(lh->cid);
7796 	len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload length exactly. */
7798 	if (len != skb->len) {
7803 	/* Since we can't actively block incoming LE connections we must
7804 	 * at least ensure that we ignore incoming data from them.
7806 	if (hcon->type == LE_LINK &&
7807 	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
7808 				   bdaddr_dst_type(hcon))) {
7813 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
7816 	case L2CAP_CID_SIGNALING:
7817 		l2cap_sig_channel(conn, skb);
7820 	case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry a 2-byte PSM before the payload. */
7821 		psm = get_unaligned((__le16 *) skb->data);
7822 		skb_pull(skb, L2CAP_PSMLEN_SIZE);
7823 		l2cap_conless_channel(conn, psm, skb);
7826 	case L2CAP_CID_LE_SIGNALING:
7827 		l2cap_le_sig_channel(conn, skb);
7831 		l2cap_data_channel(conn, cid, skb);
/* Workqueue handler: drain frames queued on conn->pending_rx (parked by
 * l2cap_recv_frame() while the HCI connection was still coming up) and
 * feed them through the normal receive path.
 */
7836 static void process_pending_rx(struct work_struct *work)
7838 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7840 	struct sk_buff *skb;
7844 	while ((skb = skb_dequeue(&conn->pending_rx)))
7845 		l2cap_recv_frame(conn, skb);
/* Create and attach the L2CAP connection object for an HCI connection:
 * allocate conn + an HCI channel, choose the MTU from the link type,
 * advertise the local fixed channels, and initialise locks, lists,
 * timers and work items. Returns the existing conn if one is already
 * attached (early-return path elided), or NULL on allocation failure.
 */
7848 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7850 	struct l2cap_conn *conn = hcon->l2cap_data;
7851 	struct hci_chan *hchan;
7856 	hchan = hci_chan_create(hcon);
7860 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
/* Allocation failed: release the HCI channel created above. */
7862 		hci_chan_del(hchan);
7866 	kref_init(&conn->ref);
7867 	hcon->l2cap_data = conn;
7868 	conn->hcon = hci_conn_get(hcon);
7869 	conn->hchan = hchan;
7871 	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
/* Pick the MTU: LE links use le_mtu when the controller reports one,
 * everything else falls back to the ACL MTU.
 */
7873 	switch (hcon->type) {
7875 		if (hcon->hdev->le_mtu) {
7876 			conn->mtu = hcon->hdev->le_mtu;
7881 		conn->mtu = hcon->hdev->acl_mtu;
7885 	conn->feat_mask = 0;
7887 	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7889 	if (hcon->type == ACL_LINK &&
7890 	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7891 		conn->local_fixed_chan |= L2CAP_FC_A2MP;
/* BR/EDR SMP fixed channel is only offered with LE + secure
 * connections (or the debug force flag).
 */
7893 	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7894 	    (bredr_sc_enabled(hcon->hdev) ||
7895 	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7896 		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7898 	mutex_init(&conn->ident_lock);
7899 	mutex_init(&conn->chan_lock);
7901 	INIT_LIST_HEAD(&conn->chan_l);
7902 	INIT_LIST_HEAD(&conn->users);
7904 	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7906 	skb_queue_head_init(&conn->pending_rx);
7907 	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7908 	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7910 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* Validate a PSM for the destination address type: LE PSMs are a single
 * byte (<= 0x00ff); BR/EDR PSMs must be odd with bit 8 clear, per the
 * L2CAP spec's PSM encoding rules.
 */
7915 static bool is_valid_psm(u16 psm, u8 dst_type) {
7919 	if (bdaddr_type_is_le(dst_type))
7920 		return (psm <= 0x00ff);
7922 	/* PSM must be odd and lsb of upper byte must be 0 */
7923 	return ((psm & 0x0101) == 0x0001);
/* Iteration context for l2cap_chan_list() callbacks: the reference
 * channel being compared against, plus a match counter (fields beyond
 * chan are elided from this view).
 */
7926 struct l2cap_chan_data {
7927 	struct l2cap_chan *chan;
/* l2cap_chan_list() callback: count deferred ECRED connection attempts
 * that belong to the same process and PSM as the reference channel, so
 * the caller can rate-limit simultaneous ECRED connects per PID.
 */
7932 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
7934 	struct l2cap_chan_data *d = data;
/* Skip the reference channel itself. */
7937 	if (chan == d->chan)
7940 	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
7943 	pid = chan->ops->get_peer_pid(chan);
7945 	/* Only count deferred channels with the same PID/PSM */
7946 	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
7947 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
/* Initiate an outgoing L2CAP connection on @chan to @dst/@dst_type,
 * identified by @psm (connection-oriented) or @cid (fixed channel).
 * Validates PSM/CID and mode, creates or reuses the HCI connection
 * (LE connect/scan or ACL connect), attaches the channel to the L2CAP
 * connection and starts the channel state machine.
 * Returns 0 on success or a negative errno.
 */
7953 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7954 		       bdaddr_t *dst, u8 dst_type)
7956 	struct l2cap_conn *conn;
7957 	struct hci_conn *hcon;
7958 	struct hci_dev *hdev;
7961 	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
7962 	       dst, dst_type, __le16_to_cpu(psm), chan->mode);
7964 	hdev = hci_get_route(dst, &chan->src, chan->src_type);
7966 		return -EHOSTUNREACH;
/* Parameter sanity: a raw channel may skip PSM validation; otherwise
 * connection-oriented needs a PSM and fixed channels need a CID.
 */
7970 	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7971 	    chan->chan_type != L2CAP_CHAN_RAW) {
7976 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7981 	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
/* Mode gating: ECRED mode requires the enable_ecred module knob;
 * ERTM/streaming handling is elided here.
 */
7986 	switch (chan->mode) {
7987 	case L2CAP_MODE_BASIC:
7989 	case L2CAP_MODE_LE_FLOWCTL:
7991 	case L2CAP_MODE_EXT_FLOWCTL:
7992 		if (!enable_ecred) {
7997 	case L2CAP_MODE_ERTM:
7998 	case L2CAP_MODE_STREAMING:
8007 	switch (chan->state) {
8011 		/* Already connecting */
8016 		/* Already connected */
8030 	/* Set destination address and psm */
8031 	bacpy(&chan->dst, dst);
8032 	chan->dst_type = dst_type;
8037 	if (bdaddr_type_is_le(dst_type)) {
8038 		/* Convert from L2CAP channel address type to HCI address type
8040 		if (dst_type == BDADDR_LE_PUBLIC)
8041 			dst_type = ADDR_LE_DEV_PUBLIC;
8043 			dst_type = ADDR_LE_DEV_RANDOM;
/* While advertising, connect directly (we're connectable as slave);
 * otherwise queue a connect-by-scan.
 */
8045 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8046 			hcon = hci_connect_le(hdev, dst, dst_type,
8048 					      HCI_LE_CONN_TIMEOUT,
8049 					      HCI_ROLE_SLAVE, NULL);
8051 			hcon = hci_connect_le_scan(hdev, dst, dst_type,
8053 						   HCI_LE_CONN_TIMEOUT,
8054 						   CONN_REASON_L2CAP_CHAN);
8057 		u8 auth_type = l2cap_get_auth_type(chan);
8058 		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
8059 				       CONN_REASON_L2CAP_CHAN);
8063 		err = PTR_ERR(hcon);
8067 	conn = l2cap_conn_add(hcon);
8069 		hci_conn_drop(hcon);
/* Limit the number of simultaneous deferred ECRED connects per PID. */
8074 	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
8075 		struct l2cap_chan_data data;
8078 		data.pid = chan->ops->get_peer_pid(chan);
8081 		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);
8083 		/* Check if there isn't too many channels being connected */
8084 		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
8085 			hci_conn_drop(hcon);
8091 	mutex_lock(&conn->chan_lock);
8092 	l2cap_chan_lock(chan);
/* A fixed-CID connect must not clash with an existing channel. */
8094 	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
8095 		hci_conn_drop(hcon);
8100 	/* Update source addr of the socket */
8101 	bacpy(&chan->src, &hcon->src);
8102 	chan->src_type = bdaddr_src_type(hcon);
8104 	__l2cap_chan_add(conn, chan);
8106 	/* l2cap_chan_add takes its own ref so we can drop this one */
8107 	hci_conn_drop(hcon);
8109 	l2cap_state_change(chan, BT_CONNECT);
8110 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
8112 	/* Release chan->sport so that it can be reused by other
8113 	 * sockets (as it's only used for listening sockets).
8115 	write_lock(&chan_list_lock);
8117 	write_unlock(&chan_list_lock);
/* If the link is already up, the channel can progress immediately. */
8119 	if (hcon->state == BT_CONNECTED) {
8120 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
8121 			__clear_chan_timer(chan);
8122 			if (l2cap_chan_check_security(chan, true))
8123 				l2cap_state_change(chan, BT_CONNECTED);
8125 			l2cap_do_start(chan);
8131 	l2cap_chan_unlock(chan);
8132 	mutex_unlock(&conn->chan_lock);
8134 	hci_dev_unlock(hdev);
/* Send an ECRED reconfigure request advertising the channel's current
 * MTU and MPS for its SCID, tagged with a fresh command identifier.
 */
8140 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
8142 	struct l2cap_conn *conn = chan->conn;
8144 		struct l2cap_ecred_reconf_req req;
8148 	pdu.req.mtu = cpu_to_le16(chan->imtu);
8149 	pdu.req.mps = cpu_to_le16(chan->mps);
8150 	pdu.scid    = cpu_to_le16(chan->scid);
8152 	chan->ident = l2cap_get_ident(conn);
8154 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
/* Public API: raise the channel MTU to @mtu via an ECRED reconfigure.
 * Shrinking the MTU is rejected (the check returns early when the
 * current imtu already exceeds @mtu).
 */
8158 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
8160 	if (chan->imtu > mtu)
8163 	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);
8167 	l2cap_ecred_reconfigure(chan);
8172 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback for an incoming connection request: scan listening
 * channels and build the accept/role-switch link-mode mask. A socket
 * bound to this adapter's own address (exact match) takes precedence
 * over wildcard (BDADDR_ANY) listeners.
 */
8174 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
8176 	int exact = 0, lm1 = 0, lm2 = 0;
8177 	struct l2cap_chan *c;
8179 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
8181 	/* Find listening sockets and check their link_mode */
8182 	read_lock(&chan_list_lock);
8183 	list_for_each_entry(c, &chan_list, global_l) {
8184 		if (c->state != BT_LISTEN)
/* lm1 accumulates exact-address listeners, lm2 wildcard ones. */
8187 		if (!bacmp(&c->src, &hdev->bdaddr)) {
8188 			lm1 |= HCI_LM_ACCEPT;
8189 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8190 				lm1 |= HCI_LM_MASTER;
8192 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
8193 			lm2 |= HCI_LM_ACCEPT;
8194 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8195 				lm2 |= HCI_LM_MASTER;
8198 	read_unlock(&chan_list_lock);
8200 	return exact ? lm1 : lm2;
8203 /* Find the next fixed channel in BT_LISTEN state, continue iteration
8204 * from an existing channel in the list or from the beginning of the
8205 * global list (by passing NULL as first parameter).
/* Return the next listening fixed channel matching @hcon's source
 * address type, resuming iteration after @c (or from the head of the
 * global list when @c is NULL). The returned channel has a reference
 * held; returns NULL when the list is exhausted.
 */
8207 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
8208 						  struct hci_conn *hcon)
8210 	u8 src_type = bdaddr_src_type(hcon);
8212 	read_lock(&chan_list_lock);
8215 		c = list_next_entry(c, global_l);
8217 		c = list_entry(chan_list.next, typeof(*c), global_l);
8219 	list_for_each_entry_from(c, &chan_list, global_l) {
8220 		if (c->chan_type != L2CAP_CHAN_FIXED)
8222 		if (c->state != BT_LISTEN)
/* Accept an exact source-address match or a wildcard bind. */
8224 		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
8226 		if (src_type != c->src_type)
/* hold_unless_zero guards against a channel racing to destruction. */
8229 		c = l2cap_chan_hold_unless_zero(c);
8230 		read_unlock(&chan_list_lock);
8234 	read_unlock(&chan_list_lock);
/* HCI callback after a connection attempt completes. On failure the
 * L2CAP connection is torn down; on success the conn is created and
 * every listening fixed channel is offered a new child channel on this
 * link. Lookups restart after each channel because holding the global
 * list lock across the (sleeping) channel lock is not allowed.
 */
8239 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
8241 	struct hci_dev *hdev = hcon->hdev;
8242 	struct l2cap_conn *conn;
8243 	struct l2cap_chan *pchan;
8246 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8249 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
/* Non-zero HCI status: tear everything down with the mapped errno. */
8252 		l2cap_conn_del(hcon, bt_to_errno(status));
8256 	conn = l2cap_conn_add(hcon);
8260 	dst_type = bdaddr_dst_type(hcon);
8262 	/* If device is blocked, do not create channels for it */
8263 	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
8266 	/* Find fixed channels and notify them of the new connection. We
8267 	 * use multiple individual lookups, continuing each time where
8268 	 * we left off, because the list lock would prevent calling the
8269 	 * potentially sleeping l2cap_chan_lock() function.
8271 	pchan = l2cap_global_fixed_chan(NULL, hcon);
8273 		struct l2cap_chan *chan, *next;
8275 		/* Client fixed channels should override server ones */
8276 		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
8279 		l2cap_chan_lock(pchan);
/* Spawn a per-connection child channel from the listener. */
8280 		chan = pchan->ops->new_connection(pchan);
8282 			bacpy(&chan->src, &hcon->src);
8283 			bacpy(&chan->dst, &hcon->dst);
8284 			chan->src_type = bdaddr_src_type(hcon);
8285 			chan->dst_type = dst_type;
8287 			__l2cap_chan_add(conn, chan);
8290 		l2cap_chan_unlock(pchan);
/* Advance before dropping our reference on the current listener. */
8292 		next = l2cap_global_fixed_chan(pchan, hcon);
8293 		l2cap_chan_put(pchan);
8297 	l2cap_conn_ready(conn);
/* HCI callback asking for the disconnect reason to report: use the
 * reason recorded on the L2CAP connection, or a generic remote-user
 * termination when no conn exists.
 */
8300 int l2cap_disconn_ind(struct hci_conn *hcon)
8302 	struct l2cap_conn *conn = hcon->l2cap_data;
8304 	BT_DBG("hcon %p", hcon);
8307 		return HCI_ERROR_REMOTE_USER_TERM;
8308 	return conn->disc_reason;
/* HCI callback on link disconnection: tear down the L2CAP connection
 * for ACL/LE links, converting the HCI reason to an errno.
 */
8311 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
8313 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8316 	BT_DBG("hcon %p reason %d", hcon, reason);
8318 	l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption starts a grace timer at MEDIUM security and closes
 * the channel outright at HIGH/FIPS; regaining it cancels the timer.
 */
8321 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
8323 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
8326 	if (encrypt == 0x00) {
8327 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
8328 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
8329 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
8330 			   chan->sec_level == BT_SECURITY_FIPS)
8331 			l2cap_chan_close(chan, ECONNREFUSED);
8333 		if (chan->sec_level == BT_SECURITY_MEDIUM)
8334 			__clear_chan_timer(chan);
/* HCI callback after an authentication/encryption procedure: walk every
 * channel on the connection and advance its state machine — resume
 * established channels, start pending connects, and answer deferred
 * incoming connects with success, pending or security-block.
 */
8338 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
8340 	struct l2cap_conn *conn = hcon->l2cap_data;
8341 	struct l2cap_chan *chan;
8346 	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
8348 	mutex_lock(&conn->chan_lock);
8350 	list_for_each_entry(chan, &conn->chan_l, list) {
8351 		l2cap_chan_lock(chan);
8353 		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
8354 		       state_to_string(chan->state));
/* The A2MP fixed channel manages its own security. */
8356 		if (chan->scid == L2CAP_CID_A2MP) {
8357 			l2cap_chan_unlock(chan);
8361 		if (!status && encrypt)
8362 			chan->sec_level = hcon->sec_level;
8364 		if (!__l2cap_no_conn_pending(chan)) {
8365 			l2cap_chan_unlock(chan);
/* Already-established channels just resume and re-check encryption. */
8369 		if (!status && (chan->state == BT_CONNECTED ||
8370 				chan->state == BT_CONFIG)) {
8371 			chan->ops->resume(chan);
8372 			l2cap_check_encryption(chan, encrypt);
8373 			l2cap_chan_unlock(chan);
8377 		if (chan->state == BT_CONNECT) {
/* Outgoing connect may proceed only with an acceptable key size. */
8378 			if (!status && l2cap_check_enc_key_size(hcon))
8379 				l2cap_start_connection(chan);
8381 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8382 		} else if (chan->state == BT_CONNECT2 &&
8383 			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
8384 			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
8385 			struct l2cap_conn_rsp rsp;
8388 			if (!status && l2cap_check_enc_key_size(hcon)) {
8389 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
8390 					res = L2CAP_CR_PEND;
8391 					stat = L2CAP_CS_AUTHOR_PEND;
8392 					chan->ops->defer(chan);
8394 					l2cap_state_change(chan, BT_CONFIG);
8395 					res = L2CAP_CR_SUCCESS;
8396 					stat = L2CAP_CS_NO_INFO;
/* Security failed: reject the pending incoming connect. */
8399 				l2cap_state_change(chan, BT_DISCONN);
8400 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8401 				res = L2CAP_CR_SEC_BLOCK;
8402 				stat = L2CAP_CS_NO_INFO;
8405 			rsp.scid   = cpu_to_le16(chan->dcid);
8406 			rsp.dcid   = cpu_to_le16(chan->scid);
8407 			rsp.result = cpu_to_le16(res);
8408 			rsp.status = cpu_to_le16(stat);
8409 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Kick off configuration right after a successful response. */
8412 			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
8413 			    res == L2CAP_CR_SUCCESS) {
8415 				set_bit(CONF_REQ_SENT, &chan->conf_state);
8416 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
8418 					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
8420 				chan->num_conf_req++;
8424 		l2cap_chan_unlock(chan);
8427 	mutex_unlock(&conn->chan_lock);
/* Entry point from the HCI core for ACL data: reassemble L2CAP frames
 * from ACL fragments. A start fragment either holds a complete frame
 * (dispatched immediately) or opens conn->rx_skb for accumulation;
 * continuation fragments are appended until rx_len reaches zero.
 * Malformed sequences mark the connection unreliable (ECOMM).
 */
8430 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
8432 	struct l2cap_conn *conn = hcon->l2cap_data;
8433 	struct l2cap_hdr *hdr;
8436 	/* For AMP controller do not create l2cap conn */
8437 	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
8441 		conn = l2cap_conn_add(hcon);
8446 	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
8450 	case ACL_START_NO_FLUSH:
/* A new start fragment while one is in progress: discard the stale
 * partial frame and flag the connection.
 */
8453 			BT_ERR("Unexpected start frame (len %d)", skb->len);
8454 			kfree_skb(conn->rx_skb);
8455 			conn->rx_skb = NULL;
8457 			l2cap_conn_unreliable(conn, ECOMM);
8460 		/* Start fragment always begin with Basic L2CAP header */
8461 		if (skb->len < L2CAP_HDR_SIZE) {
8462 			BT_ERR("Frame is too short (len %d)", skb->len);
8463 			l2cap_conn_unreliable(conn, ECOMM);
8467 		hdr = (struct l2cap_hdr *) skb->data;
8468 		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
8470 		if (len == skb->len) {
8471 			/* Complete frame received */
8472 			l2cap_recv_frame(conn, skb);
8476 		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
8478 		if (skb->len > len) {
8479 			BT_ERR("Frame is too long (len %d, expected len %d)",
8481 			l2cap_conn_unreliable(conn, ECOMM);
8485 		/* Allocate skb for the complete frame (with header) */
8486 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
8490 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len tracks how many bytes are still missing. */
8492 		conn->rx_len = len - skb->len;
8496 		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
8498 		if (!conn->rx_len) {
8499 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
8500 			l2cap_conn_unreliable(conn, ECOMM);
8504 		if (skb->len > conn->rx_len) {
8505 			BT_ERR("Fragment is too long (len %d, expected %d)",
8506 			       skb->len, conn->rx_len);
8507 			kfree_skb(conn->rx_skb);
8508 			conn->rx_skb = NULL;
8510 			l2cap_conn_unreliable(conn, ECOMM);
8514 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
8516 		conn->rx_len -= skb->len;
8518 		if (!conn->rx_len) {
8519 			/* Complete frame received. l2cap_recv_frame
8520 			 * takes ownership of the skb so set the global
8521 			 * rx_skb pointer to NULL first.
8523 			struct sk_buff *rx_skb = conn->rx_skb;
8524 			conn->rx_skb = NULL;
8525 			l2cap_recv_frame(conn, rx_skb);
/* HCI callback table registering L2CAP with the HCI core (connect,
 * disconnect and security-change notifications).
 */
8534 static struct hci_cb l2cap_cb = {
8536 	.connect_cfm	= l2cap_connect_cfm,
8537 	.disconn_cfm	= l2cap_disconn_cfm,
8538 	.security_cfm	= l2cap_security_cfm,
/* debugfs: dump one line per global channel — addresses, state, PSM,
 * CIDs, MTUs, security level and mode — under the chan_list read lock.
 */
8541 static int l2cap_debugfs_show(struct seq_file *f, void *p)
8543 	struct l2cap_chan *c;
8545 	read_lock(&chan_list_lock);
8547 	list_for_each_entry(c, &chan_list, global_l) {
8548 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
8549 			   &c->src, c->src_type, &c->dst, c->dst_type,
8550 			   c->state, __le16_to_cpu(c->psm),
8551 			   c->scid, c->dcid, c->imtu, c->omtu,
8552 			   c->sec_level, c->mode);
8555 	read_unlock(&chan_list_lock);
8560 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
8562 static struct dentry *l2cap_debugfs;
/* Module init: register the socket interface and HCI callbacks, then
 * expose the channel list in debugfs (skipped when bt_debugfs is
 * unavailable). Returns 0 or the socket-init error.
 */
8564 int __init l2cap_init(void)
8568 	err = l2cap_init_sockets();
8572 	hci_register_cb(&l2cap_cb);
8574 	if (IS_ERR_OR_NULL(bt_debugfs))
8577 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
8578 					    NULL, &l2cap_debugfs_fops);
/* Module teardown: undo l2cap_init() in reverse order. */
8583 void l2cap_exit(void)
8585 	debugfs_remove(l2cap_debugfs);
8586 	hci_unregister_cb(&l2cap_cb);
8587 	l2cap_cleanup_sockets();
/* Runtime-tunable module parameters (0644: root-writable via sysfs). */
8590 module_param(disable_ertm, bool, 0644);
8591 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
8593 module_param(enable_ecred, bool, 0644);
8594 MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");