/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.
   Copyright (C) 2011 ProFUSION Embedded Systems
   Copyright (c) 2012 Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
45 #define LE_FLOWCTL_MAX_CREDITS 65535
49 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
51 static LIST_HEAD(chan_list);
52 static DEFINE_RWLOCK(chan_list_lock);
54 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
55 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
57 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
58 u8 code, u8 ident, u16 dlen, void *data);
59 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
61 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
62 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
64 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
65 struct sk_buff_head *skbs, u8 event);
67 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
69 if (link_type == LE_LINK) {
70 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
71 return BDADDR_LE_PUBLIC;
73 return BDADDR_LE_RANDOM;
79 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
81 return bdaddr_type(hcon->type, hcon->src_type);
84 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
86 return bdaddr_type(hcon->type, hcon->dst_type);
89 /* ---- L2CAP channels ---- */
91 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
96 list_for_each_entry(c, &conn->chan_l, list) {
103 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
106 struct l2cap_chan *c;
108 list_for_each_entry(c, &conn->chan_l, list) {
115 /* Find channel with given SCID.
116 * Returns a reference locked channel.
118 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
121 struct l2cap_chan *c;
123 mutex_lock(&conn->chan_lock);
124 c = __l2cap_get_chan_by_scid(conn, cid);
126 /* Only lock if chan reference is not 0 */
127 c = l2cap_chan_hold_unless_zero(c);
131 mutex_unlock(&conn->chan_lock);
136 /* Find channel with given DCID.
137 * Returns a reference locked channel.
139 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
142 struct l2cap_chan *c;
144 mutex_lock(&conn->chan_lock);
145 c = __l2cap_get_chan_by_dcid(conn, cid);
147 /* Only lock if chan reference is not 0 */
148 c = l2cap_chan_hold_unless_zero(c);
152 mutex_unlock(&conn->chan_lock);
157 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
160 struct l2cap_chan *c;
162 list_for_each_entry(c, &conn->chan_l, list) {
163 if (c->ident == ident)
169 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
172 struct l2cap_chan *c;
174 mutex_lock(&conn->chan_lock);
175 c = __l2cap_get_chan_by_ident(conn, ident);
177 /* Only lock if chan reference is not 0 */
178 c = l2cap_chan_hold_unless_zero(c);
182 mutex_unlock(&conn->chan_lock);
187 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
189 struct l2cap_chan *c;
191 list_for_each_entry(c, &chan_list, global_l) {
192 if (c->sport == psm && !bacmp(&c->src, src))
198 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
202 write_lock(&chan_list_lock);
204 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
214 u16 p, start, end, incr;
216 if (chan->src_type == BDADDR_BREDR) {
217 start = L2CAP_PSM_DYN_START;
218 end = L2CAP_PSM_AUTO_END;
221 start = L2CAP_PSM_LE_DYN_START;
222 end = L2CAP_PSM_LE_DYN_END;
227 for (p = start; p <= end; p += incr)
228 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
229 chan->psm = cpu_to_le16(p);
230 chan->sport = cpu_to_le16(p);
237 write_unlock(&chan_list_lock);
240 EXPORT_SYMBOL_GPL(l2cap_add_psm);
242 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
244 write_lock(&chan_list_lock);
246 /* Override the defaults (which are for conn-oriented) */
247 chan->omtu = L2CAP_DEFAULT_MTU;
248 chan->chan_type = L2CAP_CHAN_FIXED;
252 write_unlock(&chan_list_lock);
257 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
261 if (conn->hcon->type == LE_LINK)
262 dyn_end = L2CAP_CID_LE_DYN_END;
264 dyn_end = L2CAP_CID_DYN_END;
266 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
267 if (!__l2cap_get_chan_by_scid(conn, cid))
274 static void l2cap_state_change(struct l2cap_chan *chan, int state)
276 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
277 state_to_string(state));
280 chan->ops->state_change(chan, state, 0);
283 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
287 chan->ops->state_change(chan, chan->state, err);
290 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
292 chan->ops->state_change(chan, chan->state, err);
295 static void __set_retrans_timer(struct l2cap_chan *chan)
297 if (!delayed_work_pending(&chan->monitor_timer) &&
298 chan->retrans_timeout) {
299 l2cap_set_timer(chan, &chan->retrans_timer,
300 msecs_to_jiffies(chan->retrans_timeout));
304 static void __set_monitor_timer(struct l2cap_chan *chan)
306 __clear_retrans_timer(chan);
307 if (chan->monitor_timeout) {
308 l2cap_set_timer(chan, &chan->monitor_timer,
309 msecs_to_jiffies(chan->monitor_timeout));
313 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
318 skb_queue_walk(head, skb) {
319 if (bt_cb(skb)->l2cap.txseq == seq)
/* ---- L2CAP sequence number lists ---- */

/* For ERTM, ordered lists of sequence numbers must be tracked for
 * SREJ requests that are received and for frames that are to be
 * retransmitted. These seq_list functions implement a singly-linked
 * list in an array, where membership in the list can also be checked
 * in constant time. Items can also be added to the tail of the list
 * and removed from the head in constant time, without further memory
 * allocation.
 */
337 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
339 size_t alloc_size, i;
341 /* Allocated size is a power of 2 to map sequence numbers
342 * (which may be up to 14 bits) in to a smaller array that is
343 * sized for the negotiated ERTM transmit windows.
345 alloc_size = roundup_pow_of_two(size);
347 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
351 seq_list->mask = alloc_size - 1;
352 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
353 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
354 for (i = 0; i < alloc_size; i++)
355 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
360 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
362 kfree(seq_list->list);
365 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
368 /* Constant-time check for list membership */
369 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
372 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
374 u16 seq = seq_list->head;
375 u16 mask = seq_list->mask;
377 seq_list->head = seq_list->list[seq & mask];
378 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
380 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
381 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
382 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
388 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
392 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
395 for (i = 0; i <= seq_list->mask; i++)
396 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
398 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
399 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
402 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
404 u16 mask = seq_list->mask;
406 /* All appends happen in constant time */
408 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
411 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
412 seq_list->head = seq;
414 seq_list->list[seq_list->tail & mask] = seq;
416 seq_list->tail = seq;
417 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for chan_timer: close the channel with a reason
 * derived from its state (refused while connecting/configuring, timed out
 * otherwise). Drops the reference taken by __set_chan_timer().
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);
}
453 struct l2cap_chan *l2cap_chan_create(void)
455 struct l2cap_chan *chan;
457 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
461 skb_queue_head_init(&chan->tx_q);
462 skb_queue_head_init(&chan->srej_q);
463 mutex_init(&chan->lock);
465 /* Set default lock nesting level */
466 atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
468 write_lock(&chan_list_lock);
469 list_add(&chan->global_l, &chan_list);
470 write_unlock(&chan_list_lock);
472 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
474 chan->state = BT_OPEN;
476 kref_init(&chan->kref);
478 /* This flag is cleared in l2cap_chan_ready() */
479 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
481 BT_DBG("chan %p", chan);
485 EXPORT_SYMBOL_GPL(l2cap_chan_create);
487 static void l2cap_chan_destroy(struct kref *kref)
489 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
491 BT_DBG("chan %p", chan);
493 write_lock(&chan_list_lock);
494 list_del(&chan->global_l);
495 write_unlock(&chan_list_lock);
500 void l2cap_chan_hold(struct l2cap_chan *c)
502 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
507 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
509 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
511 if (!kref_get_unless_zero(&c->kref))
517 void l2cap_chan_put(struct l2cap_chan *c)
519 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
521 kref_put(&c->kref, l2cap_chan_destroy);
523 EXPORT_SYMBOL_GPL(l2cap_chan_put);
525 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
527 chan->fcs = L2CAP_FCS_CRC16;
528 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
529 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
530 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
531 chan->remote_max_tx = chan->max_tx;
532 chan->remote_tx_win = chan->tx_win;
533 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
534 chan->sec_level = BT_SECURITY_LOW;
535 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
536 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
537 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
539 chan->conf_state = 0;
540 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
542 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
544 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
546 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
549 chan->sdu_last_frag = NULL;
551 chan->tx_credits = 0;
552 chan->rx_credits = le_max_credits;
553 chan->mps = min_t(u16, chan->imtu, le_default_mps);
555 skb_queue_head_init(&chan->tx_q);
558 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
560 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
561 __le16_to_cpu(chan->psm), chan->dcid);
563 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
567 switch (chan->chan_type) {
568 case L2CAP_CHAN_CONN_ORIENTED:
569 /* Alloc CID for connection-oriented socket */
570 chan->scid = l2cap_alloc_cid(conn);
571 if (conn->hcon->type == ACL_LINK)
572 chan->omtu = L2CAP_DEFAULT_MTU;
575 case L2CAP_CHAN_CONN_LESS:
576 /* Connectionless socket */
577 chan->scid = L2CAP_CID_CONN_LESS;
578 chan->dcid = L2CAP_CID_CONN_LESS;
579 chan->omtu = L2CAP_DEFAULT_MTU;
582 case L2CAP_CHAN_FIXED:
583 /* Caller will set CID and CID specific MTU values */
587 /* Raw socket can send/recv signalling messages only */
588 chan->scid = L2CAP_CID_SIGNALING;
589 chan->dcid = L2CAP_CID_SIGNALING;
590 chan->omtu = L2CAP_DEFAULT_MTU;
593 chan->local_id = L2CAP_BESTEFFORT_ID;
594 chan->local_stype = L2CAP_SERV_BESTEFFORT;
595 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
596 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
597 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
598 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
600 l2cap_chan_hold(chan);
602 /* Only keep a reference for fixed channels if they requested it */
603 if (chan->chan_type != L2CAP_CHAN_FIXED ||
604 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
605 hci_conn_hold(conn->hcon);
607 list_add(&chan->list, &conn->chan_l);
610 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
612 mutex_lock(&conn->chan_lock);
613 __l2cap_chan_add(conn, chan);
614 mutex_unlock(&conn->chan_lock);
617 void l2cap_chan_del(struct l2cap_chan *chan, int err)
619 struct l2cap_conn *conn = chan->conn;
621 __clear_chan_timer(chan);
623 BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
624 state_to_string(chan->state));
626 chan->ops->teardown(chan, err);
629 struct amp_mgr *mgr = conn->hcon->amp_mgr;
630 /* Delete from channel list */
631 list_del(&chan->list);
633 l2cap_chan_put(chan);
637 /* Reference was only held for non-fixed channels or
638 * fixed channels that explicitly requested it using the
639 * FLAG_HOLD_HCI_CONN flag.
641 if (chan->chan_type != L2CAP_CHAN_FIXED ||
642 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
643 hci_conn_drop(conn->hcon);
645 if (mgr && mgr->bredr_chan == chan)
646 mgr->bredr_chan = NULL;
649 if (chan->hs_hchan) {
650 struct hci_chan *hs_hchan = chan->hs_hchan;
652 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
653 amp_disconnect_logical_link(hs_hchan);
656 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
660 case L2CAP_MODE_BASIC:
663 case L2CAP_MODE_LE_FLOWCTL:
664 skb_queue_purge(&chan->tx_q);
667 case L2CAP_MODE_ERTM:
668 __clear_retrans_timer(chan);
669 __clear_monitor_timer(chan);
670 __clear_ack_timer(chan);
672 skb_queue_purge(&chan->srej_q);
674 l2cap_seq_list_free(&chan->srej_list);
675 l2cap_seq_list_free(&chan->retrans_list);
679 case L2CAP_MODE_STREAMING:
680 skb_queue_purge(&chan->tx_q);
686 EXPORT_SYMBOL_GPL(l2cap_chan_del);
688 static void l2cap_conn_update_id_addr(struct work_struct *work)
690 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
691 id_addr_update_work);
692 struct hci_conn *hcon = conn->hcon;
693 struct l2cap_chan *chan;
695 mutex_lock(&conn->chan_lock);
697 list_for_each_entry(chan, &conn->chan_l, list) {
698 l2cap_chan_lock(chan);
699 bacpy(&chan->dst, &hcon->dst);
700 chan->dst_type = bdaddr_dst_type(hcon);
701 l2cap_chan_unlock(chan);
704 mutex_unlock(&conn->chan_lock);
707 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
709 struct l2cap_conn *conn = chan->conn;
710 struct l2cap_le_conn_rsp rsp;
713 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
714 result = L2CAP_CR_AUTHORIZATION;
716 result = L2CAP_CR_BAD_PSM;
718 l2cap_state_change(chan, BT_DISCONN);
720 rsp.dcid = cpu_to_le16(chan->scid);
721 rsp.mtu = cpu_to_le16(chan->imtu);
722 rsp.mps = cpu_to_le16(chan->mps);
723 rsp.credits = cpu_to_le16(chan->rx_credits);
724 rsp.result = cpu_to_le16(result);
726 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
730 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
732 struct l2cap_conn *conn = chan->conn;
733 struct l2cap_conn_rsp rsp;
736 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
737 result = L2CAP_CR_SEC_BLOCK;
739 result = L2CAP_CR_BAD_PSM;
741 l2cap_state_change(chan, BT_DISCONN);
743 rsp.scid = cpu_to_le16(chan->dcid);
744 rsp.dcid = cpu_to_le16(chan->scid);
745 rsp.result = cpu_to_le16(result);
746 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
748 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
751 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
753 struct l2cap_conn *conn = chan->conn;
755 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
757 switch (chan->state) {
759 chan->ops->teardown(chan, 0);
764 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
765 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
766 l2cap_send_disconn_req(chan, reason);
768 l2cap_chan_del(chan, reason);
772 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
773 if (conn->hcon->type == ACL_LINK)
774 l2cap_chan_connect_reject(chan);
775 else if (conn->hcon->type == LE_LINK)
776 l2cap_chan_le_connect_reject(chan);
779 l2cap_chan_del(chan, reason);
784 l2cap_chan_del(chan, reason);
788 chan->ops->teardown(chan, 0);
792 EXPORT_SYMBOL(l2cap_chan_close);
794 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
796 switch (chan->chan_type) {
798 switch (chan->sec_level) {
799 case BT_SECURITY_HIGH:
800 case BT_SECURITY_FIPS:
801 return HCI_AT_DEDICATED_BONDING_MITM;
802 case BT_SECURITY_MEDIUM:
803 return HCI_AT_DEDICATED_BONDING;
805 return HCI_AT_NO_BONDING;
808 case L2CAP_CHAN_CONN_LESS:
809 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
810 if (chan->sec_level == BT_SECURITY_LOW)
811 chan->sec_level = BT_SECURITY_SDP;
813 if (chan->sec_level == BT_SECURITY_HIGH ||
814 chan->sec_level == BT_SECURITY_FIPS)
815 return HCI_AT_NO_BONDING_MITM;
817 return HCI_AT_NO_BONDING;
819 case L2CAP_CHAN_CONN_ORIENTED:
820 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
821 if (chan->sec_level == BT_SECURITY_LOW)
822 chan->sec_level = BT_SECURITY_SDP;
824 if (chan->sec_level == BT_SECURITY_HIGH ||
825 chan->sec_level == BT_SECURITY_FIPS)
826 return HCI_AT_NO_BONDING_MITM;
828 return HCI_AT_NO_BONDING;
832 switch (chan->sec_level) {
833 case BT_SECURITY_HIGH:
834 case BT_SECURITY_FIPS:
835 return HCI_AT_GENERAL_BONDING_MITM;
836 case BT_SECURITY_MEDIUM:
837 return HCI_AT_GENERAL_BONDING;
839 return HCI_AT_NO_BONDING;
845 /* Service level security */
846 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
848 struct l2cap_conn *conn = chan->conn;
851 if (conn->hcon->type == LE_LINK)
852 return smp_conn_security(conn->hcon, chan->sec_level);
854 auth_type = l2cap_get_auth_type(chan);
856 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
860 static u8 l2cap_get_ident(struct l2cap_conn *conn)
864 /* Get next available identificator.
865 * 1 - 128 are used by kernel.
866 * 129 - 199 are reserved.
867 * 200 - 254 are used by utilities like l2ping, etc.
870 mutex_lock(&conn->ident_lock);
872 if (++conn->tx_ident > 128)
877 mutex_unlock(&conn->ident_lock);
882 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
885 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
888 BT_DBG("code 0x%2.2x", code);
893 /* Use NO_FLUSH if supported or we have an LE link (which does
894 * not support auto-flushing packets) */
895 if (lmp_no_flush_capable(conn->hcon->hdev) ||
896 conn->hcon->type == LE_LINK)
897 flags = ACL_START_NO_FLUSH;
901 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
902 skb->priority = HCI_PRIO_MAX;
904 hci_send_acl(conn->hchan, skb, flags);
907 static bool __chan_is_moving(struct l2cap_chan *chan)
909 return chan->move_state != L2CAP_MOVE_STABLE &&
910 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
913 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
915 struct hci_conn *hcon = chan->conn->hcon;
918 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
921 if (chan->hs_hcon && !__chan_is_moving(chan)) {
923 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
930 /* Use NO_FLUSH for LE links (where this is the only option) or
931 * if the BR/EDR link supports it and flushing has not been
932 * explicitly requested (through FLAG_FLUSHABLE).
934 if (hcon->type == LE_LINK ||
935 (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
936 lmp_no_flush_capable(hcon->hdev)))
937 flags = ACL_START_NO_FLUSH;
941 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
942 hci_send_acl(chan->conn->hchan, skb, flags);
945 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
947 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
948 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
950 if (enh & L2CAP_CTRL_FRAME_TYPE) {
953 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
954 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
961 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
962 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
969 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
971 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
972 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
974 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
977 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
978 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
985 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
986 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
993 static inline void __unpack_control(struct l2cap_chan *chan,
996 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
997 __unpack_extended_control(get_unaligned_le32(skb->data),
999 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1001 __unpack_enhanced_control(get_unaligned_le16(skb->data),
1002 &bt_cb(skb)->l2cap);
1003 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1007 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1011 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1012 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1014 if (control->sframe) {
1015 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1016 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1017 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1019 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1020 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1026 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1030 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1031 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1033 if (control->sframe) {
1034 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1035 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1036 packed |= L2CAP_CTRL_FRAME_TYPE;
1038 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1039 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1045 static inline void __pack_control(struct l2cap_chan *chan,
1046 struct l2cap_ctrl *control,
1047 struct sk_buff *skb)
1049 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1050 put_unaligned_le32(__pack_extended_control(control),
1051 skb->data + L2CAP_HDR_SIZE);
1053 put_unaligned_le16(__pack_enhanced_control(control),
1054 skb->data + L2CAP_HDR_SIZE);
1058 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1060 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1061 return L2CAP_EXT_HDR_SIZE;
1063 return L2CAP_ENH_HDR_SIZE;
1066 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1069 struct sk_buff *skb;
1070 struct l2cap_hdr *lh;
1071 int hlen = __ertm_hdr_size(chan);
1073 if (chan->fcs == L2CAP_FCS_CRC16)
1074 hlen += L2CAP_FCS_SIZE;
1076 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1079 return ERR_PTR(-ENOMEM);
1081 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1082 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1083 lh->cid = cpu_to_le16(chan->dcid);
1085 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1086 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1088 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1090 if (chan->fcs == L2CAP_FCS_CRC16) {
1091 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1092 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1095 skb->priority = HCI_PRIO_MAX;
1099 static void l2cap_send_sframe(struct l2cap_chan *chan,
1100 struct l2cap_ctrl *control)
1102 struct sk_buff *skb;
1105 BT_DBG("chan %p, control %p", chan, control);
1107 if (!control->sframe)
1110 if (__chan_is_moving(chan))
1113 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1117 if (control->super == L2CAP_SUPER_RR)
1118 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1119 else if (control->super == L2CAP_SUPER_RNR)
1120 set_bit(CONN_RNR_SENT, &chan->conn_state);
1122 if (control->super != L2CAP_SUPER_SREJ) {
1123 chan->last_acked_seq = control->reqseq;
1124 __clear_ack_timer(chan);
1127 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1128 control->final, control->poll, control->super);
1130 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1131 control_field = __pack_extended_control(control);
1133 control_field = __pack_enhanced_control(control);
1135 skb = l2cap_create_sframe_pdu(chan, control_field);
1137 l2cap_do_send(chan, skb);
1140 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1142 struct l2cap_ctrl control;
1144 BT_DBG("chan %p, poll %d", chan, poll);
1146 memset(&control, 0, sizeof(control));
1148 control.poll = poll;
1150 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1151 control.super = L2CAP_SUPER_RNR;
1153 control.super = L2CAP_SUPER_RR;
1155 control.reqseq = chan->buffer_seq;
1156 l2cap_send_sframe(chan, &control);
1159 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1161 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1164 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1167 static bool __amp_capable(struct l2cap_chan *chan)
1169 struct l2cap_conn *conn = chan->conn;
1170 struct hci_dev *hdev;
1171 bool amp_available = false;
1173 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1176 if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1179 read_lock(&hci_dev_list_lock);
1180 list_for_each_entry(hdev, &hci_dev_list, list) {
1181 if (hdev->amp_type != AMP_TYPE_BREDR &&
1182 test_bit(HCI_UP, &hdev->flags)) {
1183 amp_available = true;
1187 read_unlock(&hci_dev_list_lock);
1189 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1190 return amp_available;
1195 static bool l2cap_check_efs(struct l2cap_chan *chan)
1197 /* Check EFS parameters */
1201 void l2cap_send_conn_req(struct l2cap_chan *chan)
1203 struct l2cap_conn *conn = chan->conn;
1204 struct l2cap_conn_req req;
1206 req.scid = cpu_to_le16(chan->scid);
1207 req.psm = chan->psm;
1209 chan->ident = l2cap_get_ident(conn);
1211 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1213 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1216 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1218 struct l2cap_create_chan_req req;
1219 req.scid = cpu_to_le16(chan->scid);
1220 req.psm = chan->psm;
1221 req.amp_id = amp_id;
1223 chan->ident = l2cap_get_ident(chan->conn);
1225 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1229 static void l2cap_move_setup(struct l2cap_chan *chan)
1231 struct sk_buff *skb;
1233 BT_DBG("chan %p", chan);
1235 if (chan->mode != L2CAP_MODE_ERTM)
1238 __clear_retrans_timer(chan);
1239 __clear_monitor_timer(chan);
1240 __clear_ack_timer(chan);
1242 chan->retry_count = 0;
1243 skb_queue_walk(&chan->tx_q, skb) {
1244 if (bt_cb(skb)->l2cap.retries)
1245 bt_cb(skb)->l2cap.retries = 1;
1250 chan->expected_tx_seq = chan->buffer_seq;
1252 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1253 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1254 l2cap_seq_list_clear(&chan->retrans_list);
1255 l2cap_seq_list_clear(&chan->srej_list);
1256 skb_queue_purge(&chan->srej_q);
1258 chan->tx_state = L2CAP_TX_STATE_XMIT;
1259 chan->rx_state = L2CAP_RX_STATE_MOVE;
1261 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1264 static void l2cap_move_done(struct l2cap_chan *chan)
1266 u8 move_role = chan->move_role;
1267 BT_DBG("chan %p", chan);
1269 chan->move_state = L2CAP_MOVE_STABLE;
1270 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1272 if (chan->mode != L2CAP_MODE_ERTM)
1275 switch (move_role) {
1276 case L2CAP_MOVE_ROLE_INITIATOR:
1277 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1278 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1280 case L2CAP_MOVE_ROLE_RESPONDER:
1281 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1286 static void l2cap_chan_ready(struct l2cap_chan *chan)
1288 /* The channel may have already been flagged as connected in
1289 * case of receiving data before the L2CAP info req/rsp
1290 * procedure is complete.
1292 if (chan->state == BT_CONNECTED)
1295 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1296 chan->conf_state = 0;
1297 __clear_chan_timer(chan);
1299 if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1300 chan->ops->suspend(chan);
1302 chan->state = BT_CONNECTED;
1304 chan->ops->ready(chan);
1307 static void l2cap_le_connect(struct l2cap_chan *chan)
1309 struct l2cap_conn *conn = chan->conn;
1310 struct l2cap_le_conn_req req;
1312 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1315 req.psm = chan->psm;
1316 req.scid = cpu_to_le16(chan->scid);
1317 req.mtu = cpu_to_le16(chan->imtu);
1318 req.mps = cpu_to_le16(chan->mps);
1319 req.credits = cpu_to_le16(chan->rx_credits);
1321 chan->ident = l2cap_get_ident(conn);
1323 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1327 static void l2cap_le_start(struct l2cap_chan *chan)
1329 struct l2cap_conn *conn = chan->conn;
1331 if (!smp_conn_security(conn->hcon, chan->sec_level))
1335 l2cap_chan_ready(chan);
1339 if (chan->state == BT_CONNECT)
1340 l2cap_le_connect(chan);
1343 static void l2cap_start_connection(struct l2cap_chan *chan)
1345 if (__amp_capable(chan)) {
1346 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1347 a2mp_discover_amp(chan);
1348 } else if (chan->conn->hcon->type == LE_LINK) {
1349 l2cap_le_start(chan);
1351 l2cap_send_conn_req(chan);
1355 static void l2cap_request_info(struct l2cap_conn *conn)
1357 struct l2cap_info_req req;
1359 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1362 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1364 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1365 conn->info_ident = l2cap_get_ident(conn);
1367 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1369 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1373 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1375 /* The minimum encryption key size needs to be enforced by the
1376 * host stack before establishing any L2CAP connections. The
1377 * specification in theory allows a minimum of 1, but to align
1378 * BR/EDR and LE transports, a minimum of 7 is chosen.
1380 * This check might also be called for unencrypted connections
1381 * that have no key size requirements. Ensure that the link is
1382 * actually encrypted before enforcing a key size.
1384 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1385 hcon->enc_key_size >= HCI_MIN_ENC_KEY_SIZE);
1388 static void l2cap_do_start(struct l2cap_chan *chan)
1390 struct l2cap_conn *conn = chan->conn;
1392 if (conn->hcon->type == LE_LINK) {
1393 l2cap_le_start(chan);
1397 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1398 l2cap_request_info(conn);
1402 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1405 if (!l2cap_chan_check_security(chan, true) ||
1406 !__l2cap_no_conn_pending(chan))
1409 if (l2cap_check_enc_key_size(conn->hcon))
1410 l2cap_start_connection(chan);
1412 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
1415 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1417 u32 local_feat_mask = l2cap_feat_mask;
1419 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1422 case L2CAP_MODE_ERTM:
1423 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1424 case L2CAP_MODE_STREAMING:
1425 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1431 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1433 struct l2cap_conn *conn = chan->conn;
1434 struct l2cap_disconn_req req;
1439 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1440 __clear_retrans_timer(chan);
1441 __clear_monitor_timer(chan);
1442 __clear_ack_timer(chan);
1445 if (chan->scid == L2CAP_CID_A2MP) {
1446 l2cap_state_change(chan, BT_DISCONN);
1450 req.dcid = cpu_to_le16(chan->dcid);
1451 req.scid = cpu_to_le16(chan->scid);
1452 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1455 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1458 /* ---- L2CAP connections ---- */
1459 static void l2cap_conn_start(struct l2cap_conn *conn)
1461 struct l2cap_chan *chan, *tmp;
1463 BT_DBG("conn %p", conn);
1465 mutex_lock(&conn->chan_lock);
1467 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1468 l2cap_chan_lock(chan);
1470 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1471 l2cap_chan_ready(chan);
1472 l2cap_chan_unlock(chan);
1476 if (chan->state == BT_CONNECT) {
1477 if (!l2cap_chan_check_security(chan, true) ||
1478 !__l2cap_no_conn_pending(chan)) {
1479 l2cap_chan_unlock(chan);
1483 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1484 && test_bit(CONF_STATE2_DEVICE,
1485 &chan->conf_state)) {
1486 l2cap_chan_close(chan, ECONNRESET);
1487 l2cap_chan_unlock(chan);
1491 if (l2cap_check_enc_key_size(conn->hcon))
1492 l2cap_start_connection(chan);
1494 l2cap_chan_close(chan, ECONNREFUSED);
1496 } else if (chan->state == BT_CONNECT2) {
1497 struct l2cap_conn_rsp rsp;
1499 rsp.scid = cpu_to_le16(chan->dcid);
1500 rsp.dcid = cpu_to_le16(chan->scid);
1502 if (l2cap_chan_check_security(chan, false)) {
1503 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1504 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1505 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1506 chan->ops->defer(chan);
1509 l2cap_state_change(chan, BT_CONFIG);
1510 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1511 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1514 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1515 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1518 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1521 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1522 rsp.result != L2CAP_CR_SUCCESS) {
1523 l2cap_chan_unlock(chan);
1527 set_bit(CONF_REQ_SENT, &chan->conf_state);
1528 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1529 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1530 chan->num_conf_req++;
1533 l2cap_chan_unlock(chan);
1536 mutex_unlock(&conn->chan_lock);
1539 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1541 struct hci_conn *hcon = conn->hcon;
1542 struct hci_dev *hdev = hcon->hdev;
1544 BT_DBG("%s conn %p", hdev->name, conn);
1546 /* For outgoing pairing which doesn't necessarily have an
1547 * associated socket (e.g. mgmt_pair_device).
1550 smp_conn_security(hcon, hcon->pending_sec_level);
1552 /* For LE slave connections, make sure the connection interval
1553 * is in the range of the minium and maximum interval that has
1554 * been configured for this connection. If not, then trigger
1555 * the connection update procedure.
1557 if (hcon->role == HCI_ROLE_SLAVE &&
1558 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1559 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1560 struct l2cap_conn_param_update_req req;
1562 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1563 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1564 req.latency = cpu_to_le16(hcon->le_conn_latency);
1565 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1567 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1568 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1572 static void l2cap_conn_ready(struct l2cap_conn *conn)
1574 struct l2cap_chan *chan;
1575 struct hci_conn *hcon = conn->hcon;
1577 BT_DBG("conn %p", conn);
1579 if (hcon->type == ACL_LINK)
1580 l2cap_request_info(conn);
1582 mutex_lock(&conn->chan_lock);
1584 list_for_each_entry(chan, &conn->chan_l, list) {
1586 l2cap_chan_lock(chan);
1588 if (chan->scid == L2CAP_CID_A2MP) {
1589 l2cap_chan_unlock(chan);
1593 if (hcon->type == LE_LINK) {
1594 l2cap_le_start(chan);
1595 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1596 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1597 l2cap_chan_ready(chan);
1598 } else if (chan->state == BT_CONNECT) {
1599 l2cap_do_start(chan);
1602 l2cap_chan_unlock(chan);
1605 mutex_unlock(&conn->chan_lock);
1607 if (hcon->type == LE_LINK)
1608 l2cap_le_conn_ready(conn);
1610 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1613 /* Notify sockets that we cannot guaranty reliability anymore */
1614 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1616 struct l2cap_chan *chan;
1618 BT_DBG("conn %p", conn);
1620 mutex_lock(&conn->chan_lock);
1622 list_for_each_entry(chan, &conn->chan_l, list) {
1623 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1624 l2cap_chan_set_err(chan, err);
1627 mutex_unlock(&conn->chan_lock);
1630 static void l2cap_info_timeout(struct work_struct *work)
1632 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1635 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1636 conn->info_ident = 0;
1638 l2cap_conn_start(conn);
1643 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1644 * callback is called during registration. The ->remove callback is called
1645 * during unregistration.
An l2cap_user object is unregistered either explicitly, or implicitly when
the underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1648 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1649 * External modules must own a reference to the l2cap_conn object if they intend
1650 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1651 * any time if they don't.
1654 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1656 struct hci_dev *hdev = conn->hcon->hdev;
1659 /* We need to check whether l2cap_conn is registered. If it is not, we
1660 * must not register the l2cap_user. l2cap_conn_del() is unregisters
1661 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1662 * relies on the parent hci_conn object to be locked. This itself relies
1663 * on the hci_dev object to be locked. So we must lock the hci device
1668 if (!list_empty(&user->list)) {
1673 /* conn->hchan is NULL after l2cap_conn_del() was called */
1679 ret = user->probe(conn, user);
1683 list_add(&user->list, &conn->users);
1687 hci_dev_unlock(hdev);
1690 EXPORT_SYMBOL(l2cap_register_user);
1692 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1694 struct hci_dev *hdev = conn->hcon->hdev;
1698 if (list_empty(&user->list))
1701 list_del_init(&user->list);
1702 user->remove(conn, user);
1705 hci_dev_unlock(hdev);
1707 EXPORT_SYMBOL(l2cap_unregister_user);
1709 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1711 struct l2cap_user *user;
1713 while (!list_empty(&conn->users)) {
1714 user = list_first_entry(&conn->users, struct l2cap_user, list);
1715 list_del_init(&user->list);
1716 user->remove(conn, user);
1720 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1722 struct l2cap_conn *conn = hcon->l2cap_data;
1723 struct l2cap_chan *chan, *l;
1728 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1730 kfree_skb(conn->rx_skb);
1732 skb_queue_purge(&conn->pending_rx);
1734 /* We can not call flush_work(&conn->pending_rx_work) here since we
1735 * might block if we are running on a worker from the same workqueue
1736 * pending_rx_work is waiting on.
1738 if (work_pending(&conn->pending_rx_work))
1739 cancel_work_sync(&conn->pending_rx_work);
1741 if (work_pending(&conn->id_addr_update_work))
1742 cancel_work_sync(&conn->id_addr_update_work);
1744 l2cap_unregister_all_users(conn);
1746 /* Force the connection to be immediately dropped */
1747 hcon->disc_timeout = 0;
1749 mutex_lock(&conn->chan_lock);
1752 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1753 l2cap_chan_hold(chan);
1754 l2cap_chan_lock(chan);
1756 l2cap_chan_del(chan, err);
1758 chan->ops->close(chan);
1760 l2cap_chan_unlock(chan);
1761 l2cap_chan_put(chan);
1764 mutex_unlock(&conn->chan_lock);
1766 hci_chan_del(conn->hchan);
1768 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1769 cancel_delayed_work_sync(&conn->info_timer);
1771 hcon->l2cap_data = NULL;
1773 l2cap_conn_put(conn);
1776 static void l2cap_conn_free(struct kref *ref)
1778 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1780 hci_conn_put(conn->hcon);
1784 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1786 kref_get(&conn->ref);
1789 EXPORT_SYMBOL(l2cap_conn_get);
1791 void l2cap_conn_put(struct l2cap_conn *conn)
1793 kref_put(&conn->ref, l2cap_conn_free);
1795 EXPORT_SYMBOL(l2cap_conn_put);
1797 /* ---- Socket interface ---- */
1799 /* Find socket with psm and source / destination bdaddr.
1800 * Returns closest match.
1802 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1807 struct l2cap_chan *c, *tmp, *c1 = NULL;
1809 read_lock(&chan_list_lock);
1811 list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
1812 if (state && c->state != state)
1815 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1818 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1821 if (c->psm == psm) {
1822 int src_match, dst_match;
1823 int src_any, dst_any;
1826 src_match = !bacmp(&c->src, src);
1827 dst_match = !bacmp(&c->dst, dst);
1828 if (src_match && dst_match) {
1829 c = l2cap_chan_hold_unless_zero(c);
1831 read_unlock(&chan_list_lock);
1837 src_any = !bacmp(&c->src, BDADDR_ANY);
1838 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1839 if ((src_match && dst_any) || (src_any && dst_match) ||
1840 (src_any && dst_any))
1846 c1 = l2cap_chan_hold_unless_zero(c1);
1848 read_unlock(&chan_list_lock);
1853 static void l2cap_monitor_timeout(struct work_struct *work)
1855 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1856 monitor_timer.work);
1858 BT_DBG("chan %p", chan);
1860 l2cap_chan_lock(chan);
1863 l2cap_chan_unlock(chan);
1864 l2cap_chan_put(chan);
1868 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1870 l2cap_chan_unlock(chan);
1871 l2cap_chan_put(chan);
1874 static void l2cap_retrans_timeout(struct work_struct *work)
1876 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1877 retrans_timer.work);
1879 BT_DBG("chan %p", chan);
1881 l2cap_chan_lock(chan);
1884 l2cap_chan_unlock(chan);
1885 l2cap_chan_put(chan);
1889 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1890 l2cap_chan_unlock(chan);
1891 l2cap_chan_put(chan);
1894 static void l2cap_streaming_send(struct l2cap_chan *chan,
1895 struct sk_buff_head *skbs)
1897 struct sk_buff *skb;
1898 struct l2cap_ctrl *control;
1900 BT_DBG("chan %p, skbs %p", chan, skbs);
1902 if (__chan_is_moving(chan))
1905 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1907 while (!skb_queue_empty(&chan->tx_q)) {
1909 skb = skb_dequeue(&chan->tx_q);
1911 bt_cb(skb)->l2cap.retries = 1;
1912 control = &bt_cb(skb)->l2cap;
1914 control->reqseq = 0;
1915 control->txseq = chan->next_tx_seq;
1917 __pack_control(chan, control, skb);
1919 if (chan->fcs == L2CAP_FCS_CRC16) {
1920 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1921 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1924 l2cap_do_send(chan, skb);
1926 BT_DBG("Sent txseq %u", control->txseq);
1928 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1929 chan->frames_sent++;
1933 static int l2cap_ertm_send(struct l2cap_chan *chan)
1935 struct sk_buff *skb, *tx_skb;
1936 struct l2cap_ctrl *control;
1939 BT_DBG("chan %p", chan);
1941 if (chan->state != BT_CONNECTED)
1944 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1947 if (__chan_is_moving(chan))
1950 while (chan->tx_send_head &&
1951 chan->unacked_frames < chan->remote_tx_win &&
1952 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1954 skb = chan->tx_send_head;
1956 bt_cb(skb)->l2cap.retries = 1;
1957 control = &bt_cb(skb)->l2cap;
1959 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1962 control->reqseq = chan->buffer_seq;
1963 chan->last_acked_seq = chan->buffer_seq;
1964 control->txseq = chan->next_tx_seq;
1966 __pack_control(chan, control, skb);
1968 if (chan->fcs == L2CAP_FCS_CRC16) {
1969 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1970 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1973 /* Clone after data has been modified. Data is assumed to be
1974 read-only (for locking purposes) on cloned sk_buffs.
1976 tx_skb = skb_clone(skb, GFP_KERNEL);
1981 __set_retrans_timer(chan);
1983 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1984 chan->unacked_frames++;
1985 chan->frames_sent++;
1988 if (skb_queue_is_last(&chan->tx_q, skb))
1989 chan->tx_send_head = NULL;
1991 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1993 l2cap_do_send(chan, tx_skb);
1994 BT_DBG("Sent txseq %u", control->txseq);
1997 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1998 chan->unacked_frames, skb_queue_len(&chan->tx_q));
2003 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2005 struct l2cap_ctrl control;
2006 struct sk_buff *skb;
2007 struct sk_buff *tx_skb;
2010 BT_DBG("chan %p", chan);
2012 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2015 if (__chan_is_moving(chan))
2018 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2019 seq = l2cap_seq_list_pop(&chan->retrans_list);
2021 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2023 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2028 bt_cb(skb)->l2cap.retries++;
2029 control = bt_cb(skb)->l2cap;
2031 if (chan->max_tx != 0 &&
2032 bt_cb(skb)->l2cap.retries > chan->max_tx) {
2033 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2034 l2cap_send_disconn_req(chan, ECONNRESET);
2035 l2cap_seq_list_clear(&chan->retrans_list);
2039 control.reqseq = chan->buffer_seq;
2040 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2045 if (skb_cloned(skb)) {
2046 /* Cloned sk_buffs are read-only, so we need a
2049 tx_skb = skb_copy(skb, GFP_KERNEL);
2051 tx_skb = skb_clone(skb, GFP_KERNEL);
2055 l2cap_seq_list_clear(&chan->retrans_list);
2059 /* Update skb contents */
2060 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2061 put_unaligned_le32(__pack_extended_control(&control),
2062 tx_skb->data + L2CAP_HDR_SIZE);
2064 put_unaligned_le16(__pack_enhanced_control(&control),
2065 tx_skb->data + L2CAP_HDR_SIZE);
2069 if (chan->fcs == L2CAP_FCS_CRC16) {
2070 u16 fcs = crc16(0, (u8 *) tx_skb->data,
2071 tx_skb->len - L2CAP_FCS_SIZE);
2072 put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2076 l2cap_do_send(chan, tx_skb);
2078 BT_DBG("Resent txseq %d", control.txseq);
2080 chan->last_acked_seq = chan->buffer_seq;
2084 static void l2cap_retransmit(struct l2cap_chan *chan,
2085 struct l2cap_ctrl *control)
2087 BT_DBG("chan %p, control %p", chan, control);
2089 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2090 l2cap_ertm_resend(chan);
2093 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2094 struct l2cap_ctrl *control)
2096 struct sk_buff *skb;
2098 BT_DBG("chan %p, control %p", chan, control);
2101 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2103 l2cap_seq_list_clear(&chan->retrans_list);
2105 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2108 if (chan->unacked_frames) {
2109 skb_queue_walk(&chan->tx_q, skb) {
2110 if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2111 skb == chan->tx_send_head)
2115 skb_queue_walk_from(&chan->tx_q, skb) {
2116 if (skb == chan->tx_send_head)
2119 l2cap_seq_list_append(&chan->retrans_list,
2120 bt_cb(skb)->l2cap.txseq);
2123 l2cap_ertm_resend(chan);
2127 static void l2cap_send_ack(struct l2cap_chan *chan)
2129 struct l2cap_ctrl control;
2130 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2131 chan->last_acked_seq);
2134 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2135 chan, chan->last_acked_seq, chan->buffer_seq);
2137 memset(&control, 0, sizeof(control));
2140 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2141 chan->rx_state == L2CAP_RX_STATE_RECV) {
2142 __clear_ack_timer(chan);
2143 control.super = L2CAP_SUPER_RNR;
2144 control.reqseq = chan->buffer_seq;
2145 l2cap_send_sframe(chan, &control);
2147 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2148 l2cap_ertm_send(chan);
2149 /* If any i-frames were sent, they included an ack */
2150 if (chan->buffer_seq == chan->last_acked_seq)
2154 /* Ack now if the window is 3/4ths full.
2155 * Calculate without mul or div
2157 threshold = chan->ack_win;
2158 threshold += threshold << 1;
2161 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2164 if (frames_to_ack >= threshold) {
2165 __clear_ack_timer(chan);
2166 control.super = L2CAP_SUPER_RR;
2167 control.reqseq = chan->buffer_seq;
2168 l2cap_send_sframe(chan, &control);
2173 __set_ack_timer(chan);
2177 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2178 struct msghdr *msg, int len,
2179 int count, struct sk_buff *skb)
2181 struct l2cap_conn *conn = chan->conn;
2182 struct sk_buff **frag;
2185 if (copy_from_iter(skb_put(skb, count), count, &msg->msg_iter) != count)
2191 /* Continuation fragments (no L2CAP header) */
2192 frag = &skb_shinfo(skb)->frag_list;
2194 struct sk_buff *tmp;
2196 count = min_t(unsigned int, conn->mtu, len);
2198 tmp = chan->ops->alloc_skb(chan, 0, count,
2199 msg->msg_flags & MSG_DONTWAIT);
2201 return PTR_ERR(tmp);
2205 if (copy_from_iter(skb_put(*frag, count), count,
2206 &msg->msg_iter) != count)
2212 skb->len += (*frag)->len;
2213 skb->data_len += (*frag)->len;
2215 frag = &(*frag)->next;
2221 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2222 struct msghdr *msg, size_t len)
2224 struct l2cap_conn *conn = chan->conn;
2225 struct sk_buff *skb;
2226 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2227 struct l2cap_hdr *lh;
2229 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2230 __le16_to_cpu(chan->psm), len);
2232 count = min_t(unsigned int, (conn->mtu - hlen), len);
2234 skb = chan->ops->alloc_skb(chan, hlen, count,
2235 msg->msg_flags & MSG_DONTWAIT);
2239 /* Create L2CAP header */
2240 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2241 lh->cid = cpu_to_le16(chan->dcid);
2242 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2243 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2245 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2246 if (unlikely(err < 0)) {
2248 return ERR_PTR(err);
2253 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2254 struct msghdr *msg, size_t len)
2256 struct l2cap_conn *conn = chan->conn;
2257 struct sk_buff *skb;
2259 struct l2cap_hdr *lh;
2261 BT_DBG("chan %p len %zu", chan, len);
2263 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2265 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2266 msg->msg_flags & MSG_DONTWAIT);
2270 /* Create L2CAP header */
2271 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2272 lh->cid = cpu_to_le16(chan->dcid);
2273 lh->len = cpu_to_le16(len);
2275 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2276 if (unlikely(err < 0)) {
2278 return ERR_PTR(err);
2283 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2284 struct msghdr *msg, size_t len,
2287 struct l2cap_conn *conn = chan->conn;
2288 struct sk_buff *skb;
2289 int err, count, hlen;
2290 struct l2cap_hdr *lh;
2292 BT_DBG("chan %p len %zu", chan, len);
2295 return ERR_PTR(-ENOTCONN);
2297 hlen = __ertm_hdr_size(chan);
2300 hlen += L2CAP_SDULEN_SIZE;
2302 if (chan->fcs == L2CAP_FCS_CRC16)
2303 hlen += L2CAP_FCS_SIZE;
2305 count = min_t(unsigned int, (conn->mtu - hlen), len);
2307 skb = chan->ops->alloc_skb(chan, hlen, count,
2308 msg->msg_flags & MSG_DONTWAIT);
2312 /* Create L2CAP header */
2313 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2314 lh->cid = cpu_to_le16(chan->dcid);
2315 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2317 /* Control header is populated later */
2318 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2319 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2321 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2324 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2326 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2327 if (unlikely(err < 0)) {
2329 return ERR_PTR(err);
2332 bt_cb(skb)->l2cap.fcs = chan->fcs;
2333 bt_cb(skb)->l2cap.retries = 0;
2337 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2338 struct sk_buff_head *seg_queue,
2339 struct msghdr *msg, size_t len)
2341 struct sk_buff *skb;
2346 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2348 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2349 * so fragmented skbs are not used. The HCI layer's handling
2350 * of fragmented skbs is not compatible with ERTM's queueing.
2353 /* PDU size is derived from the HCI MTU */
2354 pdu_len = chan->conn->mtu;
2356 /* Constrain PDU size for BR/EDR connections */
2358 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2360 /* Adjust for largest possible L2CAP overhead. */
2362 pdu_len -= L2CAP_FCS_SIZE;
2364 pdu_len -= __ertm_hdr_size(chan);
2366 /* Remote device may have requested smaller PDUs */
2367 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2369 if (len <= pdu_len) {
2370 sar = L2CAP_SAR_UNSEGMENTED;
2374 sar = L2CAP_SAR_START;
2379 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2382 __skb_queue_purge(seg_queue);
2383 return PTR_ERR(skb);
2386 bt_cb(skb)->l2cap.sar = sar;
2387 __skb_queue_tail(seg_queue, skb);
2393 if (len <= pdu_len) {
2394 sar = L2CAP_SAR_END;
2397 sar = L2CAP_SAR_CONTINUE;
2404 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2406 size_t len, u16 sdulen)
2408 struct l2cap_conn *conn = chan->conn;
2409 struct sk_buff *skb;
2410 int err, count, hlen;
2411 struct l2cap_hdr *lh;
2413 BT_DBG("chan %p len %zu", chan, len);
2416 return ERR_PTR(-ENOTCONN);
2418 hlen = L2CAP_HDR_SIZE;
2421 hlen += L2CAP_SDULEN_SIZE;
2423 count = min_t(unsigned int, (conn->mtu - hlen), len);
2425 skb = chan->ops->alloc_skb(chan, hlen, count,
2426 msg->msg_flags & MSG_DONTWAIT);
2430 /* Create L2CAP header */
2431 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2432 lh->cid = cpu_to_le16(chan->dcid);
2433 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2436 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2438 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2439 if (unlikely(err < 0)) {
2441 return ERR_PTR(err);
2447 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2448 struct sk_buff_head *seg_queue,
2449 struct msghdr *msg, size_t len)
2451 struct sk_buff *skb;
2455 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2458 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2464 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2466 __skb_queue_purge(seg_queue);
2467 return PTR_ERR(skb);
2470 __skb_queue_tail(seg_queue, skb);
2476 pdu_len += L2CAP_SDULEN_SIZE;
2483 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2485 struct sk_buff *skb;
2487 struct sk_buff_head seg_queue;
2492 /* Connectionless channel */
2493 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2494 skb = l2cap_create_connless_pdu(chan, msg, len);
2496 return PTR_ERR(skb);
2498 /* Channel lock is released before requesting new skb and then
2499 * reacquired thus we need to recheck channel state.
2501 if (chan->state != BT_CONNECTED) {
2506 l2cap_do_send(chan, skb);
2510 switch (chan->mode) {
2511 case L2CAP_MODE_LE_FLOWCTL:
2512 /* Check outgoing MTU */
2513 if (len > chan->omtu)
2516 if (!chan->tx_credits)
2519 __skb_queue_head_init(&seg_queue);
2521 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2523 if (chan->state != BT_CONNECTED) {
2524 __skb_queue_purge(&seg_queue);
2531 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2533 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2534 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2538 if (!chan->tx_credits)
2539 chan->ops->suspend(chan);
2545 case L2CAP_MODE_BASIC:
2546 /* Check outgoing MTU */
2547 if (len > chan->omtu)
2550 /* Create a basic PDU */
2551 skb = l2cap_create_basic_pdu(chan, msg, len);
2553 return PTR_ERR(skb);
2555 /* Channel lock is released before requesting new skb and then
2556 * reacquired thus we need to recheck channel state.
2558 if (chan->state != BT_CONNECTED) {
2563 l2cap_do_send(chan, skb);
2567 case L2CAP_MODE_ERTM:
2568 case L2CAP_MODE_STREAMING:
2569 /* Check outgoing MTU */
2570 if (len > chan->omtu) {
2575 __skb_queue_head_init(&seg_queue);
2577 /* Do segmentation before calling in to the state machine,
2578 * since it's possible to block while waiting for memory
2581 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2583 /* The channel could have been closed while segmenting,
2584 * check that it is still connected.
2586 if (chan->state != BT_CONNECTED) {
2587 __skb_queue_purge(&seg_queue);
2594 if (chan->mode == L2CAP_MODE_ERTM)
2595 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2597 l2cap_streaming_send(chan, &seg_queue);
2601 /* If the skbs were not queued for sending, they'll still be in
2602 * seg_queue and need to be purged.
2604 __skb_queue_purge(&seg_queue);
2608 BT_DBG("bad state %1.1x", chan->mode);
2614 EXPORT_SYMBOL_GPL(l2cap_chan_send);
2616 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2618 struct l2cap_ctrl control;
2621 BT_DBG("chan %p, txseq %u", chan, txseq);
2623 memset(&control, 0, sizeof(control));
2625 control.super = L2CAP_SUPER_SREJ;
2627 for (seq = chan->expected_tx_seq; seq != txseq;
2628 seq = __next_seq(chan, seq)) {
2629 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2630 control.reqseq = seq;
2631 l2cap_send_sframe(chan, &control);
2632 l2cap_seq_list_append(&chan->srej_list, seq);
2636 chan->expected_tx_seq = __next_seq(chan, txseq);
2639 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2641 struct l2cap_ctrl control;
2643 BT_DBG("chan %p", chan);
2645 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2648 memset(&control, 0, sizeof(control));
2650 control.super = L2CAP_SUPER_SREJ;
2651 control.reqseq = chan->srej_list.tail;
2652 l2cap_send_sframe(chan, &control);
2655 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2657 struct l2cap_ctrl control;
2661 BT_DBG("chan %p, txseq %u", chan, txseq);
2663 memset(&control, 0, sizeof(control));
2665 control.super = L2CAP_SUPER_SREJ;
2667 /* Capture initial list head to allow only one pass through the list. */
2668 initial_head = chan->srej_list.head;
2671 seq = l2cap_seq_list_pop(&chan->srej_list);
2672 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2675 control.reqseq = seq;
2676 l2cap_send_sframe(chan, &control);
2677 l2cap_seq_list_append(&chan->srej_list, seq);
2678 } while (chan->srej_list.head != initial_head);
2681 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2683 struct sk_buff *acked_skb;
2686 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2688 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2691 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2692 chan->expected_ack_seq, chan->unacked_frames);
2694 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2695 ackseq = __next_seq(chan, ackseq)) {
2697 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2699 skb_unlink(acked_skb, &chan->tx_q);
2700 kfree_skb(acked_skb);
2701 chan->unacked_frames--;
2705 chan->expected_ack_seq = reqseq;
2707 if (chan->unacked_frames == 0)
2708 __clear_retrans_timer(chan);
2710 BT_DBG("unacked_frames %u", chan->unacked_frames);
2713 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2715 BT_DBG("chan %p", chan);
2717 chan->expected_tx_seq = chan->buffer_seq;
2718 l2cap_seq_list_clear(&chan->srej_list);
2719 skb_queue_purge(&chan->srej_q);
2720 chan->rx_state = L2CAP_RX_STATE_RECV;
2723 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2724 struct l2cap_ctrl *control,
2725 struct sk_buff_head *skbs, u8 event)
2727 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2731 case L2CAP_EV_DATA_REQUEST:
2732 if (chan->tx_send_head == NULL)
2733 chan->tx_send_head = skb_peek(skbs);
2735 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2736 l2cap_ertm_send(chan);
2738 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2739 BT_DBG("Enter LOCAL_BUSY");
2740 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2742 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2743 /* The SREJ_SENT state must be aborted if we are to
2744 * enter the LOCAL_BUSY state.
2746 l2cap_abort_rx_srej_sent(chan);
2749 l2cap_send_ack(chan);
2752 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2753 BT_DBG("Exit LOCAL_BUSY");
2754 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2756 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2757 struct l2cap_ctrl local_control;
2759 memset(&local_control, 0, sizeof(local_control));
2760 local_control.sframe = 1;
2761 local_control.super = L2CAP_SUPER_RR;
2762 local_control.poll = 1;
2763 local_control.reqseq = chan->buffer_seq;
2764 l2cap_send_sframe(chan, &local_control);
2766 chan->retry_count = 1;
2767 __set_monitor_timer(chan);
2768 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2771 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2772 l2cap_process_reqseq(chan, control->reqseq);
2774 case L2CAP_EV_EXPLICIT_POLL:
2775 l2cap_send_rr_or_rnr(chan, 1);
2776 chan->retry_count = 1;
2777 __set_monitor_timer(chan);
2778 __clear_ack_timer(chan);
2779 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2781 case L2CAP_EV_RETRANS_TO:
2782 l2cap_send_rr_or_rnr(chan, 1);
2783 chan->retry_count = 1;
2784 __set_monitor_timer(chan);
2785 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2787 case L2CAP_EV_RECV_FBIT:
2788 /* Nothing to process */
/* ERTM transmit-side state machine: WAIT_F state.  A poll (P=1)
 * S-frame is outstanding and we are waiting for the peer's final
 * (F=1) response before returning to the XMIT state.
 *
 * NOTE(review): this listing appears to have dropped lines (the
 * `switch (event)` line, per-case `break`s, closing braces); the
 * comments below describe only the statements that are visible.
 */
2795 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2796 struct l2cap_ctrl *control,
2797 struct sk_buff_head *skbs, u8 event)
2799 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
/* New outbound data: queue it, but transmission stays paused until
 * the F-bit arrives.
 */
2803 case L2CAP_EV_DATA_REQUEST:
2804 if (chan->tx_send_head == NULL)
2805 chan->tx_send_head = skb_peek(skbs);
2806 /* Queue data, but don't send. */
2807 skb_queue_splice_tail_init(skbs, &chan->tx_q);
/* Our receive side is congested: flag local busy and tell the peer. */
2809 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2810 BT_DBG("Enter LOCAL_BUSY");
2811 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2813 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2814 /* The SREJ_SENT state must be aborted if we are to
2815 * enter the LOCAL_BUSY state.
2817 l2cap_abort_rx_srej_sent(chan);
2820 l2cap_send_ack(chan);
/* Congestion cleared: if an RNR was sent, re-poll the peer with an
 * RR (P=1) so it knows it may resume, and restart the monitor timer.
 */
2823 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2824 BT_DBG("Exit LOCAL_BUSY");
2825 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2827 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2828 struct l2cap_ctrl local_control;
2829 memset(&local_control, 0, sizeof(local_control));
2830 local_control.sframe = 1;
2831 local_control.super = L2CAP_SUPER_RR;
2832 local_control.poll = 1;
2833 local_control.reqseq = chan->buffer_seq;
2834 l2cap_send_sframe(chan, &local_control);
2836 chan->retry_count = 1;
2837 __set_monitor_timer(chan);
2838 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
/* Acknowledgment numbers are still processed while waiting. */
2841 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2842 l2cap_process_reqseq(chan, control->reqseq);
/* Peer answered our poll: stop the monitor timer, rearm the
 * retransmission timer if frames remain unacked, go back to XMIT.
 */
2846 case L2CAP_EV_RECV_FBIT:
2847 if (control && control->final) {
2848 __clear_monitor_timer(chan);
2849 if (chan->unacked_frames > 0)
2850 __set_retrans_timer(chan);
2851 chan->retry_count = 0;
2852 chan->tx_state = L2CAP_TX_STATE_XMIT;
2853 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
/* A poll is already outstanding; nothing extra to do here. */
2856 case L2CAP_EV_EXPLICIT_POLL:
/* Monitor timeout: re-poll until max_tx retries are exhausted
 * (max_tx == 0 means retry forever), then give up and disconnect.
 */
2859 case L2CAP_EV_MONITOR_TO:
2860 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2861 l2cap_send_rr_or_rnr(chan, 1);
2862 __set_monitor_timer(chan);
2863 chan->retry_count++;
2865 l2cap_send_disconn_req(chan, ECONNABORTED);
2873 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2874 struct sk_buff_head *skbs, u8 event)
2876 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2877 chan, control, skbs, event, chan->tx_state);
2879 switch (chan->tx_state) {
2880 case L2CAP_TX_STATE_XMIT:
2881 l2cap_tx_state_xmit(chan, control, skbs, event);
2883 case L2CAP_TX_STATE_WAIT_F:
2884 l2cap_tx_state_wait_f(chan, control, skbs, event);
2892 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2893 struct l2cap_ctrl *control)
2895 BT_DBG("chan %p, control %p", chan, control);
2896 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2899 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2900 struct l2cap_ctrl *control)
2902 BT_DBG("chan %p, control %p", chan, control);
2903 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2906 /* Copy frame to all raw sockets on that connection */
2907 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2909 struct sk_buff *nskb;
2910 struct l2cap_chan *chan;
2912 BT_DBG("conn %p", conn);
2914 mutex_lock(&conn->chan_lock);
2916 list_for_each_entry(chan, &conn->chan_l, list) {
2917 if (chan->chan_type != L2CAP_CHAN_RAW)
2920 /* Don't send frame to the channel it came from */
2921 if (bt_cb(skb)->l2cap.chan == chan)
2924 nskb = skb_clone(skb, GFP_KERNEL);
2927 if (chan->ops->recv(chan, nskb))
2931 mutex_unlock(&conn->chan_lock);
2934 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill an skb carrying one L2CAP signalling command:
 * basic L2CAP header + command header + dlen bytes of payload.
 * Payload that does not fit within conn->mtu is carried in
 * continuation fragments chained on frag_list.
 *
 * NOTE(review): several lines of this listing are missing (local
 * declarations of len/count, allocation-failure cleanup paths, the
 * final return); comments cover only what is visible.
 */
2935 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2936 u8 ident, u16 dlen, void *data)
2938 struct sk_buff *skb, **frag;
2939 struct l2cap_cmd_hdr *cmd;
2940 struct l2cap_hdr *lh;
2943 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2944 conn, code, ident, dlen);
/* The link MTU must at least hold the two headers. */
2946 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2949 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2950 count = min_t(unsigned int, conn->mtu, len);
2952 skb = bt_skb_alloc(count, GFP_KERNEL);
/* Basic L2CAP header: length covers command header + payload. */
2956 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2957 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* The signalling CID differs between LE and BR/EDR links. */
2959 if (conn->hcon->type == LE_LINK)
2960 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2962 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2964 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2967 cmd->len = cpu_to_le16(dlen);
/* Copy as much payload as fits into the first skb. */
2970 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2971 memcpy(skb_put(skb, count), data, count);
2977 /* Continuation fragments (no L2CAP header) */
2978 frag = &skb_shinfo(skb)->frag_list;
2980 count = min_t(unsigned int, conn->mtu, len);
2982 *frag = bt_skb_alloc(count, GFP_KERNEL);
2986 memcpy(skb_put(*frag, count), data, count);
2991 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: return its type, payload
 * length and value (widened to unsigned long, or a pointer for
 * variable-length options) through the out parameters.
 *
 * NOTE(review): the `switch (opt->len)` skeleton, the *ptr advance
 * and the return statement are not visible in this listing.
 */
3001 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3004 struct l2cap_conf_opt *opt = *ptr;
/* Total bytes consumed: option header plus payload. */
3007 len = L2CAP_CONF_OPT_SIZE + opt->len;
/* 1-byte value */
3015 *val = *((u8 *) opt->val);
/* 2-byte value, possibly unaligned on the wire */
3019 *val = get_unaligned_le16(opt->val);
/* 4-byte value */
3023 *val = get_unaligned_le32(opt->val);
/* Larger options: hand back a pointer to the raw payload. */
3027 *val = (unsigned long) opt->val;
3031 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option to the buffer at *ptr and advance
 * *ptr past it.  `size` is the space remaining in the output buffer;
 * an option that would not fit is silently dropped — this bound
 * prevents overflowing the caller's response buffer.
 */
3035 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3037 struct l2cap_conf_opt *opt = *ptr;
3039 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
/* Refuse to write past the end of the output buffer. */
3041 if (size < L2CAP_CONF_OPT_SIZE + len)
3049 *((u8 *) opt->val) = val;
3053 put_unaligned_le16(val, opt->val);
3057 put_unaligned_le32(val, opt->val);
/* Variable-length option: val is really a pointer to the data. */
3061 memcpy(opt->val, (void *) val, len);
3065 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification option describing this
 * channel's local service parameters.  Field values depend on the
 * channel mode; only ERTM and streaming are meaningful here.
 */
3068 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3070 struct l2cap_conf_efs efs;
3072 switch (chan->mode) {
3073 case L2CAP_MODE_ERTM:
3074 efs.id = chan->local_id;
3075 efs.stype = chan->local_stype;
3076 efs.msdu = cpu_to_le16(chan->local_msdu);
3077 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
/* ERTM uses the default access latency / EFS flush timeout. */
3078 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3079 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
/* Streaming mode always advertises a best-effort service type. */
3082 case L2CAP_MODE_STREAMING:
3084 efs.stype = L2CAP_SERV_BESTEFFORT;
3085 efs.msdu = cpu_to_le16(chan->local_msdu);
3086 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3095 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3096 (unsigned long) &efs, size);
/* Deferred-ack timer: if any received I-frames are still
 * unacknowledged when the timer fires, send an RR/RNR to ack them.
 * Drops the channel reference that was taken when the timer was
 * armed (l2cap_chan_put at the end).
 */
3099 static void l2cap_ack_timeout(struct work_struct *work)
3101 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3105 BT_DBG("chan %p", chan);
3107 l2cap_chan_lock(chan);
/* Frames received but not yet acknowledged to the peer. */
3109 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3110 chan->last_acked_seq);
3113 l2cap_send_rr_or_rnr(chan, 0);
3115 l2cap_chan_unlock(chan);
3116 l2cap_chan_put(chan);
/* Initialize ERTM/streaming state for a channel entering a reliable
 * mode: reset all sequence counters, queues, AMP move state and (for
 * ERTM only) timers and sequence lists.  Returns 0 on success or a
 * negative errno when a sequence-list allocation fails.
 */
3119 int l2cap_ertm_init(struct l2cap_chan *chan)
3123 chan->next_tx_seq = 0;
3124 chan->expected_tx_seq = 0;
3125 chan->expected_ack_seq = 0;
3126 chan->unacked_frames = 0;
3127 chan->buffer_seq = 0;
3128 chan->frames_sent = 0;
3129 chan->last_acked_seq = 0;
3131 chan->sdu_last_frag = NULL;
3134 skb_queue_head_init(&chan->tx_q);
/* Channel starts on the BR/EDR controller; no AMP move pending. */
3136 chan->local_amp_id = AMP_ID_BREDR;
3137 chan->move_id = AMP_ID_BREDR;
3138 chan->move_state = L2CAP_MOVE_STABLE;
3139 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* Streaming mode needs no timers or retransmission state. */
3141 if (chan->mode != L2CAP_MODE_ERTM)
3144 chan->rx_state = L2CAP_RX_STATE_RECV;
3145 chan->tx_state = L2CAP_TX_STATE_XMIT;
3147 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3148 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3149 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3151 skb_queue_head_init(&chan->srej_q);
3153 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
/* If the second list fails, free the first before returning. */
3157 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3159 l2cap_seq_list_free(&chan->srej_list);
3164 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3167 case L2CAP_MODE_STREAMING:
3168 case L2CAP_MODE_ERTM:
3169 if (l2cap_mode_supported(mode, remote_feat_mask))
3173 return L2CAP_MODE_BASIC;
3177 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3179 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3180 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3183 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3185 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3186 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
/* Choose retransmission and monitor timeouts for an ERTM channel.
 * On an AMP controller the timeouts are derived from the
 * controller's best-effort flush timeout (clamped to 16 bits); on
 * BR/EDR the spec defaults are used.
 */
3189 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3190 struct l2cap_conf_rfc *rfc)
3192 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3193 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3195 /* Class 1 devices have must have ERTM timeouts
3196 * exceeding the Link Supervision Timeout. The
3197 * default Link Supervision Timeout for AMP
3198 * controllers is 10 seconds.
3200 * Class 1 devices use 0xffffffff for their
3201 * best-effort flush timeout, so the clamping logic
3202 * will result in a timeout that meets the above
3203 * requirement. ERTM timeouts are 16-bit values, so
3204 * the maximum timeout is 65.535 seconds.
3207 /* Convert timeout to milliseconds and round */
3208 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3210 /* This is the recommended formula for class 2 devices
3211 * that start ERTM timers when packets are sent to the
3214 ertm_to = 3 * ertm_to + 500;
/* Clamp to the 16-bit field carried in the RFC option. */
3216 if (ertm_to > 0xffff)
3219 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3220 rfc->monitor_timeout = rfc->retrans_timeout;
/* BR/EDR: fall back to the default spec timeouts. */
3222 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3223 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3227 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3229 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3230 __l2cap_ews_supported(chan->conn)) {
3231 /* use extended control field */
3232 set_bit(FLAG_EXT_CTRL, &chan->flags);
3233 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3235 chan->tx_win = min_t(u16, chan->tx_win,
3236 L2CAP_DEFAULT_TX_WINDOW);
3237 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3239 chan->ack_win = chan->tx_win;
/* Build our Configure Request for `chan` into `data` (at most
 * data_size bytes), negotiating MTU, RFC (mode/timeouts/window),
 * FCS, EFS and extended window size as applicable.
 *
 * NOTE(review): some lines of this listing are missing (case
 * `break`s, the `done:` label, the final `return ptr - data`);
 * comments cover only the visible statements.
 */
3242 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3244 struct l2cap_conf_req *req = data;
3245 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3246 void *ptr = req->data;
3247 void *endptr = data + data_size;
3250 BT_DBG("chan %p", chan);
/* Only downgrade the mode during the first config round. */
3252 if (chan->num_conf_req || chan->num_conf_rsp)
3255 switch (chan->mode) {
3256 case L2CAP_MODE_STREAMING:
3257 case L2CAP_MODE_ERTM:
3258 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3261 if (__l2cap_efs_supported(chan->conn))
3262 set_bit(FLAG_EFS_ENABLE, &chan->flags);
/* Fall back to a mode the remote actually supports. */
3266 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* Only include an MTU option when it differs from the default. */
3271 if (chan->imtu != L2CAP_DEFAULT_MTU)
3272 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
3274 switch (chan->mode) {
3275 case L2CAP_MODE_BASIC:
/* If the remote knows about ERTM/streaming, explicitly request
 * basic mode; otherwise the RFC option can be omitted entirely.
 */
3279 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3280 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3283 rfc.mode = L2CAP_MODE_BASIC;
3285 rfc.max_transmit = 0;
3286 rfc.retrans_timeout = 0;
3287 rfc.monitor_timeout = 0;
3288 rfc.max_pdu_size = 0;
3290 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3291 (unsigned long) &rfc, endptr - ptr);
3294 case L2CAP_MODE_ERTM:
3295 rfc.mode = L2CAP_MODE_ERTM;
3296 rfc.max_transmit = chan->max_tx;
3298 __l2cap_set_ertm_timeouts(chan, &rfc);
/* PDU size is bounded by the ACL MTU minus worst-case overhead. */
3300 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3301 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3303 rfc.max_pdu_size = cpu_to_le16(size);
3305 l2cap_txwin_setup(chan);
3307 rfc.txwin_size = min_t(u16, chan->tx_win,
3308 L2CAP_DEFAULT_TX_WINDOW);
3310 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3311 (unsigned long) &rfc, endptr - ptr);
3313 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3314 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
/* Windows above the classic maximum need the EWS option. */
3316 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3317 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3318 chan->tx_win, endptr - ptr);
/* Offer to disable FCS if we or the peer prefer no FCS. */
3320 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3321 if (chan->fcs == L2CAP_FCS_NONE ||
3322 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3323 chan->fcs = L2CAP_FCS_NONE;
3324 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3325 chan->fcs, endptr - ptr);
3329 case L2CAP_MODE_STREAMING:
3330 l2cap_txwin_setup(chan);
3331 rfc.mode = L2CAP_MODE_STREAMING;
3333 rfc.max_transmit = 0;
3334 rfc.retrans_timeout = 0;
3335 rfc.monitor_timeout = 0;
3337 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3338 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3340 rfc.max_pdu_size = cpu_to_le16(size);
3342 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3343 (unsigned long) &rfc, endptr - ptr);
3345 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3346 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3348 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3349 if (chan->fcs == L2CAP_FCS_NONE ||
3350 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3351 chan->fcs = L2CAP_FCS_NONE;
3352 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3353 chan->fcs, endptr - ptr);
3358 req->dcid = cpu_to_le16(chan->dcid);
3359 req->flags = cpu_to_le16(0);
/* Parse the remote's accumulated Configure Request (chan->conf_req /
 * chan->conf_len) and build our Configure Response into `data`,
 * bounded by data_size.  Negotiates MTU, flush timeout, RFC mode,
 * FCS, EFS and extended window, rejecting incompatible settings.
 * Returns a negative errno for an unworkable request.
 *
 * NOTE(review): some lines are missing from this listing (hint
 * handling `break`s, the `done:` label, default mode fallback, the
 * final return); comments cover only the visible statements.
 */
3364 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3366 struct l2cap_conf_rsp *rsp = data;
3367 void *ptr = rsp->data;
3368 void *endptr = data + data_size;
3369 void *req = chan->conf_req;
3370 int len = chan->conf_len;
3371 int type, hint, olen;
3373 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3374 struct l2cap_conf_efs efs;
3376 u16 mtu = L2CAP_DEFAULT_MTU;
3377 u16 result = L2CAP_CONF_SUCCESS;
3380 BT_DBG("chan %p", chan);
/* First pass: walk every option the remote sent. */
3382 while (len >= L2CAP_CONF_OPT_SIZE) {
3383 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; others must be understood. */
3387 hint = type & L2CAP_CONF_HINT;
3388 type &= L2CAP_CONF_MASK;
3391 case L2CAP_CONF_MTU:
3397 case L2CAP_CONF_FLUSH_TO:
3400 chan->flush_to = val;
3403 case L2CAP_CONF_QOS:
/* Size-check wire options before copying into stack structs. */
3406 case L2CAP_CONF_RFC:
3407 if (olen != sizeof(rfc))
3409 memcpy(&rfc, (void *) val, olen);
3412 case L2CAP_CONF_FCS:
3415 if (val == L2CAP_FCS_NONE)
3416 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3419 case L2CAP_CONF_EFS:
3420 if (olen != sizeof(efs))
3423 memcpy(&efs, (void *) val, olen);
/* Extended window requires A2MP support on our side. */
3426 case L2CAP_CONF_EWS:
3429 if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3430 return -ECONNREFUSED;
3431 set_bit(FLAG_EXT_CTRL, &chan->flags);
3432 set_bit(CONF_EWS_RECV, &chan->conf_state);
3433 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3434 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back as UNKNOWN. */
3440 result = L2CAP_CONF_UNKNOWN;
3441 *((u8 *) ptr++) = type;
/* Mode can only be (re)negotiated during the first exchange. */
3446 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3449 switch (chan->mode) {
3450 case L2CAP_MODE_STREAMING:
3451 case L2CAP_MODE_ERTM:
3452 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3453 chan->mode = l2cap_select_mode(rfc.mode,
3454 chan->conn->feat_mask);
3459 if (__l2cap_efs_supported(chan->conn))
3460 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3462 return -ECONNREFUSED;
3465 if (chan->mode != rfc.mode)
3466 return -ECONNREFUSED;
/* Mode mismatch: ask the remote to retry with our mode, but
 * give up if this is already the second response.
 */
3472 if (chan->mode != rfc.mode) {
3473 result = L2CAP_CONF_UNACCEPT;
3474 rfc.mode = chan->mode;
3476 if (chan->num_conf_rsp == 1)
3477 return -ECONNREFUSED;
3479 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3480 (unsigned long) &rfc, endptr - ptr);
3483 if (result == L2CAP_CONF_SUCCESS) {
3484 /* Configure output options and let the other side know
3485 * which ones we don't like. */
3487 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3488 result = L2CAP_CONF_UNACCEPT;
3491 set_bit(CONF_MTU_DONE, &chan->conf_state);
3493 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
/* The remote's EFS service type must match our local one. */
3496 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3497 efs.stype != L2CAP_SERV_NOTRAFIC &&
3498 efs.stype != chan->local_stype) {
3500 result = L2CAP_CONF_UNACCEPT;
3502 if (chan->num_conf_req >= 1)
3503 return -ECONNREFUSED;
3505 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3507 (unsigned long) &efs, endptr - ptr);
3509 /* Send PENDING Conf Rsp */
3510 result = L2CAP_CONF_PENDING;
3511 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3516 case L2CAP_MODE_BASIC:
3517 chan->fcs = L2CAP_FCS_NONE;
3518 set_bit(CONF_MODE_DONE, &chan->conf_state);
3521 case L2CAP_MODE_ERTM:
3522 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3523 chan->remote_tx_win = rfc.txwin_size;
3525 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3527 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the remote's PDU size to what our ACL MTU can carry. */
3529 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3530 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3531 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3532 rfc.max_pdu_size = cpu_to_le16(size);
3533 chan->remote_mps = size;
3535 __l2cap_set_ertm_timeouts(chan, &rfc);
3537 set_bit(CONF_MODE_DONE, &chan->conf_state);
3539 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3540 sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
/* Record the remote's accepted flow spec and echo it back. */
3542 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3543 chan->remote_id = efs.id;
3544 chan->remote_stype = efs.stype;
3545 chan->remote_msdu = le16_to_cpu(efs.msdu);
3546 chan->remote_flush_to =
3547 le32_to_cpu(efs.flush_to);
3548 chan->remote_acc_lat =
3549 le32_to_cpu(efs.acc_lat);
3550 chan->remote_sdu_itime =
3551 le32_to_cpu(efs.sdu_itime);
3552 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3554 (unsigned long) &efs, endptr - ptr);
3558 case L2CAP_MODE_STREAMING:
3559 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3560 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3561 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3562 rfc.max_pdu_size = cpu_to_le16(size);
3563 chan->remote_mps = size;
3565 set_bit(CONF_MODE_DONE, &chan->conf_state);
3567 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3568 (unsigned long) &rfc, endptr - ptr);
/* Any other mode cannot be accepted; report our mode back. */
3573 result = L2CAP_CONF_UNACCEPT;
3575 memset(&rfc, 0, sizeof(rfc));
3576 rfc.mode = chan->mode;
3579 if (result == L2CAP_CONF_SUCCESS)
3580 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3582 rsp->scid = cpu_to_le16(chan->dcid);
3583 rsp->result = cpu_to_le16(result);
3584 rsp->flags = cpu_to_le16(0);
/* Parse the remote's Configure Response and build the follow-up
 * Configure Request into `data` (bounded by size).  *result may be
 * updated (e.g. to UNACCEPT for a too-small MTU).  Returns the new
 * request length, or a negative errno for an unacceptable response.
 *
 * NOTE(review): some lines are missing from this listing (case
 * `break`s, switch skeletons, the final return); comments cover
 * only the visible statements.
 */
3589 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3590 void *data, size_t size, u16 *result)
3592 struct l2cap_conf_req *req = data;
3593 void *ptr = req->data;
3594 void *endptr = data + size;
3597 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3598 struct l2cap_conf_efs efs;
3600 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3602 while (len >= L2CAP_CONF_OPT_SIZE) {
3603 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3608 case L2CAP_CONF_MTU:
/* Remote proposed an MTU below the legal minimum: refuse and
 * fall back to the smallest acceptable value.
 */
3611 if (val < L2CAP_DEFAULT_MIN_MTU) {
3612 *result = L2CAP_CONF_UNACCEPT;
3613 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3616 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3620 case L2CAP_CONF_FLUSH_TO:
3623 chan->flush_to = val;
3624 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3625 chan->flush_to, endptr - ptr);
/* Size-check wire options before copying into stack structs. */
3628 case L2CAP_CONF_RFC:
3629 if (olen != sizeof(rfc))
3631 memcpy(&rfc, (void *)val, olen);
/* State-2 devices may not switch modes mid-negotiation. */
3632 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3633 rfc.mode != chan->mode)
3634 return -ECONNREFUSED;
3636 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3637 (unsigned long) &rfc, endptr - ptr);
3640 case L2CAP_CONF_EWS:
3643 chan->ack_win = min_t(u16, val, chan->ack_win);
3644 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3645 chan->tx_win, endptr - ptr);
3648 case L2CAP_CONF_EFS:
3649 if (olen != sizeof(efs))
3651 memcpy(&efs, (void *)val, olen);
/* The remote's service type must match our local one. */
3652 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3653 efs.stype != L2CAP_SERV_NOTRAFIC &&
3654 efs.stype != chan->local_stype)
3655 return -ECONNREFUSED;
3656 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3657 (unsigned long) &efs, endptr - ptr);
3660 case L2CAP_CONF_FCS:
3663 if (*result == L2CAP_CONF_PENDING)
3664 if (val == L2CAP_FCS_NONE)
3665 set_bit(CONF_RECV_NO_FCS,
/* A basic-mode channel cannot be talked into another mode. */
3671 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3672 return -ECONNREFUSED;
3674 chan->mode = rfc.mode;
/* Adopt the negotiated ERTM/streaming parameters. */
3676 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3678 case L2CAP_MODE_ERTM:
3679 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3680 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3681 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3682 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3683 chan->ack_win = min_t(u16, chan->ack_win,
3686 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3687 chan->local_msdu = le16_to_cpu(efs.msdu);
3688 chan->local_sdu_itime =
3689 le32_to_cpu(efs.sdu_itime);
3690 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3691 chan->local_flush_to =
3692 le32_to_cpu(efs.flush_to);
3696 case L2CAP_MODE_STREAMING:
3697 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3701 req->dcid = cpu_to_le16(chan->dcid);
3702 req->flags = cpu_to_le16(0);
3707 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3708 u16 result, u16 flags)
3710 struct l2cap_conf_rsp *rsp = data;
3711 void *ptr = rsp->data;
3713 BT_DBG("chan %p", chan);
3715 rsp->scid = cpu_to_le16(chan->dcid);
3716 rsp->result = cpu_to_le16(result);
3717 rsp->flags = cpu_to_le16(flags);
3722 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3724 struct l2cap_le_conn_rsp rsp;
3725 struct l2cap_conn *conn = chan->conn;
3727 BT_DBG("chan %p", chan);
3729 rsp.dcid = cpu_to_le16(chan->scid);
3730 rsp.mtu = cpu_to_le16(chan->imtu);
3731 rsp.mps = cpu_to_le16(chan->mps);
3732 rsp.credits = cpu_to_le16(chan->rx_credits);
3733 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3735 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Send the deferred Connect Response (success) for a BR/EDR channel
 * whose acceptance was postponed, then kick off configuration by
 * sending our Configure Request unless one was already sent.
 *
 * NOTE(review): the declarations of the locals `rsp_code`/`buf` and
 * the condition selecting the response opcode (presumably testing
 * chan->hs_hcon) are not visible in this listing.
 */
3739 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3741 struct l2cap_conn_rsp rsp;
3742 struct l2cap_conn *conn = chan->conn;
3746 rsp.scid = cpu_to_le16(chan->dcid);
3747 rsp.dcid = cpu_to_le16(chan->scid);
3748 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3749 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* AMP channels answer with CREATE_CHAN_RSP, BR/EDR with CONN_RSP. */
3752 rsp_code = L2CAP_CREATE_CHAN_RSP;
3754 rsp_code = L2CAP_CONN_RSP;
3756 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3758 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* Only send our Configure Request once per channel. */
3760 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3763 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3764 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3765 chan->num_conf_req++;
/* After a successful Configure Response, extract the agreed RFC and
 * extended-window values so timers, MPS and ack window match what
 * was actually negotiated.  Sane defaults are preloaded in case the
 * remote omitted the options.
 */
3768 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3772 /* Use sane default values in case a misbehaving remote device
3773 * did not send an RFC or extended window size option.
3775 u16 txwin_ext = chan->ack_win;
3776 struct l2cap_conf_rfc rfc = {
3778 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3779 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3780 .max_pdu_size = cpu_to_le16(chan->imtu),
3781 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3784 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Nothing to extract for basic mode. */
3786 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3789 while (len >= L2CAP_CONF_OPT_SIZE) {
3790 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
/* Reject RFC options of the wrong size rather than copying. */
3795 case L2CAP_CONF_RFC:
3796 if (olen != sizeof(rfc))
3798 memcpy(&rfc, (void *)val, olen);
3800 case L2CAP_CONF_EWS:
3809 case L2CAP_MODE_ERTM:
3810 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3811 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3812 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Ack window comes from EWS when extended control is in use. */
3813 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3814 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3816 chan->ack_win = min_t(u16, chan->ack_win,
3819 case L2CAP_MODE_STREAMING:
3820 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it rejects our outstanding
 * Information Request (matched by ident), treat the feature-mask
 * exchange as done and start any pending channels.
 *
 * NOTE(review): the early-return statements after the two guard
 * conditions are not visible in this listing.
 */
3824 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3825 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3828 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
/* Malformed reject: too short to contain a reason code. */
3830 if (cmd_len < sizeof(*rej))
/* Only "command not understood" rejects are of interest here. */
3833 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3836 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3837 cmd->ident == conn->info_ident) {
3838 cancel_delayed_work(&conn->info_timer);
3840 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3841 conn->info_ident = 0;
3843 l2cap_conn_start(conn);
/* Handle an incoming connection request on a BR/EDR link: find a
 * listening channel for the PSM, enforce link security, create the
 * new channel, and send the Connection Response (possibly PENDING
 * while authorization/authentication completes).
 *
 * NOTE(review): several lines (goto statements, `response:` /
 * `sendresp:` style labels, the dcid assignment, the final return)
 * are not visible in this listing; comments cover only the visible
 * statements.
 */
3849 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3850 struct l2cap_cmd_hdr *cmd,
3851 u8 *data, u8 rsp_code, u8 amp_id)
3853 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3854 struct l2cap_conn_rsp rsp;
3855 struct l2cap_chan *chan = NULL, *pchan;
3856 int result, status = L2CAP_CS_NO_INFO;
3858 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3859 __le16 psm = req->psm;
3861 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3863 /* Check if we have socket listening on psm */
3864 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3865 &conn->hcon->dst, ACL_LINK);
3867 result = L2CAP_CR_BAD_PSM;
3871 mutex_lock(&conn->chan_lock);
3872 l2cap_chan_lock(pchan);
3874 /* Check if the ACL is secure enough (if not SDP) */
3875 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3876 !hci_conn_check_link_mode(conn->hcon)) {
3877 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3878 result = L2CAP_CR_SEC_BLOCK;
3882 result = L2CAP_CR_NO_MEM;
3884 /* Check if we already have channel with that dcid */
3885 if (__l2cap_get_chan_by_dcid(conn, scid))
3888 chan = pchan->ops->new_connection(pchan);
3892 /* For certain devices (ex: HID mouse), support for authentication,
3893 * pairing and bonding is optional. For such devices, inorder to avoid
3894 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3895 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3897 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3899 bacpy(&chan->src, &conn->hcon->src);
3900 bacpy(&chan->dst, &conn->hcon->dst);
3901 chan->src_type = bdaddr_src_type(conn->hcon);
3902 chan->dst_type = bdaddr_dst_type(conn->hcon);
3905 chan->local_amp_id = amp_id;
3907 __l2cap_chan_add(conn, chan);
3911 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3913 chan->ident = cmd->ident;
3915 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3916 if (l2cap_chan_check_security(chan, false)) {
/* Userspace wants to decide: report authorization pending. */
3917 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3918 l2cap_state_change(chan, BT_CONNECT2);
3919 result = L2CAP_CR_PEND;
3920 status = L2CAP_CS_AUTHOR_PEND;
3921 chan->ops->defer(chan);
3923 /* Force pending result for AMP controllers.
3924 * The connection will succeed after the
3925 * physical link is up.
3927 if (amp_id == AMP_ID_BREDR) {
3928 l2cap_state_change(chan, BT_CONFIG);
3929 result = L2CAP_CR_SUCCESS;
3931 l2cap_state_change(chan, BT_CONNECT2);
3932 result = L2CAP_CR_PEND;
3934 status = L2CAP_CS_NO_INFO;
/* Security checks still running: report authentication pending. */
3937 l2cap_state_change(chan, BT_CONNECT2);
3938 result = L2CAP_CR_PEND;
3939 status = L2CAP_CS_AUTHEN_PEND;
/* Feature exchange not finished yet: keep the peer pending. */
3942 l2cap_state_change(chan, BT_CONNECT2);
3943 result = L2CAP_CR_PEND;
3944 status = L2CAP_CS_NO_INFO;
3948 l2cap_chan_unlock(pchan);
3949 mutex_unlock(&conn->chan_lock);
3950 l2cap_chan_put(pchan);
3953 rsp.scid = cpu_to_le16(scid);
3954 rsp.dcid = cpu_to_le16(dcid);
3955 rsp.result = cpu_to_le16(result);
3956 rsp.status = cpu_to_le16(status);
3957 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* First channel on this link: kick off the feature-mask exchange. */
3959 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3960 struct l2cap_info_req info;
3961 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3963 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3964 conn->info_ident = l2cap_get_ident(conn);
3966 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3968 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3969 sizeof(info), &info);
/* Accepted immediately: send our Configure Request right away. */
3972 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3973 result == L2CAP_CR_SUCCESS) {
3975 set_bit(CONF_REQ_SENT, &chan->conf_state);
3976 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3977 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3978 chan->num_conf_req++;
3984 static int l2cap_connect_req(struct l2cap_conn *conn,
3985 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3987 struct hci_dev *hdev = conn->hcon->hdev;
3988 struct hci_conn *hcon = conn->hcon;
3990 if (cmd_len < sizeof(struct l2cap_conn_req))
3994 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3995 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3996 mgmt_device_connected(hdev, hcon, 0, NULL, 0);
3997 hci_dev_unlock(hdev);
3999 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection Response / Create Channel Response from the
 * remote: locate our channel (by source CID, or by command ident if
 * no CID was assigned yet) and advance it to configuration, keep it
 * pending, or delete it on refusal.
 *
 * NOTE(review): error paths (bad cmd_len return, not-found unlock
 * path, case `break`s, final return) are not visible in this
 * listing; comments cover only the visible statements.
 */
4003 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
4004 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4007 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4008 u16 scid, dcid, result, status;
4009 struct l2cap_chan *chan;
4013 if (cmd_len < sizeof(*rsp))
4016 scid = __le16_to_cpu(rsp->scid);
4017 dcid = __le16_to_cpu(rsp->dcid);
4018 result = __le16_to_cpu(rsp->result);
4019 status = __le16_to_cpu(rsp->status);
4021 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4022 dcid, scid, result, status);
4024 mutex_lock(&conn->chan_lock);
4027 chan = __l2cap_get_chan_by_scid(conn, scid);
/* No source CID yet (pending response): match on the ident. */
4033 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4042 l2cap_chan_lock(chan);
4045 case L2CAP_CR_SUCCESS:
4046 l2cap_state_change(chan, BT_CONFIG);
4049 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Send our Configure Request exactly once. */
4051 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4054 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4055 l2cap_build_conf_req(chan, req, sizeof(req)), req);
4056 chan->num_conf_req++;
4060 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Refused: tear the channel down. */
4064 l2cap_chan_del(chan, ECONNREFUSED);
4068 l2cap_chan_unlock(chan);
4071 mutex_unlock(&conn->chan_lock);
4076 static inline void set_default_fcs(struct l2cap_chan *chan)
4078 /* FCS is enabled only in ERTM or streaming mode, if one or both
4081 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4082 chan->fcs = L2CAP_FCS_NONE;
4083 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4084 chan->fcs = L2CAP_FCS_CRC16;
4087 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4088 u8 ident, u16 flags)
4090 struct l2cap_conn *conn = chan->conn;
4092 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4095 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4096 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4098 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4099 l2cap_build_conf_rsp(chan, data,
4100 L2CAP_CONF_SUCCESS, flags), data);
4103 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4106 struct l2cap_cmd_rej_cid rej;
4108 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4109 rej.scid = __cpu_to_le16(scid);
4110 rej.dcid = __cpu_to_le16(dcid);
4112 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle an incoming Configure Request: accumulate (possibly
 * fragmented) options in chan->conf_req, then parse them and send
 * our Configure Response.  When both directions are done, finish
 * channel setup (FCS defaults, ERTM init, mark ready).
 *
 * NOTE(review): some lines are missing from this listing (early
 * returns, goto/unlock statements, the conf_len reset); comments
 * cover only the visible statements.
 */
4115 static inline int l2cap_config_req(struct l2cap_conn *conn,
4116 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4119 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4122 struct l2cap_chan *chan;
4125 if (cmd_len < sizeof(*req))
4128 dcid = __le16_to_cpu(req->dcid);
4129 flags = __le16_to_cpu(req->flags);
4131 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
/* Takes a reference on the channel; released at the end. */
4133 chan = l2cap_get_chan_by_scid(conn, dcid);
4135 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
/* Configuration is only legal in these states. */
4139 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
4140 chan->state != BT_CONNECTED) {
4141 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4146 /* Reject if config buffer is too small. */
4147 len = cmd_len - sizeof(*req);
4148 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4149 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4150 l2cap_build_conf_rsp(chan, rsp,
4151 L2CAP_CONF_REJECT, flags), rsp);
4156 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4157 chan->conf_len += len;
4159 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4160 /* Incomplete config. Send empty response. */
4161 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4162 l2cap_build_conf_rsp(chan, rsp,
4163 L2CAP_CONF_SUCCESS, flags), rsp);
4167 /* Complete config. */
4168 len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
/* A negative length means the request was unworkable: disconnect. */
4170 l2cap_send_disconn_req(chan, ECONNRESET);
4174 chan->ident = cmd->ident;
4175 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4176 chan->num_conf_rsp++;
4178 /* Reset config buffer. */
4181 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: finish channel initialization. */
4184 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4185 set_default_fcs(chan);
4187 if (chan->mode == L2CAP_MODE_ERTM ||
4188 chan->mode == L2CAP_MODE_STREAMING)
4189 err = l2cap_ertm_init(chan);
4192 l2cap_send_disconn_req(chan, -err);
4194 l2cap_chan_ready(chan);
/* Our own Configure Request may still need to go out. */
4199 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4201 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4202 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4203 chan->num_conf_req++;
4206 /* Got Conf Rsp PENDING from remote side and assume we sent
4207 Conf Rsp PENDING in the code above */
4208 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4209 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4211 /* check compatibility */
4213 /* Send rsp for BR/EDR channel */
4215 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4217 chan->ident = cmd->ident;
4221 l2cap_chan_unlock(chan);
4222 l2cap_chan_put(chan);
/* Handle a Configure Response from the remote.  SUCCESS stores the
 * negotiated RFC values; PENDING may trigger EFS logical-link
 * setup; UNACCEPT re-negotiates a bounded number of times; any
 * other result tears the channel down.
 *
 * NOTE(review): some lines are missing from this listing (switch
 * skeleton, goto/unlock statements, `break`s, the final return);
 * comments cover only the visible statements.
 */
4226 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4227 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4230 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4231 u16 scid, flags, result;
4232 struct l2cap_chan *chan;
4233 int len = cmd_len - sizeof(*rsp);
4236 if (cmd_len < sizeof(*rsp))
4239 scid = __le16_to_cpu(rsp->scid);
4240 flags = __le16_to_cpu(rsp->flags);
4241 result = __le16_to_cpu(rsp->result);
4243 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
/* Takes a channel reference; released at the end. */
4246 chan = l2cap_get_chan_by_scid(conn, scid);
4251 case L2CAP_CONF_SUCCESS:
4252 l2cap_conf_rfc_get(chan, rsp->data, len);
4253 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4256 case L2CAP_CONF_PENDING:
4257 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4259 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4262 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4263 buf, sizeof(buf), &result);
4265 l2cap_send_disconn_req(chan, ECONNRESET);
/* No high-speed link: answer immediately; otherwise set up the
 * AMP logical link first when EFS is in play.
 */
4269 if (!chan->hs_hcon) {
4270 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4273 if (l2cap_check_efs(chan)) {
4274 amp_create_logical_link(chan);
4275 chan->ident = cmd->ident;
4281 case L2CAP_CONF_UNACCEPT:
4282 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Don't accept oversized option payloads into the stack buffer. */
4285 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4286 l2cap_send_disconn_req(chan, ECONNRESET);
4290 /* throw out any old stored conf requests */
4291 result = L2CAP_CONF_SUCCESS;
4292 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4293 req, sizeof(req), &result);
4295 l2cap_send_disconn_req(chan, ECONNRESET);
4299 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4300 L2CAP_CONF_REQ, len, req);
4301 chan->num_conf_req++;
4302 if (result != L2CAP_CONF_SUCCESS)
/* Any other result: fail the channel and disconnect. */
4308 l2cap_chan_set_err(chan, ECONNRESET);
4310 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4311 l2cap_send_disconn_req(chan, ECONNRESET);
/* More option fragments coming: wait for the final response. */
4315 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4318 set_bit(CONF_INPUT_DONE, &chan->conf_state);
/* Both directions configured: finish channel initialization. */
4320 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4321 set_default_fcs(chan);
4323 if (chan->mode == L2CAP_MODE_ERTM ||
4324 chan->mode == L2CAP_MODE_STREAMING)
4325 err = l2cap_ertm_init(chan);
4328 l2cap_send_disconn_req(chan, -err);
4330 l2cap_chan_ready(chan);
4334 l2cap_chan_unlock(chan);
4335 l2cap_chan_put(chan);
/* Handle an incoming L2CAP Disconnection Request.
 * Looks up the local channel by the peer's dcid, acknowledges with a
 * Disconnection Response, then tears the channel down.
 * Locking: conn->chan_lock is taken around the lookup and teardown;
 * the channel itself is held+locked before being deleted.
 */
4339 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4340 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4343 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4344 struct l2cap_disconn_rsp rsp;
4346 struct l2cap_chan *chan;
/* Disconnect request has a fixed size; anything else is malformed. */
4348 if (cmd_len != sizeof(*req))
4351 scid = __le16_to_cpu(req->scid);
4352 dcid = __le16_to_cpu(req->dcid);
4354 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4356 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our scid, hence the by-scid lookup with dcid. */
4358 chan = __l2cap_get_chan_by_scid(conn, dcid);
4360 mutex_unlock(&conn->chan_lock);
/* Unknown CID: reply with a Command Reject (invalid CID). */
4361 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
/* Hold a ref so the channel survives until we are done with it. */
4365 l2cap_chan_hold(chan);
4366 l2cap_chan_lock(chan);
/* Echo the CID pair back in the response (swapped perspective). */
4368 rsp.dcid = cpu_to_le16(chan->scid);
4369 rsp.scid = cpu_to_le16(chan->dcid);
4370 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4372 chan->ops->set_shutdown(chan);
/* Remove from the connection and notify users with ECONNRESET. */
4374 l2cap_chan_del(chan, ECONNRESET);
4376 chan->ops->close(chan);
4378 l2cap_chan_unlock(chan);
4379 l2cap_chan_put(chan);
4381 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP Disconnection Response.
 * Completes a disconnect we initiated: finds the channel by scid and,
 * if it is still in BT_DISCONN, deletes and closes it.
 */
4386 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4387 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4390 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4392 struct l2cap_chan *chan;
/* Fixed-size PDU check. */
4394 if (cmd_len != sizeof(*rsp))
4397 scid = __le16_to_cpu(rsp->scid);
4398 dcid = __le16_to_cpu(rsp->dcid);
4400 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4402 mutex_lock(&conn->chan_lock);
4404 chan = __l2cap_get_chan_by_scid(conn, scid);
4406 mutex_unlock(&conn->chan_lock);
4410 l2cap_chan_hold(chan);
4411 l2cap_chan_lock(chan);
/* Only a channel we actually put in BT_DISCONN may be completed here;
 * a stray/late response is ignored.
 */
4413 if (chan->state != BT_DISCONN) {
4414 l2cap_chan_unlock(chan);
4415 l2cap_chan_put(chan);
4416 mutex_unlock(&conn->chan_lock);
/* Clean disconnect: err 0 means no error reported to the user. */
4420 l2cap_chan_del(chan, 0);
4422 chan->ops->close(chan);
4424 l2cap_chan_unlock(chan);
4425 l2cap_chan_put(chan);
4427 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP Information Request.
 * Answers three request types: feature mask, fixed channels supported,
 * and anything else with a NOTSUPP result.
 */
4432 static inline int l2cap_information_req(struct l2cap_conn *conn,
4433 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4436 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4439 if (cmd_len != sizeof(*req))
4442 type = __le16_to_cpu(req->type);
4444 BT_DBG("type 0x%4.4x", type);
4446 if (type == L2CAP_IT_FEAT_MASK) {
/* Start from the static feature mask and add runtime capabilities. */
4448 u32 feat_mask = l2cap_feat_mask;
4449 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4450 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4451 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4453 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Extended flow spec / window sizes only advertised with A2MP. */
4455 if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4456 feat_mask |= L2CAP_FEAT_EXT_FLOW
4457 | L2CAP_FEAT_EXT_WINDOW;
/* rsp->data may be unaligned inside the stack buffer. */
4459 put_unaligned_le32(feat_mask, rsp->data);
4460 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4462 } else if (type == L2CAP_IT_FIXED_CHAN) {
4464 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4466 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4467 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* First octet carries our fixed-channel bitmap; the remaining
 * seven octets of the 8-byte field are reserved (zeroed).
 */
4468 rsp->data[0] = conn->local_fixed_chan;
4469 memset(rsp->data + 1, 0, 7);
4470 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4473 struct l2cap_info_rsp rsp;
/* Unknown info type: echo it back with NOTSUPP. */
4474 rsp.type = cpu_to_le16(type);
4475 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4476 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an incoming L2CAP Information Response.
 * Stores the peer's feature mask / fixed-channel map, chains a follow-up
 * fixed-channel query when appropriate, and kicks off pending connection
 * setup once the information exchange is finished.
 */
4483 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4484 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4487 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4490 if (cmd_len < sizeof(*rsp))
4493 type = __le16_to_cpu(rsp->type);
4494 result = __le16_to_cpu(rsp->result);
4496 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4498 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
/* Drop responses whose ident does not match our outstanding request,
 * or that arrive after the exchange was already completed.
 */
4499 if (cmd->ident != conn->info_ident ||
4500 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4503 cancel_delayed_work(&conn->info_timer);
/* Peer reported failure: mark the exchange done and proceed anyway. */
4505 if (result != L2CAP_IR_SUCCESS) {
4506 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4507 conn->info_ident = 0;
4509 l2cap_conn_start(conn);
4515 case L2CAP_IT_FEAT_MASK:
4516 conn->feat_mask = get_unaligned_le32(rsp->data);
/* If the peer supports fixed channels, ask which ones next. */
4518 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4519 struct l2cap_info_req req;
4520 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4522 conn->info_ident = l2cap_get_ident(conn);
4524 l2cap_send_cmd(conn, conn->info_ident,
4525 L2CAP_INFO_REQ, sizeof(req), &req);
/* No fixed-channel support: the exchange ends here. */
4527 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4528 conn->info_ident = 0;
4530 l2cap_conn_start(conn);
4534 case L2CAP_IT_FIXED_CHAN:
/* Only the first octet (the fixed-channel bitmap) is consumed. */
4535 conn->remote_fixed_chan = rsp->data[0];
4536 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4537 conn->info_ident = 0;
4539 l2cap_conn_start(conn);
/* Handle an incoming L2CAP Create Channel Request (AMP).
 * amp_id == AMP_ID_BREDR means an ordinary BR/EDR connect; otherwise the
 * requested AMP controller is validated and the channel is prepared for
 * a high-speed link. Invalid controllers get a BAD_AMP response.
 * NOTE(review): sampled listing — some lines (e.g. error-path jumps)
 * between the numbered lines are not visible here.
 */
4546 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4547 struct l2cap_cmd_hdr *cmd,
4548 u16 cmd_len, void *data)
4550 struct l2cap_create_chan_req *req = data;
4551 struct l2cap_create_chan_rsp rsp;
4552 struct l2cap_chan *chan;
4553 struct hci_dev *hdev;
4556 if (cmd_len != sizeof(*req))
/* A2MP must be enabled locally for Create Channel to be legal. */
4559 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4562 psm = le16_to_cpu(req->psm);
4563 scid = le16_to_cpu(req->scid);
4565 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4567 /* For controller id 0 make BR/EDR connection */
4568 if (req->amp_id == AMP_ID_BREDR) {
4569 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4574 /* Validate AMP controller id */
4575 hdev = hci_dev_get(req->amp_id);
/* Controller must exist, be an AMP, and be powered up. */
4579 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4584 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4587 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4588 struct hci_conn *hs_hcon;
/* The AMP physical link must already exist for this peer. */
4590 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4594 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4599 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4601 mgr->bredr_chan = chan;
4602 chan->hs_hcon = hs_hcon;
/* FCS is not used on AMP links. */
4603 chan->fcs = L2CAP_FCS_NONE;
4604 conn->mtu = hdev->block_mtu;
/* Error path: reject the request with BAD_AMP. */
4613 rsp.scid = cpu_to_le16(scid);
4614 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4615 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4617 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Send an L2CAP Move Channel Request asking to move this channel to the
 * controller identified by @dest_amp_id, and arm the move timeout.
 * Records the signaling ident in chan->ident so the response can be
 * matched later.
 */
4623 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4625 struct l2cap_move_chan_req req;
4628 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4630 ident = l2cap_get_ident(chan->conn);
4631 chan->ident = ident;
/* The Initiator CID (icid) is our source CID. */
4633 req.icid = cpu_to_le16(chan->scid);
4634 req.dest_amp_id = dest_amp_id;
4636 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4639 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an L2CAP Move Channel Response with @result, reusing the ident
 * saved from the peer's Move Channel Request.
 */
4642 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4644 struct l2cap_move_chan_rsp rsp;
4646 BT_DBG("chan %p, result 0x%4.4x", chan, result);
/* The initiator's CID is our destination CID from this side. */
4648 rsp.icid = cpu_to_le16(chan->dcid);
4649 rsp.result = cpu_to_le16(result);
4651 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/* Send an L2CAP Move Channel Confirmation with @result using a fresh
 * signaling ident, and arm the move timeout while waiting for the
 * confirmation response.
 */
4655 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4657 struct l2cap_move_chan_cfm cfm;
4659 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4661 chan->ident = l2cap_get_ident(chan->conn);
4663 cfm.icid = cpu_to_le16(chan->scid);
4664 cfm.result = cpu_to_le16(result);
4666 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4669 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an UNCONFIRMED Move Channel Confirmation for a bare icid.
 * Used when no channel object could be located — the spec still
 * requires a confirmation on the wire.
 */
4672 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4674 struct l2cap_move_chan_cfm cfm;
4676 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4678 cfm.icid = cpu_to_le16(icid);
4679 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4681 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/* Send an L2CAP Move Channel Confirmation Response for @icid, echoing
 * the @ident of the confirmation it answers.
 */
4685 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4688 struct l2cap_move_chan_cfm_rsp rsp;
4690 BT_DBG("icid 0x%4.4x", icid);
4692 rsp.icid = cpu_to_le16(icid);
4693 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Detach the channel from its high-speed logical link by clearing the
 * hs_hchan/hs_hcon pointers. Actual link release is still a placeholder.
 */
4696 static void __release_logical_link(struct l2cap_chan *chan)
4698 chan->hs_hchan = NULL;
4699 chan->hs_hcon = NULL;
4701 /* Placeholder - release the logical link */
/* React to a failed AMP logical link setup.
 * For a channel still being created, disconnect it; for an established
 * channel mid-move, unwind the move according to our move role.
 */
4704 static void l2cap_logical_fail(struct l2cap_chan *chan)
4706 /* Logical link setup failed */
4707 if (chan->state != BT_CONNECTED) {
4708 /* Create channel failure, disconnect */
4709 l2cap_send_disconn_req(chan, ECONNRESET);
4713 switch (chan->move_role) {
4714 case L2CAP_MOVE_ROLE_RESPONDER:
/* We were asked to move: tell the initiator we can't. */
4715 l2cap_move_done(chan);
4716 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4718 case L2CAP_MOVE_ROLE_INITIATOR:
4719 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4720 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4721 /* Remote has only sent pending or
4722 * success responses, clean up
4724 l2cap_move_done(chan);
4727 /* Other amp move states imply that the move
4728 * has already aborted
4730 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Complete channel creation once the AMP logical link is up: attach the
 * hci_chan, send the deferred EFS Configure Response, and if the input
 * side is already configured, finish ERTM setup and mark the channel
 * ready.
 */
4735 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4736 struct hci_chan *hchan)
4738 struct l2cap_conf_rsp rsp;
4740 chan->hs_hchan = hchan;
4741 chan->hs_hcon->l2cap_data = chan->conn;
/* chan->ident was saved when the config exchange was deferred. */
4743 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4745 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4748 set_default_fcs(chan);
4750 err = l2cap_ertm_init(chan);
4752 l2cap_send_disconn_req(chan, -err);
4754 l2cap_chan_ready(chan);
/* Advance the channel-move state machine once the AMP logical link is
 * up. What to send next (confirm vs. response) depends on whether we
 * are the move initiator or responder, and on local busy status.
 */
4758 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4759 struct hci_chan *hchan)
4761 chan->hs_hcon = hchan->conn;
4762 chan->hs_hcon->l2cap_data = chan->conn;
4764 BT_DBG("move_state %d", chan->move_state);
4766 switch (chan->move_state) {
4767 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4768 /* Move confirm will be sent after a success
4769 * response is received
4771 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4773 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
/* Local receiver busy defers the move completion. */
4774 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4775 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4776 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4777 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4778 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4779 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4780 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4781 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4785 /* Move was not in expected state, free the channel */
4786 __release_logical_link(chan);
4788 chan->move_state = L2CAP_MOVE_STABLE;
4792 /* Call with chan locked */
/* Confirmation callback for AMP logical link setup.
 * On failure, unwinds via l2cap_logical_fail() and drops the link.
 * On success, finishes either channel creation (channel not yet
 * connected and not on BR/EDR) or a channel move.
 */
4793 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4796 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4799 l2cap_logical_fail(chan);
4800 __release_logical_link(chan);
4804 if (chan->state != BT_CONNECTED) {
4805 /* Ignore logical link if channel is on BR/EDR */
4806 if (chan->local_amp_id != AMP_ID_BREDR)
4807 l2cap_logical_finish_create(chan, hchan);
4809 l2cap_logical_finish_move(chan, hchan);
/* Begin moving a channel between controllers as the move initiator.
 * From BR/EDR we only move when policy prefers AMP (physical link setup
 * is still a placeholder); from an AMP we move back to BR/EDR and send
 * the Move Channel Request immediately.
 */
4813 void l2cap_move_start(struct l2cap_chan *chan)
4815 BT_DBG("chan %p", chan);
4817 if (chan->local_amp_id == AMP_ID_BREDR) {
/* Only move off BR/EDR when the channel policy asks for AMP. */
4818 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4820 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4821 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4822 /* Placeholder - start physical link setup */
4824 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4825 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4827 l2cap_move_setup(chan);
/* Destination 0 == AMP_ID_BREDR: request a move back to BR/EDR. */
4828 l2cap_send_move_chan_req(chan, 0);
/* Continue channel creation after physical-link setup resolves.
 * Outgoing channel: on success send Create Channel Request on the AMP,
 * otherwise fall back to a plain BR/EDR Connect Request.
 * Incoming channel: answer the pending Create Channel Request and, on
 * success, start configuration.
 */
4832 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4833 u8 local_amp_id, u8 remote_amp_id)
4835 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4836 local_amp_id, remote_amp_id);
/* No FCS on AMP channels. */
4838 chan->fcs = L2CAP_FCS_NONE;
4840 /* Outgoing channel on AMP */
4841 if (chan->state == BT_CONNECT) {
4842 if (result == L2CAP_CR_SUCCESS) {
4843 chan->local_amp_id = local_amp_id;
4844 l2cap_send_create_chan_req(chan, remote_amp_id);
4846 /* Revert to BR/EDR connect */
4847 l2cap_send_conn_req(chan);
4853 /* Incoming channel on AMP */
4854 if (__l2cap_no_conn_pending(chan)) {
4855 struct l2cap_conn_rsp rsp;
4857 rsp.scid = cpu_to_le16(chan->dcid);
4858 rsp.dcid = cpu_to_le16(chan->scid);
4860 if (result == L2CAP_CR_SUCCESS) {
4861 /* Send successful response */
4862 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4863 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4865 /* Send negative response */
4866 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4867 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4870 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
/* Accepted: move to BT_CONFIG and fire the first Configure Request. */
4873 if (result == L2CAP_CR_SUCCESS) {
4874 l2cap_state_change(chan, BT_CONFIG);
4875 set_bit(CONF_REQ_SENT, &chan->conf_state);
4876 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4878 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4879 chan->num_conf_req++;
/* As move initiator: prepare the channel for moving, remember the
 * target controller in move_id, and send the Move Channel Request.
 */
4884 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4887 l2cap_move_setup(chan);
4888 chan->move_id = local_amp_id;
4889 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4891 l2cap_send_move_chan_req(chan, remote_amp_id);
/* As move responder: answer the peer's move request depending on the
 * state of the logical link (ready, pending, or unavailable).
 * NOTE(review): hchan acquisition is still a placeholder; as visible
 * here hchan stays NULL and only the placeholder comment marks where
 * the lookup belongs.
 */
4894 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4896 struct hci_chan *hchan = NULL;
4898 /* Placeholder - get hci_chan for logical link */
4901 if (hchan->state == BT_CONNECTED) {
4902 /* Logical link is ready to go */
4903 chan->hs_hcon = hchan->conn;
4904 chan->hs_hcon->l2cap_data = chan->conn;
4905 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4906 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4908 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4910 /* Wait for logical link to be ready */
4911 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4914 /* Logical link not available */
4915 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort an in-progress move: as responder, reply with BAD_ID (for
 * -EINVAL) or NOT_ALLOWED; then reset the move role/state and resume
 * ERTM transmission on the current link.
 */
4919 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4921 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4923 if (result == -EINVAL)
4924 rsp_result = L2CAP_MR_BAD_ID;
4926 rsp_result = L2CAP_MR_NOT_ALLOWED;
4928 l2cap_send_move_chan_rsp(chan, rsp_result);
4931 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4932 chan->move_state = L2CAP_MOVE_STABLE;
4934 /* Restart data transmission */
4935 l2cap_ertm_send(chan);
4938 /* Invoke with locked chan */
/* Confirmation callback for AMP physical link setup.
 * Channels being disconnected are ignored; a channel not yet connected
 * continues creation; an established channel continues (or cancels) its
 * move according to the result and our move role.
 */
4939 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4941 u8 local_amp_id = chan->local_amp_id;
4942 u8 remote_amp_id = chan->remote_amp_id;
4944 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4945 chan, result, local_amp_id, remote_amp_id);
/* Nothing to do for channels already going away. */
4947 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
4950 if (chan->state != BT_CONNECTED) {
4951 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4952 } else if (result != L2CAP_MR_SUCCESS) {
4953 l2cap_do_move_cancel(chan, result);
4955 switch (chan->move_role) {
4956 case L2CAP_MOVE_ROLE_INITIATOR:
4957 l2cap_do_move_initiate(chan, local_amp_id,
4960 case L2CAP_MOVE_ROLE_RESPONDER:
4961 l2cap_do_move_respond(chan, result);
/* No/unknown move role: treat as a cancel. */
4964 l2cap_do_move_cancel(chan, result);
/* Handle an incoming L2CAP Move Channel Request.
 * Validates the channel (dynamic CID, policy, ERTM/streaming mode) and
 * the destination controller, detects move collisions by bd_addr
 * comparison, then either starts acting as move responder or rejects
 * with an appropriate result code.
 */
4970 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4971 struct l2cap_cmd_hdr *cmd,
4972 u16 cmd_len, void *data)
4974 struct l2cap_move_chan_req *req = data;
4975 struct l2cap_move_chan_rsp rsp;
4976 struct l2cap_chan *chan;
4978 u16 result = L2CAP_MR_NOT_ALLOWED;
4980 if (cmd_len != sizeof(*req))
4983 icid = le16_to_cpu(req->icid);
4985 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
/* Moves are only legal when A2MP is enabled locally. */
4987 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4990 chan = l2cap_get_chan_by_dcid(conn, icid);
/* Unknown icid: must still answer, with NOT_ALLOWED. */
4992 rsp.icid = cpu_to_le16(icid);
4993 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4994 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
/* Save the ident so the Move Channel Response can reuse it. */
4999 chan->ident = cmd->ident;
/* Only dynamic-CID channels in ERTM/streaming mode, and not pinned
 * to BR/EDR by policy, may be moved.
 */
5001 if (chan->scid < L2CAP_CID_DYN_START ||
5002 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
5003 (chan->mode != L2CAP_MODE_ERTM &&
5004 chan->mode != L2CAP_MODE_STREAMING)) {
5005 result = L2CAP_MR_NOT_ALLOWED;
5006 goto send_move_response;
/* Moving to the controller we are already on is an error. */
5009 if (chan->local_amp_id == req->dest_amp_id) {
5010 result = L2CAP_MR_SAME_ID;
5011 goto send_move_response;
5014 if (req->dest_amp_id != AMP_ID_BREDR) {
5015 struct hci_dev *hdev;
5016 hdev = hci_dev_get(req->dest_amp_id);
/* Destination must exist, be an AMP controller, and be up. */
5017 if (!hdev || hdev->dev_type != HCI_AMP ||
5018 !test_bit(HCI_UP, &hdev->flags)) {
5022 result = L2CAP_MR_BAD_ID;
5023 goto send_move_response;
5028 /* Detect a move collision. Only send a collision response
5029 * if this side has "lost", otherwise proceed with the move.
5030 * The winner has the larger bd_addr.
5032 if ((__chan_is_moving(chan) ||
5033 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5034 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5035 result = L2CAP_MR_COLLISION;
5036 goto send_move_response;
5039 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5040 l2cap_move_setup(chan);
5041 chan->move_id = req->dest_amp_id;
5044 if (req->dest_amp_id == AMP_ID_BREDR) {
5045 /* Moving to BR/EDR */
5046 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5047 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5048 result = L2CAP_MR_PEND;
5050 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5051 result = L2CAP_MR_SUCCESS;
/* Moving to an AMP: physical link must be accepted first. */
5054 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5055 /* Placeholder - uncomment when amp functions are available */
5056 /*amp_accept_physical(chan, req->dest_amp_id);*/
5057 result = L2CAP_MR_PEND;
5061 l2cap_send_move_chan_rsp(chan, result);
5063 l2cap_chan_unlock(chan);
5064 l2cap_chan_put(chan);
/* Continue a channel move after a SUCCESS or PEND Move Channel
 * Response. Advances the initiator-side move state machine; a PEND
 * result re-arms the extended move timer. If no channel matches the
 * icid, an UNCONFIRMED confirmation is sent as required by the spec.
 * NOTE(review): sampled listing — the hchan lookup here is also a
 * placeholder (hchan stays NULL as visible).
 */
5069 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5071 struct l2cap_chan *chan;
5072 struct hci_chan *hchan = NULL;
5074 chan = l2cap_get_chan_by_scid(conn, icid);
5076 l2cap_send_move_chan_cfm_icid(conn, icid);
5080 __clear_chan_timer(chan);
/* Pending result extends the wait with the ERTX timeout. */
5081 if (result == L2CAP_MR_PEND)
5082 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5084 switch (chan->move_state) {
5085 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5086 /* Move confirm will be sent when logical link
5089 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5091 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5092 if (result == L2CAP_MR_PEND) {
5094 } else if (test_bit(CONN_LOCAL_BUSY,
5095 &chan->conn_state)) {
5096 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5098 /* Logical link is up or moving to BR/EDR,
5101 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5102 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5105 case L2CAP_MOVE_WAIT_RSP:
5107 if (result == L2CAP_MR_SUCCESS) {
5108 /* Remote is ready, send confirm immediately
5109 * after logical link is ready
5111 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5113 /* Both logical link and move success
5114 * are required to confirm
5116 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5119 /* Placeholder - get hci_chan for logical link */
5121 /* Logical link not available */
5122 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5126 /* If the logical link is not yet connected, do not
5127 * send confirmation.
5129 if (hchan->state != BT_CONNECTED)
5132 /* Logical link is already ready to go */
5134 chan->hs_hcon = hchan->conn;
5135 chan->hs_hcon->l2cap_data = chan->conn;
5137 if (result == L2CAP_MR_SUCCESS) {
5138 /* Can confirm now */
5139 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5141 /* Now only need move success
5144 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5147 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5150 /* Any other amp move state means the move failed. */
5151 chan->move_id = chan->local_amp_id;
5152 l2cap_move_done(chan);
5153 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5156 l2cap_chan_unlock(chan);
5157 l2cap_chan_put(chan);
/* Handle a failed Move Channel Response. On COLLISION we switch to the
 * responder role (the peer won); otherwise the move is cancelled and an
 * UNCONFIRMED confirmation is sent. If no channel matches the ident,
 * the confirmation is sent for the icid alone.
 */
5160 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5163 struct l2cap_chan *chan;
5165 chan = l2cap_get_chan_by_ident(conn, ident);
5167 /* Could not locate channel, icid is best guess */
5168 l2cap_send_move_chan_cfm_icid(conn, icid);
5172 __clear_chan_timer(chan);
5174 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5175 if (result == L2CAP_MR_COLLISION) {
/* Lost the collision: let the peer drive the move. */
5176 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5178 /* Cleanup - cancel move */
5179 chan->move_id = chan->local_amp_id;
5180 l2cap_move_done(chan);
5184 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5186 l2cap_chan_unlock(chan);
5187 l2cap_chan_put(chan);
/* Handle an incoming L2CAP Move Channel Response: dispatch SUCCESS/PEND
 * to l2cap_move_continue() and everything else to l2cap_move_fail().
 */
5190 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5191 struct l2cap_cmd_hdr *cmd,
5192 u16 cmd_len, void *data)
5194 struct l2cap_move_chan_rsp *rsp = data;
5197 if (cmd_len != sizeof(*rsp))
5200 icid = le16_to_cpu(rsp->icid);
5201 result = le16_to_cpu(rsp->result);
5203 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5205 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5206 l2cap_move_continue(conn, icid, result);
5208 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle an incoming L2CAP Move Channel Confirmation.
 * If we were waiting for it, commit (CONFIRMED) or roll back the
 * controller switch and finish the move. A confirmation response is
 * always sent, even for an unknown icid, per the spec.
 */
5213 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5214 struct l2cap_cmd_hdr *cmd,
5215 u16 cmd_len, void *data)
5217 struct l2cap_move_chan_cfm *cfm = data;
5218 struct l2cap_chan *chan;
5221 if (cmd_len != sizeof(*cfm))
5224 icid = le16_to_cpu(cfm->icid);
5225 result = le16_to_cpu(cfm->result);
5227 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5229 chan = l2cap_get_chan_by_dcid(conn, icid);
5231 /* Spec requires a response even if the icid was not found */
5232 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5236 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5237 if (result == L2CAP_MC_CONFIRMED) {
/* Commit: adopt the new controller; leaving an AMP for BR/EDR
 * releases the logical link.
 */
5238 chan->local_amp_id = chan->move_id;
5239 if (chan->local_amp_id == AMP_ID_BREDR)
5240 __release_logical_link(chan);
/* Unconfirmed: stay on the current controller. */
5242 chan->move_id = chan->local_amp_id;
5245 l2cap_move_done(chan);
5248 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5250 l2cap_chan_unlock(chan);
5251 l2cap_chan_put(chan);
/* Handle an incoming L2CAP Move Channel Confirmation Response.
 * Final step of a move we confirmed: adopt the new controller id,
 * release the logical link when returning to BR/EDR, and mark the
 * move done.
 */
5256 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5257 struct l2cap_cmd_hdr *cmd,
5258 u16 cmd_len, void *data)
5260 struct l2cap_move_chan_cfm_rsp *rsp = data;
5261 struct l2cap_chan *chan;
5264 if (cmd_len != sizeof(*rsp))
5267 icid = le16_to_cpu(rsp->icid);
5269 BT_DBG("icid 0x%4.4x", icid);
5271 chan = l2cap_get_chan_by_scid(conn, icid);
5275 __clear_chan_timer(chan);
5277 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5278 chan->local_amp_id = chan->move_id;
/* Back on BR/EDR with a logical link still attached: release it. */
5280 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5281 __release_logical_link(chan);
5283 l2cap_move_done(chan);
5286 l2cap_chan_unlock(chan);
5287 l2cap_chan_put(chan);
/* Handle an LE Connection Parameter Update Request.
 * Only valid when we are the LE master. Parameters are validated with
 * hci_check_conn_params(); the response (accepted/rejected) is sent
 * before the actual connection update is issued to the controller.
 */
5292 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5293 struct l2cap_cmd_hdr *cmd,
5294 u16 cmd_len, u8 *data)
5296 struct hci_conn *hcon = conn->hcon;
5297 struct l2cap_conn_param_update_req *req;
5298 struct l2cap_conn_param_update_rsp rsp;
5299 u16 min, max, latency, to_multiplier;
/* Only the master may be asked to update connection parameters. */
5302 if (hcon->role != HCI_ROLE_MASTER)
5305 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5308 req = (struct l2cap_conn_param_update_req *) data;
5309 min = __le16_to_cpu(req->min);
5310 max = __le16_to_cpu(req->max);
5311 latency = __le16_to_cpu(req->latency);
5312 to_multiplier = __le16_to_cpu(req->to_multiplier);
5314 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5315 min, max, latency, to_multiplier);
5317 memset(&rsp, 0, sizeof(rsp));
5319 err = hci_check_conn_params(min, max, latency, to_multiplier);
5321 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5323 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5325 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: apply the update and record the parameters via mgmt. */
5331 store_hint = hci_le_conn_update(hcon, min, max, latency,
5333 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5334 store_hint, min, max, latency,
/* Handle an LE Credit Based Connection Response.
 * Validates mtu/mps/dcid ranges, matches the channel by the request
 * ident, then completes the connection or — for security failures —
 * raises the security level and retries via SMP.
 */
5342 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5343 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5346 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5347 struct hci_conn *hcon = conn->hcon;
5348 u16 dcid, mtu, mps, credits, result;
5349 struct l2cap_chan *chan;
5352 if (cmd_len < sizeof(*rsp))
5355 dcid = __le16_to_cpu(rsp->dcid);
5356 mtu = __le16_to_cpu(rsp->mtu);
5357 mps = __le16_to_cpu(rsp->mps);
5358 credits = __le16_to_cpu(rsp->credits);
5359 result = __le16_to_cpu(rsp->result);
/* 23 is the minimum legal LE MTU/MPS; dcid must be a valid LE
 * dynamic CID.
 */
5361 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23 ||
5362 dcid < L2CAP_CID_DYN_START ||
5363 dcid > L2CAP_CID_LE_DYN_END))
5366 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5367 dcid, mtu, mps, credits, result);
5369 mutex_lock(&conn->chan_lock);
/* The response is matched to our request by the signaling ident. */
5371 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5379 l2cap_chan_lock(chan);
5382 case L2CAP_CR_SUCCESS:
/* The peer must not hand out a dcid already in use. */
5383 if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5391 chan->remote_mps = mps;
5392 chan->tx_credits = credits;
5393 l2cap_chan_ready(chan);
5396 case L2CAP_CR_AUTHENTICATION:
5397 case L2CAP_CR_ENCRYPTION:
5398 /* If we already have MITM protection we can't do
5401 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5402 l2cap_chan_del(chan, ECONNREFUSED);
/* Otherwise raise the security level and retry. */
5406 sec_level = hcon->sec_level + 1;
5407 if (chan->sec_level < sec_level)
5408 chan->sec_level = sec_level;
5410 /* We'll need to send a new Connect Request */
5411 clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5413 smp_conn_security(hcon, chan->sec_level);
/* Any other result: connection refused. */
5417 l2cap_chan_del(chan, ECONNREFUSED);
5421 l2cap_chan_unlock(chan);
5424 mutex_unlock(&conn->chan_lock);
/* Dispatch one BR/EDR signaling command to its handler by opcode.
 * Unknown opcodes are logged; Echo Requests are answered inline by
 * reflecting the payload.
 */
5429 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5430 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5435 switch (cmd->code) {
5436 case L2CAP_COMMAND_REJ:
5437 l2cap_command_rej(conn, cmd, cmd_len, data);
5440 case L2CAP_CONN_REQ:
5441 err = l2cap_connect_req(conn, cmd, cmd_len, data);
/* Connect and Create Channel responses share one handler. */
5444 case L2CAP_CONN_RSP:
5445 case L2CAP_CREATE_CHAN_RSP:
5446 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5449 case L2CAP_CONF_REQ:
5450 err = l2cap_config_req(conn, cmd, cmd_len, data);
5453 case L2CAP_CONF_RSP:
5454 l2cap_config_rsp(conn, cmd, cmd_len, data);
5457 case L2CAP_DISCONN_REQ:
5458 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5461 case L2CAP_DISCONN_RSP:
5462 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5465 case L2CAP_ECHO_REQ:
/* Echo the received payload straight back. */
5466 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5469 case L2CAP_ECHO_RSP:
5472 case L2CAP_INFO_REQ:
5473 err = l2cap_information_req(conn, cmd, cmd_len, data);
5476 case L2CAP_INFO_RSP:
5477 l2cap_information_rsp(conn, cmd, cmd_len, data);
5480 case L2CAP_CREATE_CHAN_REQ:
5481 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5484 case L2CAP_MOVE_CHAN_REQ:
5485 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5488 case L2CAP_MOVE_CHAN_RSP:
5489 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5492 case L2CAP_MOVE_CHAN_CFM:
5493 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5496 case L2CAP_MOVE_CHAN_CFM_RSP:
5497 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5501 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Handle an LE Credit Based Connection Request.
 * Validates mtu/mps and the requested scid, checks for a listening
 * channel on the PSM and sufficient security, creates the new channel,
 * initializes LE flow control, and answers with success, pending
 * (deferred setup), or an error result.
 */
5509 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5510 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5513 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5514 struct l2cap_le_conn_rsp rsp;
5515 struct l2cap_chan *chan, *pchan;
5516 u16 dcid, scid, credits, mtu, mps;
5520 if (cmd_len != sizeof(*req))
5523 scid = __le16_to_cpu(req->scid);
5524 mtu = __le16_to_cpu(req->mtu);
5525 mps = __le16_to_cpu(req->mps);
/* 23 is the minimum legal LE MTU/MPS. */
5530 if (mtu < 23 || mps < 23)
5533 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5536 /* Check if we have socket listening on psm */
5537 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5538 &conn->hcon->dst, LE_LINK);
5540 result = L2CAP_CR_BAD_PSM;
5545 mutex_lock(&conn->chan_lock);
5546 l2cap_chan_lock(pchan);
/* The current link security must satisfy the listener's level. */
5548 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5550 result = L2CAP_CR_AUTHENTICATION;
5552 goto response_unlock;
5555 /* Check for valid dynamic CID range */
5556 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5557 result = L2CAP_CR_INVALID_SCID;
5559 goto response_unlock;
5562 /* Check if we already have channel with that dcid */
5563 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5564 result = L2CAP_CR_SCID_IN_USE;
5566 goto response_unlock;
5569 chan = pchan->ops->new_connection(pchan);
5571 result = L2CAP_CR_NO_MEM;
5572 goto response_unlock;
5575 l2cap_le_flowctl_init(chan);
5577 bacpy(&chan->src, &conn->hcon->src);
5578 bacpy(&chan->dst, &conn->hcon->dst);
5579 chan->src_type = bdaddr_src_type(conn->hcon);
5580 chan->dst_type = bdaddr_dst_type(conn->hcon);
5584 chan->remote_mps = mps;
/* Initial TX credits come straight from the request. */
5585 chan->tx_credits = __le16_to_cpu(req->credits);
5587 __l2cap_chan_add(conn, chan);
5589 credits = chan->rx_credits;
5591 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5593 chan->ident = cmd->ident;
5595 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5596 l2cap_state_change(chan, BT_CONNECT2);
5597 /* The following result value is actually not defined
5598 * for LE CoC but we use it to let the function know
5599 * that it should bail out after doing its cleanup
5600 * instead of sending a response.
5602 result = L2CAP_CR_PEND;
5603 chan->ops->defer(chan);
5605 l2cap_chan_ready(chan);
5606 result = L2CAP_CR_SUCCESS;
5610 l2cap_chan_unlock(pchan);
5611 mutex_unlock(&conn->chan_lock);
5612 l2cap_chan_put(pchan);
/* Deferred setup: response will be sent later, not here. */
5614 if (result == L2CAP_CR_PEND)
5619 rsp.mtu = cpu_to_le16(chan->imtu);
5620 rsp.mps = cpu_to_le16(chan->mps);
5626 rsp.dcid = cpu_to_le16(dcid);
5627 rsp.credits = cpu_to_le16(credits);
5628 rsp.result = cpu_to_le16(result);
5630 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/* Handle an LE Flow Control Credit packet.
 * Adds the peer-granted credits to the channel's TX budget, guarding
 * against overflowing LE_FLOWCTL_MAX_CREDITS (a spec violation that
 * triggers a disconnect), then drains queued frames while credits last.
 */
5635 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5636 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5639 struct l2cap_le_credits *pkt;
5640 struct l2cap_chan *chan;
5641 u16 cid, credits, max_credits;
5643 if (cmd_len != sizeof(*pkt))
5646 pkt = (struct l2cap_le_credits *) data;
5647 cid = __le16_to_cpu(pkt->cid);
5648 credits = __le16_to_cpu(pkt->credits);
5650 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5652 chan = l2cap_get_chan_by_dcid(conn, cid);
/* The total must never exceed the 16-bit credit ceiling. */
5656 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5657 if (credits > max_credits) {
5658 BT_ERR("LE credits overflow");
5659 l2cap_send_disconn_req(chan, ECONNRESET);
5661 /* Return 0 so that we don't trigger an unnecessary
5662 * command reject packet.
5667 chan->tx_credits += credits;
/* Flush as much of the TX queue as the new credits allow. */
5669 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5670 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
/* Credits remain: let the channel owner resume sending. */
5674 if (chan->tx_credits)
5675 chan->ops->resume(chan);
5678 l2cap_chan_unlock(chan);
5679 l2cap_chan_put(chan);
/* Handle a Command Reject on an LE link: if it answers one of our
 * outstanding requests (matched by ident), tear that channel down with
 * ECONNREFUSED.
 */
5684 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5685 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5688 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5689 struct l2cap_chan *chan;
5691 if (cmd_len < sizeof(*rej))
5694 mutex_lock(&conn->chan_lock);
5696 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5700 l2cap_chan_lock(chan);
5701 l2cap_chan_del(chan, ECONNREFUSED);
5702 l2cap_chan_unlock(chan);
5705 mutex_unlock(&conn->chan_lock);
/* Dispatch a single LE signaling command to its handler.  Returns a
 * negative error for commands the caller should answer with a
 * Command Reject (including unknown opcodes).
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		/* Nothing to do: responses carry no actionable state here */
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
/* Process an skb received on the LE signaling channel (CID 0x0005).
 * Unlike BR/EDR, an LE signaling PDU carries exactly one command.
 * Malformed or failed commands are answered with a Command Reject;
 * the skb is always consumed.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	/* LE signaling is only valid on an LE link */
	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* Exactly one command per PDU; ident 0 is reserved */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
/* Process an skb received on the BR/EDR signaling channel (CID 0x0001).
 * A single PDU may contain several concatenated commands; iterate over
 * them, rejecting any that fail.  The skb is always consumed.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a copy of every signaling PDU */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	/* Walk the concatenated commands inside this PDU */
	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* Length must fit in the remaining PDU; ident 0 is reserved */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		data += cmd_len;
		len  -= cmd_len;
	}

drop:
	kfree_skb(skb);
}
/* Verify (and strip) the CRC16 FCS trailer of an ERTM/streaming frame.
 * The CRC covers the L2CAP header that precedes skb->data, hence the
 * negative offset by hdr_size.  Returns 0 on success or when the channel
 * doesn't use an FCS, -EBADMSG on checksum mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	/* Extended control fields make the covered header larger */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Drop the 2-byte FCS from the payload before checking */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}

	return 0;
}
/* Answer a poll (P=1) from the peer with an F=1 frame: RNR if we are
 * locally busy, otherwise pending I-frames carrying the F-bit, or a
 * final RR if no I-frame ended up carrying it.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Leaving remote-busy with unacked frames pending: restart the
	 * retransmission timer.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
/* Chain new_frag onto skb's frag_list during SDU reassembly and update
 * skb's accounting.  *last_frag tracks the current list tail so appends
 * stay O(1); it must point at the tail on entry and is advanced here.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
/* Reassemble ERTM/streaming SDUs from their SAR fragments.  Unsegmented
 * frames are delivered directly; START/CONTINUE/END fragments are
 * accumulated on chan->sdu via append_skb_frag().  On any error the
 * partial SDU and the current skb are freed here, and err propagates so
 * the caller can trigger recovery.  err starts as -EINVAL so that any
 * out-of-order SAR state (e.g. CONTINUE with no SDU in progress) falls
 * through to the cleanup path.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A reassembly must not already be in progress */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* First fragment must be smaller than the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb is now owned by the reassembly buffer */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A CONTINUE fragment may not complete (or overrun) the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* END must make the accumulated length match exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop the offending fragment and any partial SDU */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
6005 static int l2cap_resegment(struct l2cap_chan *chan)
/* Notify the ERTM state machine that the local receiver entered or left
 * the busy state (e.g. socket receive buffer full).  No-op for non-ERTM
 * channels.
 */
void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
{
	u8 event;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
	l2cap_tx(chan, NULL, NULL, event);
}
/* Drain the SREJ hold queue: deliver consecutively-numbered I-frames
 * starting at buffer_seq until a gap (or local busy) stops us.  When the
 * queue empties, the SREJ recovery is over and we return to RECV state.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
/* Handle an incoming SREJ S-frame: retransmit the single requested
 * I-frame.  Disconnects if the requested seq was never sent or the
 * per-frame retry limit is exhausted.  The P/F bit handling follows the
 * ERTM spec's SREJ_ACT bookkeeping to avoid retransmitting a frame that
 * was already resent in response to the same poll cycle.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq would mean a frame we never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		/* Remember which reqseq this poll answered so a later
		 * F=1 SREJ for the same frame isn't retransmitted twice.
		 */
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit only if this F-bit answers
			 * the SREJ we already acted on.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
/* Handle an incoming REJ S-frame: retransmit all unacked I-frames from
 * reqseq onward.  Disconnects on an impossible reqseq or when the retry
 * limit for the rejected frame is exhausted.  REJ_ACT suppresses a
 * duplicate retransmit-all when the matching F=1 frame arrives.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq would mean a frame we never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* F=1: retransmit only if we haven't already done so in
		 * response to this REJ cycle.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
/* Classify an incoming I-frame's TxSeq relative to the receive window
 * and any SREJ recovery in progress.  The result drives the RX state
 * machines: EXPECTED, DUPLICATE, UNEXPECTED (gap), the SREJ variants,
 * or INVALID / INVALID_IGNORE for out-of-window sequence numbers.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
/* ERTM RX state machine: normal RECV state.  Handles I-frames (in-order
 * delivery, gap detection that transitions to SREJ_SENT) and RR/RNR/
 * REJ/SREJ S-frames.  skb ownership: frames queued or consumed set
 * skb_in_use; anything else is freed on exit.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* reassemble_sdu consumes the skb on both success
			 * and failure paths.
			 */
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
/* ERTM RX state machine: SREJ_SENT state (waiting for retransmissions
 * of missing I-frames).  All arriving I-frames are parked on srej_q;
 * l2cap_rx_queued_iframes() drains them in order and returns the
 * channel to RECV once the gaps are filled.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The head of the SREJ list arrived: pop it and try
			 * to drain everything now in sequence.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
/* Finalize an AMP channel move: back to RECV state, adopt the MTU of the
 * link now carrying the channel (AMP block MTU or BR/EDR ACL MTU), and
 * re-segment pending TX data for the new MTU.
 */
static int l2cap_finish_move(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->rx_state = L2CAP_RX_STATE_RECV;

	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	return l2cap_resegment(chan);
}
/* ERTM RX state machine: WAIT_P state (channel move responder waiting
 * for a poll).  On P=1, resynchronize the TX side to the peer's reqseq,
 * finish the move, and answer the poll; the frame is then re-processed
 * by the RECV handler (without the skb for I-frames, which are invalid
 * here).
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	/* Only a poll frame is valid in WAIT_P */
	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
/* ERTM RX state machine: WAIT_F state (channel move initiator waiting
 * for the final F=1 frame).  On F=1, resynchronize TX to the peer's
 * reqseq, adopt the new link's MTU, re-segment, and hand the frame to
 * the RECV handler.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	/* Only a final frame is valid in WAIT_F */
	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
/* Check that an incoming frame's ReqSeq acknowledges only frames we have
 * actually sent and not yet had acked (modulo-window arithmetic).
 */
static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	/* Make sure reqseq is for a packet that has been sent but not acked */
	u16 unacked;

	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
}
/* Top-level ERTM receive dispatcher: validate the frame's ReqSeq, then
 * route the event to the handler for the channel's current RX state.
 * An invalid ReqSeq is a protocol violation and disconnects the channel.
 */
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff *skb, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
	       control, skb, event, chan->rx_state);

	if (__valid_reqseq(chan, control->reqseq)) {
		switch (chan->rx_state) {
		case L2CAP_RX_STATE_RECV:
			err = l2cap_rx_state_recv(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_SREJ_SENT:
			err = l2cap_rx_state_srej_sent(chan, control, skb,
						       event);
			break;
		case L2CAP_RX_STATE_WAIT_P:
			err = l2cap_rx_state_wait_p(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_WAIT_F:
			err = l2cap_rx_state_wait_f(chan, control, skb, event);
			break;
		default:
			/* shut it down */
			break;
		}
	} else {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
		       control->reqseq, chan->next_tx_seq,
		       chan->expected_ack_seq);
		l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return err;
}
/* Streaming-mode receive: deliver only in-sequence frames; anything else
 * (including a partially reassembled SDU interrupted by a gap) is simply
 * discarded — streaming mode has no retransmission.  The expected
 * sequence is then resynchronized to the received TxSeq.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Lost frames invalidate any SDU being reassembled */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resync: treat the received frame as the new reference point */
	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return 0;
}
/* Entry point for ERTM/streaming data frames: unpack the control field,
 * verify FCS and payload size, validate F/P bit combinations, then feed
 * I-frames or S-frames into the appropriate receive machinery.  The skb
 * is always consumed (delivered, queued, or dropped).
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/* l2cap_check_fcs fails if the frame is corrupted.
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Account for the SDU-length field and FCS trailer when checking
	 * the payload against the negotiated MPS.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	/* Optional per-channel filter hook may veto delivery */
	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))
			goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
/* Replenish the remote sender's credits for an LE flow-control channel.
 * Credits are topped back up to le_max_credits, but only once our
 * remaining rx_credits drop below half the initial amount, to avoid a
 * credit packet per received PDU.
 */
static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_credits pkt;
	u16 return_credits;

	/* We return more credits to the sender only after the amount of
	 * credits falls below half of the initial amount.
	 */
	if (chan->rx_credits >= (le_max_credits + 1) / 2)
		return;

	return_credits = le_max_credits - chan->rx_credits;

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	chan->rx_credits += return_credits;

	pkt.cid     = cpu_to_le16(chan->scid);
	pkt.credits = cpu_to_le16(return_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}
/* Receive one PDU on an LE credit-based flow-control channel: enforce
 * credits and MTU, consume one rx credit, and reassemble the SDU
 * (first PDU carries a 2-byte SDU length prefix).  Also shrinks
 * chan->mps when the peer's first fragment shows it uses a smaller MPS
 * than negotiated, so credit accounting matches reality.  Always
 * returns 0 once the skb has been taken care of internally.
 */
static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Whole SDU in a single PDU: deliver immediately */
		if (skb->len == sdu_len)
			return chan->ops->recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = chan->ops->recv(chan, chan->sdu);
		if (!err) {
			/* SDU delivered; reset reassembly state */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
/* Route an incoming data skb to the channel identified by its CID and
 * hand it to the mode-specific receive path (LE flow-control, basic,
 * ERTM/streaming).  Unknown CIDs (except the A2MP CID, which can create
 * its channel on demand) cause the packet to be dropped.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procdure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		if (l2cap_le_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
/* Deliver a connectionless (CID 0x0002) datagram to a listening/bound
 * channel matching the PSM.  The remote address and PSM are stashed in
 * the skb's control block so the socket layer can fill in msg_name.
 * The skb is consumed on every path.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	/* Connectionless data is BR/EDR only */
	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
/* Demultiplex a complete L2CAP frame from HCI: parse the basic header,
 * sanity-check the length, filter blacklisted LE peers, and route the
 * payload by CID (signaling, connectionless, LE signaling, or data).
 * Frames arriving before the HCI link is fully up are parked on
 * conn->pending_rx for later processing.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
/* Workqueue handler: replay frames that arrived while the HCI link was
 * still coming up (queued by l2cap_recv_frame above).
 */
static void process_pending_rx(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       pending_rx_work);
	struct sk_buff *skb;

	BT_DBG("");

	while ((skb = skb_dequeue(&conn->pending_rx)))
		l2cap_recv_frame(conn, skb);
}
/* Create (or return the existing) l2cap_conn for an HCI connection:
 * allocate the structure, take references on the hci_conn and an HCI
 * channel, pick the MTU from the link type, advertise our fixed
 * channels, and initialize all locks, lists and work items.
 * Returns NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Already set up for this hcon */
	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through: no LE MTU configured, use the ACL MTU */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	if (hcon->type == ACL_LINK &&
	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
		conn->local_fixed_chan |= L2CAP_FC_A2MP;

	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
/* Validate a PSM value for the destination address type: LE uses the
 * 1-byte LE_PSM space, BR/EDR requires the standard odd-PSM encoding.
 */
static bool is_valid_psm(u16 psm, u8 dst_type) {
	if (!psm)
		return false;

	if (bdaddr_type_is_le(dst_type))
		/* LE PSMs occupy only the low byte */
		return (psm <= 0x00ff);

	/* PSM must be odd and lsb of upper byte must be 0 */
	return ((psm & 0x0101) == 0x0001);
}
/* Initiate an outgoing L2CAP connection on @chan to @dst/@dst_type,
 * identified by @psm (connection-oriented) and/or @cid (fixed channel).
 * Validates arguments and channel state/mode, creates or reuses the
 * underlying HCI link (LE connect/scan or BR/EDR ACL), attaches the
 * channel to the l2cap_conn and kicks off the L2CAP connect procedure.
 * Returns 0 on success or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* RAW channels may use arbitrary PSMs; everyone else must pass a
	 * valid PSM or target a fixed CID.
	 */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		l2cap_le_flowctl_init(chan);
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising we cannot scan, so connect directly;
		 * otherwise go through the passive-scan connect path.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type,
					      chan->sec_level,
					      HCI_LE_CONN_TIMEOUT,
					      HCI_ROLE_SLAVE, NULL);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level,
						   HCI_LE_CONN_TIMEOUT);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* A fixed/requested destination CID must not already be in use */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7325 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* Lower-layer (HCI) callback: an incoming ACL connection from @bdaddr
 * is pending on @hdev.  Scan the global channel list for listening
 * channels and build an HCI link-mode mask (accept / role-switch),
 * preferring channels bound to this adapter's address over wildcard
 * (BDADDR_ANY) listeners.
 *
 * NOTE(review): `exact` is never incremented in the visible lines and
 * the loop's `continue`/closing braces are absent — lines appear to be
 * missing from this chunk; compare against upstream before editing.
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		if (c->state != BT_LISTEN)

		/* Exact match on this adapter's own address */
		if (!bacmp(&c->src, &hdev->bdaddr)) {
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
		/* Wildcard listener bound to any adapter */
		} else if (!bacmp(&c->src, BDADDR_ANY)) {
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
	read_unlock(&chan_list_lock);

	/* Prefer the exact-address mask when an exact match was found */
	return exact ? lm1 : lm2;
/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 *
 * Returns the matching channel with a reference held (via
 * l2cap_chan_hold_unless_zero), so the caller must drop it with
 * l2cap_chan_put().
 *
 * NOTE(review): the NULL-vs-continue branch around the two list
 * initialisers and the loop's `continue`/`return` lines are absent —
 * lines appear to be missing from this chunk.
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	/* Resume after @c, or start at the head when @c is NULL */
	c = list_next_entry(c, global_l);
	c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
		if (c->state != BT_LISTEN)
		/* Must be bound to this adapter's address or to wildcard */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
		if (src_type != c->src_type)

		/* Take a ref unless the channel is already being freed */
		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);

	read_unlock(&chan_list_lock);
/* Lower-layer (HCI) callback: link establishment to hcon->dst finished
 * with @status (0 = success).  On success, create/look up the L2CAP
 * connection, then walk all listening fixed channels and spawn a child
 * channel on this connection for each, finally kicking the connection
 * into the ready state.
 *
 * NOTE(review): early returns, the `dst_type` declaration, goto/continue
 * lines and several closing braces are absent — lines appear to be
 * missing from this chunk; compare against upstream before editing.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;

	/* L2CAP only runs over ACL and LE links */
	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	/* Failed link: tear down any L2CAP state for this hcon */
	l2cap_conn_del(hcon, bt_to_errno(status));

	conn = l2cap_conn_add(hcon);

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);

		l2cap_chan_unlock(pchan);

		/* Advance to the next matching fixed channel and drop the
		 * reference taken by the previous lookup */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);

	l2cap_conn_ready(conn);
7453 int l2cap_disconn_ind(struct hci_conn *hcon)
7455 struct l2cap_conn *conn = hcon->l2cap_data;
7457 BT_DBG("hcon %p", hcon);
7460 return HCI_ERROR_REMOTE_USER_TERM;
7461 return conn->disc_reason;
7464 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7466 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7469 BT_DBG("hcon %p reason %d", hcon, reason);
7471 l2cap_conn_del(hcon, bt_to_errno(reason));
7474 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7476 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7479 if (encrypt == 0x00) {
7480 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7481 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7482 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7483 chan->sec_level == BT_SECURITY_FIPS)
7484 l2cap_chan_close(chan, ECONNREFUSED);
7486 if (chan->sec_level == BT_SECURITY_MEDIUM)
7487 __clear_chan_timer(chan);
/* Lower-layer (HCI) callback: authentication/encryption of @hcon
 * completed with HCI @status; @encrypt is the new encryption state.
 * Walk every channel on the connection and advance (or tear down) its
 * connection state machine according to the security outcome.
 *
 * NOTE(review): early returns, `continue` statements, else-branches,
 * the declarations of `res`, `stat` and `buf`, and several closing
 * braces are absent — lines appear to be missing from this chunk;
 * compare against upstream before editing the logic.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP signalling channel is exempt from this handling */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);

		/* Security succeeded with encryption: adopt link level */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);

		/* Channel already up: resume traffic and re-check the
		 * encryption requirements */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);

		if (chan->state == BT_CONNECT) {
			/* Outgoing connect: proceed only if security
			 * succeeded and the encryption key is long enough */
			if (!status && l2cap_check_enc_key_size(hcon))
				l2cap_start_connection(chan);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   chan->mode != L2CAP_MODE_LE_FLOWCTL) {
			struct l2cap_conn_rsp rsp;

			if (!status && l2cap_check_enc_key_size(hcon)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let the owner authorise first */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				/* Security failed: refuse and disconnect */
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;

			/* Answer the pending Connection Request */
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,

			/* On success, immediately start configuration */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
				chan->num_conf_req++;

		l2cap_chan_unlock(chan);

	mutex_unlock(&conn->chan_lock);
/* Lower-layer (HCI) entry point for incoming ACL data.  Reassembles
 * fragmented L2CAP PDUs: an ACL start fragment carries the Basic L2CAP
 * header (from which the total frame length is read), continuation
 * fragments are appended to conn->rx_skb until the frame is complete,
 * then the whole frame is handed to l2cap_recv_frame(), which takes
 * ownership of the skb.
 *
 * NOTE(review): the `switch (flags)` statement itself, the `int len`
 * declaration, several `goto drop`/`return` lines and closing braces
 * are absent — lines appear to be missing from this chunk; compare
 * against upstream before editing.
 */
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)

	conn = l2cap_conn_add(hcon);

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	case ACL_START_NO_FLUSH:
		/* A start fragment while a reassembly is in progress means
		 * the previous frame was truncated: drop it */
		BT_ERR("Unexpected start frame (len %d)", skb->len);
		kfree_skb(conn->rx_skb);
		conn->rx_skb = NULL;
		l2cap_conn_unreliable(conn, ECOMM);

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			l2cap_conn_unreliable(conn, ECOMM);

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
		/* Bytes still expected from continuation fragments */
		conn->rx_len = len - skb->len;

		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			/* Continuation with no reassembly in progress */
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			l2cap_conn_unreliable(conn, ECOMM);

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
7686 static struct hci_cb l2cap_cb = {
7688 .connect_cfm = l2cap_connect_cfm,
7689 .disconn_cfm = l2cap_disconn_cfm,
7690 .security_cfm = l2cap_security_cfm,
7693 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7695 struct l2cap_chan *c;
7697 read_lock(&chan_list_lock);
7699 list_for_each_entry(c, &chan_list, global_l) {
7700 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7701 &c->src, c->src_type, &c->dst, c->dst_type,
7702 c->state, __le16_to_cpu(c->psm),
7703 c->scid, c->dcid, c->imtu, c->omtu,
7704 c->sec_level, c->mode);
7707 read_unlock(&chan_list_lock);
7712 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7714 return single_open(file, l2cap_debugfs_show, inode->i_private);
7717 static const struct file_operations l2cap_debugfs_fops = {
7718 .open = l2cap_debugfs_open,
7720 .llseek = seq_lseek,
7721 .release = single_release,
/* debugfs dentry for the "l2cap" channel-list file created in l2cap_init() */
static struct dentry *l2cap_debugfs;
7726 int __init l2cap_init(void)
7730 err = l2cap_init_sockets();
7734 hci_register_cb(&l2cap_cb);
7736 if (IS_ERR_OR_NULL(bt_debugfs))
7739 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7740 NULL, &l2cap_debugfs_fops);
7742 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7744 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
7750 void l2cap_exit(void)
7752 debugfs_remove(l2cap_debugfs);
7753 hci_unregister_cb(&l2cap_cb);
7754 l2cap_cleanup_sockets();
/* Module parameter to disable Enhanced Retransmission Mode (0644: root
 * may toggle at runtime via sysfs).
 * NOTE(review): disable_ertm itself is declared outside this chunk. */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");