2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
45 #define LE_FLOWCTL_MAX_CREDITS 65535
49 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
51 static LIST_HEAD(chan_list);
52 static DEFINE_RWLOCK(chan_list_lock);
54 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
55 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
57 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
58 u8 code, u8 ident, u16 dlen, void *data);
59 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
61 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
62 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
64 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
65 struct sk_buff_head *skbs, u8 event);
67 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
69 if (link_type == LE_LINK) {
70 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
71 return BDADDR_LE_PUBLIC;
73 return BDADDR_LE_RANDOM;
79 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
81 return bdaddr_type(hcon->type, hcon->src_type);
84 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
86 return bdaddr_type(hcon->type, hcon->dst_type);
89 /* ---- L2CAP channels ---- */
91 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
96 list_for_each_entry(c, &conn->chan_l, list) {
103 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
106 struct l2cap_chan *c;
108 list_for_each_entry(c, &conn->chan_l, list) {
115 /* Find channel with given SCID.
116 * Returns a reference locked channel.
118 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
121 struct l2cap_chan *c;
123 mutex_lock(&conn->chan_lock);
124 c = __l2cap_get_chan_by_scid(conn, cid);
126 /* Only lock if chan reference is not 0 */
127 c = l2cap_chan_hold_unless_zero(c);
131 mutex_unlock(&conn->chan_lock);
136 /* Find channel with given DCID.
137 * Returns a reference locked channel.
139 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
142 struct l2cap_chan *c;
144 mutex_lock(&conn->chan_lock);
145 c = __l2cap_get_chan_by_dcid(conn, cid);
147 /* Only lock if chan reference is not 0 */
148 c = l2cap_chan_hold_unless_zero(c);
152 mutex_unlock(&conn->chan_lock);
157 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
160 struct l2cap_chan *c;
162 list_for_each_entry(c, &conn->chan_l, list) {
163 if (c->ident == ident)
169 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
172 struct l2cap_chan *c;
174 mutex_lock(&conn->chan_lock);
175 c = __l2cap_get_chan_by_ident(conn, ident);
177 /* Only lock if chan reference is not 0 */
178 c = l2cap_chan_hold_unless_zero(c);
182 mutex_unlock(&conn->chan_lock);
187 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
189 struct l2cap_chan *c;
191 list_for_each_entry(c, &chan_list, global_l) {
192 if (c->sport == psm && !bacmp(&c->src, src))
198 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
202 write_lock(&chan_list_lock);
204 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
214 u16 p, start, end, incr;
216 if (chan->src_type == BDADDR_BREDR) {
217 start = L2CAP_PSM_DYN_START;
218 end = L2CAP_PSM_AUTO_END;
221 start = L2CAP_PSM_LE_DYN_START;
222 end = L2CAP_PSM_LE_DYN_END;
227 for (p = start; p <= end; p += incr)
228 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
229 chan->psm = cpu_to_le16(p);
230 chan->sport = cpu_to_le16(p);
237 write_unlock(&chan_list_lock);
240 EXPORT_SYMBOL_GPL(l2cap_add_psm);
242 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
244 write_lock(&chan_list_lock);
246 /* Override the defaults (which are for conn-oriented) */
247 chan->omtu = L2CAP_DEFAULT_MTU;
248 chan->chan_type = L2CAP_CHAN_FIXED;
252 write_unlock(&chan_list_lock);
257 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
261 if (conn->hcon->type == LE_LINK)
262 dyn_end = L2CAP_CID_LE_DYN_END;
264 dyn_end = L2CAP_CID_DYN_END;
266 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
267 if (!__l2cap_get_chan_by_scid(conn, cid))
274 static void l2cap_state_change(struct l2cap_chan *chan, int state)
276 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
277 state_to_string(state));
280 chan->ops->state_change(chan, state, 0);
283 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
287 chan->ops->state_change(chan, chan->state, err);
290 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
292 chan->ops->state_change(chan, chan->state, err);
295 static void __set_retrans_timer(struct l2cap_chan *chan)
297 if (!delayed_work_pending(&chan->monitor_timer) &&
298 chan->retrans_timeout) {
299 l2cap_set_timer(chan, &chan->retrans_timer,
300 msecs_to_jiffies(chan->retrans_timeout));
304 static void __set_monitor_timer(struct l2cap_chan *chan)
306 __clear_retrans_timer(chan);
307 if (chan->monitor_timeout) {
308 l2cap_set_timer(chan, &chan->monitor_timer,
309 msecs_to_jiffies(chan->monitor_timeout));
313 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
318 skb_queue_walk(head, skb) {
319 if (bt_cb(skb)->l2cap.txseq == seq)
326 /* ---- L2CAP sequence number lists ---- */
328 /* For ERTM, ordered lists of sequence numbers must be tracked for
329 * SREJ requests that are received and for frames that are to be
330 * retransmitted. These seq_list functions implement a singly-linked
331 * list in an array, where membership in the list can also be checked
332 * in constant time. Items can also be added to the tail of the list
333 and removed from the head in constant time, without further memory allocation. */
337 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
339 size_t alloc_size, i;
341 /* Allocated size is a power of 2 to map sequence numbers
342 * (which may be up to 14 bits) in to a smaller array that is
343 * sized for the negotiated ERTM transmit windows.
345 alloc_size = roundup_pow_of_two(size);
347 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
351 seq_list->mask = alloc_size - 1;
352 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
353 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
354 for (i = 0; i < alloc_size; i++)
355 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
360 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
362 kfree(seq_list->list);
365 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
368 /* Constant-time check for list membership */
369 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
372 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
374 u16 seq = seq_list->head;
375 u16 mask = seq_list->mask;
377 seq_list->head = seq_list->list[seq & mask];
378 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
380 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
381 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
382 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
388 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
392 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
395 for (i = 0; i <= seq_list->mask; i++)
396 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
398 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
399 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
402 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
404 u16 mask = seq_list->mask;
406 /* All appends happen in constant time */
408 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
411 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
412 seq_list->head = seq;
414 seq_list->list[seq_list->tail & mask] = seq;
416 seq_list->tail = seq;
417 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: close @chan with a reason
 * derived from its current state and drop the reference taken when the
 * timer was scheduled.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);
}
453 struct l2cap_chan *l2cap_chan_create(void)
455 struct l2cap_chan *chan;
457 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
461 skb_queue_head_init(&chan->tx_q);
462 skb_queue_head_init(&chan->srej_q);
463 mutex_init(&chan->lock);
465 /* Set default lock nesting level */
466 atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
468 write_lock(&chan_list_lock);
469 list_add(&chan->global_l, &chan_list);
470 write_unlock(&chan_list_lock);
472 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
474 chan->state = BT_OPEN;
476 kref_init(&chan->kref);
478 /* This flag is cleared in l2cap_chan_ready() */
479 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
481 BT_DBG("chan %p", chan);
485 EXPORT_SYMBOL_GPL(l2cap_chan_create);
487 static void l2cap_chan_destroy(struct kref *kref)
489 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
491 BT_DBG("chan %p", chan);
493 write_lock(&chan_list_lock);
494 list_del(&chan->global_l);
495 write_unlock(&chan_list_lock);
500 void l2cap_chan_hold(struct l2cap_chan *c)
502 BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
507 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
509 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
511 if (!kref_get_unless_zero(&c->kref))
517 void l2cap_chan_put(struct l2cap_chan *c)
519 BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
521 kref_put(&c->kref, l2cap_chan_destroy);
523 EXPORT_SYMBOL_GPL(l2cap_chan_put);
525 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
527 chan->fcs = L2CAP_FCS_CRC16;
528 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
529 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
530 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
531 chan->remote_max_tx = chan->max_tx;
532 chan->remote_tx_win = chan->tx_win;
533 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
534 chan->sec_level = BT_SECURITY_LOW;
535 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
536 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
537 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
539 chan->conf_state = 0;
540 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
542 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
544 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
546 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
549 chan->sdu_last_frag = NULL;
551 chan->tx_credits = 0;
552 chan->rx_credits = le_max_credits;
553 chan->mps = min_t(u16, chan->imtu, le_default_mps);
555 skb_queue_head_init(&chan->tx_q);
558 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
560 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
561 __le16_to_cpu(chan->psm), chan->dcid);
563 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
567 switch (chan->chan_type) {
568 case L2CAP_CHAN_CONN_ORIENTED:
569 /* Alloc CID for connection-oriented socket */
570 chan->scid = l2cap_alloc_cid(conn);
571 if (conn->hcon->type == ACL_LINK)
572 chan->omtu = L2CAP_DEFAULT_MTU;
575 case L2CAP_CHAN_CONN_LESS:
576 /* Connectionless socket */
577 chan->scid = L2CAP_CID_CONN_LESS;
578 chan->dcid = L2CAP_CID_CONN_LESS;
579 chan->omtu = L2CAP_DEFAULT_MTU;
582 case L2CAP_CHAN_FIXED:
583 /* Caller will set CID and CID specific MTU values */
587 /* Raw socket can send/recv signalling messages only */
588 chan->scid = L2CAP_CID_SIGNALING;
589 chan->dcid = L2CAP_CID_SIGNALING;
590 chan->omtu = L2CAP_DEFAULT_MTU;
593 chan->local_id = L2CAP_BESTEFFORT_ID;
594 chan->local_stype = L2CAP_SERV_BESTEFFORT;
595 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
596 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
597 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
598 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
600 l2cap_chan_hold(chan);
602 /* Only keep a reference for fixed channels if they requested it */
603 if (chan->chan_type != L2CAP_CHAN_FIXED ||
604 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
605 hci_conn_hold(conn->hcon);
607 list_add(&chan->list, &conn->chan_l);
610 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
612 mutex_lock(&conn->chan_lock);
613 __l2cap_chan_add(conn, chan);
614 mutex_unlock(&conn->chan_lock);
617 void l2cap_chan_del(struct l2cap_chan *chan, int err)
619 struct l2cap_conn *conn = chan->conn;
621 __clear_chan_timer(chan);
623 BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
624 state_to_string(chan->state));
626 chan->ops->teardown(chan, err);
629 struct amp_mgr *mgr = conn->hcon->amp_mgr;
630 /* Delete from channel list */
631 list_del(&chan->list);
633 l2cap_chan_put(chan);
637 /* Reference was only held for non-fixed channels or
638 * fixed channels that explicitly requested it using the
639 * FLAG_HOLD_HCI_CONN flag.
641 if (chan->chan_type != L2CAP_CHAN_FIXED ||
642 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
643 hci_conn_drop(conn->hcon);
645 if (mgr && mgr->bredr_chan == chan)
646 mgr->bredr_chan = NULL;
649 if (chan->hs_hchan) {
650 struct hci_chan *hs_hchan = chan->hs_hchan;
652 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
653 amp_disconnect_logical_link(hs_hchan);
656 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
660 case L2CAP_MODE_BASIC:
663 case L2CAP_MODE_LE_FLOWCTL:
664 skb_queue_purge(&chan->tx_q);
667 case L2CAP_MODE_ERTM:
668 __clear_retrans_timer(chan);
669 __clear_monitor_timer(chan);
670 __clear_ack_timer(chan);
672 skb_queue_purge(&chan->srej_q);
674 l2cap_seq_list_free(&chan->srej_list);
675 l2cap_seq_list_free(&chan->retrans_list);
679 case L2CAP_MODE_STREAMING:
680 skb_queue_purge(&chan->tx_q);
686 EXPORT_SYMBOL_GPL(l2cap_chan_del);
688 static void l2cap_conn_update_id_addr(struct work_struct *work)
690 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
691 id_addr_update_work);
692 struct hci_conn *hcon = conn->hcon;
693 struct l2cap_chan *chan;
695 mutex_lock(&conn->chan_lock);
697 list_for_each_entry(chan, &conn->chan_l, list) {
698 l2cap_chan_lock(chan);
699 bacpy(&chan->dst, &hcon->dst);
700 chan->dst_type = bdaddr_dst_type(hcon);
701 l2cap_chan_unlock(chan);
704 mutex_unlock(&conn->chan_lock);
707 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
709 struct l2cap_conn *conn = chan->conn;
710 struct l2cap_le_conn_rsp rsp;
713 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
714 result = L2CAP_CR_AUTHORIZATION;
716 result = L2CAP_CR_BAD_PSM;
718 l2cap_state_change(chan, BT_DISCONN);
720 rsp.dcid = cpu_to_le16(chan->scid);
721 rsp.mtu = cpu_to_le16(chan->imtu);
722 rsp.mps = cpu_to_le16(chan->mps);
723 rsp.credits = cpu_to_le16(chan->rx_credits);
724 rsp.result = cpu_to_le16(result);
726 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
730 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
732 struct l2cap_conn *conn = chan->conn;
733 struct l2cap_conn_rsp rsp;
736 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
737 result = L2CAP_CR_SEC_BLOCK;
739 result = L2CAP_CR_BAD_PSM;
741 l2cap_state_change(chan, BT_DISCONN);
743 rsp.scid = cpu_to_le16(chan->dcid);
744 rsp.dcid = cpu_to_le16(chan->scid);
745 rsp.result = cpu_to_le16(result);
746 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
748 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
751 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
753 struct l2cap_conn *conn = chan->conn;
755 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
757 switch (chan->state) {
759 chan->ops->teardown(chan, 0);
764 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
765 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
766 l2cap_send_disconn_req(chan, reason);
768 l2cap_chan_del(chan, reason);
772 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
773 if (conn->hcon->type == ACL_LINK)
774 l2cap_chan_connect_reject(chan);
775 else if (conn->hcon->type == LE_LINK)
776 l2cap_chan_le_connect_reject(chan);
779 l2cap_chan_del(chan, reason);
784 l2cap_chan_del(chan, reason);
788 chan->ops->teardown(chan, 0);
792 EXPORT_SYMBOL(l2cap_chan_close);
794 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
796 switch (chan->chan_type) {
798 switch (chan->sec_level) {
799 case BT_SECURITY_HIGH:
800 case BT_SECURITY_FIPS:
801 return HCI_AT_DEDICATED_BONDING_MITM;
802 case BT_SECURITY_MEDIUM:
803 return HCI_AT_DEDICATED_BONDING;
805 return HCI_AT_NO_BONDING;
808 case L2CAP_CHAN_CONN_LESS:
809 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
810 if (chan->sec_level == BT_SECURITY_LOW)
811 chan->sec_level = BT_SECURITY_SDP;
813 if (chan->sec_level == BT_SECURITY_HIGH ||
814 chan->sec_level == BT_SECURITY_FIPS)
815 return HCI_AT_NO_BONDING_MITM;
817 return HCI_AT_NO_BONDING;
819 case L2CAP_CHAN_CONN_ORIENTED:
820 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
821 if (chan->sec_level == BT_SECURITY_LOW)
822 chan->sec_level = BT_SECURITY_SDP;
824 if (chan->sec_level == BT_SECURITY_HIGH ||
825 chan->sec_level == BT_SECURITY_FIPS)
826 return HCI_AT_NO_BONDING_MITM;
828 return HCI_AT_NO_BONDING;
832 switch (chan->sec_level) {
833 case BT_SECURITY_HIGH:
834 case BT_SECURITY_FIPS:
835 return HCI_AT_GENERAL_BONDING_MITM;
836 case BT_SECURITY_MEDIUM:
837 return HCI_AT_GENERAL_BONDING;
839 return HCI_AT_NO_BONDING;
845 /* Service level security */
846 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
848 struct l2cap_conn *conn = chan->conn;
851 if (conn->hcon->type == LE_LINK)
852 return smp_conn_security(conn->hcon, chan->sec_level);
854 auth_type = l2cap_get_auth_type(chan);
856 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
860 static u8 l2cap_get_ident(struct l2cap_conn *conn)
864 /* Get next available identificator.
865 * 1 - 128 are used by kernel.
866 * 129 - 199 are reserved.
867 * 200 - 254 are used by utilities like l2ping, etc.
870 mutex_lock(&conn->ident_lock);
872 if (++conn->tx_ident > 128)
877 mutex_unlock(&conn->ident_lock);
882 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
885 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
888 BT_DBG("code 0x%2.2x", code);
893 /* Use NO_FLUSH if supported or we have an LE link (which does
894 * not support auto-flushing packets) */
895 if (lmp_no_flush_capable(conn->hcon->hdev) ||
896 conn->hcon->type == LE_LINK)
897 flags = ACL_START_NO_FLUSH;
901 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
902 skb->priority = HCI_PRIO_MAX;
904 hci_send_acl(conn->hchan, skb, flags);
907 static bool __chan_is_moving(struct l2cap_chan *chan)
909 return chan->move_state != L2CAP_MOVE_STABLE &&
910 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
913 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
915 struct hci_conn *hcon = chan->conn->hcon;
918 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
921 if (chan->hs_hcon && !__chan_is_moving(chan)) {
923 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
930 /* Use NO_FLUSH for LE links (where this is the only option) or
931 * if the BR/EDR link supports it and flushing has not been
932 * explicitly requested (through FLAG_FLUSHABLE).
934 if (hcon->type == LE_LINK ||
935 (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
936 lmp_no_flush_capable(hcon->hdev)))
937 flags = ACL_START_NO_FLUSH;
941 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
942 hci_send_acl(chan->conn->hchan, skb, flags);
945 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
947 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
948 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
950 if (enh & L2CAP_CTRL_FRAME_TYPE) {
953 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
954 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
961 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
962 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
969 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
971 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
972 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
974 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
977 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
978 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
985 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
986 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
993 static inline void __unpack_control(struct l2cap_chan *chan,
996 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
997 __unpack_extended_control(get_unaligned_le32(skb->data),
999 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1001 __unpack_enhanced_control(get_unaligned_le16(skb->data),
1002 &bt_cb(skb)->l2cap);
1003 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1007 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1011 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1012 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1014 if (control->sframe) {
1015 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1016 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1017 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1019 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1020 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1026 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1030 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1031 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1033 if (control->sframe) {
1034 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1035 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1036 packed |= L2CAP_CTRL_FRAME_TYPE;
1038 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1039 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1045 static inline void __pack_control(struct l2cap_chan *chan,
1046 struct l2cap_ctrl *control,
1047 struct sk_buff *skb)
1049 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1050 put_unaligned_le32(__pack_extended_control(control),
1051 skb->data + L2CAP_HDR_SIZE);
1053 put_unaligned_le16(__pack_enhanced_control(control),
1054 skb->data + L2CAP_HDR_SIZE);
1058 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1060 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1061 return L2CAP_EXT_HDR_SIZE;
1063 return L2CAP_ENH_HDR_SIZE;
1066 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1069 struct sk_buff *skb;
1070 struct l2cap_hdr *lh;
1071 int hlen = __ertm_hdr_size(chan);
1073 if (chan->fcs == L2CAP_FCS_CRC16)
1074 hlen += L2CAP_FCS_SIZE;
1076 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1079 return ERR_PTR(-ENOMEM);
1081 lh = skb_put(skb, L2CAP_HDR_SIZE);
1082 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1083 lh->cid = cpu_to_le16(chan->dcid);
1085 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1086 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1088 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1090 if (chan->fcs == L2CAP_FCS_CRC16) {
1091 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1092 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1095 skb->priority = HCI_PRIO_MAX;
1099 static void l2cap_send_sframe(struct l2cap_chan *chan,
1100 struct l2cap_ctrl *control)
1102 struct sk_buff *skb;
1105 BT_DBG("chan %p, control %p", chan, control);
1107 if (!control->sframe)
1110 if (__chan_is_moving(chan))
1113 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1117 if (control->super == L2CAP_SUPER_RR)
1118 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1119 else if (control->super == L2CAP_SUPER_RNR)
1120 set_bit(CONN_RNR_SENT, &chan->conn_state);
1122 if (control->super != L2CAP_SUPER_SREJ) {
1123 chan->last_acked_seq = control->reqseq;
1124 __clear_ack_timer(chan);
1127 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1128 control->final, control->poll, control->super);
1130 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1131 control_field = __pack_extended_control(control);
1133 control_field = __pack_enhanced_control(control);
1135 skb = l2cap_create_sframe_pdu(chan, control_field);
1137 l2cap_do_send(chan, skb);
1140 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1142 struct l2cap_ctrl control;
1144 BT_DBG("chan %p, poll %d", chan, poll);
1146 memset(&control, 0, sizeof(control));
1148 control.poll = poll;
1150 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1151 control.super = L2CAP_SUPER_RNR;
1153 control.super = L2CAP_SUPER_RR;
1155 control.reqseq = chan->buffer_seq;
1156 l2cap_send_sframe(chan, &control);
1159 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1161 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1164 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1167 static bool __amp_capable(struct l2cap_chan *chan)
1169 struct l2cap_conn *conn = chan->conn;
1170 struct hci_dev *hdev;
1171 bool amp_available = false;
1173 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1176 if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1179 read_lock(&hci_dev_list_lock);
1180 list_for_each_entry(hdev, &hci_dev_list, list) {
1181 if (hdev->amp_type != AMP_TYPE_BREDR &&
1182 test_bit(HCI_UP, &hdev->flags)) {
1183 amp_available = true;
1187 read_unlock(&hci_dev_list_lock);
1189 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1190 return amp_available;
1195 static bool l2cap_check_efs(struct l2cap_chan *chan)
1197 /* Check EFS parameters */
1201 void l2cap_send_conn_req(struct l2cap_chan *chan)
1203 struct l2cap_conn *conn = chan->conn;
1204 struct l2cap_conn_req req;
1206 req.scid = cpu_to_le16(chan->scid);
1207 req.psm = chan->psm;
1209 chan->ident = l2cap_get_ident(conn);
1211 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1213 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1216 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1218 struct l2cap_create_chan_req req;
1219 req.scid = cpu_to_le16(chan->scid);
1220 req.psm = chan->psm;
1221 req.amp_id = amp_id;
1223 chan->ident = l2cap_get_ident(chan->conn);
1225 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1229 static void l2cap_move_setup(struct l2cap_chan *chan)
1231 struct sk_buff *skb;
1233 BT_DBG("chan %p", chan);
1235 if (chan->mode != L2CAP_MODE_ERTM)
1238 __clear_retrans_timer(chan);
1239 __clear_monitor_timer(chan);
1240 __clear_ack_timer(chan);
1242 chan->retry_count = 0;
1243 skb_queue_walk(&chan->tx_q, skb) {
1244 if (bt_cb(skb)->l2cap.retries)
1245 bt_cb(skb)->l2cap.retries = 1;
1250 chan->expected_tx_seq = chan->buffer_seq;
1252 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1253 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1254 l2cap_seq_list_clear(&chan->retrans_list);
1255 l2cap_seq_list_clear(&chan->srej_list);
1256 skb_queue_purge(&chan->srej_q);
1258 chan->tx_state = L2CAP_TX_STATE_XMIT;
1259 chan->rx_state = L2CAP_RX_STATE_MOVE;
1261 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1264 static void l2cap_move_done(struct l2cap_chan *chan)
1266 u8 move_role = chan->move_role;
1267 BT_DBG("chan %p", chan);
1269 chan->move_state = L2CAP_MOVE_STABLE;
1270 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1272 if (chan->mode != L2CAP_MODE_ERTM)
1275 switch (move_role) {
1276 case L2CAP_MOVE_ROLE_INITIATOR:
1277 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1278 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1280 case L2CAP_MOVE_ROLE_RESPONDER:
1281 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1286 static void l2cap_chan_ready(struct l2cap_chan *chan)
1288 /* The channel may have already been flagged as connected in
1289 * case of receiving data before the L2CAP info req/rsp
1290 * procedure is complete.
1292 if (chan->state == BT_CONNECTED)
1295 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1296 chan->conf_state = 0;
1297 __clear_chan_timer(chan);
1299 if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1300 chan->ops->suspend(chan);
1302 chan->state = BT_CONNECTED;
1304 chan->ops->ready(chan);
1307 static void l2cap_le_connect(struct l2cap_chan *chan)
1309 struct l2cap_conn *conn = chan->conn;
1310 struct l2cap_le_conn_req req;
1312 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1315 req.psm = chan->psm;
1316 req.scid = cpu_to_le16(chan->scid);
1317 req.mtu = cpu_to_le16(chan->imtu);
1318 req.mps = cpu_to_le16(chan->mps);
1319 req.credits = cpu_to_le16(chan->rx_credits);
1321 chan->ident = l2cap_get_ident(conn);
1323 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1327 static void l2cap_le_start(struct l2cap_chan *chan)
1329 struct l2cap_conn *conn = chan->conn;
1331 if (!smp_conn_security(conn->hcon, chan->sec_level))
1335 l2cap_chan_ready(chan);
1339 if (chan->state == BT_CONNECT)
1340 l2cap_le_connect(chan);
1343 static void l2cap_start_connection(struct l2cap_chan *chan)
1345 if (__amp_capable(chan)) {
1346 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1347 a2mp_discover_amp(chan);
1348 } else if (chan->conn->hcon->type == LE_LINK) {
1349 l2cap_le_start(chan);
1351 l2cap_send_conn_req(chan);
1355 static void l2cap_request_info(struct l2cap_conn *conn)
1357 struct l2cap_info_req req;
1359 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1362 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1364 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1365 conn->info_ident = l2cap_get_ident(conn);
1367 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1369 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1373 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1375 /* The minimum encryption key size needs to be enforced by the
1376 * host stack before establishing any L2CAP connections. The
1377 * specification in theory allows a minimum of 1, but to align
1378 * BR/EDR and LE transports, a minimum of 7 is chosen.
1380 * This check might also be called for unencrypted connections
1381 * that have no key size requirements. Ensure that the link is
1382 * actually encrypted before enforcing a key size.
1384 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1385 hcon->enc_key_size >= HCI_MIN_ENC_KEY_SIZE);
/* Drive a channel toward the connected state.
 * LE links are delegated to l2cap_le_start().  For BR/EDR: make sure the
 * feature-mask exchange has been started and completed, then require
 * security and no pending connect before proceeding; if the encryption
 * key size check fails, a disconnect timer is armed instead.
 */
1388 static void l2cap_do_start(struct l2cap_chan *chan)
1390 struct l2cap_conn *conn = chan->conn;
1392 if (conn->hcon->type == LE_LINK) {
1393 l2cap_le_start(chan);
1397 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1398 l2cap_request_info(conn);
/* Wait for the information response before connecting */
1402 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1405 if (!l2cap_chan_check_security(chan, true) ||
1406 !__l2cap_no_conn_pending(chan))
1409 if (l2cap_check_enc_key_size(conn->hcon))
1410 l2cap_start_connection(chan);
/* Key too small: schedule disconnect rather than connect */
1412 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Report whether an ERTM/streaming mode is usable: the mode's feature
 * bit must be set both locally (l2cap_feat_mask, with ERTM and streaming
 * forced on here) and in the remote feat_mask.  Returns the masked
 * feature bit (non-zero == supported).
 */
1415 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1417 u32 local_feat_mask = l2cap_feat_mask;
1419 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1422 case L2CAP_MODE_ERTM:
1423 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1424 case L2CAP_MODE_STREAMING:
1425 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Tear a channel down with an error: stop ERTM timers for connected
 * ERTM channels, short-circuit A2MP channels (no signalling PDU), else
 * send an L2CAP Disconnect Request, then move the channel to BT_DISCONN
 * recording err for the socket layer.
 */
1431 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1433 struct l2cap_conn *conn = chan->conn;
1434 struct l2cap_disconn_req req;
1439 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1440 __clear_retrans_timer(chan);
1441 __clear_monitor_timer(chan);
1442 __clear_ack_timer(chan);
/* A2MP fixed channel: state change only, no DISCONN_REQ on the wire */
1445 if (chan->scid == L2CAP_CID_A2MP) {
1446 l2cap_state_change(chan, BT_DISCONN);
1450 req.dcid = cpu_to_le16(chan->dcid);
1451 req.scid = cpu_to_le16(chan->scid);
1452 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1455 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1458 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection (under chan_lock) and push each
 * one forward: non-connection-oriented channels are marked ready;
 * BT_CONNECT channels are (re)started if security, pending-connect and
 * key-size checks pass; BT_CONNECT2 channels are answered with a
 * Connect Response (success, pending, or authentication/authorization
 * pending) and, on success, a first Configure Request.
 */
1459 static void l2cap_conn_start(struct l2cap_conn *conn)
1461 struct l2cap_chan *chan, *tmp;
1463 BT_DBG("conn %p", conn);
1465 mutex_lock(&conn->chan_lock);
/* _safe variant: channels may be closed/removed while iterating */
1467 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1468 l2cap_chan_lock(chan);
1470 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1471 l2cap_chan_ready(chan);
1472 l2cap_chan_unlock(chan);
1476 if (chan->state == BT_CONNECT) {
1477 if (!l2cap_chan_check_security(chan, true) ||
1478 !__l2cap_no_conn_pending(chan)) {
1479 l2cap_chan_unlock(chan);
/* Close if the configured mode is unsupported and cannot be changed */
1483 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1484 && test_bit(CONF_STATE2_DEVICE,
1485 &chan->conf_state)) {
1486 l2cap_chan_close(chan, ECONNRESET);
1487 l2cap_chan_unlock(chan);
1491 if (l2cap_check_enc_key_size(conn->hcon))
1492 l2cap_start_connection(chan);
1494 l2cap_chan_close(chan, ECONNREFUSED);
1496 } else if (chan->state == BT_CONNECT2) {
1497 struct l2cap_conn_rsp rsp;
/* Response IDs are from the peer's point of view: our dcid is its scid */
1499 rsp.scid = cpu_to_le16(chan->dcid);
1500 rsp.dcid = cpu_to_le16(chan->scid);
1502 if (l2cap_chan_check_security(chan, false)) {
1503 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1504 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1505 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1506 chan->ops->defer(chan);
1509 l2cap_state_change(chan, BT_CONFIG);
1510 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1511 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1514 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1515 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1518 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only follow a fresh success with the first Configure Request */
1521 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1522 rsp.result != L2CAP_CR_SUCCESS) {
1523 l2cap_chan_unlock(chan);
1527 set_bit(CONF_REQ_SENT, &chan->conf_state);
1528 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1529 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1530 chan->num_conf_req++;
1533 l2cap_chan_unlock(chan);
1536 mutex_unlock(&conn->chan_lock);
/* LE link became ready: kick pending security (covers outgoing pairing
 * without a socket), and for slave-role links whose current connection
 * interval falls outside [min,max] send a Connection Parameter Update
 * Request with the configured parameters.
 */
1539 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1541 struct hci_conn *hcon = conn->hcon;
1542 struct hci_dev *hdev = hcon->hdev;
1544 BT_DBG("%s conn %p", hdev->name, conn);
1546 /* For outgoing pairing which doesn't necessarily have an
1547 * associated socket (e.g. mgmt_pair_device).
1550 smp_conn_security(hcon, hcon->pending_sec_level);
1552 /* For LE slave connections, make sure the connection interval
1553 * is in the range of the minimum and maximum interval that has
1554 * been configured for this connection. If not, then trigger
1555 * the connection update procedure.
1557 if (hcon->role == HCI_ROLE_SLAVE &&
1558 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1559 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1560 struct l2cap_conn_param_update_req req;
1562 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1563 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1564 req.latency = cpu_to_le16(hcon->le_conn_latency);
1565 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1567 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1568 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
/* The underlying link is up: start the feature exchange on ACL links,
 * advance every existing channel (skipping A2MP), run the LE ready hook
 * for LE links, and finally schedule deferred RX processing.
 */
1572 static void l2cap_conn_ready(struct l2cap_conn *conn)
1574 struct l2cap_chan *chan;
1575 struct hci_conn *hcon = conn->hcon;
1577 BT_DBG("conn %p", conn);
1579 if (hcon->type == ACL_LINK)
1580 l2cap_request_info(conn);
1582 mutex_lock(&conn->chan_lock);
1584 list_for_each_entry(chan, &conn->chan_l, list) {
1586 l2cap_chan_lock(chan);
/* A2MP fixed channel needs no per-channel start-up here */
1588 if (chan->scid == L2CAP_CID_A2MP) {
1589 l2cap_chan_unlock(chan);
1593 if (hcon->type == LE_LINK) {
1594 l2cap_le_start(chan);
1595 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
/* Fixed/connectionless channels become ready once features are known */
1596 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1597 l2cap_chan_ready(chan);
1598 } else if (chan->state == BT_CONNECT) {
1599 l2cap_do_start(chan);
1602 l2cap_chan_unlock(chan);
1605 mutex_unlock(&conn->chan_lock);
1607 if (hcon->type == LE_LINK)
1608 l2cap_le_conn_ready(conn);
1610 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1613 /* Notify sockets that we cannot guarantee reliability anymore */
/* Propagate a link-reliability failure: set err on every channel that
 * asked for forced reliability (FLAG_FORCE_RELIABLE), under chan_lock.
 */
1614 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1616 struct l2cap_chan *chan;
1618 BT_DBG("conn %p", conn);
1620 mutex_lock(&conn->chan_lock);
1622 list_for_each_entry(chan, &conn->chan_l, list) {
1623 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1624 l2cap_chan_set_err(chan, err);
1627 mutex_unlock(&conn->chan_lock);
/* info_timer expired without an Information Response: treat the feature
 * exchange as done (with whatever we have), clear the outstanding ident,
 * and let pending channels proceed via l2cap_conn_start().
 */
1630 static void l2cap_info_timeout(struct work_struct *work)
1632 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1635 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1636 conn->info_ident = 0;
1638 l2cap_conn_start(conn);
1643 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1644 * callback is called during registration. The ->remove callback is called
1645 * during unregistration.
1646 An l2cap_user object can be unregistered either explicitly or implicitly when
1647 the underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1648 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1649 * External modules must own a reference to the l2cap_conn object if they intend
1650 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1651 * any time if they don't.
/* Register an external l2cap_user on a connection.
 * Runs under the hci_dev lock so it cannot race l2cap_conn_del() (which
 * relies on hci_conn/hci_dev locking).  Rejects users that are already
 * on a list, calls the user's ->probe() callback, and only links the
 * user into conn->users on probe success.  Returns 0 or a negative errno.
 */
1654 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1656 struct hci_dev *hdev = conn->hcon->hdev;
1659 /* We need to check whether l2cap_conn is registered. If it is not, we
1660 * must not register the l2cap_user. l2cap_conn_del() unregisters
1661 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1662 * relies on the parent hci_conn object to be locked. This itself relies
1663 * on the hci_dev object to be locked. So we must lock the hci device
/* A non-empty user->list means this user is already registered somewhere */
1668 if (!list_empty(&user->list)) {
1673 /* conn->hchan is NULL after l2cap_conn_del() was called */
1679 ret = user->probe(conn, user);
1683 list_add(&user->list, &conn->users);
1687 hci_dev_unlock(hdev);
1690 EXPORT_SYMBOL(l2cap_register_user);
/* Explicitly unregister an l2cap_user: under the hci_dev lock, unlink it
 * (no-op if it was never registered) and invoke its ->remove() callback.
 */
1692 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1694 struct hci_dev *hdev = conn->hcon->hdev;
1698 if (list_empty(&user->list))
/* list_del_init so a later unregister sees an empty list and bails */
1701 list_del_init(&user->list);
1702 user->remove(conn, user);
1705 hci_dev_unlock(hdev);
1707 EXPORT_SYMBOL(l2cap_unregister_user);
/* Detach every registered l2cap_user from the connection, calling each
 * user's ->remove() callback (used when the l2cap_conn is torn down).
 */
1709 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1711 struct l2cap_user *user;
1713 while (!list_empty(&conn->users)) {
1714 user = list_first_entry(&conn->users, struct l2cap_user, list);
1715 list_del_init(&user->list);
1716 user->remove(conn, user);
/* Destroy the l2cap_conn attached to hcon: drop buffered RX data, cancel
 * deferred work, detach users, close and release every channel (held
 * across its own close to survive the list removal), delete the HCI
 * channel, stop the info timer, and drop the conn reference.
 */
1720 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1722 struct l2cap_conn *conn = hcon->l2cap_data;
1723 struct l2cap_chan *chan, *l;
1728 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1730 kfree_skb(conn->rx_skb);
1732 skb_queue_purge(&conn->pending_rx);
1734 /* We can not call flush_work(&conn->pending_rx_work) here since we
1735 * might block if we are running on a worker from the same workqueue
1736 * pending_rx_work is waiting on.
1738 if (work_pending(&conn->pending_rx_work))
1739 cancel_work_sync(&conn->pending_rx_work);
1741 if (work_pending(&conn->id_addr_update_work))
1742 cancel_work_sync(&conn->id_addr_update_work);
1744 l2cap_unregister_all_users(conn);
1746 /* Force the connection to be immediately dropped */
1747 hcon->disc_timeout = 0;
1749 mutex_lock(&conn->chan_lock);
/* Hold each channel so ->close() cannot free it under us */
1752 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1753 l2cap_chan_hold(chan);
1754 l2cap_chan_lock(chan);
1756 l2cap_chan_del(chan, err);
1758 chan->ops->close(chan);
1760 l2cap_chan_unlock(chan);
1761 l2cap_chan_put(chan);
1764 mutex_unlock(&conn->chan_lock);
1766 hci_chan_del(conn->hchan);
1768 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1769 cancel_delayed_work_sync(&conn->info_timer);
1771 hcon->l2cap_data = NULL;
1773 l2cap_conn_put(conn);
/* kref release callback: drops the hci_conn reference held by the conn.
 * (Freeing of the conn itself is on a line not shown in this extract.)
 */
1776 static void l2cap_conn_free(struct kref *ref)
1778 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1780 hci_conn_put(conn->hcon);
/* Take an additional reference on the connection (kref-based). */
1784 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1786 kref_get(&conn->ref);
1789 EXPORT_SYMBOL(l2cap_conn_get);
1791 void l2cap_conn_put(struct l2cap_conn *conn)
1793 kref_put(&conn->ref, l2cap_conn_free);
1795 EXPORT_SYMBOL(l2cap_conn_put);
1797 /* ---- Socket interface ---- */
1799 /* Find socket with psm and source / destination bdaddr.
1800 * Returns closest match.
/* Look up a global channel by PSM, preferring an exact src/dst address
 * match (returned immediately, with a reference taken via
 * hold_unless_zero); otherwise remember the closest wildcard match and
 * take a reference on it before returning.  Channels whose transport
 * type does not fit link_type are skipped.
 */
1802 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1807 struct l2cap_chan *c, *tmp, *c1 = NULL;
1809 read_lock(&chan_list_lock);
1811 list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
1812 if (state && c->state != state)
1815 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1818 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1821 if (c->psm == psm) {
1822 int src_match, dst_match;
1823 int src_any, dst_any;
1826 src_match = !bacmp(&c->src, src);
1827 dst_match = !bacmp(&c->dst, dst);
1828 if (src_match && dst_match) {
/* Skip channels already on their way to destruction */
1829 if (!l2cap_chan_hold_unless_zero(c))
1832 read_unlock(&chan_list_lock);
1837 src_any = !bacmp(&c->src, BDADDR_ANY);
1838 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1839 if ((src_match && dst_any) || (src_any && dst_match) ||
1840 (src_any && dst_any))
/* Take the reference on the fallback match before dropping the lock */
1846 c1 = l2cap_chan_hold_unless_zero(c1);
1848 read_unlock(&chan_list_lock);
/* ERTM monitor timer expiry (delayed work): under the channel lock,
 * either bail out early (releasing the work's channel reference) or feed
 * L2CAP_EV_MONITOR_TO into the transmit state machine, then unlock/put.
 */
1855 static void l2cap_monitor_timeout(struct work_struct *work)
1856 monitor_timer.work);
1858 BT_DBG("chan %p", chan);
1860 l2cap_chan_lock(chan);
/* Early-exit path: drop the reference the timer held on the channel */
1863 l2cap_chan_unlock(chan);
1864 l2cap_chan_put(chan);
1868 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1870 l2cap_chan_unlock(chan);
1871 l2cap_chan_put(chan);
/* ERTM retransmission timer expiry (delayed work): mirrors the monitor
 * timeout handler but delivers L2CAP_EV_RETRANS_TO to the tx state
 * machine; always releases the work's channel reference.
 */
1874 static void l2cap_retrans_timeout(struct work_struct *work)
1877 retrans_timer.work);
1879 BT_DBG("chan %p", chan);
1881 l2cap_chan_lock(chan);
/* Early-exit path: drop the reference the timer held on the channel */
1884 l2cap_chan_unlock(chan);
1885 l2cap_chan_put(chan);
1889 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1890 l2cap_chan_unlock(chan);
1891 l2cap_chan_put(chan);
/* Streaming-mode transmit: append skbs to tx_q and drain it, stamping
 * each frame's control field (reqseq 0, increasing txseq), appending a
 * CRC16 FCS when configured, and sending immediately (no retransmission
 * state is kept in streaming mode).  Skipped while the channel is moving
 * between AMP controllers.
 */
1894 static void l2cap_streaming_send(struct l2cap_chan *chan,
1895 struct sk_buff_head *skbs)
1897 struct sk_buff *skb;
1898 struct l2cap_ctrl *control;
1900 BT_DBG("chan %p, skbs %p", chan, skbs);
1902 if (__chan_is_moving(chan))
1905 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1907 while (!skb_queue_empty(&chan->tx_q)) {
1909 skb = skb_dequeue(&chan->tx_q);
1911 bt_cb(skb)->l2cap.retries = 1;
1912 control = &bt_cb(skb)->l2cap;
1914 control->reqseq = 0;
1915 control->txseq = chan->next_tx_seq;
1917 __pack_control(chan, control, skb);
/* FCS covers everything written so far; appended after the payload */
1919 if (chan->fcs == L2CAP_FCS_CRC16) {
1920 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1921 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1924 l2cap_do_send(chan, skb);
1926 BT_DBG("Sent txseq %u", control->txseq);
1928 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1929 chan->frames_sent++;
/* ERTM transmit: send I-frames from tx_send_head while the channel is
 * connected, the peer's tx window has room, and the tx state machine is
 * in XMIT.  Each frame gets its control field packed (piggybacked ack in
 * reqseq, optional F-bit) and optional CRC16 FCS; a clone is sent so the
 * original stays queued for retransmission.  Returns the number sent
 * (exact value depends on lines not visible in this extract).
 */
1933 static int l2cap_ertm_send(struct l2cap_chan *chan)
1935 struct sk_buff *skb, *tx_skb;
1936 struct l2cap_ctrl *control;
1939 BT_DBG("chan %p", chan);
1941 if (chan->state != BT_CONNECTED)
1944 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1947 if (__chan_is_moving(chan))
1950 while (chan->tx_send_head &&
1951 chan->unacked_frames < chan->remote_tx_win &&
1952 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1954 skb = chan->tx_send_head;
1956 bt_cb(skb)->l2cap.retries = 1;
1957 control = &bt_cb(skb)->l2cap;
1959 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* reqseq acknowledges everything received up to buffer_seq */
1962 control->reqseq = chan->buffer_seq;
1963 chan->last_acked_seq = chan->buffer_seq;
1964 control->txseq = chan->next_tx_seq;
1966 __pack_control(chan, control, skb);
1968 if (chan->fcs == L2CAP_FCS_CRC16) {
1969 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1970 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1973 /* Clone after data has been modified. Data is assumed to be
1974 read-only (for locking purposes) on cloned sk_buffs.
1976 tx_skb = skb_clone(skb, GFP_KERNEL);
1981 __set_retrans_timer(chan);
1983 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1984 chan->unacked_frames++;
1985 chan->frames_sent++;
1988 if (skb_queue_is_last(&chan->tx_q, skb))
1989 chan->tx_send_head = NULL;
1991 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1993 l2cap_do_send(chan, tx_skb);
1994 BT_DBG("Sent txseq %u", control->txseq);
1997 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1998 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit the I-frames whose sequence numbers are on retrans_list.
 * For each frame still in tx_q: enforce max_tx (disconnect when the
 * retry budget is exhausted), rebuild the control field with the current
 * reqseq and optional F-bit, copy (if cloned) or clone the skb so the
 * header can be rewritten, patch the packed control word and FCS in
 * place, and send.  Skipped while remote is busy or the channel moves.
 */
2003 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2005 struct l2cap_ctrl control;
2006 struct sk_buff *skb;
2007 struct sk_buff *tx_skb;
2010 BT_DBG("chan %p", chan);
2012 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2015 if (__chan_is_moving(chan))
2018 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2019 seq = l2cap_seq_list_pop(&chan->retrans_list);
2021 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2023 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2028 bt_cb(skb)->l2cap.retries++;
2029 control = bt_cb(skb)->l2cap;
2031 if (chan->max_tx != 0 &&
2032 bt_cb(skb)->l2cap.retries > chan->max_tx) {
2033 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2034 l2cap_send_disconn_req(chan, ECONNRESET);
2035 l2cap_seq_list_clear(&chan->retrans_list);
2039 control.reqseq = chan->buffer_seq;
2040 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2045 if (skb_cloned(skb)) {
2046 /* Cloned sk_buffs are read-only, so we need a
2049 tx_skb = skb_copy(skb, GFP_KERNEL);
2051 tx_skb = skb_clone(skb, GFP_KERNEL);
/* Allocation failure: give up on this resend pass entirely */
2055 l2cap_seq_list_clear(&chan->retrans_list);
2059 /* Update skb contents */
2060 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2061 put_unaligned_le32(__pack_extended_control(&control),
2062 tx_skb->data + L2CAP_HDR_SIZE);
2064 put_unaligned_le16(__pack_enhanced_control(&control),
2065 tx_skb->data + L2CAP_HDR_SIZE);
/* Recompute FCS over the rewritten frame, excluding the FCS field */
2069 if (chan->fcs == L2CAP_FCS_CRC16) {
2070 u16 fcs = crc16(0, (u8 *) tx_skb->data,
2071 tx_skb->len - L2CAP_FCS_SIZE);
2072 put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2076 l2cap_do_send(chan, tx_skb);
2078 BT_DBG("Resent txseq %d", control.txseq);
2080 chan->last_acked_seq = chan->buffer_seq;
2084 static void l2cap_retransmit(struct l2cap_chan *chan,
2085 struct l2cap_ctrl *control)
2087 BT_DBG("chan %p, control %p", chan, control);
2089 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2090 l2cap_ertm_resend(chan);
/* Retransmit every unacked I-frame starting at control->reqseq: rebuild
 * retrans_list by walking tx_q from the frame matching reqseq up to (but
 * not including) tx_send_head, then resend — unless the peer reported
 * busy, in which case only the list is cleared.
 */
2093 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2094 struct l2cap_ctrl *control)
2096 struct sk_buff *skb;
2098 BT_DBG("chan %p, control %p", chan, control);
2101 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2103 l2cap_seq_list_clear(&chan->retrans_list);
2105 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2108 if (chan->unacked_frames) {
/* First loop: locate the frame with txseq == reqseq (or tx_send_head) */
2109 skb_queue_walk(&chan->tx_q, skb) {
2110 if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2111 skb == chan->tx_send_head)
/* Second loop: queue everything from there up to tx_send_head */
2115 skb_queue_walk_from(&chan->tx_q, skb) {
2116 if (skb == chan->tx_send_head)
2119 l2cap_seq_list_append(&chan->retrans_list,
2120 bt_cb(skb)->l2cap.txseq);
2123 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames.  If local busy is set, send an RNR
 * S-frame.  Otherwise try to piggyback the ack on outgoing I-frames
 * (l2cap_ertm_send), and only send an explicit RR when the unacked
 * receive window is at least 3/4 full; below that threshold (re)arm the
 * ack timer to batch acknowledgements.
 */
2127 static void l2cap_send_ack(struct l2cap_chan *chan)
2129 struct l2cap_ctrl control;
2130 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2131 chan->last_acked_seq);
2134 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2135 chan, chan->last_acked_seq, chan->buffer_seq);
2137 memset(&control, 0, sizeof(control));
2140 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2141 chan->rx_state == L2CAP_RX_STATE_RECV) {
2142 __clear_ack_timer(chan);
2143 control.super = L2CAP_SUPER_RNR;
2144 control.reqseq = chan->buffer_seq;
2145 l2cap_send_sframe(chan, &control);
2147 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2148 l2cap_ertm_send(chan);
2149 /* If any i-frames were sent, they included an ack */
2150 if (chan->buffer_seq == chan->last_acked_seq)
2154 /* Ack now if the window is 3/4ths full.
2155 * Calculate without mul or div
/* threshold = ack_win * 3 (shift-and-add); compared against *4 frames */
2157 threshold = chan->ack_win;
2158 threshold += threshold << 1;
2161 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2164 if (frames_to_ack >= threshold) {
2165 __clear_ack_timer(chan);
2166 control.super = L2CAP_SUPER_RR;
2167 control.reqseq = chan->buffer_seq;
2168 l2cap_send_sframe(chan, &control);
2173 __set_ack_timer(chan);
/* Copy user data from msg into skb: the first count bytes go into skb's
 * linear area, and any remainder is split into MTU-sized continuation
 * fragments (allocated via chan->ops->alloc_skb with no headroom, since
 * fragments carry no L2CAP header) chained on skb's frag_list, keeping
 * skb->len/data_len up to date.  Returns 0 on success or a negative
 * errno from allocation/copy failure.
 */
2177 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2178 struct msghdr *msg, int len,
2179 int count, struct sk_buff *skb)
2181 struct l2cap_conn *conn = chan->conn;
2182 struct sk_buff **frag;
2185 if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2191 /* Continuation fragments (no L2CAP header) */
2192 frag = &skb_shinfo(skb)->frag_list;
2194 struct sk_buff *tmp;
2196 count = min_t(unsigned int, conn->mtu, len);
2198 tmp = chan->ops->alloc_skb(chan, 0, count,
2199 msg->msg_flags & MSG_DONTWAIT);
2201 return PTR_ERR(tmp);
2205 if (!copy_from_iter_full(skb_put(*frag, count), count,
/* Account the fragment's bytes on the head skb */
2212 skb->len += (*frag)->len;
2213 skb->data_len += (*frag)->len;
2215 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus the PSM,
 * followed by the user payload copied from msg (fragmented to the HCI
 * MTU as needed).  Returns the skb or an ERR_PTR on failure.
 */
2221 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2222 struct msghdr *msg, size_t len)
2224 struct l2cap_conn *conn = chan->conn;
2225 struct sk_buff *skb;
2226 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2227 struct l2cap_hdr *lh;
2229 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2230 __le16_to_cpu(chan->psm), len);
2232 count = min_t(unsigned int, (conn->mtu - hlen), len);
2234 skb = chan->ops->alloc_skb(chan, hlen, count,
2235 msg->msg_flags & MSG_DONTWAIT);
2239 /* Create L2CAP header */
2240 lh = skb_put(skb, L2CAP_HDR_SIZE);
2241 lh->cid = cpu_to_le16(chan->dcid);
/* Header len field counts the PSM as part of the payload */
2242 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2243 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2245 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2246 if (unlikely(err < 0)) {
2248 return ERR_PTR(err);
/* Build a basic-mode B-frame: plain L2CAP header (dcid + payload length)
 * followed by the user payload from msg.  Returns the skb or ERR_PTR.
 */
2253 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2254 struct msghdr *msg, size_t len)
2256 struct l2cap_conn *conn = chan->conn;
2257 struct sk_buff *skb;
2259 struct l2cap_hdr *lh;
2261 BT_DBG("chan %p len %zu", chan, len);
2263 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2265 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2266 msg->msg_flags & MSG_DONTWAIT);
2270 /* Create L2CAP header */
2271 lh = skb_put(skb, L2CAP_HDR_SIZE);
2272 lh->cid = cpu_to_le16(chan->dcid);
2273 lh->len = cpu_to_le16(len);
2275 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2276 if (unlikely(err < 0)) {
2278 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame: L2CAP header, a zero placeholder for
 * the (enhanced or extended) control field — filled in later by the
 * transmit path — an optional SDU-length field for the first segment,
 * the payload, and room reserved for an FCS when CRC16 is configured.
 * Returns the skb or an ERR_PTR.
 */
2283 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2284 struct msghdr *msg, size_t len,
2287 struct l2cap_conn *conn = chan->conn;
2288 struct sk_buff *skb;
2289 int err, count, hlen;
2290 struct l2cap_hdr *lh;
2292 BT_DBG("chan %p len %zu", chan, len);
2295 return ERR_PTR(-ENOTCONN);
2297 hlen = __ertm_hdr_size(chan);
/* sdulen is only carried by the first (SAR=START) segment */
2300 hlen += L2CAP_SDULEN_SIZE;
2302 if (chan->fcs == L2CAP_FCS_CRC16)
2303 hlen += L2CAP_FCS_SIZE;
2305 count = min_t(unsigned int, (conn->mtu - hlen), len);
2307 skb = chan->ops->alloc_skb(chan, hlen, count,
2308 msg->msg_flags & MSG_DONTWAIT);
2312 /* Create L2CAP header */
2313 lh = skb_put(skb, L2CAP_HDR_SIZE);
2314 lh->cid = cpu_to_le16(chan->dcid);
2315 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2317 /* Control header is populated later */
2318 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2319 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE))
2321 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2324 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2326 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2327 if (unlikely(err < 0)) {
2329 return ERR_PTR(err);
2332 bt_cb(skb)->l2cap.fcs = chan->fcs;
2333 bt_cb(skb)->l2cap.retries = 0;
/* Split an outgoing SDU into ERTM/streaming I-frame PDUs on seg_queue.
 * PDU size starts from the HCI MTU (so every PDU fits one HCI fragment),
 * is capped for BR/EDR, reduced by FCS and ERTM header overhead, and by
 * the remote's MPS.  SAR marking: UNSEGMENTED for a single PDU,
 * otherwise START (carrying the SDU length) / CONTINUE / END.
 * On PDU creation failure the queue is purged and the error returned.
 */
2337 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2338 struct sk_buff_head *seg_queue,
2339 struct msghdr *msg, size_t len)
2341 struct sk_buff *skb;
2346 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2348 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2349 * so fragmented skbs are not used. The HCI layer's handling
2350 * of fragmented skbs is not compatible with ERTM's queueing.
2353 /* PDU size is derived from the HCI MTU */
2354 pdu_len = chan->conn->mtu;
2356 /* Constrain PDU size for BR/EDR connections */
2358 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2360 /* Adjust for largest possible L2CAP overhead. */
2362 pdu_len -= L2CAP_FCS_SIZE;
2364 pdu_len -= __ertm_hdr_size(chan);
2366 /* Remote device may have requested smaller PDUs */
2367 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2369 if (len <= pdu_len) {
2370 sar = L2CAP_SAR_UNSEGMENTED;
2374 sar = L2CAP_SAR_START;
2379 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2382 __skb_queue_purge(seg_queue);
2383 return PTR_ERR(skb);
2386 bt_cb(skb)->l2cap.sar = sar;
2387 __skb_queue_tail(seg_queue, skb);
/* Last remaining chunk becomes the END segment */
2393 if (len <= pdu_len) {
2394 sar = L2CAP_SAR_END;
2397 sar = L2CAP_SAR_CONTINUE;
/* Build one LE credit-based (K-frame) PDU: L2CAP header, an optional
 * SDU-length field (first segment only, when sdulen is set), then the
 * payload copied from msg.  Returns the skb or an ERR_PTR.
 */
2404 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2406 size_t len, u16 sdulen)
2408 struct l2cap_conn *conn = chan->conn;
2409 struct sk_buff *skb;
2410 int err, count, hlen;
2411 struct l2cap_hdr *lh;
2413 BT_DBG("chan %p len %zu", chan, len);
2416 return ERR_PTR(-ENOTCONN);
2418 hlen = L2CAP_HDR_SIZE;
/* Only the first segment of an SDU carries the SDU length */
2421 hlen += L2CAP_SDULEN_SIZE;
2423 count = min_t(unsigned int, (conn->mtu - hlen), len);
2425 skb = chan->ops->alloc_skb(chan, hlen, count,
2426 msg->msg_flags & MSG_DONTWAIT);
2430 /* Create L2CAP header */
2431 lh = skb_put(skb, L2CAP_HDR_SIZE);
2432 lh->cid = cpu_to_le16(chan->dcid);
2433 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2436 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2438 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2439 if (unlikely(err < 0)) {
2441 return ERR_PTR(err);
/* Split an SDU into LE credit-based PDUs on seg_queue.  The first PDU
 * reserves space for (and carries) the SDU length, so its payload is
 * remote_mps - 2; subsequent PDUs get the full remote_mps.  On failure
 * the queue is purged and the PDU error returned.
 */
2447 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2448 struct sk_buff_head *seg_queue,
2449 struct msghdr *msg, size_t len)
2451 struct sk_buff *skb;
2455 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2458 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2464 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2466 __skb_queue_purge(seg_queue);
2467 return PTR_ERR(skb);
2470 __skb_queue_tail(seg_queue, skb);
/* After the first segment no SDU-length field is needed: regain 2 bytes */
2476 pdu_len += L2CAP_SDULEN_SIZE;
/* Drain the LE flow-control tx queue: send queued PDUs while the peer
 * has granted credits and frames remain queued.
 */
2483 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2487 BT_DBG("chan %p", chan);
2489 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2490 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2495 BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2496 skb_queue_len(&chan->tx_q));
/* Entry point for sending user data on a channel.
 * Connectionless channels send a single G-frame.  Otherwise dispatch on
 * mode: LE_FLOWCTL segments into K-frames, queues them and sends while
 * credits last (suspending the socket when credits run out); BASIC sends
 * one B-frame; ERTM/STREAMING segment first (which may block on memory),
 * then hand the queue to the tx state machine or the streaming sender.
 * Each path re-checks BT_CONNECTED after allocations because the channel
 * lock is dropped while allocating skbs.  Returns bytes sent or -errno.
 */
2499 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2501 struct sk_buff *skb;
2503 struct sk_buff_head seg_queue;
2508 /* Connectionless channel */
2509 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2510 skb = l2cap_create_connless_pdu(chan, msg, len);
2512 return PTR_ERR(skb);
2514 /* Channel lock is released before requesting new skb and then
2515 * reacquired thus we need to recheck channel state.
2517 if (chan->state != BT_CONNECTED) {
2522 l2cap_do_send(chan, skb);
2526 switch (chan->mode) {
2527 case L2CAP_MODE_LE_FLOWCTL:
2528 /* Check outgoing MTU */
2529 if (len > chan->omtu)
2532 __skb_queue_head_init(&seg_queue);
2534 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2536 if (chan->state != BT_CONNECTED) {
2537 __skb_queue_purge(&seg_queue);
2544 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2546 l2cap_le_flowctl_send(chan);
/* Out of credits: tell the socket layer to stop feeding us data */
2548 if (!chan->tx_credits)
2549 chan->ops->suspend(chan);
2555 case L2CAP_MODE_BASIC:
2556 /* Check outgoing MTU */
2557 if (len > chan->omtu)
2560 /* Create a basic PDU */
2561 skb = l2cap_create_basic_pdu(chan, msg, len);
2563 return PTR_ERR(skb);
2565 /* Channel lock is released before requesting new skb and then
2566 * reacquired thus we need to recheck channel state.
2568 if (chan->state != BT_CONNECTED) {
2573 l2cap_do_send(chan, skb);
2577 case L2CAP_MODE_ERTM:
2578 case L2CAP_MODE_STREAMING:
2579 /* Check outgoing MTU */
2580 if (len > chan->omtu) {
2585 __skb_queue_head_init(&seg_queue);
2587 /* Do segmentation before calling in to the state machine,
2588 * since it's possible to block while waiting for memory
2591 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2593 /* The channel could have been closed while segmenting,
2594 * check that it is still connected.
2596 if (chan->state != BT_CONNECTED) {
2597 __skb_queue_purge(&seg_queue);
2604 if (chan->mode == L2CAP_MODE_ERTM)
2605 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2607 l2cap_streaming_send(chan, &seg_queue);
2611 /* If the skbs were not queued for sending, they'll still be in
2612 * seg_queue and need to be purged.
2614 __skb_queue_purge(&seg_queue);
/* NOTE(review): message says "bad state" but prints chan->mode */
2618 BT_DBG("bad state %1.1x", chan->mode);
2624 EXPORT_SYMBOL_GPL(l2cap_chan_send);
/* Send SREJ S-frames for every missing sequence number between
 * expected_tx_seq and txseq that is not already buffered in srej_q,
 * recording each on srej_list; then advance expected_tx_seq past txseq.
 */
2626 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2628 struct l2cap_ctrl control;
2631 BT_DBG("chan %p, txseq %u", chan, txseq);
2633 memset(&control, 0, sizeof(control));
2635 control.super = L2CAP_SUPER_SREJ;
2637 for (seq = chan->expected_tx_seq; seq != txseq;
2638 seq = __next_seq(chan, seq)) {
2639 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2640 control.reqseq = seq;
2641 l2cap_send_sframe(chan, &control);
2642 l2cap_seq_list_append(&chan->srej_list, seq);
2646 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recently requested missing frame (the
 * tail of srej_list); no-op when the list is empty/cleared.
 */
2649 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2651 struct l2cap_ctrl control;
2653 BT_DBG("chan %p", chan);
2655 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2658 memset(&control, 0, sizeof(control));
2660 control.super = L2CAP_SUPER_SREJ;
2661 control.reqseq = chan->srej_list.tail;
2662 l2cap_send_sframe(chan, &control);
/* Re-send SREJs for all outstanding missing frames except txseq.
 * Rotates srej_list: each entry is popped, SREJ'd and re-appended, until
 * the list comes back around to its initial head (one full pass), with
 * entries matching txseq (or a cleared list) terminating the loop.
 */
2665 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2667 struct l2cap_ctrl control;
2671 BT_DBG("chan %p, txseq %u", chan, txseq);
2673 memset(&control, 0, sizeof(control));
2675 control.super = L2CAP_SUPER_SREJ;
2677 /* Capture initial list head to allow only one pass through the list. */
2678 initial_head = chan->srej_list.head;
2681 seq = l2cap_seq_list_pop(&chan->srej_list);
2682 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2685 control.reqseq = seq;
2686 l2cap_send_sframe(chan, &control);
2687 l2cap_seq_list_append(&chan->srej_list, seq);
2688 } while (chan->srej_list.head != initial_head);
/* Process an incoming acknowledgement: free every tx_q frame from
 * expected_ack_seq up to (but not including) reqseq, decrementing
 * unacked_frames; then record reqseq and stop the retransmission timer
 * once nothing is outstanding.
 */
2691 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2693 struct sk_buff *acked_skb;
2696 BT_DBG("chan %p, reqseq %u", chan, reqseq);
/* Nothing outstanding, or this ack repeats what we already processed */
2698 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2701 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2702 chan->expected_ack_seq, chan->unacked_frames);
2704 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2705 ackseq = __next_seq(chan, ackseq)) {
2707 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2709 skb_unlink(acked_skb, &chan->tx_q);
2710 kfree_skb(acked_skb);
2711 chan->unacked_frames--;
2715 chan->expected_ack_seq = reqseq;
2717 if (chan->unacked_frames == 0)
2718 __clear_retrans_timer(chan);
2720 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abandon the SREJ_SENT receive state: rewind expected_tx_seq to
 * buffer_seq, drop the SREJ bookkeeping and any out-of-order frames
 * buffered in srej_q, and return the receiver to plain RECV.
 */
2723 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2725 BT_DBG("chan %p", chan);
2727 chan->expected_tx_seq = chan->buffer_seq;
2728 l2cap_seq_list_clear(&chan->srej_list);
2729 skb_queue_purge(&chan->srej_q);
2730 chan->rx_state = L2CAP_RX_STATE_RECV;
/* Transmit state machine, XMIT state.  Handles: new data (queue and
 * send), local-busy enter/exit (RNR bookkeeping, possible RR poll and
 * transition to WAIT_F), received reqseq/F-bit (ack processing),
 * explicit poll and retransmission timeout (both poll the peer and move
 * to WAIT_F).
 */
2733 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2734 struct l2cap_ctrl *control,
2735 struct sk_buff_head *skbs, u8 event)
2737 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2741 case L2CAP_EV_DATA_REQUEST:
2742 if (chan->tx_send_head == NULL)
2743 chan->tx_send_head = skb_peek(skbs)
2745 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2746 l2cap_ertm_send(chan);
2748 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2749 BT_DBG("Enter LOCAL_BUSY");
2750 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2752 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2753 /* The SREJ_SENT state must be aborted if we are to
2754 * enter the LOCAL_BUSY state.
2756 l2cap_abort_rx_srej_sent(chan);
2759 l2cap_send_ack(chan);
2762 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2763 BT_DBG("Exit LOCAL_BUSY");
2764 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* If we had sent RNR, poll the peer with RR+P and await the F-bit */
2766 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2767 struct l2cap_ctrl local_control;
2769 memset(&local_control, 0, sizeof(local_control));
2770 local_control.sframe = 1;
2771 local_control.super = L2CAP_SUPER_RR;
2772 local_control.poll = 1;
2773 local_control.reqseq = chan->buffer_seq;
2774 l2cap_send_sframe(chan, &local_control);
2776 chan->retry_count = 1;
2777 __set_monitor_timer(chan);
2778 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2781 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2782 l2cap_process_reqseq(chan, control->reqseq);
2784 case L2CAP_EV_EXPLICIT_POLL:
2785 l2cap_send_rr_or_rnr(chan, 1);
2786 chan->retry_count = 1;
2787 __set_monitor_timer(chan);
2788 __clear_ack_timer(chan);
2789 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2791 case L2CAP_EV_RETRANS_TO:
2792 l2cap_send_rr_or_rnr(chan, 1);
2793 chan->retry_count = 1;
2794 __set_monitor_timer(chan);
2795 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2797 case L2CAP_EV_RECV_FBIT:
2798 /* Nothing to process */
/* Transmit state machine, WAIT_F state (a poll is outstanding).  New
 * data is queued but not sent; local-busy transitions mirror the XMIT
 * state; a received F-bit clears the monitor timer, restarts the
 * retransmission timer if frames remain unacked, and returns to XMIT;
 * monitor timeout re-polls up to max_tx times before disconnecting.
 *
 * Fix: the debug format string "0x2.2%x" was malformed — the width/
 * precision belongs after '%', i.e. "0x%2.2x" — so tx_state printed as
 * a literal "0x2.2" followed by the bare hex value.
 */
2805 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2806 struct l2cap_ctrl *control,
2807 struct sk_buff_head *skbs, u8 event)
2809 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2813 case L2CAP_EV_DATA_REQUEST:
2814 if (chan->tx_send_head == NULL)
2815 chan->tx_send_head = skb_peek(skbs);
2816 /* Queue data, but don't send. */
2817 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2819 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2820 BT_DBG("Enter LOCAL_BUSY");
2821 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2823 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2824 /* The SREJ_SENT state must be aborted if we are to
2825 * enter the LOCAL_BUSY state.
2827 l2cap_abort_rx_srej_sent(chan);
2830 l2cap_send_ack(chan);
2833 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2834 BT_DBG("Exit LOCAL_BUSY");
2835 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* If we had sent RNR, poll the peer with RR+P and stay waiting for F */
2837 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2838 struct l2cap_ctrl local_control;
2839 memset(&local_control, 0, sizeof(local_control));
2840 local_control.sframe = 1;
2841 local_control.super = L2CAP_SUPER_RR;
2842 local_control.poll = 1;
2843 local_control.reqseq = chan->buffer_seq;
2844 l2cap_send_sframe(chan, &local_control);
2846 chan->retry_count = 1;
2847 __set_monitor_timer(chan);
2848 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2851 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2852 l2cap_process_reqseq(chan, control->reqseq);
2856 case L2CAP_EV_RECV_FBIT:
2857 if (control && control->final) {
2858 __clear_monitor_timer(chan);
/* Frames still unacked: keep the retransmission timer running */
2859 if (chan->unacked_frames > 0)
2860 __set_retrans_timer(chan);
2861 chan->retry_count = 0;
2862 chan->tx_state = L2CAP_TX_STATE_XMIT;
2863 BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2866 case L2CAP_EV_EXPLICIT_POLL:
2869 case L2CAP_EV_MONITOR_TO:
/* Re-poll the peer until max_tx is exhausted (0 == unlimited retries) */
2870 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2871 l2cap_send_rr_or_rnr(chan, 1);
2872 __set_monitor_timer(chan);
2873 chan->retry_count++;
2875 l2cap_send_disconn_req(chan, ECONNABORTED);
2883 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2884 struct sk_buff_head *skbs, u8 event)
2886 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2887 chan, control, skbs, event, chan->tx_state);
2889 switch (chan->tx_state) {
2890 case L2CAP_TX_STATE_XMIT:
2891 l2cap_tx_state_xmit(chan, control, skbs, event);
2893 case L2CAP_TX_STATE_WAIT_F:
2894 l2cap_tx_state_wait_f(chan, control, skbs, event);
2902 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2903 struct l2cap_ctrl *control)
2905 BT_DBG("chan %p, control %p", chan, control);
2906 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2909 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2910 struct l2cap_ctrl *control)
2912 BT_DBG("chan %p, control %p", chan, control);
2913 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2916 /* Copy frame to all raw sockets on that connection */
2917 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2919 struct sk_buff *nskb;
2920 struct l2cap_chan *chan;
2922 BT_DBG("conn %p", conn);
2924 mutex_lock(&conn->chan_lock);
2926 list_for_each_entry(chan, &conn->chan_l, list) {
2927 if (chan->chan_type != L2CAP_CHAN_RAW)
2930 /* Don't send frame to the channel it came from */
2931 if (bt_cb(skb)->l2cap.chan == chan)
2934 nskb = skb_clone(skb, GFP_KERNEL);
2937 if (chan->ops->recv(chan, nskb))
2941 mutex_unlock(&conn->chan_lock);
2944 /* ---- L2CAP signalling commands ---- */
2945 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2946 u8 ident, u16 dlen, void *data)
2948 struct sk_buff *skb, **frag;
2949 struct l2cap_cmd_hdr *cmd;
2950 struct l2cap_hdr *lh;
2953 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2954 conn, code, ident, dlen);
2956 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2959 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2960 count = min_t(unsigned int, conn->mtu, len);
2962 skb = bt_skb_alloc(count, GFP_KERNEL);
2966 lh = skb_put(skb, L2CAP_HDR_SIZE);
2967 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2969 if (conn->hcon->type == LE_LINK)
2970 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2972 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2974 cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
2977 cmd->len = cpu_to_le16(dlen);
2980 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2981 skb_put_data(skb, data, count);
2987 /* Continuation fragments (no L2CAP header) */
2988 frag = &skb_shinfo(skb)->frag_list;
2990 count = min_t(unsigned int, conn->mtu, len);
2992 *frag = bt_skb_alloc(count, GFP_KERNEL);
2996 skb_put_data(*frag, data, count);
3001 frag = &(*frag)->next;
3011 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3014 struct l2cap_conf_opt *opt = *ptr;
3017 len = L2CAP_CONF_OPT_SIZE + opt->len;
3025 *val = *((u8 *) opt->val);
3029 *val = get_unaligned_le16(opt->val);
3033 *val = get_unaligned_le32(opt->val);
3037 *val = (unsigned long) opt->val;
3041 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
3045 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3047 struct l2cap_conf_opt *opt = *ptr;
3049 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3051 if (size < L2CAP_CONF_OPT_SIZE + len)
3059 *((u8 *) opt->val) = val;
3063 put_unaligned_le16(val, opt->val);
3067 put_unaligned_le32(val, opt->val);
3071 memcpy(opt->val, (void *) val, len);
3075 *ptr += L2CAP_CONF_OPT_SIZE + len;
3078 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3080 struct l2cap_conf_efs efs;
3082 switch (chan->mode) {
3083 case L2CAP_MODE_ERTM:
3084 efs.id = chan->local_id;
3085 efs.stype = chan->local_stype;
3086 efs.msdu = cpu_to_le16(chan->local_msdu);
3087 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3088 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3089 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3092 case L2CAP_MODE_STREAMING:
3094 efs.stype = L2CAP_SERV_BESTEFFORT;
3095 efs.msdu = cpu_to_le16(chan->local_msdu);
3096 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3105 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3106 (unsigned long) &efs, size);
3109 static void l2cap_ack_timeout(struct work_struct *work)
3111 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3115 BT_DBG("chan %p", chan);
3117 l2cap_chan_lock(chan);
3119 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3120 chan->last_acked_seq);
3123 l2cap_send_rr_or_rnr(chan, 0);
3125 l2cap_chan_unlock(chan);
3126 l2cap_chan_put(chan);
3129 int l2cap_ertm_init(struct l2cap_chan *chan)
3133 chan->next_tx_seq = 0;
3134 chan->expected_tx_seq = 0;
3135 chan->expected_ack_seq = 0;
3136 chan->unacked_frames = 0;
3137 chan->buffer_seq = 0;
3138 chan->frames_sent = 0;
3139 chan->last_acked_seq = 0;
3141 chan->sdu_last_frag = NULL;
3144 skb_queue_head_init(&chan->tx_q);
3146 chan->local_amp_id = AMP_ID_BREDR;
3147 chan->move_id = AMP_ID_BREDR;
3148 chan->move_state = L2CAP_MOVE_STABLE;
3149 chan->move_role = L2CAP_MOVE_ROLE_NONE;
3151 if (chan->mode != L2CAP_MODE_ERTM)
3154 chan->rx_state = L2CAP_RX_STATE_RECV;
3155 chan->tx_state = L2CAP_TX_STATE_XMIT;
3157 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3158 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3159 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3161 skb_queue_head_init(&chan->srej_q);
3163 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3167 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3169 l2cap_seq_list_free(&chan->srej_list);
3174 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3177 case L2CAP_MODE_STREAMING:
3178 case L2CAP_MODE_ERTM:
3179 if (l2cap_mode_supported(mode, remote_feat_mask))
3183 return L2CAP_MODE_BASIC;
3187 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3189 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3190 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3193 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3195 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3196 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3199 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3200 struct l2cap_conf_rfc *rfc)
3202 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3203 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3205 /* Class 1 devices have must have ERTM timeouts
3206 * exceeding the Link Supervision Timeout. The
3207 * default Link Supervision Timeout for AMP
3208 * controllers is 10 seconds.
3210 * Class 1 devices use 0xffffffff for their
3211 * best-effort flush timeout, so the clamping logic
3212 * will result in a timeout that meets the above
3213 * requirement. ERTM timeouts are 16-bit values, so
3214 * the maximum timeout is 65.535 seconds.
3217 /* Convert timeout to milliseconds and round */
3218 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3220 /* This is the recommended formula for class 2 devices
3221 * that start ERTM timers when packets are sent to the
3224 ertm_to = 3 * ertm_to + 500;
3226 if (ertm_to > 0xffff)
3229 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3230 rfc->monitor_timeout = rfc->retrans_timeout;
3232 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3233 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3237 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3239 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3240 __l2cap_ews_supported(chan->conn)) {
3241 /* use extended control field */
3242 set_bit(FLAG_EXT_CTRL, &chan->flags);
3243 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3245 chan->tx_win = min_t(u16, chan->tx_win,
3246 L2CAP_DEFAULT_TX_WINDOW);
3247 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3249 chan->ack_win = chan->tx_win;
3252 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3254 struct l2cap_conf_req *req = data;
3255 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3256 void *ptr = req->data;
3257 void *endptr = data + data_size;
3260 BT_DBG("chan %p", chan);
3262 if (chan->num_conf_req || chan->num_conf_rsp)
3265 switch (chan->mode) {
3266 case L2CAP_MODE_STREAMING:
3267 case L2CAP_MODE_ERTM:
3268 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3271 if (__l2cap_efs_supported(chan->conn))
3272 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3276 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3281 if (chan->imtu != L2CAP_DEFAULT_MTU)
3282 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
3284 switch (chan->mode) {
3285 case L2CAP_MODE_BASIC:
3289 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3290 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3293 rfc.mode = L2CAP_MODE_BASIC;
3295 rfc.max_transmit = 0;
3296 rfc.retrans_timeout = 0;
3297 rfc.monitor_timeout = 0;
3298 rfc.max_pdu_size = 0;
3300 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3301 (unsigned long) &rfc, endptr - ptr);
3304 case L2CAP_MODE_ERTM:
3305 rfc.mode = L2CAP_MODE_ERTM;
3306 rfc.max_transmit = chan->max_tx;
3308 __l2cap_set_ertm_timeouts(chan, &rfc);
3310 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3311 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3313 rfc.max_pdu_size = cpu_to_le16(size);
3315 l2cap_txwin_setup(chan);
3317 rfc.txwin_size = min_t(u16, chan->tx_win,
3318 L2CAP_DEFAULT_TX_WINDOW);
3320 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3321 (unsigned long) &rfc, endptr - ptr);
3323 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3324 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3326 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3327 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3328 chan->tx_win, endptr - ptr);
3330 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3331 if (chan->fcs == L2CAP_FCS_NONE ||
3332 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3333 chan->fcs = L2CAP_FCS_NONE;
3334 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3335 chan->fcs, endptr - ptr);
3339 case L2CAP_MODE_STREAMING:
3340 l2cap_txwin_setup(chan);
3341 rfc.mode = L2CAP_MODE_STREAMING;
3343 rfc.max_transmit = 0;
3344 rfc.retrans_timeout = 0;
3345 rfc.monitor_timeout = 0;
3347 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3348 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3350 rfc.max_pdu_size = cpu_to_le16(size);
3352 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3353 (unsigned long) &rfc, endptr - ptr);
3355 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3356 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3358 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3359 if (chan->fcs == L2CAP_FCS_NONE ||
3360 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3361 chan->fcs = L2CAP_FCS_NONE;
3362 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3363 chan->fcs, endptr - ptr);
3368 req->dcid = cpu_to_le16(chan->dcid);
3369 req->flags = cpu_to_le16(0);
3374 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3376 struct l2cap_conf_rsp *rsp = data;
3377 void *ptr = rsp->data;
3378 void *endptr = data + data_size;
3379 void *req = chan->conf_req;
3380 int len = chan->conf_len;
3381 int type, hint, olen;
3383 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3384 struct l2cap_conf_efs efs;
3386 u16 mtu = L2CAP_DEFAULT_MTU;
3387 u16 result = L2CAP_CONF_SUCCESS;
3390 BT_DBG("chan %p", chan);
3392 while (len >= L2CAP_CONF_OPT_SIZE) {
3393 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3397 hint = type & L2CAP_CONF_HINT;
3398 type &= L2CAP_CONF_MASK;
3401 case L2CAP_CONF_MTU:
3407 case L2CAP_CONF_FLUSH_TO:
3410 chan->flush_to = val;
3413 case L2CAP_CONF_QOS:
3416 case L2CAP_CONF_RFC:
3417 if (olen != sizeof(rfc))
3419 memcpy(&rfc, (void *) val, olen);
3422 case L2CAP_CONF_FCS:
3425 if (val == L2CAP_FCS_NONE)
3426 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3429 case L2CAP_CONF_EFS:
3430 if (olen != sizeof(efs))
3433 memcpy(&efs, (void *) val, olen);
3436 case L2CAP_CONF_EWS:
3439 if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3440 return -ECONNREFUSED;
3441 set_bit(FLAG_EXT_CTRL, &chan->flags);
3442 set_bit(CONF_EWS_RECV, &chan->conf_state);
3443 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3444 chan->remote_tx_win = val;
3450 result = L2CAP_CONF_UNKNOWN;
3451 *((u8 *) ptr++) = type;
3456 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3459 switch (chan->mode) {
3460 case L2CAP_MODE_STREAMING:
3461 case L2CAP_MODE_ERTM:
3462 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3463 chan->mode = l2cap_select_mode(rfc.mode,
3464 chan->conn->feat_mask);
3469 if (__l2cap_efs_supported(chan->conn))
3470 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3472 return -ECONNREFUSED;
3475 if (chan->mode != rfc.mode)
3476 return -ECONNREFUSED;
3482 if (chan->mode != rfc.mode) {
3483 result = L2CAP_CONF_UNACCEPT;
3484 rfc.mode = chan->mode;
3486 if (chan->num_conf_rsp == 1)
3487 return -ECONNREFUSED;
3489 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3490 (unsigned long) &rfc, endptr - ptr);
3493 if (result == L2CAP_CONF_SUCCESS) {
3494 /* Configure output options and let the other side know
3495 * which ones we don't like. */
3497 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3498 result = L2CAP_CONF_UNACCEPT;
3501 set_bit(CONF_MTU_DONE, &chan->conf_state);
3503 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
3506 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3507 efs.stype != L2CAP_SERV_NOTRAFIC &&
3508 efs.stype != chan->local_stype) {
3510 result = L2CAP_CONF_UNACCEPT;
3512 if (chan->num_conf_req >= 1)
3513 return -ECONNREFUSED;
3515 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3517 (unsigned long) &efs, endptr - ptr);
3519 /* Send PENDING Conf Rsp */
3520 result = L2CAP_CONF_PENDING;
3521 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3526 case L2CAP_MODE_BASIC:
3527 chan->fcs = L2CAP_FCS_NONE;
3528 set_bit(CONF_MODE_DONE, &chan->conf_state);
3531 case L2CAP_MODE_ERTM:
3532 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3533 chan->remote_tx_win = rfc.txwin_size;
3535 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3537 chan->remote_max_tx = rfc.max_transmit;
3539 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3540 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3541 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3542 rfc.max_pdu_size = cpu_to_le16(size);
3543 chan->remote_mps = size;
3545 __l2cap_set_ertm_timeouts(chan, &rfc);
3547 set_bit(CONF_MODE_DONE, &chan->conf_state);
3549 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3550 sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3552 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3553 chan->remote_id = efs.id;
3554 chan->remote_stype = efs.stype;
3555 chan->remote_msdu = le16_to_cpu(efs.msdu);
3556 chan->remote_flush_to =
3557 le32_to_cpu(efs.flush_to);
3558 chan->remote_acc_lat =
3559 le32_to_cpu(efs.acc_lat);
3560 chan->remote_sdu_itime =
3561 le32_to_cpu(efs.sdu_itime);
3562 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3564 (unsigned long) &efs, endptr - ptr);
3568 case L2CAP_MODE_STREAMING:
3569 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3570 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3571 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3572 rfc.max_pdu_size = cpu_to_le16(size);
3573 chan->remote_mps = size;
3575 set_bit(CONF_MODE_DONE, &chan->conf_state);
3577 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3578 (unsigned long) &rfc, endptr - ptr);
3583 result = L2CAP_CONF_UNACCEPT;
3585 memset(&rfc, 0, sizeof(rfc));
3586 rfc.mode = chan->mode;
3589 if (result == L2CAP_CONF_SUCCESS)
3590 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3592 rsp->scid = cpu_to_le16(chan->dcid);
3593 rsp->result = cpu_to_le16(result);
3594 rsp->flags = cpu_to_le16(0);
3599 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3600 void *data, size_t size, u16 *result)
3602 struct l2cap_conf_req *req = data;
3603 void *ptr = req->data;
3604 void *endptr = data + size;
3607 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3608 struct l2cap_conf_efs efs;
3610 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3612 while (len >= L2CAP_CONF_OPT_SIZE) {
3613 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3618 case L2CAP_CONF_MTU:
3621 if (val < L2CAP_DEFAULT_MIN_MTU) {
3622 *result = L2CAP_CONF_UNACCEPT;
3623 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3626 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3630 case L2CAP_CONF_FLUSH_TO:
3633 chan->flush_to = val;
3634 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3635 chan->flush_to, endptr - ptr);
3638 case L2CAP_CONF_RFC:
3639 if (olen != sizeof(rfc))
3641 memcpy(&rfc, (void *)val, olen);
3642 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3643 rfc.mode != chan->mode)
3644 return -ECONNREFUSED;
3646 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3647 (unsigned long) &rfc, endptr - ptr);
3650 case L2CAP_CONF_EWS:
3653 chan->ack_win = min_t(u16, val, chan->ack_win);
3654 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3655 chan->tx_win, endptr - ptr);
3658 case L2CAP_CONF_EFS:
3659 if (olen != sizeof(efs))
3661 memcpy(&efs, (void *)val, olen);
3662 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3663 efs.stype != L2CAP_SERV_NOTRAFIC &&
3664 efs.stype != chan->local_stype)
3665 return -ECONNREFUSED;
3666 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3667 (unsigned long) &efs, endptr - ptr);
3670 case L2CAP_CONF_FCS:
3673 if (*result == L2CAP_CONF_PENDING)
3674 if (val == L2CAP_FCS_NONE)
3675 set_bit(CONF_RECV_NO_FCS,
3681 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3682 return -ECONNREFUSED;
3684 chan->mode = rfc.mode;
3686 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3688 case L2CAP_MODE_ERTM:
3689 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3690 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3691 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3692 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3693 chan->ack_win = min_t(u16, chan->ack_win,
3696 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3697 chan->local_msdu = le16_to_cpu(efs.msdu);
3698 chan->local_sdu_itime =
3699 le32_to_cpu(efs.sdu_itime);
3700 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3701 chan->local_flush_to =
3702 le32_to_cpu(efs.flush_to);
3706 case L2CAP_MODE_STREAMING:
3707 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3711 req->dcid = cpu_to_le16(chan->dcid);
3712 req->flags = cpu_to_le16(0);
3717 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3718 u16 result, u16 flags)
3720 struct l2cap_conf_rsp *rsp = data;
3721 void *ptr = rsp->data;
3723 BT_DBG("chan %p", chan);
3725 rsp->scid = cpu_to_le16(chan->dcid);
3726 rsp->result = cpu_to_le16(result);
3727 rsp->flags = cpu_to_le16(flags);
3732 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3734 struct l2cap_le_conn_rsp rsp;
3735 struct l2cap_conn *conn = chan->conn;
3737 BT_DBG("chan %p", chan);
3739 rsp.dcid = cpu_to_le16(chan->scid);
3740 rsp.mtu = cpu_to_le16(chan->imtu);
3741 rsp.mps = cpu_to_le16(chan->mps);
3742 rsp.credits = cpu_to_le16(chan->rx_credits);
3743 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3745 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3749 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3751 struct l2cap_conn_rsp rsp;
3752 struct l2cap_conn *conn = chan->conn;
3756 rsp.scid = cpu_to_le16(chan->dcid);
3757 rsp.dcid = cpu_to_le16(chan->scid);
3758 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3759 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3762 rsp_code = L2CAP_CREATE_CHAN_RSP;
3764 rsp_code = L2CAP_CONN_RSP;
3766 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3768 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3770 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3773 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3774 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3775 chan->num_conf_req++;
3778 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3782 /* Use sane default values in case a misbehaving remote device
3783 * did not send an RFC or extended window size option.
3785 u16 txwin_ext = chan->ack_win;
3786 struct l2cap_conf_rfc rfc = {
3788 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3789 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3790 .max_pdu_size = cpu_to_le16(chan->imtu),
3791 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3794 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3796 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3799 while (len >= L2CAP_CONF_OPT_SIZE) {
3800 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3805 case L2CAP_CONF_RFC:
3806 if (olen != sizeof(rfc))
3808 memcpy(&rfc, (void *)val, olen);
3810 case L2CAP_CONF_EWS:
3819 case L2CAP_MODE_ERTM:
3820 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3821 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3822 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3823 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3824 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3826 chan->ack_win = min_t(u16, chan->ack_win,
3829 case L2CAP_MODE_STREAMING:
3830 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3834 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3835 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3838 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3840 if (cmd_len < sizeof(*rej))
3843 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3846 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3847 cmd->ident == conn->info_ident) {
3848 cancel_delayed_work(&conn->info_timer);
3850 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3851 conn->info_ident = 0;
3853 l2cap_conn_start(conn);
3859 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3860 struct l2cap_cmd_hdr *cmd,
3861 u8 *data, u8 rsp_code, u8 amp_id)
3863 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3864 struct l2cap_conn_rsp rsp;
3865 struct l2cap_chan *chan = NULL, *pchan;
3866 int result, status = L2CAP_CS_NO_INFO;
3868 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3869 __le16 psm = req->psm;
3871 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3873 /* Check if we have socket listening on psm */
3874 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3875 &conn->hcon->dst, ACL_LINK);
3877 result = L2CAP_CR_BAD_PSM;
3881 mutex_lock(&conn->chan_lock);
3882 l2cap_chan_lock(pchan);
3884 /* Check if the ACL is secure enough (if not SDP) */
3885 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3886 !hci_conn_check_link_mode(conn->hcon)) {
3887 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3888 result = L2CAP_CR_SEC_BLOCK;
3892 result = L2CAP_CR_NO_MEM;
3894 /* Check if we already have channel with that dcid */
3895 if (__l2cap_get_chan_by_dcid(conn, scid))
3898 chan = pchan->ops->new_connection(pchan);
3902 /* For certain devices (ex: HID mouse), support for authentication,
3903 * pairing and bonding is optional. For such devices, inorder to avoid
3904 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3905 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3907 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3909 bacpy(&chan->src, &conn->hcon->src);
3910 bacpy(&chan->dst, &conn->hcon->dst);
3911 chan->src_type = bdaddr_src_type(conn->hcon);
3912 chan->dst_type = bdaddr_dst_type(conn->hcon);
3915 chan->local_amp_id = amp_id;
3917 __l2cap_chan_add(conn, chan);
3921 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3923 chan->ident = cmd->ident;
3925 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3926 if (l2cap_chan_check_security(chan, false)) {
3927 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3928 l2cap_state_change(chan, BT_CONNECT2);
3929 result = L2CAP_CR_PEND;
3930 status = L2CAP_CS_AUTHOR_PEND;
3931 chan->ops->defer(chan);
3933 /* Force pending result for AMP controllers.
3934 * The connection will succeed after the
3935 * physical link is up.
3937 if (amp_id == AMP_ID_BREDR) {
3938 l2cap_state_change(chan, BT_CONFIG);
3939 result = L2CAP_CR_SUCCESS;
3941 l2cap_state_change(chan, BT_CONNECT2);
3942 result = L2CAP_CR_PEND;
3944 status = L2CAP_CS_NO_INFO;
3947 l2cap_state_change(chan, BT_CONNECT2);
3948 result = L2CAP_CR_PEND;
3949 status = L2CAP_CS_AUTHEN_PEND;
3952 l2cap_state_change(chan, BT_CONNECT2);
3953 result = L2CAP_CR_PEND;
3954 status = L2CAP_CS_NO_INFO;
3958 l2cap_chan_unlock(pchan);
3959 mutex_unlock(&conn->chan_lock);
3960 l2cap_chan_put(pchan);
3963 rsp.scid = cpu_to_le16(scid);
3964 rsp.dcid = cpu_to_le16(dcid);
3965 rsp.result = cpu_to_le16(result);
3966 rsp.status = cpu_to_le16(status);
3967 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3969 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3970 struct l2cap_info_req info;
3971 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3973 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3974 conn->info_ident = l2cap_get_ident(conn);
3976 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3978 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3979 sizeof(info), &info);
3982 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3983 result == L2CAP_CR_SUCCESS) {
3985 set_bit(CONF_REQ_SENT, &chan->conf_state);
3986 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3987 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3988 chan->num_conf_req++;
3994 static int l2cap_connect_req(struct l2cap_conn *conn,
3995 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3997 struct hci_dev *hdev = conn->hcon->hdev;
3998 struct hci_conn *hcon = conn->hcon;
4000 if (cmd_len < sizeof(struct l2cap_conn_req))
4004 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
4005 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4006 mgmt_device_connected(hdev, hcon, 0, NULL, 0);
4007 hci_dev_unlock(hdev);
4009 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
4013 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
4014 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4017 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4018 u16 scid, dcid, result, status;
4019 struct l2cap_chan *chan;
4023 if (cmd_len < sizeof(*rsp))
4026 scid = __le16_to_cpu(rsp->scid);
4027 dcid = __le16_to_cpu(rsp->dcid);
4028 result = __le16_to_cpu(rsp->result);
4029 status = __le16_to_cpu(rsp->status);
4031 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4032 dcid, scid, result, status);
4034 mutex_lock(&conn->chan_lock);
4037 chan = __l2cap_get_chan_by_scid(conn, scid);
4043 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4052 l2cap_chan_lock(chan);
4055 case L2CAP_CR_SUCCESS:
4056 l2cap_state_change(chan, BT_CONFIG);
4059 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4061 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4064 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4065 l2cap_build_conf_req(chan, req, sizeof(req)), req);
4066 chan->num_conf_req++;
4070 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4074 l2cap_chan_del(chan, ECONNREFUSED);
4078 l2cap_chan_unlock(chan);
4081 mutex_unlock(&conn->chan_lock);
4086 static inline void set_default_fcs(struct l2cap_chan *chan)
4088 /* FCS is enabled only in ERTM or streaming mode, if one or both
4091 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4092 chan->fcs = L2CAP_FCS_NONE;
4093 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4094 chan->fcs = L2CAP_FCS_CRC16;
4097 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4098 u8 ident, u16 flags)
4100 struct l2cap_conn *conn = chan->conn;
4102 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4105 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4106 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4108 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4109 l2cap_build_conf_rsp(chan, data,
4110 L2CAP_CONF_SUCCESS, flags), data);
4113 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4116 struct l2cap_cmd_rej_cid rej;
4118 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4119 rej.scid = __cpu_to_le16(scid);
4120 rej.dcid = __cpu_to_le16(dcid);
4122 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4125 static inline int l2cap_config_req(struct l2cap_conn *conn,
4126 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4129 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4132 struct l2cap_chan *chan;
4135 if (cmd_len < sizeof(*req))
4138 dcid = __le16_to_cpu(req->dcid);
4139 flags = __le16_to_cpu(req->flags);
4141 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4143 chan = l2cap_get_chan_by_scid(conn, dcid);
4145 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4149 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
4150 chan->state != BT_CONNECTED) {
4151 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4156 /* Reject if config buffer is too small. */
4157 len = cmd_len - sizeof(*req);
4158 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4159 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4160 l2cap_build_conf_rsp(chan, rsp,
4161 L2CAP_CONF_REJECT, flags), rsp);
4166 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4167 chan->conf_len += len;
4169 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4170 /* Incomplete config. Send empty response. */
4171 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4172 l2cap_build_conf_rsp(chan, rsp,
4173 L2CAP_CONF_SUCCESS, flags), rsp);
4177 /* Complete config. */
4178 len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
4180 l2cap_send_disconn_req(chan, ECONNRESET);
4184 chan->ident = cmd->ident;
4185 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4186 chan->num_conf_rsp++;
4188 /* Reset config buffer. */
4191 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4194 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4195 set_default_fcs(chan);
4197 if (chan->mode == L2CAP_MODE_ERTM ||
4198 chan->mode == L2CAP_MODE_STREAMING)
4199 err = l2cap_ertm_init(chan);
4202 l2cap_send_disconn_req(chan, -err);
4204 l2cap_chan_ready(chan);
4209 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4211 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4212 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4213 chan->num_conf_req++;
4216 /* Got Conf Rsp PENDING from remote side and assume we sent
4217 Conf Rsp PENDING in the code above */
4218 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4219 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4221 /* check compatibility */
4223 /* Send rsp for BR/EDR channel */
4225 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4227 chan->ident = cmd->ident;
4231 l2cap_chan_unlock(chan);
4232 l2cap_chan_put(chan);
4236 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4237 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4240 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4241 u16 scid, flags, result;
4242 struct l2cap_chan *chan;
4243 int len = cmd_len - sizeof(*rsp);
4246 if (cmd_len < sizeof(*rsp))
4249 scid = __le16_to_cpu(rsp->scid);
4250 flags = __le16_to_cpu(rsp->flags);
4251 result = __le16_to_cpu(rsp->result);
4253 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4256 chan = l2cap_get_chan_by_scid(conn, scid);
4261 case L2CAP_CONF_SUCCESS:
4262 l2cap_conf_rfc_get(chan, rsp->data, len);
4263 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4266 case L2CAP_CONF_PENDING:
4267 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4269 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4272 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4273 buf, sizeof(buf), &result);
4275 l2cap_send_disconn_req(chan, ECONNRESET);
4279 if (!chan->hs_hcon) {
4280 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4283 if (l2cap_check_efs(chan)) {
4284 amp_create_logical_link(chan);
4285 chan->ident = cmd->ident;
4291 case L2CAP_CONF_UNACCEPT:
4292 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4295 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4296 l2cap_send_disconn_req(chan, ECONNRESET);
4300 /* throw out any old stored conf requests */
4301 result = L2CAP_CONF_SUCCESS;
4302 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4303 req, sizeof(req), &result);
4305 l2cap_send_disconn_req(chan, ECONNRESET);
4309 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4310 L2CAP_CONF_REQ, len, req);
4311 chan->num_conf_req++;
4312 if (result != L2CAP_CONF_SUCCESS)
4318 l2cap_chan_set_err(chan, ECONNRESET);
4320 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4321 l2cap_send_disconn_req(chan, ECONNRESET);
4325 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4328 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4330 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4331 set_default_fcs(chan);
4333 if (chan->mode == L2CAP_MODE_ERTM ||
4334 chan->mode == L2CAP_MODE_STREAMING)
4335 err = l2cap_ertm_init(chan);
4338 l2cap_send_disconn_req(chan, -err);
4340 l2cap_chan_ready(chan);
4344 l2cap_chan_unlock(chan);
4345 l2cap_chan_put(chan);
/* Handle an incoming Disconnection Request: look up the channel by the
 * peer's destination CID, echo a Disconnection Response, then shut down
 * and delete the channel.  A CID that cannot be found is answered with
 * a Command Reject (invalid CID).
 * NOTE(review): length-check failure path and some locals are elided in
 * this excerpt.
 */
4349 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4350 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4353 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4354 struct l2cap_disconn_rsp rsp;
4356 struct l2cap_chan *chan;
4358 if (cmd_len != sizeof(*req))
4361 scid = __le16_to_cpu(req->scid);
4362 dcid = __le16_to_cpu(req->dcid);
4364 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4366 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our scid, so look up by dcid. */
4368 chan = __l2cap_get_chan_by_scid(conn, dcid);
4370 mutex_unlock(&conn->chan_lock);
4371 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
/* Hold + lock so the channel survives while we respond and delete it. */
4375 l2cap_chan_hold(chan);
4376 l2cap_chan_lock(chan);
4378 rsp.dcid = cpu_to_le16(chan->scid);
4379 rsp.scid = cpu_to_le16(chan->dcid);
4380 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4382 chan->ops->set_shutdown(chan);
4384 l2cap_chan_del(chan, ECONNRESET);
4386 chan->ops->close(chan);
4388 l2cap_chan_unlock(chan);
4389 l2cap_chan_put(chan);
4391 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Disconnection Response: find the channel by our
 * source CID and, if it is actually waiting in BT_DISCONN state, delete
 * and close it.  A response for a channel in any other state is ignored
 * (unlock/put and bail).
 */
4396 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4397 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4400 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4402 struct l2cap_chan *chan;
4404 if (cmd_len != sizeof(*rsp))
4407 scid = __le16_to_cpu(rsp->scid);
4408 dcid = __le16_to_cpu(rsp->dcid);
4410 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4412 mutex_lock(&conn->chan_lock);
4414 chan = __l2cap_get_chan_by_scid(conn, scid);
4416 mutex_unlock(&conn->chan_lock);
4420 l2cap_chan_hold(chan);
4421 l2cap_chan_lock(chan);
/* Only act on the response if we initiated the disconnect. */
4423 if (chan->state != BT_DISCONN) {
4424 l2cap_chan_unlock(chan);
4425 l2cap_chan_put(chan);
4426 mutex_unlock(&conn->chan_lock);
/* err 0: clean, expected teardown. */
4430 l2cap_chan_del(chan, 0);
4432 chan->ops->close(chan);
4434 l2cap_chan_unlock(chan);
4435 l2cap_chan_put(chan);
4437 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Information Request.  Supported info types are the
 * feature mask and the fixed-channel map; anything else is answered
 * with result "not supported".  All three branches reply on the same
 * command ident with an L2CAP_INFO_RSP.
 */
4442 static inline int l2cap_information_req(struct l2cap_conn *conn,
4443 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4446 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4449 if (cmd_len != sizeof(*req))
4452 type = __le16_to_cpu(req->type);
4454 BT_DBG("type 0x%4.4x", type);
4456 if (type == L2CAP_IT_FEAT_MASK) {
/* Start from the static feature mask and add run-time features. */
4458 u32 feat_mask = l2cap_feat_mask;
4459 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4460 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4461 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4463 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Extended flow spec / window only when A2MP is locally enabled. */
4465 if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4466 feat_mask |= L2CAP_FEAT_EXT_FLOW
4467 | L2CAP_FEAT_EXT_WINDOW;
4469 put_unaligned_le32(feat_mask, rsp->data);
4470 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4472 } else if (type == L2CAP_IT_FIXED_CHAN) {
4474 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4476 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4477 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Byte 0 carries our fixed-channel bitmap; bytes 1-7 are zero. */
4478 rsp->data[0] = conn->local_fixed_chan;
4479 memset(rsp->data + 1, 0, 7);
4480 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4483 struct l2cap_info_rsp rsp;
4484 rsp.type = cpu_to_le16(type);
4485 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4486 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an incoming Information Response to a request we sent.
 * Validates the ident against conn->info_ident (info req/rsp are not
 * bound to a channel), cancels the info timer, records the returned
 * feature mask or fixed-channel map, optionally chains a follow-up
 * fixed-channel request, and finally kicks l2cap_conn_start().
 */
4493 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4494 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4497 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4500 if (cmd_len < sizeof(*rsp))
4503 type = __le16_to_cpu(rsp->type);
4504 result = __le16_to_cpu(rsp->result);
4506 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4508 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4509 if (cmd->ident != conn->info_ident ||
4510 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4513 cancel_delayed_work(&conn->info_timer);
/* Peer refused: mark the exchange done and start pending channels. */
4515 if (result != L2CAP_IR_SUCCESS) {
4516 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4517 conn->info_ident = 0;
4519 l2cap_conn_start(conn);
4525 case L2CAP_IT_FEAT_MASK:
4526 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer advertises fixed channels: query its fixed-channel map next. */
4528 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4529 struct l2cap_info_req req;
4530 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4532 conn->info_ident = l2cap_get_ident(conn);
4534 l2cap_send_cmd(conn, conn->info_ident,
4535 L2CAP_INFO_REQ, sizeof(req), &req);
4537 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4538 conn->info_ident = 0;
4540 l2cap_conn_start(conn);
4544 case L2CAP_IT_FIXED_CHAN:
4545 conn->remote_fixed_chan = rsp->data[0];
4546 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4547 conn->info_ident = 0;
4549 l2cap_conn_start(conn);
/* Handle an incoming Create Channel Request (AMP).  amp_id 0 (BR/EDR)
 * is treated as a normal connect; otherwise the AMP controller id is
 * validated (must be an HCI_AMP device that is up) and the resulting
 * BR/EDR channel is associated with the AMP manager and high-speed
 * hci_conn.  Invalid controller ids get a CREATE_CHAN_RSP with
 * L2CAP_CR_BAD_AMP.
 * NOTE(review): hci_dev_put()/error-goto lines are elided in this
 * excerpt; the visible hci_dev_get() must be balanced upstream.
 */
4556 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4557 struct l2cap_cmd_hdr *cmd,
4558 u16 cmd_len, void *data)
4560 struct l2cap_create_chan_req *req = data;
4561 struct l2cap_create_chan_rsp rsp;
4562 struct l2cap_chan *chan;
4563 struct hci_dev *hdev;
4566 if (cmd_len != sizeof(*req))
/* A2MP must be enabled locally to accept AMP channel creation. */
4569 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4572 psm = le16_to_cpu(req->psm);
4573 scid = le16_to_cpu(req->scid);
4575 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4577 /* For controller id 0 make BR/EDR connection */
4578 if (req->amp_id == AMP_ID_BREDR) {
4579 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4584 /* Validate AMP controller id */
4585 hdev = hci_dev_get(req->amp_id);
4589 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4594 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4597 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4598 struct hci_conn *hs_hcon;
4600 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
/* No high-speed link to the peer: reject the new channel's CID. */
4604 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4609 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4611 mgr->bredr_chan = chan;
4612 chan->hs_hcon = hs_hcon;
/* AMP data path: link-layer handles integrity, no L2CAP FCS. */
4613 chan->fcs = L2CAP_FCS_NONE;
4614 conn->mtu = hdev->block_mtu;
/* Error reply: bad AMP controller id. */
4623 rsp.scid = cpu_to_le16(scid);
4624 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4625 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4627 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Send a Move Channel Request for @chan towards @dest_amp_id and arm
 * the move timeout.  The allocated ident is remembered in chan->ident
 * so the matching response can be correlated.
 */
4633 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4635 struct l2cap_move_chan_req req;
4638 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4640 ident = l2cap_get_ident(chan->conn);
4641 chan->ident = ident;
4643 req.icid = cpu_to_le16(chan->scid);
4644 req.dest_amp_id = dest_amp_id;
4646 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4649 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send a Move Channel Response with @result, echoing the requester's
 * ident saved in chan->ident and identifying the channel by its dcid.
 */
4652 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4654 struct l2cap_move_chan_rsp rsp;
4656 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4658 rsp.icid = cpu_to_le16(chan->dcid);
4659 rsp.result = cpu_to_le16(result);
4661 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/* Send a Move Channel Confirmation with @result under a fresh ident
 * (stored in chan->ident for the confirm-response) and re-arm the move
 * timeout while waiting for that response.
 */
4665 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4667 struct l2cap_move_chan_cfm cfm;
4669 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4671 chan->ident = l2cap_get_ident(chan->conn);
4673 cfm.icid = cpu_to_le16(chan->scid);
4674 cfm.result = cpu_to_le16(result);
4676 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4679 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an UNCONFIRMED Move Channel Confirmation for a bare @icid when
 * no channel object is available (e.g. lookup failed); uses a fresh
 * ident since there is no chan->ident to reuse.
 */
4682 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4684 struct l2cap_move_chan_cfm cfm;
4686 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4688 cfm.icid = cpu_to_le16(icid);
4689 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4691 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/* Send a Move Channel Confirmation Response for @icid, echoing the
 * confirmation's @ident.
 */
4695 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4698 struct l2cap_move_chan_cfm_rsp rsp;
4700 BT_DBG("icid 0x%4.4x", icid);
4702 rsp.icid = cpu_to_le16(icid);
4703 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Drop the channel's references to its high-speed logical link; the
 * actual link teardown is still a placeholder (see comment below).
 */
4706 static void __release_logical_link(struct l2cap_chan *chan)
4708 chan->hs_hchan = NULL;
4709 chan->hs_hcon = NULL;
4711 /* Placeholder - release the logical link */
/* React to a failed logical-link setup.  If the channel never reached
 * BT_CONNECTED the channel creation itself failed, so disconnect.
 * Otherwise a channel move was in progress: the responder rejects the
 * move, the initiator either cleans up (if only pending/success
 * responses were seen) or confirms the abort with UNCONFIRMED.
 */
4714 static void l2cap_logical_fail(struct l2cap_chan *chan)
4716 /* Logical link setup failed */
4717 if (chan->state != BT_CONNECTED) {
4718 /* Create channel failure, disconnect */
4719 l2cap_send_disconn_req(chan, ECONNRESET);
4723 switch (chan->move_role) {
4724 case L2CAP_MOVE_ROLE_RESPONDER:
4725 l2cap_move_done(chan);
4726 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4728 case L2CAP_MOVE_ROLE_INITIATOR:
4729 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4730 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4731 /* Remote has only sent pending or
4732 * success responses, clean up
4734 l2cap_move_done(chan);
4737 /* Other amp move states imply that the move
4738 * has already aborted
4740 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Complete channel creation once the AMP logical link is up: attach
 * the hci_chan, send the deferred EFS configure response, and if the
 * input side of configuration already finished, initialize ERTM and
 * mark the channel ready.
 */
4745 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4746 struct hci_chan *hchan)
4748 struct l2cap_conf_rsp rsp;
4750 chan->hs_hchan = hchan;
4751 chan->hs_hcon->l2cap_data = chan->conn;
/* chan->ident still holds the config request's ident from earlier. */
4753 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4755 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4758 set_default_fcs(chan);
4760 err = l2cap_ertm_init(chan);
4762 l2cap_send_disconn_req(chan, -err);
4764 l2cap_chan_ready(chan);
/* Advance a channel move once its logical link is established.  The
 * next step depends on move_state and role: either keep waiting for
 * the peer's success response, defer on local busy, send the move
 * confirmation (initiator), or send the success response (responder).
 * Any unexpected state releases the logical link and stabilizes.
 */
4768 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4769 struct hci_chan *hchan)
4771 chan->hs_hcon = hchan->conn;
4772 chan->hs_hcon->l2cap_data = chan->conn;
4774 BT_DBG("move_state %d", chan->move_state);
4776 switch (chan->move_state) {
4777 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4778 /* Move confirm will be sent after a success
4779 * response is received
4781 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4783 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4784 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4785 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4786 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4787 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4788 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4789 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4790 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4791 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4795 /* Move was not in expected state, free the channel */
4796 __release_logical_link(chan);
4798 chan->move_state = L2CAP_MOVE_STABLE;
4802 /* Call with chan locked */
/* Logical-link setup completion callback.  On failure, run the failure
 * path and drop the link references.  On success, a channel that is
 * not yet connected finishes creation (unless it lives on BR/EDR, in
 * which case the logical link is ignored); a connected channel
 * finishes its move.
 */
4803 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4806 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4809 l2cap_logical_fail(chan);
4810 __release_logical_link(chan);
4814 if (chan->state != BT_CONNECTED) {
4815 /* Ignore logical link if channel is on BR/EDR */
4816 if (chan->local_amp_id != AMP_ID_BREDR)
4817 l2cap_logical_finish_create(chan, hchan);
4819 l2cap_logical_finish_move(chan, hchan);
/* Begin moving @chan to the other controller type.  From BR/EDR the
 * move requires an AMP-preferred channel policy and starts with
 * physical link preparation (still a placeholder).  From AMP the move
 * back to BR/EDR sends the Move Channel Request immediately (dest
 * amp_id 0 = BR/EDR).
 */
4823 void l2cap_move_start(struct l2cap_chan *chan)
4825 BT_DBG("chan %p", chan);
4827 if (chan->local_amp_id == AMP_ID_BREDR) {
4828 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4830 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4831 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4832 /* Placeholder - start physical link setup */
4834 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4835 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4837 l2cap_move_setup(chan);
4838 l2cap_send_move_chan_req(chan, 0);
/* Continue channel creation after physical-link setup.  For an
 * outgoing channel (BT_CONNECT) either proceed with a Create Channel
 * Request on the AMP or fall back to a plain BR/EDR Connect Request.
 * For an incoming channel, answer the peer's Create Channel Request
 * with success or L2CAP_CR_NO_MEM and, on success, move to BT_CONFIG
 * and kick off configuration.
 */
4842 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4843 u8 local_amp_id, u8 remote_amp_id)
4845 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4846 local_amp_id, remote_amp_id);
/* AMP channels do not use an L2CAP frame check sequence. */
4848 chan->fcs = L2CAP_FCS_NONE;
4850 /* Outgoing channel on AMP */
4851 if (chan->state == BT_CONNECT) {
4852 if (result == L2CAP_CR_SUCCESS) {
4853 chan->local_amp_id = local_amp_id;
4854 l2cap_send_create_chan_req(chan, remote_amp_id);
4856 /* Revert to BR/EDR connect */
4857 l2cap_send_conn_req(chan);
4863 /* Incoming channel on AMP */
4864 if (__l2cap_no_conn_pending(chan)) {
4865 struct l2cap_conn_rsp rsp;
4867 rsp.scid = cpu_to_le16(chan->dcid);
4868 rsp.dcid = cpu_to_le16(chan->scid);
4870 if (result == L2CAP_CR_SUCCESS) {
4871 /* Send successful response */
4872 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4873 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4875 /* Send negative response */
4876 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4877 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4880 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4883 if (result == L2CAP_CR_SUCCESS) {
4884 l2cap_state_change(chan, BT_CONFIG);
4885 set_bit(CONF_REQ_SENT, &chan->conf_state);
4886 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4888 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4889 chan->num_conf_req++;
/* Initiator-side continuation after physical link prep: record the
 * move target, enter WAIT_RSP and send the Move Channel Request.
 */
4894 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4897 l2cap_move_setup(chan);
4898 chan->move_id = local_amp_id;
4899 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4901 l2cap_send_move_chan_req(chan, remote_amp_id);
/* Responder-side continuation: if the logical link is already up,
 * attach it, answer the move with SUCCESS and feed the logical-link
 * confirmation; otherwise wait for the link (WAIT_LOGICAL_CFM) or
 * reject the move with NOT_ALLOWED when no link is available.
 * NOTE(review): hchan is still a placeholder (always NULL here) until
 * the hci_chan lookup is implemented.
 */
4904 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4906 struct hci_chan *hchan = NULL;
4908 /* Placeholder - get hci_chan for logical link */
4911 if (hchan->state == BT_CONNECTED) {
4912 /* Logical link is ready to go */
4913 chan->hs_hcon = hchan->conn;
4914 chan->hs_hcon->l2cap_data = chan->conn;
4915 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4916 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4918 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4920 /* Wait for logical link to be ready */
4921 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4924 /* Logical link not available */
4925 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort an in-progress move.  A responder still owes the peer a Move
 * Channel Response (BAD_ID for -EINVAL, otherwise NOT_ALLOWED); then
 * reset role/state to stable and restart ERTM transmission.
 */
4929 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4931 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4933 if (result == -EINVAL)
4934 rsp_result = L2CAP_MR_BAD_ID;
4936 rsp_result = L2CAP_MR_NOT_ALLOWED;
4938 l2cap_send_move_chan_rsp(chan, rsp_result);
4941 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4942 chan->move_state = L2CAP_MOVE_STABLE;
4944 /* Restart data transmission */
4945 l2cap_ertm_send(chan);
4948 /* Invoke with locked chan */
/* Physical-link setup completion.  Disconnecting/closed channels are
 * ignored.  A channel not yet connected continues creation; a failed
 * result cancels the move; otherwise the move proceeds per role
 * (initiator/responder), with unknown roles cancelling.
 */
4949 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4951 u8 local_amp_id = chan->local_amp_id;
4952 u8 remote_amp_id = chan->remote_amp_id;
4954 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4955 chan, result, local_amp_id, remote_amp_id);
4957 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
4960 if (chan->state != BT_CONNECTED) {
4961 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4962 } else if (result != L2CAP_MR_SUCCESS) {
4963 l2cap_do_move_cancel(chan, result);
4965 switch (chan->move_role) {
4966 case L2CAP_MOVE_ROLE_INITIATOR:
4967 l2cap_do_move_initiate(chan, local_amp_id,
4970 case L2CAP_MOVE_ROLE_RESPONDER:
4971 l2cap_do_move_respond(chan, result);
4974 l2cap_do_move_cancel(chan, result);
/* Handle an incoming Move Channel Request.  Validates that A2MP is
 * enabled, that the channel exists and is eligible (dynamic CID, not
 * BR/EDR-only policy, ERTM or streaming mode), that the destination
 * controller differs and (for AMP targets) exists and is up, and that
 * we have not "won" a move collision.  On acceptance, become move
 * responder and either move to BR/EDR directly (SUCCESS/PEND) or start
 * physical-link acceptance for an AMP target (PEND).  Always answers
 * with a Move Channel Response carrying @result.
 * NOTE(review): hci_dev_put() and some error-path lines are elided in
 * this excerpt.
 */
4980 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4981 struct l2cap_cmd_hdr *cmd,
4982 u16 cmd_len, void *data)
4984 struct l2cap_move_chan_req *req = data;
4985 struct l2cap_move_chan_rsp rsp;
4986 struct l2cap_chan *chan;
4988 u16 result = L2CAP_MR_NOT_ALLOWED;
4990 if (cmd_len != sizeof(*req))
4993 icid = le16_to_cpu(req->icid);
4995 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4997 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
/* Peer's icid is our dcid; unknown channels get NOT_ALLOWED. */
5000 chan = l2cap_get_chan_by_dcid(conn, icid);
5002 rsp.icid = cpu_to_le16(icid);
5003 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
5004 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
/* Remember the requester's ident for the eventual response. */
5009 chan->ident = cmd->ident;
5011 if (chan->scid < L2CAP_CID_DYN_START ||
5012 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
5013 (chan->mode != L2CAP_MODE_ERTM &&
5014 chan->mode != L2CAP_MODE_STREAMING)) {
5015 result = L2CAP_MR_NOT_ALLOWED;
5016 goto send_move_response;
5019 if (chan->local_amp_id == req->dest_amp_id) {
5020 result = L2CAP_MR_SAME_ID;
5021 goto send_move_response;
5024 if (req->dest_amp_id != AMP_ID_BREDR) {
5025 struct hci_dev *hdev;
5026 hdev = hci_dev_get(req->dest_amp_id);
5027 if (!hdev || hdev->dev_type != HCI_AMP ||
5028 !test_bit(HCI_UP, &hdev->flags)) {
5032 result = L2CAP_MR_BAD_ID;
5033 goto send_move_response;
5038 /* Detect a move collision. Only send a collision response
5039 * if this side has "lost", otherwise proceed with the move.
5040 * The winner has the larger bd_addr.
5042 if ((__chan_is_moving(chan) ||
5043 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5044 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5045 result = L2CAP_MR_COLLISION;
5046 goto send_move_response;
5049 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5050 l2cap_move_setup(chan);
5051 chan->move_id = req->dest_amp_id;
5054 if (req->dest_amp_id == AMP_ID_BREDR) {
5055 /* Moving to BR/EDR */
5056 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5057 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5058 result = L2CAP_MR_PEND;
5060 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5061 result = L2CAP_MR_SUCCESS;
5064 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5065 /* Placeholder - uncomment when amp functions are available */
5066 /*amp_accept_physical(chan, req->dest_amp_id);*/
5067 result = L2CAP_MR_PEND;
5071 l2cap_send_move_chan_rsp(chan, result);
5073 l2cap_chan_unlock(chan);
5074 l2cap_chan_put(chan);
/* Continue an initiator-side move after a SUCCESS or PEND Move Channel
 * Response.  Adjusts the channel timer (ERTX on PEND), then advances
 * the state machine: wait for the logical link, defer on local busy,
 * or send the Move Channel Confirmation once both the response and the
 * logical link are in place.  Unknown channels are answered with an
 * UNCONFIRMED confirmation by icid; unexpected states abort the move.
 * NOTE(review): the hci_chan lookup is still a placeholder, so hchan
 * stays NULL in the visible code.
 */
5079 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5081 struct l2cap_chan *chan;
5082 struct hci_chan *hchan = NULL;
5084 chan = l2cap_get_chan_by_scid(conn, icid);
5086 l2cap_send_move_chan_cfm_icid(conn, icid);
5090 __clear_chan_timer(chan);
5091 if (result == L2CAP_MR_PEND)
5092 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5094 switch (chan->move_state) {
5095 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5096 /* Move confirm will be sent when logical link
5099 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5101 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5102 if (result == L2CAP_MR_PEND) {
5104 } else if (test_bit(CONN_LOCAL_BUSY,
5105 &chan->conn_state)) {
5106 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5108 /* Logical link is up or moving to BR/EDR,
5111 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5112 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5115 case L2CAP_MOVE_WAIT_RSP:
5117 if (result == L2CAP_MR_SUCCESS) {
5118 /* Remote is ready, send confirm immediately
5119 * after logical link is ready
5121 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5123 /* Both logical link and move success
5124 * are required to confirm
5126 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5129 /* Placeholder - get hci_chan for logical link */
5131 /* Logical link not available */
5132 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5136 /* If the logical link is not yet connected, do not
5137 * send confirmation.
5139 if (hchan->state != BT_CONNECTED)
5142 /* Logical link is already ready to go */
5144 chan->hs_hcon = hchan->conn;
5145 chan->hs_hcon->l2cap_data = chan->conn;
5147 if (result == L2CAP_MR_SUCCESS) {
5148 /* Can confirm now */
5149 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5151 /* Now only need move success
5154 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5157 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5160 /* Any other amp move state means the move failed. */
5161 chan->move_id = chan->local_amp_id;
5162 l2cap_move_done(chan);
5163 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5166 l2cap_chan_unlock(chan);
5167 l2cap_chan_put(chan);
/* Handle a failed Move Channel Response.  The channel is looked up by
 * the pending command ident; if absent, an UNCONFIRMED confirmation is
 * sent against the icid as a best guess.  An initiator that lost a
 * collision switches to responder, otherwise the move is cancelled and
 * an UNCONFIRMED confirmation is sent.
 */
5170 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5173 struct l2cap_chan *chan;
5175 chan = l2cap_get_chan_by_ident(conn, ident);
5177 /* Could not locate channel, icid is best guess */
5178 l2cap_send_move_chan_cfm_icid(conn, icid);
5182 __clear_chan_timer(chan);
5184 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5185 if (result == L2CAP_MR_COLLISION) {
5186 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5188 /* Cleanup - cancel move */
5189 chan->move_id = chan->local_amp_id;
5190 l2cap_move_done(chan);
5194 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5196 l2cap_chan_unlock(chan);
5197 l2cap_chan_put(chan);
/* Handle an incoming Move Channel Response: SUCCESS/PEND continues the
 * move, any other result routes to the failure path.
 */
5200 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5201 struct l2cap_cmd_hdr *cmd,
5202 u16 cmd_len, void *data)
5204 struct l2cap_move_chan_rsp *rsp = data;
5207 if (cmd_len != sizeof(*rsp))
5210 icid = le16_to_cpu(rsp->icid);
5211 result = le16_to_cpu(rsp->result);
5213 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5215 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5216 l2cap_move_continue(conn, icid, result);
5218 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle an incoming Move Channel Confirmation (responder side).  A
 * confirmation response is always sent, even when the icid is unknown.
 * When the channel is waiting in WAIT_CONFIRM, a CONFIRMED result
 * commits the move (releasing the logical link if the new controller
 * is BR/EDR); otherwise the move target is reverted before finishing.
 */
5223 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5224 struct l2cap_cmd_hdr *cmd,
5225 u16 cmd_len, void *data)
5227 struct l2cap_move_chan_cfm *cfm = data;
5228 struct l2cap_chan *chan;
5231 if (cmd_len != sizeof(*cfm))
5234 icid = le16_to_cpu(cfm->icid);
5235 result = le16_to_cpu(cfm->result);
5237 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5239 chan = l2cap_get_chan_by_dcid(conn, icid);
5241 /* Spec requires a response even if the icid was not found */
5242 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5246 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5247 if (result == L2CAP_MC_CONFIRMED) {
5248 chan->local_amp_id = chan->move_id;
5249 if (chan->local_amp_id == AMP_ID_BREDR)
5250 __release_logical_link(chan);
/* Unconfirmed: stay on the current controller. */
5252 chan->move_id = chan->local_amp_id;
5255 l2cap_move_done(chan);
5258 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5260 l2cap_chan_unlock(chan);
5261 l2cap_chan_put(chan);
/* Handle an incoming Move Channel Confirmation Response (initiator
 * side).  Clears the move timer; if the channel was waiting for this
 * response, commits the new controller id, releases the logical link
 * when landing on BR/EDR, and completes the move.
 */
5266 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5267 struct l2cap_cmd_hdr *cmd,
5268 u16 cmd_len, void *data)
5270 struct l2cap_move_chan_cfm_rsp *rsp = data;
5271 struct l2cap_chan *chan;
5274 if (cmd_len != sizeof(*rsp))
5277 icid = le16_to_cpu(rsp->icid);
5279 BT_DBG("icid 0x%4.4x", icid);
5281 chan = l2cap_get_chan_by_scid(conn, icid);
5285 __clear_chan_timer(chan);
5287 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5288 chan->local_amp_id = chan->move_id;
5290 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5291 __release_logical_link(chan);
5293 l2cap_move_done(chan);
5296 l2cap_chan_unlock(chan);
5297 l2cap_chan_put(chan);
/* Handle an LE Connection Parameter Update Request.  Only valid when
 * we are the LE master; validates the proposed parameters via
 * hci_check_conn_params(), answers with accepted/rejected, and on
 * acceptance applies the update and notifies the management interface
 * with the store hint.
 */
5302 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5303 struct l2cap_cmd_hdr *cmd,
5304 u16 cmd_len, u8 *data)
5306 struct hci_conn *hcon = conn->hcon;
5307 struct l2cap_conn_param_update_req *req;
5308 struct l2cap_conn_param_update_rsp rsp;
5309 u16 min, max, latency, to_multiplier;
/* Only the master may be asked to update connection parameters. */
5312 if (hcon->role != HCI_ROLE_MASTER)
5315 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5318 req = (struct l2cap_conn_param_update_req *) data;
5319 min = __le16_to_cpu(req->min);
5320 max = __le16_to_cpu(req->max);
5321 latency = __le16_to_cpu(req->latency);
5322 to_multiplier = __le16_to_cpu(req->to_multiplier);
5324 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5325 min, max, latency, to_multiplier);
5327 memset(&rsp, 0, sizeof(rsp));
5329 err = hci_check_conn_params(min, max, latency, to_multiplier);
5331 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5333 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5335 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: push the update to the controller and inform mgmt. */
5341 store_hint = hci_le_conn_update(hcon, min, max, latency,
5343 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5344 store_hint, min, max, latency,
/* Handle an LE Credit Based Connection Response.  Sanity-checks the
 * returned dcid/mtu/mps (mtu and mps must be >= 23, dcid in the LE
 * dynamic range), finds the pending channel by ident, and on success
 * stores the peer parameters and marks the channel ready.  A security
 * failure retries at a higher security level via SMP unless MITM
 * protection is already in place; other results delete the channel.
 * NOTE(review): dcid/remote-mtu assignment lines are elided in this
 * excerpt.
 */
5352 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5353 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5356 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5357 struct hci_conn *hcon = conn->hcon;
5358 u16 dcid, mtu, mps, credits, result;
5359 struct l2cap_chan *chan;
5362 if (cmd_len < sizeof(*rsp))
5365 dcid = __le16_to_cpu(rsp->dcid);
5366 mtu = __le16_to_cpu(rsp->mtu);
5367 mps = __le16_to_cpu(rsp->mps);
5368 credits = __le16_to_cpu(rsp->credits);
5369 result = __le16_to_cpu(rsp->result);
/* 23 is the minimum legal LE CoC MTU/MPS; also bound-check the dcid. */
5371 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23 ||
5372 dcid < L2CAP_CID_DYN_START ||
5373 dcid > L2CAP_CID_LE_DYN_END))
5376 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5377 dcid, mtu, mps, credits, result);
5379 mutex_lock(&conn->chan_lock);
5381 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5389 l2cap_chan_lock(chan);
5392 case L2CAP_CR_SUCCESS:
/* A dcid already in use on this connection is a protocol error. */
5393 if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5401 chan->remote_mps = mps;
5402 chan->tx_credits = credits;
5403 l2cap_chan_ready(chan);
5406 case L2CAP_CR_AUTHENTICATION:
5407 case L2CAP_CR_ENCRYPTION:
5408 /* If we already have MITM protection we can't do
5411 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5412 l2cap_chan_del(chan, ECONNREFUSED);
5416 sec_level = hcon->sec_level + 1;
5417 if (chan->sec_level < sec_level)
5418 chan->sec_level = sec_level;
5420 /* We'll need to send a new Connect Request */
5421 clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5423 smp_conn_security(hcon, chan->sec_level);
5427 l2cap_chan_del(chan, ECONNREFUSED);
5431 l2cap_chan_unlock(chan);
5434 mutex_unlock(&conn->chan_lock);
/* Dispatch one BR/EDR signaling command to its handler based on
 * cmd->code.  Echo requests are answered directly by reflecting the
 * payload; unknown codes are logged (and, per the elided default path,
 * reported as an error to the caller).
 */
5439 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5440 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5445 switch (cmd->code) {
5446 case L2CAP_COMMAND_REJ:
5447 l2cap_command_rej(conn, cmd, cmd_len, data);
5450 case L2CAP_CONN_REQ:
5451 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5454 case L2CAP_CONN_RSP:
/* Connect and Create Channel responses share one handler. */
5455 case L2CAP_CREATE_CHAN_RSP:
5456 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5459 case L2CAP_CONF_REQ:
5460 err = l2cap_config_req(conn, cmd, cmd_len, data);
5463 case L2CAP_CONF_RSP:
5464 l2cap_config_rsp(conn, cmd, cmd_len, data);
5467 case L2CAP_DISCONN_REQ:
5468 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5471 case L2CAP_DISCONN_RSP:
5472 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5475 case L2CAP_ECHO_REQ:
/* Echo: reflect the request payload straight back. */
5476 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5479 case L2CAP_ECHO_RSP:
5482 case L2CAP_INFO_REQ:
5483 err = l2cap_information_req(conn, cmd, cmd_len, data);
5486 case L2CAP_INFO_RSP:
5487 l2cap_information_rsp(conn, cmd, cmd_len, data);
5490 case L2CAP_CREATE_CHAN_REQ:
5491 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5494 case L2CAP_MOVE_CHAN_REQ:
5495 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5498 case L2CAP_MOVE_CHAN_RSP:
5499 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5502 case L2CAP_MOVE_CHAN_CFM:
5503 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5506 case L2CAP_MOVE_CHAN_CFM_RSP:
5507 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5511 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Handle an incoming LE Credit Based Connection Request.  Validates
 * mtu/mps (>= 23) and the source CID range, finds a listening channel
 * for the PSM, checks SMP security, rejects duplicate source CIDs,
 * then creates the new channel, initializes LE flow control and either
 * defers (FLAG_DEFER_SETUP) or completes the connection.  Unless the
 * result is PEND, an LE_CONN_RSP with the outcome is sent at the end.
 * NOTE(review): psm extraction and some response-field setup lines are
 * elided in this excerpt.
 */
5519 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5520 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5523 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5524 struct l2cap_le_conn_rsp rsp;
5525 struct l2cap_chan *chan, *pchan;
5526 u16 dcid, scid, credits, mtu, mps;
5530 if (cmd_len != sizeof(*req))
5533 scid = __le16_to_cpu(req->scid);
5534 mtu = __le16_to_cpu(req->mtu);
5535 mps = __le16_to_cpu(req->mps);
/* 23 is the minimum legal LE CoC MTU/MPS. */
5540 if (mtu < 23 || mps < 23)
5543 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5546 /* Check if we have socket listening on psm */
5547 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5548 &conn->hcon->dst, LE_LINK);
5550 result = L2CAP_CR_BAD_PSM;
5555 mutex_lock(&conn->chan_lock);
5556 l2cap_chan_lock(pchan);
5558 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5560 result = L2CAP_CR_AUTHENTICATION;
5562 goto response_unlock;
5565 /* Check for valid dynamic CID range */
5566 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5567 result = L2CAP_CR_INVALID_SCID;
5569 goto response_unlock;
5572 /* Check if we already have channel with that dcid */
5573 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5574 result = L2CAP_CR_SCID_IN_USE;
5576 goto response_unlock;
5579 chan = pchan->ops->new_connection(pchan);
5581 result = L2CAP_CR_NO_MEM;
5582 goto response_unlock;
5585 l2cap_le_flowctl_init(chan);
5587 bacpy(&chan->src, &conn->hcon->src);
5588 bacpy(&chan->dst, &conn->hcon->dst);
5589 chan->src_type = bdaddr_src_type(conn->hcon);
5590 chan->dst_type = bdaddr_dst_type(conn->hcon);
5594 chan->remote_mps = mps;
5595 chan->tx_credits = __le16_to_cpu(req->credits);
5597 __l2cap_chan_add(conn, chan);
5599 credits = chan->rx_credits;
5601 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5603 chan->ident = cmd->ident;
5605 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5606 l2cap_state_change(chan, BT_CONNECT2);
5607 /* The following result value is actually not defined
5608 * for LE CoC but we use it to let the function know
5609 * that it should bail out after doing its cleanup
5610 * instead of sending a response.
5612 result = L2CAP_CR_PEND;
5613 chan->ops->defer(chan);
5615 l2cap_chan_ready(chan);
5616 result = L2CAP_CR_SUCCESS;
5620 l2cap_chan_unlock(pchan);
5621 mutex_unlock(&conn->chan_lock);
5622 l2cap_chan_put(pchan);
/* PEND means the response is deferred (see comment above). */
5624 if (result == L2CAP_CR_PEND)
5629 rsp.mtu = cpu_to_le16(chan->imtu);
5630 rsp.mps = cpu_to_le16(chan->mps);
5636 rsp.dcid = cpu_to_le16(dcid);
5637 rsp.credits = cpu_to_le16(credits);
5638 rsp.result = cpu_to_le16(result);
5640 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/* Handle an incoming LE Flow Control Credit packet: add the granted
 * credits to the channel's tx_credits, guarding against overflow past
 * LE_FLOWCTL_MAX_CREDITS (a violation disconnects the channel but
 * still returns 0 so no command reject is generated), then resume any
 * transmission that was stalled waiting for credits.
 */
5645 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5646 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5649 struct l2cap_le_credits *pkt;
5650 struct l2cap_chan *chan;
5651 u16 cid, credits, max_credits;
5653 if (cmd_len != sizeof(*pkt))
5656 pkt = (struct l2cap_le_credits *) data;
5657 cid = __le16_to_cpu(pkt->cid);
5658 credits = __le16_to_cpu(pkt->credits);
5660 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5662 chan = l2cap_get_chan_by_dcid(conn, cid);
/* Peer may never push our credit total past the protocol maximum. */
5666 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5667 if (credits > max_credits) {
5668 BT_ERR("LE credits overflow");
5669 l2cap_send_disconn_req(chan, ECONNRESET);
5671 /* Return 0 so that we don't trigger an unnecessary
5672 * command reject packet.
5677 chan->tx_credits += credits;
5679 /* Resume sending */
5680 l2cap_le_flowctl_send(chan);
5682 if (chan->tx_credits)
5683 chan->ops->resume(chan);
5686 l2cap_chan_unlock(chan);
5687 l2cap_chan_put(chan);
/* Handle a Command Reject on the LE signaling channel: if one of our
 * pending requests (matched by ident) was rejected, delete that
 * channel with ECONNREFUSED.
 */
5692 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5693 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5696 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5697 struct l2cap_chan *chan;
5699 if (cmd_len < sizeof(*rej))
5702 mutex_lock(&conn->chan_lock);
5704 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5708 l2cap_chan_lock(chan);
5709 l2cap_chan_del(chan, ECONNREFUSED);
5710 l2cap_chan_unlock(chan);
5713 mutex_unlock(&conn->chan_lock);
/* Dispatch a single LE signaling command to its handler based on the opcode.
 * Unknown opcodes are logged; the caller sends a Command Reject based on the
 * returned error (error propagation for several cases is in elided lines).
 */
5717 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5718 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5723 switch (cmd->code) {
5724 case L2CAP_COMMAND_REJ:
5725 l2cap_le_command_rej(conn, cmd, cmd_len, data);
5728 case L2CAP_CONN_PARAM_UPDATE_REQ:
5729 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5732 case L2CAP_CONN_PARAM_UPDATE_RSP:
5735 case L2CAP_LE_CONN_RSP:
5736 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5739 case L2CAP_LE_CONN_REQ:
5740 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5743 case L2CAP_LE_CREDITS:
5744 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5747 case L2CAP_DISCONN_REQ:
5748 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5751 case L2CAP_DISCONN_RSP:
5752 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5756 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process an skb received on the LE signaling channel. LE signaling carries
 * exactly one command per PDU: parse the command header, validate length and
 * ident, dispatch via l2cap_le_sig_cmd(), and answer failures with a
 * Command Reject (Not Understood).
 */
5764 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5765 struct sk_buff *skb)
5767 struct hci_conn *hcon = conn->hcon;
5768 struct l2cap_cmd_hdr *cmd;
/* LE signaling CID is only valid on an LE link. */
5772 if (hcon->type != LE_LINK)
5775 if (skb->len < L2CAP_CMD_HDR_SIZE)
5778 cmd = (void *) skb->data;
5779 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5781 len = le16_to_cpu(cmd->len);
5783 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* Declared length must cover the remaining payload exactly and ident 0
 * is reserved.
 */
5785 if (len != skb->len || !cmd->ident) {
5786 BT_DBG("corrupted command");
5790 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5792 struct l2cap_cmd_rej_unk rej;
5794 BT_ERR("Wrong link type (%d)", err);
5796 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5797 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Process an skb received on the BR/EDR signaling channel. Unlike LE, a
 * single PDU may contain several commands back to back, so iterate while a
 * full command header remains, dispatching each to l2cap_bredr_sig_cmd()
 * and rejecting failures with Command Reject (Not Understood).
 */
5805 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5806 struct sk_buff *skb)
5808 struct hci_conn *hcon = conn->hcon;
5809 u8 *data = skb->data;
5811 struct l2cap_cmd_hdr cmd;
/* Give raw (sniffer) sockets a copy of the signaling traffic. */
5814 l2cap_raw_recv(conn, skb);
5816 if (hcon->type != ACL_LINK)
5819 while (len >= L2CAP_CMD_HDR_SIZE) {
5821 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5822 data += L2CAP_CMD_HDR_SIZE;
5823 len -= L2CAP_CMD_HDR_SIZE;
5825 cmd_len = le16_to_cpu(cmd.len);
5827 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
/* Per-command length must fit in what is left; ident 0 is reserved. */
5830 if (cmd_len > len || !cmd.ident) {
5831 BT_DBG("corrupted command");
5835 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5837 struct l2cap_cmd_rej_unk rej;
5839 BT_ERR("Wrong link type (%d)", err);
5841 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5842 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify the CRC16 FCS trailer of an ERTM/streaming frame when the channel
 * uses L2CAP_FCS_CRC16. The FCS is trimmed off the skb, then the received
 * value is compared against a CRC computed over the (extended or enhanced)
 * header plus payload. Non-matching FCS is reported via the (elided) error
 * return so the caller can drop the frame.
 */
5854 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5856 u16 our_fcs, rcv_fcs;
/* Header size depends on whether extended control fields are in use. */
5859 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5860 hdr_size = L2CAP_EXT_HDR_SIZE;
5862 hdr_size = L2CAP_ENH_HDR_SIZE;
5864 if (chan->fcs == L2CAP_FCS_CRC16) {
/* After the trim, skb->data + skb->len points at the 2-byte FCS. */
5865 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5866 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5867 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5869 if (our_fcs != rcv_fcs)
/* Acknowledge the peer with whatever frame is appropriate: RNR when we are
 * locally busy, otherwise pending I-frames (which carry the F-bit), and an
 * RR as a fallback if no I-frame ended up carrying the F-bit.
 */
5875 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5877 struct l2cap_ctrl control;
5879 BT_DBG("chan %p", chan);
5881 memset(&control, 0, sizeof(control));
5884 control.reqseq = chan->buffer_seq;
5885 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5887 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5888 control.super = L2CAP_SUPER_RNR;
5889 l2cap_send_sframe(chan, &control);
/* Remote just left busy: restart the retransmission timer if frames
 * are still unacked.
 */
5892 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5893 chan->unacked_frames > 0)
5894 __set_retrans_timer(chan);
5896 /* Send pending iframes */
5897 l2cap_ertm_send(chan);
5899 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5900 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5901 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5904 control.super = L2CAP_SUPER_RR;
5905 l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's frag_list, tracked via *last_frag to avoid
 * walking the list, and update the parent skb's length accounting.
 */
5909 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5910 struct sk_buff **last_frag)
5912 /* skb->len reflects data in skb as well as all fragments
5913 * skb->data_len reflects only data in fragments
/* First fragment: start the frag_list on the parent skb. */
5915 if (!skb_has_frag_list(skb))
5916 skb_shinfo(skb)->frag_list = new_frag;
5918 new_frag->next = NULL;
5920 (*last_frag)->next = new_frag;
5921 *last_frag = new_frag;
5923 skb->len += new_frag->len;
5924 skb->data_len += new_frag->len;
5925 skb->truesize += new_frag->truesize;
/* Reassemble ERTM/streaming SDUs from SAR-tagged I-frames. Unsegmented
 * frames go straight to the recv callback; START frames pull the SDU length
 * and start chan->sdu; CONTINUE/END frames are appended via
 * append_skb_frag() and the complete SDU is delivered when the accumulated
 * length matches chan->sdu_len. Error cleanup frees the partial SDU.
 */
5928 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5929 struct l2cap_ctrl *control)
5933 switch (control->sar) {
5934 case L2CAP_SAR_UNSEGMENTED:
5938 err = chan->ops->recv(chan, skb);
5941 case L2CAP_SAR_START:
/* Start frame must at least carry the 2-byte SDU length field. */
5945 if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
5948 chan->sdu_len = get_unaligned_le16(skb->data);
5949 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* Peer claims an SDU bigger than our receive MTU: protocol error. */
5951 if (chan->sdu_len > chan->imtu) {
5956 if (skb->len >= chan->sdu_len)
5960 chan->sdu_last_frag = skb;
5966 case L2CAP_SAR_CONTINUE:
5970 append_skb_frag(chan->sdu, skb,
5971 &chan->sdu_last_frag);
/* Continuation must not already complete/overflow the SDU. */
5974 if (chan->sdu->len >= chan->sdu_len)
5984 append_skb_frag(chan->sdu, skb,
5985 &chan->sdu_last_frag);
/* END frame: total must now equal the announced SDU length. */
5988 if (chan->sdu->len != chan->sdu_len)
5991 err = chan->ops->recv(chan, chan->sdu);
5994 /* Reassembly complete */
5996 chan->sdu_last_frag = NULL;
/* Error path: drop any partially reassembled SDU. */
6004 kfree_skb(chan->sdu);
6006 chan->sdu_last_frag = NULL;
/* Placeholder hook invoked after an AMP channel move completes; body is
 * elided here — presumably a trivial success return. TODO confirm.
 */
6013 static int l2cap_resegment(struct l2cap_chan *chan)
/* Feed a local-busy transition (socket receive buffer full/empty) into the
 * ERTM transmit state machine. No-op for non-ERTM channels.
 */
6019 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6023 if (chan->mode != L2CAP_MODE_ERTM)
6026 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6027 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue: deliver consecutively numbered I-frames (starting
 * at buffer_seq) to reassembly until a sequence gap or local-busy stops us.
 * When the queue empties, return to the RECV state and ack the peer.
 */
6030 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6033 /* Pass sequential frames to l2cap_reassemble_sdu()
6034 * until a gap is encountered.
6037 BT_DBG("chan %p", chan);
6039 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6040 struct sk_buff *skb;
6041 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6042 chan->buffer_seq, skb_queue_len(&chan->srej_q));
6044 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6049 skb_unlink(skb, &chan->srej_q);
6050 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6051 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
6056 if (skb_queue_empty(&chan->srej_q)) {
6057 chan->rx_state = L2CAP_RX_STATE_RECV;
6058 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: validate reqseq, enforce the retry limit,
 * and retransmit the single requested frame. P/F-bit handling decides
 * whether the retransmission carries the F-bit and whether CONN_SREJ_ACT
 * bookkeeping applies (to avoid double retransmission of the same frame).
 */
6064 static void l2cap_handle_srej(struct l2cap_chan *chan,
6065 struct l2cap_ctrl *control)
6067 struct sk_buff *skb;
6069 BT_DBG("chan %p, control %p", chan, control);
/* SREJ for a frame we never sent: protocol violation, disconnect. */
6071 if (control->reqseq == chan->next_tx_seq) {
6072 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6073 l2cap_send_disconn_req(chan, ECONNRESET);
6077 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6080 BT_DBG("Seq %d not available for retransmission",
/* Too many retransmissions of this frame: give up on the link. */
6085 if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6086 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6087 l2cap_send_disconn_req(chan, ECONNRESET);
6091 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6093 if (control->poll) {
6094 l2cap_pass_to_tx(chan, control);
6096 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6097 l2cap_retransmit(chan, control);
6098 l2cap_ertm_send(chan);
6100 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6101 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6102 chan->srej_save_reqseq = control->reqseq;
6105 l2cap_pass_to_tx_fbit(chan, control);
6107 if (control->final) {
/* Only retransmit if this F-bit SREJ was not already answered. */
6108 if (chan->srej_save_reqseq != control->reqseq ||
6109 !test_and_clear_bit(CONN_SREJ_ACT,
6111 l2cap_retransmit(chan, control);
6113 l2cap_retransmit(chan, control);
6114 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6115 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6116 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: validate reqseq, enforce the retry limit,
 * then retransmit all unacked frames from reqseq onward. CONN_REJ_ACT
 * suppresses a duplicate retransmission when the F-bit variant arrives.
 */
6122 static void l2cap_handle_rej(struct l2cap_chan *chan,
6123 struct l2cap_ctrl *control)
6125 struct sk_buff *skb;
6127 BT_DBG("chan %p, control %p", chan, control);
/* REJ for a frame we never sent: protocol violation, disconnect. */
6129 if (control->reqseq == chan->next_tx_seq) {
6130 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6131 l2cap_send_disconn_req(chan, ECONNRESET);
6135 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6137 if (chan->max_tx && skb &&
6138 bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6139 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6140 l2cap_send_disconn_req(chan, ECONNRESET);
6144 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6146 l2cap_pass_to_tx(chan, control);
6148 if (control->final) {
6149 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6150 l2cap_retransmit_all(chan, control);
6152 l2cap_retransmit_all(chan, control);
6153 l2cap_ertm_send(chan);
/* Remember we already served this REJ while waiting for the F-bit. */
6154 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6155 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify an incoming I-frame's txseq relative to the receive window:
 * expected, duplicate, unexpected (gap -> SREJ needed), or invalid.
 * In SREJ_SENT state the SREJ list and queue are consulted first so
 * retransmitted frames are matched against outstanding SREJs.
 */
6159 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6161 BT_DBG("chan %p, txseq %d", chan, txseq);
6163 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6164 chan->expected_tx_seq);
6166 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
/* Out of the tx window while SREJs are outstanding. */
6167 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6169 /* See notes below regarding "double poll" and
6172 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6173 BT_DBG("Invalid/Ignore - after SREJ");
6174 return L2CAP_TXSEQ_INVALID_IGNORE;
6176 BT_DBG("Invalid - in window after SREJ sent");
6177 return L2CAP_TXSEQ_INVALID;
6181 if (chan->srej_list.head == txseq) {
6182 BT_DBG("Expected SREJ");
6183 return L2CAP_TXSEQ_EXPECTED_SREJ;
6186 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6187 BT_DBG("Duplicate SREJ - txseq already stored");
6188 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6191 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6192 BT_DBG("Unexpected SREJ - not requested");
6193 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6197 if (chan->expected_tx_seq == txseq) {
/* Even the expected seq is invalid if it falls outside the window. */
6198 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6200 BT_DBG("Invalid - txseq outside tx window");
6201 return L2CAP_TXSEQ_INVALID;
6204 return L2CAP_TXSEQ_EXPECTED;
/* txseq earlier than expected_tx_seq (modulo window) => already seen. */
6208 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6209 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6210 BT_DBG("Duplicate - expected_tx_seq later than txseq")
6211 return L2CAP_TXSEQ_DUPLICATE;
6214 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6215 /* A source of invalid packets is a "double poll" condition,
6216 * where delays cause us to send multiple poll packets. If
6217 * the remote stack receives and processes both polls,
6218 * sequence numbers can wrap around in such a way that a
6219 * resent frame has a sequence number that looks like new data
6220 * with a sequence gap. This would trigger an erroneous SREJ
6223 * Fortunately, this is impossible with a tx window that's
6224 * less than half of the maximum sequence number, which allows
6225 * invalid frames to be safely ignored.
6227 * With tx window sizes greater than half of the tx window
6228 * maximum, the frame is invalid and cannot be ignored. This
6229 * causes a disconnect.
6232 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6233 BT_DBG("Invalid/Ignore - txseq outside tx window");
6234 return L2CAP_TXSEQ_INVALID_IGNORE;
6236 BT_DBG("Invalid - txseq outside tx window");
6237 return L2CAP_TXSEQ_INVALID;
6240 BT_DBG("Unexpected - txseq indicates missing frames");
6241 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM receive state machine, RECV state. Expected I-frames are delivered
 * to reassembly; a sequence gap queues the frame and switches to SREJ_SENT;
 * RR/RNR/REJ/SREJ S-frames drive the transmit side. Frames not consumed
 * (skb_in_use stays false) are freed at the end.
 */
6245 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6246 struct l2cap_ctrl *control,
6247 struct sk_buff *skb, u8 event)
6250 bool skb_in_use = false;
6252 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6256 case L2CAP_EV_RECV_IFRAME:
6257 switch (l2cap_classify_txseq(chan, control->txseq)) {
6258 case L2CAP_TXSEQ_EXPECTED:
6259 l2cap_pass_to_tx(chan, control);
/* Local busy: drop the frame now; it will be SREJ-recovered later. */
6261 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6262 BT_DBG("Busy, discarding expected seq %d",
6267 chan->expected_tx_seq = __next_seq(chan,
6270 chan->buffer_seq = chan->expected_tx_seq;
6273 err = l2cap_reassemble_sdu(chan, skb, control);
6277 if (control->final) {
6278 if (!test_and_clear_bit(CONN_REJ_ACT,
6279 &chan->conn_state)) {
6281 l2cap_retransmit_all(chan, control);
6282 l2cap_ertm_send(chan);
6286 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6287 l2cap_send_ack(chan);
6289 case L2CAP_TXSEQ_UNEXPECTED:
6290 l2cap_pass_to_tx(chan, control);
6292 /* Can't issue SREJ frames in the local busy state.
6293 * Drop this frame, it will be seen as missing
6294 * when local busy is exited.
6296 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6297 BT_DBG("Busy, discarding unexpected seq %d",
6302 /* There was a gap in the sequence, so an SREJ
6303 * must be sent for each missing frame. The
6304 * current frame is stored for later use.
6306 skb_queue_tail(&chan->srej_q, skb);
6308 BT_DBG("Queued %p (queue len %d)", skb,
6309 skb_queue_len(&chan->srej_q));
6311 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6312 l2cap_seq_list_clear(&chan->srej_list);
6313 l2cap_send_srej(chan, control->txseq);
6315 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6317 case L2CAP_TXSEQ_DUPLICATE:
6318 l2cap_pass_to_tx(chan, control);
6320 case L2CAP_TXSEQ_INVALID_IGNORE:
6322 case L2CAP_TXSEQ_INVALID:
6324 l2cap_send_disconn_req(chan, ECONNRESET);
6328 case L2CAP_EV_RECV_RR:
6329 l2cap_pass_to_tx(chan, control);
6330 if (control->final) {
6331 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Don't retransmit during a channel move. */
6333 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6334 !__chan_is_moving(chan)) {
6336 l2cap_retransmit_all(chan, control);
6339 l2cap_ertm_send(chan);
6340 } else if (control->poll) {
6341 l2cap_send_i_or_rr_or_rnr(chan);
6343 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6344 &chan->conn_state) &&
6345 chan->unacked_frames)
6346 __set_retrans_timer(chan);
6348 l2cap_ertm_send(chan);
6351 case L2CAP_EV_RECV_RNR:
6352 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6353 l2cap_pass_to_tx(chan, control);
6354 if (control && control->poll) {
6355 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6356 l2cap_send_rr_or_rnr(chan, 0);
/* Peer is busy: stop retransmitting until it recovers. */
6358 __clear_retrans_timer(chan);
6359 l2cap_seq_list_clear(&chan->retrans_list);
6361 case L2CAP_EV_RECV_REJ:
6362 l2cap_handle_rej(chan, control);
6364 case L2CAP_EV_RECV_SREJ:
6365 l2cap_handle_srej(chan, control);
/* Free any frame that was not queued or consumed above. */
6371 if (skb && !skb_in_use) {
6372 BT_DBG("Freeing %p", skb);
/* ERTM receive state machine, SREJ_SENT state: SREJs are outstanding, so
 * incoming I-frames are buffered in srej_q and matched against the SREJ
 * list; when the head-of-list frame arrives, queued frames are drained via
 * l2cap_rx_queued_iframes(). S-frames are handled much as in RECV state.
 */
6379 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6380 struct l2cap_ctrl *control,
6381 struct sk_buff *skb, u8 event)
6384 u16 txseq = control->txseq;
6385 bool skb_in_use = false;
6387 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6391 case L2CAP_EV_RECV_IFRAME:
6392 switch (l2cap_classify_txseq(chan, txseq)) {
6393 case L2CAP_TXSEQ_EXPECTED:
6394 /* Keep frame for reassembly later */
6395 l2cap_pass_to_tx(chan, control);
6396 skb_queue_tail(&chan->srej_q, skb);
6398 BT_DBG("Queued %p (queue len %d)", skb,
6399 skb_queue_len(&chan->srej_q));
6401 chan->expected_tx_seq = __next_seq(chan, txseq);
6403 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* The retransmission we were waiting for arrived: pop it off the
 * SREJ list and try to drain the queue in order.
 */
6404 l2cap_seq_list_pop(&chan->srej_list);
6406 l2cap_pass_to_tx(chan, control);
6407 skb_queue_tail(&chan->srej_q, skb);
6409 BT_DBG("Queued %p (queue len %d)", skb,
6410 skb_queue_len(&chan->srej_q));
6412 err = l2cap_rx_queued_iframes(chan);
6417 case L2CAP_TXSEQ_UNEXPECTED:
6418 /* Got a frame that can't be reassembled yet.
6419 * Save it for later, and send SREJs to cover
6420 * the missing frames.
6422 skb_queue_tail(&chan->srej_q, skb);
6424 BT_DBG("Queued %p (queue len %d)", skb,
6425 skb_queue_len(&chan->srej_q));
6427 l2cap_pass_to_tx(chan, control);
6428 l2cap_send_srej(chan, control->txseq);
6430 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6431 /* This frame was requested with an SREJ, but
6432 * some expected retransmitted frames are
6433 * missing. Request retransmission of missing
6436 skb_queue_tail(&chan->srej_q, skb);
6438 BT_DBG("Queued %p (queue len %d)", skb,
6439 skb_queue_len(&chan->srej_q));
6441 l2cap_pass_to_tx(chan, control);
6442 l2cap_send_srej_list(chan, control->txseq);
6444 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6445 /* We've already queued this frame. Drop this copy. */
6446 l2cap_pass_to_tx(chan, control);
6448 case L2CAP_TXSEQ_DUPLICATE:
6449 /* Expecting a later sequence number, so this frame
6450 * was already received. Ignore it completely.
6453 case L2CAP_TXSEQ_INVALID_IGNORE:
6455 case L2CAP_TXSEQ_INVALID:
6457 l2cap_send_disconn_req(chan, ECONNRESET);
6461 case L2CAP_EV_RECV_RR:
6462 l2cap_pass_to_tx(chan, control);
6463 if (control->final) {
6464 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6466 if (!test_and_clear_bit(CONN_REJ_ACT,
6467 &chan->conn_state)) {
6469 l2cap_retransmit_all(chan, control);
6472 l2cap_ertm_send(chan);
6473 } else if (control->poll) {
6474 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6475 &chan->conn_state) &&
6476 chan->unacked_frames) {
6477 __set_retrans_timer(chan);
/* Answer the poll with the tail of the SREJ list (carries F-bit). */
6480 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6481 l2cap_send_srej_tail(chan);
6483 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6484 &chan->conn_state) &&
6485 chan->unacked_frames)
6486 __set_retrans_timer(chan);
6488 l2cap_send_ack(chan);
6491 case L2CAP_EV_RECV_RNR:
6492 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6493 l2cap_pass_to_tx(chan, control);
6494 if (control->poll) {
6495 l2cap_send_srej_tail(chan);
6497 struct l2cap_ctrl rr_control;
6498 memset(&rr_control, 0, sizeof(rr_control));
6499 rr_control.sframe = 1;
6500 rr_control.super = L2CAP_SUPER_RR;
6501 rr_control.reqseq = chan->buffer_seq;
6502 l2cap_send_sframe(chan, &rr_control);
6506 case L2CAP_EV_RECV_REJ:
6507 l2cap_handle_rej(chan, control);
6509 case L2CAP_EV_RECV_SREJ:
6510 l2cap_handle_srej(chan, control);
/* Free any frame that was not queued above. */
6514 if (skb && !skb_in_use) {
6515 BT_DBG("Freeing %p", skb);
/* Finalize an AMP channel move: return the RX state machine to RECV, pick
 * the MTU of the new transport (AMP block MTU vs. BR/EDR ACL MTU — the
 * selecting condition is in an elided line), then resegment pending data.
 */
6522 static int l2cap_finish_move(struct l2cap_chan *chan)
6524 BT_DBG("chan %p", chan);
6526 chan->rx_state = L2CAP_RX_STATE_RECV;
6529 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6531 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6533 return l2cap_resegment(chan);
/* ERTM receive state machine, WAIT_P state (waiting for a poll after a
 * channel move): on the P-bit, sync the transmit queue to the peer's
 * reqseq, rewind next_tx_seq, finish the move, answer with an F-bit frame,
 * then re-handle the triggering frame in RECV state.
 */
6536 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6537 struct l2cap_ctrl *control,
6538 struct sk_buff *skb, u8 event)
6542 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6548 l2cap_process_reqseq(chan, control->reqseq);
6550 if (!skb_queue_empty(&chan->tx_q))
6551 chan->tx_send_head = skb_peek(&chan->tx_q);
6553 chan->tx_send_head = NULL;
6555 /* Rewind next_tx_seq to the point expected
6558 chan->next_tx_seq = control->reqseq;
6559 chan->unacked_frames = 0;
6561 err = l2cap_finish_move(chan);
6565 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6566 l2cap_send_i_or_rr_or_rnr(chan);
/* S-frames were fully consumed by the F-bit response; only I-frames
 * still need normal RECV-state processing.
 */
6568 if (event == L2CAP_EV_RECV_IFRAME)
6571 return l2cap_rx_state_recv(chan, control, NULL, event);
/* ERTM receive state machine, WAIT_F state (waiting for the F-bit after a
 * channel move): once the F-bit arrives, sync the transmit side to the
 * peer's reqseq, adopt the new transport's MTU, resegment, and process the
 * frame in RECV state.
 */
6574 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6575 struct l2cap_ctrl *control,
6576 struct sk_buff *skb, u8 event)
/* Keep waiting until the frame carrying the F-bit shows up. */
6580 if (!control->final)
6583 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6585 chan->rx_state = L2CAP_RX_STATE_RECV;
6586 l2cap_process_reqseq(chan, control->reqseq);
6588 if (!skb_queue_empty(&chan->tx_q))
6589 chan->tx_send_head = skb_peek(&chan->tx_q);
6591 chan->tx_send_head = NULL;
6593 /* Rewind next_tx_seq to the point expected
6596 chan->next_tx_seq = control->reqseq;
6597 chan->unacked_frames = 0;
/* MTU choice between AMP block MTU and ACL MTU; the selecting
 * condition is in an elided line.
 */
6600 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6602 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6604 err = l2cap_resegment(chan);
6607 err = l2cap_rx_state_recv(chan, control, skb, event);
/* Return true if reqseq acknowledges only frames that are currently sent
 * but unacked, i.e. it lies within [expected_ack_seq, next_tx_seq] in
 * modulo sequence space.
 */
6612 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6614 /* Make sure reqseq is for a packet that has been sent but not acked */
6617 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6618 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive entry: validate reqseq, then dispatch the frame
 * to the handler for the channel's current RX state. An invalid reqseq is
 * a protocol violation and disconnects the channel.
 */
6621 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6622 struct sk_buff *skb, u8 event)
6626 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6627 control, skb, event, chan->rx_state);
6629 if (__valid_reqseq(chan, control->reqseq)) {
6630 switch (chan->rx_state) {
6631 case L2CAP_RX_STATE_RECV:
6632 err = l2cap_rx_state_recv(chan, control, skb, event);
6634 case L2CAP_RX_STATE_SREJ_SENT:
6635 err = l2cap_rx_state_srej_sent(chan, control, skb,
6638 case L2CAP_RX_STATE_WAIT_P:
6639 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6641 case L2CAP_RX_STATE_WAIT_F:
6642 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6649 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6650 control->reqseq, chan->next_tx_seq,
6651 chan->expected_ack_seq);
6652 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode receive: deliver only in-sequence frames to reassembly;
 * anything else discards any partial SDU and drops the frame (streaming
 * mode has no retransmission). Sequence state always advances past the
 * received txseq.
 */
6658 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6659 struct sk_buff *skb)
6661 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6664 if (l2cap_classify_txseq(chan, control->txseq) ==
6665 L2CAP_TXSEQ_EXPECTED) {
6666 l2cap_pass_to_tx(chan, control);
6668 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6669 __next_seq(chan, chan->buffer_seq));
6671 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6673 l2cap_reassemble_sdu(chan, skb, control);
/* Out of sequence: abandon any partially reassembled SDU. */
6676 kfree_skb(chan->sdu);
6679 chan->sdu_last_frag = NULL;
6683 BT_DBG("Freeing %p", skb);
6688 chan->last_acked_seq = control->txseq;
6689 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Entry point for ERTM/streaming data frames: unpack the control field,
 * verify FCS, validate the payload length against MPS, then route I-frames
 * into l2cap_rx()/l2cap_stream_rx() and S-frames into l2cap_rx() via the
 * super-field-to-event table. Invalid F/P bit combinations are dropped.
 */
6694 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6696 struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
6700 __unpack_control(chan, skb);
6705 * We can just drop the corrupted I-frame here.
6706 * Receiver will miss it and start proper recovery
6707 * procedures and ask for retransmission.
6709 if (l2cap_check_fcs(chan, skb))
/* Work out the pure payload length: strip the SDU-length field of a
 * START frame and the FCS trailer, if present.
 */
6712 if (!control->sframe && control->sar == L2CAP_SAR_START)
6713 len -= L2CAP_SDULEN_SIZE;
6715 if (chan->fcs == L2CAP_FCS_CRC16)
6716 len -= L2CAP_FCS_SIZE;
6718 if (len > chan->mps) {
6719 l2cap_send_disconn_req(chan, ECONNRESET);
6723 if (chan->ops->filter) {
6724 if (chan->ops->filter(chan, skb))
6728 if (!control->sframe) {
6731 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6732 control->sar, control->reqseq, control->final,
6735 /* Validate F-bit - F=0 always valid, F=1 only
6736 * valid in TX WAIT_F
6738 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6741 if (chan->mode != L2CAP_MODE_STREAMING) {
6742 event = L2CAP_EV_RECV_IFRAME;
6743 err = l2cap_rx(chan, control, skb, event);
6745 err = l2cap_stream_rx(chan, control, skb);
6749 l2cap_send_disconn_req(chan, ECONNRESET);
/* Map the 2-bit S-frame "super" field to a state machine event. */
6751 const u8 rx_func_to_event[4] = {
6752 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6753 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6756 /* Only I-frames are expected in streaming mode */
6757 if (chan->mode == L2CAP_MODE_STREAMING)
6760 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6761 control->reqseq, control->final, control->poll,
/* S-frames carry no payload; trailing bytes are a protocol error. */
6765 BT_ERR("Trailing bytes: %d in sframe", len);
6766 l2cap_send_disconn_req(chan, ECONNRESET);
6770 /* Validate F and P bits */
6771 if (control->final && (control->poll ||
6772 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6775 event = rx_func_to_event[control->super];
6776 if (l2cap_rx(chan, control, skb, event))
6777 l2cap_send_disconn_req(chan, ECONNRESET);
/* Top up the peer's view of our LE flow-control credits: once our
 * rx_credits have dropped below half of le_max_credits, send an
 * LE Flow Control Credit PDU restoring the count to le_max_credits.
 */
6787 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6789 struct l2cap_conn *conn = chan->conn;
6790 struct l2cap_le_credits pkt;
6793 /* We return more credits to the sender only after the amount of
6794 * credits falls below half of the initial amount.
6796 if (chan->rx_credits >= (le_max_credits + 1) / 2)
6799 return_credits = le_max_credits - chan->rx_credits;
6801 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6803 chan->rx_credits += return_credits;
6805 pkt.cid = cpu_to_le16(chan->scid);
6806 pkt.credits = cpu_to_le16(return_credits);
6808 chan->ident = l2cap_get_ident(conn);
6810 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* Receive path for LE credit-based flow-control channels: enforce credits
 * and MTU, return credits to the peer when low, then either deliver a
 * complete single-PDU SDU or reassemble multi-PDU SDUs using the SDU
 * length carried by the first PDU. Takes care of freeing the skb itself
 * on all error paths so the caller never double-frees.
 */
6813 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
/* Data with zero credits outstanding violates the flow-control rules. */
6817 if (!chan->rx_credits) {
6818 BT_ERR("No credits to receive LE L2CAP data");
6819 l2cap_send_disconn_req(chan, ECONNRESET);
6823 if (chan->imtu < skb->len) {
6824 BT_ERR("Too big LE L2CAP PDU");
6829 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6831 l2cap_chan_le_send_credits(chan);
/* First PDU of an SDU: it starts with a 2-byte SDU length field. */
6838 sdu_len = get_unaligned_le16(skb->data);
6839 skb_pull(skb, L2CAP_SDULEN_SIZE);
6841 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6842 sdu_len, skb->len, chan->imtu);
6844 if (sdu_len > chan->imtu) {
6845 BT_ERR("Too big LE L2CAP SDU length received");
6850 if (skb->len > sdu_len) {
6851 BT_ERR("Too much LE L2CAP data received");
/* SDU fits in a single PDU: deliver it directly. */
6856 if (skb->len == sdu_len)
6857 return chan->ops->recv(chan, skb);
6860 chan->sdu_len = sdu_len;
6861 chan->sdu_last_frag = skb;
6863 /* Detect if remote is not able to use the selected MPS */
6864 if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
6865 u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
6867 /* Adjust the number of credits */
6868 BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
6869 chan->mps = mps_len;
6870 l2cap_chan_le_send_credits(chan);
6876 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6877 chan->sdu->len, skb->len, chan->sdu_len);
6879 if (chan->sdu->len + skb->len > chan->sdu_len) {
6880 BT_ERR("Too much LE L2CAP data received");
6885 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6888 if (chan->sdu->len == chan->sdu_len) {
6889 err = chan->ops->recv(chan, chan->sdu);
6892 chan->sdu_last_frag = NULL;
/* Error path: drop any partially reassembled SDU. */
6900 kfree_skb(chan->sdu);
6902 chan->sdu_last_frag = NULL;
6906 /* We can't return an error here since we took care of the skb
6907 * freeing internally. An error return would cause the caller to
6908 * do a double-free of the skb.
/* Route a data frame received on a dynamic (or A2MP) CID to its channel
 * and hand it to the mode-specific receive path (LE flow control, basic,
 * or ERTM/streaming). Unknown CIDs and frames for non-connected channels
 * are dropped.
 */
6913 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6914 struct sk_buff *skb)
6916 struct l2cap_chan *chan;
6918 chan = l2cap_get_chan_by_scid(conn, cid);
/* A2MP channels are created lazily on first data. */
6920 if (cid == L2CAP_CID_A2MP) {
6921 chan = a2mp_channel_create(conn, skb);
6927 l2cap_chan_lock(chan);
6929 BT_DBG("unknown cid 0x%4.4x", cid);
6930 /* Drop packet and return */
6936 BT_DBG("chan %p, len %d", chan, skb->len);
6938 /* If we receive data on a fixed channel before the info req/rsp
6939 * procdure is done simply assume that the channel is supported
6940 * and mark it as ready.
6942 if (chan->chan_type == L2CAP_CHAN_FIXED)
6943 l2cap_chan_ready(chan);
6945 if (chan->state != BT_CONNECTED)
6948 switch (chan->mode) {
6949 case L2CAP_MODE_LE_FLOWCTL:
6950 if (l2cap_le_data_rcv(chan, skb) < 0)
6955 case L2CAP_MODE_BASIC:
6956 /* If socket recv buffers overflows we drop data here
6957 * which is *bad* because L2CAP has to be reliable.
6958 * But we don't have any other choice. L2CAP doesn't
6959 * provide flow control mechanism. */
6961 if (chan->imtu < skb->len) {
6962 BT_ERR("Dropping L2CAP data: receive buffer overflow");
6966 if (!chan->ops->recv(chan, skb))
6970 case L2CAP_MODE_ERTM:
6971 case L2CAP_MODE_STREAMING:
6972 l2cap_data_rcv(chan, skb);
6976 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6984 l2cap_chan_unlock(chan);
6985 l2cap_chan_put(chan);
/* Deliver a connectionless (G-frame) packet to the global channel listening
 * on the given PSM. ACL links only. The remote address and PSM are stashed
 * in the skb control block for recvmsg's msg_name.
 */
6988 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6989 struct sk_buff *skb)
6991 struct hci_conn *hcon = conn->hcon;
6992 struct l2cap_chan *chan;
6994 if (hcon->type != ACL_LINK)
6997 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
7002 BT_DBG("chan %p, len %d", chan, skb->len);
7004 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
7007 if (chan->imtu < skb->len)
7010 /* Store remote BD_ADDR and PSM for msg_name */
7011 bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
7012 bt_cb(skb)->l2cap.psm = psm;
7014 if (!chan->ops->recv(chan, skb)) {
7015 l2cap_chan_put(chan);
7020 l2cap_chan_put(chan);
/* Demultiplex a raw L2CAP frame: parse the basic header and dispatch by CID
 * to the signaling, connectionless, LE signaling, or data-channel paths.
 * Frames arriving before the HCI connection is fully up are queued on
 * pending_rx and replayed later by process_pending_rx().
 */
7025 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7027 struct l2cap_hdr *lh = (void *) skb->data;
7028 struct hci_conn *hcon = conn->hcon;
7032 if (hcon->state != BT_CONNECTED) {
7033 BT_DBG("queueing pending rx skb");
7034 skb_queue_tail(&conn->pending_rx, skb);
7038 skb_pull(skb, L2CAP_HDR_SIZE);
7039 cid = __le16_to_cpu(lh->cid);
7040 len = __le16_to_cpu(lh->len);
/* Header-declared length must match the actual payload. */
7042 if (len != skb->len) {
7047 /* Since we can't actively block incoming LE connections we must
7048 * at least ensure that we ignore incoming data from them.
7050 if (hcon->type == LE_LINK &&
7051 hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
7052 bdaddr_dst_type(hcon))) {
7057 BT_DBG("len %d, cid 0x%4.4x", len, cid);
7060 case L2CAP_CID_SIGNALING:
7061 l2cap_sig_channel(conn, skb);
7064 case L2CAP_CID_CONN_LESS:
7065 psm = get_unaligned((__le16 *) skb->data);
7066 skb_pull(skb, L2CAP_PSMLEN_SIZE);
7067 l2cap_conless_channel(conn, psm, skb);
7070 case L2CAP_CID_LE_SIGNALING:
7071 l2cap_le_sig_channel(conn, skb);
7075 l2cap_data_channel(conn, cid, skb);
/* Workqueue handler: replay frames that arrived before the HCI connection
 * reached BT_CONNECTED and were parked on conn->pending_rx.
 */
7080 static void process_pending_rx(struct work_struct *work)
7082 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7084 struct sk_buff *skb;
7088 while ((skb = skb_dequeue(&conn->pending_rx)))
7089 l2cap_recv_frame(conn, skb);
/* Create (or return the existing) l2cap_conn for an HCI connection:
 * allocate the conn, take a ref on the hcon, pick the MTU by link type,
 * advertise the locally supported fixed channels, and initialize locks,
 * lists, timers and work items.
 */
7092 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7094 struct l2cap_conn *conn = hcon->l2cap_data;
7095 struct hci_chan *hchan;
7100 hchan = hci_chan_create(hcon);
7104 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
/* Allocation failed: undo the hci_chan before bailing out. */
7106 hci_chan_del(hchan);
7110 kref_init(&conn->ref);
7111 hcon->l2cap_data = conn;
7112 conn->hcon = hci_conn_get(hcon);
7113 conn->hchan = hchan;
7115 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
/* MTU by link type: LE uses the controller's LE MTU when set,
 * otherwise the ACL MTU is used.
 */
7117 switch (hcon->type) {
7119 if (hcon->hdev->le_mtu) {
7120 conn->mtu = hcon->hdev->le_mtu;
7125 conn->mtu = hcon->hdev->acl_mtu;
7129 conn->feat_mask = 0;
7131 conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7133 if (hcon->type == ACL_LINK &&
7134 hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7135 conn->local_fixed_chan |= L2CAP_FC_A2MP;
7137 if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7138 (bredr_sc_enabled(hcon->hdev) ||
7139 hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7140 conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7142 mutex_init(&conn->ident_lock);
7143 mutex_init(&conn->chan_lock);
7145 INIT_LIST_HEAD(&conn->chan_l);
7146 INIT_LIST_HEAD(&conn->users);
7148 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7150 skb_queue_head_init(&conn->pending_rx);
7151 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7152 INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7154 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* Validate a PSM for the destination address type: LE PSMs are a single
 * octet (<= 0x00ff); BR/EDR PSMs must be odd with bit 8 clear, per the
 * Bluetooth Core Specification.
 */
7159 static bool is_valid_psm(u16 psm, u8 dst_type) {
7163 if (bdaddr_type_is_le(dst_type))
7164 return (psm <= 0x00ff);
7166 /* PSM must be odd and lsb of upper byte must be 0 */
7167 return ((psm & 0x0101) == 0x0001);
/* Initiate an outgoing L2CAP connection on chan to dst: validate the
 * PSM/CID for the channel type and mode, resolve a route via
 * hci_get_route(), establish (or reuse) the underlying LE or ACL link,
 * attach the channel to the l2cap_conn, and either start channel setup
 * immediately (if the link is already up) or wait for link completion.
 * Returns 0 on success or a negative errno.
 */
7170 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7171 bdaddr_t *dst, u8 dst_type)
7173 struct l2cap_conn *conn;
7174 struct hci_conn *hcon;
7175 struct hci_dev *hdev;
7178 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
7179 dst_type, __le16_to_cpu(psm));
7181 hdev = hci_get_route(dst, &chan->src, chan->src_type);
7183 return -EHOSTUNREACH;
/* Raw channels may use any PSM/CID combination; others must pass
 * PSM validation unless a fixed CID was given.
 */
7187 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7188 chan->chan_type != L2CAP_CHAN_RAW) {
7193 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7198 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7203 switch (chan->mode) {
7204 case L2CAP_MODE_BASIC:
7206 case L2CAP_MODE_LE_FLOWCTL:
7207 l2cap_le_flowctl_init(chan);
7209 case L2CAP_MODE_ERTM:
7210 case L2CAP_MODE_STREAMING:
7219 switch (chan->state) {
7223 /* Already connecting */
7228 /* Already connected */
7242 /* Set destination address and psm */
7243 bacpy(&chan->dst, dst);
7244 chan->dst_type = dst_type;
7249 if (bdaddr_type_is_le(dst_type)) {
7250 /* Convert from L2CAP channel address type to HCI address type
7252 if (dst_type == BDADDR_LE_PUBLIC)
7253 dst_type = ADDR_LE_DEV_PUBLIC;
7255 dst_type = ADDR_LE_DEV_RANDOM;
/* While advertising we must connect as slave; otherwise go through
 * the connect-with-scan path.
 */
7257 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7258 hcon = hci_connect_le(hdev, dst, dst_type,
7260 HCI_LE_CONN_TIMEOUT,
7261 HCI_ROLE_SLAVE, NULL);
7263 hcon = hci_connect_le_scan(hdev, dst, dst_type,
7265 HCI_LE_CONN_TIMEOUT);
7268 u8 auth_type = l2cap_get_auth_type(chan);
7269 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
7273 err = PTR_ERR(hcon);
7277 conn = l2cap_conn_add(hcon);
7279 hci_conn_drop(hcon);
7284 mutex_lock(&conn->chan_lock);
7285 l2cap_chan_lock(chan);
/* A fixed destination CID must not already be taken on this conn. */
7287 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7288 hci_conn_drop(hcon);
7293 /* Update source addr of the socket */
7294 bacpy(&chan->src, &hcon->src);
7295 chan->src_type = bdaddr_src_type(hcon);
7297 __l2cap_chan_add(conn, chan);
7299 /* l2cap_chan_add takes its own ref so we can drop this one */
7300 hci_conn_drop(hcon);
7302 l2cap_state_change(chan, BT_CONNECT);
7303 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7305 /* Release chan->sport so that it can be reused by other
7306 * sockets (as it's only used for listening sockets).
7308 write_lock(&chan_list_lock);
7310 write_unlock(&chan_list_lock);
7312 if (hcon->state == BT_CONNECTED) {
7313 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7314 __clear_chan_timer(chan);
7315 if (l2cap_chan_check_security(chan, true))
7316 l2cap_state_change(chan, BT_CONNECTED);
7318 l2cap_do_start(chan);
7324 l2cap_chan_unlock(chan);
7325 mutex_unlock(&conn->chan_lock);
7327 hci_dev_unlock(hdev);
7331 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7333 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an incoming ACL connection from @bdaddr asks whether
 * L2CAP wants to accept it.
 *
 * Scans the global channel list for listening channels bound either to
 * this controller's own address (lm1) or to the wildcard BDADDR_ANY
 * (lm2), building a link-mode mask of HCI_LM_ACCEPT plus HCI_LM_MASTER
 * when FLAG_ROLE_SWITCH is set.  The final return prefers the
 * exact-address mask over the wildcard one.
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		if (c->state != BT_LISTEN)

		/* Exact match against our controller address */
		if (!bacmp(&c->src, &hdev->bdaddr)) {
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
		} else if (!bacmp(&c->src, BDADDR_ANY)) {
			/* Wildcard listener */
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 *
 * A reference is taken on the returned channel (via
 * l2cap_chan_hold_unless_zero) before the list lock is dropped, so the
 * caller must l2cap_chan_put() it.
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	/* Resume after the previous hit, or start from the list head. */
		c = list_next_entry(c, global_l);
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		/* Only listening fixed channels whose source address and
		 * address type match this link are eligible. */
		if (c->chan_type != L2CAP_CHAN_FIXED)
		if (c->state != BT_LISTEN)
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
		if (src_type != c->src_type)

		/* Pin the channel before releasing the list lock; the hold
		 * fails when the channel is already being freed. */
		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);

	read_unlock(&chan_list_lock);
/* HCI callback invoked once link setup for @hcon completes
 * (@status == 0) or fails (@status != 0).
 *
 * On failure the L2CAP connection state is torn down.  On success the
 * L2CAP connection is (re)created, listening fixed channels matching
 * the link are instantiated and attached, and the connection is
 * declared ready.
 *
 * NOTE(review): early-return bodies and the fixed-channel while-loop
 * header appear to be elided from this copy; the indentation below
 * reflects the apparent intent.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;

	/* L2CAP only runs over ACL and LE links */
	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	l2cap_conn_del(hcon, bt_to_errno(status));

	conn = l2cap_conn_add(hcon);

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))

		l2cap_chan_lock(pchan);
		/* Instantiate a per-link channel from the listening one. */
		chan = pchan->ops->new_connection(pchan);
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);

		l2cap_chan_unlock(pchan);

		/* Advance the iterator before dropping our reference. */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);

	l2cap_conn_ready(conn);
7461 int l2cap_disconn_ind(struct hci_conn *hcon)
7463 struct l2cap_conn *conn = hcon->l2cap_data;
7465 BT_DBG("hcon %p", hcon);
7468 return HCI_ERROR_REMOTE_USER_TERM;
7469 return conn->disc_reason;
7472 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7474 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7477 BT_DBG("hcon %p reason %d", hcon, reason);
7479 l2cap_conn_del(hcon, bt_to_errno(reason));
7482 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7484 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7487 if (encrypt == 0x00) {
7488 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7489 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7490 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7491 chan->sec_level == BT_SECURITY_FIPS)
7492 l2cap_chan_close(chan, ECONNREFUSED);
7494 if (chan->sec_level == BT_SECURITY_MEDIUM)
7495 __clear_chan_timer(chan);
/* HCI callback for authentication/encryption changes on @hcon.
 *
 * Walks every channel on the connection and advances its state machine
 * according to the new security status: resuming connected/configuring
 * channels, (re)starting connection setup for BT_CONNECT channels, or
 * answering a pending incoming connect (BT_CONNECT2) with success,
 * authorisation-pending or security-block.
 *
 * NOTE(review): several lines (NULL-conn guard, 'res'/'stat'/'buf'
 * declarations, continue statements, closing braces) appear to be
 * elided from this copy.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP signalling is exempt from link security handling */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);

		/* Successful encryption upgrades the channel's level */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		/* Skip channels with an outstanding connect request */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);

		/* Already-established channels simply resume */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);

		if (chan->state == BT_CONNECT) {
			/* Proceed only with an acceptable encryption key
			 * size; otherwise arm the disconnect timer. */
			if (!status && l2cap_check_enc_key_size(hcon))
				l2cap_start_connection(chan);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   chan->mode != L2CAP_MODE_LE_FLOWCTL) {
			struct l2cap_conn_rsp rsp;

			if (!status && l2cap_check_enc_key_size(hcon)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let userspace authorise first */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				/* Security failure: refuse and schedule
				 * disconnect. */
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;

			/* Answer the pending connect request with our
			 * verdict (note scid/dcid are swapped for the
			 * remote's point of view). */
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,

			/* On success immediately follow up with the first
			 * configuration request. */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
				chan->num_conf_req++;

		l2cap_chan_unlock(chan);

	mutex_unlock(&conn->chan_lock);
/* HCI callback: an ACL data fragment for @hcon has arrived.
 *
 * Reassembles L2CAP frames from ACL start/continuation fragments into
 * conn->rx_skb.  A complete frame is handed to l2cap_recv_frame();
 * malformed sequences (short header, oversized fragments, unexpected
 * start or continuation) mark the connection unreliable with ECOMM and
 * drop the data.
 *
 * NOTE(review): the switch header on 'flags', ACL_CONT case label,
 * several kfree_skb/goto-drop lines and 'len' declaration appear to be
 * elided from this copy.
 */
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)

	conn = l2cap_conn_add(hcon);

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	case ACL_START_NO_FLUSH:
		/* A new start frame while reassembly is in progress means
		 * the previous frame was truncated: drop it. */
		BT_ERR("Unexpected start frame (len %d)", skb->len);
		kfree_skb(conn->rx_skb);
		conn->rx_skb = NULL;
		l2cap_conn_unreliable(conn, ECOMM);

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			l2cap_conn_unreliable(conn, ECOMM);

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
		/* Remember how many bytes are still outstanding. */
		conn->rx_len = len - skb->len;

		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation with no reassembly in progress */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			l2cap_conn_unreliable(conn, ECOMM);

		/* Append the fragment to the reassembly buffer. */
		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
/* Hooks registered with the HCI core so L2CAP is notified of link
 * events: connection setup completion, disconnection, and security
 * (auth/encryption) changes.
 * NOTE(review): the .name member and closing brace appear to be
 * missing from this copy.
 */
static struct hci_cb l2cap_cb = {
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
/* debugfs: print one line per channel on the global list, showing
 * source/destination addresses and types, state, PSM, CIDs, MTUs,
 * security level and mode, all under the channel-list read lock.
 */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
	struct l2cap_chan *c;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
			   &c->src, c->src_type, &c->dst, c->dst_type,
			   c->state, __le16_to_cpu(c->psm),
			   c->scid, c->dcid, c->imtu, c->omtu,
			   c->sec_level, c->mode);

	read_unlock(&chan_list_lock);
/* debugfs open hook: bind the "l2cap" file to l2cap_debugfs_show via
 * the seq_file single_open helper.
 */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
	return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the read-only "l2cap" debugfs entry, using the
 * seq_file single_* helpers.
 * NOTE(review): the .read = seq_read member appears to be missing from
 * this copy.
 */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.llseek		= seq_lseek,
	.release	= single_release,
/* Handle of the "l2cap" debugfs file; removed again in l2cap_exit(). */
static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer and the HCI callbacks,
 * then expose debugfs entries (the "l2cap" channel dump plus the
 * writable LE credit/MPS tunables) when bt_debugfs is available.
 *
 * NOTE(review): the 'err' declaration, its error check and the return
 * statements appear to be elided from this copy.
 */
int __init l2cap_init(void)
	err = l2cap_init_sockets();

	hci_register_cb(&l2cap_cb);

	/* Skip debugfs setup when the bluetooth root dir is unavailable */
	if (IS_ERR_OR_NULL(bt_debugfs))

	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
					    NULL, &l2cap_debugfs_fops);

	debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
	debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
/* Module exit: undo l2cap_init() in reverse order — remove the debugfs
 * entry, unregister the HCI callbacks, then tear down the socket layer.
 */
void l2cap_exit(void)
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
/* Runtime-writable module parameter to disable ERTM support. */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");