2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
45 #define LE_FLOWCTL_MAX_CREDITS 65535
49 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
51 static LIST_HEAD(chan_list);
52 static DEFINE_RWLOCK(chan_list_lock);
54 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
55 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
57 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
58 u8 code, u8 ident, u16 dlen, void *data);
59 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
61 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
62 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
64 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
65 struct sk_buff_head *skbs, u8 event);
66 static void l2cap_retrans_timeout(struct work_struct *work);
67 static void l2cap_monitor_timeout(struct work_struct *work);
68 static void l2cap_ack_timeout(struct work_struct *work);
70 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
72 if (link_type == LE_LINK) {
73 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
74 return BDADDR_LE_PUBLIC;
76 return BDADDR_LE_RANDOM;
82 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
84 return bdaddr_type(hcon->type, hcon->src_type);
87 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
89 return bdaddr_type(hcon->type, hcon->dst_type);
92 /* ---- L2CAP channels ---- */
94 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
99 list_for_each_entry(c, &conn->chan_l, list) {
106 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
109 struct l2cap_chan *c;
111 list_for_each_entry(c, &conn->chan_l, list) {
118 /* Find channel with given SCID.
119 * Returns a reference locked channel.
121 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
124 struct l2cap_chan *c;
126 mutex_lock(&conn->chan_lock);
127 c = __l2cap_get_chan_by_scid(conn, cid);
129 /* Only lock if chan reference is not 0 */
130 c = l2cap_chan_hold_unless_zero(c);
134 mutex_unlock(&conn->chan_lock);
139 /* Find channel with given DCID.
140 * Returns a reference locked channel.
142 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
145 struct l2cap_chan *c;
147 mutex_lock(&conn->chan_lock);
148 c = __l2cap_get_chan_by_dcid(conn, cid);
150 /* Only lock if chan reference is not 0 */
151 c = l2cap_chan_hold_unless_zero(c);
155 mutex_unlock(&conn->chan_lock);
160 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
163 struct l2cap_chan *c;
165 list_for_each_entry(c, &conn->chan_l, list) {
166 if (c->ident == ident)
172 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
175 struct l2cap_chan *c;
177 mutex_lock(&conn->chan_lock);
178 c = __l2cap_get_chan_by_ident(conn, ident);
180 /* Only lock if chan reference is not 0 */
181 c = l2cap_chan_hold_unless_zero(c);
185 mutex_unlock(&conn->chan_lock);
190 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
192 struct l2cap_chan *c;
194 list_for_each_entry(c, &chan_list, global_l) {
195 if (c->sport == psm && !bacmp(&c->src, src))
201 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
205 write_lock(&chan_list_lock);
207 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
217 u16 p, start, end, incr;
219 if (chan->src_type == BDADDR_BREDR) {
220 start = L2CAP_PSM_DYN_START;
221 end = L2CAP_PSM_AUTO_END;
224 start = L2CAP_PSM_LE_DYN_START;
225 end = L2CAP_PSM_LE_DYN_END;
230 for (p = start; p <= end; p += incr)
231 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
232 chan->psm = cpu_to_le16(p);
233 chan->sport = cpu_to_le16(p);
240 write_unlock(&chan_list_lock);
243 EXPORT_SYMBOL_GPL(l2cap_add_psm);
245 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
247 write_lock(&chan_list_lock);
249 /* Override the defaults (which are for conn-oriented) */
250 chan->omtu = L2CAP_DEFAULT_MTU;
251 chan->chan_type = L2CAP_CHAN_FIXED;
255 write_unlock(&chan_list_lock);
260 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
264 if (conn->hcon->type == LE_LINK)
265 dyn_end = L2CAP_CID_LE_DYN_END;
267 dyn_end = L2CAP_CID_DYN_END;
269 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
270 if (!__l2cap_get_chan_by_scid(conn, cid))
277 static void l2cap_state_change(struct l2cap_chan *chan, int state)
279 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
280 state_to_string(state));
283 chan->ops->state_change(chan, state, 0);
286 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
290 chan->ops->state_change(chan, chan->state, err);
293 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
295 chan->ops->state_change(chan, chan->state, err);
298 static void __set_retrans_timer(struct l2cap_chan *chan)
300 if (!delayed_work_pending(&chan->monitor_timer) &&
301 chan->retrans_timeout) {
302 l2cap_set_timer(chan, &chan->retrans_timer,
303 msecs_to_jiffies(chan->retrans_timeout));
307 static void __set_monitor_timer(struct l2cap_chan *chan)
309 __clear_retrans_timer(chan);
310 if (chan->monitor_timeout) {
311 l2cap_set_timer(chan, &chan->monitor_timer,
312 msecs_to_jiffies(chan->monitor_timeout));
316 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
321 skb_queue_walk(head, skb) {
322 if (bt_cb(skb)->l2cap.txseq == seq)
329 /* ---- L2CAP sequence number lists ---- */
331 /* For ERTM, ordered lists of sequence numbers must be tracked for
332 * SREJ requests that are received and for frames that are to be
333 * retransmitted. These seq_list functions implement a singly-linked
334 * list in an array, where membership in the list can also be checked
335 * in constant time. Items can also be added to the tail of the list
336 * and removed from the head in constant time, without further memory
340 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
342 size_t alloc_size, i;
344 /* Allocated size is a power of 2 to map sequence numbers
345 * (which may be up to 14 bits) in to a smaller array that is
346 * sized for the negotiated ERTM transmit windows.
348 alloc_size = roundup_pow_of_two(size);
350 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
354 seq_list->mask = alloc_size - 1;
355 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
356 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
357 for (i = 0; i < alloc_size; i++)
358 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
363 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
365 kfree(seq_list->list);
368 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
371 /* Constant-time check for list membership */
372 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
375 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
377 u16 seq = seq_list->head;
378 u16 mask = seq_list->mask;
380 seq_list->head = seq_list->list[seq & mask];
381 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
383 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
384 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
385 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
391 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
395 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
398 for (i = 0; i <= seq_list->mask; i++)
399 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
401 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
402 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
405 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
407 u16 mask = seq_list->mask;
409 /* All appends happen in constant time */
411 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
414 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
415 seq_list->head = seq;
417 seq_list->list[seq_list->tail & mask] = seq;
419 seq_list->tail = seq;
420 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Channel timer expiry: close the channel with a reason derived from
 * the state it timed out in.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);
}
456 struct l2cap_chan *l2cap_chan_create(void)
458 struct l2cap_chan *chan;
460 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
464 skb_queue_head_init(&chan->tx_q);
465 skb_queue_head_init(&chan->srej_q);
466 mutex_init(&chan->lock);
468 /* Set default lock nesting level */
469 atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
471 write_lock(&chan_list_lock);
472 list_add(&chan->global_l, &chan_list);
473 write_unlock(&chan_list_lock);
475 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
476 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
477 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
478 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
480 chan->state = BT_OPEN;
482 kref_init(&chan->kref);
484 /* This flag is cleared in l2cap_chan_ready() */
485 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
487 BT_DBG("chan %p", chan);
491 EXPORT_SYMBOL_GPL(l2cap_chan_create);
493 static void l2cap_chan_destroy(struct kref *kref)
495 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
497 BT_DBG("chan %p", chan);
499 write_lock(&chan_list_lock);
500 list_del(&chan->global_l);
501 write_unlock(&chan_list_lock);
506 void l2cap_chan_hold(struct l2cap_chan *c)
508 BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
513 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
515 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
517 if (!kref_get_unless_zero(&c->kref))
523 void l2cap_chan_put(struct l2cap_chan *c)
525 BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
527 kref_put(&c->kref, l2cap_chan_destroy);
529 EXPORT_SYMBOL_GPL(l2cap_chan_put);
531 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
533 chan->fcs = L2CAP_FCS_CRC16;
534 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
535 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
536 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
537 chan->remote_max_tx = chan->max_tx;
538 chan->remote_tx_win = chan->tx_win;
539 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
540 chan->sec_level = BT_SECURITY_LOW;
541 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
542 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
543 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
545 chan->conf_state = 0;
546 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
548 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
550 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
552 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
555 chan->sdu_last_frag = NULL;
557 chan->tx_credits = 0;
558 chan->rx_credits = le_max_credits;
559 chan->mps = min_t(u16, chan->imtu, le_default_mps);
561 skb_queue_head_init(&chan->tx_q);
564 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
566 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
567 __le16_to_cpu(chan->psm), chan->dcid);
569 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
573 switch (chan->chan_type) {
574 case L2CAP_CHAN_CONN_ORIENTED:
575 /* Alloc CID for connection-oriented socket */
576 chan->scid = l2cap_alloc_cid(conn);
577 if (conn->hcon->type == ACL_LINK)
578 chan->omtu = L2CAP_DEFAULT_MTU;
581 case L2CAP_CHAN_CONN_LESS:
582 /* Connectionless socket */
583 chan->scid = L2CAP_CID_CONN_LESS;
584 chan->dcid = L2CAP_CID_CONN_LESS;
585 chan->omtu = L2CAP_DEFAULT_MTU;
588 case L2CAP_CHAN_FIXED:
589 /* Caller will set CID and CID specific MTU values */
593 /* Raw socket can send/recv signalling messages only */
594 chan->scid = L2CAP_CID_SIGNALING;
595 chan->dcid = L2CAP_CID_SIGNALING;
596 chan->omtu = L2CAP_DEFAULT_MTU;
599 chan->local_id = L2CAP_BESTEFFORT_ID;
600 chan->local_stype = L2CAP_SERV_BESTEFFORT;
601 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
602 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
603 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
604 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
606 l2cap_chan_hold(chan);
608 /* Only keep a reference for fixed channels if they requested it */
609 if (chan->chan_type != L2CAP_CHAN_FIXED ||
610 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
611 hci_conn_hold(conn->hcon);
613 list_add(&chan->list, &conn->chan_l);
616 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
618 mutex_lock(&conn->chan_lock);
619 __l2cap_chan_add(conn, chan);
620 mutex_unlock(&conn->chan_lock);
623 void l2cap_chan_del(struct l2cap_chan *chan, int err)
625 struct l2cap_conn *conn = chan->conn;
627 __clear_chan_timer(chan);
629 BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
630 state_to_string(chan->state));
632 chan->ops->teardown(chan, err);
635 struct amp_mgr *mgr = conn->hcon->amp_mgr;
636 /* Delete from channel list */
637 list_del(&chan->list);
639 l2cap_chan_put(chan);
643 /* Reference was only held for non-fixed channels or
644 * fixed channels that explicitly requested it using the
645 * FLAG_HOLD_HCI_CONN flag.
647 if (chan->chan_type != L2CAP_CHAN_FIXED ||
648 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
649 hci_conn_drop(conn->hcon);
651 if (mgr && mgr->bredr_chan == chan)
652 mgr->bredr_chan = NULL;
655 if (chan->hs_hchan) {
656 struct hci_chan *hs_hchan = chan->hs_hchan;
658 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
659 amp_disconnect_logical_link(hs_hchan);
662 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
666 case L2CAP_MODE_BASIC:
669 case L2CAP_MODE_LE_FLOWCTL:
670 skb_queue_purge(&chan->tx_q);
673 case L2CAP_MODE_ERTM:
674 __clear_retrans_timer(chan);
675 __clear_monitor_timer(chan);
676 __clear_ack_timer(chan);
678 skb_queue_purge(&chan->srej_q);
680 l2cap_seq_list_free(&chan->srej_list);
681 l2cap_seq_list_free(&chan->retrans_list);
685 case L2CAP_MODE_STREAMING:
686 skb_queue_purge(&chan->tx_q);
692 EXPORT_SYMBOL_GPL(l2cap_chan_del);
694 static void l2cap_conn_update_id_addr(struct work_struct *work)
696 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
697 id_addr_update_work);
698 struct hci_conn *hcon = conn->hcon;
699 struct l2cap_chan *chan;
701 mutex_lock(&conn->chan_lock);
703 list_for_each_entry(chan, &conn->chan_l, list) {
704 l2cap_chan_lock(chan);
705 bacpy(&chan->dst, &hcon->dst);
706 chan->dst_type = bdaddr_dst_type(hcon);
707 l2cap_chan_unlock(chan);
710 mutex_unlock(&conn->chan_lock);
713 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
715 struct l2cap_conn *conn = chan->conn;
716 struct l2cap_le_conn_rsp rsp;
719 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
720 result = L2CAP_CR_AUTHORIZATION;
722 result = L2CAP_CR_BAD_PSM;
724 l2cap_state_change(chan, BT_DISCONN);
726 rsp.dcid = cpu_to_le16(chan->scid);
727 rsp.mtu = cpu_to_le16(chan->imtu);
728 rsp.mps = cpu_to_le16(chan->mps);
729 rsp.credits = cpu_to_le16(chan->rx_credits);
730 rsp.result = cpu_to_le16(result);
732 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
736 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
738 struct l2cap_conn *conn = chan->conn;
739 struct l2cap_conn_rsp rsp;
742 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
743 result = L2CAP_CR_SEC_BLOCK;
745 result = L2CAP_CR_BAD_PSM;
747 l2cap_state_change(chan, BT_DISCONN);
749 rsp.scid = cpu_to_le16(chan->dcid);
750 rsp.dcid = cpu_to_le16(chan->scid);
751 rsp.result = cpu_to_le16(result);
752 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
754 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
757 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
759 struct l2cap_conn *conn = chan->conn;
761 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
763 switch (chan->state) {
765 chan->ops->teardown(chan, 0);
770 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
771 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
772 l2cap_send_disconn_req(chan, reason);
774 l2cap_chan_del(chan, reason);
778 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
779 if (conn->hcon->type == ACL_LINK)
780 l2cap_chan_connect_reject(chan);
781 else if (conn->hcon->type == LE_LINK)
782 l2cap_chan_le_connect_reject(chan);
785 l2cap_chan_del(chan, reason);
790 l2cap_chan_del(chan, reason);
794 chan->ops->teardown(chan, 0);
798 EXPORT_SYMBOL(l2cap_chan_close);
800 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
802 switch (chan->chan_type) {
804 switch (chan->sec_level) {
805 case BT_SECURITY_HIGH:
806 case BT_SECURITY_FIPS:
807 return HCI_AT_DEDICATED_BONDING_MITM;
808 case BT_SECURITY_MEDIUM:
809 return HCI_AT_DEDICATED_BONDING;
811 return HCI_AT_NO_BONDING;
814 case L2CAP_CHAN_CONN_LESS:
815 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
816 if (chan->sec_level == BT_SECURITY_LOW)
817 chan->sec_level = BT_SECURITY_SDP;
819 if (chan->sec_level == BT_SECURITY_HIGH ||
820 chan->sec_level == BT_SECURITY_FIPS)
821 return HCI_AT_NO_BONDING_MITM;
823 return HCI_AT_NO_BONDING;
825 case L2CAP_CHAN_CONN_ORIENTED:
826 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
827 if (chan->sec_level == BT_SECURITY_LOW)
828 chan->sec_level = BT_SECURITY_SDP;
830 if (chan->sec_level == BT_SECURITY_HIGH ||
831 chan->sec_level == BT_SECURITY_FIPS)
832 return HCI_AT_NO_BONDING_MITM;
834 return HCI_AT_NO_BONDING;
838 switch (chan->sec_level) {
839 case BT_SECURITY_HIGH:
840 case BT_SECURITY_FIPS:
841 return HCI_AT_GENERAL_BONDING_MITM;
842 case BT_SECURITY_MEDIUM:
843 return HCI_AT_GENERAL_BONDING;
845 return HCI_AT_NO_BONDING;
851 /* Service level security */
852 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
854 struct l2cap_conn *conn = chan->conn;
857 if (conn->hcon->type == LE_LINK)
858 return smp_conn_security(conn->hcon, chan->sec_level);
860 auth_type = l2cap_get_auth_type(chan);
862 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
866 static u8 l2cap_get_ident(struct l2cap_conn *conn)
870 /* Get next available identificator.
871 * 1 - 128 are used by kernel.
872 * 129 - 199 are reserved.
873 * 200 - 254 are used by utilities like l2ping, etc.
876 mutex_lock(&conn->ident_lock);
878 if (++conn->tx_ident > 128)
883 mutex_unlock(&conn->ident_lock);
888 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
891 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
894 BT_DBG("code 0x%2.2x", code);
899 /* Use NO_FLUSH if supported or we have an LE link (which does
900 * not support auto-flushing packets) */
901 if (lmp_no_flush_capable(conn->hcon->hdev) ||
902 conn->hcon->type == LE_LINK)
903 flags = ACL_START_NO_FLUSH;
907 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
908 skb->priority = HCI_PRIO_MAX;
910 hci_send_acl(conn->hchan, skb, flags);
913 static bool __chan_is_moving(struct l2cap_chan *chan)
915 return chan->move_state != L2CAP_MOVE_STABLE &&
916 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
919 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
921 struct hci_conn *hcon = chan->conn->hcon;
924 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
927 if (chan->hs_hcon && !__chan_is_moving(chan)) {
929 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
936 /* Use NO_FLUSH for LE links (where this is the only option) or
937 * if the BR/EDR link supports it and flushing has not been
938 * explicitly requested (through FLAG_FLUSHABLE).
940 if (hcon->type == LE_LINK ||
941 (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
942 lmp_no_flush_capable(hcon->hdev)))
943 flags = ACL_START_NO_FLUSH;
947 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
948 hci_send_acl(chan->conn->hchan, skb, flags);
951 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
953 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
954 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
956 if (enh & L2CAP_CTRL_FRAME_TYPE) {
959 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
960 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
967 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
968 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
975 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
977 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
978 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
980 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
983 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
984 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
991 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
992 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
999 static inline void __unpack_control(struct l2cap_chan *chan,
1000 struct sk_buff *skb)
1002 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1003 __unpack_extended_control(get_unaligned_le32(skb->data),
1004 &bt_cb(skb)->l2cap);
1005 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1007 __unpack_enhanced_control(get_unaligned_le16(skb->data),
1008 &bt_cb(skb)->l2cap);
1009 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1013 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1017 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1018 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1020 if (control->sframe) {
1021 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1022 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1023 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1025 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1026 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1032 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1036 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1037 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1039 if (control->sframe) {
1040 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1041 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1042 packed |= L2CAP_CTRL_FRAME_TYPE;
1044 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1045 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1051 static inline void __pack_control(struct l2cap_chan *chan,
1052 struct l2cap_ctrl *control,
1053 struct sk_buff *skb)
1055 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1056 put_unaligned_le32(__pack_extended_control(control),
1057 skb->data + L2CAP_HDR_SIZE);
1059 put_unaligned_le16(__pack_enhanced_control(control),
1060 skb->data + L2CAP_HDR_SIZE);
1064 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1066 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1067 return L2CAP_EXT_HDR_SIZE;
1069 return L2CAP_ENH_HDR_SIZE;
1072 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1075 struct sk_buff *skb;
1076 struct l2cap_hdr *lh;
1077 int hlen = __ertm_hdr_size(chan);
1079 if (chan->fcs == L2CAP_FCS_CRC16)
1080 hlen += L2CAP_FCS_SIZE;
1082 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1085 return ERR_PTR(-ENOMEM);
1087 lh = skb_put(skb, L2CAP_HDR_SIZE);
1088 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1089 lh->cid = cpu_to_le16(chan->dcid);
1091 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1092 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1094 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1096 if (chan->fcs == L2CAP_FCS_CRC16) {
1097 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1098 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1101 skb->priority = HCI_PRIO_MAX;
1105 static void l2cap_send_sframe(struct l2cap_chan *chan,
1106 struct l2cap_ctrl *control)
1108 struct sk_buff *skb;
1111 BT_DBG("chan %p, control %p", chan, control);
1113 if (!control->sframe)
1116 if (__chan_is_moving(chan))
1119 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1123 if (control->super == L2CAP_SUPER_RR)
1124 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1125 else if (control->super == L2CAP_SUPER_RNR)
1126 set_bit(CONN_RNR_SENT, &chan->conn_state);
1128 if (control->super != L2CAP_SUPER_SREJ) {
1129 chan->last_acked_seq = control->reqseq;
1130 __clear_ack_timer(chan);
1133 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1134 control->final, control->poll, control->super);
1136 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1137 control_field = __pack_extended_control(control);
1139 control_field = __pack_enhanced_control(control);
1141 skb = l2cap_create_sframe_pdu(chan, control_field);
1143 l2cap_do_send(chan, skb);
1146 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1148 struct l2cap_ctrl control;
1150 BT_DBG("chan %p, poll %d", chan, poll);
1152 memset(&control, 0, sizeof(control));
1154 control.poll = poll;
1156 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1157 control.super = L2CAP_SUPER_RNR;
1159 control.super = L2CAP_SUPER_RR;
1161 control.reqseq = chan->buffer_seq;
1162 l2cap_send_sframe(chan, &control);
1165 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1167 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1170 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1173 static bool __amp_capable(struct l2cap_chan *chan)
1175 struct l2cap_conn *conn = chan->conn;
1176 struct hci_dev *hdev;
1177 bool amp_available = false;
1179 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1182 if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1185 read_lock(&hci_dev_list_lock);
1186 list_for_each_entry(hdev, &hci_dev_list, list) {
1187 if (hdev->amp_type != AMP_TYPE_BREDR &&
1188 test_bit(HCI_UP, &hdev->flags)) {
1189 amp_available = true;
1193 read_unlock(&hci_dev_list_lock);
1195 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1196 return amp_available;
1201 static bool l2cap_check_efs(struct l2cap_chan *chan)
1203 /* Check EFS parameters */
1207 void l2cap_send_conn_req(struct l2cap_chan *chan)
1209 struct l2cap_conn *conn = chan->conn;
1210 struct l2cap_conn_req req;
1212 req.scid = cpu_to_le16(chan->scid);
1213 req.psm = chan->psm;
1215 chan->ident = l2cap_get_ident(conn);
1217 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1219 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1222 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1224 struct l2cap_create_chan_req req;
1225 req.scid = cpu_to_le16(chan->scid);
1226 req.psm = chan->psm;
1227 req.amp_id = amp_id;
1229 chan->ident = l2cap_get_ident(chan->conn);
1231 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1235 static void l2cap_move_setup(struct l2cap_chan *chan)
1237 struct sk_buff *skb;
1239 BT_DBG("chan %p", chan);
1241 if (chan->mode != L2CAP_MODE_ERTM)
1244 __clear_retrans_timer(chan);
1245 __clear_monitor_timer(chan);
1246 __clear_ack_timer(chan);
1248 chan->retry_count = 0;
1249 skb_queue_walk(&chan->tx_q, skb) {
1250 if (bt_cb(skb)->l2cap.retries)
1251 bt_cb(skb)->l2cap.retries = 1;
1256 chan->expected_tx_seq = chan->buffer_seq;
1258 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1259 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1260 l2cap_seq_list_clear(&chan->retrans_list);
1261 l2cap_seq_list_clear(&chan->srej_list);
1262 skb_queue_purge(&chan->srej_q);
1264 chan->tx_state = L2CAP_TX_STATE_XMIT;
1265 chan->rx_state = L2CAP_RX_STATE_MOVE;
1267 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1270 static void l2cap_move_done(struct l2cap_chan *chan)
1272 u8 move_role = chan->move_role;
1273 BT_DBG("chan %p", chan);
1275 chan->move_state = L2CAP_MOVE_STABLE;
1276 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1278 if (chan->mode != L2CAP_MODE_ERTM)
1281 switch (move_role) {
1282 case L2CAP_MOVE_ROLE_INITIATOR:
1283 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1284 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1286 case L2CAP_MOVE_ROLE_RESPONDER:
1287 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1292 static void l2cap_chan_ready(struct l2cap_chan *chan)
1294 /* The channel may have already been flagged as connected in
1295 * case of receiving data before the L2CAP info req/rsp
1296 * procedure is complete.
1298 if (chan->state == BT_CONNECTED)
1301 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1302 chan->conf_state = 0;
1303 __clear_chan_timer(chan);
1305 if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1306 chan->ops->suspend(chan);
1308 chan->state = BT_CONNECTED;
1310 chan->ops->ready(chan);
1313 static void l2cap_le_connect(struct l2cap_chan *chan)
1315 struct l2cap_conn *conn = chan->conn;
1316 struct l2cap_le_conn_req req;
1318 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1321 req.psm = chan->psm;
1322 req.scid = cpu_to_le16(chan->scid);
1323 req.mtu = cpu_to_le16(chan->imtu);
1324 req.mps = cpu_to_le16(chan->mps);
1325 req.credits = cpu_to_le16(chan->rx_credits);
1327 chan->ident = l2cap_get_ident(conn);
1329 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1333 static void l2cap_le_start(struct l2cap_chan *chan)
1335 struct l2cap_conn *conn = chan->conn;
1337 if (!smp_conn_security(conn->hcon, chan->sec_level))
1341 l2cap_chan_ready(chan);
1345 if (chan->state == BT_CONNECT)
1346 l2cap_le_connect(chan);
1349 static void l2cap_start_connection(struct l2cap_chan *chan)
1351 if (__amp_capable(chan)) {
1352 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1353 a2mp_discover_amp(chan);
1354 } else if (chan->conn->hcon->type == LE_LINK) {
1355 l2cap_le_start(chan);
1357 l2cap_send_conn_req(chan);
1361 static void l2cap_request_info(struct l2cap_conn *conn)
1363 struct l2cap_info_req req;
1365 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1368 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1370 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1371 conn->info_ident = l2cap_get_ident(conn);
1373 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1375 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Return true when the link either is not encrypted (no key size to
 * enforce) or uses an encryption key of at least HCI_MIN_ENC_KEY_SIZE.
 */
1379 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1381 	/* The minimum encryption key size needs to be enforced by the
1382 	 * host stack before establishing any L2CAP connections. The
1383 	 * specification in theory allows a minimum of 1, but to align
1384 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1386 	 * This check might also be called for unencrypted connections
1387 	 * that have no key size requirements. Ensure that the link is
1388 	 * actually encrypted before enforcing a key size.
1390 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1391 		hcon->enc_key_size >= HCI_MIN_ENC_KEY_SIZE);
/* Begin establishing @chan. LE links go straight to l2cap_le_start();
 * BR/EDR links first require the remote feature mask, then security,
 * and only connect if the encryption key size passes the minimum check —
 * otherwise the disconnect timer is armed to tear the link down.
 */
1394 static void l2cap_do_start(struct l2cap_chan *chan)
1396 	struct l2cap_conn *conn = chan->conn;
1398 	if (conn->hcon->type == LE_LINK) {
1399 		l2cap_le_start(chan);
1403 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1404 		l2cap_request_info(conn);
/* Wait until the information response arrives (or times out). */
1408 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1411 	if (!l2cap_chan_check_security(chan, true) ||
1412 	    !__l2cap_no_conn_pending(chan))
1415 	if (l2cap_check_enc_key_size(conn->hcon))
1416 		l2cap_start_connection(chan);
/* Key too short: refuse to connect and schedule disconnect. */
1418 		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Check whether @mode is supported by both the local feature mask and the
 * remote's @feat_mask. ERTM/streaming bits are OR'd into the local mask
 * here (presumably gated by a config condition elided from this listing).
 */
1421 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1423 	u32 local_feat_mask = l2cap_feat_mask;
1425 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1428 	case L2CAP_MODE_ERTM:
1429 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1430 	case L2CAP_MODE_STREAMING:
1431 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Initiate disconnection of @chan with error @err. Stops all ERTM timers
 * first so no retransmission fires mid-teardown. A2MP channels have no
 * L2CAP signalling disconnect and only transition state.
 */
1437 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1439 	struct l2cap_conn *conn = chan->conn;
1440 	struct l2cap_disconn_req req;
1445 	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1446 		__clear_retrans_timer(chan);
1447 		__clear_monitor_timer(chan);
1448 		__clear_ack_timer(chan);
1451 	if (chan->scid == L2CAP_CID_A2MP) {
1452 		l2cap_state_change(chan, BT_DISCONN);
1456 	req.dcid = cpu_to_le16(chan->dcid);
1457 	req.scid = cpu_to_le16(chan->scid);
1458 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
/* Record the error so the socket layer can report it to userspace. */
1461 	l2cap_state_change_and_error(chan, BT_DISCONN, err);
1464 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and push each one forward through
 * connection setup: BT_CONNECT channels get a connection request (or are
 * closed if their mode/security cannot be satisfied); BT_CONNECT2
 * channels get a connection response and, on success, the first
 * configuration request. Runs under conn->chan_lock with each channel
 * individually locked.
 */
1465 static void l2cap_conn_start(struct l2cap_conn *conn)
1467 	struct l2cap_chan *chan, *tmp;
1469 	BT_DBG("conn %p", conn);
1471 	mutex_lock(&conn->chan_lock);
1473 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1474 		l2cap_chan_lock(chan);
/* Non connection-oriented channels are ready as soon as the link is up. */
1476 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1477 			l2cap_chan_ready(chan);
1478 			l2cap_chan_unlock(chan);
1482 		if (chan->state == BT_CONNECT) {
1483 			if (!l2cap_chan_check_security(chan, true) ||
1484 			    !__l2cap_no_conn_pending(chan)) {
1485 				l2cap_chan_unlock(chan);
/* Close channels whose required mode the remote cannot do. */
1489 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1490 			    && test_bit(CONF_STATE2_DEVICE,
1491 					&chan->conf_state)) {
1492 				l2cap_chan_close(chan, ECONNRESET);
1493 				l2cap_chan_unlock(chan);
1497 			if (l2cap_check_enc_key_size(conn->hcon))
1498 				l2cap_start_connection(chan);
1500 				l2cap_chan_close(chan, ECONNREFUSED);
1502 		} else if (chan->state == BT_CONNECT2) {
1503 			struct l2cap_conn_rsp rsp;
1505 			rsp.scid = cpu_to_le16(chan->dcid);
1506 			rsp.dcid = cpu_to_le16(chan->scid);
1508 			if (l2cap_chan_check_security(chan, false)) {
1509 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
/* Defer to userspace: answer "pending, authorization pending". */
1510 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1511 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1512 					chan->ops->defer(chan);
1515 					l2cap_state_change(chan, BT_CONFIG);
1516 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1517 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1520 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1521 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1524 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only send the first config request once, and only on success. */
1527 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1528 			    rsp.result != L2CAP_CR_SUCCESS) {
1529 				l2cap_chan_unlock(chan);
1533 			set_bit(CONF_REQ_SENT, &chan->conf_state);
1534 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1535 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1536 			chan->num_conf_req++;
1539 		l2cap_chan_unlock(chan);
1542 	mutex_unlock(&conn->chan_lock);
/* LE link came up: kick off any pending SMP pairing and, for the slave
 * role, request a connection-parameter update if the current interval
 * falls outside the configured min/max window.
 */
1545 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1547 	struct hci_conn *hcon = conn->hcon;
1548 	struct hci_dev *hdev = hcon->hdev;
1550 	BT_DBG("%s conn %p", hdev->name, conn);
1552 	/* For outgoing pairing which doesn't necessarily have an
1553 	 * associated socket (e.g. mgmt_pair_device).
1556 	smp_conn_security(hcon, hcon->pending_sec_level);
1558 	/* For LE slave connections, make sure the connection interval
1559 	 * is in the range of the minimum and maximum interval that has
1560 	 * been configured for this connection. If not, then trigger
1561 	 * the connection update procedure.
1563 	if (hcon->role == HCI_ROLE_SLAVE &&
1564 	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1565 	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1566 		struct l2cap_conn_param_update_req req;
1568 		req.min = cpu_to_le16(hcon->le_conn_min_interval);
1569 		req.max = cpu_to_le16(hcon->le_conn_max_interval);
1570 		req.latency = cpu_to_le16(hcon->le_conn_latency);
1571 		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1573 		l2cap_send_cmd(conn, l2cap_get_ident(conn),
1574 			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
/* The underlying link is fully up: request remote features (ACL), then
 * advance every attached channel, finish LE-specific setup, and finally
 * schedule processing of any RX frames that queued up meanwhile.
 */
1578 static void l2cap_conn_ready(struct l2cap_conn *conn)
1580 	struct l2cap_chan *chan;
1581 	struct hci_conn *hcon = conn->hcon;
1583 	BT_DBG("conn %p", conn);
1585 	if (hcon->type == ACL_LINK)
1586 		l2cap_request_info(conn);
1588 	mutex_lock(&conn->chan_lock);
1590 	list_for_each_entry(chan, &conn->chan_l, list) {
1592 		l2cap_chan_lock(chan);
/* A2MP channels are managed elsewhere; skip them here. */
1594 		if (chan->scid == L2CAP_CID_A2MP) {
1595 			l2cap_chan_unlock(chan);
1599 		if (hcon->type == LE_LINK) {
1600 			l2cap_le_start(chan);
1601 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1602 			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1603 				l2cap_chan_ready(chan);
1604 		} else if (chan->state == BT_CONNECT) {
1605 			l2cap_do_start(chan);
1608 		l2cap_chan_unlock(chan);
1611 	mutex_unlock(&conn->chan_lock);
1613 	if (hcon->type == LE_LINK)
1614 		l2cap_le_conn_ready(conn);
1616 	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1619 /* Notify sockets that we cannot guarantee reliability anymore */
1620 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1622 	struct l2cap_chan *chan;
1624 	BT_DBG("conn %p", conn);
1626 	mutex_lock(&conn->chan_lock);
/* Only channels that demanded reliable delivery receive the error. */
1628 	list_for_each_entry(chan, &conn->chan_l, list) {
1629 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1630 			l2cap_chan_set_err(chan, err);
1633 	mutex_unlock(&conn->chan_lock);
/* info_timer expired without an information response: pretend the
 * feature-mask exchange completed so channel setup can proceed anyway.
 */
1636 static void l2cap_info_timeout(struct work_struct *work)
1638 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1641 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1642 	conn->info_ident = 0;
1644 	l2cap_conn_start(conn);
1649 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1650 * callback is called during registration. The ->remove callback is called
1651 * during unregistration.
1652 * An l2cap_user object can either be explicitly unregistered or when the
1653 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1654 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1655 * External modules must own a reference to the l2cap_conn object if they intend
1656 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1657 * any time if they don't.
/* Register an external l2cap_user on @conn. Fails if the user is already
 * on a list or the conn was unregistered (conn->hchan == NULL). The
 * user's ->probe() callback runs under the hci_dev lock.
 */
1660 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1662 	struct hci_dev *hdev = conn->hcon->hdev;
1665 	/* We need to check whether l2cap_conn is registered. If it is not, we
1666 	 * must not register the l2cap_user. l2cap_conn_del() unregisters
1667 	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1668 	 * relies on the parent hci_conn object to be locked. This itself relies
1669 	 * on the hci_dev object to be locked. So we must lock the hci device
1674 	if (!list_empty(&user->list)) {
1679 	/* conn->hchan is NULL after l2cap_conn_del() was called */
1685 	ret = user->probe(conn, user);
1689 	list_add(&user->list, &conn->users);
1693 	hci_dev_unlock(hdev);
/* Unregister @user from @conn, invoking its ->remove() callback. A no-op
 * if the user is not currently registered. Runs under the hci_dev lock
 * (the hci_dev_lock() call is elided from this listing).
 */
1698 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1700 	struct hci_dev *hdev = conn->hcon->hdev;
1704 	if (list_empty(&user->list))
1707 	list_del_init(&user->list);
1708 	user->remove(conn, user);
1711 	hci_dev_unlock(hdev);
/* Remove every registered l2cap_user from @conn, calling each one's
 * ->remove() callback. list_del_init() lets the user be re-registered
 * later and makes a concurrent unregister a safe no-op.
 */
1715 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1717 	struct l2cap_user *user;
1719 	while (!list_empty(&conn->users)) {
1720 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1721 		list_del_init(&user->list);
1722 		user->remove(conn, user);
/* Tear down the whole L2CAP connection attached to @hcon, propagating
 * @err to every channel. Cancels pending RX work (without flush_work, to
 * avoid deadlocking when called from that same workqueue), kills all
 * users and channels, detaches the HCI channel and drops the conn ref.
 */
1726 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1728 	struct l2cap_conn *conn = hcon->l2cap_data;
1729 	struct l2cap_chan *chan, *l;
1734 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1736 	kfree_skb(conn->rx_skb);
1738 	skb_queue_purge(&conn->pending_rx);
1740 	/* We can not call flush_work(&conn->pending_rx_work) here since we
1741 	 * might block if we are running on a worker from the same workqueue
1742 	 * pending_rx_work is waiting on.
1744 	if (work_pending(&conn->pending_rx_work))
1745 		cancel_work_sync(&conn->pending_rx_work);
1747 	if (work_pending(&conn->id_addr_update_work))
1748 		cancel_work_sync(&conn->id_addr_update_work);
1750 	l2cap_unregister_all_users(conn);
1752 	/* Force the connection to be immediately dropped */
1753 	hcon->disc_timeout = 0;
1755 	mutex_lock(&conn->chan_lock);
/* Hold each channel across its teardown so ops->close can't free it. */
1758 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1759 		l2cap_chan_hold(chan);
1760 		l2cap_chan_lock(chan);
1762 		l2cap_chan_del(chan, err);
1764 		chan->ops->close(chan);
1766 		l2cap_chan_unlock(chan);
1767 		l2cap_chan_put(chan);
1770 	mutex_unlock(&conn->chan_lock);
1772 	hci_chan_del(conn->hchan);
1774 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1775 		cancel_delayed_work_sync(&conn->info_timer);
1777 	hcon->l2cap_data = NULL;
1779 	l2cap_conn_put(conn);
/* kref release callback: drop the hci_conn reference and free the conn
 * (the kfree call is elided from this listing).
 */
1782 static void l2cap_conn_free(struct kref *ref)
1784 	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1786 	hci_conn_put(conn->hcon);
/* Take an additional reference on @conn; the return statement handing
 * back @conn is elided from this listing.
 */
1790 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1792 	kref_get(&conn->ref);
1795 EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference on @conn; l2cap_conn_free() runs when it hits zero. */
1797 void l2cap_conn_put(struct l2cap_conn *conn)
1799 	kref_put(&conn->ref, l2cap_conn_free);
1801 EXPORT_SYMBOL(l2cap_conn_put);
1803 /* ---- Socket interface ---- */
1805 /* Find socket with psm and source / destination bdaddr.
1806  * Returns closest match.
1808 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1813 	struct l2cap_chan *c, *tmp, *c1 = NULL;
1815 	read_lock(&chan_list_lock);
1817 	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
/* Filter by requested state and by transport/address-type match. */
1818 		if (state && c->state != state)
1821 		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1824 		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1827 		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
1828 			int src_match, dst_match;
1829 			int src_any, dst_any;
1832 			src_match = !bacmp(&c->src, src);
1833 			dst_match = !bacmp(&c->dst, dst);
/* Exact src+dst match wins immediately — but only if the channel's
 * refcount can still be raised (it may be dying concurrently). */
1834 			if (src_match && dst_match) {
1835 				if (!l2cap_chan_hold_unless_zero(c))
1838 				read_unlock(&chan_list_lock);
/* Otherwise remember the best wildcard (BDADDR_ANY) candidate. */
1843 			src_any = !bacmp(&c->src, BDADDR_ANY);
1844 			dst_any = !bacmp(&c->dst, BDADDR_ANY);
1845 			if ((src_match && dst_any) || (src_any && dst_match) ||
1846 			    (src_any && dst_any))
1852 		c1 = l2cap_chan_hold_unless_zero(c1);
1854 	read_unlock(&chan_list_lock);
/* ERTM monitor timer expired: feed L2CAP_EV_MONITOR_TO into the TX state
 * machine. The early-exit path (channel already closing) and its
 * condition are elided from this listing; both paths drop the reference
 * the timer held on the channel.
 */
1859 static void l2cap_monitor_timeout(struct work_struct *work)
1861 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1862 					       monitor_timer.work);
1864 	BT_DBG("chan %p", chan);
1866 	l2cap_chan_lock(chan);
1869 		l2cap_chan_unlock(chan);
1870 		l2cap_chan_put(chan);
1874 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1876 	l2cap_chan_unlock(chan);
1877 	l2cap_chan_put(chan);
/* ERTM retransmission timer expired: feed L2CAP_EV_RETRANS_TO into the
 * TX state machine. Mirrors l2cap_monitor_timeout(); the early-exit
 * condition is elided from this listing.
 */
1880 static void l2cap_retrans_timeout(struct work_struct *work)
1882 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1883 					       retrans_timer.work);
1885 	BT_DBG("chan %p", chan);
1887 	l2cap_chan_lock(chan);
1890 		l2cap_chan_unlock(chan);
1891 		l2cap_chan_put(chan);
1895 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1896 	l2cap_chan_unlock(chan);
1897 	l2cap_chan_put(chan);
/* Transmit @skbs in streaming mode: every queued frame is sent
 * immediately (no retransmission, no ack bookkeeping beyond the sequence
 * number), appending a CRC16 FCS when the channel negotiated one.
 */
1900 static void l2cap_streaming_send(struct l2cap_chan *chan,
1901 				 struct sk_buff_head *skbs)
1903 	struct sk_buff *skb;
1904 	struct l2cap_ctrl *control;
1906 	BT_DBG("chan %p, skbs %p", chan, skbs);
/* Don't send while an AMP channel move is in progress. */
1908 	if (__chan_is_moving(chan))
1911 	skb_queue_splice_tail_init(skbs, &chan->tx_q);
1913 	while (!skb_queue_empty(&chan->tx_q)) {
1915 		skb = skb_dequeue(&chan->tx_q);
1917 		bt_cb(skb)->l2cap.retries = 1;
1918 		control = &bt_cb(skb)->l2cap;
1920 		control->reqseq = 0;
1921 		control->txseq = chan->next_tx_seq;
1923 		__pack_control(chan, control, skb);
1925 		if (chan->fcs == L2CAP_FCS_CRC16) {
1926 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1927 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1930 		l2cap_do_send(chan, skb);
1932 		BT_DBG("Sent txseq %u", control->txseq);
1934 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1935 		chan->frames_sent++;
/* Transmit pending I-frames in ERTM mode, bounded by the remote TX
 * window. Each frame is cloned before sending so the original stays in
 * tx_q for possible retransmission; the retransmission timer is armed
 * per frame. Returns the number of frames sent (return statements are
 * elided from this listing).
 */
1939 static int l2cap_ertm_send(struct l2cap_chan *chan)
1941 	struct sk_buff *skb, *tx_skb;
1942 	struct l2cap_ctrl *control;
1945 	BT_DBG("chan %p", chan);
1947 	if (chan->state != BT_CONNECTED)
1950 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1953 	if (__chan_is_moving(chan))
1956 	while (chan->tx_send_head &&
1957 	       chan->unacked_frames < chan->remote_tx_win &&
1958 	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
1960 		skb = chan->tx_send_head;
1962 		bt_cb(skb)->l2cap.retries = 1;
1963 		control = &bt_cb(skb)->l2cap;
1965 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Piggyback an acknowledgement on every outgoing I-frame. */
1968 		control->reqseq = chan->buffer_seq;
1969 		chan->last_acked_seq = chan->buffer_seq;
1970 		control->txseq = chan->next_tx_seq;
1972 		__pack_control(chan, control, skb);
1974 		if (chan->fcs == L2CAP_FCS_CRC16) {
1975 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1976 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1979 		/* Clone after data has been modified. Data is assumed to be
1980 		   read-only (for locking purposes) on cloned sk_buffs.
1982 		tx_skb = skb_clone(skb, GFP_KERNEL);
1987 		__set_retrans_timer(chan);
1989 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1990 		chan->unacked_frames++;
1991 		chan->frames_sent++;
1994 		if (skb_queue_is_last(&chan->tx_q, skb))
1995 			chan->tx_send_head = NULL;
1997 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1999 		l2cap_do_send(chan, tx_skb);
2000 		BT_DBG("Sent txseq %u", control->txseq);
2003 	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2004 	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on chan->retrans_list. Each
 * frame's retry count is checked against max_tx (disconnecting on
 * overflow), its control field is rebuilt with the current reqseq and
 * F-bit, the FCS is recomputed, and a fresh clone/copy is sent.
 */
2009 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2011 	struct l2cap_ctrl control;
2012 	struct sk_buff *skb;
2013 	struct sk_buff *tx_skb;
2016 	BT_DBG("chan %p", chan);
2018 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2021 	if (__chan_is_moving(chan))
2024 	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2025 		seq = l2cap_seq_list_pop(&chan->retrans_list);
2027 		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2029 			BT_DBG("Error: Can't retransmit seq %d, frame missing",
2034 		bt_cb(skb)->l2cap.retries++;
2035 		control = bt_cb(skb)->l2cap;
2037 		if (chan->max_tx != 0 &&
2038 		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
2039 			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2040 			l2cap_send_disconn_req(chan, ECONNRESET);
2041 			l2cap_seq_list_clear(&chan->retrans_list);
2045 		control.reqseq = chan->buffer_seq;
2046 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2051 		if (skb_cloned(skb)) {
2052 			/* Cloned sk_buffs are read-only, so we need a
/* writable copy; otherwise a cheap clone suffices. */
2055 			tx_skb = skb_copy(skb, GFP_KERNEL);
2057 			tx_skb = skb_clone(skb, GFP_KERNEL);
/* Allocation failure: abandon the retransmission batch. */
2061 			l2cap_seq_list_clear(&chan->retrans_list);
2065 		/* Update skb contents */
2066 		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2067 			put_unaligned_le32(__pack_extended_control(&control),
2068 					   tx_skb->data + L2CAP_HDR_SIZE);
2070 			put_unaligned_le16(__pack_enhanced_control(&control),
2071 					   tx_skb->data + L2CAP_HDR_SIZE);
2075 		if (chan->fcs == L2CAP_FCS_CRC16) {
2076 			u16 fcs = crc16(0, (u8 *) tx_skb->data,
2077 					tx_skb->len - L2CAP_FCS_SIZE);
2078 			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2082 		l2cap_do_send(chan, tx_skb);
2084 		BT_DBG("Resent txseq %d", control.txseq);
2086 		chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame identified by control->reqseq. */
2090 static void l2cap_retransmit(struct l2cap_chan *chan,
2091 			     struct l2cap_ctrl *control)
2093 	BT_DBG("chan %p, control %p", chan, control);
2095 	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2096 	l2cap_ertm_resend(chan);
/* Retransmit every unacked frame starting at control->reqseq: rebuild
 * retrans_list by walking tx_q from reqseq up to (not including)
 * tx_send_head, then resend the batch. Skipped while the remote is busy.
 */
2099 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2100 				 struct l2cap_ctrl *control)
2102 	struct sk_buff *skb;
2104 	BT_DBG("chan %p, control %p", chan, control);
2107 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
2109 	l2cap_seq_list_clear(&chan->retrans_list);
2111 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2114 	if (chan->unacked_frames) {
/* Locate the first frame to retransmit... */
2115 		skb_queue_walk(&chan->tx_q, skb) {
2116 			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2117 			    skb == chan->tx_send_head)
/* ...then queue everything up to the unsent head. */
2121 		skb_queue_walk_from(&chan->tx_q, skb) {
2122 			if (skb == chan->tx_send_head)
2125 			l2cap_seq_list_append(&chan->retrans_list,
2126 					      bt_cb(skb)->l2cap.txseq);
2129 		l2cap_ertm_resend(chan);
/* Acknowledge received I-frames. Sends RNR when locally busy; otherwise
 * tries to piggyback the ack on pending I-frames, sends an explicit RR
 * when the receive window is 3/4 full, or (fallback paths elided) arms
 * the ack timer to coalesce future acks.
 */
2133 static void l2cap_send_ack(struct l2cap_chan *chan)
2135 	struct l2cap_ctrl control;
2136 	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2137 					 chan->last_acked_seq);
2140 	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2141 	       chan, chan->last_acked_seq, chan->buffer_seq);
2143 	memset(&control, 0, sizeof(control));
2146 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2147 	    chan->rx_state == L2CAP_RX_STATE_RECV) {
2148 		__clear_ack_timer(chan);
2149 		control.super = L2CAP_SUPER_RNR;
2150 		control.reqseq = chan->buffer_seq;
2151 		l2cap_send_sframe(chan, &control);
2153 		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2154 			l2cap_ertm_send(chan);
2155 			/* If any i-frames were sent, they included an ack */
2156 			if (chan->buffer_seq == chan->last_acked_seq)
2160 		/* Ack now if the window is 3/4ths full.
2161 		 * Calculate without mul or div
2163 		threshold = chan->ack_win;
2164 		threshold += threshold << 1;
/* threshold is now 3 * ack_win; a shift elsewhere divides by 4. */
2167 		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2170 		if (frames_to_ack >= threshold) {
2171 			__clear_ack_timer(chan);
2172 			control.super = L2CAP_SUPER_RR;
2173 			control.reqseq = chan->buffer_seq;
2174 			l2cap_send_sframe(chan, &control);
2179 			__set_ack_timer(chan);
/* Copy @len bytes of user data from @msg into @skb, spilling anything
 * beyond the first @count bytes into continuation fragments on the
 * frag_list, each at most conn->mtu bytes. Returns 0 on success;
 * error returns for failed copies are elided from this listing.
 */
2183 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2184 					 struct msghdr *msg, int len,
2185 					 int count, struct sk_buff *skb)
2187 	struct l2cap_conn *conn = chan->conn;
2188 	struct sk_buff **frag;
2191 	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2197 	/* Continuation fragments (no L2CAP header) */
2198 	frag = &skb_shinfo(skb)->frag_list;
2200 		struct sk_buff *tmp;
2202 		count = min_t(unsigned int, conn->mtu, len);
2204 		tmp = chan->ops->alloc_skb(chan, 0, count,
2205 					   msg->msg_flags & MSG_DONTWAIT);
2207 			return PTR_ERR(tmp);
2211 		if (!copy_from_iter_full(skb_put(*frag, count), count,
/* Account fragment bytes on the parent skb's totals. */
2218 		skb->len += (*frag)->len;
2219 		skb->data_len += (*frag)->len;
2221 		frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed by
 * the 2-byte PSM, then the user payload copied from @msg. Returns the
 * skb or an ERR_PTR on copy failure (the skb is freed on that path;
 * the kfree_skb line is elided from this listing).
 */
2227 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2228 						 struct msghdr *msg, size_t len)
2230 	struct l2cap_conn *conn = chan->conn;
2231 	struct sk_buff *skb;
2232 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2233 	struct l2cap_hdr *lh;
2235 	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2236 	       __le16_to_cpu(chan->psm), len);
2238 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2240 	skb = chan->ops->alloc_skb(chan, hlen, count,
2241 				   msg->msg_flags & MSG_DONTWAIT);
2245 	/* Create L2CAP header */
2246 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2247 	lh->cid = cpu_to_le16(chan->dcid);
2248 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2249 	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2251 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2252 	if (unlikely(err < 0)) {
2254 		return ERR_PTR(err);
/* Build a basic-mode B-frame: plain L2CAP header plus the user payload
 * from @msg. Returns the skb or an ERR_PTR on copy failure.
 */
2259 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2260 					      struct msghdr *msg, size_t len)
2262 	struct l2cap_conn *conn = chan->conn;
2263 	struct sk_buff *skb;
2265 	struct l2cap_hdr *lh;
2267 	BT_DBG("chan %p len %zu", chan, len);
2269 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2271 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2272 				   msg->msg_flags & MSG_DONTWAIT);
2276 	/* Create L2CAP header */
2277 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2278 	lh->cid = cpu_to_le16(chan->dcid);
2279 	lh->len = cpu_to_le16(len);
2281 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2282 	if (unlikely(err < 0)) {
2284 		return ERR_PTR(err);
/* Build one ERTM/streaming I-frame: L2CAP header, a placeholder control
 * field (enhanced or extended, filled in later by __pack_control), an
 * optional SDU-length field for the first segment, the payload, and FCS
 * bookkeeping in the skb control block. Returns the skb or ERR_PTR.
 */
2289 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2290 					       struct msghdr *msg, size_t len,
2293 	struct l2cap_conn *conn = chan->conn;
2294 	struct sk_buff *skb;
2295 	int err, count, hlen;
2296 	struct l2cap_hdr *lh;
2298 	BT_DBG("chan %p len %zu", chan, len);
2301 		return ERR_PTR(-ENOTCONN);
2303 	hlen = __ertm_hdr_size(chan);
/* First segment of a segmented SDU carries the total SDU length. */
2306 		hlen += L2CAP_SDULEN_SIZE;
2308 	if (chan->fcs == L2CAP_FCS_CRC16)
2309 		hlen += L2CAP_FCS_SIZE;
2311 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2313 	skb = chan->ops->alloc_skb(chan, hlen, count,
2314 				   msg->msg_flags & MSG_DONTWAIT);
2318 	/* Create L2CAP header */
2319 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2320 	lh->cid = cpu_to_le16(chan->dcid);
2321 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2323 	/* Control header is populated later */
2324 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2325 		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2327 		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2330 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2332 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2333 	if (unlikely(err < 0)) {
2335 		return ERR_PTR(err);
2338 	bt_cb(skb)->l2cap.fcs = chan->fcs;
2339 	bt_cb(skb)->l2cap.retries = 0;
/* Split an SDU from @msg into ERTM/streaming PDUs on @seg_queue. The
 * per-PDU payload is bounded by the HCI MTU (ERTM PDUs must fit one HCI
 * fragment), L2CAP overhead, and the remote MPS. SAR markers are set:
 * UNSEGMENTED for a single PDU, otherwise START/CONTINUE/END.
 */
2343 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2344 			     struct sk_buff_head *seg_queue,
2345 			     struct msghdr *msg, size_t len)
2347 	struct sk_buff *skb;
2352 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2354 	/* It is critical that ERTM PDUs fit in a single HCI fragment,
2355 	 * so fragmented skbs are not used. The HCI layer's handling
2356 	 * of fragmented skbs is not compatible with ERTM's queueing.
2359 	/* PDU size is derived from the HCI MTU */
2360 	pdu_len = chan->conn->mtu;
2362 	/* Constrain PDU size for BR/EDR connections */
2364 	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2366 	/* Adjust for largest possible L2CAP overhead. */
2368 		pdu_len -= L2CAP_FCS_SIZE;
2370 	pdu_len -= __ertm_hdr_size(chan);
2372 	/* Remote device may have requested smaller PDUs */
2373 	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2375 	if (len <= pdu_len) {
2376 		sar = L2CAP_SAR_UNSEGMENTED;
2380 		sar = L2CAP_SAR_START;
/* Segmentation loop (while/do construct elided from this listing). */
2385 		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2388 			__skb_queue_purge(seg_queue);
2389 			return PTR_ERR(skb);
2392 		bt_cb(skb)->l2cap.sar = sar;
2393 		__skb_queue_tail(seg_queue, skb);
2399 		if (len <= pdu_len) {
2400 			sar = L2CAP_SAR_END;
2403 			sar = L2CAP_SAR_CONTINUE;
/* Build one LE credit-based flow control PDU: basic L2CAP header, an
 * optional SDU-length field on the first segment, then the payload from
 * @msg. Returns the skb or an ERR_PTR.
 */
2410 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2412 						   size_t len, u16 sdulen)
2414 	struct l2cap_conn *conn = chan->conn;
2415 	struct sk_buff *skb;
2416 	int err, count, hlen;
2417 	struct l2cap_hdr *lh;
2419 	BT_DBG("chan %p len %zu", chan, len);
2422 		return ERR_PTR(-ENOTCONN);
2424 	hlen = L2CAP_HDR_SIZE;
2427 		hlen += L2CAP_SDULEN_SIZE;
2429 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2431 	skb = chan->ops->alloc_skb(chan, hlen, count,
2432 				   msg->msg_flags & MSG_DONTWAIT);
2436 	/* Create L2CAP header */
2437 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2438 	lh->cid = cpu_to_le16(chan->dcid);
2439 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2442 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2444 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2445 	if (unlikely(err < 0)) {
2447 		return ERR_PTR(err);
/* Split an SDU into LE flow-control PDUs on @seg_queue. The first PDU
 * reserves room for the 2-byte SDU length; subsequent PDUs reclaim that
 * space (pdu_len grows back by L2CAP_SDULEN_SIZE after the first).
 */
2453 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2454 				struct sk_buff_head *seg_queue,
2455 				struct msghdr *msg, size_t len)
2457 	struct sk_buff *skb;
2461 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2464 	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2470 		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2472 			__skb_queue_purge(seg_queue);
2473 			return PTR_ERR(skb);
2476 		__skb_queue_tail(seg_queue, skb);
2482 			pdu_len += L2CAP_SDULEN_SIZE;
/* Send queued LE PDUs while TX credits remain; the per-frame credit
 * decrement and sent counter lines are elided from this listing.
 */
2489 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2493 	BT_DBG("chan %p", chan);
2495 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2496 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2501 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2502 	       skb_queue_len(&chan->tx_q));
/* Main datapath entry: send @len bytes from @msg on @chan, dispatching
 * on channel type/mode. Connectionless channels send a single G-frame;
 * LE_FLOWCTL segments and sends within credits (suspending the socket
 * when credits run out); BASIC sends one B-frame; ERTM/STREAMING segment
 * first and then hand the queue to the TX state machine. The channel
 * lock is dropped around skb allocation, so the state is rechecked after
 * every PDU-creation call. Returns bytes sent or a negative errno.
 */
2505 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2507 	struct sk_buff *skb;
2509 	struct sk_buff_head seg_queue;
2514 	/* Connectionless channel */
2515 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2516 		skb = l2cap_create_connless_pdu(chan, msg, len);
2518 			return PTR_ERR(skb);
2520 		/* Channel lock is released before requesting new skb and then
2521 		 * reacquired thus we need to recheck channel state.
2523 		if (chan->state != BT_CONNECTED) {
2528 		l2cap_do_send(chan, skb);
2532 	switch (chan->mode) {
2533 	case L2CAP_MODE_LE_FLOWCTL:
2534 		/* Check outgoing MTU */
2535 		if (len > chan->omtu)
2538 		__skb_queue_head_init(&seg_queue);
2540 		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2542 		if (chan->state != BT_CONNECTED) {
2543 			__skb_queue_purge(&seg_queue);
2550 		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2552 		l2cap_le_flowctl_send(chan);
/* Out of credits: stop the socket from queueing more data. */
2554 		if (!chan->tx_credits)
2555 			chan->ops->suspend(chan);
2561 	case L2CAP_MODE_BASIC:
2562 		/* Check outgoing MTU */
2563 		if (len > chan->omtu)
2566 		/* Create a basic PDU */
2567 		skb = l2cap_create_basic_pdu(chan, msg, len);
2569 			return PTR_ERR(skb);
2571 		/* Channel lock is released before requesting new skb and then
2572 		 * reacquired thus we need to recheck channel state.
2574 		if (chan->state != BT_CONNECTED) {
2579 		l2cap_do_send(chan, skb);
2583 	case L2CAP_MODE_ERTM:
2584 	case L2CAP_MODE_STREAMING:
2585 		/* Check outgoing MTU */
2586 		if (len > chan->omtu) {
2591 		__skb_queue_head_init(&seg_queue);
2593 		/* Do segmentation before calling in to the state machine,
2594 		 * since it's possible to block while waiting for memory
2597 		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2599 		/* The channel could have been closed while segmenting,
2600 		 * check that it is still connected.
2602 		if (chan->state != BT_CONNECTED) {
2603 			__skb_queue_purge(&seg_queue);
2610 		if (chan->mode == L2CAP_MODE_ERTM)
2611 			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2613 			l2cap_streaming_send(chan, &seg_queue);
2617 		/* If the skbs were not queued for sending, they'll still be in
2618 		 * seg_queue and need to be purged.
2620 		__skb_queue_purge(&seg_queue);
2624 		BT_DBG("bad state %1.1x", chan->mode);
/* Send an SREJ S-frame for every sequence number from expected_tx_seq up
 * to (excluding) @txseq that is not already buffered in srej_q, and
 * record each on srej_list so the responses can be tracked.
 */
2632 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2634 	struct l2cap_ctrl control;
2637 	BT_DBG("chan %p, txseq %u", chan, txseq);
2639 	memset(&control, 0, sizeof(control));
2641 	control.super = L2CAP_SUPER_SREJ;
2643 	for (seq = chan->expected_tx_seq; seq != txseq;
2644 	     seq = __next_seq(chan, seq)) {
2645 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2646 			control.reqseq = seq;
2647 			l2cap_send_sframe(chan, &control);
2648 			l2cap_seq_list_append(&chan->srej_list, seq);
2652 	chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recent (tail) entry on srej_list; no-op
 * when the list is empty.
 */
2655 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2657 	struct l2cap_ctrl control;
2659 	BT_DBG("chan %p", chan);
2661 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2664 	memset(&control, 0, sizeof(control));
2666 	control.super = L2CAP_SUPER_SREJ;
2667 	control.reqseq = chan->srej_list.tail;
2668 	l2cap_send_sframe(chan, &control);
/* Re-send SREJ frames for all outstanding entries on srej_list except
 * @txseq, rotating each popped entry back onto the list. The captured
 * initial head bounds the do/while to a single pass.
 */
2671 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2673 	struct l2cap_ctrl control;
2677 	BT_DBG("chan %p, txseq %u", chan, txseq);
2679 	memset(&control, 0, sizeof(control));
2681 	control.super = L2CAP_SUPER_SREJ;
2683 	/* Capture initial list head to allow only one pass through the list. */
2684 	initial_head = chan->srej_list.head;
2687 		seq = l2cap_seq_list_pop(&chan->srej_list);
2688 		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2691 		control.reqseq = seq;
2692 		l2cap_send_sframe(chan, &control);
2693 		l2cap_seq_list_append(&chan->srej_list, seq);
2694 	} while (chan->srej_list.head != initial_head);
/* Process an incoming acknowledgement: release every frame from
 * expected_ack_seq up to (excluding) @reqseq from tx_q, and stop the
 * retransmission timer once nothing remains unacked.
 */
2697 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2699 	struct sk_buff *acked_skb;
2702 	BT_DBG("chan %p, reqseq %u", chan, reqseq);
2704 	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2707 	BT_DBG("expected_ack_seq %u, unacked_frames %u",
2708 	       chan->expected_ack_seq, chan->unacked_frames);
2710 	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2711 	     ackseq = __next_seq(chan, ackseq)) {
2713 		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2715 			skb_unlink(acked_skb, &chan->tx_q);
2716 			kfree_skb(acked_skb);
2717 			chan->unacked_frames--;
2721 	chan->expected_ack_seq = reqseq;
2723 	if (chan->unacked_frames == 0)
2724 		__clear_retrans_timer(chan);
2726 	BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abandon the SREJ_SENT receive state: rewind expected_tx_seq to the
 * last delivered frame, discard all buffered out-of-order frames, and
 * fall back to plain RECV.
 */
2729 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2731 	BT_DBG("chan %p", chan);
2733 	chan->expected_tx_seq = chan->buffer_seq;
2734 	l2cap_seq_list_clear(&chan->srej_list);
2735 	skb_queue_purge(&chan->srej_q);
2736 	chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM TX state machine, XMIT state: queue and send new data, enter or
 * leave local-busy (RNR handling), process acks, and on an explicit poll
 * or retransmission timeout send RR/RNR with P=1 and move to WAIT_F.
 */
2739 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2740 				struct l2cap_ctrl *control,
2741 				struct sk_buff_head *skbs, u8 event)
2743 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2747 	case L2CAP_EV_DATA_REQUEST:
2748 		if (chan->tx_send_head == NULL)
2749 			chan->tx_send_head = skb_peek(skbs);
2751 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2752 		l2cap_ertm_send(chan);
2754 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2755 		BT_DBG("Enter LOCAL_BUSY");
2756 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2758 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2759 			/* The SREJ_SENT state must be aborted if we are to
2760 			 * enter the LOCAL_BUSY state.
2762 			l2cap_abort_rx_srej_sent(chan);
2765 		l2cap_send_ack(chan);
2768 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2769 		BT_DBG("Exit LOCAL_BUSY");
2770 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2772 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2773 			struct l2cap_ctrl local_control;
/* We previously sent RNR: poll the peer with RR(P=1) and wait for F. */
2775 			memset(&local_control, 0, sizeof(local_control));
2776 			local_control.sframe = 1;
2777 			local_control.super = L2CAP_SUPER_RR;
2778 			local_control.poll = 1;
2779 			local_control.reqseq = chan->buffer_seq;
2780 			l2cap_send_sframe(chan, &local_control);
2782 			chan->retry_count = 1;
2783 			__set_monitor_timer(chan);
2784 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2787 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2788 		l2cap_process_reqseq(chan, control->reqseq);
2790 	case L2CAP_EV_EXPLICIT_POLL:
2791 		l2cap_send_rr_or_rnr(chan, 1);
2792 		chan->retry_count = 1;
2793 		__set_monitor_timer(chan);
2794 		__clear_ack_timer(chan);
2795 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2797 	case L2CAP_EV_RETRANS_TO:
2798 		l2cap_send_rr_or_rnr(chan, 1);
2799 		chan->retry_count = 1;
2800 		__set_monitor_timer(chan);
2801 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2803 	case L2CAP_EV_RECV_FBIT:
2804 		/* Nothing to process */
/* ERTM TX state machine, WAIT_F state: we sent a poll (P=1) and are
 * waiting for the matching final (F=1). New data is queued but not
 * sent; on the F-bit we return to XMIT; on a monitor timeout we re-poll
 * up to max_tx times, then disconnect with ECONNABORTED.
 */
2811 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2812 				  struct l2cap_ctrl *control,
2813 				  struct sk_buff_head *skbs, u8 event)
2815 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2819 	case L2CAP_EV_DATA_REQUEST:
2820 		if (chan->tx_send_head == NULL)
2821 			chan->tx_send_head = skb_peek(skbs);
2822 		/* Queue data, but don't send. */
2823 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2825 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2826 		BT_DBG("Enter LOCAL_BUSY");
2827 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2829 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2830 			/* The SREJ_SENT state must be aborted if we are to
2831 			 * enter the LOCAL_BUSY state.
2833 			l2cap_abort_rx_srej_sent(chan);
2836 		l2cap_send_ack(chan);
2839 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2840 		BT_DBG("Exit LOCAL_BUSY");
2841 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2843 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2844 			struct l2cap_ctrl local_control;
2845 			memset(&local_control, 0, sizeof(local_control));
2846 			local_control.sframe = 1;
2847 			local_control.super = L2CAP_SUPER_RR;
2848 			local_control.poll = 1;
2849 			local_control.reqseq = chan->buffer_seq;
2850 			l2cap_send_sframe(chan, &local_control);
2852 			chan->retry_count = 1;
2853 			__set_monitor_timer(chan);
2854 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2857 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2858 		l2cap_process_reqseq(chan, control->reqseq);
2862 	case L2CAP_EV_RECV_FBIT:
2863 		if (control && control->final) {
2864 			__clear_monitor_timer(chan);
/* Still unacked frames: keep the retransmission clock running. */
2865 			if (chan->unacked_frames > 0)
2866 				__set_retrans_timer(chan);
2867 			chan->retry_count = 0;
2868 			chan->tx_state = L2CAP_TX_STATE_XMIT;
/* Fixed broken printf specifier: was "0x2.2%x" (literal "0x2.2"
 * followed by bare %x); "0x%2.2x" prints the state as two hex digits. */
2869 			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2872 	case L2CAP_EV_EXPLICIT_POLL:
/* Already polling; ignore. */
2875 	case L2CAP_EV_MONITOR_TO:
2876 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2877 			l2cap_send_rr_or_rnr(chan, 1);
2878 			__set_monitor_timer(chan);
2879 			chan->retry_count++;
2881 			l2cap_send_disconn_req(chan, ECONNABORTED);
/* Entry point of the ERTM transmit state machine: dispatch @event (with
 * optional @control info and @skbs payload) to the handler for the
 * channel's current tx_state (XMIT or WAIT_F).
 */
2889 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2890 struct sk_buff_head *skbs, u8 event)
2892 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2893 chan, control, skbs, event, chan->tx_state)
2895 switch (chan->tx_state) {
2896 case L2CAP_TX_STATE_XMIT:
2897 l2cap_tx_state_xmit(chan, control, skbs, event);
2899 case L2CAP_TX_STATE_WAIT_F:
2900 l2cap_tx_state_wait_f(chan, control, skbs, event);
2908 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2909 struct l2cap_ctrl *control)
2911 BT_DBG("chan %p, control %p", chan, control);
2912 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2915 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2916 struct l2cap_ctrl *control)
2918 BT_DBG("chan %p, control %p", chan, control);
2919 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2922 /* Copy frame to all raw sockets on that connection */
2923 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2925 struct sk_buff *nskb;
2926 struct l2cap_chan *chan;
2928 BT_DBG("conn %p", conn);
/* chan_lock protects the conn->chan_l channel list while we walk it. */
2930 mutex_lock(&conn->chan_lock);
2932 list_for_each_entry(chan, &conn->chan_l, list) {
2933 if (chan->chan_type != L2CAP_CHAN_RAW)
2936 /* Don't send frame to the channel it came from */
2937 if (bt_cb(skb)->l2cap.chan == chan)
/* Each raw channel gets its own clone of the frame; the clone is
 * handed to the channel's ->recv() op.  NOTE(review): the error
 * paths (clone failure / recv failure freeing nskb) are on lines
 * lost in extraction — verify against the full source.
 */
2940 nskb = skb_clone(skb, GFP_KERNEL);
2943 if (chan->ops->recv(chan, nskb))
2947 mutex_unlock(&conn->chan_lock);
2950 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command PDU: L2CAP header + command header +
 * @dlen bytes of @data, fragmented across conn->mtu-sized skbs when the
 * payload does not fit in one.  Returns the head skb (continuation
 * fragments hang off its frag_list).
 */
2951 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2952 u8 ident, u16 dlen, void *data)
2954 struct sk_buff *skb, **frag;
2955 struct l2cap_cmd_hdr *cmd;
2956 struct l2cap_hdr *lh;
2959 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2960 conn, code, ident, dlen);
/* The MTU must at least hold the two fixed headers. */
2962 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2965 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2966 count = min_t(unsigned int, conn->mtu, len);
2968 skb = bt_skb_alloc(count, GFP_KERNEL);
2972 lh = skb_put(skb, L2CAP_HDR_SIZE);
2973 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* Signalling goes on the fixed signalling CID, which differs
 * between LE and BR/EDR links.
 */
2975 if (conn->hcon->type == LE_LINK)
2976 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2978 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2980 cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
2983 cmd->len = cpu_to_le16(dlen);
/* First fragment carries whatever data fits after the headers. */
2986 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2987 skb_put_data(skb, data, count);
2993 /* Continuation fragments (no L2CAP header) */
2994 frag = &skb_shinfo(skb)->frag_list;
2996 count = min_t(unsigned int, conn->mtu, len);
2998 *frag = bt_skb_alloc(count, GFP_KERNEL);
3002 skb_put_data(*frag, data, count);
3007 frag = &(*frag)->next;
/* Decode one configuration option at *ptr, returning its type, length
 * and value (small values inline, larger ones as a pointer cast into
 * *val).  The consumed byte count is returned so callers can walk the
 * option list.  NOTE(review): the opt->len switch and "*ptr += len;
 * return len;" lines were lost in extraction.
 */
3017 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3020 struct l2cap_conf_opt *opt = *ptr;
3023 len = L2CAP_CONF_OPT_SIZE + opt->len;
3031 *val = *((u8 *) opt->val);
3035 *val = get_unaligned_le16(opt->val);
3039 *val = get_unaligned_le32(opt->val);
/* Larger payloads are returned by reference, not copied. */
3043 *val = (unsigned long) opt->val;
3047 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/val) at *ptr and advance
 * *ptr past it.  @size is the space remaining in the output buffer; the
 * option is silently dropped when it does not fit, which keeps the
 * builders above from overflowing their response buffers.
 */
3051 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3053 struct l2cap_conf_opt *opt = *ptr;
3055 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3057 if (size < L2CAP_CONF_OPT_SIZE + len)
/* 1/2/4-byte values are stored little-endian; anything larger is
 * copied verbatim from the buffer @val points at.
 */
3065 *((u8 *) opt->val) = val;
3069 put_unaligned_le16(val, opt->val);
3073 put_unaligned_le32(val, opt->val);
3077 memcpy(opt->val, (void *) val, len);
3081 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build an Extended Flow Specification option from the channel's local
 * parameters and append it via l2cap_add_conf_opt().  ERTM uses the
 * channel's negotiated service type; streaming mode forces best-effort.
 * NOTE(review): the streaming-mode id/flush_to assignments and the
 * default case were lost in extraction.
 */
3084 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3086 struct l2cap_conf_efs efs;
3088 switch (chan->mode) {
3089 case L2CAP_MODE_ERTM:
3090 efs.id = chan->local_id;
3091 efs.stype = chan->local_stype;
3092 efs.msdu = cpu_to_le16(chan->local_msdu);
3093 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3094 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3095 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3098 case L2CAP_MODE_STREAMING:
3100 efs.stype = L2CAP_SERV_BESTEFFORT;
3101 efs.msdu = cpu_to_le16(chan->local_msdu);
3102 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3111 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3112 (unsigned long) &efs, size);
/* Delayed-work handler for the ERTM ack timer: if frames received since
 * the last acknowledgement are still unacked, send an RR/RNR (with
 * P=0) to ack them.  Drops the channel reference taken when the timer
 * was armed.
 */
3115 static void l2cap_ack_timeout(struct work_struct *work)
3117 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3121 BT_DBG("chan %p", chan);
3123 l2cap_chan_lock(chan);
/* Number of received-but-unacked frames. */
3125 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3126 chan->last_acked_seq);
3129 l2cap_send_rr_or_rnr(chan, 0);
3131 l2cap_chan_unlock(chan);
3132 l2cap_chan_put(chan);
/* Reset all ERTM/streaming sequence state for a freshly configured
 * channel.  For ERTM proper, additionally initialise the rx/tx state
 * machines, the SREJ queue and the srej/retrans sequence lists.
 * Returns 0 on success or the l2cap_seq_list_init() error.
 */
3135 int l2cap_ertm_init(struct l2cap_chan *chan)
3139 chan->next_tx_seq = 0;
3140 chan->expected_tx_seq = 0;
3141 chan->expected_ack_seq = 0;
3142 chan->unacked_frames = 0;
3143 chan->buffer_seq = 0;
3144 chan->frames_sent = 0;
3145 chan->last_acked_seq = 0;
3147 chan->sdu_last_frag = NULL;
3150 skb_queue_head_init(&chan->tx_q);
/* AMP channel-move state starts on the BR/EDR controller, stable. */
3152 chan->local_amp_id = AMP_ID_BREDR;
3153 chan->move_id = AMP_ID_BREDR;
3154 chan->move_state = L2CAP_MOVE_STABLE;
3155 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* Streaming mode needs none of the retransmission machinery below. */
3157 if (chan->mode != L2CAP_MODE_ERTM)
3160 chan->rx_state = L2CAP_RX_STATE_RECV;
3161 chan->tx_state = L2CAP_TX_STATE_XMIT;
3163 skb_queue_head_init(&chan->srej_q);
3165 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
/* On failure of the second list, free the first to avoid a leak. */
3169 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3171 l2cap_seq_list_free(&chan->srej_list);
3176 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3179 case L2CAP_MODE_STREAMING:
3180 case L2CAP_MODE_ERTM:
3181 if (l2cap_mode_supported(mode, remote_feat_mask))
3185 return L2CAP_MODE_BASIC;
3189 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3191 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3192 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3195 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3197 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3198 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
/* Fill in the RFC option's retransmission and monitor timeouts.  On an
 * AMP (high-speed) link they are derived from the controller's
 * best-effort flush timeout; on BR/EDR the spec defaults are used.
 */
3201 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3202 struct l2cap_conf_rfc *rfc)
3204 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3205 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3207 /* Class 1 devices have must have ERTM timeouts
3208 * exceeding the Link Supervision Timeout. The
3209 * default Link Supervision Timeout for AMP
3210 * controllers is 10 seconds.
3212 * Class 1 devices use 0xffffffff for their
3213 * best-effort flush timeout, so the clamping logic
3214 * will result in a timeout that meets the above
3215 * requirement. ERTM timeouts are 16-bit values, so
3216 * the maximum timeout is 65.535 seconds.
3219 /* Convert timeout to milliseconds and round */
3220 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3222 /* This is the recommended formula for class 2 devices
3223 * that start ERTM timers when packets are sent to the
3226 ertm_to = 3 * ertm_to + 500;
/* Clamp to the 16-bit range of the RFC timeout fields.
 * NOTE(review): the "ertm_to = 0xffff;" clamp line itself was
 * lost in extraction.
 */
3228 if (ertm_to > 0xffff)
3231 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3232 rfc->monitor_timeout = rfc->retrans_timeout;
/* Non-AMP path: spec default timeouts. */
3234 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3235 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Decide whether this channel may use the extended (14-bit) control
 * field: only when the requested tx window exceeds the classic default
 * and both sides support extended windows.  Otherwise clamp tx_win to
 * the classic maximum.  ack_win starts out mirroring tx_win.
 */
3239 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3241 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3242 __l2cap_ews_supported(chan->conn)) {
3243 /* use extended control field */
3244 set_bit(FLAG_EXT_CTRL, &chan->flags);
3245 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3247 chan->tx_win = min_t(u16, chan->tx_win,
3248 L2CAP_DEFAULT_TX_WINDOW);
3249 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3251 chan->ack_win = chan->tx_win;
/* Build an outgoing Configuration Request for @chan into @data
 * (capacity @data_size), appending MTU/RFC/EFS/EWS/FCS options as the
 * selected mode requires.  @endptr bounds every l2cap_add_conf_opt()
 * call so the request cannot overrun the buffer.  Returns the built
 * length (return statement lost in extraction).
 */
3254 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3256 struct l2cap_conf_req *req = data;
3257 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3258 void *ptr = req->data;
3259 void *endptr = data + data_size;
3262 BT_DBG("chan %p", chan);
/* Mode is only (re)negotiated on the very first request/response. */
3264 if (chan->num_conf_req || chan->num_conf_rsp)
3267 switch (chan->mode) {
3268 case L2CAP_MODE_STREAMING:
3269 case L2CAP_MODE_ERTM:
3270 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3273 if (__l2cap_efs_supported(chan->conn))
3274 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3278 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* Only send an MTU option when it differs from the default. */
3283 if (chan->imtu != L2CAP_DEFAULT_MTU)
3284 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
3286 switch (chan->mode) {
3287 case L2CAP_MODE_BASIC:
/* In basic mode an explicit RFC option is only needed when the
 * peer supports ERTM or streaming (to opt out of them).
 */
3291 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3292 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3295 rfc.mode = L2CAP_MODE_BASIC;
3297 rfc.max_transmit = 0;
3298 rfc.retrans_timeout = 0;
3299 rfc.monitor_timeout = 0;
3300 rfc.max_pdu_size = 0;
3302 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3303 (unsigned long) &rfc, endptr - ptr);
3306 case L2CAP_MODE_ERTM:
3307 rfc.mode = L2CAP_MODE_ERTM;
3308 rfc.max_transmit = chan->max_tx;
3310 __l2cap_set_ertm_timeouts(chan, &rfc);
/* Cap the PDU size so a full PDU (with extended header, SDU
 * length and FCS) fits in the connection MTU.
 */
3312 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3313 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3315 rfc.max_pdu_size = cpu_to_le16(size);
3317 l2cap_txwin_setup(chan);
3319 rfc.txwin_size = min_t(u16, chan->tx_win,
3320 L2CAP_DEFAULT_TX_WINDOW);
3322 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3323 (unsigned long) &rfc, endptr - ptr);
3325 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3326 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
/* With extended control, the real window goes in an EWS option. */
3328 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3329 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3330 chan->tx_win, endptr - ptr);
3332 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3333 if (chan->fcs == L2CAP_FCS_NONE ||
3334 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3335 chan->fcs = L2CAP_FCS_NONE;
3336 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3337 chan->fcs, endptr - ptr);
3341 case L2CAP_MODE_STREAMING:
3342 l2cap_txwin_setup(chan);
3343 rfc.mode = L2CAP_MODE_STREAMING;
3345 rfc.max_transmit = 0;
3346 rfc.retrans_timeout = 0;
3347 rfc.monitor_timeout = 0;
3349 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3350 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3352 rfc.max_pdu_size = cpu_to_le16(size);
3354 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3355 (unsigned long) &rfc, endptr - ptr);
3357 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3358 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3360 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3361 if (chan->fcs == L2CAP_FCS_NONE ||
3362 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3363 chan->fcs = L2CAP_FCS_NONE;
3364 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3365 chan->fcs, endptr - ptr);
3370 req->dcid = cpu_to_le16(chan->dcid);
3371 req->flags = cpu_to_le16(0);
/* Parse the accumulated Configuration Request in chan->conf_req and
 * build the Configuration Response into @data (capacity @data_size).
 * Phase 1 walks the option list recording MTU/flush/RFC/FCS/EFS/EWS;
 * phase 2 validates the requested mode against ours and emits the
 * response options.  Returns the response length, or -ECONNREFUSED when
 * negotiation cannot converge.
 */
3376 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3378 struct l2cap_conf_rsp *rsp = data;
3379 void *ptr = rsp->data;
3380 void *endptr = data + data_size;
3381 void *req = chan->conf_req;
3382 int len = chan->conf_len;
3383 int type, hint, olen;
3385 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3386 struct l2cap_conf_efs efs;
3388 u16 mtu = L2CAP_DEFAULT_MTU;
3389 u16 result = L2CAP_CONF_SUCCESS;
3392 BT_DBG("chan %p", chan);
/* ---- phase 1: walk the received option list ---- */
3394 while (len >= L2CAP_CONF_OPT_SIZE) {
3395 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; non-hint unknown options must be
 * reported back in an UNKNOWN response.
 */
3399 hint = type & L2CAP_CONF_HINT;
3400 type &= L2CAP_CONF_MASK;
3403 case L2CAP_CONF_MTU:
3409 case L2CAP_CONF_FLUSH_TO:
3412 chan->flush_to = val;
3415 case L2CAP_CONF_QOS:
3418 case L2CAP_CONF_RFC:
3419 if (olen != sizeof(rfc))
3421 memcpy(&rfc, (void *) val, olen);
3424 case L2CAP_CONF_FCS:
3427 if (val == L2CAP_FCS_NONE)
3428 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3431 case L2CAP_CONF_EFS:
3432 if (olen != sizeof(efs))
3435 memcpy(&efs, (void *) val, olen);
/* Extended window requires A2MP support on our side. */
3438 case L2CAP_CONF_EWS:
3441 if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3442 return -ECONNREFUSED;
3443 set_bit(FLAG_EXT_CTRL, &chan->flags);
3444 set_bit(CONF_EWS_RECV, &chan->conf_state);
3445 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3446 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type in the response. */
3452 result = L2CAP_CONF_UNKNOWN;
3453 *((u8 *) ptr++) = type;
/* ---- phase 2: mode negotiation and response options ---- */
3458 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3461 switch (chan->mode) {
3462 case L2CAP_MODE_STREAMING:
3463 case L2CAP_MODE_ERTM:
3464 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3465 chan->mode = l2cap_select_mode(rfc.mode,
3466 chan->conn->feat_mask);
3471 if (__l2cap_efs_supported(chan->conn))
3472 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3474 return -ECONNREFUSED;
3477 if (chan->mode != rfc.mode)
3478 return -ECONNREFUSED;
/* Modes still disagree: propose ours, refuse after one retry. */
3484 if (chan->mode != rfc.mode) {
3485 result = L2CAP_CONF_UNACCEPT;
3486 rfc.mode = chan->mode;
3488 if (chan->num_conf_rsp == 1)
3489 return -ECONNREFUSED;
3491 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3492 (unsigned long) &rfc, endptr - ptr);
3495 if (result == L2CAP_CONF_SUCCESS) {
3496 /* Configure output options and let the other side know
3497 * which ones we don't like. */
3499 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3500 result = L2CAP_CONF_UNACCEPT;
3503 set_bit(CONF_MTU_DONE, &chan->conf_state);
3505 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
/* Reject an EFS whose service type conflicts with ours. */
3508 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3509 efs.stype != L2CAP_SERV_NOTRAFIC &&
3510 efs.stype != chan->local_stype) {
3512 result = L2CAP_CONF_UNACCEPT;
3514 if (chan->num_conf_req >= 1)
3515 return -ECONNREFUSED;
3517 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3519 (unsigned long) &efs, endptr - ptr);
3521 /* Send PENDING Conf Rsp */
3522 result = L2CAP_CONF_PENDING;
3523 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3528 case L2CAP_MODE_BASIC:
3529 chan->fcs = L2CAP_FCS_NONE;
3530 set_bit(CONF_MODE_DONE, &chan->conf_state);
3533 case L2CAP_MODE_ERTM:
3534 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3535 chan->remote_tx_win = rfc.txwin_size;
3537 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3539 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the peer's PDU size to what fits in our link MTU. */
3541 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3542 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3543 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3544 rfc.max_pdu_size = cpu_to_le16(size);
3545 chan->remote_mps = size;
3547 __l2cap_set_ertm_timeouts(chan, &rfc);
3549 set_bit(CONF_MODE_DONE, &chan->conf_state);
3551 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3552 sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3555 test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3556 chan->remote_id = efs.id;
3557 chan->remote_stype = efs.stype;
3558 chan->remote_msdu = le16_to_cpu(efs.msdu);
3559 chan->remote_flush_to =
3560 le32_to_cpu(efs.flush_to);
3561 chan->remote_acc_lat =
3562 le32_to_cpu(efs.acc_lat);
3563 chan->remote_sdu_itime =
3564 le32_to_cpu(efs.sdu_itime);
3565 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3567 (unsigned long) &efs, endptr - ptr);
3571 case L2CAP_MODE_STREAMING:
3572 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3573 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3574 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3575 rfc.max_pdu_size = cpu_to_le16(size);
3576 chan->remote_mps = size;
3578 set_bit(CONF_MODE_DONE, &chan->conf_state);
3580 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3581 (unsigned long) &rfc, endptr - ptr);
3586 result = L2CAP_CONF_UNACCEPT;
3588 memset(&rfc, 0, sizeof(rfc));
3589 rfc.mode = chan->mode;
3592 if (result == L2CAP_CONF_SUCCESS)
3593 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3595 rsp->scid = cpu_to_le16(chan->dcid);
3596 rsp->result = cpu_to_le16(result);
3597 rsp->flags = cpu_to_le16(0);
/* Parse a Configuration Response (@rsp, @len bytes) and build the
 * follow-up Configuration Request into @data (capacity @size).
 * Options the peer counter-proposed are adopted where acceptable and
 * echoed back; *result is updated along the way.  Returns the new
 * request length or -ECONNREFUSED on an unacceptable counter-proposal.
 */
3602 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3603 void *data, size_t size, u16 *result)
3605 struct l2cap_conf_req *req = data;
3606 void *ptr = req->data;
3607 void *endptr = data + size;
3610 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3611 struct l2cap_conf_efs efs;
3613 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3615 while (len >= L2CAP_CONF_OPT_SIZE) {
3616 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3621 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the minimum: refuse it but fall
 * back to the minimum ourselves.
 */
3624 if (val < L2CAP_DEFAULT_MIN_MTU) {
3625 *result = L2CAP_CONF_UNACCEPT;
3626 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3629 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3633 case L2CAP_CONF_FLUSH_TO:
3636 chan->flush_to = val;
3637 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3638 chan->flush_to, endptr - ptr);
3641 case L2CAP_CONF_RFC:
3642 if (olen != sizeof(rfc))
3644 memcpy(&rfc, (void *)val, olen);
/* A state-2 device must not let the peer change the mode. */
3645 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3646 rfc.mode != chan->mode)
3647 return -ECONNREFUSED;
3649 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3650 (unsigned long) &rfc, endptr - ptr);
3653 case L2CAP_CONF_EWS:
3656 chan->ack_win = min_t(u16, val, chan->ack_win);
3657 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3658 chan->tx_win, endptr - ptr);
3661 case L2CAP_CONF_EFS:
3662 if (olen != sizeof(efs))
3664 memcpy(&efs, (void *)val, olen);
3665 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3666 efs.stype != L2CAP_SERV_NOTRAFIC &&
3667 efs.stype != chan->local_stype)
3668 return -ECONNREFUSED;
3669 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3670 (unsigned long) &efs, endptr - ptr);
3673 case L2CAP_CONF_FCS:
3676 if (*result == L2CAP_CONF_PENDING)
3677 if (val == L2CAP_FCS_NONE)
3678 set_bit(CONF_RECV_NO_FCS,
/* Basic mode cannot be renegotiated away by the peer. */
3684 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3685 return -ECONNREFUSED;
3687 chan->mode = rfc.mode;
/* Adopt the agreed ERTM/streaming parameters on success/pending. */
3689 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3691 case L2CAP_MODE_ERTM:
3692 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3693 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3694 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3695 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3696 chan->ack_win = min_t(u16, chan->ack_win,
3699 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3700 chan->local_msdu = le16_to_cpu(efs.msdu);
3701 chan->local_sdu_itime =
3702 le32_to_cpu(efs.sdu_itime);
3703 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3704 chan->local_flush_to =
3705 le32_to_cpu(efs.flush_to);
3709 case L2CAP_MODE_STREAMING:
3710 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3714 req->dcid = cpu_to_le16(chan->dcid);
3715 req->flags = cpu_to_le16(0);
3720 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3721 u16 result, u16 flags)
3723 struct l2cap_conf_rsp *rsp = data;
3724 void *ptr = rsp->data;
3726 BT_DBG("chan %p", chan);
3728 rsp->scid = cpu_to_le16(chan->dcid);
3729 rsp->result = cpu_to_le16(result);
3730 rsp->flags = cpu_to_le16(flags);
3735 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3737 struct l2cap_le_conn_rsp rsp;
3738 struct l2cap_conn *conn = chan->conn;
3740 BT_DBG("chan %p", chan);
3742 rsp.dcid = cpu_to_le16(chan->scid);
3743 rsp.mtu = cpu_to_le16(chan->imtu);
3744 rsp.mps = cpu_to_le16(chan->mps);
3745 rsp.credits = cpu_to_le16(chan->rx_credits);
3746 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3748 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Send the deferred BR/EDR connection (or AMP create-channel) response
 * for a channel whose setup was delayed, then kick off configuration by
 * sending our first Configuration Request if one was not sent yet.
 */
3752 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3754 struct l2cap_conn_rsp rsp;
3755 struct l2cap_conn *conn = chan->conn;
3759 rsp.scid = cpu_to_le16(chan->dcid);
3760 rsp.dcid = cpu_to_le16(chan->scid);
3761 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3762 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* AMP channels answer with CREATE_CHAN_RSP instead of CONN_RSP.
 * NOTE(review): the condition selecting between the two was lost in
 * extraction.
 */
3765 rsp_code = L2CAP_CREATE_CHAN_RSP;
3767 rsp_code = L2CAP_CONN_RSP;
3769 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3771 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* Only send the initial Conf Req once per channel. */
3773 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3776 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3777 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3778 chan->num_conf_req++;
/* Extract the final RFC (and extended window) parameters from a
 * successful Configuration Response and store them on the channel.
 */
3781 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3785 /* Use sane default values in case a misbehaving remote device
3786 * did not send an RFC or extended window size option.
3788 u16 txwin_ext = chan->ack_win;
3789 struct l2cap_conf_rfc rfc = {
3791 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3792 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3793 .max_pdu_size = cpu_to_le16(chan->imtu),
3794 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3797 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM and streaming mode carry RFC parameters worth storing. */
3799 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3802 while (len >= L2CAP_CONF_OPT_SIZE) {
3803 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3808 case L2CAP_CONF_RFC:
3809 if (olen != sizeof(rfc))
3811 memcpy(&rfc, (void *)val, olen);
/* NOTE(review): the EWS handling body (storing txwin_ext) was lost
 * in extraction.
 */
3813 case L2CAP_CONF_EWS:
3822 case L2CAP_MODE_ERTM:
3823 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3824 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3825 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* The ack window comes from EWS with extended control, otherwise
 * from the RFC's txwin_size.
 */
3826 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3827 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3829 chan->ack_win = min_t(u16, chan->ack_win,
3832 case L2CAP_MODE_STREAMING:
3833 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it rejects our pending
 * information request (matching ident), finish the feature-mask
 * exchange and proceed with starting queued channels.
 */
3837 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3838 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3841 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
/* Guard against truncated PDUs before touching rej fields. */
3843 if (cmd_len < sizeof(*rej))
3846 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3849 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3850 cmd->ident == conn->info_ident) {
3851 cancel_delayed_work(&conn->info_timer);
3853 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3854 conn->info_ident = 0;
3856 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listening channel for
 * the PSM, run security checks, create the child channel, send the
 * connection response (possibly PENDING) and, when appropriate, start
 * the information request and the first configuration request.
 * Returns the newly created channel (or NULL on rejection paths).
 */
3862 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3863 struct l2cap_cmd_hdr *cmd,
3864 u8 *data, u8 rsp_code, u8 amp_id)
3866 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3867 struct l2cap_conn_rsp rsp;
3868 struct l2cap_chan *chan = NULL, *pchan;
3869 int result, status = L2CAP_CS_NO_INFO;
3871 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3872 __le16 psm = req->psm;
3874 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3876 /* Check if we have socket listening on psm */
3877 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3878 &conn->hcon->dst, ACL_LINK);
3880 result = L2CAP_CR_BAD_PSM;
/* Lock order: conn->chan_lock before the parent channel lock. */
3884 mutex_lock(&conn->chan_lock);
3885 l2cap_chan_lock(pchan);
3887 /* Check if the ACL is secure enough (if not SDP) */
3888 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3889 !hci_conn_check_link_mode(conn->hcon)) {
3890 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3891 result = L2CAP_CR_SEC_BLOCK;
3895 result = L2CAP_CR_NO_MEM;
3897 /* Check if we already have channel with that dcid */
3898 if (__l2cap_get_chan_by_dcid(conn, scid))
3901 chan = pchan->ops->new_connection(pchan);
3905 /* For certain devices (ex: HID mouse), support for authentication,
3906 * pairing and bonding is optional. For such devices, inorder to avoid
3907 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3908 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3910 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3912 bacpy(&chan->src, &conn->hcon->src);
3913 bacpy(&chan->dst, &conn->hcon->dst);
3914 chan->src_type = bdaddr_src_type(conn->hcon);
3915 chan->dst_type = bdaddr_dst_type(conn->hcon);
3918 chan->local_amp_id = amp_id;
3920 __l2cap_chan_add(conn, chan);
3924 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
/* Remember the request ident for a possibly deferred response. */
3926 chan->ident = cmd->ident;
3928 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3929 if (l2cap_chan_check_security(chan, false)) {
3930 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3931 l2cap_state_change(chan, BT_CONNECT2);
3932 result = L2CAP_CR_PEND;
3933 status = L2CAP_CS_AUTHOR_PEND;
3934 chan->ops->defer(chan);
3936 /* Force pending result for AMP controllers.
3937 * The connection will succeed after the
3938 * physical link is up.
3940 if (amp_id == AMP_ID_BREDR) {
3941 l2cap_state_change(chan, BT_CONFIG);
3942 result = L2CAP_CR_SUCCESS;
3944 l2cap_state_change(chan, BT_CONNECT2);
3945 result = L2CAP_CR_PEND;
3947 status = L2CAP_CS_NO_INFO;
3950 l2cap_state_change(chan, BT_CONNECT2);
3951 result = L2CAP_CR_PEND;
3952 status = L2CAP_CS_AUTHEN_PEND;
3955 l2cap_state_change(chan, BT_CONNECT2);
3956 result = L2CAP_CR_PEND;
3957 status = L2CAP_CS_NO_INFO;
3961 l2cap_chan_unlock(pchan);
3962 mutex_unlock(&conn->chan_lock);
3963 l2cap_chan_put(pchan);
3966 rsp.scid = cpu_to_le16(scid);
3967 rsp.dcid = cpu_to_le16(dcid);
3968 rsp.result = cpu_to_le16(result);
3969 rsp.status = cpu_to_le16(status);
3970 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* Kick off the feature-mask exchange if not done yet. */
3972 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3973 struct l2cap_info_req info;
3974 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3976 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3977 conn->info_ident = l2cap_get_ident(conn);
3979 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3981 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3982 sizeof(info), &info);
3985 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3986 result == L2CAP_CR_SUCCESS) {
3988 set_bit(CONF_REQ_SENT, &chan->conf_state);
3989 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3990 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3991 chan->num_conf_req++;
/* Signalling handler for a Connection Request PDU: validate the length,
 * notify the management interface of the connection, and delegate to
 * l2cap_connect().
 */
3997 static int l2cap_connect_req(struct l2cap_conn *conn,
3998 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4000 struct hci_dev *hdev = conn->hcon->hdev;
4001 struct hci_conn *hcon = conn->hcon;
/* Reject truncated PDUs before l2cap_connect() reads req fields. */
4003 if (cmd_len < sizeof(struct l2cap_conn_req))
/* Report the device as connected to mgmt exactly once. */
4007 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
4008 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4009 mgmt_device_connected(hdev, hcon, 0, NULL, 0);
4010 hci_dev_unlock(hdev);
4012 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection Response (or AMP Create Channel Response): look
 * up the channel by scid (or by ident while still pending), then either
 * move to BT_CONFIG and send the first Conf Req (SUCCESS), mark the
 * connection pending (PEND), or tear the channel down (failure).
 */
4016 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
4017 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4020 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4021 u16 scid, dcid, result, status;
4022 struct l2cap_chan *chan;
4026 if (cmd_len < sizeof(*rsp))
4029 scid = __le16_to_cpu(rsp->scid);
4030 dcid = __le16_to_cpu(rsp->dcid);
4031 result = __le16_to_cpu(rsp->result);
4032 status = __le16_to_cpu(rsp->status);
4034 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4035 dcid, scid, result, status);
4037 mutex_lock(&conn->chan_lock);
/* A scid of zero means the channel is still identified only by the
 * ident of our original request.
 */
4040 chan = __l2cap_get_chan_by_scid(conn, scid);
4046 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
/* Take a reference unless the channel is already being freed. */
4053 chan = l2cap_chan_hold_unless_zero(chan);
4061 l2cap_chan_lock(chan);
4064 case L2CAP_CR_SUCCESS:
4065 l2cap_state_change(chan, BT_CONFIG);
4068 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4070 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4073 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4074 l2cap_build_conf_req(chan, req, sizeof(req)), req);
4075 chan->num_conf_req++;
4079 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any other result: the peer refused — delete the channel. */
4083 l2cap_chan_del(chan, ECONNREFUSED);
4087 l2cap_chan_unlock(chan);
4088 l2cap_chan_put(chan);
4091 mutex_unlock(&conn->chan_lock);
4096 static inline void set_default_fcs(struct l2cap_chan *chan)
4098 /* FCS is enabled only in ERTM or streaming mode, if one or both
4101 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4102 chan->fcs = L2CAP_FCS_NONE;
4103 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4104 chan->fcs = L2CAP_FCS_CRC16;
4107 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4108 u8 ident, u16 flags)
4110 struct l2cap_conn *conn = chan->conn;
4112 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4115 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4116 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4118 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4119 l2cap_build_conf_rsp(chan, data,
4120 L2CAP_CONF_SUCCESS, flags), data);
4123 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4126 struct l2cap_cmd_rej_cid rej;
4128 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4129 rej.scid = __cpu_to_le16(scid);
4130 rej.dcid = __cpu_to_le16(dcid);
4132 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle an incoming Configuration Request: accumulate (possibly
 * multi-fragment) option data in chan->conf_req, and once complete,
 * parse it, send the response, and — when both directions are done —
 * initialise ERTM state and mark the channel ready.
 */
4135 static inline int l2cap_config_req(struct l2cap_conn *conn,
4136 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4139 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4142 struct l2cap_chan *chan;
4145 if (cmd_len < sizeof(*req))
4148 dcid = __le16_to_cpu(req->dcid);
4149 flags = __le16_to_cpu(req->flags);
4151 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
/* Unknown CID: reject with an invalid-CID command reject. */
4153 chan = l2cap_get_chan_by_scid(conn, dcid);
4155 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4159 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
4160 chan->state != BT_CONNECTED) {
4161 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4166 /* Reject if config buffer is too small. */
4167 len = cmd_len - sizeof(*req);
4168 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4169 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4170 l2cap_build_conf_rsp(chan, rsp,
4171 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment of the request. */
4176 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4177 chan->conf_len += len;
4179 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4180 /* Incomplete config. Send empty response. */
4181 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4182 l2cap_build_conf_rsp(chan, rsp,
4183 L2CAP_CONF_SUCCESS, flags), rsp);
4187 /* Complete config. */
4188 len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
4190 l2cap_send_disconn_req(chan, ECONNRESET);
4194 chan->ident = cmd->ident;
4195 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4196 chan->num_conf_rsp++;
4198 /* Reset config buffer. */
4201 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: finalise FCS, init ERTM if needed,
 * and bring the channel up (or disconnect on init failure).
 */
4204 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4205 set_default_fcs(chan);
4207 if (chan->mode == L2CAP_MODE_ERTM ||
4208 chan->mode == L2CAP_MODE_STREAMING)
4209 err = l2cap_ertm_init(chan);
4212 l2cap_send_disconn_req(chan, -err);
4214 l2cap_chan_ready(chan);
/* Our own Conf Req goes out now if it has not been sent yet. */
4219 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4221 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4222 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4223 chan->num_conf_req++;
4226 /* Got Conf Rsp PENDING from remote side and assume we sent
4227 Conf Rsp PENDING in the code above */
4228 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4229 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4231 /* check compatibility */
4233 /* Send rsp for BR/EDR channel */
4235 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4237 chan->ident = cmd->ident;
4241 l2cap_chan_unlock(chan);
4242 l2cap_chan_put(chan);
/* Handle an incoming L2CAP_CONF_RSP on a BR/EDR channel.
 * Dispatches on the response result (SUCCESS / PENDING / UNACCEPT),
 * possibly renegotiating by sending a new CONF_REQ, and disconnects
 * the channel on unrecoverable configuration failures.
 * NOTE(review): some interior lines are elided in this excerpt; the
 * visible code is kept byte-identical.
 */
4246 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4247 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4250 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4251 u16 scid, flags, result;
4252 struct l2cap_chan *chan;
/* Remaining bytes after the fixed header are config options */
4253 int len = cmd_len - sizeof(*rsp);
/* Reject malformed (too short) commands */
4256 if (cmd_len < sizeof(*rsp))
4259 scid = __le16_to_cpu(rsp->scid);
4260 flags = __le16_to_cpu(rsp->flags);
4261 result = __le16_to_cpu(rsp->result);
4263 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4266 chan = l2cap_get_chan_by_scid(conn, scid);
4271 case L2CAP_CONF_SUCCESS:
4272 l2cap_conf_rfc_get(chan, rsp->data, len);
4273 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4276 case L2CAP_CONF_PENDING:
4277 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4279 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4282 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4283 buf, sizeof(buf), &result);
/* Parse failure: tear the channel down */
4285 l2cap_send_disconn_req(chan, ECONNRESET);
/* On BR/EDR (no high-speed hcon) answer immediately; on AMP
 * wait for the logical link before confirming EFS config.
 */
4289 if (!chan->hs_hcon) {
4290 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4293 if (l2cap_check_efs(chan)) {
4294 amp_create_logical_link(chan);
4295 chan->ident = cmd->ident;
4301 case L2CAP_CONF_UNACCEPT:
/* Retry with adjusted options, bounded by max response count */
4302 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Guard against overflowing the local request buffer */
4305 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4306 l2cap_send_disconn_req(chan, ECONNRESET);
4310 /* throw out any old stored conf requests */
4311 result = L2CAP_CONF_SUCCESS;
4312 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4313 req, sizeof(req), &result);
4315 l2cap_send_disconn_req(chan, ECONNRESET);
4319 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4320 L2CAP_CONF_REQ, len, req);
4321 chan->num_conf_req++;
4322 if (result != L2CAP_CONF_SUCCESS)
/* Any other result code: fail the channel */
4328 l2cap_chan_set_err(chan, ECONNRESET);
4330 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4331 l2cap_send_disconn_req(chan, ECONNRESET);
/* More config fragments to come; nothing further yet */
4335 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4338 set_bit(CONF_INPUT_DONE, &chan->conf_state);
/* Both directions configured: finish channel setup */
4340 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4341 set_default_fcs(chan);
4343 if (chan->mode == L2CAP_MODE_ERTM ||
4344 chan->mode == L2CAP_MODE_STREAMING)
4345 err = l2cap_ertm_init(chan);
4348 l2cap_send_disconn_req(chan, -err);
4350 l2cap_chan_ready(chan);
4354 l2cap_chan_unlock(chan);
4355 l2cap_chan_put(chan);
/* Handle an incoming L2CAP_DISCONN_REQ: acknowledge with a
 * DISCONN_RSP, then shut down and delete the matching channel.
 * NOTE(review): some interior lines are elided in this excerpt; the
 * visible code is kept byte-identical.
 */
4359 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4360 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4363 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4364 struct l2cap_disconn_rsp rsp;
4366 struct l2cap_chan *chan;
/* Fixed-size PDU: any other length is malformed */
4368 if (cmd_len != sizeof(*req))
4371 scid = __le16_to_cpu(req->scid);
4372 dcid = __le16_to_cpu(req->dcid);
4374 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4376 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our local scid */
4378 chan = __l2cap_get_chan_by_scid(conn, dcid);
4380 mutex_unlock(&conn->chan_lock);
/* Unknown CID: reject per spec */
4381 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
/* Hold a ref so the channel survives until we are done with it */
4385 l2cap_chan_hold(chan);
4386 l2cap_chan_lock(chan);
4388 rsp.dcid = cpu_to_le16(chan->scid);
4389 rsp.scid = cpu_to_le16(chan->dcid);
4390 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4392 chan->ops->set_shutdown(chan);
4394 l2cap_chan_del(chan, ECONNRESET);
4396 chan->ops->close(chan);
4398 l2cap_chan_unlock(chan);
4399 l2cap_chan_put(chan);
4401 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP_DISCONN_RSP: complete a locally initiated
 * disconnect by deleting and closing the channel, but only if it is
 * actually in BT_DISCONN state.
 * NOTE(review): some interior lines are elided in this excerpt; the
 * visible code is kept byte-identical.
 */
4406 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4407 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4410 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4412 struct l2cap_chan *chan;
/* Fixed-size PDU: any other length is malformed */
4414 if (cmd_len != sizeof(*rsp))
4417 scid = __le16_to_cpu(rsp->scid);
4418 dcid = __le16_to_cpu(rsp->dcid);
4420 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4422 mutex_lock(&conn->chan_lock);
4424 chan = __l2cap_get_chan_by_scid(conn, scid);
4426 mutex_unlock(&conn->chan_lock);
4430 l2cap_chan_hold(chan);
4431 l2cap_chan_lock(chan);
/* Ignore unsolicited responses for channels not disconnecting */
4433 if (chan->state != BT_DISCONN) {
4434 l2cap_chan_unlock(chan);
4435 l2cap_chan_put(chan);
4436 mutex_unlock(&conn->chan_lock);
/* err 0: clean, expected teardown */
4440 l2cap_chan_del(chan, 0);
4442 chan->ops->close(chan);
4444 l2cap_chan_unlock(chan);
4445 l2cap_chan_put(chan);
4447 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP_INFO_REQ: answer feature-mask and
 * fixed-channel queries; any other info type gets NOTSUPP.
 * NOTE(review): some interior lines are elided in this excerpt; the
 * visible code is kept byte-identical.
 */
4452 static inline int l2cap_information_req(struct l2cap_conn *conn,
4453 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4456 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
/* Fixed-size PDU: any other length is malformed */
4459 if (cmd_len != sizeof(*req))
4462 type = __le16_to_cpu(req->type);
4464 BT_DBG("type 0x%4.4x", type);
4466 if (type == L2CAP_IT_FEAT_MASK) {
4468 u32 feat_mask = l2cap_feat_mask;
4469 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4470 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4471 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Always advertise ERTM/streaming; extended flow spec and
 * window only when A2MP is locally supported.
 */
4473 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4475 if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4476 feat_mask |= L2CAP_FEAT_EXT_FLOW
4477 | L2CAP_FEAT_EXT_WINDOW;
4479 put_unaligned_le32(feat_mask, rsp->data);
4480 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4482 } else if (type == L2CAP_IT_FIXED_CHAN) {
4484 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4486 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4487 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Fixed-channel bitmap: first octet is ours, rest zeroed */
4488 rsp->data[0] = conn->local_fixed_chan;
4489 memset(rsp->data + 1, 0, 7);
4490 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4493 struct l2cap_info_rsp rsp;
4494 rsp.type = cpu_to_le16(type);
4495 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4496 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an incoming L2CAP_INFO_RSP: record the peer's feature mask
 * and fixed channels, chaining a FIXED_CHAN query after FEAT_MASK if
 * the peer supports it, then kick off pending channel connects.
 * NOTE(review): some interior lines are elided in this excerpt; the
 * visible code is kept byte-identical.
 */
4503 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4504 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4507 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4510 if (cmd_len < sizeof(*rsp))
4513 type = __le16_to_cpu(rsp->type);
4514 result = __le16_to_cpu(rsp->result);
4516 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4518 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4519 if (cmd->ident != conn->info_ident ||
4520 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4523 cancel_delayed_work(&conn->info_timer);
/* Peer refused the query: mark info exchange done and proceed */
4525 if (result != L2CAP_IR_SUCCESS) {
4526 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4527 conn->info_ident = 0;
4529 l2cap_conn_start(conn);
4535 case L2CAP_IT_FEAT_MASK:
4536 conn->feat_mask = get_unaligned_le32(rsp->data);
/* If the peer advertises fixed channels, ask which ones */
4538 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4539 struct l2cap_info_req req;
4540 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4542 conn->info_ident = l2cap_get_ident(conn);
4544 l2cap_send_cmd(conn, conn->info_ident,
4545 L2CAP_INFO_REQ, sizeof(req), &req);
4547 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4548 conn->info_ident = 0;
4550 l2cap_conn_start(conn);
4554 case L2CAP_IT_FIXED_CHAN:
4555 conn->remote_fixed_chan = rsp->data[0];
4556 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4557 conn->info_ident = 0;
4559 l2cap_conn_start(conn);
/* Handle an incoming L2CAP_CREATE_CHAN_REQ (AMP). amp_id 0 means a
 * plain BR/EDR connect; otherwise the AMP controller id is validated
 * and the channel is prepared for a high-speed logical link. Replies
 * with CREATE_CHAN_RSP / BAD_AMP on validation failure.
 * NOTE(review): some interior lines are elided in this excerpt; the
 * visible code is kept byte-identical.
 */
4566 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4567 struct l2cap_cmd_hdr *cmd,
4568 u16 cmd_len, void *data)
4570 struct l2cap_create_chan_req *req = data;
4571 struct l2cap_create_chan_rsp rsp;
4572 struct l2cap_chan *chan;
4573 struct hci_dev *hdev;
/* Fixed-size PDU: any other length is malformed */
4576 if (cmd_len != sizeof(*req))
/* Only meaningful when A2MP is locally supported */
4579 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4582 psm = le16_to_cpu(req->psm);
4583 scid = le16_to_cpu(req->scid);
4585 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4587 /* For controller id 0 make BR/EDR connection */
4588 if (req->amp_id == AMP_ID_BREDR) {
4589 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4594 /* Validate AMP controller id */
4595 hdev = hci_dev_get(req->amp_id);
4599 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4604 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4607 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4608 struct hci_conn *hs_hcon;
4610 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
/* No AMP link to the peer: reject the CID */
4614 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4619 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4621 mgr->bredr_chan = chan;
4622 chan->hs_hcon = hs_hcon;
/* AMP links carry their own integrity checks; no L2CAP FCS */
4623 chan->fcs = L2CAP_FCS_NONE;
4624 conn->mtu = hdev->block_mtu;
/* Error path: report a bad AMP controller id */
4633 rsp.scid = cpu_to_le16(scid);
4634 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4635 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4637 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Send an L2CAP Move Channel Request asking to move chan to the AMP
 * controller dest_amp_id. Records the command ident on the channel so
 * the response can be matched, and arms the move timeout.
 */
4643 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4645 struct l2cap_move_chan_req req;
4648 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4650 ident = l2cap_get_ident(chan->conn);
4651 chan->ident = ident;
4653 req.icid = cpu_to_le16(chan->scid);
4654 req.dest_amp_id = dest_amp_id;
4656 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4659 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an L2CAP Move Channel Response for chan with the given result
 * code, reusing the ident stored from the peer's request.
 */
4662 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4664 struct l2cap_move_chan_rsp rsp;
4666 BT_DBG("chan %p, result 0x%4.4x", chan, result);
/* icid is the CID as the initiator (peer) knows it */
4668 rsp.icid = cpu_to_le16(chan->dcid);
4669 rsp.result = cpu_to_le16(result);
4671 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/* Send an L2CAP Move Channel Confirmation for chan with the given
 * result (confirmed/unconfirmed); allocates a fresh ident and arms the
 * move timeout while waiting for the confirmation response.
 */
4675 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4677 struct l2cap_move_chan_cfm cfm;
4679 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4681 chan->ident = l2cap_get_ident(chan->conn);
4683 cfm.icid = cpu_to_le16(chan->scid);
4684 cfm.result = cpu_to_le16(result);
4686 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4689 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an UNCONFIRMED Move Channel Confirmation for a bare icid —
 * used when no matching channel could be located for the move.
 */
4692 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4694 struct l2cap_move_chan_cfm cfm;
4696 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4698 cfm.icid = cpu_to_le16(icid);
4699 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4701 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/* Send a Move Channel Confirmation Response for icid, echoing the
 * ident from the confirmation we are answering.
 */
4705 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4708 struct l2cap_move_chan_cfm_rsp rsp;
4710 BT_DBG("icid 0x%4.4x", icid);
4712 rsp.icid = cpu_to_le16(icid);
4713 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Drop the channel's references to its high-speed logical link.
 * The actual link release is still a placeholder (see comment below).
 */
4716 static void __release_logical_link(struct l2cap_chan *chan)
4718 chan->hs_hchan = NULL;
4719 chan->hs_hcon = NULL;
4721 /* Placeholder - release the logical link */
/* Handle failure to bring up an AMP logical link. If the channel was
 * still being created, disconnect it; if it was an established channel
 * mid-move, unwind the move according to our role.
 * NOTE(review): some interior lines are elided in this excerpt; the
 * visible code is kept byte-identical.
 */
4724 static void l2cap_logical_fail(struct l2cap_chan *chan)
4726 /* Logical link setup failed */
4727 if (chan->state != BT_CONNECTED) {
4728 /* Create channel failure, disconnect */
4729 l2cap_send_disconn_req(chan, ECONNRESET);
4733 switch (chan->move_role) {
4734 case L2CAP_MOVE_ROLE_RESPONDER:
4735 l2cap_move_done(chan);
4736 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4738 case L2CAP_MOVE_ROLE_INITIATOR:
4739 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4740 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4741 /* Remote has only sent pending or
4742 * success responses, clean up
4744 l2cap_move_done(chan);
4747 /* Other amp move states imply that the move
4748 * has already aborted
4750 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Complete channel creation over a freshly established AMP logical
 * link: attach the hci_chan, send the pending EFS config response, and
 * finish configuration if both directions are already done.
 * NOTE(review): some interior lines are elided in this excerpt; the
 * visible code is kept byte-identical.
 */
4755 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4756 struct hci_chan *hchan)
4758 struct l2cap_conf_rsp rsp;
4760 chan->hs_hchan = hchan;
4761 chan->hs_hcon->l2cap_data = chan->conn;
/* Config response was deferred until the logical link came up */
4763 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4765 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4768 set_default_fcs(chan);
4770 err = l2cap_ertm_init(chan);
4772 l2cap_send_disconn_req(chan, -err);
4774 l2cap_chan_ready(chan);
/* Advance the channel-move state machine once the AMP logical link is
 * up, sending the confirm/response appropriate to our move role; an
 * unexpected state aborts the move and releases the link.
 * NOTE(review): some interior lines are elided in this excerpt; the
 * visible code is kept byte-identical.
 */
4778 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4779 struct hci_chan *hchan)
4781 chan->hs_hcon = hchan->conn;
4782 chan->hs_hcon->l2cap_data = chan->conn;
4784 BT_DBG("move_state %d", chan->move_state);
4786 switch (chan->move_state) {
4787 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4788 /* Move confirm will be sent after a success
4789 * response is received
4791 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4793 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
/* Defer while locally busy; otherwise confirm or respond
 * depending on which side initiated the move.
 */
4794 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4795 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4796 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4797 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4798 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4799 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4800 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4801 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4805 /* Move was not in expected state, free the channel */
4806 __release_logical_link(chan);
4808 chan->move_state = L2CAP_MOVE_STABLE;
4812 /* Call with chan locked */
/* Logical-link completion callback: on failure, unwind and release;
 * on success, finish either channel creation (not yet connected) or a
 * channel move (already connected), ignoring links for BR/EDR chans.
 * NOTE(review): some interior lines are elided in this excerpt; the
 * visible code is kept byte-identical.
 */
4813 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4816 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4819 l2cap_logical_fail(chan);
4820 __release_logical_link(chan);
4824 if (chan->state != BT_CONNECTED) {
4825 /* Ignore logical link if channel is on BR/EDR */
4826 if (chan->local_amp_id != AMP_ID_BREDR)
4827 l2cap_logical_finish_create(chan, hchan);
4829 l2cap_logical_finish_move(chan, hchan);
/* Begin moving a channel between BR/EDR and an AMP controller as the
 * initiator. From BR/EDR the physical link is set up first (policy
 * permitting); from an AMP the move request is sent straight away.
 * NOTE(review): some interior lines are elided in this excerpt; the
 * visible code is kept byte-identical.
 */
4833 void l2cap_move_start(struct l2cap_chan *chan)
4835 BT_DBG("chan %p", chan);
4837 if (chan->local_amp_id == AMP_ID_BREDR) {
/* Only move off BR/EDR if policy actually prefers AMP */
4838 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4840 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4841 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4842 /* Placeholder - start physical link setup */
4844 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4845 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4847 l2cap_move_setup(chan);
/* Destination 0 = move back to BR/EDR */
4848 l2cap_send_move_chan_req(chan, 0);
/* Continue channel creation once the AMP physical link attempt has a
 * result. Outgoing channels either proceed on the AMP or fall back to
 * BR/EDR; incoming channels get a CREATE_CHAN_RSP and, on success,
 * immediately start configuration.
 * NOTE(review): some interior lines are elided in this excerpt; the
 * visible code is kept byte-identical.
 */
4852 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4853 u8 local_amp_id, u8 remote_amp_id)
4855 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4856 local_amp_id, remote_amp_id);
/* AMP links carry their own integrity checks; no L2CAP FCS */
4858 chan->fcs = L2CAP_FCS_NONE;
4860 /* Outgoing channel on AMP */
4861 if (chan->state == BT_CONNECT) {
4862 if (result == L2CAP_CR_SUCCESS) {
4863 chan->local_amp_id = local_amp_id;
4864 l2cap_send_create_chan_req(chan, remote_amp_id);
4866 /* Revert to BR/EDR connect */
4867 l2cap_send_conn_req(chan);
4873 /* Incoming channel on AMP */
4874 if (__l2cap_no_conn_pending(chan)) {
4875 struct l2cap_conn_rsp rsp;
4877 rsp.scid = cpu_to_le16(chan->dcid);
4878 rsp.dcid = cpu_to_le16(chan->scid);
4880 if (result == L2CAP_CR_SUCCESS) {
4881 /* Send successful response */
4882 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4883 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4885 /* Send negative response */
4886 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4887 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4890 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
/* Accepted: move to CONFIG and fire the first CONF_REQ */
4893 if (result == L2CAP_CR_SUCCESS) {
4894 l2cap_state_change(chan, BT_CONFIG);
4895 set_bit(CONF_REQ_SENT, &chan->conf_state);
4896 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4898 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4899 chan->num_conf_req++;
/* As move initiator, prepare the channel for the move and send the
 * Move Channel Request toward remote_amp_id.
 */
4904 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4907 l2cap_move_setup(chan);
4908 chan->move_id = local_amp_id;
4909 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4911 l2cap_send_move_chan_req(chan, remote_amp_id);
/* As move responder, answer the peer's move request based on logical
 * link availability: success if the link is (or becomes) ready,
 * otherwise not-allowed. Logical-link lookup is still a placeholder.
 * NOTE(review): some interior lines are elided in this excerpt; the
 * visible code is kept byte-identical.
 */
4914 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4916 struct hci_chan *hchan = NULL;
4918 /* Placeholder - get hci_chan for logical link */
4921 if (hchan->state == BT_CONNECTED) {
4922 /* Logical link is ready to go */
4923 chan->hs_hcon = hchan->conn;
4924 chan->hs_hcon->l2cap_data = chan->conn;
4925 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4926 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4928 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4930 /* Wait for logical link to be ready */
4931 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4934 /* Logical link not available */
4935 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort a channel move: if we are the responder, tell the peer why
 * (BAD_ID vs NOT_ALLOWED), then reset the move state machine and
 * resume ERTM data transmission.
 * NOTE(review): some interior lines are elided in this excerpt; the
 * visible code is kept byte-identical.
 */
4939 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4941 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4943 if (result == -EINVAL)
4944 rsp_result = L2CAP_MR_BAD_ID;
4946 rsp_result = L2CAP_MR_NOT_ALLOWED;
4948 l2cap_send_move_chan_rsp(chan, rsp_result);
4951 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4952 chan->move_state = L2CAP_MOVE_STABLE;
4954 /* Restart data transmission */
4955 l2cap_ertm_send(chan);
4958 /* Invoke with locked chan */
/* Physical-link completion callback: route to channel creation if the
 * channel isn't connected yet, cancel the move on failure, otherwise
 * continue the move per our role (initiator/responder).
 * NOTE(review): some interior lines are elided in this excerpt; the
 * visible code is kept byte-identical.
 */
4959 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4961 u8 local_amp_id = chan->local_amp_id;
4962 u8 remote_amp_id = chan->remote_amp_id;
4964 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4965 chan, result, local_amp_id, remote_amp_id);
/* Channel already going away: nothing to do */
4967 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
4970 if (chan->state != BT_CONNECTED) {
4971 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4972 } else if (result != L2CAP_MR_SUCCESS) {
4973 l2cap_do_move_cancel(chan, result);
4975 switch (chan->move_role) {
4976 case L2CAP_MOVE_ROLE_INITIATOR:
4977 l2cap_do_move_initiate(chan, local_amp_id,
4980 case L2CAP_MOVE_ROLE_RESPONDER:
4981 l2cap_do_move_respond(chan, result);
4984 l2cap_do_move_cancel(chan, result);
/* Handle an incoming L2CAP_MOVE_CHAN_REQ: validate the channel, the
 * destination AMP id and move-collision rules, then either respond
 * immediately (move to BR/EDR) or start accepting the physical link
 * (move to AMP, result PEND).
 * NOTE(review): some interior lines are elided in this excerpt; the
 * visible code is kept byte-identical.
 */
4990 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4991 struct l2cap_cmd_hdr *cmd,
4992 u16 cmd_len, void *data)
4994 struct l2cap_move_chan_req *req = data;
4995 struct l2cap_move_chan_rsp rsp;
4996 struct l2cap_chan *chan;
4998 u16 result = L2CAP_MR_NOT_ALLOWED;
/* Fixed-size PDU: any other length is malformed */
5000 if (cmd_len != sizeof(*req))
5003 icid = le16_to_cpu(req->icid);
5005 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
/* Moves only make sense when A2MP is locally supported */
5007 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
5010 chan = l2cap_get_chan_by_dcid(conn, icid);
/* Unknown icid: still answer, per spec, with NOT_ALLOWED */
5012 rsp.icid = cpu_to_le16(icid);
5013 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
5014 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
5019 chan->ident = cmd->ident;
/* Only dynamic, AMP-eligible, ERTM/streaming channels may move */
5021 if (chan->scid < L2CAP_CID_DYN_START ||
5022 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
5023 (chan->mode != L2CAP_MODE_ERTM &&
5024 chan->mode != L2CAP_MODE_STREAMING)) {
5025 result = L2CAP_MR_NOT_ALLOWED;
5026 goto send_move_response;
/* Already on the requested controller */
5029 if (chan->local_amp_id == req->dest_amp_id) {
5030 result = L2CAP_MR_SAME_ID;
5031 goto send_move_response;
5034 if (req->dest_amp_id != AMP_ID_BREDR) {
5035 struct hci_dev *hdev;
5036 hdev = hci_dev_get(req->dest_amp_id);
5037 if (!hdev || hdev->dev_type != HCI_AMP ||
5038 !test_bit(HCI_UP, &hdev->flags)) {
5042 result = L2CAP_MR_BAD_ID;
5043 goto send_move_response;
5048 /* Detect a move collision. Only send a collision response
5049 * if this side has "lost", otherwise proceed with the move.
5050 * The winner has the larger bd_addr.
5052 if ((__chan_is_moving(chan) ||
5053 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5054 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5055 result = L2CAP_MR_COLLISION;
5056 goto send_move_response;
5059 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5060 l2cap_move_setup(chan);
5061 chan->move_id = req->dest_amp_id;
5064 if (req->dest_amp_id == AMP_ID_BREDR) {
5065 /* Moving to BR/EDR */
5066 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5067 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5068 result = L2CAP_MR_PEND;
5070 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5071 result = L2CAP_MR_SUCCESS;
5074 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5075 /* Placeholder - uncomment when amp functions are available */
5076 /*amp_accept_physical(chan, req->dest_amp_id);*/
5077 result = L2CAP_MR_PEND;
5081 l2cap_send_move_chan_rsp(chan, result);
5083 l2cap_chan_unlock(chan);
5084 l2cap_chan_put(chan);
/* Continue an in-progress move after a SUCCESS/PEND Move Channel
 * Response, advancing the initiator state machine (wait for logical
 * link, local-busy, or send the confirmation). Unexpected states
 * abort the move with an UNCONFIRMED confirmation.
 * NOTE(review): some interior lines are elided in this excerpt; the
 * visible code is kept byte-identical.
 */
5089 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5091 struct l2cap_chan *chan;
5092 struct hci_chan *hchan = NULL;
5094 chan = l2cap_get_chan_by_scid(conn, icid);
/* No such channel: confirm-unconfirmed by icid alone */
5096 l2cap_send_move_chan_cfm_icid(conn, icid);
5100 __clear_chan_timer(chan);
/* PEND restarts the (extended) response timer */
5101 if (result == L2CAP_MR_PEND)
5102 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5104 switch (chan->move_state) {
5105 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5106 /* Move confirm will be sent when logical link
5109 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5111 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5112 if (result == L2CAP_MR_PEND) {
5114 } else if (test_bit(CONN_LOCAL_BUSY,
5115 &chan->conn_state)) {
5116 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5118 /* Logical link is up or moving to BR/EDR,
5121 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5122 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5125 case L2CAP_MOVE_WAIT_RSP:
5127 if (result == L2CAP_MR_SUCCESS) {
5128 /* Remote is ready, send confirm immediately
5129 * after logical link is ready
5131 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5133 /* Both logical link and move success
5134 * are required to confirm
5136 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5139 /* Placeholder - get hci_chan for logical link */
5141 /* Logical link not available */
5142 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5146 /* If the logical link is not yet connected, do not
5147 * send confirmation.
5149 if (hchan->state != BT_CONNECTED)
5152 /* Logical link is already ready to go */
5154 chan->hs_hcon = hchan->conn;
5155 chan->hs_hcon->l2cap_data = chan->conn;
5157 if (result == L2CAP_MR_SUCCESS) {
5158 /* Can confirm now */
5159 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5161 /* Now only need move success
5164 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5167 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5170 /* Any other amp move state means the move failed. */
5171 chan->move_id = chan->local_amp_id;
5172 l2cap_move_done(chan);
5173 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5176 l2cap_chan_unlock(chan);
5177 l2cap_chan_put(chan);
/* Handle a failed Move Channel Response: on COLLISION the initiator
 * yields and becomes responder; otherwise the move is cancelled and an
 * UNCONFIRMED confirmation is sent.
 * NOTE(review): some interior lines are elided in this excerpt; the
 * visible code is kept byte-identical.
 */
5180 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5183 struct l2cap_chan *chan;
5185 chan = l2cap_get_chan_by_ident(conn, ident);
5187 /* Could not locate channel, icid is best guess */
5188 l2cap_send_move_chan_cfm_icid(conn, icid);
5192 __clear_chan_timer(chan);
5194 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5195 if (result == L2CAP_MR_COLLISION) {
5196 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5198 /* Cleanup - cancel move */
5199 chan->move_id = chan->local_amp_id;
5200 l2cap_move_done(chan);
5204 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5206 l2cap_chan_unlock(chan);
5207 l2cap_chan_put(chan);
/* Handle an incoming L2CAP_MOVE_CHAN_RSP: dispatch to the continue
 * path for SUCCESS/PEND, or to the failure path otherwise.
 */
5210 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5211 struct l2cap_cmd_hdr *cmd,
5212 u16 cmd_len, void *data)
5214 struct l2cap_move_chan_rsp *rsp = data;
/* Fixed-size PDU: any other length is malformed */
5217 if (cmd_len != sizeof(*rsp))
5220 icid = le16_to_cpu(rsp->icid);
5221 result = le16_to_cpu(rsp->result);
5223 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5225 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5226 l2cap_move_continue(conn, icid, result);
5228 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle an incoming L2CAP_MOVE_CHAN_CFM: commit or roll back the
 * channel's controller assignment and always answer with a
 * confirmation response (even for an unknown icid, per spec).
 * NOTE(review): some interior lines are elided in this excerpt; the
 * visible code is kept byte-identical.
 */
5233 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5234 struct l2cap_cmd_hdr *cmd,
5235 u16 cmd_len, void *data)
5237 struct l2cap_move_chan_cfm *cfm = data;
5238 struct l2cap_chan *chan;
/* Fixed-size PDU: any other length is malformed */
5241 if (cmd_len != sizeof(*cfm))
5244 icid = le16_to_cpu(cfm->icid);
5245 result = le16_to_cpu(cfm->result);
5247 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5249 chan = l2cap_get_chan_by_dcid(conn, icid);
5251 /* Spec requires a response even if the icid was not found */
5252 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5256 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5257 if (result == L2CAP_MC_CONFIRMED) {
/* Move confirmed: adopt the new controller id */
5258 chan->local_amp_id = chan->move_id;
5259 if (chan->local_amp_id == AMP_ID_BREDR)
5260 __release_logical_link(chan);
/* Unconfirmed: stay on the current controller */
5262 chan->move_id = chan->local_amp_id;
5265 l2cap_move_done(chan);
5268 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5270 l2cap_chan_unlock(chan);
5271 l2cap_chan_put(chan);
/* Handle an incoming L2CAP_MOVE_CHAN_CFM_RSP: the peer acknowledged
 * our confirmation, so finalize the move (adopt the new controller,
 * release the old logical link if we are back on BR/EDR).
 * NOTE(review): some interior lines are elided in this excerpt; the
 * visible code is kept byte-identical.
 */
5276 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5277 struct l2cap_cmd_hdr *cmd,
5278 u16 cmd_len, void *data)
5280 struct l2cap_move_chan_cfm_rsp *rsp = data;
5281 struct l2cap_chan *chan;
/* Fixed-size PDU: any other length is malformed */
5284 if (cmd_len != sizeof(*rsp))
5287 icid = le16_to_cpu(rsp->icid);
5289 BT_DBG("icid 0x%4.4x", icid);
5291 chan = l2cap_get_chan_by_scid(conn, icid);
5295 __clear_chan_timer(chan);
5297 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5298 chan->local_amp_id = chan->move_id;
5300 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5301 __release_logical_link(chan);
5303 l2cap_move_done(chan);
5306 l2cap_chan_unlock(chan);
5307 l2cap_chan_put(chan);
/* Handle an LE Connection Parameter Update Request (central role
 * only): validate the proposed parameters, respond accept/reject, and
 * on accept apply them to the link and notify the management layer.
 * NOTE(review): some interior lines are elided in this excerpt; the
 * visible code is kept byte-identical.
 */
5312 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5313 struct l2cap_cmd_hdr *cmd,
5314 u16 cmd_len, u8 *data)
5316 struct hci_conn *hcon = conn->hcon;
5317 struct l2cap_conn_param_update_req *req;
5318 struct l2cap_conn_param_update_rsp rsp;
5319 u16 min, max, latency, to_multiplier;
/* Only the central may be asked to change connection parameters */
5322 if (hcon->role != HCI_ROLE_MASTER)
/* Fixed-size PDU: any other length is malformed */
5325 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5328 req = (struct l2cap_conn_param_update_req *) data;
5329 min = __le16_to_cpu(req->min);
5330 max = __le16_to_cpu(req->max);
5331 latency = __le16_to_cpu(req->latency);
5332 to_multiplier = __le16_to_cpu(req->to_multiplier);
5334 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5335 min, max, latency, to_multiplier);
5337 memset(&rsp, 0, sizeof(rsp));
5339 err = hci_check_conn_params(min, max, latency, to_multiplier);
5341 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5343 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5345 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: push the update to the controller and mgmt */
5351 store_hint = hci_le_conn_update(hcon, min, max, latency,
5353 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5354 store_hint, min, max, latency,
/* Handle an LE credit-based connection response: on success adopt the
 * peer's dcid/mtu/mps/credits and mark the channel ready; on
 * authentication/encryption failure retry at a higher security level;
 * otherwise delete the channel.
 * NOTE(review): some interior lines are elided in this excerpt; the
 * visible code is kept byte-identical.
 */
5362 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5363 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5366 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5367 struct hci_conn *hcon = conn->hcon;
5368 u16 dcid, mtu, mps, credits, result;
5369 struct l2cap_chan *chan;
5372 if (cmd_len < sizeof(*rsp))
5375 dcid = __le16_to_cpu(rsp->dcid);
5376 mtu = __le16_to_cpu(rsp->mtu);
5377 mps = __le16_to_cpu(rsp->mps);
5378 credits = __le16_to_cpu(rsp->credits);
5379 result = __le16_to_cpu(rsp->result);
/* Spec minimums: MTU/MPS >= 23, dcid in the LE dynamic range */
5381 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23 ||
5382 dcid < L2CAP_CID_DYN_START ||
5383 dcid > L2CAP_CID_LE_DYN_END))
5386 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5387 dcid, mtu, mps, credits, result);
5389 mutex_lock(&conn->chan_lock);
/* Match the response to the request we sent by ident */
5391 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5399 l2cap_chan_lock(chan);
5402 case L2CAP_CR_SUCCESS:
/* Peer must not hand out a dcid already in use */
5403 if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5411 chan->remote_mps = mps;
5412 chan->tx_credits = credits;
5413 l2cap_chan_ready(chan);
5416 case L2CAP_CR_AUTHENTICATION:
5417 case L2CAP_CR_ENCRYPTION:
5418 /* If we already have MITM protection we can't do
5421 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5422 l2cap_chan_del(chan, ECONNREFUSED);
/* Escalate security one level and retry */
5426 sec_level = hcon->sec_level + 1;
5427 if (chan->sec_level < sec_level)
5428 chan->sec_level = sec_level;
5430 /* We'll need to send a new Connect Request */
5431 clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5433 smp_conn_security(hcon, chan->sec_level);
5437 l2cap_chan_del(chan, ECONNREFUSED);
5441 l2cap_chan_unlock(chan);
5444 mutex_unlock(&conn->chan_lock);
/* Dispatch one BR/EDR signaling command to its handler by opcode.
 * ECHO_REQ is answered inline by echoing the payload back; unknown
 * opcodes are logged as errors.
 * NOTE(review): some interior lines are elided in this excerpt; the
 * visible code is kept byte-identical.
 */
5449 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5450 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5455 switch (cmd->code) {
5456 case L2CAP_COMMAND_REJ:
5457 l2cap_command_rej(conn, cmd, cmd_len, data);
5460 case L2CAP_CONN_REQ:
5461 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5464 case L2CAP_CONN_RSP:
5465 case L2CAP_CREATE_CHAN_RSP:
5466 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5469 case L2CAP_CONF_REQ:
5470 err = l2cap_config_req(conn, cmd, cmd_len, data);
5473 case L2CAP_CONF_RSP:
5474 l2cap_config_rsp(conn, cmd, cmd_len, data);
5477 case L2CAP_DISCONN_REQ:
5478 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5481 case L2CAP_DISCONN_RSP:
5482 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5485 case L2CAP_ECHO_REQ:
/* Echo the payload straight back with the same ident */
5486 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5489 case L2CAP_ECHO_RSP:
5492 case L2CAP_INFO_REQ:
5493 err = l2cap_information_req(conn, cmd, cmd_len, data);
5496 case L2CAP_INFO_RSP:
5497 l2cap_information_rsp(conn, cmd, cmd_len, data);
5500 case L2CAP_CREATE_CHAN_REQ:
5501 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5504 case L2CAP_MOVE_CHAN_REQ:
5505 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5508 case L2CAP_MOVE_CHAN_RSP:
5509 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5512 case L2CAP_MOVE_CHAN_CFM:
5513 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5516 case L2CAP_MOVE_CHAN_CFM_RSP:
5517 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5521 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Handle an LE credit-based connection request: validate PSM, MTU/MPS
 * minimums, security level, and the requested scid; create the child
 * channel from the listening one and respond (or defer when
 * FLAG_DEFER_SETUP is set).
 * NOTE(review): some interior lines are elided in this excerpt; the
 * visible code is kept byte-identical.
 */
5529 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5530 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5533 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5534 struct l2cap_le_conn_rsp rsp;
5535 struct l2cap_chan *chan, *pchan;
5536 u16 dcid, scid, credits, mtu, mps;
/* Fixed-size PDU: any other length is malformed */
5540 if (cmd_len != sizeof(*req))
5543 scid = __le16_to_cpu(req->scid);
5544 mtu = __le16_to_cpu(req->mtu);
5545 mps = __le16_to_cpu(req->mps);
/* Spec minimum for LE CoC: MTU and MPS must be at least 23 */
5550 if (mtu < 23 || mps < 23)
5553 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5556 /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
5559 * Valid range: 0x0001-0x00ff
5561 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
5563 if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
5564 result = L2CAP_CR_BAD_PSM;
5569 /* Check if we have socket listening on psm */
5570 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5571 &conn->hcon->dst, LE_LINK);
5573 result = L2CAP_CR_BAD_PSM;
5578 mutex_lock(&conn->chan_lock);
5579 l2cap_chan_lock(pchan);
/* Listener's security requirements must already be satisfied */
5581 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5583 result = L2CAP_CR_AUTHENTICATION;
5585 goto response_unlock;
5588 /* Check for valid dynamic CID range */
5589 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5590 result = L2CAP_CR_INVALID_SCID;
5592 goto response_unlock;
5595 /* Check if we already have channel with that dcid */
5596 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5597 result = L2CAP_CR_SCID_IN_USE;
5599 goto response_unlock;
5602 chan = pchan->ops->new_connection(pchan);
5604 result = L2CAP_CR_NO_MEM;
5605 goto response_unlock;
5608 l2cap_le_flowctl_init(chan);
5610 bacpy(&chan->src, &conn->hcon->src);
5611 bacpy(&chan->dst, &conn->hcon->dst);
5612 chan->src_type = bdaddr_src_type(conn->hcon);
5613 chan->dst_type = bdaddr_dst_type(conn->hcon);
5617 chan->remote_mps = mps;
5618 chan->tx_credits = __le16_to_cpu(req->credits);
5620 __l2cap_chan_add(conn, chan);
5622 credits = chan->rx_credits;
5624 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5626 chan->ident = cmd->ident;
5628 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5629 l2cap_state_change(chan, BT_CONNECT2);
5630 /* The following result value is actually not defined
5631 * for LE CoC but we use it to let the function know
5632 * that it should bail out after doing its cleanup
5633 * instead of sending a response.
5635 result = L2CAP_CR_PEND;
5636 chan->ops->defer(chan);
5638 l2cap_chan_ready(chan);
5639 result = L2CAP_CR_SUCCESS;
5643 l2cap_chan_unlock(pchan);
5644 mutex_unlock(&conn->chan_lock);
5645 l2cap_chan_put(pchan);
/* Deferred setup: response will be sent later by userspace */
5647 if (result == L2CAP_CR_PEND)
5652 rsp.mtu = cpu_to_le16(chan->imtu);
5653 rsp.mps = cpu_to_le16(chan->mps);
5659 rsp.dcid = cpu_to_le16(dcid);
5660 rsp.credits = cpu_to_le16(credits);
5661 rsp.result = cpu_to_le16(result);
5663 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/* Handle an LE Flow Control Credit packet: add the peer's credits to
 * the channel's tx budget, guarding against the 65535 overflow (which
 * is a protocol violation and disconnects the channel), then resume
 * any stalled transmission.
 * NOTE(review): some interior lines are elided in this excerpt; the
 * visible code is kept byte-identical.
 */
5668 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5669 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5672 struct l2cap_le_credits *pkt;
5673 struct l2cap_chan *chan;
5674 u16 cid, credits, max_credits;
/* Fixed-size PDU: any other length is malformed */
5676 if (cmd_len != sizeof(*pkt))
5679 pkt = (struct l2cap_le_credits *) data;
5680 cid = __le16_to_cpu(pkt->cid);
5681 credits = __le16_to_cpu(pkt->credits);
5683 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5685 chan = l2cap_get_chan_by_dcid(conn, cid);
/* Total credits must never exceed LE_FLOWCTL_MAX_CREDITS */
5689 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5690 if (credits > max_credits) {
5691 BT_ERR("LE credits overflow");
5692 l2cap_send_disconn_req(chan, ECONNRESET);
5694 /* Return 0 so that we don't trigger an unnecessary
5695 * command reject packet.
5700 chan->tx_credits += credits;
5702 /* Resume sending */
5703 l2cap_le_flowctl_send(chan);
5705 if (chan->tx_credits)
5706 chan->ops->resume(chan);
5709 l2cap_chan_unlock(chan);
5710 l2cap_chan_put(chan);
/* Handle an LE Command Reject from the peer: if the rejected ident matches
 * a channel with an outstanding request, tear that channel down with
 * ECONNREFUSED. Channel-list mutex protects the ident lookup.
 */
5715 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5716 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5719 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5720 struct l2cap_chan *chan;
/* Reject PDU must at least carry the fixed reason field. */
5722 if (cmd_len < sizeof(*rej))
5725 mutex_lock(&conn->chan_lock);
/* Find the channel that sent the command the peer is rejecting. */
5727 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5731 l2cap_chan_lock(chan);
5732 l2cap_chan_del(chan, ECONNREFUSED);
5733 l2cap_chan_unlock(chan);
5736 mutex_unlock(&conn->chan_lock);
5740 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5741 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5746 switch (cmd->code) {
5747 case L2CAP_COMMAND_REJ:
5748 l2cap_le_command_rej(conn, cmd, cmd_len, data);
5751 case L2CAP_CONN_PARAM_UPDATE_REQ:
5752 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5755 case L2CAP_CONN_PARAM_UPDATE_RSP:
5758 case L2CAP_LE_CONN_RSP:
5759 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5762 case L2CAP_LE_CONN_REQ:
5763 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5766 case L2CAP_LE_CREDITS:
5767 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5770 case L2CAP_DISCONN_REQ:
5771 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5774 case L2CAP_DISCONN_RSP:
5775 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5779 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process an skb received on the LE signaling fixed channel. Verifies the
 * link type and header, parses the single command header (LE signaling
 * carries one command per PDU), dispatches it, and sends a "command not
 * understood" reject on handler failure.
 */
5787 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5788 struct sk_buff *skb)
5790 struct hci_conn *hcon = conn->hcon;
5791 struct l2cap_cmd_hdr *cmd;
/* LE signaling is only valid on an LE link. */
5795 if (hcon->type != LE_LINK)
5798 if (skb->len < L2CAP_CMD_HDR_SIZE)
5801 cmd = (void *) skb->data;
5802 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5804 len = le16_to_cpu(cmd->len);
5806 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* The declared length must cover exactly the remaining payload and the
 * ident must be non-zero, per the L2CAP signaling rules.
 */
5808 if (len != skb->len || !cmd->ident) {
5809 BT_DBG("corrupted command");
5813 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5815 struct l2cap_cmd_rej_unk rej;
5817 BT_ERR("Wrong link type (%d)", err);
5819 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5820 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Process an skb received on the BR/EDR signaling fixed channel. Unlike
 * the LE path, a single PDU may contain multiple commands, so this walks
 * the buffer command by command, dispatching each and rejecting any that
 * fail with "command not understood".
 */
5828 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5829 struct sk_buff *skb)
5831 struct hci_conn *hcon = conn->hcon;
5832 u8 *data = skb->data;
5834 struct l2cap_cmd_hdr cmd;
/* Give raw (SOCK_RAW) listeners a copy of the signaling traffic first. */
5837 l2cap_raw_recv(conn, skb);
5839 if (hcon->type != ACL_LINK)
/* Iterate over every complete command header in the PDU. */
5842 while (len >= L2CAP_CMD_HDR_SIZE) {
5844 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5845 data += L2CAP_CMD_HDR_SIZE;
5846 len -= L2CAP_CMD_HDR_SIZE;
5848 cmd_len = le16_to_cpu(cmd.len);
5850 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
/* Declared length may not overrun the buffer; ident must be non-zero. */
5853 if (cmd_len > len || !cmd.ident) {
5854 BT_DBG("corrupted command");
5858 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5860 struct l2cap_cmd_rej_unk rej;
5862 BT_ERR("Wrong link type (%d)", err);
5864 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5865 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify the CRC16 frame check sequence of a received ERTM/streaming
 * frame. The FCS is trimmed off the tail of the skb and recomputed over
 * the header (which sits just before skb->data after earlier pulls) plus
 * payload. Returns non-zero on mismatch (elided in this listing).
 */
5877 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5879 u16 our_fcs, rcv_fcs;
/* Extended control field uses a larger header than enhanced control. */
5882 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5883 hdr_size = L2CAP_EXT_HDR_SIZE;
5885 hdr_size = L2CAP_ENH_HDR_SIZE;
5887 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Drop the 2-byte FCS from the tail; it still sits just past skb->len. */
5888 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5889 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5890 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5892 if (our_fcs != rcv_fcs)
/* Answer a poll (P=1) from the peer with the F-bit set, using whichever
 * frame type is appropriate: RNR when locally busy, otherwise pending
 * I-frames, falling back to an RR if no frame carried the F-bit.
 */
5898 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5900 struct l2cap_ctrl control;
5902 BT_DBG("chan %p", chan);
5904 memset(&control, 0, sizeof(control));
5907 control.reqseq = chan->buffer_seq;
/* Request that the next outgoing frame carries the final (F) bit. */
5908 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5910 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5911 control.super = L2CAP_SUPER_RNR;
5912 l2cap_send_sframe(chan, &control);
/* Peer just polled us, so it is no longer busy; restart retransmission
 * timing if we still have unacked frames outstanding.
 */
5915 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5916 chan->unacked_frames > 0)
5917 __set_retrans_timer(chan);
5919 /* Send pending iframes */
5920 l2cap_ertm_send(chan);
5922 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5923 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5924 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5927 control.super = L2CAP_SUPER_RR;
5928 l2cap_send_sframe(chan, &control);
/* Append @new_frag to @skb's frag_list, keeping *last_frag pointing at
 * the list tail for O(1) appends, and updating the aggregate length and
 * truesize accounting on the head skb.
 */
5932 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5933 struct sk_buff **last_frag)
5935 /* skb->len reflects data in skb as well as all fragments
5936 * skb->data_len reflects only data in fragments
/* First fragment: start the frag_list on the head skb. */
5938 if (!skb_has_frag_list(skb))
5939 skb_shinfo(skb)->frag_list = new_frag;
5941 new_frag->next = NULL;
/* Link onto the cached tail and advance it. */
5943 (*last_frag)->next = new_frag;
5944 *last_frag = new_frag;
5946 skb->len += new_frag->len;
5947 skb->data_len += new_frag->len;
5948 skb->truesize += new_frag->truesize;
/* Reassemble a (possibly segmented) SDU from ERTM/streaming I-frames
 * according to the SAR bits in @control, delivering the complete SDU to
 * chan->ops->recv(). Partial-SDU state lives in chan->sdu /
 * chan->sdu_last_frag / chan->sdu_len and is cleared on completion or
 * error. NOTE(review): several branches/returns are elided in this
 * listing; comments describe only the visible logic.
 */
5951 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5952 struct l2cap_ctrl *control)
5956 switch (control->sar) {
5957 case L2CAP_SAR_UNSEGMENTED:
/* Whole SDU in one frame: hand it straight up. */
5961 err = chan->ops->recv(chan, skb);
5964 case L2CAP_SAR_START:
/* First segment carries a 2-byte total SDU length prefix. */
5968 if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
5971 chan->sdu_len = get_unaligned_le16(skb->data);
5972 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* Reject SDUs larger than our negotiated receive MTU. */
5974 if (chan->sdu_len > chan->imtu) {
5979 if (skb->len >= chan->sdu_len)
5983 chan->sdu_last_frag = skb;
5989 case L2CAP_SAR_CONTINUE:
5993 append_skb_frag(chan->sdu, skb,
5994 &chan->sdu_last_frag);
/* A continuation may not reach or exceed the declared SDU length. */
5997 if (chan->sdu->len >= chan->sdu_len)
/* (END segment) append and verify the total length matches exactly. */
6007 append_skb_frag(chan->sdu, skb,
6008 &chan->sdu_last_frag);
6011 if (chan->sdu->len != chan->sdu_len)
6014 err = chan->ops->recv(chan, chan->sdu);
6017 /* Reassembly complete */
6019 chan->sdu_last_frag = NULL;
/* Error path: discard any partially assembled SDU. */
6027 kfree_skb(chan->sdu);
6029 chan->sdu_last_frag = NULL;
/* Re-segment queued outbound data after a channel move changes the MTU.
 * NOTE(review): the body is elided in this listing — confirm behavior
 * against the full source before relying on it.
 */
6036 static int l2cap_resegment(struct l2cap_chan *chan)
/* Public entry point: inform the ERTM TX state machine that the local
 * receive side became busy (@busy != 0) or cleared its busy condition.
 * No-op for non-ERTM channels.
 */
6042 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6046 if (chan->mode != L2CAP_MODE_ERTM)
6049 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6050 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ hold queue: deliver frames to reassembly in sequence
 * order starting at buffer_seq, stopping at the first gap or when the
 * local side goes busy. When the queue empties, the channel returns to
 * the normal RECV state and acks the peer.
 */
6053 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6056 /* Pass sequential frames to l2cap_reassemble_sdu()
6057 * until a gap is encountered.
6060 BT_DBG("chan %p", chan);
6062 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6063 struct sk_buff *skb;
6064 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6065 chan->buffer_seq, skb_queue_len(&chan->srej_q));
/* Find the next in-order frame; a miss means a gap — stop draining. */
6067 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6072 skb_unlink(skb, &chan->srej_q);
6073 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6074 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
6079 if (skb_queue_empty(&chan->srej_q)) {
6080 chan->rx_state = L2CAP_RX_STATE_RECV;
6081 l2cap_send_ack(chan);
/* Handle a received SREJ (selective reject) S-frame: validate the
 * requested sequence number, enforce the per-frame retry limit, then
 * retransmit the single requested I-frame, honoring the P/F bits and the
 * CONN_SREJ_ACT bookkeeping that suppresses duplicate retransmissions.
 */
6087 static void l2cap_handle_srej(struct l2cap_chan *chan,
6088 struct l2cap_ctrl *control)
6090 struct sk_buff *skb;
6092 BT_DBG("chan %p, control %p", chan, control);
/* Peer may not SREJ a frame we have not sent yet. */
6094 if (control->reqseq == chan->next_tx_seq) {
6095 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6096 l2cap_send_disconn_req(chan, ECONNRESET);
6100 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6103 BT_DBG("Seq %d not available for retransmission",
/* Too many retries of this frame: give up and disconnect. */
6108 if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6109 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6110 l2cap_send_disconn_req(chan, ECONNRESET);
6114 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6116 if (control->poll) {
6117 l2cap_pass_to_tx(chan, control);
/* Poll demands a final-bit response on the retransmission. */
6119 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6120 l2cap_retransmit(chan, control);
6121 l2cap_ertm_send(chan);
6123 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6124 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6125 chan->srej_save_reqseq = control->reqseq;
6128 l2cap_pass_to_tx_fbit(chan, control);
6130 if (control->final) {
/* Only retransmit if this F=1 SREJ isn't the answer to our own poll
 * for the same sequence (CONN_SREJ_ACT tracks that case).
 */
6131 if (chan->srej_save_reqseq != control->reqseq ||
6132 !test_and_clear_bit(CONN_SREJ_ACT,
6134 l2cap_retransmit(chan, control);
6136 l2cap_retransmit(chan, control);
6137 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6138 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6139 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ (reject) S-frame: validate reqseq, enforce the
 * retry limit, and retransmit all frames from reqseq onward — unless
 * CONN_REJ_ACT shows we already retransmitted in response to our poll.
 */
6145 static void l2cap_handle_rej(struct l2cap_chan *chan,
6146 struct l2cap_ctrl *control)
6148 struct sk_buff *skb;
6150 BT_DBG("chan %p, control %p", chan, control);
/* Peer may not reject a frame we have not sent yet. */
6152 if (control->reqseq == chan->next_tx_seq) {
6153 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6154 l2cap_send_disconn_req(chan, ECONNRESET);
6158 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6160 if (chan->max_tx && skb &&
6161 bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6162 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6163 l2cap_send_disconn_req(chan, ECONNRESET);
6167 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Acknowledge everything up to reqseq before retransmitting. */
6169 l2cap_pass_to_tx(chan, control);
6171 if (control->final) {
/* F=1: skip retransmission if we already did it for our own poll. */
6172 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6173 l2cap_retransmit_all(chan, control);
6175 l2cap_retransmit_all(chan, control);
6176 l2cap_ertm_send(chan);
6177 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6178 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify the TxSeq of a received I-frame relative to the receive
 * window and any outstanding SREJ state. The returned L2CAP_TXSEQ_*
 * value tells the RX state machine whether the frame is the expected
 * one, a duplicate, out of order (triggering SREJ), or invalid.
 */
6182 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6184 BT_DBG("chan %p, txseq %d", chan, txseq);
6186 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6187 chan->expected_tx_seq);
/* While SREJs are outstanding, apply stricter window/duplicate checks. */
6189 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6190 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6192 /* See notes below regarding "double poll" and
6195 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6196 BT_DBG("Invalid/Ignore - after SREJ");
6197 return L2CAP_TXSEQ_INVALID_IGNORE;
6199 BT_DBG("Invalid - in window after SREJ sent");
6200 return L2CAP_TXSEQ_INVALID;
/* Head of the SREJ list is the retransmission we are waiting for. */
6204 if (chan->srej_list.head == txseq) {
6205 BT_DBG("Expected SREJ");
6206 return L2CAP_TXSEQ_EXPECTED_SREJ;
6209 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6210 BT_DBG("Duplicate SREJ - txseq already stored");
6211 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6214 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6215 BT_DBG("Unexpected SREJ - not requested");
6216 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6220 if (chan->expected_tx_seq == txseq) {
/* Even the expected seq is invalid if it falls outside the window. */
6221 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6223 BT_DBG("Invalid - txseq outside tx window");
6224 return L2CAP_TXSEQ_INVALID;
6227 return L2CAP_TXSEQ_EXPECTED;
/* Earlier than expected (mod window) means a frame we already have. */
6231 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6232 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6233 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6234 return L2CAP_TXSEQ_DUPLICATE;
6237 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6238 /* A source of invalid packets is a "double poll" condition,
6239 * where delays cause us to send multiple poll packets. If
6240 * the remote stack receives and processes both polls,
6241 * sequence numbers can wrap around in such a way that a
6242 * resent frame has a sequence number that looks like new data
6243 * with a sequence gap. This would trigger an erroneous SREJ
6246 * Fortunately, this is impossible with a tx window that's
6247 * less than half of the maximum sequence number, which allows
6248 * invalid frames to be safely ignored.
6250 * With tx window sizes greater than half of the tx window
6251 * maximum, the frame is invalid and cannot be ignored. This
6252 * causes a disconnect.
6255 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6256 BT_DBG("Invalid/Ignore - txseq outside tx window");
6257 return L2CAP_TXSEQ_INVALID_IGNORE;
6259 BT_DBG("Invalid - txseq outside tx window");
6260 return L2CAP_TXSEQ_INVALID;
6263 BT_DBG("Unexpected - txseq indicates missing frames");
6264 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM RX state machine handler for the normal RECV state. Processes
 * I-frames (expected / unexpected / duplicate / invalid per
 * l2cap_classify_txseq) and RR/RNR/REJ/SREJ S-frames. Frames not
 * consumed (skb_in_use stays false) are freed at the bottom.
 * NOTE(review): this listing elides some lines (breaks/frees); comments
 * describe only the visible logic.
 */
6268 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6269 struct l2cap_ctrl *control,
6270 struct sk_buff *skb, u8 event)
6272 struct l2cap_ctrl local_control;
6274 bool skb_in_use = false;
6276 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6280 case L2CAP_EV_RECV_IFRAME:
6281 switch (l2cap_classify_txseq(chan, control->txseq)) {
6282 case L2CAP_TXSEQ_EXPECTED:
6283 l2cap_pass_to_tx(chan, control);
/* Locally busy: drop the frame; peer will retransmit after RNR. */
6285 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6286 BT_DBG("Busy, discarding expected seq %d",
6291 chan->expected_tx_seq = __next_seq(chan,
6294 chan->buffer_seq = chan->expected_tx_seq;
6297 /* l2cap_reassemble_sdu may free skb, hence invalidate
6298 * control, so make a copy in advance to use it after
6299 * l2cap_reassemble_sdu returns and to avoid the race
6300 * condition, for example:
6302 * The current thread calls:
6303 * l2cap_reassemble_sdu
6304 * chan->ops->recv == l2cap_sock_recv_cb
6305 * __sock_queue_rcv_skb
6306 * Another thread calls:
6310 * Then the current thread tries to access control, but
6311 * it was freed by skb_free_datagram.
6313 local_control = *control;
6314 err = l2cap_reassemble_sdu(chan, skb, control);
6318 if (local_control.final) {
6319 if (!test_and_clear_bit(CONN_REJ_ACT,
6320 &chan->conn_state)) {
6321 local_control.final = 0;
6322 l2cap_retransmit_all(chan, &local_control);
6323 l2cap_ertm_send(chan);
6327 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6328 l2cap_send_ack(chan);
6330 case L2CAP_TXSEQ_UNEXPECTED:
6331 l2cap_pass_to_tx(chan, control);
6333 /* Can't issue SREJ frames in the local busy state.
6334 * Drop this frame, it will be seen as missing
6335 * when local busy is exited.
6337 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6338 BT_DBG("Busy, discarding unexpected seq %d",
6343 /* There was a gap in the sequence, so an SREJ
6344 * must be sent for each missing frame. The
6345 * current frame is stored for later use.
6347 skb_queue_tail(&chan->srej_q, skb);
6349 BT_DBG("Queued %p (queue len %d)", skb,
6350 skb_queue_len(&chan->srej_q));
6352 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6353 l2cap_seq_list_clear(&chan->srej_list);
6354 l2cap_send_srej(chan, control->txseq);
/* Switch to gap-recovery state until the SREJ queue drains. */
6356 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6358 case L2CAP_TXSEQ_DUPLICATE:
6359 l2cap_pass_to_tx(chan, control);
6361 case L2CAP_TXSEQ_INVALID_IGNORE:
6363 case L2CAP_TXSEQ_INVALID:
6365 l2cap_send_disconn_req(chan, ECONNRESET);
6369 case L2CAP_EV_RECV_RR:
6370 l2cap_pass_to_tx(chan, control);
6371 if (control->final) {
6372 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6374 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6375 !__chan_is_moving(chan)) {
6377 l2cap_retransmit_all(chan, control);
6380 l2cap_ertm_send(chan);
6381 } else if (control->poll) {
/* RR with P=1: answer with an F=1 frame. */
6382 l2cap_send_i_or_rr_or_rnr(chan);
6384 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6385 &chan->conn_state) &&
6386 chan->unacked_frames)
6387 __set_retrans_timer(chan);
6389 l2cap_ertm_send(chan);
6392 case L2CAP_EV_RECV_RNR:
6393 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6394 l2cap_pass_to_tx(chan, control);
6395 if (control && control->poll) {
6396 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6397 l2cap_send_rr_or_rnr(chan, 0);
/* Remote busy: stop retransmitting until it clears. */
6399 __clear_retrans_timer(chan);
6400 l2cap_seq_list_clear(&chan->retrans_list);
6402 case L2CAP_EV_RECV_REJ:
6403 l2cap_handle_rej(chan, control);
6405 case L2CAP_EV_RECV_SREJ:
6406 l2cap_handle_srej(chan, control);
/* Any frame not queued or consumed above gets freed here. */
6412 if (skb && !skb_in_use) {
6413 BT_DBG("Freeing %p", skb);
/* ERTM RX state machine handler while SREJs are outstanding
 * (SREJ_SENT state). Incoming I-frames are queued in srej_q and the
 * in-order prefix is delivered via l2cap_rx_queued_iframes(); S-frames
 * are handled much like in the RECV state but keep the SREJ bookkeeping
 * consistent. NOTE(review): some breaks/frees are elided in this listing.
 */
6420 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6421 struct l2cap_ctrl *control,
6422 struct sk_buff *skb, u8 event)
6425 u16 txseq = control->txseq;
6426 bool skb_in_use = false;
6428 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6432 case L2CAP_EV_RECV_IFRAME:
6433 switch (l2cap_classify_txseq(chan, txseq)) {
6434 case L2CAP_TXSEQ_EXPECTED:
6435 /* Keep frame for reassembly later */
6436 l2cap_pass_to_tx(chan, control);
6437 skb_queue_tail(&chan->srej_q, skb);
6439 BT_DBG("Queued %p (queue len %d)", skb,
6440 skb_queue_len(&chan->srej_q));
6442 chan->expected_tx_seq = __next_seq(chan, txseq);
6444 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* A requested retransmission arrived; drop it from the SREJ list. */
6445 l2cap_seq_list_pop(&chan->srej_list);
6447 l2cap_pass_to_tx(chan, control);
6448 skb_queue_tail(&chan->srej_q, skb);
6450 BT_DBG("Queued %p (queue len %d)", skb,
6451 skb_queue_len(&chan->srej_q));
/* Try to deliver the now-contiguous prefix of the hold queue. */
6453 err = l2cap_rx_queued_iframes(chan);
6458 case L2CAP_TXSEQ_UNEXPECTED:
6459 /* Got a frame that can't be reassembled yet.
6460 * Save it for later, and send SREJs to cover
6461 * the missing frames.
6463 skb_queue_tail(&chan->srej_q, skb);
6465 BT_DBG("Queued %p (queue len %d)", skb,
6466 skb_queue_len(&chan->srej_q));
6468 l2cap_pass_to_tx(chan, control);
6469 l2cap_send_srej(chan, control->txseq);
6471 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6472 /* This frame was requested with an SREJ, but
6473 * some expected retransmitted frames are
6474 * missing. Request retransmission of missing
6477 skb_queue_tail(&chan->srej_q, skb);
6479 BT_DBG("Queued %p (queue len %d)", skb,
6480 skb_queue_len(&chan->srej_q));
6482 l2cap_pass_to_tx(chan, control);
6483 l2cap_send_srej_list(chan, control->txseq);
6485 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6486 /* We've already queued this frame. Drop this copy. */
6487 l2cap_pass_to_tx(chan, control);
6489 case L2CAP_TXSEQ_DUPLICATE:
6490 /* Expecting a later sequence number, so this frame
6491 * was already received. Ignore it completely.
6494 case L2CAP_TXSEQ_INVALID_IGNORE:
6496 case L2CAP_TXSEQ_INVALID:
6498 l2cap_send_disconn_req(chan, ECONNRESET);
6502 case L2CAP_EV_RECV_RR:
6503 l2cap_pass_to_tx(chan, control);
6504 if (control->final) {
6505 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6507 if (!test_and_clear_bit(CONN_REJ_ACT,
6508 &chan->conn_state)) {
6510 l2cap_retransmit_all(chan, control);
6513 l2cap_ertm_send(chan);
6514 } else if (control->poll) {
6515 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6516 &chan->conn_state) &&
6517 chan->unacked_frames) {
6518 __set_retrans_timer(chan);
/* Answer the poll by re-requesting the tail of the SREJ list. */
6521 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6522 l2cap_send_srej_tail(chan);
6524 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6525 &chan->conn_state) &&
6526 chan->unacked_frames)
6527 __set_retrans_timer(chan);
6529 l2cap_send_ack(chan);
6532 case L2CAP_EV_RECV_RNR:
6533 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6534 l2cap_pass_to_tx(chan, control);
6535 if (control->poll) {
6536 l2cap_send_srej_tail(chan);
/* No poll: acknowledge the RNR with a plain RR. */
6538 struct l2cap_ctrl rr_control;
6539 memset(&rr_control, 0, sizeof(rr_control));
6540 rr_control.sframe = 1;
6541 rr_control.super = L2CAP_SUPER_RR;
6542 rr_control.reqseq = chan->buffer_seq;
6543 l2cap_send_sframe(chan, &rr_control);
6547 case L2CAP_EV_RECV_REJ:
6548 l2cap_handle_rej(chan, control);
6550 case L2CAP_EV_RECV_SREJ:
6551 l2cap_handle_srej(chan, control);
/* Any frame not queued above gets freed here. */
6555 if (skb && !skb_in_use) {
6556 BT_DBG("Freeing %p", skb);
/* Finalize an AMP channel move: return the RX state machine to RECV,
 * pick the connection MTU from the new transport (AMP block MTU when an
 * HS link is present, otherwise the ACL MTU) and re-segment queued data.
 */
6563 static int l2cap_finish_move(struct l2cap_chan *chan)
6565 BT_DBG("chan %p", chan);
6567 chan->rx_state = L2CAP_RX_STATE_RECV;
6570 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6572 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6574 return l2cap_resegment(chan);
/* RX handler for the WAIT_P state (channel move: waiting for a poll from
 * the peer). On the poll, rewind the transmit side to the peer's reqseq,
 * finish the move, and answer with an F=1 frame; other events are
 * deferred to the normal RECV handler without the skb.
 */
6577 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6578 struct l2cap_ctrl *control,
6579 struct sk_buff *skb, u8 event)
6583 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6589 l2cap_process_reqseq(chan, control->reqseq);
/* Restart transmission from the head of the (pruned) tx queue. */
6591 if (!skb_queue_empty(&chan->tx_q))
6592 chan->tx_send_head = skb_peek(&chan->tx_q);
6594 chan->tx_send_head = NULL;
6596 /* Rewind next_tx_seq to the point expected
6599 chan->next_tx_seq = control->reqseq;
6600 chan->unacked_frames = 0;
6602 err = l2cap_finish_move(chan);
6606 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6607 l2cap_send_i_or_rr_or_rnr(chan);
6609 if (event == L2CAP_EV_RECV_IFRAME)
/* Non-I-frame events fall through to the normal RECV handling. */
6612 return l2cap_rx_state_recv(chan, control, NULL, event);
/* RX handler for the WAIT_F state (channel move: waiting for the peer's
 * final-bit response). Ignores frames without F=1; on F=1 it rewinds the
 * transmit side, adopts the new transport's MTU, re-segments, and
 * processes the triggering frame through the normal RECV handler.
 */
6615 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6616 struct l2cap_ctrl *control,
6617 struct sk_buff *skb, u8 event)
/* Only the F=1 frame completes this state. */
6621 if (!control->final)
6624 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6626 chan->rx_state = L2CAP_RX_STATE_RECV;
6627 l2cap_process_reqseq(chan, control->reqseq);
6629 if (!skb_queue_empty(&chan->tx_q))
6630 chan->tx_send_head = skb_peek(&chan->tx_q);
6632 chan->tx_send_head = NULL;
6634 /* Rewind next_tx_seq to the point expected
6637 chan->next_tx_seq = control->reqseq;
6638 chan->unacked_frames = 0;
/* Pick MTU from the AMP link if present, else the ACL link. */
6641 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6643 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6645 err = l2cap_resegment(chan);
6648 err = l2cap_rx_state_recv(chan, control, skb, event);
/* Return true if @reqseq acknowledges only frames that are currently
 * sent-but-unacked, i.e. it lies within [expected_ack_seq, next_tx_seq]
 * in modular sequence space.
 */
6653 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6655 /* Make sure reqseq is for a packet that has been sent but not acked */
6658 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6659 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive dispatcher: validate the frame's reqseq, then
 * route the event to the handler for the channel's current RX state.
 * An invalid reqseq is a protocol violation and disconnects the channel.
 */
6662 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6663 struct sk_buff *skb, u8 event)
6667 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6668 control, skb, event, chan->rx_state);
6670 if (__valid_reqseq(chan, control->reqseq)) {
6671 switch (chan->rx_state) {
6672 case L2CAP_RX_STATE_RECV:
6673 err = l2cap_rx_state_recv(chan, control, skb, event);
6675 case L2CAP_RX_STATE_SREJ_SENT:
6676 err = l2cap_rx_state_srej_sent(chan, control, skb,
6679 case L2CAP_RX_STATE_WAIT_P:
6680 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6682 case L2CAP_RX_STATE_WAIT_F:
6683 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6690 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6691 control->reqseq, chan->next_tx_seq,
6692 chan->expected_ack_seq);
6693 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode receive: deliver the expected txseq to reassembly,
 * discard everything else (streaming mode has no retransmission), and
 * reset any partial SDU when a frame is dropped mid-SDU.
 */
6699 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6700 struct sk_buff *skb)
6702 /* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
6703 * the txseq field in advance to use it after l2cap_reassemble_sdu
6704 * returns and to avoid the race condition, for example:
6706 * The current thread calls:
6707 * l2cap_reassemble_sdu
6708 * chan->ops->recv == l2cap_sock_recv_cb
6709 * __sock_queue_rcv_skb
6710 * Another thread calls:
6714 * Then the current thread tries to access control, but it was freed by
6715 * skb_free_datagram.
6717 u16 txseq = control->txseq;
6719 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6722 if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
6723 l2cap_pass_to_tx(chan, control);
6725 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6726 __next_seq(chan, chan->buffer_seq));
6728 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6730 l2cap_reassemble_sdu(chan, skb, control);
/* Unexpected seq: drop any half-built SDU and the frame itself. */
6733 kfree_skb(chan->sdu);
6736 chan->sdu_last_frag = NULL;
6740 BT_DBG("Freeing %p", skb);
/* Streaming mode advances past gaps instead of recovering them. */
6745 chan->last_acked_seq = txseq;
6746 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Entry point for ERTM/streaming data frames: unpack the control field,
 * verify FCS and payload size against MPS, validate the F/P bits, then
 * feed I-frames into l2cap_rx()/l2cap_stream_rx() and map S-frames to
 * the corresponding RX events. Protocol violations disconnect.
 * NOTE(review): this listing elides some lines (drops/returns); comments
 * describe only the visible logic.
 */
6751 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6753 struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
6757 __unpack_control(chan, skb);
6762 * We can just drop the corrupted I-frame here.
6763 * Receiver will miss it and start proper recovery
6764 * procedures and ask for retransmission.
6766 if (l2cap_check_fcs(chan, skb))
/* Account for the SDU-length prefix on SAR_START I-frames. */
6769 if (!control->sframe && control->sar == L2CAP_SAR_START)
6770 len -= L2CAP_SDULEN_SIZE;
6772 if (chan->fcs == L2CAP_FCS_CRC16)
6773 len -= L2CAP_FCS_SIZE;
/* Payload larger than the negotiated MPS is a protocol violation. */
6775 if (len > chan->mps) {
6776 l2cap_send_disconn_req(chan, ECONNRESET);
/* Optional per-channel filter hook may consume/reject the frame. */
6780 if (chan->ops->filter) {
6781 if (chan->ops->filter(chan, skb))
6785 if (!control->sframe) {
6788 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6789 control->sar, control->reqseq, control->final,
6792 /* Validate F-bit - F=0 always valid, F=1 only
6793 * valid in TX WAIT_F
6795 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6798 if (chan->mode != L2CAP_MODE_STREAMING) {
6799 event = L2CAP_EV_RECV_IFRAME;
6800 err = l2cap_rx(chan, control, skb, event);
6802 err = l2cap_stream_rx(chan, control, skb);
6806 l2cap_send_disconn_req(chan, ECONNRESET);
/* S-frame path: map the supervisory function to an RX event. */
6808 const u8 rx_func_to_event[4] = {
6809 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6810 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6813 /* Only I-frames are expected in streaming mode */
6814 if (chan->mode == L2CAP_MODE_STREAMING)
6817 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6818 control->reqseq, control->final, control->poll,
/* S-frames carry no payload; trailing bytes are a violation. */
6822 BT_ERR("Trailing bytes: %d in sframe", len);
6823 l2cap_send_disconn_req(chan, ECONNRESET);
6827 /* Validate F and P bits */
6828 if (control->final && (control->poll ||
6829 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6832 event = rx_func_to_event[control->super];
6833 if (l2cap_rx(chan, control, skb, event))
6834 l2cap_send_disconn_req(chan, ECONNRESET);
/* Replenish the peer's LE flow-control credits for this channel: once
 * our advertised rx_credits drop below half of le_max_credits, top them
 * back up and tell the peer with an L2CAP_LE_CREDITS packet.
 */
6844 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6846 struct l2cap_conn *conn = chan->conn;
6847 struct l2cap_le_credits pkt;
6850 /* We return more credits to the sender only after the amount of
6851 * credits falls below half of the initial amount.
6853 if (chan->rx_credits >= (le_max_credits + 1) / 2)
6856 return_credits = le_max_credits - chan->rx_credits;
6858 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6860 chan->rx_credits += return_credits;
6862 pkt.cid = cpu_to_le16(chan->scid);
6863 pkt.credits = cpu_to_le16(return_credits);
6865 chan->ident = l2cap_get_ident(conn);
6867 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* Receive an LE flow-control (CoC) PDU: enforce credits and MTU, return
 * credits to the peer when running low, and reassemble SDUs from their
 * first fragment (which carries the SDU length) and continuations.
 * Frees the skb internally on the error paths it handles.
 * NOTE(review): some drops/returns are elided in this listing.
 */
6870 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
/* Peer sent data without credits: protocol violation, disconnect. */
6874 if (!chan->rx_credits) {
6875 BT_ERR("No credits to receive LE L2CAP data");
6876 l2cap_send_disconn_req(chan, ECONNRESET);
6880 if (chan->imtu < skb->len) {
6881 BT_ERR("Too big LE L2CAP PDU");
6886 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6888 l2cap_chan_le_send_credits(chan);
/* First fragment of a new SDU: starts with a 2-byte total length. */
6895 sdu_len = get_unaligned_le16(skb->data);
6896 skb_pull(skb, L2CAP_SDULEN_SIZE);
6898 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6899 sdu_len, skb->len, chan->imtu);
6901 if (sdu_len > chan->imtu) {
6902 BT_ERR("Too big LE L2CAP SDU length received");
6907 if (skb->len > sdu_len) {
6908 BT_ERR("Too much LE L2CAP data received");
/* Entire SDU fit in one PDU: deliver immediately. */
6913 if (skb->len == sdu_len)
6914 return chan->ops->recv(chan, skb);
6917 chan->sdu_len = sdu_len;
6918 chan->sdu_last_frag = skb;
6920 /* Detect if remote is not able to use the selected MPS */
6921 if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
6922 u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
6924 /* Adjust the number of credits */
6925 BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
6926 chan->mps = mps_len;
6927 l2cap_chan_le_send_credits(chan);
/* Continuation fragment of an in-progress SDU. */
6933 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6934 chan->sdu->len, skb->len, chan->sdu_len);
6936 if (chan->sdu->len + skb->len > chan->sdu_len) {
6937 BT_ERR("Too much LE L2CAP data received");
6942 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6945 if (chan->sdu->len == chan->sdu_len) {
6946 err = chan->ops->recv(chan, chan->sdu);
6949 chan->sdu_last_frag = NULL;
/* Error path: discard any partial SDU. */
6957 kfree_skb(chan->sdu);
6959 chan->sdu_last_frag = NULL;
6963 /* We can't return an error here since we took care of the skb
6964 * freeing internally. An error return would cause the caller to
6965 * do a double-free of the skb.
/* Route an skb received on a dynamic (or A2MP) CID to its channel and
 * then to the mode-specific receive path (LE flow control, basic, or
 * ERTM/streaming). Unknown CIDs and non-connected channels drop the
 * frame. Holds a channel ref + lock for the duration.
 */
6970 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6971 struct sk_buff *skb)
6973 struct l2cap_chan *chan;
6975 chan = l2cap_get_chan_by_scid(conn, cid);
/* A2MP channels are created lazily on first data. */
6977 if (cid == L2CAP_CID_A2MP) {
6978 chan = a2mp_channel_create(conn, skb);
6984 l2cap_chan_hold(chan);
6985 l2cap_chan_lock(chan);
6987 BT_DBG("unknown cid 0x%4.4x", cid);
6988 /* Drop packet and return */
6994 BT_DBG("chan %p, len %d", chan, skb->len);
6996 /* If we receive data on a fixed channel before the info req/rsp
6997 * procdure is done simply assume that the channel is supported
6998 * and mark it as ready.
7000 if (chan->chan_type == L2CAP_CHAN_FIXED)
7001 l2cap_chan_ready(chan);
7003 if (chan->state != BT_CONNECTED)
7006 switch (chan->mode) {
7007 case L2CAP_MODE_LE_FLOWCTL:
7008 if (l2cap_le_data_rcv(chan, skb) < 0)
7013 case L2CAP_MODE_BASIC:
7014 /* If socket recv buffers overflows we drop data here
7015 * which is *bad* because L2CAP has to be reliable.
7016 * But we don't have any other choice. L2CAP doesn't
7017 * provide flow control mechanism. */
7019 if (chan->imtu < skb->len) {
7020 BT_ERR("Dropping L2CAP data: receive buffer overflow")
7024 if (!chan->ops->recv(chan, skb))
7028 case L2CAP_MODE_ERTM:
7029 case L2CAP_MODE_STREAMING:
7030 l2cap_data_rcv(chan, skb);
7034 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
7042 l2cap_chan_unlock(chan);
7043 l2cap_chan_put(chan);
/* Deliver a connectionless (UCD) datagram to the global channel bound to
 * @psm. Stashes the sender's BD_ADDR and PSM in the skb control block so
 * recvmsg() can populate msg_name. Only valid on ACL links.
 */
7046 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
7047 struct sk_buff *skb)
7049 struct hci_conn *hcon = conn->hcon;
7050 struct l2cap_chan *chan;
7052 if (hcon->type != ACL_LINK)
/* Global lookup: any channel bound to this PSM for these addresses. */
7055 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
7060 BT_DBG("chan %p, len %d", chan, skb->len);
7062 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
7065 if (chan->imtu < skb->len)
7068 /* Store remote BD_ADDR and PSM for msg_name */
7069 bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
7070 bt_cb(skb)->l2cap.psm = psm;
/* recv() taking the skb means we must not free it ourselves. */
7072 if (!chan->ops->recv(chan, skb)) {
7073 l2cap_chan_put(chan);
7078 l2cap_chan_put(chan);
/* Demultiplex one complete L2CAP frame from HCI: parse the basic header,
 * drop blacklisted LE peers, and dispatch by CID to the signaling,
 * connectionless, LE signaling, or data channel handlers. Frames that
 * arrive before the link is fully up are queued on pending_rx.
 */
7083 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7085 struct l2cap_hdr *lh = (void *) skb->data;
7086 struct hci_conn *hcon = conn->hcon;
/* Link not up yet: park the frame, process_pending_rx() replays it. */
7090 if (hcon->state != BT_CONNECTED) {
7091 BT_DBG("queueing pending rx skb");
7092 skb_queue_tail(&conn->pending_rx, skb);
7096 skb_pull(skb, L2CAP_HDR_SIZE);
7097 cid = __le16_to_cpu(lh->cid);
7098 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload exactly. */
7100 if (len != skb->len) {
7105 /* Since we can't actively block incoming LE connections we must
7106 * at least ensure that we ignore incoming data from them.
7108 if (hcon->type == LE_LINK &&
7109 hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
7110 bdaddr_dst_type(hcon))) {
7115 BT_DBG("len %d, cid 0x%4.4x", len, cid);
7118 case L2CAP_CID_SIGNALING:
7119 l2cap_sig_channel(conn, skb);
7122 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry a PSM right after the basic header. */
7123 psm = get_unaligned((__le16 *) skb->data);
7124 skb_pull(skb, L2CAP_PSMLEN_SIZE);
7125 l2cap_conless_channel(conn, psm, skb);
7128 case L2CAP_CID_LE_SIGNALING:
7129 l2cap_le_sig_channel(conn, skb);
/* Everything else is a dynamic/data CID. */
7133 l2cap_data_channel(conn, cid, skb);
/* Workqueue handler: replay frames that arrived before the HCI link
 * reached BT_CONNECTED (queued by l2cap_recv_frame).
 */
7138 static void process_pending_rx(struct work_struct *work)
7140 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7142 struct sk_buff *skb;
7146 while ((skb = skb_dequeue(&conn->pending_rx)))
7147 l2cap_recv_frame(conn, skb);
/* Allocate and initialize the l2cap_conn for an HCI connection: create
 * the HCI channel, pick the MTU from the transport, advertise the local
 * fixed channels (conditionally A2MP and BR/EDR SMP), and set up locks,
 * lists, and deferred work. Returns the existing conn if one is already
 * attached (early-return lines elided in this listing).
 */
7150 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7152 struct l2cap_conn *conn = hcon->l2cap_data;
7153 struct hci_chan *hchan;
7158 hchan = hci_chan_create(hcon);
7162 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
/* Allocation failed: undo the hci_chan before bailing out. */
7164 hci_chan_del(hchan);
7168 kref_init(&conn->ref);
7169 hcon->l2cap_data = conn;
7170 conn->hcon = hci_conn_get(hcon);
7171 conn->hchan = hchan;
7173 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
/* MTU depends on the transport: LE MTU if the controller reports one,
 * otherwise the ACL MTU.
 */
7175 switch (hcon->type) {
7177 if (hcon->hdev->le_mtu) {
7178 conn->mtu = hcon->hdev->le_mtu;
7183 conn->mtu = hcon->hdev->acl_mtu;
7187 conn->feat_mask = 0;
7189 conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7191 if (hcon->type == ACL_LINK &&
7192 hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7193 conn->local_fixed_chan |= L2CAP_FC_A2MP;
/* BR/EDR SMP is only advertised with LE + secure connections (or the
 * debug force flag).
 */
7195 if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7196 (bredr_sc_enabled(hcon->hdev) ||
7197 hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7198 conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7200 mutex_init(&conn->ident_lock);
7201 mutex_init(&conn->chan_lock);
7203 INIT_LIST_HEAD(&conn->chan_l);
7204 INIT_LIST_HEAD(&conn->users);
7206 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7208 skb_queue_head_init(&conn->pending_rx);
7209 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7210 INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7212 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* Validate a PSM for an outgoing connect for the given destination
 * address type: LE destinations accept only the one-octet LE PSM range
 * (<= 0x00ff); BR/EDR PSMs must be odd with bit 8 clear per the L2CAP
 * spec's PSM encoding rules.
 * NOTE(review): lines 7218-7220 are elided in this view — presumably a
 * zero-PSM guard; verify against the full source.
 */
7217 static bool is_valid_psm(u16 psm, u8 dst_type) {
7221 if (bdaddr_type_is_le(dst_type))
7222 return (psm <= 0x00ff);
7224 /* PSM must be odd and lsb of upper byte must be 0 */
7225 return ((psm & 0x0101) == 0x0001);
/* Initiate an outgoing L2CAP connection for @chan to @dst/@dst_type,
 * identified by @psm (CID-less channels) or @cid (fixed channels).
 * Validates the psm/cid combination for the channel type, resolves a
 * route to an hdev, creates the underlying HCI connection (LE connect or
 * LE scan-connect for LE destinations, ACL connect otherwise), attaches
 * the channel to the l2cap_conn and starts the connect procedure.
 * Returns 0 on success or a negative errno.
 * NOTE(review): many error-handling lines (goto targets, early unlocks)
 * are elided in this view; comments describe only the visible code.
 */
7228 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7229 bdaddr_t *dst, u8 dst_type)
7231 struct l2cap_conn *conn;
7232 struct hci_conn *hcon;
7233 struct hci_dev *hdev;
7236 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
7237 dst_type, __le16_to_cpu(psm));
7239 hdev = hci_get_route(dst, &chan->src, chan->src_type);
7241 return -EHOSTUNREACH;
/* Reject invalid psm/cid combinations for the channel type before
 * touching any HCI state.
 */
7245 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7246 chan->chan_type != L2CAP_CHAN_RAW) {
7251 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7256 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
/* Only modes valid for a fresh outgoing connect are accepted;
 * LE flow control gets its credit state initialized here.
 */
7261 switch (chan->mode) {
7262 case L2CAP_MODE_BASIC:
7264 case L2CAP_MODE_LE_FLOWCTL:
7265 l2cap_le_flowctl_init(chan);
7267 case L2CAP_MODE_ERTM:
7268 case L2CAP_MODE_STREAMING:
7277 switch (chan->state) {
7281 /* Already connecting */
7286 /* Already connected */
7300 /* Set destination address and psm */
7301 bacpy(&chan->dst, dst);
7302 chan->dst_type = dst_type;
7307 if (bdaddr_type_is_le(dst_type)) {
7308 /* Convert from L2CAP channel address type to HCI address type
7310 if (dst_type == BDADDR_LE_PUBLIC)
7311 dst_type = ADDR_LE_DEV_PUBLIC;
7313 dst_type = ADDR_LE_DEV_RANDOM;
/* While advertising we connect directly (we are connectable);
 * otherwise go through the scan-then-connect path.
 */
7315 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7316 hcon = hci_connect_le(hdev, dst, dst_type,
7318 HCI_LE_CONN_TIMEOUT,
7319 HCI_ROLE_SLAVE, NULL);
7321 hcon = hci_connect_le_scan(hdev, dst, dst_type,
7323 HCI_LE_CONN_TIMEOUT);
7326 u8 auth_type = l2cap_get_auth_type(chan);
7327 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
7331 err = PTR_ERR(hcon);
7335 conn = l2cap_conn_add(hcon);
7337 hci_conn_drop(hcon);
7342 mutex_lock(&conn->chan_lock);
7343 l2cap_chan_lock(chan);
/* A fixed channel with this DCID already exists on the conn. */
7345 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7346 hci_conn_drop(hcon);
7351 /* Update source addr of the socket */
7352 bacpy(&chan->src, &hcon->src);
7353 chan->src_type = bdaddr_src_type(hcon);
7355 __l2cap_chan_add(conn, chan);
7357 /* l2cap_chan_add takes its own ref so we can drop this one */
7358 hci_conn_drop(hcon);
7360 l2cap_state_change(chan, BT_CONNECT);
7361 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7363 /* Release chan->sport so that it can be reused by other
7364 * sockets (as it's only used for listening sockets).
7366 write_lock(&chan_list_lock);
7368 write_unlock(&chan_list_lock);
/* If the HCI link is already up, short-circuit: fixed channels go
 * straight to BT_CONNECTED (after a security check), connection-
 * oriented channels start the L2CAP connect procedure.
 */
7370 if (hcon->state == BT_CONNECTED) {
7371 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7372 __clear_chan_timer(chan);
7373 if (l2cap_chan_check_security(chan, true))
7374 l2cap_state_change(chan, BT_CONNECTED);
7376 l2cap_do_start(chan);
7382 l2cap_chan_unlock(chan);
7383 mutex_unlock(&conn->chan_lock);
7385 hci_dev_unlock(hdev);
7389 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7391 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming ACL connection from
 * @bdaddr. Scans the global channel list for listening channels; lm1
 * accumulates link-mode bits for channels bound to this adapter's own
 * address, lm2 for wildcard (BDADDR_ANY) listeners. Returns the
 * exact-match bits when one was found, otherwise the wildcard bits.
 * NOTE(review): the line setting 'exact' is elided in this view —
 * presumably in the exact-address branch; verify against full source.
 */
7393 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7395 int exact = 0, lm1 = 0, lm2 = 0;
7396 struct l2cap_chan *c;
7398 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7400 /* Find listening sockets and check their link_mode */
7401 read_lock(&chan_list_lock);
7402 list_for_each_entry(c, &chan_list, global_l) {
7403 if (c->state != BT_LISTEN)
7406 if (!bacmp(&c->src, &hdev->bdaddr)) {
7407 lm1 |= HCI_LM_ACCEPT;
/* FLAG_ROLE_SWITCH requests master role on accept. */
7408 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7409 lm1 |= HCI_LM_MASTER;
7411 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7412 lm2 |= HCI_LM_ACCEPT;
7413 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7414 lm2 |= HCI_LM_MASTER;
7417 read_unlock(&chan_list_lock);
7419 return exact ? lm1 : lm2;
7422 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7423 * from an existing channel in the list or from the beginning of the
7424 * global list (by passing NULL as first parameter).
/* Return the next listening fixed channel matching @hcon's source address
 * type, resuming iteration after @c (or from the start of the global list
 * when @c is NULL). On a match, takes a reference via
 * l2cap_chan_hold_unless_zero() before dropping the list lock so the
 * caller can safely lock the channel. Returns NULL when exhausted.
 * NOTE(review): 'continue' lines in the filter checks and the final
 * return are elided in this view.
 */
7426 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
7427 struct hci_conn *hcon)
7429 u8 src_type = bdaddr_src_type(hcon);
7431 read_lock(&chan_list_lock);
7434 c = list_next_entry(c, global_l);
7436 c = list_entry(chan_list.next, typeof(*c), global_l);
7438 list_for_each_entry_from(c, &chan_list, global_l) {
7439 if (c->chan_type != L2CAP_CHAN_FIXED)
7441 if (c->state != BT_LISTEN)
/* Accept channels bound to this adapter's address or wildcard. */
7443 if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
7445 if (src_type != c->src_type)
/* Hold the channel (unless already being freed) before unlocking. */
7448 c = l2cap_chan_hold_unless_zero(c);
7449 read_unlock(&chan_list_lock);
7453 read_unlock(&chan_list_lock);
/* HCI callback: an ACL/LE connection attempt completed with @status.
 * On failure, tears down any l2cap_conn; on success, creates the
 * l2cap_conn, then walks all listening fixed channels (one lookup at a
 * time, since l2cap_chan_lock() may sleep and cannot be taken under the
 * list lock) spawning a new channel for each and finally kicks
 * l2cap_conn_ready().
 * NOTE(review): several lines (early returns, loop close) are elided in
 * this view.
 */
7458 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7460 struct hci_dev *hdev = hcon->hdev;
7461 struct l2cap_conn *conn;
7462 struct l2cap_chan *pchan;
/* Only ACL and LE links carry L2CAP. */
7465 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7468 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7471 l2cap_conn_del(hcon, bt_to_errno(status));
7475 conn = l2cap_conn_add(hcon);
7479 dst_type = bdaddr_dst_type(hcon);
7481 /* If device is blocked, do not create channels for it */
7482 if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
7485 /* Find fixed channels and notify them of the new connection. We
7486 * use multiple individual lookups, continuing each time where
7487 * we left off, because the list lock would prevent calling the
7488 * potentially sleeping l2cap_chan_lock() function.
7490 pchan = l2cap_global_fixed_chan(NULL, hcon);
7492 struct l2cap_chan *chan, *next;
7494 /* Client fixed channels should override server ones */
7495 if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7498 l2cap_chan_lock(pchan);
/* Ask the owner (socket layer) to clone a per-connection channel. */
7499 chan = pchan->ops->new_connection(pchan);
7501 bacpy(&chan->src, &hcon->src);
7502 bacpy(&chan->dst, &hcon->dst);
7503 chan->src_type = bdaddr_src_type(hcon);
7504 chan->dst_type = dst_type;
7506 __l2cap_chan_add(conn, chan);
7509 l2cap_chan_unlock(pchan);
/* Advance before dropping our hold on the current channel. */
7511 next = l2cap_global_fixed_chan(pchan, hcon);
7512 l2cap_chan_put(pchan);
7516 l2cap_conn_ready(conn);
/* HCI callback: report the disconnect reason to use for @hcon. Falls
 * back to "remote user terminated" when no l2cap_conn exists, otherwise
 * returns the reason recorded on the connection.
 */
7519 int l2cap_disconn_ind(struct hci_conn *hcon)
7521 struct l2cap_conn *conn = hcon->l2cap_data;
7523 BT_DBG("hcon %p", hcon);
7526 return HCI_ERROR_REMOTE_USER_TERM;
7527 return conn->disc_reason;
/* HCI callback: the link went down — tear down the l2cap_conn with the
 * HCI reason translated to an errno. Ignores non-ACL/LE link types.
 */
7530 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7532 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7535 BT_DBG("hcon %p reason %d", hcon, reason);
7537 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel.
 * Encryption dropped: MEDIUM security arms a timer giving re-encryption
 * a chance; HIGH/FIPS closes the channel immediately. Encryption
 * restored: clear the pending timer for MEDIUM security.
 */
7540 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7542 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7545 if (encrypt == 0x00) {
7546 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7547 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7548 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7549 chan->sec_level == BT_SECURITY_FIPS)
7550 l2cap_chan_close(chan, ECONNREFUSED);
7552 if (chan->sec_level == BT_SECURITY_MEDIUM)
7553 __clear_chan_timer(chan);
/* HCI callback: a security (authentication/encryption) procedure finished
 * with @status and new @encrypt state. Walks every channel on the conn
 * and advances its state machine: resumes established channels, starts
 * the connect procedure for channels that were waiting on security, and
 * answers deferred incoming connect requests (BT_CONNECT2) with the
 * appropriate L2CAP_CONN_RSP.
 * NOTE(review): several 'continue'/close-brace lines are elided in this
 * view; comments describe only the visible code.
 */
7557 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7559 struct l2cap_conn *conn = hcon->l2cap_data;
7560 struct l2cap_chan *chan;
7565 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7567 mutex_lock(&conn->chan_lock);
7569 list_for_each_entry(chan, &conn->chan_l, list) {
7570 l2cap_chan_lock(chan);
7572 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7573 state_to_string(chan->state));
/* A2MP has its own security handling — skip it here. */
7575 if (chan->scid == L2CAP_CID_A2MP) {
7576 l2cap_chan_unlock(chan);
/* Track the (possibly elevated) link security level on success. */
7580 if (!status && encrypt)
7581 chan->sec_level = hcon->sec_level;
7583 if (!__l2cap_no_conn_pending(chan)) {
7584 l2cap_chan_unlock(chan);
/* Established channels: resume traffic and re-check encryption
 * policy against the new state.
 */
7588 if (!status && (chan->state == BT_CONNECTED ||
7589 chan->state == BT_CONFIG)) {
7590 chan->ops->resume(chan);
7591 l2cap_check_encryption(chan, encrypt);
7592 l2cap_chan_unlock(chan);
/* Outgoing connect that was blocked on security: proceed only if
 * security succeeded and the encryption key is long enough.
 */
7596 if (chan->state == BT_CONNECT) {
7597 if (!status && l2cap_check_enc_key_size(hcon))
7598 l2cap_start_connection(chan);
7600 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Deferred incoming connect (non-LE): now send the CONN_RSP. */
7601 } else if (chan->state == BT_CONNECT2 &&
7602 chan->mode != L2CAP_MODE_LE_FLOWCTL) {
7603 struct l2cap_conn_rsp rsp;
7606 if (!status && l2cap_check_enc_key_size(hcon)) {
7607 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
/* Userspace wants to authorize: answer "pending". */
7608 res = L2CAP_CR_PEND;
7609 stat = L2CAP_CS_AUTHOR_PEND;
7610 chan->ops->defer(chan);
7612 l2cap_state_change(chan, BT_CONFIG);
7613 res = L2CAP_CR_SUCCESS;
7614 stat = L2CAP_CS_NO_INFO;
/* Security failed (or key too short): reject and disconnect. */
7617 l2cap_state_change(chan, BT_DISCONN);
7618 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7619 res = L2CAP_CR_SEC_BLOCK;
7620 stat = L2CAP_CS_NO_INFO;
7623 rsp.scid = cpu_to_le16(chan->dcid);
7624 rsp.dcid = cpu_to_le16(chan->scid);
7625 rsp.result = cpu_to_le16(res);
7626 rsp.status = cpu_to_le16(stat);
7627 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* On acceptance, kick off configuration if not already done. */
7630 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7631 res == L2CAP_CR_SUCCESS) {
7633 set_bit(CONF_REQ_SENT, &chan->conf_state);
7634 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7636 l2cap_build_conf_req(chan, buf, sizeof(buf)),
7638 chan->num_conf_req++;
7642 l2cap_chan_unlock(chan);
7645 mutex_unlock(&conn->chan_lock);
/* Entry point for ACL data from HCI: reassemble fragmented L2CAP frames.
 * Start fragments (per @flags) must carry the basic L2CAP header, from
 * which the total frame length is read; continuation fragments are
 * appended to conn->rx_skb until conn->rx_len reaches zero, at which
 * point the complete frame is handed to l2cap_recv_frame(). Malformed
 * sequences (short header, over-long frame/fragment, unexpected
 * start/continuation) mark the conn unreliable with ECOMM.
 * NOTE(review): several lines (goto drop targets, switch case labels,
 * final kfree_skb) are elided in this view.
 */
7648 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7650 struct l2cap_conn *conn = hcon->l2cap_data;
7651 struct l2cap_hdr *hdr;
7654 /* For AMP controller do not create l2cap conn */
7655 if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
7659 conn = l2cap_conn_add(hcon);
7664 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7668 case ACL_START_NO_FLUSH:
/* A new start frame while reassembly is in progress: discard the
 * partial frame and flag the connection.
 */
7671 BT_ERR("Unexpected start frame (len %d)", skb->len);
7672 kfree_skb(conn->rx_skb);
7673 conn->rx_skb = NULL;
7675 l2cap_conn_unreliable(conn, ECOMM);
7678 /* Start fragment always begin with Basic L2CAP header */
7679 if (skb->len < L2CAP_HDR_SIZE) {
7680 BT_ERR("Frame is too short (len %d)", skb->len);
7681 l2cap_conn_unreliable(conn, ECOMM);
7685 hdr = (struct l2cap_hdr *) skb->data;
7686 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7688 if (len == skb->len) {
7689 /* Complete frame received */
7690 l2cap_recv_frame(conn, skb);
7694 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7696 if (skb->len > len) {
7697 BT_ERR("Frame is too long (len %d, expected len %d)",
7699 l2cap_conn_unreliable(conn, ECOMM);
7703 /* Allocate skb for the complete frame (with header) */
7704 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7708 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* Remaining bytes expected from continuation fragments. */
7710 conn->rx_len = len - skb->len;
7714 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation with no reassembly in progress is a protocol error. */
7716 if (!conn->rx_len) {
7717 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7718 l2cap_conn_unreliable(conn, ECOMM);
7722 if (skb->len > conn->rx_len) {
7723 BT_ERR("Fragment is too long (len %d, expected %d)",
7724 skb->len, conn->rx_len);
7725 kfree_skb(conn->rx_skb);
7726 conn->rx_skb = NULL;
7728 l2cap_conn_unreliable(conn, ECOMM);
7732 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7734 conn->rx_len -= skb->len;
7736 if (!conn->rx_len) {
7737 /* Complete frame received. l2cap_recv_frame
7738 * takes ownership of the skb so set the global
7739 * rx_skb pointer to NULL first.
7741 struct sk_buff *rx_skb = conn->rx_skb;
7742 conn->rx_skb = NULL;
7743 l2cap_recv_frame(conn, rx_skb);
/* Callback table registered with the HCI core so L2CAP is notified of
 * connection, disconnection and security events on every link.
 */
7752 static struct hci_cb l2cap_cb = {
7754 .connect_cfm = l2cap_connect_cfm,
7755 .disconn_cfm = l2cap_disconn_cfm,
7756 .security_cfm = l2cap_security_cfm,
/* seq_file show callback for the "l2cap" debugfs entry: dump one line
 * per channel in the global list (addresses, state, PSM, CIDs, MTUs,
 * security level and mode) under the list read lock.
 */
7759 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7761 struct l2cap_chan *c;
7763 read_lock(&chan_list_lock);
7765 list_for_each_entry(c, &chan_list, global_l) {
7766 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7767 &c->src, c->src_type, &c->dst, c->dst_type,
7768 c->state, __le16_to_cpu(c->psm),
7769 c->scid, c->dcid, c->imtu, c->omtu,
7770 c->sec_level, c->mode);
7773 read_unlock(&chan_list_lock);
/* debugfs open: bind the seq_file single-shot show callback. */
7778 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7780 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry (seq_file based). */
7783 static const struct file_operations l2cap_debugfs_fops = {
7784 .open = l2cap_debugfs_open,
7786 .llseek = seq_lseek,
7787 .release = single_release,
/* Handle to the "l2cap" debugfs file, removed again in l2cap_exit(). */
7790 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer and the HCI callback
 * table, then create the debugfs entries (channel dump plus tunables for
 * LE credits/MPS) when bt_debugfs is available.
 * NOTE(review): error-return lines are elided in this view.
 */
7792 int __init l2cap_init(void)
7796 err = l2cap_init_sockets();
7800 hci_register_cb(&l2cap_cb);
7802 if (IS_ERR_OR_NULL(bt_debugfs))
7805 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7806 NULL, &l2cap_debugfs_fops);
7808 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7810 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
/* Module exit: undo l2cap_init() in reverse order — remove debugfs,
 * unregister the HCI callbacks, tear down the socket layer.
 */
7816 void l2cap_exit(void)
7818 debugfs_remove(l2cap_debugfs);
7819 hci_unregister_cb(&l2cap_cb);
7820 l2cap_cleanup_sockets();
/* Runtime-tunable module parameter to disable ERTM (declared earlier in
 * the file, outside this view).
 */
7823 module_param(disable_ertm, bool, 0644);
7824 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");