2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
45 #define LE_FLOWCTL_MAX_CREDITS 65535
49 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
51 static LIST_HEAD(chan_list);
52 static DEFINE_RWLOCK(chan_list_lock);
54 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
55 u8 code, u8 ident, u16 dlen, void *data);
56 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
58 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
59 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
61 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
62 struct sk_buff_head *skbs, u8 event);
/* Map an (HCI link type, HCI address type) pair to the BDADDR_* address
 * type exposed to L2CAP users: on LE links a public HCI address maps to
 * BDADDR_LE_PUBLIC, anything else to BDADDR_LE_RANDOM.
 * NOTE(review): this listing is a lossy excerpt — braces and the
 * BR/EDR return path are not visible here; confirm against full source.
 */
64 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
66 if (link_type == LE_LINK) {
67 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
68 return BDADDR_LE_PUBLIC;
70 return BDADDR_LE_RANDOM;
/* Convenience wrappers resolving the source/destination address type
 * of an hci_conn via bdaddr_type() above.
 */
76 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
78 return bdaddr_type(hcon->type, hcon->src_type);
81 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
83 return bdaddr_type(hcon->type, hcon->dst_type);
86 /* ---- L2CAP channels ---- */
/* Unlocked lookup helpers: walk conn->chan_l matching on DCID / SCID.
 * Callers must hold conn->chan_lock.
 * NOTE(review): loop bodies are partially missing from this lossy
 * excerpt; the match conditions for dcid/scid are not visible.
 */
88 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
93 list_for_each_entry(c, &conn->chan_l, list) {
100 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
103 struct l2cap_chan *c;
105 list_for_each_entry(c, &conn->chan_l, list) {
112 /* Find channel with given SCID.
113 * Returns locked channel. */
114 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
117 struct l2cap_chan *c;
119 mutex_lock(&conn->chan_lock);
120 c = __l2cap_get_chan_by_scid(conn, cid);
123 mutex_unlock(&conn->chan_lock);
128 /* Find channel with given DCID.
129 * Returns locked channel.
131 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
134 struct l2cap_chan *c;
136 mutex_lock(&conn->chan_lock);
137 c = __l2cap_get_chan_by_dcid(conn, cid);
140 mutex_unlock(&conn->chan_lock);
/* Ident-based lookup: matches the pending signalling command
 * identifier stored in c->ident; same lock protocol as above.
 */
145 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
148 struct l2cap_chan *c;
150 list_for_each_entry(c, &conn->chan_l, list) {
151 if (c->ident == ident)
157 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
160 struct l2cap_chan *c;
162 mutex_lock(&conn->chan_lock);
163 c = __l2cap_get_chan_by_ident(conn, ident);
166 mutex_unlock(&conn->chan_lock);
/* Global PSM lookup across all registered channels; the BR/EDR and LE
 * PSM namespaces are kept separate by the src_type checks.  Caller
 * holds chan_list_lock.
 */
171 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
174 struct l2cap_chan *c;
176 list_for_each_entry(c, &chan_list, global_l) {
177 if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
180 if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
183 if (c->sport == psm && !bacmp(&c->src, src))
/* Bind a channel to a PSM.  A zero psm triggers a scan for a free
 * dynamic PSM in the transport-appropriate range.
 * NOTE(review): lossy excerpt — the 'incr' assignment and the
 * error/return paths are not visible here.
 */
189 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
193 write_lock(&chan_list_lock);
195 if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
205 u16 p, start, end, incr;
207 if (chan->src_type == BDADDR_BREDR) {
208 start = L2CAP_PSM_DYN_START;
209 end = L2CAP_PSM_AUTO_END;
212 start = L2CAP_PSM_LE_DYN_START;
213 end = L2CAP_PSM_LE_DYN_END;
218 for (p = start; p <= end; p += incr)
219 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
221 chan->psm = cpu_to_le16(p);
222 chan->sport = cpu_to_le16(p);
229 write_unlock(&chan_list_lock);
232 EXPORT_SYMBOL_GPL(l2cap_add_psm);
/* Bind a fixed CID to a channel, overriding the conn-oriented defaults
 * set at creation time.
 */
234 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
236 write_lock(&chan_list_lock);
238 /* Override the defaults (which are for conn-oriented) */
239 chan->omtu = L2CAP_DEFAULT_MTU;
240 chan->chan_type = L2CAP_CHAN_FIXED;
244 write_unlock(&chan_list_lock);
/* Pick the first unused dynamic CID on this connection; LE and BR/EDR
 * links have different dynamic CID ranges.
 */
249 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
253 if (conn->hcon->type == LE_LINK)
254 dyn_end = L2CAP_CID_LE_DYN_END;
256 dyn_end = L2CAP_CID_DYN_END;
258 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
259 if (!__l2cap_get_chan_by_scid(conn, cid))
/* State-transition helpers; all funnel through the channel owner's
 * ops->state_change callback.
 */
266 static void l2cap_state_change(struct l2cap_chan *chan, int state)
268 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
269 state_to_string(state));
272 chan->ops->state_change(chan, state, 0);
275 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
279 chan->ops->state_change(chan, chan->state, err);
/* Report an error without changing state (current state is re-passed). */
282 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
284 chan->ops->state_change(chan, chan->state, err);
/* ERTM retransmission timer: only armed when the monitor timer is not
 * pending and a retransmit timeout is configured.
 */
287 static void __set_retrans_timer(struct l2cap_chan *chan)
289 if (!delayed_work_pending(&chan->monitor_timer) &&
290 chan->retrans_timeout) {
291 l2cap_set_timer(chan, &chan->retrans_timer,
292 msecs_to_jiffies(chan->retrans_timeout));
/* Monitor timer supersedes the retransmission timer while armed. */
296 static void __set_monitor_timer(struct l2cap_chan *chan)
298 __clear_retrans_timer(chan);
299 if (chan->monitor_timeout) {
300 l2cap_set_timer(chan, &chan->monitor_timer,
301 msecs_to_jiffies(chan->monitor_timeout));
/* Linear scan of a queue for the skb carrying ERTM tx sequence 'seq'. */
305 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
310 skb_queue_walk(head, skb) {
311 if (bt_cb(skb)->l2cap.txseq == seq)
318 /* ---- L2CAP sequence number lists ---- */
320 /* For ERTM, ordered lists of sequence numbers must be tracked for
321 * SREJ requests that are received and for frames that are to be
322 * retransmitted. These seq_list functions implement a singly-linked
323 * list in an array, where membership in the list can also be checked
324 * in constant time. Items can also be added to the tail of the list
325 * and removed from the head in constant time, without further memory
/* Allocate the backing array (power-of-two sized) and mark head, tail
 * and every slot as CLEAR (empty list).
 */
329 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
331 size_t alloc_size, i;
333 /* Allocated size is a power of 2 to map sequence numbers
334 * (which may be up to 14 bits) in to a smaller array that is
335 * sized for the negotiated ERTM transmit windows.
337 alloc_size = roundup_pow_of_two(size);
339 seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
343 seq_list->mask = alloc_size - 1;
344 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
345 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
346 for (i = 0; i < alloc_size; i++)
347 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the backing array (kfree(NULL) is a no-op). */
352 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
354 kfree(seq_list->list);
357 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
360 /* Constant-time check for list membership */
361 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove and return the head element; the list resets to empty when the
 * new head is the TAIL sentinel.
 */
364 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
366 u16 seq = seq_list->head;
367 u16 mask = seq_list->mask;
369 seq_list->head = seq_list->list[seq & mask];
370 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
372 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
373 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
374 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Reset every slot; early-out when the list is already empty. */
380 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
384 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
387 for (i = 0; i <= seq_list->mask; i++)
388 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
390 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
391 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append 'seq' at the tail; a seq already in the list is ignored. */
394 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
396 u16 mask = seq_list->mask;
398 /* All appends happen in constant time */
400 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
403 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
404 seq_list->head = seq;
406 seq_list->list[seq_list->tail & mask] = seq;
408 seq_list->tail = seq;
409 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: close the channel with a
 * reason derived from its current state, then drop the reference taken
 * when the timer was scheduled.
 */
412 static void l2cap_chan_timeout(struct work_struct *work)
414 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
416 struct l2cap_conn *conn = chan->conn;
419 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
421 mutex_lock(&conn->chan_lock);
422 /* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
423 * this work. No need to call l2cap_chan_hold(chan) here again.
425 l2cap_chan_lock(chan);
427 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
428 reason = ECONNREFUSED;
429 else if (chan->state == BT_CONNECT &&
430 chan->sec_level != BT_SECURITY_SDP)
431 reason = ECONNREFUSED;
435 l2cap_chan_close(chan, reason);
437 chan->ops->close(chan);
439 l2cap_chan_unlock(chan);
/* Balances the hold taken by __set_chan_timer() (see comment above). */
440 l2cap_chan_put(chan);
442 mutex_unlock(&conn->chan_lock);
/* Allocate and globally register a new channel: queues and lock
 * initialised, initial kref of 1, state BT_OPEN, and CONF_NOT_COMPLETE
 * set until l2cap_chan_ready() runs.
 */
445 struct l2cap_chan *l2cap_chan_create(void)
447 struct l2cap_chan *chan;
449 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
453 skb_queue_head_init(&chan->tx_q);
454 skb_queue_head_init(&chan->srej_q);
455 mutex_init(&chan->lock);
457 /* Set default lock nesting level */
458 atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
460 write_lock(&chan_list_lock);
461 list_add(&chan->global_l, &chan_list);
462 write_unlock(&chan_list_lock);
464 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
466 chan->state = BT_OPEN;
468 kref_init(&chan->kref);
470 /* This flag is cleared in l2cap_chan_ready() */
471 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
473 BT_DBG("chan %p", chan);
477 EXPORT_SYMBOL_GPL(l2cap_chan_create);
/* kref release callback: unlink from the global list and free. */
479 static void l2cap_chan_destroy(struct kref *kref)
481 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
483 BT_DBG("chan %p", chan);
485 write_lock(&chan_list_lock);
486 list_del(&chan->global_l);
487 write_unlock(&chan_list_lock);
/* Reference-count wrappers around chan->kref; put may trigger
 * l2cap_chan_destroy() above.
 */
492 void l2cap_chan_hold(struct l2cap_chan *c)
494 BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
499 void l2cap_chan_put(struct l2cap_chan *c)
501 BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
503 kref_put(&c->kref, l2cap_chan_destroy);
505 EXPORT_SYMBOL_GPL(l2cap_chan_put);
/* Reset a channel to spec-default ERTM, flush and security parameters,
 * clearing all conf flags except CONF_NOT_COMPLETE.
 */
507 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
509 chan->fcs = L2CAP_FCS_CRC16;
510 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
511 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
512 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
513 chan->remote_max_tx = chan->max_tx;
514 chan->remote_tx_win = chan->tx_win;
515 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
516 chan->sec_level = BT_SECURITY_LOW;
517 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
518 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
519 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
521 chan->conf_state = 0;
522 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
524 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
526 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
/* Initialise LE credit-based flow-control state; rx credits are sized
 * so the peer can send one full SDU.
 */
528 static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
531 chan->sdu_last_frag = NULL;
533 chan->tx_credits = tx_credits;
534 /* Derive MPS from connection MTU to stop HCI fragmentation */
535 chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
536 /* Give enough credits for a full packet */
537 chan->rx_credits = (chan->imtu / chan->mps) + 1;
539 skb_queue_head_init(&chan->tx_q);
/* Attach a channel to a connection: assign CID/MTU per channel type,
 * take a channel reference and (conditionally) an hci_conn reference,
 * then link into conn->chan_l.  Caller holds conn->chan_lock.
 */
542 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
544 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
545 __le16_to_cpu(chan->psm), chan->dcid);
547 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
551 switch (chan->chan_type) {
552 case L2CAP_CHAN_CONN_ORIENTED:
553 /* Alloc CID for connection-oriented socket */
554 chan->scid = l2cap_alloc_cid(conn);
555 if (conn->hcon->type == ACL_LINK)
556 chan->omtu = L2CAP_DEFAULT_MTU;
559 case L2CAP_CHAN_CONN_LESS:
560 /* Connectionless socket */
561 chan->scid = L2CAP_CID_CONN_LESS;
562 chan->dcid = L2CAP_CID_CONN_LESS;
563 chan->omtu = L2CAP_DEFAULT_MTU;
566 case L2CAP_CHAN_FIXED:
567 /* Caller will set CID and CID specific MTU values */
571 /* Raw socket can send/recv signalling messages only */
572 chan->scid = L2CAP_CID_SIGNALING;
573 chan->dcid = L2CAP_CID_SIGNALING;
574 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default best-effort extended flow spec (EFS) parameters. */
577 chan->local_id = L2CAP_BESTEFFORT_ID;
578 chan->local_stype = L2CAP_SERV_BESTEFFORT;
579 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
580 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
581 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
582 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
584 l2cap_chan_hold(chan);
586 /* Only keep a reference for fixed channels if they requested it */
587 if (chan->chan_type != L2CAP_CHAN_FIXED ||
588 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
589 hci_conn_hold(conn->hcon);
591 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add(). */
594 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
596 mutex_lock(&conn->chan_lock);
597 __l2cap_chan_add(conn, chan);
598 mutex_unlock(&conn->chan_lock);
/* Detach a channel from its connection: tear down the owner socket,
 * unlink from conn->chan_l, drop the references taken in
 * __l2cap_chan_add(), and purge mode-specific queues and timers.
 */
601 void l2cap_chan_del(struct l2cap_chan *chan, int err)
603 struct l2cap_conn *conn = chan->conn;
605 __clear_chan_timer(chan);
607 BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
608 state_to_string(chan->state));
610 chan->ops->teardown(chan, err);
613 struct amp_mgr *mgr = conn->hcon->amp_mgr;
614 /* Delete from channel list */
615 list_del(&chan->list);
617 l2cap_chan_put(chan);
621 /* Reference was only held for non-fixed channels or
622 * fixed channels that explicitly requested it using the
623 * FLAG_HOLD_HCI_CONN flag.
625 if (chan->chan_type != L2CAP_CHAN_FIXED ||
626 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
627 hci_conn_drop(conn->hcon);
629 if (mgr && mgr->bredr_chan == chan)
630 mgr->bredr_chan = NULL;
/* Tear down any AMP high-speed logical link bound to this channel. */
633 if (chan->hs_hchan) {
634 struct hci_chan *hs_hchan = chan->hs_hchan;
636 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
637 amp_disconnect_logical_link(hs_hchan);
640 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
/* Per-mode cleanup of pending transmit/reassembly state. */
644 case L2CAP_MODE_BASIC:
647 case L2CAP_MODE_LE_FLOWCTL:
648 skb_queue_purge(&chan->tx_q);
651 case L2CAP_MODE_ERTM:
652 __clear_retrans_timer(chan);
653 __clear_monitor_timer(chan);
654 __clear_ack_timer(chan);
656 skb_queue_purge(&chan->srej_q);
658 l2cap_seq_list_free(&chan->srej_list);
659 l2cap_seq_list_free(&chan->retrans_list);
663 case L2CAP_MODE_STREAMING:
664 skb_queue_purge(&chan->tx_q);
670 EXPORT_SYMBOL_GPL(l2cap_chan_del);
/* Work item: refresh every channel's destination address after the
 * connection's identity address has changed (e.g. RPA resolution).
 */
672 static void l2cap_conn_update_id_addr(struct work_struct *work)
674 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
675 id_addr_update_work);
676 struct hci_conn *hcon = conn->hcon;
677 struct l2cap_chan *chan;
679 mutex_lock(&conn->chan_lock);
681 list_for_each_entry(chan, &conn->chan_l, list) {
682 l2cap_chan_lock(chan);
683 bacpy(&chan->dst, &hcon->dst);
684 chan->dst_type = bdaddr_dst_type(hcon);
685 l2cap_chan_unlock(chan);
688 mutex_unlock(&conn->chan_lock);
/* Reject a pending LE credit-based connection request; the result code
 * depends on whether setup was deferred awaiting authorization.
 */
691 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
693 struct l2cap_conn *conn = chan->conn;
694 struct l2cap_le_conn_rsp rsp;
697 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
698 result = L2CAP_CR_LE_AUTHORIZATION;
700 result = L2CAP_CR_LE_BAD_PSM;
702 l2cap_state_change(chan, BT_DISCONN);
704 rsp.dcid = cpu_to_le16(chan->scid);
705 rsp.mtu = cpu_to_le16(chan->imtu);
706 rsp.mps = cpu_to_le16(chan->mps);
707 rsp.credits = cpu_to_le16(chan->rx_credits);
708 rsp.result = cpu_to_le16(result);
710 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* BR/EDR counterpart: send a negative L2CAP_CONN_RSP. */
714 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
716 struct l2cap_conn *conn = chan->conn;
717 struct l2cap_conn_rsp rsp;
720 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
721 result = L2CAP_CR_SEC_BLOCK;
723 result = L2CAP_CR_BAD_PSM;
725 l2cap_state_change(chan, BT_DISCONN);
727 rsp.scid = cpu_to_le16(chan->dcid);
728 rsp.dcid = cpu_to_le16(chan->scid);
729 rsp.result = cpu_to_le16(result);
730 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
732 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Close a channel according to its current state: send a disconnect
 * request for an established conn-oriented channel, reject a pending
 * incoming connect, or just tear down locally.
 * NOTE(review): lossy excerpt — the case labels of this switch are not
 * visible, only the bodies.
 */
735 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
737 struct l2cap_conn *conn = chan->conn;
739 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
741 switch (chan->state) {
743 chan->ops->teardown(chan, 0);
748 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
749 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
750 l2cap_send_disconn_req(chan, reason);
752 l2cap_chan_del(chan, reason);
756 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
757 if (conn->hcon->type == ACL_LINK)
758 l2cap_chan_connect_reject(chan);
759 else if (conn->hcon->type == LE_LINK)
760 l2cap_chan_le_connect_reject(chan);
763 l2cap_chan_del(chan, reason);
768 l2cap_chan_del(chan, reason);
772 chan->ops->teardown(chan, 0);
776 EXPORT_SYMBOL(l2cap_chan_close);
/* Map channel type, PSM and security level to an HCI authentication
 * requirement.  SDP-style PSMs with sec_level LOW are downgraded to
 * BT_SECURITY_SDP as a side effect.
 */
778 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
780 switch (chan->chan_type) {
782 switch (chan->sec_level) {
783 case BT_SECURITY_HIGH:
784 case BT_SECURITY_FIPS:
785 return HCI_AT_DEDICATED_BONDING_MITM;
786 case BT_SECURITY_MEDIUM:
787 return HCI_AT_DEDICATED_BONDING;
789 return HCI_AT_NO_BONDING;
792 case L2CAP_CHAN_CONN_LESS:
793 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
794 if (chan->sec_level == BT_SECURITY_LOW)
795 chan->sec_level = BT_SECURITY_SDP;
797 if (chan->sec_level == BT_SECURITY_HIGH ||
798 chan->sec_level == BT_SECURITY_FIPS)
799 return HCI_AT_NO_BONDING_MITM;
801 return HCI_AT_NO_BONDING;
803 case L2CAP_CHAN_CONN_ORIENTED:
804 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
805 if (chan->sec_level == BT_SECURITY_LOW)
806 chan->sec_level = BT_SECURITY_SDP;
808 if (chan->sec_level == BT_SECURITY_HIGH ||
809 chan->sec_level == BT_SECURITY_FIPS)
810 return HCI_AT_NO_BONDING_MITM;
812 return HCI_AT_NO_BONDING;
816 switch (chan->sec_level) {
817 case BT_SECURITY_HIGH:
818 case BT_SECURITY_FIPS:
819 return HCI_AT_GENERAL_BONDING_MITM;
820 case BT_SECURITY_MEDIUM:
821 return HCI_AT_GENERAL_BONDING;
823 return HCI_AT_NO_BONDING;
829 /* Service level security */
830 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
832 struct l2cap_conn *conn = chan->conn;
/* LE security is handled by SMP; BR/EDR goes through hci_conn_security. */
835 if (conn->hcon->type == LE_LINK)
836 return smp_conn_security(conn->hcon, chan->sec_level);
838 auth_type = l2cap_get_auth_type(chan);
840 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
/* Allocate the next signalling command identifier, wrapping within the
 * kernel-reserved 1-128 range.
 */
844 static u8 l2cap_get_ident(struct l2cap_conn *conn)
848 /* Get next available identificator.
849 * 1 - 128 are used by kernel.
850 * 129 - 199 are reserved.
851 * 200 - 254 are used by utilities like l2ping, etc.
854 mutex_lock(&conn->ident_lock)
856 if (++conn->tx_ident > 128)
861 mutex_unlock(&conn->ident_lock);
/* Build and transmit a signalling command on the connection's HCI
 * channel at maximum skb priority.
 */
866 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
869 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
872 BT_DBG("code 0x%2.2x", code);
877 /* Use NO_FLUSH if supported or we have an LE link (which does
878 * not support auto-flushing packets) */
879 if (lmp_no_flush_capable(conn->hcon->hdev) ||
880 conn->hcon->type == LE_LINK)
881 flags = ACL_START_NO_FLUSH;
885 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
886 skb->priority = HCI_PRIO_MAX;
888 hci_send_acl(conn->hchan, skb, flags);
/* True while an AMP channel move is in progress on this channel. */
891 static bool __chan_is_moving(struct l2cap_chan *chan)
893 return chan->move_state != L2CAP_MOVE_STABLE &&
894 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit a data skb, preferring the high-speed (AMP) link when one is
 * attached and no move is in progress.
 */
897 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
899 struct hci_conn *hcon = chan->conn->hcon;
902 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
905 if (chan->hs_hcon && !__chan_is_moving(chan)) {
907 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
914 /* Use NO_FLUSH for LE links (where this is the only option) or
915 * if the BR/EDR link supports it and flushing has not been
916 * explicitly requested (through FLAG_FLUSHABLE).
918 if (hcon->type == LE_LINK ||
919 (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
920 lmp_no_flush_capable(hcon->hdev)))
921 flags = ACL_START_NO_FLUSH;
925 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
926 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced ERTM control field into struct l2cap_ctrl;
 * S-frames carry poll/super, I-frames carry sar/txseq.
 */
929 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
931 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
932 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
934 if (enh & L2CAP_CTRL_FRAME_TYPE) {
937 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
938 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
945 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
946 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Decode the 32-bit extended control field (extended window sizes). */
953 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
955 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
956 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
958 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
961 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
962 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
969 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
970 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Pull and decode the control field, sized per FLAG_EXT_CTRL. */
977 static inline void __unpack_control(struct l2cap_chan *chan,
980 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
981 __unpack_extended_control(get_unaligned_le32(skb->data),
983 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
985 __unpack_enhanced_control(get_unaligned_le16(skb->data),
987 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Inverse of __unpack_extended_control(). */
991 static u32 __pack_extended_control(struct l2cap_ctrl *control)
995 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
996 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
998 if (control->sframe) {
999 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1000 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1001 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1003 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1004 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Inverse of __unpack_enhanced_control(). */
1010 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1014 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1015 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1017 if (control->sframe) {
1018 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1019 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1020 packed |= L2CAP_CTRL_FRAME_TYPE;
1022 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1023 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the packed control field just after the basic L2CAP header. */
1029 static inline void __pack_control(struct l2cap_chan *chan,
1030 struct l2cap_ctrl *control,
1031 struct sk_buff *skb)
1033 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1034 put_unaligned_le32(__pack_extended_control(control),
1035 skb->data + L2CAP_HDR_SIZE);
1037 put_unaligned_le16(__pack_enhanced_control(control),
1038 skb->data + L2CAP_HDR_SIZE);
/* ERTM header size: extended or enhanced control field variant. */
1042 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1044 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1045 return L2CAP_EXT_HDR_SIZE;
1047 return L2CAP_ENH_HDR_SIZE;
/* Build an S-frame PDU: basic header, packed control field and an
 * optional CRC16 FCS trailer.  Returns ERR_PTR(-ENOMEM) on allocation
 * failure.
 */
1050 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1053 struct sk_buff *skb;
1054 struct l2cap_hdr *lh;
1055 int hlen = __ertm_hdr_size(chan);
1057 if (chan->fcs == L2CAP_FCS_CRC16)
1058 hlen += L2CAP_FCS_SIZE;
1060 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1063 return ERR_PTR(-ENOMEM);
1065 lh = skb_put(skb, L2CAP_HDR_SIZE);
1066 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1067 lh->cid = cpu_to_le16(chan->dcid);
1069 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1070 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1072 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1074 if (chan->fcs == L2CAP_FCS_CRC16) {
1075 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1076 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1079 skb->priority = HCI_PRIO_MAX;
/* Send a supervisory frame, maintaining RNR-sent and last-acked-seq
 * bookkeeping along the way.
 */
1083 static void l2cap_send_sframe(struct l2cap_chan *chan,
1084 struct l2cap_ctrl *control)
1086 struct sk_buff *skb;
1089 BT_DBG("chan %p, control %p", chan, control)
1091 if (!control->sframe)
1094 if (__chan_is_moving(chan))
1097 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1101 if (control->super == L2CAP_SUPER_RR)
1102 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1103 else if (control->super == L2CAP_SUPER_RNR)
1104 set_bit(CONN_RNR_SENT, &chan->conn_state);
1106 if (control->super != L2CAP_SUPER_SREJ) {
1107 chan->last_acked_seq = control->reqseq;
1108 __clear_ack_timer(chan);
1111 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1112 control->final, control->poll, control->super);
1114 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1115 control_field = __pack_extended_control(control);
1117 control_field = __pack_enhanced_control(control);
1119 skb = l2cap_create_sframe_pdu(chan, control_field);
1121 l2cap_do_send(chan, skb);
/* Send RR (or RNR when locally busy), optionally with the poll bit set. */
1124 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1126 struct l2cap_ctrl control;
1128 BT_DBG("chan %p, poll %d", chan, poll);
1130 memset(&control, 0, sizeof(control));
1132 control.poll = poll;
1134 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1135 control.super = L2CAP_SUPER_RNR;
1137 control.super = L2CAP_SUPER_RR;
1139 control.reqseq = chan->buffer_seq;
1140 l2cap_send_sframe(chan, &control);
/* True when no connect request is pending (only meaningful for
 * connection-oriented channels).
 */
1143 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1145 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1148 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Whether this channel may be created on an AMP controller: both ends
 * must advertise A2MP and some non-BR/EDR AMP controller must be up.
 */
1151 static bool __amp_capable(struct l2cap_chan *chan)
1153 struct l2cap_conn *conn = chan->conn;
1154 struct hci_dev *hdev;
1155 bool amp_available = false;
1157 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1160 if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1163 read_lock(&hci_dev_list_lock);
1164 list_for_each_entry(hdev, &hci_dev_list, list) {
1165 if (hdev->amp_type != AMP_TYPE_BREDR &&
1166 test_bit(HCI_UP, &hdev->flags)) {
1167 amp_available = true;
1171 read_unlock(&hci_dev_list_lock);
1173 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1174 return amp_available;
1179 static bool l2cap_check_efs(struct l2cap_chan *chan)
1181 /* Check EFS parameters */
/* Send an L2CAP connection request; CONF_CONNECT_PEND is set until the
 * response arrives.
 */
1185 void l2cap_send_conn_req(struct l2cap_chan *chan)
1187 struct l2cap_conn *conn = chan->conn;
1188 struct l2cap_conn_req req;
1190 req.scid = cpu_to_le16(chan->scid);
1191 req.psm = chan->psm;
1193 chan->ident = l2cap_get_ident(conn);
1195 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1197 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* AMP variant: create-channel request carrying the target AMP id. */
1200 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1202 struct l2cap_create_chan_req req;
1203 req.scid = cpu_to_le16(chan->scid);
1204 req.psm = chan->psm;
1205 req.amp_id = amp_id;
1207 chan->ident = l2cap_get_ident(chan->conn);
1209 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
/* Prepare an ERTM channel for an AMP move: stop all timers, reset the
 * retry bookkeeping on queued frames, clear SREJ state and switch the
 * rx state machine to MOVE.
 */
1213 static void l2cap_move_setup(struct l2cap_chan *chan)
1215 struct sk_buff *skb;
1217 BT_DBG("chan %p", chan);
1219 if (chan->mode != L2CAP_MODE_ERTM)
1222 __clear_retrans_timer(chan);
1223 __clear_monitor_timer(chan);
1224 __clear_ack_timer(chan);
1226 chan->retry_count = 0;
1227 skb_queue_walk(&chan->tx_q, skb) {
1228 if (bt_cb(skb)->l2cap.retries)
1229 bt_cb(skb)->l2cap.retries = 1;
1234 chan->expected_tx_seq = chan->buffer_seq;
1236 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1237 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1238 l2cap_seq_list_clear(&chan->retrans_list);
1239 l2cap_seq_list_clear(&chan->srej_list);
1240 skb_queue_purge(&chan->srej_q);
1242 chan->tx_state = L2CAP_TX_STATE_XMIT;
1243 chan->rx_state = L2CAP_RX_STATE_MOVE;
1245 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish a move: return to the stable move state and, for ERTM,
 * resynchronise via a poll (initiator) or wait for one (responder).
 */
1248 static void l2cap_move_done(struct l2cap_chan *chan)
1250 u8 move_role = chan->move_role;
1251 BT_DBG("chan %p", chan);
1253 chan->move_state = L2CAP_MOVE_STABLE;
1254 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1256 if (chan->mode != L2CAP_MODE_ERTM)
1259 switch (move_role) {
1260 case L2CAP_MOVE_ROLE_INITIATOR:
1261 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1262 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1264 case L2CAP_MOVE_ROLE_RESPONDER:
1265 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
/* Mark a channel connected and notify its owner. */
1270 static void l2cap_chan_ready(struct l2cap_chan *chan)
1272 /* The channel may have already been flagged as connected in
1273 * case of receiving data before the L2CAP info req/rsp
1274 * procedure is complete.
1276 if (chan->state == BT_CONNECTED)
1279 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1280 chan->conf_state = 0;
1281 __clear_chan_timer(chan);
1283 if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1284 chan->ops->suspend(chan);
1286 chan->state = BT_CONNECTED;
1288 chan->ops->ready(chan);
/* Send an LE credit-based connection request; FLAG_LE_CONN_REQ_SENT
 * guards against sending it twice for the same channel.
 */
1291 static void l2cap_le_connect(struct l2cap_chan *chan)
1293 struct l2cap_conn *conn = chan->conn;
1294 struct l2cap_le_conn_req req;
1296 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1299 l2cap_le_flowctl_init(chan, 0);
1301 req.psm = chan->psm;
1302 req.scid = cpu_to_le16(chan->scid);
1303 req.mtu = cpu_to_le16(chan->imtu);
1304 req.mps = cpu_to_le16(chan->mps);
1305 req.credits = cpu_to_le16(chan->rx_credits);
1307 chan->ident = l2cap_get_ident(conn);
1309 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
/* Progress an LE channel once SMP link security is satisfied. */
1313 static void l2cap_le_start(struct l2cap_chan *chan)
1315 struct l2cap_conn *conn = chan->conn;
1317 if (!smp_conn_security(conn->hcon, chan->sec_level))
1321 l2cap_chan_ready(chan);
1325 if (chan->state == BT_CONNECT)
1326 l2cap_le_connect(chan);
/* Dispatch connection setup: AMP discovery, LE start, or a plain
 * BR/EDR connect request.
 */
1329 static void l2cap_start_connection(struct l2cap_chan *chan)
1331 if (__amp_capable(chan)) {
1332 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1333 a2mp_discover_amp(chan);
1334 } else if (chan->conn->hcon->type == LE_LINK) {
1335 l2cap_le_start(chan);
1337 l2cap_send_conn_req(chan);
/* Request the remote feature mask once per connection and arm the
 * info-response timeout.
 */
1341 static void l2cap_request_info(struct l2cap_conn *conn)
1343 struct l2cap_info_req req;
1345 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1348 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1350 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1351 conn->info_ident = l2cap_get_ident(conn);
1353 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1355 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1359 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1361 /* The minimum encryption key size needs to be enforced by the
1362 * host stack before establishing any L2CAP connections. The
1363 * specification in theory allows a minimum of 1, but to align
1364 * BR/EDR and LE transports, a minimum of 7 is chosen.
1366 * This check might also be called for unencrypted connections
1367 * that have no key size requirements. Ensure that the link is
1368 * actually encrypted before enforcing a key size.
1370 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1371 hcon->enc_key_size >= hcon->hdev->min_enc_key_size);
/* Drive establishment of a single channel on an existing link.
 * LE links delegate to l2cap_le_start(). On BR/EDR the remote feature
 * mask must be requested (and received) first; then, if security checks
 * pass and no connect is pending, the connection is started — but only
 * when the encryption key size is acceptable, otherwise the channel is
 * put on the disconnect timer instead.
 */
1374 static void l2cap_do_start(struct l2cap_chan *chan)
1376 struct l2cap_conn *conn = chan->conn;
1378 if (conn->hcon->type == LE_LINK) {
1379 l2cap_le_start(chan);
1383 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1384 l2cap_request_info(conn);
1388 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1391 if (!l2cap_chan_check_security(chan, true) ||
1392 !__l2cap_no_conn_pending(chan))
/* Refuse to bring the channel up over a weakly-encrypted link. */
1395 if (l2cap_check_enc_key_size(conn->hcon))
1396 l2cap_start_connection(chan);
1398 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Check whether @mode is usable given the remote @feat_mask and the
 * local feature mask. ERTM and streaming are always enabled locally
 * here, so the result is effectively gated by the remote mask.
 */
1401 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1403 u32 local_feat_mask = l2cap_feat_mask;
1405 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1408 case L2CAP_MODE_ERTM:
1409 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1410 case L2CAP_MODE_STREAMING:
1411 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Tear down a channel: stop ERTM timers for a connected ERTM channel,
 * then send a Disconnect Request and move the channel to BT_DISCONN
 * with @err recorded. A2MP channels only change state — no signalling
 * PDU is sent for them.
 */
1417 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1419 struct l2cap_conn *conn = chan->conn;
1420 struct l2cap_disconn_req req;
1425 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1426 __clear_retrans_timer(chan);
1427 __clear_monitor_timer(chan);
1428 __clear_ack_timer(chan);
1431 if (chan->scid == L2CAP_CID_A2MP) {
1432 l2cap_state_change(chan, BT_DISCONN);
1436 req.dcid = cpu_to_le16(chan->dcid);
1437 req.scid = cpu_to_le16(chan->scid);
1438 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1441 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1444 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection (under chan_lock) and advance
 * its state machine: non-connection-oriented channels become ready
 * immediately; BT_CONNECT channels get their connect request sent once
 * security allows; BT_CONNECT2 channels get a Connect Response and,
 * on success, the first Configure Request.
 */
1445 static void l2cap_conn_start(struct l2cap_conn *conn)
1447 struct l2cap_chan *chan, *tmp;
1449 BT_DBG("conn %p", conn);
1451 mutex_lock(&conn->chan_lock);
1453 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1454 l2cap_chan_lock(chan);
1456 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1457 l2cap_chan_ready(chan);
1458 l2cap_chan_unlock(chan);
1462 if (chan->state == BT_CONNECT) {
1463 if (!l2cap_chan_check_security(chan, true) ||
1464 !__l2cap_no_conn_pending(chan)) {
1465 l2cap_chan_unlock(chan);
/* Close channels whose configured mode the peer cannot support
 * when the mode is mandated by the device (CONF_STATE2_DEVICE).
 */
1469 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1470 && test_bit(CONF_STATE2_DEVICE,
1471 &chan->conf_state)) {
1472 l2cap_chan_close(chan, ECONNRESET);
1473 l2cap_chan_unlock(chan);
1477 if (l2cap_check_enc_key_size(conn->hcon))
1478 l2cap_start_connection(chan);
1480 l2cap_chan_close(chan, ECONNREFUSED);
1482 } else if (chan->state == BT_CONNECT2) {
1483 struct l2cap_conn_rsp rsp;
1485 rsp.scid = cpu_to_le16(chan->dcid);
1486 rsp.dcid = cpu_to_le16(chan->scid);
1488 if (l2cap_chan_check_security(chan, false)) {
/* Defer to userspace (defer callback) when DEFER_SETUP is on;
 * otherwise accept and move straight to BT_CONFIG.
 */
1489 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1490 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1491 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1492 chan->ops->defer(chan);
1495 l2cap_state_change(chan, BT_CONFIG);
1496 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1497 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1500 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1501 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1504 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Send the initial Configure Request only once and only after a
 * successful Connect Response.
 */
1507 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1508 rsp.result != L2CAP_CR_SUCCESS) {
1509 l2cap_chan_unlock(chan);
1513 set_bit(CONF_REQ_SENT, &chan->conf_state);
1514 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1515 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1516 chan->num_conf_req++;
1519 l2cap_chan_unlock(chan);
1522 mutex_unlock(&conn->chan_lock);
/* Post-connect processing for LE links: trigger pairing that was
 * requested without a socket, and as slave request a connection
 * parameter update if the current interval is out of configured range.
 */
1525 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1527 struct hci_conn *hcon = conn->hcon;
1528 struct hci_dev *hdev = hcon->hdev;
1530 BT_DBG("%s conn %p", hdev->name, conn);
1532 /* For outgoing pairing which doesn't necessarily have an
1533 * associated socket (e.g. mgmt_pair_device).
1536 smp_conn_security(hcon, hcon->pending_sec_level);
1538 /* For LE slave connections, make sure the connection interval
1539 * is in the range of the minimum and maximum interval that has
1540 * been configured for this connection. If not, then trigger
1541 * the connection update procedure.
1543 if (hcon->role == HCI_ROLE_SLAVE &&
1544 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1545 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1546 struct l2cap_conn_param_update_req req;
1548 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1549 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1550 req.latency = cpu_to_le16(hcon->le_conn_latency);
1551 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1553 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1554 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
/* Called when the underlying link is fully up: request the feature
 * mask on ACL links, advance every existing channel (skipping A2MP),
 * run the LE-specific ready path, and finally let queued inbound
 * frames be processed via pending_rx_work.
 */
1558 static void l2cap_conn_ready(struct l2cap_conn *conn)
1560 struct l2cap_chan *chan;
1561 struct hci_conn *hcon = conn->hcon;
1563 BT_DBG("conn %p", conn);
1565 if (hcon->type == ACL_LINK)
1566 l2cap_request_info(conn);
1568 mutex_lock(&conn->chan_lock);
1570 list_for_each_entry(chan, &conn->chan_l, list) {
1572 l2cap_chan_lock(chan);
1574 if (chan->scid == L2CAP_CID_A2MP) {
1575 l2cap_chan_unlock(chan);
1579 if (hcon->type == LE_LINK) {
1580 l2cap_le_start(chan);
1581 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
/* Fixed/connectionless channels become ready once the remote
 * feature mask is known.
 */
1582 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1583 l2cap_chan_ready(chan);
1584 } else if (chan->state == BT_CONNECT) {
1585 l2cap_do_start(chan);
1588 l2cap_chan_unlock(chan);
1591 mutex_unlock(&conn->chan_lock);
1593 if (hcon->type == LE_LINK)
1594 l2cap_le_conn_ready(conn);
1596 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1599 /* Notify sockets that we cannot guarantee reliability anymore */
/* Propagate @err to every channel on the connection that demanded
 * reliable delivery (FLAG_FORCE_RELIABLE).
 */
1600 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1602 struct l2cap_chan *chan;
1604 BT_DBG("conn %p", conn);
1606 mutex_lock(&conn->chan_lock);
1608 list_for_each_entry(chan, &conn->chan_l, list) {
1609 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1610 l2cap_chan_set_err(chan, err);
1613 mutex_unlock(&conn->chan_lock);
/* Info-request timer expiry: give up waiting for the remote feature
 * mask, mark the exchange done, and continue channel establishment.
 */
1616 static void l2cap_info_timeout(struct work_struct *work)
1618 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1621 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1622 conn->info_ident = 0;
1624 l2cap_conn_start(conn);
1629 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1630 * callback is called during registration. The ->remove callback is called
1631 * during unregistration.
1632 An l2cap_user object can either be unregistered explicitly, or implicitly
1633 when the underlying l2cap_conn object is deleted.
1634 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1635 * External modules must own a reference to the l2cap_conn object if they intend
1636 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1637 * any time if they don't.
/* Register an external l2cap_user on the connection. Performed under
 * the hci_dev lock so the check for a still-live connection (hchan)
 * cannot race with l2cap_conn_del(). Returns 0 on success; the user's
 * probe() callback may veto registration.
 */
1640 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1642 struct hci_dev *hdev = conn->hcon->hdev;
1645 /* We need to check whether l2cap_conn is registered. If it is not, we
1646 * must not register the l2cap_user. l2cap_conn_del() unregisters
1647 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1648 * relies on the parent hci_conn object to be locked. This itself relies
1649 * on the hci_dev object to be locked. So we must lock the hci device
/* A non-empty user->list means this user is already registered. */
1654 if (!list_empty(&user->list)) {
1659 /* conn->hchan is NULL after l2cap_conn_del() was called */
1665 ret = user->probe(conn, user);
1669 list_add(&user->list, &conn->users);
1673 hci_dev_unlock(hdev);
1676 EXPORT_SYMBOL(l2cap_register_user);
/* Explicitly unregister an l2cap_user: unlink it (no-op if it was
 * never registered) and invoke its remove() callback, all under the
 * hci_dev lock for the same reasons as registration.
 */
1678 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1680 struct hci_dev *hdev = conn->hcon->hdev;
1684 if (list_empty(&user->list))
1687 list_del_init(&user->list);
1688 user->remove(conn, user);
1691 hci_dev_unlock(hdev);
1693 EXPORT_SYMBOL(l2cap_unregister_user);
/* Detach every registered l2cap_user from the connection, calling each
 * one's remove() callback. Used during connection teardown.
 */
1695 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1697 struct l2cap_user *user;
1699 while (!list_empty(&conn->users)) {
1700 user = list_first_entry(&conn->users, struct l2cap_user, list);
1701 list_del_init(&user->list);
1702 user->remove(conn, user);
/* Destroy the l2cap_conn attached to @hcon: flush pending RX state,
 * detach users, close every channel with @err, release the HCI
 * channel, stop the info timer and drop the final conn reference.
 */
1706 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1708 struct l2cap_conn *conn = hcon->l2cap_data;
1709 struct l2cap_chan *chan, *l;
1714 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1716 kfree_skb(conn->rx_skb);
1718 skb_queue_purge(&conn->pending_rx);
1720 /* We can not call flush_work(&conn->pending_rx_work) here since we
1721 * might block if we are running on a worker from the same workqueue
1722 * pending_rx_work is waiting on.
1724 if (work_pending(&conn->pending_rx_work))
1725 cancel_work_sync(&conn->pending_rx_work);
1727 if (work_pending(&conn->id_addr_update_work))
1728 cancel_work_sync(&conn->id_addr_update_work);
1730 l2cap_unregister_all_users(conn);
1732 /* Force the connection to be immediately dropped */
1733 hcon->disc_timeout = 0;
1735 mutex_lock(&conn->chan_lock);
/* Hold each channel across del/close so the callbacks cannot free
 * it while we still hold its lock.
 */
1738 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1739 l2cap_chan_hold(chan);
1740 l2cap_chan_lock(chan);
1742 l2cap_chan_del(chan, err);
1744 chan->ops->close(chan);
1746 l2cap_chan_unlock(chan);
1747 l2cap_chan_put(chan);
1750 mutex_unlock(&conn->chan_lock);
1752 hci_chan_del(conn->hchan);
1754 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1755 cancel_delayed_work_sync(&conn->info_timer);
1757 hcon->l2cap_data = NULL;
1759 l2cap_conn_put(conn);
/* kref release: drop the hci_conn reference held by the l2cap_conn
 * (and free the conn itself — freeing lines elided in this view).
 */
1762 static void l2cap_conn_free(struct kref *ref)
1764 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1766 hci_conn_put(conn->hcon);
/* Take a reference on the connection and return it for call chaining. */
1770 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1772 kref_get(&conn->ref);
1775 EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a connection reference; the last put runs l2cap_conn_free(). */
1777 void l2cap_conn_put(struct l2cap_conn *conn)
1779 kref_put(&conn->ref, l2cap_conn_free);
1781 EXPORT_SYMBOL(l2cap_conn_put);
1783 /* ---- Socket interface ---- */
1785 /* Find socket with psm and source / destination bdaddr.
1786 * Returns closest match.
/* Search the global channel list for a channel in @state bound to
 * @psm whose transport matches @link_type. An exact src+dst address
 * match wins immediately; otherwise the best wildcard (BDADDR_ANY)
 * match found is returned. The returned channel has an extra
 * reference taken via l2cap_chan_hold(); the caller must put it.
 */
1788 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1793 struct l2cap_chan *c, *c1 = NULL;
1795 read_lock(&chan_list_lock);
1797 list_for_each_entry(c, &chan_list, global_l) {
1798 if (state && c->state != state)
1801 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1804 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1807 if (c->psm == psm) {
1808 int src_match, dst_match;
1809 int src_any, dst_any;
1812 src_match = !bacmp(&c->src, src);
1813 dst_match = !bacmp(&c->dst, dst);
1814 if (src_match && dst_match) {
1816 read_unlock(&chan_list_lock);
/* No exact match: remember the first channel that matches one
 * side exactly with the other wildcarded, or both wildcarded.
 */
1821 src_any = !bacmp(&c->src, BDADDR_ANY);
1822 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1823 if ((src_match && dst_any) || (src_any && dst_match) ||
1824 (src_any && dst_any))
1830 l2cap_chan_hold(c1);
1832 read_unlock(&chan_list_lock);
/* ERTM monitor timer expiry: feed L2CAP_EV_MONITOR_TO into the tx
 * state machine (which re-polls or disconnects), then drop the channel
 * reference taken when the timer was armed. The early unlock/put pair
 * handles a channel that is no longer eligible (guard lines elided).
 */
1837 static void l2cap_monitor_timeout(struct work_struct *work)
1839 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1840 monitor_timer.work);
1842 BT_DBG("chan %p", chan);
1844 l2cap_chan_lock(chan);
1847 l2cap_chan_unlock(chan);
1848 l2cap_chan_put(chan);
1852 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1854 l2cap_chan_unlock(chan);
1855 l2cap_chan_put(chan);
/* ERTM retransmission timer expiry: feed L2CAP_EV_RETRANS_TO into the
 * tx state machine and release the timer's channel reference. Mirrors
 * l2cap_monitor_timeout() in structure.
 */
1858 static void l2cap_retrans_timeout(struct work_struct *work)
1860 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1861 retrans_timer.work);
1863 BT_DBG("chan %p", chan);
1865 l2cap_chan_lock(chan);
1868 l2cap_chan_unlock(chan);
1869 l2cap_chan_put(chan);
1873 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1874 l2cap_chan_unlock(chan);
1875 l2cap_chan_put(chan);
/* Streaming-mode transmit: append @skbs to the tx queue and send every
 * queued frame immediately — streaming mode has no acknowledgements or
 * retransmission. Each frame gets a control field and optional CRC16
 * FCS before being handed to l2cap_do_send().
 */
1878 static void l2cap_streaming_send(struct l2cap_chan *chan,
1879 struct sk_buff_head *skbs)
1881 struct sk_buff *skb;
1882 struct l2cap_ctrl *control;
1884 BT_DBG("chan %p, skbs %p", chan, skbs);
/* Don't transmit while an AMP channel move is in progress. */
1886 if (__chan_is_moving(chan))
1889 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1891 while (!skb_queue_empty(&chan->tx_q)) {
1893 skb = skb_dequeue(&chan->tx_q);
1895 bt_cb(skb)->l2cap.retries = 1;
1896 control = &bt_cb(skb)->l2cap;
1898 control->reqseq = 0;
1899 control->txseq = chan->next_tx_seq;
1901 __pack_control(chan, control, skb);
1903 if (chan->fcs == L2CAP_FCS_CRC16) {
1904 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1905 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1908 l2cap_do_send(chan, skb);
1910 BT_DBG("Sent txseq %u", control->txseq);
1912 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1913 chan->frames_sent++;
/* ERTM transmit path: send I-frames from tx_send_head while the remote
 * tx window has room and the tx state machine is in XMIT. Frames stay
 * on tx_q (a clone is sent) so they can be retransmitted later.
 * Returns the number of frames sent (return lines elided in this view).
 */
1917 static int l2cap_ertm_send(struct l2cap_chan *chan)
1919 struct sk_buff *skb, *tx_skb;
1920 struct l2cap_ctrl *control;
1923 BT_DBG("chan %p", chan);
1925 if (chan->state != BT_CONNECTED)
1928 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1931 if (__chan_is_moving(chan))
1934 while (chan->tx_send_head &&
1935 chan->unacked_frames < chan->remote_tx_win &&
1936 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1938 skb = chan->tx_send_head;
1940 bt_cb(skb)->l2cap.retries = 1;
1941 control = &bt_cb(skb)->l2cap;
1943 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Piggyback an acknowledgement of everything received so far. */
1946 control->reqseq = chan->buffer_seq;
1947 chan->last_acked_seq = chan->buffer_seq;
1948 control->txseq = chan->next_tx_seq;
1950 __pack_control(chan, control, skb);
1952 if (chan->fcs == L2CAP_FCS_CRC16) {
1953 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1954 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1957 /* Clone after data has been modified. Data is assumed to be
1958 read-only (for locking purposes) on cloned sk_buffs.
1960 tx_skb = skb_clone(skb, GFP_KERNEL);
1965 __set_retrans_timer(chan);
1967 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1968 chan->unacked_frames++;
1969 chan->frames_sent++;
/* Advance tx_send_head to the next unsent frame, if any. */
1972 if (skb_queue_is_last(&chan->tx_q, skb))
1973 chan->tx_send_head = NULL;
1975 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1977 l2cap_do_send(chan, tx_skb);
1978 BT_DBG("Sent txseq %u", control->txseq);
1981 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1982 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on retrans_list. Each frame's
 * retry count is checked against max_tx (disconnecting on exhaustion),
 * its control field and FCS are refreshed, and a clone/copy is sent so
 * the original stays queued for further retransmission if needed.
 */
1987 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1989 struct l2cap_ctrl control;
1990 struct sk_buff *skb;
1991 struct sk_buff *tx_skb;
1994 BT_DBG("chan %p", chan);
1996 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1999 if (__chan_is_moving(chan))
2002 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2003 seq = l2cap_seq_list_pop(&chan->retrans_list);
2005 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2007 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2012 bt_cb(skb)->l2cap.retries++;
2013 control = bt_cb(skb)->l2cap;
2015 if (chan->max_tx != 0 &&
2016 bt_cb(skb)->l2cap.retries > chan->max_tx) {
2017 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2018 l2cap_send_disconn_req(chan, ECONNRESET);
2019 l2cap_seq_list_clear(&chan->retrans_list);
2023 control.reqseq = chan->buffer_seq;
2024 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2029 if (skb_cloned(skb)) {
2030 /* Cloned sk_buffs are read-only, so we need a
2033 tx_skb = skb_copy(skb, GFP_KERNEL);
2035 tx_skb = skb_clone(skb, GFP_KERNEL);
/* Allocation failure: abandon this retransmission round. */
2039 l2cap_seq_list_clear(&chan->retrans_list);
2043 /* Update skb contents */
2044 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2045 put_unaligned_le32(__pack_extended_control(&control),
2046 tx_skb->data + L2CAP_HDR_SIZE);
2048 put_unaligned_le16(__pack_enhanced_control(&control),
2049 tx_skb->data + L2CAP_HDR_SIZE);
/* Recompute the FCS over everything but the trailing FCS field. */
2053 if (chan->fcs == L2CAP_FCS_CRC16) {
2054 u16 fcs = crc16(0, (u8 *) tx_skb->data,
2055 tx_skb->len - L2CAP_FCS_SIZE);
2056 put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2060 l2cap_do_send(chan, tx_skb);
2062 BT_DBG("Resent txseq %d", control.txseq);
2064 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame identified by control->reqseq. */
2068 static void l2cap_retransmit(struct l2cap_chan *chan,
2069 struct l2cap_ctrl *control)
2071 BT_DBG("chan %p, control %p", chan, control);
2073 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2074 l2cap_ertm_resend(chan);
/* Retransmit every unacked frame starting at control->reqseq: locate
 * that frame in tx_q, queue it and all following frames (up to
 * tx_send_head) on retrans_list, then resend. Skipped entirely while
 * the remote side reports busy.
 */
2077 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2078 struct l2cap_ctrl *control)
2080 struct sk_buff *skb;
2082 BT_DBG("chan %p, control %p", chan, control);
2085 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2087 l2cap_seq_list_clear(&chan->retrans_list);
2089 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2092 if (chan->unacked_frames) {
/* First walk: find where reqseq (or the unsent head) begins. */
2093 skb_queue_walk(&chan->tx_q, skb) {
2094 if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2095 skb == chan->tx_send_head)
/* Second walk: queue every already-sent frame from there on. */
2099 skb_queue_walk_from(&chan->tx_q, skb) {
2100 if (skb == chan->tx_send_head)
2103 l2cap_seq_list_append(&chan->retrans_list,
2104 bt_cb(skb)->l2cap.txseq);
2107 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames. Sends RNR when locally busy, else
 * tries to piggyback the ack on pending I-frames; failing that, sends
 * an explicit RR once the unacked window is 3/4 full, otherwise just
 * (re)arms the ack timer to batch future acks.
 */
2111 static void l2cap_send_ack(struct l2cap_chan *chan)
2113 struct l2cap_ctrl control;
2114 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2115 chan->last_acked_seq);
2118 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2119 chan, chan->last_acked_seq, chan->buffer_seq);
2121 memset(&control, 0, sizeof(control));
2124 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2125 chan->rx_state == L2CAP_RX_STATE_RECV) {
2126 __clear_ack_timer(chan);
2127 control.super = L2CAP_SUPER_RNR;
2128 control.reqseq = chan->buffer_seq;
2129 l2cap_send_sframe(chan, &control);
2131 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2132 l2cap_ertm_send(chan);
2133 /* If any i-frames were sent, they included an ack */
2134 if (chan->buffer_seq == chan->last_acked_seq)
2138 /* Ack now if the window is 3/4ths full.
2139 * Calculate without mul or div
/* threshold = ack_win * 3 / 4, via shift-and-add. */
2141 threshold = chan->ack_win;
2142 threshold += threshold << 1;
2145 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2148 if (frames_to_ack >= threshold) {
2149 __clear_ack_timer(chan);
2150 control.super = L2CAP_SUPER_RR;
2151 control.reqseq = chan->buffer_seq;
2152 l2cap_send_sframe(chan, &control);
2157 __set_ack_timer(chan);
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into skb's linear area, the remainder is split into
 * MTU-sized continuation fragments chained on skb's frag_list.
 * Returns 0 on success or a negative errno.
 */
2161 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2162 struct msghdr *msg, int len,
2163 int count, struct sk_buff *skb)
2165 struct l2cap_conn *conn = chan->conn;
2166 struct sk_buff **frag;
2169 if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2175 /* Continuation fragments (no L2CAP header) */
2176 frag = &skb_shinfo(skb)->frag_list;
2178 struct sk_buff *tmp;
2180 count = min_t(unsigned int, conn->mtu, len);
2182 tmp = chan->ops->alloc_skb(chan, 0, count,
2183 msg->msg_flags & MSG_DONTWAIT);
2185 return PTR_ERR(tmp);
2189 if (!copy_from_iter_full(skb_put(*frag, count), count,
/* Account fragment bytes in the parent skb's totals. */
2196 skb->len += (*frag)->len;
2197 skb->data_len += (*frag)->len;
2199 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed by
 * the 2-byte PSM, then the user payload copied from @msg. Returns the
 * skb or an ERR_PTR on allocation/copy failure.
 */
2205 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2206 struct msghdr *msg, size_t len)
2208 struct l2cap_conn *conn = chan->conn;
2209 struct sk_buff *skb;
2210 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2211 struct l2cap_hdr *lh;
2213 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2214 __le16_to_cpu(chan->psm), len);
2216 count = min_t(unsigned int, (conn->mtu - hlen), len);
2218 skb = chan->ops->alloc_skb(chan, hlen, count,
2219 msg->msg_flags & MSG_DONTWAIT);
2223 /* Create L2CAP header */
2224 lh = skb_put(skb, L2CAP_HDR_SIZE);
2225 lh->cid = cpu_to_le16(chan->dcid);
2226 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2227 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2229 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2230 if (unlikely(err < 0)) {
2232 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header plus the user
 * payload from @msg. Returns the skb or an ERR_PTR on failure.
 */
2237 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2238 struct msghdr *msg, size_t len)
2240 struct l2cap_conn *conn = chan->conn;
2241 struct sk_buff *skb;
2243 struct l2cap_hdr *lh;
2245 BT_DBG("chan %p len %zu", chan, len);
2247 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2249 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2250 msg->msg_flags & MSG_DONTWAIT);
2254 /* Create L2CAP header */
2255 lh = skb_put(skb, L2CAP_HDR_SIZE);
2256 lh->cid = cpu_to_le16(chan->dcid);
2257 lh->len = cpu_to_le16(len);
2259 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2260 if (unlikely(err < 0)) {
2262 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, a zeroed control
 * field (filled in at transmit time), an optional SDU-length field for
 * the first segment, the payload, and room reserved for the FCS.
 * Returns the skb or an ERR_PTR on failure.
 */
2267 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2268 struct msghdr *msg, size_t len,
2271 struct l2cap_conn *conn = chan->conn;
2272 struct sk_buff *skb;
2273 int err, count, hlen;
2274 struct l2cap_hdr *lh;
2276 BT_DBG("chan %p len %zu", chan, len);
2279 return ERR_PTR(-ENOTCONN);
2281 hlen = __ertm_hdr_size(chan);
2284 hlen += L2CAP_SDULEN_SIZE;
2286 if (chan->fcs == L2CAP_FCS_CRC16)
2287 hlen += L2CAP_FCS_SIZE;
2289 count = min_t(unsigned int, (conn->mtu - hlen), len);
2291 skb = chan->ops->alloc_skb(chan, hlen, count,
2292 msg->msg_flags & MSG_DONTWAIT);
2296 /* Create L2CAP header */
2297 lh = skb_put(skb, L2CAP_HDR_SIZE);
2298 lh->cid = cpu_to_le16(chan->dcid);
2299 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2301 /* Control header is populated later */
2302 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2303 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE))
2305 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2308 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2310 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2311 if (unlikely(err < 0)) {
2313 return ERR_PTR(err);
2316 bt_cb(skb)->l2cap.fcs = chan->fcs;
2317 bt_cb(skb)->l2cap.retries = 0;
/* Segment an SDU from @msg into ERTM/streaming I-frame PDUs appended
 * to @seg_queue. The PDU size is derived from the HCI MTU (capped for
 * BR/EDR and by the remote MPS) so each PDU fits in one HCI fragment.
 * SAR markers: UNSEGMENTED for a one-PDU SDU, otherwise
 * START / CONTINUE... / END.
 */
2321 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2322 struct sk_buff_head *seg_queue,
2323 struct msghdr *msg, size_t len)
2325 struct sk_buff *skb;
2330 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2332 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2333 * so fragmented skbs are not used. The HCI layer's handling
2334 * of fragmented skbs is not compatible with ERTM's queueing.
2337 /* PDU size is derived from the HCI MTU */
2338 pdu_len = chan->conn->mtu;
2340 /* Constrain PDU size for BR/EDR connections */
2342 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2344 /* Adjust for largest possible L2CAP overhead. */
2346 pdu_len -= L2CAP_FCS_SIZE;
2348 pdu_len -= __ertm_hdr_size(chan);
2350 /* Remote device may have requested smaller PDUs */
2351 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2353 if (len <= pdu_len) {
2354 sar = L2CAP_SAR_UNSEGMENTED;
2358 sar = L2CAP_SAR_START;
2363 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
/* On failure, discard anything already segmented. */
2366 __skb_queue_purge(seg_queue);
2367 return PTR_ERR(skb);
2370 bt_cb(skb)->l2cap.sar = sar;
2371 __skb_queue_tail(seg_queue, skb);
2377 if (len <= pdu_len) {
2378 sar = L2CAP_SAR_END;
2381 sar = L2CAP_SAR_CONTINUE;
/* Build one LE flow-control (credit-based) PDU: L2CAP header, an
 * optional SDU-length field for the first segment, then the payload.
 * Returns the skb or an ERR_PTR on failure.
 */
2388 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2390 size_t len, u16 sdulen)
2392 struct l2cap_conn *conn = chan->conn;
2393 struct sk_buff *skb;
2394 int err, count, hlen;
2395 struct l2cap_hdr *lh;
2397 BT_DBG("chan %p len %zu", chan, len);
2400 return ERR_PTR(-ENOTCONN);
2402 hlen = L2CAP_HDR_SIZE;
2405 hlen += L2CAP_SDULEN_SIZE;
2407 count = min_t(unsigned int, (conn->mtu - hlen), len);
2409 skb = chan->ops->alloc_skb(chan, hlen, count,
2410 msg->msg_flags & MSG_DONTWAIT);
2414 /* Create L2CAP header */
2415 lh = skb_put(skb, L2CAP_HDR_SIZE);
2416 lh->cid = cpu_to_le16(chan->dcid);
2417 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2420 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2422 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2423 if (unlikely(err < 0)) {
2425 return ERR_PTR(err);
/* Segment an SDU into LE flow-control PDUs on @seg_queue. The first
 * PDU carries the SDU length, so its payload budget is MPS minus the
 * SDU-length field; subsequent PDUs get the full MPS back.
 */
2431 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2432 struct sk_buff_head *seg_queue,
2433 struct msghdr *msg, size_t len)
2435 struct sk_buff *skb;
2439 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2442 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2448 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2450 __skb_queue_purge(seg_queue);
2451 return PTR_ERR(skb);
2454 __skb_queue_tail(seg_queue, skb);
/* Only the first PDU carries the SDU length field. */
2460 pdu_len += L2CAP_SDULEN_SIZE;
/* Drain the LE channel's tx queue while transmit credits remain,
 * sending one queued PDU per credit.
 */
2467 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2471 BT_DBG("chan %p", chan);
2473 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2474 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2479 BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2480 skb_queue_len(&chan->tx_q));
/* Top-level send entry point: dispatch @msg to the mode-specific
 * transmit path (connectionless, LE flow control, basic, ERTM or
 * streaming). Because the channel lock is dropped while skbs are
 * allocated, the channel state is rechecked after each PDU-building
 * step. Returns bytes sent or a negative errno.
 */
2483 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2485 struct sk_buff *skb;
2487 struct sk_buff_head seg_queue;
2492 /* Connectionless channel */
2493 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2494 skb = l2cap_create_connless_pdu(chan, msg, len);
2496 return PTR_ERR(skb);
2498 /* Channel lock is released before requesting new skb and then
2499 * reacquired thus we need to recheck channel state.
2501 if (chan->state != BT_CONNECTED) {
2506 l2cap_do_send(chan, skb);
2510 switch (chan->mode) {
2511 case L2CAP_MODE_LE_FLOWCTL:
2512 /* Check outgoing MTU */
2513 if (len > chan->omtu)
2516 __skb_queue_head_init(&seg_queue);
2518 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2520 if (chan->state != BT_CONNECTED) {
2521 __skb_queue_purge(&seg_queue);
2528 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2530 l2cap_le_flowctl_send(chan);
/* Out of credits: tell the owner to stop feeding us data. */
2532 if (!chan->tx_credits)
2533 chan->ops->suspend(chan);
2539 case L2CAP_MODE_BASIC:
2540 /* Check outgoing MTU */
2541 if (len > chan->omtu)
2544 /* Create a basic PDU */
2545 skb = l2cap_create_basic_pdu(chan, msg, len);
2547 return PTR_ERR(skb);
2549 /* Channel lock is released before requesting new skb and then
2550 * reacquired thus we need to recheck channel state.
2552 if (chan->state != BT_CONNECTED) {
2557 l2cap_do_send(chan, skb);
2561 case L2CAP_MODE_ERTM:
2562 case L2CAP_MODE_STREAMING:
2563 /* Check outgoing MTU */
2564 if (len > chan->omtu) {
2569 __skb_queue_head_init(&seg_queue);
2571 /* Do segmentation before calling in to the state machine,
2572 * since it's possible to block while waiting for memory
2575 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2577 /* The channel could have been closed while segmenting,
2578 * check that it is still connected.
2580 if (chan->state != BT_CONNECTED) {
2581 __skb_queue_purge(&seg_queue);
2588 if (chan->mode == L2CAP_MODE_ERTM)
2589 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2591 l2cap_streaming_send(chan, &seg_queue);
2595 /* If the skbs were not queued for sending, they'll still be in
2596 * seg_queue and need to be purged.
2598 __skb_queue_purge(&seg_queue);
2602 BT_DBG("bad state %1.1x", chan->mode);
2608 EXPORT_SYMBOL_GPL(l2cap_chan_send);
/* Send an SREJ S-frame for every missing sequence number between
 * expected_tx_seq and @txseq (exclusive) that is not already held in
 * the srej queue, recording each on srej_list; then advance
 * expected_tx_seq past @txseq.
 */
2610 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2612 struct l2cap_ctrl control;
2615 BT_DBG("chan %p, txseq %u", chan, txseq);
2617 memset(&control, 0, sizeof(control));
2619 control.super = L2CAP_SUPER_SREJ;
2621 for (seq = chan->expected_tx_seq; seq != txseq;
2622 seq = __next_seq(chan, seq)) {
2623 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2624 control.reqseq = seq;
2625 l2cap_send_sframe(chan, &control);
2626 l2cap_seq_list_append(&chan->srej_list, seq);
2630 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recently requested (tail) sequence
 * number on srej_list, if the list is non-empty.
 */
2633 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2635 struct l2cap_ctrl control;
2637 BT_DBG("chan %p", chan);
2639 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2642 memset(&control, 0, sizeof(control));
2644 control.super = L2CAP_SUPER_SREJ;
2645 control.reqseq = chan->srej_list.tail;
2646 l2cap_send_sframe(chan, &control);
/* Re-send SREJs for every outstanding sequence number on srej_list
 * except @txseq, rotating each entry back onto the list. The initial
 * head is captured so the loop makes exactly one pass.
 */
2649 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2651 struct l2cap_ctrl control;
2655 BT_DBG("chan %p, txseq %u", chan, txseq);
2657 memset(&control, 0, sizeof(control));
2659 control.super = L2CAP_SUPER_SREJ;
2661 /* Capture initial list head to allow only one pass through the list. */
2662 initial_head = chan->srej_list.head;
2665 seq = l2cap_seq_list_pop(&chan->srej_list);
2666 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2669 control.reqseq = seq;
2670 l2cap_send_sframe(chan, &control);
2671 l2cap_seq_list_append(&chan->srej_list, seq);
2672 } while (chan->srej_list.head != initial_head);
/* Process an incoming acknowledgement: free every frame from
 * expected_ack_seq up to (but not including) @reqseq from tx_q,
 * decrement the unacked count, and stop the retransmission timer once
 * nothing remains outstanding.
 */
2675 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2677 struct sk_buff *acked_skb;
2680 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2682 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2685 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2686 chan->expected_ack_seq, chan->unacked_frames);
2688 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2689 ackseq = __next_seq(chan, ackseq)) {
2691 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2693 skb_unlink(acked_skb, &chan->tx_q);
2694 kfree_skb(acked_skb);
2695 chan->unacked_frames--;
2699 chan->expected_ack_seq = reqseq;
2701 if (chan->unacked_frames == 0)
2702 __clear_retrans_timer(chan);
2704 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abandon the SREJ_SENT receive state: rewind expected_tx_seq to the
 * buffered position, drop all pending SREJ bookkeeping and out-of-order
 * frames, and return the receiver to plain RECV.
 */
2707 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2709 BT_DBG("chan %p", chan);
2711 chan->expected_tx_seq = chan->buffer_seq;
2712 l2cap_seq_list_clear(&chan->srej_list);
2713 skb_queue_purge(&chan->srej_q);
2714 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM transmitter state machine, XMIT state: normal transmission is
 * allowed. Handles new data, local-busy transitions (RNR / RR with
 * poll), incoming reqseq acks, explicit polls and retransmission
 * timeouts; poll-sending events move the machine to WAIT_F.
 */
2717 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2718 struct l2cap_ctrl *control,
2719 struct sk_buff_head *skbs, u8 event)
2721 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2725 case L2CAP_EV_DATA_REQUEST:
2726 if (chan->tx_send_head == NULL)
2727 chan->tx_send_head = skb_peek(skbs);
2729 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2730 l2cap_ertm_send(chan);
2732 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2733 BT_DBG("Enter LOCAL_BUSY");
2734 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2736 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2737 /* The SREJ_SENT state must be aborted if we are to
2738 * enter the LOCAL_BUSY state.
2740 l2cap_abort_rx_srej_sent(chan);
2743 l2cap_send_ack(chan);
2746 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2747 BT_DBG("Exit LOCAL_BUSY");
2748 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* If an RNR was sent, poll the peer with RR(P=1) and wait for
 * the final response in WAIT_F.
 */
2750 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2751 struct l2cap_ctrl local_control;
2753 memset(&local_control, 0, sizeof(local_control));
2754 local_control.sframe = 1;
2755 local_control.super = L2CAP_SUPER_RR;
2756 local_control.poll = 1;
2757 local_control.reqseq = chan->buffer_seq;
2758 l2cap_send_sframe(chan, &local_control);
2760 chan->retry_count = 1;
2761 __set_monitor_timer(chan);
2762 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2765 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2766 l2cap_process_reqseq(chan, control->reqseq);
2768 case L2CAP_EV_EXPLICIT_POLL:
2769 l2cap_send_rr_or_rnr(chan, 1);
2770 chan->retry_count = 1;
2771 __set_monitor_timer(chan);
2772 __clear_ack_timer(chan);
2773 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2775 case L2CAP_EV_RETRANS_TO:
2776 l2cap_send_rr_or_rnr(chan, 1);
2777 chan->retry_count = 1;
2778 __set_monitor_timer(chan);
2779 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2781 case L2CAP_EV_RECV_FBIT:
2782 /* Nothing to process */
/* ERTM transmit-side state machine, WAIT_F state: the channel has sent a
 * poll (P=1) and is waiting for a frame with the Final bit set.  New data
 * is queued but not transmitted until the F-bit arrives and the state
 * returns to XMIT.
 * NOTE(review): this listing has lines elided (switch opener, break
 * statements, closing braces); the case labels below are the visible
 * skeleton of a switch on 'event'.
 */
2789 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2790 struct l2cap_ctrl *control,
2791 struct sk_buff_head *skbs, u8 event)
2793 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
/* Data from the caller is only queued; WAIT_F must not transmit I-frames. */
2797 case L2CAP_EV_DATA_REQUEST:
2798 if (chan->tx_send_head == NULL)
2799 chan->tx_send_head = skb_peek(skbs);
2800 /* Queue data, but don't send. */
2801 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2803 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2804 BT_DBG("Enter LOCAL_BUSY");
2805 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2807 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2808 /* The SREJ_SENT state must be aborted if we are to
2809 * enter the LOCAL_BUSY state.
2811 l2cap_abort_rx_srej_sent(chan);
2814 l2cap_send_ack(chan);
2817 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2818 BT_DBG("Exit LOCAL_BUSY");
2819 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* If an RNR was sent while busy, poll the peer again with an RR(P=1)
 * so the remote learns we can receive once more.
 */
2821 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2822 struct l2cap_ctrl local_control;
2823 memset(&local_control, 0, sizeof(local_control));
2824 local_control.sframe = 1;
2825 local_control.super = L2CAP_SUPER_RR;
2826 local_control.poll = 1;
2827 local_control.reqseq = chan->buffer_seq;
2828 l2cap_send_sframe(chan, &local_control);
2830 chan->retry_count = 1;
2831 __set_monitor_timer(chan);
2832 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2835 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2836 l2cap_process_reqseq(chan, control->reqseq);
/* The Final bit answers our poll: stop the monitor timer, restart the
 * retransmission timer if frames are still unacked, return to XMIT.
 */
2840 case L2CAP_EV_RECV_FBIT:
2841 if (control && control->final) {
2842 __clear_monitor_timer(chan);
2843 if (chan->unacked_frames > 0)
2844 __set_retrans_timer(chan);
2845 chan->retry_count = 0;
2846 chan->tx_state = L2CAP_TX_STATE_XMIT;
2847 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
/* Already polling; an explicit poll request is ignored here. */
2850 case L2CAP_EV_EXPLICIT_POLL:
/* Monitor timeout: re-poll until max_tx is exhausted (max_tx == 0 means
 * retry forever), otherwise tear the channel down.
 */
2853 case L2CAP_EV_MONITOR_TO:
2854 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2855 l2cap_send_rr_or_rnr(chan, 1);
2856 __set_monitor_timer(chan);
2857 chan->retry_count++;
2859 l2cap_send_disconn_req(chan, ECONNABORTED);
/* Dispatch an ERTM transmit-side event to the handler for the channel's
 * current tx_state (XMIT or WAIT_F).  'control' and 'skbs' may be NULL
 * for events that carry no frame (see the l2cap_pass_to_tx* callers).
 */
2867 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2868 struct sk_buff_head *skbs, u8 event)
2870 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2871 chan, control, skbs, event, chan->tx_state);
2873 switch (chan->tx_state) {
2874 case L2CAP_TX_STATE_XMIT:
2875 l2cap_tx_state_xmit(chan, control, skbs, event);
2877 case L2CAP_TX_STATE_WAIT_F:
2878 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Feed a received frame's reqseq (and F bit) into the transmit state
 * machine so acknowledged I-frames can be released.
 */
2886 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2887 struct l2cap_ctrl *control)
2889 BT_DBG("chan %p, control %p", chan, control);
2890 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Feed only the Final bit of a received frame into the transmit state
 * machine (no reqseq processing).
 */
2893 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2894 struct l2cap_ctrl *control)
2896 BT_DBG("chan %p, control %p", chan, control);
2897 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2900 /* Copy frame to all raw sockets on that connection */
/* Walks the connection's channel list under chan_lock, clones the skb for
 * every RAW channel except the one the frame arrived on, and hands each
 * clone to the channel's ->recv() callback.
 * NOTE(review): the 'continue' / clone-failure handling between the
 * visible lines is elided in this listing.
 */
2901 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2903 struct sk_buff *nskb;
2904 struct l2cap_chan *chan;
2906 BT_DBG("conn %p", conn);
2908 mutex_lock(&conn->chan_lock);
2910 list_for_each_entry(chan, &conn->chan_l, list) {
2911 if (chan->chan_type != L2CAP_CHAN_RAW)
2914 /* Don't send frame to the channel it came from */
2915 if (bt_cb(skb)->l2cap.chan == chan)
2918 nskb = skb_clone(skb, GFP_KERNEL);
/* On ->recv() failure the clone is presumably freed — elided here. */
2921 if (chan->ops->recv(chan, nskb))
2925 mutex_unlock(&conn->chan_lock);
2928 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill an skb carrying one signalling command: L2CAP header
 * (CID chosen by link type: LE vs BR/EDR signalling channel), command
 * header (code/ident/len), then the payload.  Payload that does not fit
 * in conn->mtu is carried in continuation fragments chained on frag_list.
 * Returns the skb, or NULL on failure (error paths elided in this
 * listing).
 */
2929 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2930 u8 ident, u16 dlen, void *data)
2932 struct sk_buff *skb, **frag;
2933 struct l2cap_cmd_hdr *cmd;
2934 struct l2cap_hdr *lh;
2937 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2938 conn, code, ident, dlen);
/* MTU must at least hold the two headers. */
2940 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2943 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2944 count = min_t(unsigned int, conn->mtu, len);
2946 skb = bt_skb_alloc(count, GFP_KERNEL);
2950 lh = skb_put(skb, L2CAP_HDR_SIZE);
2951 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2953 if (conn->hcon->type == LE_LINK)
2954 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2956 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2958 cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
2961 cmd->len = cpu_to_le16(dlen);
/* First chunk of payload goes in the head skb. */
2964 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2965 skb_put_data(skb, data, count);
2971 /* Continuation fragments (no L2CAP header) */
2972 frag = &skb_shinfo(skb)->frag_list;
/* Loop header elided: iterates while payload remains. */
2974 count = min_t(unsigned int, conn->mtu, len);
2976 *frag = bt_skb_alloc(count, GFP_KERNEL);
2980 skb_put_data(*frag, data, count);
2985 frag = &(*frag)->next;
/* Parse one configuration option at *ptr: report its type and length and
 * decode the value by size (1/2/4 bytes little-endian; anything else is
 * returned as a pointer cast into *val).  Returns the total option length
 * consumed so the caller can advance through the option list.
 * NOTE(review): the switch on option length and the *ptr advance are
 * elided in this listing.
 */
2995 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2998 struct l2cap_conf_opt *opt = *ptr;
3001 len = L2CAP_CONF_OPT_SIZE + opt->len;
3009 *val = *((u8 *) opt->val);
3013 *val = get_unaligned_le16(opt->val);
3017 *val = get_unaligned_le32(opt->val);
/* Variable-length option: hand back a pointer to the raw bytes. */
3021 *val = (unsigned long) opt->val;
3025 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option at *ptr, encoding the value by length
 * (1/2/4 bytes little-endian, otherwise a memcpy from the pointer passed
 * in 'val').  'size' is the remaining buffer space; the option is dropped
 * if it does not fit.  Advances *ptr past the written option.
 */
3029 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3031 struct l2cap_conf_opt *opt = *ptr;
3033 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
/* Bounds check: silently skip the option rather than overflow. */
3035 if (size < L2CAP_CONF_OPT_SIZE + len)
3043 *((u8 *) opt->val) = val;
3047 put_unaligned_le16(val, opt->val);
3051 put_unaligned_le32(val, opt->val);
/* Larger payloads are passed by pointer disguised as unsigned long. */
3055 memcpy(opt->val, (void *) val, len);
3059 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build an Extended Flow Specification from the channel's local QoS
 * parameters (full spec for ERTM; best-effort defaults for streaming)
 * and append it as an EFS configuration option.
 */
3062 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3064 struct l2cap_conf_efs efs;
3066 switch (chan->mode) {
3067 case L2CAP_MODE_ERTM:
3068 efs.id = chan->local_id;
3069 efs.stype = chan->local_stype;
3070 efs.msdu = cpu_to_le16(chan->local_msdu);
3071 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3072 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3073 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3076 case L2CAP_MODE_STREAMING:
3078 efs.stype = L2CAP_SERV_BESTEFFORT;
3079 efs.msdu = cpu_to_le16(chan->local_msdu);
3080 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3089 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3090 (unsigned long) &efs, size);
/* Delayed-work handler for the ERTM ack timer: if any received frames
 * are still unacknowledged (buffer_seq has advanced past last_acked_seq),
 * send an RR/RNR acknowledgement.  Drops the channel reference taken when
 * the work was scheduled.
 */
3093 static void l2cap_ack_timeout(struct work_struct *work)
3095 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3099 BT_DBG("chan %p", chan);
3101 l2cap_chan_lock(chan);
3103 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3104 chan->last_acked_seq);
/* Condition on frames_to_ack elided in this listing. */
3107 l2cap_send_rr_or_rnr(chan, 0);
3109 l2cap_chan_unlock(chan);
3110 l2cap_chan_put(chan);
/* Reset all ERTM/streaming sequence state for a freshly configured
 * channel.  For ERTM proper, also arm the state machines, the three
 * delayed-work timers, and the SREJ/retransmission sequence lists.
 * Returns 0 on success or the error from l2cap_seq_list_init(), freeing
 * the srej_list if only the second allocation fails.
 */
3113 int l2cap_ertm_init(struct l2cap_chan *chan)
3117 chan->next_tx_seq = 0;
3118 chan->expected_tx_seq = 0;
3119 chan->expected_ack_seq = 0;
3120 chan->unacked_frames = 0;
3121 chan->buffer_seq = 0;
3122 chan->frames_sent = 0;
3123 chan->last_acked_seq = 0;
3125 chan->sdu_last_frag = NULL;
3128 skb_queue_head_init(&chan->tx_q);
/* AMP move state starts out stable on the BR/EDR controller. */
3130 chan->local_amp_id = AMP_ID_BREDR;
3131 chan->move_id = AMP_ID_BREDR;
3132 chan->move_state = L2CAP_MOVE_STABLE;
3133 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* Streaming mode needs none of the retransmission machinery below. */
3135 if (chan->mode != L2CAP_MODE_ERTM)
3138 chan->rx_state = L2CAP_RX_STATE_RECV;
3139 chan->tx_state = L2CAP_TX_STATE_XMIT;
3141 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3142 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3143 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3145 skb_queue_head_init(&chan->srej_q);
3147 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3151 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3153 l2cap_seq_list_free(&chan->srej_list);
/* Keep the requested ERTM/streaming mode only if the remote advertises
 * support for it in its feature mask; otherwise fall back to basic mode.
 */
3158 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3161 case L2CAP_MODE_STREAMING:
3162 case L2CAP_MODE_ERTM:
3163 if (l2cap_mode_supported(mode, remote_feat_mask))
3167 return L2CAP_MODE_BASIC;
/* Extended window size is usable only when we have A2MP locally and the
 * remote advertises the extended-window feature.
 */
3171 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3173 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3174 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
/* Extended flow specification is usable only when we have A2MP locally
 * and the remote advertises the extended-flow feature.
 */
3177 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3179 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3180 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3183 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3184 struct l2cap_conf_rfc *rfc)
3186 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3187 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3189 /* Class 1 devices have must have ERTM timeouts
3190 * exceeding the Link Supervision Timeout. The
3191 * default Link Supervision Timeout for AMP
3192 * controllers is 10 seconds.
3194 * Class 1 devices use 0xffffffff for their
3195 * best-effort flush timeout, so the clamping logic
3196 * will result in a timeout that meets the above
3197 * requirement. ERTM timeouts are 16-bit values, so
3198 * the maximum timeout is 65.535 seconds.
3201 /* Convert timeout to milliseconds and round */
3202 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3204 /* This is the recommended formula for class 2 devices
3205 * that start ERTM timers when packets are sent to the
3208 ertm_to = 3 * ertm_to + 500;
3210 if (ertm_to > 0xffff)
3213 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3214 rfc->monitor_timeout = rfc->retrans_timeout;
3216 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3217 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Choose the transmit window: if the requested window exceeds the classic
 * default and extended windows are supported, switch the channel to the
 * extended control field; otherwise clamp tx_win to the default.  ack_win
 * starts equal to tx_win in either case.
 */
3221 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3223 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3224 __l2cap_ews_supported(chan->conn)) {
3225 /* use extended control field */
3226 set_bit(FLAG_EXT_CTRL, &chan->flags);
3227 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3229 chan->tx_win = min_t(u16, chan->tx_win,
3230 L2CAP_DEFAULT_TX_WINDOW);
3231 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3233 chan->ack_win = chan->tx_win;
/* Build an outgoing Configuration Request into 'data' (at most
 * 'data_size' bytes): MTU option if non-default, then mode-specific
 * options — RFC (basic/ERTM/streaming), optional EFS, optional EWS, and
 * an FCS option when FCS can be disabled.  On the first request the mode
 * may first be downgraded via l2cap_select_mode() based on the remote
 * feature mask.  Returns the total request length (return statement
 * elided in this listing).
 */
3236 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3238 struct l2cap_conf_req *req = data;
3239 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3240 void *ptr = req->data;
3241 void *endptr = data + data_size;
3244 BT_DBG("chan %p", chan);
/* Mode selection happens only on the very first request/response. */
3246 if (chan->num_conf_req || chan->num_conf_rsp)
3249 switch (chan->mode) {
3250 case L2CAP_MODE_STREAMING:
3251 case L2CAP_MODE_ERTM:
3252 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3255 if (__l2cap_efs_supported(chan->conn))
3256 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3260 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3265 if (chan->imtu != L2CAP_DEFAULT_MTU)
3266 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
3268 switch (chan->mode) {
3269 case L2CAP_MODE_BASIC:
/* In basic mode an RFC option is only sent if the peer understands
 * ERTM or streaming at all.
 */
3273 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3274 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3277 rfc.mode = L2CAP_MODE_BASIC;
3279 rfc.max_transmit = 0;
3280 rfc.retrans_timeout = 0;
3281 rfc.monitor_timeout = 0;
3282 rfc.max_pdu_size = 0;
3284 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3285 (unsigned long) &rfc, endptr - ptr);
3288 case L2CAP_MODE_ERTM:
3289 rfc.mode = L2CAP_MODE_ERTM;
3290 rfc.max_transmit = chan->max_tx;
3292 __l2cap_set_ertm_timeouts(chan, &rfc);
/* MPS must leave room for the extended header, SDU length and FCS. */
3294 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3295 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3297 rfc.max_pdu_size = cpu_to_le16(size);
3299 l2cap_txwin_setup(chan);
3301 rfc.txwin_size = min_t(u16, chan->tx_win,
3302 L2CAP_DEFAULT_TX_WINDOW);
3304 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3305 (unsigned long) &rfc, endptr - ptr);
3307 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3308 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3310 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3311 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3312 chan->tx_win, endptr - ptr);
/* Offer FCS=none when allowed — saves two bytes per PDU. */
3314 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3315 if (chan->fcs == L2CAP_FCS_NONE ||
3316 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3317 chan->fcs = L2CAP_FCS_NONE;
3318 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3319 chan->fcs, endptr - ptr);
3323 case L2CAP_MODE_STREAMING:
3324 l2cap_txwin_setup(chan);
3325 rfc.mode = L2CAP_MODE_STREAMING;
3327 rfc.max_transmit = 0;
3328 rfc.retrans_timeout = 0;
3329 rfc.monitor_timeout = 0;
3331 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3332 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3334 rfc.max_pdu_size = cpu_to_le16(size);
3336 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3337 (unsigned long) &rfc, endptr - ptr);
3339 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3340 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3342 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3343 if (chan->fcs == L2CAP_FCS_NONE ||
3344 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3345 chan->fcs = L2CAP_FCS_NONE;
3346 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3347 chan->fcs, endptr - ptr);
3352 req->dcid = cpu_to_le16(chan->dcid);
3353 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated Configuration Request (chan->conf_req /
 * conf_len) and build our Configuration Response into 'data'.  First pass
 * walks the option list (MTU, flush timeout, QoS, RFC, FCS, EFS, EWS),
 * recording values and flagging unknown non-hint options; then the
 * requested mode is validated/negotiated and the response options are
 * written.  Returns the response length, or -ECONNREFUSED when the
 * request is unacceptable (several returns/breaks elided in this
 * listing).
 */
3358 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3360 struct l2cap_conf_rsp *rsp = data;
3361 void *ptr = rsp->data;
3362 void *endptr = data + data_size;
3363 void *req = chan->conf_req;
3364 int len = chan->conf_len;
3365 int type, hint, olen;
3367 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3368 struct l2cap_conf_efs efs;
3370 u16 mtu = L2CAP_DEFAULT_MTU;
3371 u16 result = L2CAP_CONF_SUCCESS;
3374 BT_DBG("chan %p", chan);
/* ---- pass 1: walk the received option list ---- */
3376 while (len >= L2CAP_CONF_OPT_SIZE) {
3377 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be silently ignored; mandatory ones may not. */
3381 hint = type & L2CAP_CONF_HINT;
3382 type &= L2CAP_CONF_MASK;
3385 case L2CAP_CONF_MTU:
3391 case L2CAP_CONF_FLUSH_TO:
3394 chan->flush_to = val;
3397 case L2CAP_CONF_QOS:
3400 case L2CAP_CONF_RFC:
3401 if (olen != sizeof(rfc))
3403 memcpy(&rfc, (void *) val, olen);
3406 case L2CAP_CONF_FCS:
3409 if (val == L2CAP_FCS_NONE)
3410 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3413 case L2CAP_CONF_EFS:
3414 if (olen != sizeof(efs))
3417 memcpy(&efs, (void *) val, olen);
3420 case L2CAP_CONF_EWS:
/* EWS is only legal when we advertise A2MP support. */
3423 if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3424 return -ECONNREFUSED;
3425 set_bit(FLAG_EXT_CTRL, &chan->flags);
3426 set_bit(CONF_EWS_RECV, &chan->conf_state);
3427 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3428 chan->remote_tx_win = val;
/* Unknown mandatory option: echo its type back with CONF_UNKNOWN. */
3434 result = L2CAP_CONF_UNKNOWN;
3435 *((u8 *) ptr++) = type;
/* ---- mode negotiation (first response only) ---- */
3440 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3443 switch (chan->mode) {
3444 case L2CAP_MODE_STREAMING:
3445 case L2CAP_MODE_ERTM:
3446 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3447 chan->mode = l2cap_select_mode(rfc.mode,
3448 chan->conn->feat_mask);
3453 if (__l2cap_efs_supported(chan->conn))
3454 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3456 return -ECONNREFUSED;
3459 if (chan->mode != rfc.mode)
3460 return -ECONNREFUSED;
/* Peer asked for a mode we did not settle on: reject once, refuse twice. */
3466 if (chan->mode != rfc.mode) {
3467 result = L2CAP_CONF_UNACCEPT;
3468 rfc.mode = chan->mode;
3470 if (chan->num_conf_rsp == 1)
3471 return -ECONNREFUSED;
3473 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3474 (unsigned long) &rfc, endptr - ptr);
3477 if (result == L2CAP_CONF_SUCCESS) {
3478 /* Configure output options and let the other side know
3479 * which ones we don't like. */
3481 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3482 result = L2CAP_CONF_UNACCEPT;
3485 set_bit(CONF_MTU_DONE, &chan->conf_state);
3487 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
/* EFS service type must be compatible with our local service type
 * (NOTRAFIC acts as a wildcard on either side).
 */
3490 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3491 efs.stype != L2CAP_SERV_NOTRAFIC &&
3492 efs.stype != chan->local_stype) {
3494 result = L2CAP_CONF_UNACCEPT;
3496 if (chan->num_conf_req >= 1)
3497 return -ECONNREFUSED;
3499 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3501 (unsigned long) &efs, endptr - ptr);
3503 /* Send PENDING Conf Rsp */
3504 result = L2CAP_CONF_PENDING;
3505 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3510 case L2CAP_MODE_BASIC:
3511 chan->fcs = L2CAP_FCS_NONE;
3512 set_bit(CONF_MODE_DONE, &chan->conf_state);
3515 case L2CAP_MODE_ERTM:
3516 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3517 chan->remote_tx_win = rfc.txwin_size;
3519 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3521 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the peer's MPS to what our ACL MTU can carry. */
3523 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3524 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3525 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3526 rfc.max_pdu_size = cpu_to_le16(size);
3527 chan->remote_mps = size;
3529 __l2cap_set_ertm_timeouts(chan, &rfc);
3531 set_bit(CONF_MODE_DONE, &chan->conf_state);
3533 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3534 sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3536 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3537 chan->remote_id = efs.id;
3538 chan->remote_stype = efs.stype;
3539 chan->remote_msdu = le16_to_cpu(efs.msdu);
3540 chan->remote_flush_to =
3541 le32_to_cpu(efs.flush_to);
3542 chan->remote_acc_lat =
3543 le32_to_cpu(efs.acc_lat);
3544 chan->remote_sdu_itime =
3545 le32_to_cpu(efs.sdu_itime);
3546 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3548 (unsigned long) &efs, endptr - ptr);
3552 case L2CAP_MODE_STREAMING:
3553 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3554 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3555 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3556 rfc.max_pdu_size = cpu_to_le16(size);
3557 chan->remote_mps = size;
3559 set_bit(CONF_MODE_DONE, &chan->conf_state);
3561 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3562 (unsigned long) &rfc, endptr - ptr);
3567 result = L2CAP_CONF_UNACCEPT;
3569 memset(&rfc, 0, sizeof(rfc));
3570 rfc.mode = chan->mode;
3573 if (result == L2CAP_CONF_SUCCESS)
3574 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3576 rsp->scid = cpu_to_le16(chan->dcid);
3577 rsp->result = cpu_to_le16(result);
3578 rsp->flags = cpu_to_le16(0);
/* Parse the peer's Configuration Response and rebuild our next
 * Configuration Request into 'data'.  Options the peer adjusted (MTU,
 * flush timeout, RFC, EWS, EFS, FCS) are folded into the channel and
 * echoed back; *result may be downgraded (e.g. to UNACCEPT for a too-
 * small MTU).  On success/pending, mode-specific timers and MPS are
 * committed to the channel.  Returns the new request length or
 * -ECONNREFUSED (some breaks/returns elided in this listing).
 */
3583 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3584 void *data, size_t size, u16 *result)
3586 struct l2cap_conf_req *req = data;
3587 void *ptr = req->data;
3588 void *endptr = data + size;
3591 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3592 struct l2cap_conf_efs efs;
3594 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3596 while (len >= L2CAP_CONF_OPT_SIZE) {
3597 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3602 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the legal minimum: refuse it but keep
 * negotiating with the minimum value.
 */
3605 if (val < L2CAP_DEFAULT_MIN_MTU) {
3606 *result = L2CAP_CONF_UNACCEPT;
3607 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3610 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3614 case L2CAP_CONF_FLUSH_TO:
3617 chan->flush_to = val;
3618 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3619 chan->flush_to, endptr - ptr);
3622 case L2CAP_CONF_RFC:
3623 if (olen != sizeof(rfc))
3625 memcpy(&rfc, (void *)val, olen);
/* A state-2 device may not have its mode changed by the peer. */
3626 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3627 rfc.mode != chan->mode)
3628 return -ECONNREFUSED;
3630 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3631 (unsigned long) &rfc, endptr - ptr);
3634 case L2CAP_CONF_EWS:
3637 chan->ack_win = min_t(u16, val, chan->ack_win);
3638 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3639 chan->tx_win, endptr - ptr);
3642 case L2CAP_CONF_EFS:
3643 if (olen != sizeof(efs))
3645 memcpy(&efs, (void *)val, olen);
3646 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3647 efs.stype != L2CAP_SERV_NOTRAFIC &&
3648 efs.stype != chan->local_stype)
3649 return -ECONNREFUSED;
3650 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3651 (unsigned long) &efs, endptr - ptr);
3654 case L2CAP_CONF_FCS:
3657 if (*result == L2CAP_CONF_PENDING)
3658 if (val == L2CAP_FCS_NONE)
3659 set_bit(CONF_RECV_NO_FCS,
/* Basic mode cannot be renegotiated away by the peer's response. */
3665 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3666 return -ECONNREFUSED;
3668 chan->mode = rfc.mode;
3670 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3672 case L2CAP_MODE_ERTM:
3673 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3674 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3675 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3676 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3677 chan->ack_win = min_t(u16, chan->ack_win,
3680 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3681 chan->local_msdu = le16_to_cpu(efs.msdu);
3682 chan->local_sdu_itime =
3683 le32_to_cpu(efs.sdu_itime);
3684 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3685 chan->local_flush_to =
3686 le32_to_cpu(efs.flush_to);
3690 case L2CAP_MODE_STREAMING:
3691 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3695 req->dcid = cpu_to_le16(chan->dcid);
3696 req->flags = cpu_to_le16(0);
/* Fill in a minimal Configuration Response header (scid/result/flags)
 * with no options.  Returns the response length (return elided in this
 * listing).
 */
3701 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3702 u16 result, u16 flags)
3704 struct l2cap_conf_rsp *rsp = data;
3705 void *ptr = rsp->data;
3707 BT_DBG("chan %p", chan);
3709 rsp->scid = cpu_to_le16(chan->dcid);
3710 rsp->result = cpu_to_le16(result);
3711 rsp->flags = cpu_to_le16(flags);
/* Send the deferred LE Credit Based Connection Response for a channel
 * whose acceptance was postponed (e.g. by a userspace listener), using
 * the ident saved from the original request.
 */
3716 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3718 struct l2cap_le_conn_rsp rsp;
3719 struct l2cap_conn *conn = chan->conn;
3721 BT_DBG("chan %p", chan);
3723 rsp.dcid = cpu_to_le16(chan->scid);
3724 rsp.mtu = cpu_to_le16(chan->imtu);
3725 rsp.mps = cpu_to_le16(chan->mps);
3726 rsp.credits = cpu_to_le16(chan->rx_credits);
3727 rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3729 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Send the deferred BR/EDR Connection (or AMP Create Channel) Response
 * for a previously deferred channel, then kick off configuration by
 * sending our first Configuration Request if one has not gone out yet.
 */
3733 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3735 struct l2cap_conn_rsp rsp;
3736 struct l2cap_conn *conn = chan->conn;
3740 rsp.scid = cpu_to_le16(chan->dcid);
3741 rsp.dcid = cpu_to_le16(chan->scid);
3742 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3743 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Response opcode depends on whether this is an AMP channel — the
 * condition is elided in this listing.
 */
3746 rsp_code = L2CAP_CREATE_CHAN_RSP;
3748 rsp_code = L2CAP_CONN_RSP;
3750 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3752 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* Only send the config request once. */
3754 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3757 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3758 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3759 chan->num_conf_req++;
/* Extract the final RFC (and extended window) values from a successful
 * Configuration Response and commit them to the channel: timeouts, MPS
 * and ack window for ERTM, MPS for streaming.  Sane defaults cover a
 * peer that omitted the options entirely.
 */
3762 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3766 /* Use sane default values in case a misbehaving remote device
3767 * did not send an RFC or extended window size option.
3769 u16 txwin_ext = chan->ack_win;
3770 struct l2cap_conf_rfc rfc = {
3772 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3773 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3774 .max_pdu_size = cpu_to_le16(chan->imtu),
3775 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3778 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM and streaming channels carry RFC state worth extracting. */
3780 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3783 while (len >= L2CAP_CONF_OPT_SIZE) {
3784 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3789 case L2CAP_CONF_RFC:
3790 if (olen != sizeof(rfc))
3792 memcpy(&rfc, (void *)val, olen);
3794 case L2CAP_CONF_EWS:
3803 case L2CAP_MODE_ERTM:
3804 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3805 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3806 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Extended control: window comes from EWS; otherwise from the RFC. */
3807 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3808 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3810 chan->ack_win = min_t(u16, chan->ack_win,
3813 case L2CAP_MODE_STREAMING:
3814 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it rejects our pending
 * Information Request (matching ident), treat the feature exchange as
 * done and resume connection setup.
 */
3818 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3819 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3822 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
/* Guard against truncated PDUs. */
3824 if (cmd_len < sizeof(*rej))
3827 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3830 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3831 cmd->ident == conn->info_ident) {
3832 cancel_delayed_work(&conn->info_timer);
3834 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3835 conn->info_ident = 0;
3837 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listening channel for
 * the PSM, check link security and the dynamic SCID range, create the
 * child channel, pick a state/result (success, pending on security or
 * deferred accept), and send the Connection Response.  May also trigger
 * the information (feature-mask) exchange and the first Configuration
 * Request.  Returns the new channel or NULL (goto labels and some exits
 * are elided in this listing).
 */
3843 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3844 struct l2cap_cmd_hdr *cmd,
3845 u8 *data, u8 rsp_code, u8 amp_id)
3847 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3848 struct l2cap_conn_rsp rsp;
3849 struct l2cap_chan *chan = NULL, *pchan;
3850 int result, status = L2CAP_CS_NO_INFO;
3852 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3853 __le16 psm = req->psm;
3855 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3857 /* Check if we have socket listening on psm */
3858 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3859 &conn->hcon->dst, ACL_LINK);
3861 result = L2CAP_CR_BAD_PSM;
3865 mutex_lock(&conn->chan_lock);
3866 l2cap_chan_lock(pchan);
3868 /* Check if the ACL is secure enough (if not SDP) */
3869 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3870 !hci_conn_check_link_mode(conn->hcon)) {
3871 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3872 result = L2CAP_CR_SEC_BLOCK;
3876 result = L2CAP_CR_NO_MEM;
3878 /* Check for valid dynamic CID range (as per Erratum 3253) */
3879 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
3880 result = L2CAP_CR_INVALID_SCID;
3884 /* Check if we already have channel with that dcid */
3885 if (__l2cap_get_chan_by_dcid(conn, scid)) {
3886 result = L2CAP_CR_SCID_IN_USE;
3890 chan = pchan->ops->new_connection(pchan);
3894 /* For certain devices (ex: HID mouse), support for authentication,
3895 * pairing and bonding is optional. For such devices, inorder to avoid
3896 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3897 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3899 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3901 bacpy(&chan->src, &conn->hcon->src);
3902 bacpy(&chan->dst, &conn->hcon->dst);
3903 chan->src_type = bdaddr_src_type(conn->hcon);
3904 chan->dst_type = bdaddr_dst_type(conn->hcon);
3907 chan->local_amp_id = amp_id;
3909 __l2cap_chan_add(conn, chan);
3913 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3915 chan->ident = cmd->ident;
3917 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3918 if (l2cap_chan_check_security(chan, false)) {
/* Security OK: either defer to the listener or move straight on. */
3919 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3920 l2cap_state_change(chan, BT_CONNECT2);
3921 result = L2CAP_CR_PEND;
3922 status = L2CAP_CS_AUTHOR_PEND;
3923 chan->ops->defer(chan);
3925 /* Force pending result for AMP controllers.
3926 * The connection will succeed after the
3927 * physical link is up.
3929 if (amp_id == AMP_ID_BREDR) {
3930 l2cap_state_change(chan, BT_CONFIG);
3931 result = L2CAP_CR_SUCCESS;
3933 l2cap_state_change(chan, BT_CONNECT2);
3934 result = L2CAP_CR_PEND;
3936 status = L2CAP_CS_NO_INFO;
3939 l2cap_state_change(chan, BT_CONNECT2);
3940 result = L2CAP_CR_PEND;
3941 status = L2CAP_CS_AUTHEN_PEND;
/* Feature exchange not done yet: answer pending for now. */
3944 l2cap_state_change(chan, BT_CONNECT2);
3945 result = L2CAP_CR_PEND;
3946 status = L2CAP_CS_NO_INFO;
3950 l2cap_chan_unlock(pchan);
3951 mutex_unlock(&conn->chan_lock);
3952 l2cap_chan_put(pchan);
3955 rsp.scid = cpu_to_le16(scid);
3956 rsp.dcid = cpu_to_le16(dcid);
3957 rsp.result = cpu_to_le16(result);
3958 rsp.status = cpu_to_le16(status);
3959 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* Pending with no info yet: start the feature-mask information request. */
3961 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3962 struct l2cap_info_req info;
3963 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3965 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3966 conn->info_ident = l2cap_get_ident(conn);
3968 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3970 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3971 sizeof(info), &info);
/* Successful connect: immediately send our first Configuration Request. */
3974 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3975 result == L2CAP_CR_SUCCESS) {
3977 set_bit(CONF_REQ_SENT, &chan->conf_state);
3978 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3979 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3980 chan->num_conf_req++;
/* Signalling handler for a Connection Request PDU: validate length,
 * notify the management interface of the (first) connection, and
 * delegate the real work to l2cap_connect().
 */
3986 static int l2cap_connect_req(struct l2cap_conn *conn,
3987 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3989 struct hci_dev *hdev = conn->hcon->hdev;
3990 struct hci_conn *hcon = conn->hcon;
3992 if (cmd_len < sizeof(struct l2cap_conn_req))
/* Report device_connected only once per ACL. */
3996 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3997 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3998 mgmt_device_connected(hdev, hcon, 0, NULL, 0);
3999 hci_dev_unlock(hdev);
4001 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection Response (or AMP Create Channel Response): locate
 * the channel by SCID (or, while still pending, by the request ident),
 * then act on the result — SUCCESS moves to BT_CONFIG and sends our
 * first Configuration Request, PEND marks the channel as waiting, any
 * other result deletes the channel with ECONNREFUSED.
 * NOTE(review): case labels for PEND/default and several breaks are
 * elided in this listing.
 */
4005 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
4006 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4009 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4010 u16 scid, dcid, result, status;
4011 struct l2cap_chan *chan;
4015 if (cmd_len < sizeof(*rsp))
4018 scid = __le16_to_cpu(rsp->scid);
4019 dcid = __le16_to_cpu(rsp->dcid);
4020 result = __le16_to_cpu(rsp->result);
4021 status = __le16_to_cpu(rsp->status);
4023 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4024 dcid, scid, result, status);
4026 mutex_lock(&conn->chan_lock);
4029 chan = __l2cap_get_chan_by_scid(conn, scid);
/* No SCID yet (pending response): fall back to matching by ident. */
4035 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4044 l2cap_chan_lock(chan);
4047 case L2CAP_CR_SUCCESS:
4048 l2cap_state_change(chan, BT_CONFIG);
4051 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4053 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4056 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4057 l2cap_build_conf_req(chan, req, sizeof(req)), req);
4058 chan->num_conf_req++;
4062 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4066 l2cap_chan_del(chan, ECONNREFUSED);
4070 l2cap_chan_unlock(chan);
4073 mutex_unlock(&conn->chan_lock);
/* Choose the effective FCS setting once configuration is done: no FCS
 * outside ERTM/streaming, CRC16 unless the peer asked for FCS=none.
 */
4078 static inline void set_default_fcs(struct l2cap_chan *chan)
4080 /* FCS is enabled only in ERTM or streaming mode, if one or both
4083 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4084 chan->fcs = L2CAP_FCS_NONE;
4085 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4086 chan->fcs = L2CAP_FCS_CRC16;
/* Complete a previously PENDING EFS negotiation: clear the local-pending
 * flag, mark output configuration done, and send a SUCCESS Configuration
 * Response built into the caller-provided buffer.
 */
4089 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4090 u8 ident, u16 flags)
4092 struct l2cap_conn *conn = chan->conn;
4094 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4097 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4098 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4100 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4101 l2cap_build_conf_rsp(chan, data,
4102 L2CAP_CONF_SUCCESS, flags), data);
/* Send a Command Reject with reason "invalid CID", echoing the offending
 * source/destination CIDs back to the peer.
 */
4105 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4108 struct l2cap_cmd_rej_cid rej;
4110 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4111 rej.scid = __cpu_to_le16(scid);
4112 rej.dcid = __cpu_to_le16(dcid);
4114 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle an incoming Configuration Request: locate the channel by DCID
 * (rejecting unknown/ineligible CIDs), accumulate option data across
 * continuation PDUs in chan->conf_req, and once the request is complete
 * parse it, send our response, and — when both directions are configured
 * — initialize ERTM and mark the channel ready.  May also send our own
 * first Configuration Request.  (Several gotos/returns and the unlock
 * label body are elided in this listing.)
 */
4117 static inline int l2cap_config_req(struct l2cap_conn *conn,
4118 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4121 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4124 struct l2cap_chan *chan;
4127 if (cmd_len < sizeof(*req))
4130 dcid = __le16_to_cpu(req->dcid);
4131 flags = __le16_to_cpu(req->flags);
4133 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4135 chan = l2cap_get_chan_by_scid(conn, dcid);
4137 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
/* Config is only legal in these states. */
4141 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
4142 chan->state != BT_CONNECTED) {
4143 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4148 /* Reject if config buffer is too small. */
4149 len = cmd_len - sizeof(*req);
4150 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4151 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4152 l2cap_build_conf_rsp(chan, rsp,
4153 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment of the option list. */
4158 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4159 chan->conf_len += len;
4161 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4162 /* Incomplete config. Send empty response. */
4163 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4164 l2cap_build_conf_rsp(chan, rsp,
4165 L2CAP_CONF_SUCCESS, flags), rsp);
4169 /* Complete config. */
4170 len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
4172 l2cap_send_disconn_req(chan, ECONNRESET);
4176 chan->ident = cmd->ident;
4177 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4178 chan->num_conf_rsp++;
4180 /* Reset config buffer. */
4183 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both sides configured: finish channel setup. */
4186 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4187 set_default_fcs(chan);
4189 if (chan->mode == L2CAP_MODE_ERTM ||
4190 chan->mode == L2CAP_MODE_STREAMING)
4191 err = l2cap_ertm_init(chan);
4194 l2cap_send_disconn_req(chan, -err);
4196 l2cap_chan_ready(chan);
4201 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4203 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4204 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4205 chan->num_conf_req++;
4208 /* Got Conf Rsp PENDING from remote side and assume we sent
4209 Conf Rsp PENDING in the code above */
4210 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4211 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4213 /* check compatibility */
4215 /* Send rsp for BR/EDR channel */
4217 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4219 chan->ident = cmd->ident;
4223 l2cap_chan_unlock(chan);
/* Handle an incoming Configuration Response by result: SUCCESS commits
 * the negotiated RFC values; PENDING may trigger an EFS response or an
 * AMP logical link; UNACCEPT re-parses and re-sends our request (up to
 * L2CAP_CONF_MAX_CONF_RSP attempts); anything else disconnects.  When
 * both directions are configured, initialize ERTM and mark the channel
 * ready.  (Default case, continuation handling details and the unlock
 * label are elided in this listing.)
 */
4227 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4228 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4231 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4232 u16 scid, flags, result;
4233 struct l2cap_chan *chan;
4234 int len = cmd_len - sizeof(*rsp);
4237 if (cmd_len < sizeof(*rsp))
4240 scid = __le16_to_cpu(rsp->scid);
4241 flags = __le16_to_cpu(rsp->flags);
4242 result = __le16_to_cpu(rsp->result);
4244 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4247 chan = l2cap_get_chan_by_scid(conn, scid);
4252 case L2CAP_CONF_SUCCESS:
4253 l2cap_conf_rfc_get(chan, rsp->data, len);
4254 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4257 case L2CAP_CONF_PENDING:
4258 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4260 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4263 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4264 buf, sizeof(buf), &result);
4266 l2cap_send_disconn_req(chan, ECONNRESET);
/* BR/EDR: answer directly; AMP: bring up the logical link first. */
4270 if (!chan->hs_hcon) {
4271 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4274 if (l2cap_check_efs(chan)) {
4275 amp_create_logical_link(chan);
4276 chan->ident = cmd->ident;
4282 case L2CAP_CONF_UNACCEPT:
4283 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Response options must fit in our request buffer. */
4286 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4287 l2cap_send_disconn_req(chan, ECONNRESET);
4291 /* throw out any old stored conf requests */
4292 result = L2CAP_CONF_SUCCESS;
4293 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4294 req, sizeof(req), &result);
4296 l2cap_send_disconn_req(chan, ECONNRESET);
4300 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4301 L2CAP_CONF_REQ, len, req);
4302 chan->num_conf_req++;
4303 if (result != L2CAP_CONF_SUCCESS)
/* Any other result: give up on the channel. */
4310 l2cap_chan_set_err(chan, ECONNRESET);
4312 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4313 l2cap_send_disconn_req(chan, ECONNRESET);
/* More option fragments follow: wait for the final response. */
4317 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4320 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4322 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4323 set_default_fcs(chan);
4325 if (chan->mode == L2CAP_MODE_ERTM ||
4326 chan->mode == L2CAP_MODE_STREAMING)
4327 err = l2cap_ertm_init(chan);
4330 l2cap_send_disconn_req(chan, -err);
4332 l2cap_chan_ready(chan);
4336 l2cap_chan_unlock(chan);
4340 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4341 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4344 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4345 struct l2cap_disconn_rsp rsp;
4347 struct l2cap_chan *chan;
4349 if (cmd_len != sizeof(*req))
4352 scid = __le16_to_cpu(req->scid);
4353 dcid = __le16_to_cpu(req->dcid);
4355 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4357 mutex_lock(&conn->chan_lock);
4359 chan = __l2cap_get_chan_by_scid(conn, dcid);
4361 mutex_unlock(&conn->chan_lock);
4362 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4366 l2cap_chan_hold(chan);
4367 l2cap_chan_lock(chan);
4369 rsp.dcid = cpu_to_le16(chan->scid);
4370 rsp.scid = cpu_to_le16(chan->dcid);
4371 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4373 chan->ops->set_shutdown(chan);
4375 l2cap_chan_del(chan, ECONNRESET);
4377 chan->ops->close(chan);
4379 l2cap_chan_unlock(chan);
4380 l2cap_chan_put(chan);
4382 mutex_unlock(&conn->chan_lock);
4387 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4388 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4391 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4393 struct l2cap_chan *chan;
4395 if (cmd_len != sizeof(*rsp))
4398 scid = __le16_to_cpu(rsp->scid);
4399 dcid = __le16_to_cpu(rsp->dcid);
4401 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4403 mutex_lock(&conn->chan_lock);
4405 chan = __l2cap_get_chan_by_scid(conn, scid);
4407 mutex_unlock(&conn->chan_lock);
4411 l2cap_chan_hold(chan);
4412 l2cap_chan_lock(chan);
4414 if (chan->state != BT_DISCONN) {
4415 l2cap_chan_unlock(chan);
4416 l2cap_chan_put(chan);
4417 mutex_unlock(&conn->chan_lock);
4421 l2cap_chan_del(chan, 0);
4423 chan->ops->close(chan);
4425 l2cap_chan_unlock(chan);
4426 l2cap_chan_put(chan);
4428 mutex_unlock(&conn->chan_lock);
4433 static inline int l2cap_information_req(struct l2cap_conn *conn,
4434 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4437 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4440 if (cmd_len != sizeof(*req))
4443 type = __le16_to_cpu(req->type);
4445 BT_DBG("type 0x%4.4x", type);
4447 if (type == L2CAP_IT_FEAT_MASK) {
4449 u32 feat_mask = l2cap_feat_mask;
4450 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4451 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4452 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4454 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4456 if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4457 feat_mask |= L2CAP_FEAT_EXT_FLOW
4458 | L2CAP_FEAT_EXT_WINDOW;
4460 put_unaligned_le32(feat_mask, rsp->data);
4461 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4463 } else if (type == L2CAP_IT_FIXED_CHAN) {
4465 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4467 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4468 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4469 rsp->data[0] = conn->local_fixed_chan;
4470 memset(rsp->data + 1, 0, 7);
4471 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4474 struct l2cap_info_rsp rsp;
4475 rsp.type = cpu_to_le16(type);
4476 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4477 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4484 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4485 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4488 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4491 if (cmd_len < sizeof(*rsp))
4494 type = __le16_to_cpu(rsp->type);
4495 result = __le16_to_cpu(rsp->result);
4497 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4499 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4500 if (cmd->ident != conn->info_ident ||
4501 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4504 cancel_delayed_work(&conn->info_timer);
4506 if (result != L2CAP_IR_SUCCESS) {
4507 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4508 conn->info_ident = 0;
4510 l2cap_conn_start(conn);
4516 case L2CAP_IT_FEAT_MASK:
4517 conn->feat_mask = get_unaligned_le32(rsp->data);
4519 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4520 struct l2cap_info_req req;
4521 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4523 conn->info_ident = l2cap_get_ident(conn);
4525 l2cap_send_cmd(conn, conn->info_ident,
4526 L2CAP_INFO_REQ, sizeof(req), &req);
4528 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4529 conn->info_ident = 0;
4531 l2cap_conn_start(conn);
4535 case L2CAP_IT_FIXED_CHAN:
4536 conn->remote_fixed_chan = rsp->data[0];
4537 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4538 conn->info_ident = 0;
4540 l2cap_conn_start(conn);
4547 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4548 struct l2cap_cmd_hdr *cmd,
4549 u16 cmd_len, void *data)
4551 struct l2cap_create_chan_req *req = data;
4552 struct l2cap_create_chan_rsp rsp;
4553 struct l2cap_chan *chan;
4554 struct hci_dev *hdev;
4557 if (cmd_len != sizeof(*req))
4560 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4563 psm = le16_to_cpu(req->psm);
4564 scid = le16_to_cpu(req->scid);
4566 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4568 /* For controller id 0 make BR/EDR connection */
4569 if (req->amp_id == AMP_ID_BREDR) {
4570 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4575 /* Validate AMP controller id */
4576 hdev = hci_dev_get(req->amp_id);
4580 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4585 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4588 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4589 struct hci_conn *hs_hcon;
4591 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4595 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4600 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4602 mgr->bredr_chan = chan;
4603 chan->hs_hcon = hs_hcon;
4604 chan->fcs = L2CAP_FCS_NONE;
4605 conn->mtu = hdev->block_mtu;
4614 rsp.scid = cpu_to_le16(scid);
4615 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4616 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4618 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4624 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4626 struct l2cap_move_chan_req req;
4629 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4631 ident = l2cap_get_ident(chan->conn);
4632 chan->ident = ident;
4634 req.icid = cpu_to_le16(chan->scid);
4635 req.dest_amp_id = dest_amp_id;
4637 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4640 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4643 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4645 struct l2cap_move_chan_rsp rsp;
4647 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4649 rsp.icid = cpu_to_le16(chan->dcid);
4650 rsp.result = cpu_to_le16(result);
4652 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4656 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4658 struct l2cap_move_chan_cfm cfm;
4660 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4662 chan->ident = l2cap_get_ident(chan->conn);
4664 cfm.icid = cpu_to_le16(chan->scid);
4665 cfm.result = cpu_to_le16(result);
4667 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4670 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4673 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4675 struct l2cap_move_chan_cfm cfm;
4677 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4679 cfm.icid = cpu_to_le16(icid);
4680 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4682 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4686 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4689 struct l2cap_move_chan_cfm_rsp rsp;
4691 BT_DBG("icid 0x%4.4x", icid);
4693 rsp.icid = cpu_to_le16(icid);
4694 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4697 static void __release_logical_link(struct l2cap_chan *chan)
4699 chan->hs_hchan = NULL;
4700 chan->hs_hcon = NULL;
4702 /* Placeholder - release the logical link */
4705 static void l2cap_logical_fail(struct l2cap_chan *chan)
4707 /* Logical link setup failed */
4708 if (chan->state != BT_CONNECTED) {
4709 /* Create channel failure, disconnect */
4710 l2cap_send_disconn_req(chan, ECONNRESET);
4714 switch (chan->move_role) {
4715 case L2CAP_MOVE_ROLE_RESPONDER:
4716 l2cap_move_done(chan);
4717 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4719 case L2CAP_MOVE_ROLE_INITIATOR:
4720 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4721 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4722 /* Remote has only sent pending or
4723 * success responses, clean up
4725 l2cap_move_done(chan);
4728 /* Other amp move states imply that the move
4729 * has already aborted
4731 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4736 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4737 struct hci_chan *hchan)
4739 struct l2cap_conf_rsp rsp;
4741 chan->hs_hchan = hchan;
4742 chan->hs_hcon->l2cap_data = chan->conn;
4744 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4746 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4749 set_default_fcs(chan);
4751 err = l2cap_ertm_init(chan);
4753 l2cap_send_disconn_req(chan, -err);
4755 l2cap_chan_ready(chan);
4759 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4760 struct hci_chan *hchan)
4762 chan->hs_hcon = hchan->conn;
4763 chan->hs_hcon->l2cap_data = chan->conn;
4765 BT_DBG("move_state %d", chan->move_state);
4767 switch (chan->move_state) {
4768 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4769 /* Move confirm will be sent after a success
4770 * response is received
4772 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4774 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4775 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4776 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4777 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4778 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4779 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4780 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4781 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4782 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4786 /* Move was not in expected state, free the channel */
4787 __release_logical_link(chan);
4789 chan->move_state = L2CAP_MOVE_STABLE;
4793 /* Call with chan locked */
4794 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4797 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4800 l2cap_logical_fail(chan);
4801 __release_logical_link(chan);
4805 if (chan->state != BT_CONNECTED) {
4806 /* Ignore logical link if channel is on BR/EDR */
4807 if (chan->local_amp_id != AMP_ID_BREDR)
4808 l2cap_logical_finish_create(chan, hchan);
4810 l2cap_logical_finish_move(chan, hchan);
4814 void l2cap_move_start(struct l2cap_chan *chan)
4816 BT_DBG("chan %p", chan);
4818 if (chan->local_amp_id == AMP_ID_BREDR) {
4819 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4821 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4822 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4823 /* Placeholder - start physical link setup */
4825 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4826 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4828 l2cap_move_setup(chan);
4829 l2cap_send_move_chan_req(chan, 0);
4833 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4834 u8 local_amp_id, u8 remote_amp_id)
4836 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4837 local_amp_id, remote_amp_id);
4839 chan->fcs = L2CAP_FCS_NONE;
4841 /* Outgoing channel on AMP */
4842 if (chan->state == BT_CONNECT) {
4843 if (result == L2CAP_CR_SUCCESS) {
4844 chan->local_amp_id = local_amp_id;
4845 l2cap_send_create_chan_req(chan, remote_amp_id);
4847 /* Revert to BR/EDR connect */
4848 l2cap_send_conn_req(chan);
4854 /* Incoming channel on AMP */
4855 if (__l2cap_no_conn_pending(chan)) {
4856 struct l2cap_conn_rsp rsp;
4858 rsp.scid = cpu_to_le16(chan->dcid);
4859 rsp.dcid = cpu_to_le16(chan->scid);
4861 if (result == L2CAP_CR_SUCCESS) {
4862 /* Send successful response */
4863 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4864 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4866 /* Send negative response */
4867 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4868 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4871 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4874 if (result == L2CAP_CR_SUCCESS) {
4875 l2cap_state_change(chan, BT_CONFIG);
4876 set_bit(CONF_REQ_SENT, &chan->conf_state);
4877 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4879 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4880 chan->num_conf_req++;
4885 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4888 l2cap_move_setup(chan);
4889 chan->move_id = local_amp_id;
4890 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4892 l2cap_send_move_chan_req(chan, remote_amp_id);
4895 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4897 struct hci_chan *hchan = NULL;
4899 /* Placeholder - get hci_chan for logical link */
4902 if (hchan->state == BT_CONNECTED) {
4903 /* Logical link is ready to go */
4904 chan->hs_hcon = hchan->conn;
4905 chan->hs_hcon->l2cap_data = chan->conn;
4906 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4907 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4909 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4911 /* Wait for logical link to be ready */
4912 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4915 /* Logical link not available */
4916 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4920 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4922 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4924 if (result == -EINVAL)
4925 rsp_result = L2CAP_MR_BAD_ID;
4927 rsp_result = L2CAP_MR_NOT_ALLOWED;
4929 l2cap_send_move_chan_rsp(chan, rsp_result);
4932 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4933 chan->move_state = L2CAP_MOVE_STABLE;
4935 /* Restart data transmission */
4936 l2cap_ertm_send(chan);
4939 /* Invoke with locked chan */
4940 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4942 u8 local_amp_id = chan->local_amp_id;
4943 u8 remote_amp_id = chan->remote_amp_id;
4945 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4946 chan, result, local_amp_id, remote_amp_id);
4948 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
4951 if (chan->state != BT_CONNECTED) {
4952 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4953 } else if (result != L2CAP_MR_SUCCESS) {
4954 l2cap_do_move_cancel(chan, result);
4956 switch (chan->move_role) {
4957 case L2CAP_MOVE_ROLE_INITIATOR:
4958 l2cap_do_move_initiate(chan, local_amp_id,
4961 case L2CAP_MOVE_ROLE_RESPONDER:
4962 l2cap_do_move_respond(chan, result);
4965 l2cap_do_move_cancel(chan, result);
4971 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4972 struct l2cap_cmd_hdr *cmd,
4973 u16 cmd_len, void *data)
4975 struct l2cap_move_chan_req *req = data;
4976 struct l2cap_move_chan_rsp rsp;
4977 struct l2cap_chan *chan;
4979 u16 result = L2CAP_MR_NOT_ALLOWED;
4981 if (cmd_len != sizeof(*req))
4984 icid = le16_to_cpu(req->icid);
4986 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4988 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4991 chan = l2cap_get_chan_by_dcid(conn, icid);
4993 rsp.icid = cpu_to_le16(icid);
4994 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4995 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
5000 chan->ident = cmd->ident;
5002 if (chan->scid < L2CAP_CID_DYN_START ||
5003 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
5004 (chan->mode != L2CAP_MODE_ERTM &&
5005 chan->mode != L2CAP_MODE_STREAMING)) {
5006 result = L2CAP_MR_NOT_ALLOWED;
5007 goto send_move_response;
5010 if (chan->local_amp_id == req->dest_amp_id) {
5011 result = L2CAP_MR_SAME_ID;
5012 goto send_move_response;
5015 if (req->dest_amp_id != AMP_ID_BREDR) {
5016 struct hci_dev *hdev;
5017 hdev = hci_dev_get(req->dest_amp_id);
5018 if (!hdev || hdev->dev_type != HCI_AMP ||
5019 !test_bit(HCI_UP, &hdev->flags)) {
5023 result = L2CAP_MR_BAD_ID;
5024 goto send_move_response;
5029 /* Detect a move collision. Only send a collision response
5030 * if this side has "lost", otherwise proceed with the move.
5031 * The winner has the larger bd_addr.
5033 if ((__chan_is_moving(chan) ||
5034 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5035 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5036 result = L2CAP_MR_COLLISION;
5037 goto send_move_response;
5040 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5041 l2cap_move_setup(chan);
5042 chan->move_id = req->dest_amp_id;
5045 if (req->dest_amp_id == AMP_ID_BREDR) {
5046 /* Moving to BR/EDR */
5047 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5048 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5049 result = L2CAP_MR_PEND;
5051 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5052 result = L2CAP_MR_SUCCESS;
5055 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5056 /* Placeholder - uncomment when amp functions are available */
5057 /*amp_accept_physical(chan, req->dest_amp_id);*/
5058 result = L2CAP_MR_PEND;
5062 l2cap_send_move_chan_rsp(chan, result);
5064 l2cap_chan_unlock(chan);
5069 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5071 struct l2cap_chan *chan;
5072 struct hci_chan *hchan = NULL;
5074 chan = l2cap_get_chan_by_scid(conn, icid);
5076 l2cap_send_move_chan_cfm_icid(conn, icid);
5080 __clear_chan_timer(chan);
5081 if (result == L2CAP_MR_PEND)
5082 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5084 switch (chan->move_state) {
5085 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5086 /* Move confirm will be sent when logical link
5089 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5091 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5092 if (result == L2CAP_MR_PEND) {
5094 } else if (test_bit(CONN_LOCAL_BUSY,
5095 &chan->conn_state)) {
5096 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5098 /* Logical link is up or moving to BR/EDR,
5101 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5102 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5105 case L2CAP_MOVE_WAIT_RSP:
5107 if (result == L2CAP_MR_SUCCESS) {
5108 /* Remote is ready, send confirm immediately
5109 * after logical link is ready
5111 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5113 /* Both logical link and move success
5114 * are required to confirm
5116 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5119 /* Placeholder - get hci_chan for logical link */
5121 /* Logical link not available */
5122 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5126 /* If the logical link is not yet connected, do not
5127 * send confirmation.
5129 if (hchan->state != BT_CONNECTED)
5132 /* Logical link is already ready to go */
5134 chan->hs_hcon = hchan->conn;
5135 chan->hs_hcon->l2cap_data = chan->conn;
5137 if (result == L2CAP_MR_SUCCESS) {
5138 /* Can confirm now */
5139 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5141 /* Now only need move success
5144 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5147 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5150 /* Any other amp move state means the move failed. */
5151 chan->move_id = chan->local_amp_id;
5152 l2cap_move_done(chan);
5153 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5156 l2cap_chan_unlock(chan);
5159 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5162 struct l2cap_chan *chan;
5164 chan = l2cap_get_chan_by_ident(conn, ident);
5166 /* Could not locate channel, icid is best guess */
5167 l2cap_send_move_chan_cfm_icid(conn, icid);
5171 __clear_chan_timer(chan);
5173 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5174 if (result == L2CAP_MR_COLLISION) {
5175 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5177 /* Cleanup - cancel move */
5178 chan->move_id = chan->local_amp_id;
5179 l2cap_move_done(chan);
5183 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5185 l2cap_chan_unlock(chan);
5188 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5189 struct l2cap_cmd_hdr *cmd,
5190 u16 cmd_len, void *data)
5192 struct l2cap_move_chan_rsp *rsp = data;
5195 if (cmd_len != sizeof(*rsp))
5198 icid = le16_to_cpu(rsp->icid);
5199 result = le16_to_cpu(rsp->result);
5201 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5203 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5204 l2cap_move_continue(conn, icid, result);
5206 l2cap_move_fail(conn, cmd->ident, icid, result);
5211 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5212 struct l2cap_cmd_hdr *cmd,
5213 u16 cmd_len, void *data)
5215 struct l2cap_move_chan_cfm *cfm = data;
5216 struct l2cap_chan *chan;
5219 if (cmd_len != sizeof(*cfm))
5222 icid = le16_to_cpu(cfm->icid);
5223 result = le16_to_cpu(cfm->result);
5225 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5227 chan = l2cap_get_chan_by_dcid(conn, icid);
5229 /* Spec requires a response even if the icid was not found */
5230 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5234 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5235 if (result == L2CAP_MC_CONFIRMED) {
5236 chan->local_amp_id = chan->move_id;
5237 if (chan->local_amp_id == AMP_ID_BREDR)
5238 __release_logical_link(chan);
5240 chan->move_id = chan->local_amp_id;
5243 l2cap_move_done(chan);
5246 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5248 l2cap_chan_unlock(chan);
5253 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5254 struct l2cap_cmd_hdr *cmd,
5255 u16 cmd_len, void *data)
5257 struct l2cap_move_chan_cfm_rsp *rsp = data;
5258 struct l2cap_chan *chan;
5261 if (cmd_len != sizeof(*rsp))
5264 icid = le16_to_cpu(rsp->icid);
5266 BT_DBG("icid 0x%4.4x", icid);
5268 chan = l2cap_get_chan_by_scid(conn, icid);
5272 __clear_chan_timer(chan);
5274 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5275 chan->local_amp_id = chan->move_id;
5277 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5278 __release_logical_link(chan);
5280 l2cap_move_done(chan);
5283 l2cap_chan_unlock(chan);
5288 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5289 struct l2cap_cmd_hdr *cmd,
5290 u16 cmd_len, u8 *data)
5292 struct hci_conn *hcon = conn->hcon;
5293 struct l2cap_conn_param_update_req *req;
5294 struct l2cap_conn_param_update_rsp rsp;
5295 u16 min, max, latency, to_multiplier;
5298 if (hcon->role != HCI_ROLE_MASTER)
5301 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5304 req = (struct l2cap_conn_param_update_req *) data;
5305 min = __le16_to_cpu(req->min);
5306 max = __le16_to_cpu(req->max);
5307 latency = __le16_to_cpu(req->latency);
5308 to_multiplier = __le16_to_cpu(req->to_multiplier);
5310 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5311 min, max, latency, to_multiplier);
5313 memset(&rsp, 0, sizeof(rsp));
5315 err = hci_check_conn_params(min, max, latency, to_multiplier);
5317 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5319 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5321 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5327 store_hint = hci_le_conn_update(hcon, min, max, latency,
5329 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5330 store_hint, min, max, latency,
5338 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5339 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5342 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5343 struct hci_conn *hcon = conn->hcon;
5344 u16 dcid, mtu, mps, credits, result;
5345 struct l2cap_chan *chan;
5348 if (cmd_len < sizeof(*rsp))
5351 dcid = __le16_to_cpu(rsp->dcid);
5352 mtu = __le16_to_cpu(rsp->mtu);
5353 mps = __le16_to_cpu(rsp->mps);
5354 credits = __le16_to_cpu(rsp->credits);
5355 result = __le16_to_cpu(rsp->result);
5357 if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
5358 dcid < L2CAP_CID_DYN_START ||
5359 dcid > L2CAP_CID_LE_DYN_END))
5362 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5363 dcid, mtu, mps, credits, result);
5365 mutex_lock(&conn->chan_lock);
5367 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5375 l2cap_chan_lock(chan);
5378 case L2CAP_CR_LE_SUCCESS:
5379 if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5387 chan->remote_mps = mps;
5388 chan->tx_credits = credits;
5389 l2cap_chan_ready(chan);
5392 case L2CAP_CR_LE_AUTHENTICATION:
5393 case L2CAP_CR_LE_ENCRYPTION:
5394 /* If we already have MITM protection we can't do
5397 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5398 l2cap_chan_del(chan, ECONNREFUSED);
5402 sec_level = hcon->sec_level + 1;
5403 if (chan->sec_level < sec_level)
5404 chan->sec_level = sec_level;
5406 /* We'll need to send a new Connect Request */
5407 clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5409 smp_conn_security(hcon, chan->sec_level);
5413 l2cap_chan_del(chan, ECONNREFUSED);
5417 l2cap_chan_unlock(chan);
5420 mutex_unlock(&conn->chan_lock);
5425 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5426 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5431 switch (cmd->code) {
5432 case L2CAP_COMMAND_REJ:
5433 l2cap_command_rej(conn, cmd, cmd_len, data);
5436 case L2CAP_CONN_REQ:
5437 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5440 case L2CAP_CONN_RSP:
5441 case L2CAP_CREATE_CHAN_RSP:
5442 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5445 case L2CAP_CONF_REQ:
5446 err = l2cap_config_req(conn, cmd, cmd_len, data);
5449 case L2CAP_CONF_RSP:
5450 l2cap_config_rsp(conn, cmd, cmd_len, data);
5453 case L2CAP_DISCONN_REQ:
5454 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5457 case L2CAP_DISCONN_RSP:
5458 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5461 case L2CAP_ECHO_REQ:
5462 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5465 case L2CAP_ECHO_RSP:
5468 case L2CAP_INFO_REQ:
5469 err = l2cap_information_req(conn, cmd, cmd_len, data);
5472 case L2CAP_INFO_RSP:
5473 l2cap_information_rsp(conn, cmd, cmd_len, data);
5476 case L2CAP_CREATE_CHAN_REQ:
5477 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5480 case L2CAP_MOVE_CHAN_REQ:
5481 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5484 case L2CAP_MOVE_CHAN_RSP:
5485 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5488 case L2CAP_MOVE_CHAN_CFM:
5489 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5492 case L2CAP_MOVE_CHAN_CFM_RSP:
5493 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5497 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5505 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5506 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5509 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5510 struct l2cap_le_conn_rsp rsp;
5511 struct l2cap_chan *chan, *pchan;
5512 u16 dcid, scid, credits, mtu, mps;
5516 if (cmd_len != sizeof(*req))
5519 scid = __le16_to_cpu(req->scid);
5520 mtu = __le16_to_cpu(req->mtu);
5521 mps = __le16_to_cpu(req->mps);
5526 if (mtu < 23 || mps < 23)
5529 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5532 /* Check if we have socket listening on psm */
5533 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5534 &conn->hcon->dst, LE_LINK);
5536 result = L2CAP_CR_LE_BAD_PSM;
5541 mutex_lock(&conn->chan_lock);
5542 l2cap_chan_lock(pchan);
5544 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5546 result = L2CAP_CR_LE_AUTHENTICATION;
5548 goto response_unlock;
5551 /* Check for valid dynamic CID range */
5552 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5553 result = L2CAP_CR_LE_INVALID_SCID;
5555 goto response_unlock;
5558 /* Check if we already have channel with that dcid */
5559 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5560 result = L2CAP_CR_LE_SCID_IN_USE;
5562 goto response_unlock;
5565 chan = pchan->ops->new_connection(pchan);
5567 result = L2CAP_CR_LE_NO_MEM;
5568 goto response_unlock;
5571 bacpy(&chan->src, &conn->hcon->src);
5572 bacpy(&chan->dst, &conn->hcon->dst);
5573 chan->src_type = bdaddr_src_type(conn->hcon);
5574 chan->dst_type = bdaddr_dst_type(conn->hcon);
5578 chan->remote_mps = mps;
5580 __l2cap_chan_add(conn, chan);
5582 l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));
5585 credits = chan->rx_credits;
5587 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5589 chan->ident = cmd->ident;
5591 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5592 l2cap_state_change(chan, BT_CONNECT2);
5593 /* The following result value is actually not defined
5594 * for LE CoC but we use it to let the function know
5595 * that it should bail out after doing its cleanup
5596 * instead of sending a response.
5598 result = L2CAP_CR_PEND;
5599 chan->ops->defer(chan);
5601 l2cap_chan_ready(chan);
5602 result = L2CAP_CR_LE_SUCCESS;
5606 l2cap_chan_unlock(pchan);
5607 mutex_unlock(&conn->chan_lock);
5608 l2cap_chan_put(pchan);
5610 if (result == L2CAP_CR_PEND)
5615 rsp.mtu = cpu_to_le16(chan->imtu);
5616 rsp.mps = cpu_to_le16(chan->mps);
5622 rsp.dcid = cpu_to_le16(dcid);
5623 rsp.credits = cpu_to_le16(credits);
5624 rsp.result = cpu_to_le16(result);
5626 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5631 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5632 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5635 struct l2cap_le_credits *pkt;
5636 struct l2cap_chan *chan;
5637 u16 cid, credits, max_credits;
5639 if (cmd_len != sizeof(*pkt))
5642 pkt = (struct l2cap_le_credits *) data;
5643 cid = __le16_to_cpu(pkt->cid);
5644 credits = __le16_to_cpu(pkt->credits);
5646 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5648 chan = l2cap_get_chan_by_dcid(conn, cid);
5652 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5653 if (credits > max_credits) {
5654 BT_ERR("LE credits overflow");
5655 l2cap_send_disconn_req(chan, ECONNRESET);
5656 l2cap_chan_unlock(chan);
5658 /* Return 0 so that we don't trigger an unnecessary
5659 * command reject packet.
5664 chan->tx_credits += credits;
5666 /* Resume sending */
5667 l2cap_le_flowctl_send(chan);
5669 if (chan->tx_credits)
5670 chan->ops->resume(chan);
5672 l2cap_chan_unlock(chan);
5677 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5678 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5681 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5682 struct l2cap_chan *chan;
5684 if (cmd_len < sizeof(*rej))
5687 mutex_lock(&conn->chan_lock);
5689 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5693 l2cap_chan_lock(chan);
5694 l2cap_chan_del(chan, ECONNREFUSED);
5695 l2cap_chan_unlock(chan);
5698 mutex_unlock(&conn->chan_lock);
/* Dispatch a single LE signaling command to its specific handler.
 * Handlers that return an error cause the caller to send a command
 * reject; unknown opcodes are logged and rejected.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);

	case L2CAP_CONN_PARAM_UPDATE_RSP:

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);

		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process an skb received on the LE signaling channel (CID 0x0005).
 * LE signaling carries exactly one command per PDU; validate the
 * header, dispatch it, and send a command reject if the handler failed.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;

	/* LE signaling is only valid on an LE link */
	if (hcon->type != LE_LINK)

	if (skb->len < L2CAP_CMD_HDR_SIZE)

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* An ident of 0 is invalid per the spec; length must match exactly */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
		struct l2cap_cmd_rej_unk rej;

		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Process an skb received on the BR/EDR signaling channel (CID 0x0001).
 * Unlike LE, a BR/EDR signaling PDU may contain several commands, so
 * iterate over them, dispatching each and rejecting failures.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
	struct hci_conn *hcon = conn->hcon;
	u8 *data = skb->data;
	struct l2cap_cmd_hdr cmd;

	/* Mirror raw signaling traffic to any raw (sniffer) channels */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)

	while (len >= L2CAP_CMD_HDR_SIZE) {
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,

		/* ident 0 is reserved; a command cannot claim more
		 * payload than remains in the PDU.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify the CRC-16 Frame Check Sequence on a received ERTM/streaming
 * frame.  The FCS covers the L2CAP header (enhanced or extended control
 * field) plus the payload; the two FCS bytes are trimmed off the skb
 * before comparing the received value with a locally computed CRC.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
	u16 our_fcs, rcv_fcs;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* After skb_trim, skb->data + skb->len points at the
		 * (still present in memory) trailing FCS bytes.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
/* Answer a poll (P=1) from the peer: send RNR if we are locally busy,
 * otherwise flush pending I-frames and, if the F-bit still has not been
 * carried by any frame, send an RR s-frame so the poll is acknowledged.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);

	/* Peer just left its busy state: restart the retransmission
	 * timer if frames are still unacknowledged.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
/* Append @new_frag to @skb's fragment list, maintaining *last_frag as a
 * tail pointer for O(1) appends, and account the added bytes in the head
 * skb's len/data_len/truesize.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
/* Reassemble an SDU from ERTM/streaming I-frames according to the SAR
 * (segmentation and reassembly) bits.  Unsegmented frames go straight to
 * ->recv(); START frames pull the 16-bit SDU length and begin a partial
 * SDU in chan->sdu; CONTINUE/END frames are appended until the full
 * sdu_len has arrived, at which point the assembled SDU is delivered.
 * On error the partial SDU is freed and reassembly state reset.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:

		err = chan->ops->recv(chan, skb);

	case L2CAP_SAR_START:

		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))

		/* First two payload bytes carry the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {

		if (skb->len >= chan->sdu_len)

		chan->sdu_last_frag = skb;

	case L2CAP_SAR_CONTINUE:

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);

		/* A CONTINUE frame must not complete/overrun the SDU */
		if (chan->sdu->len >= chan->sdu_len)

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);

		/* END frame: assembled length must match exactly */
		if (chan->sdu->len != chan->sdu_len)

		err = chan->ops->recv(chan, chan->sdu);

		/* Reassembly complete */
		chan->sdu_last_frag = NULL;

	kfree_skb(chan->sdu);
	chan->sdu_last_frag = NULL;
/* Re-segment outstanding data after a channel move changes the usable
 * MTU.  NOTE(review): body not visible in this excerpt — presumably a
 * placeholder returning 0; confirm against the full source.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
/* Feed a local-busy transition into the ERTM TX state machine.
 * @busy: non-zero when the receive path can no longer accept data.
 * Only meaningful in ERTM mode; other modes ignore it.
 */
void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
	if (chan->mode != L2CAP_MODE_ERTM)

	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
	l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue of out-of-order I-frames that have become
 * in-sequence, passing each to reassembly; stop at the first gap or if
 * local busy sets in.  When the queue empties, return to the RECV state
 * and acknowledge.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
/* Handle a received SREJ (selective reject) s-frame: retransmit the
 * single requested I-frame, honoring P/F bits and the per-frame retry
 * limit (max_tx); invalid reqseq or exceeded retries disconnect.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* Peer may not selectively-reject a frame we have not sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

		BT_DBG("Seq %d not available for retransmission",

	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;

		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Avoid retransmitting twice for the same SREJ
			 * when the F-bit answer has already been seen.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
				l2cap_retransmit(chan, control);

			l2cap_retransmit(chan, control);

			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ s-frame: retransmit everything from reqseq
 * onward.  Invalid reqseq or an exhausted retry budget disconnects the
 * channel; the F-bit/CONN_REJ_ACT dance prevents double retransmission.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit if this REJ was not already acted on */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);

		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify an incoming I-frame's TxSeq relative to the receive window:
 * expected, duplicate, unexpected (gap), or invalid — with SREJ-specific
 * variants while in the SREJ_SENT state.  The return value drives the
 * RX state machines in l2cap_rx_state_recv()/_srej_sent().
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
			/* See notes below regarding "double poll" and
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;

			BT_DBG("Invalid - in window after SREJ sent");
			return L2CAP_TXSEQ_INVALID;

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;

		return L2CAP_TXSEQ_EXPECTED;

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;

		BT_DBG("Invalid - txseq outside tx window");
		return L2CAP_TXSEQ_INVALID;

	BT_DBG("Unexpected - txseq indicates missing frames");
	return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM RX state machine, RECV state: process an incoming I-frame or
 * s-frame event.  Expected I-frames are reassembled and acked; a
 * sequence gap queues the frame and switches to SREJ_SENT; RR/RNR/
 * REJ/SREJ s-frames update the TX side.  Frames not stored (skb_in_use)
 * are freed before returning.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,

	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",

			chan->expected_tx_seq = __next_seq(chan,

			chan->buffer_seq = chan->expected_tx_seq;

			err = l2cap_reassemble_sdu(chan, skb, control);

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			skb_queue_tail(&chan->srej_q, skb);

			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
		case L2CAP_TXSEQ_DUPLICATE:
			l2cap_pass_to_tx(chan, control);
		case L2CAP_TXSEQ_INVALID_IGNORE:
		case L2CAP_TXSEQ_INVALID:
			l2cap_send_disconn_req(chan, ECONNRESET);

	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				l2cap_retransmit_all(chan, control);

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
/* ERTM RX state machine, SREJ_SENT state: we are waiting for one or more
 * selectively-rejected frames to be retransmitted.  Incoming I-frames
 * are queued on srej_q (ordered reassembly happens in
 * l2cap_rx_queued_iframes() once the gaps fill); further gaps trigger
 * more SREJs.  RR/RNR/REJ/SREJ events update the TX side as in RECV.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,

	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);

			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* Head of the SREJ list arrived; drop it from the
			 * list and try to drain the queued frames.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);

			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);

		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			skb_queue_tail(&chan->srej_q, skb);

			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			skb_queue_tail(&chan->srej_q, skb);

			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
		case L2CAP_TXSEQ_INVALID_IGNORE:
		case L2CAP_TXSEQ_INVALID:
			l2cap_send_disconn_req(chan, ECONNRESET);

	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				l2cap_retransmit_all(chan, control);

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);

			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);

	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
/* Complete an AMP channel move: return the RX side to the RECV state,
 * pick the MTU of the link now carrying the channel (AMP block MTU vs
 * BR/EDR ACL MTU), and re-segment pending data for the new MTU.
 */
static int l2cap_finish_move(struct l2cap_chan *chan)
	BT_DBG("chan %p", chan);

	chan->rx_state = L2CAP_RX_STATE_RECV;

		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	return l2cap_resegment(chan);
/* ERTM RX state machine, WAIT_P state (channel move): wait for a poll
 * (P=1) from the peer, then resynchronize the TX queue to the peer's
 * reqseq, finish the move, answer the poll with the F-bit set, and
 * re-dispatch the triggering event through the RECV handler.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* The polled I-frame itself still needs normal RX processing;
	 * s-frame events are replayed with a NULL skb.
	 */
	if (event == L2CAP_EV_RECV_IFRAME)

	return l2cap_rx_state_recv(chan, control, NULL, event);
/* ERTM RX state machine, WAIT_F state (channel move): wait for the
 * peer's final (F=1) response, then resynchronize the TX queue, adopt
 * the MTU of the new link, re-segment, and process the frame normally.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)

	/* Ignore everything until the F-bit arrives */
	if (!control->final)

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

		err = l2cap_rx_state_recv(chan, control, skb, event);
6597 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6599 /* Make sure reqseq is for a packet that has been sent but not acked */
6602 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6603 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM RX dispatcher: validate the frame's ReqSeq, then hand
 * the event to the handler for the channel's current RX state.  An
 * out-of-range ReqSeq is a protocol violation and disconnects.
 */
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff *skb, u8 event)

	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
	       control, skb, event, chan->rx_state);

	if (__valid_reqseq(chan, control->reqseq)) {
		switch (chan->rx_state) {
		case L2CAP_RX_STATE_RECV:
			err = l2cap_rx_state_recv(chan, control, skb, event);
		case L2CAP_RX_STATE_SREJ_SENT:
			err = l2cap_rx_state_srej_sent(chan, control, skb,
		case L2CAP_RX_STATE_WAIT_P:
			err = l2cap_rx_state_wait_p(chan, control, skb, event);
		case L2CAP_RX_STATE_WAIT_F:
			err = l2cap_rx_state_wait_f(chan, control, skb, event);

		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
		       control->reqseq, chan->next_tx_seq,
		       chan->expected_ack_seq);
		l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode receive: only exactly in-sequence frames are
 * reassembled; anything else aborts the partial SDU and drops the
 * frame (streaming mode has no retransmission).  Sequence tracking is
 * advanced unconditionally so the next frame is judged correctly.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);

		/* Out-of-sequence: discard any partially assembled SDU */
		kfree_skb(chan->sdu);
		chan->sdu_last_frag = NULL;

		BT_DBG("Freeing %p", skb);

	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Entry point for ERTM/streaming data frames: unpack the control field,
 * verify FCS, validate payload length against MPS, then route I-frames
 * and s-frames into the appropriate state machine.  Malformed frames
 * that cannot be silently dropped cause a disconnect.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;

	__unpack_control(chan, skb);

	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	if (l2cap_check_fcs(chan, skb))

	/* len is reduced to the SDU payload portion for the MPS check */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);

	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))

	if (!control->sframe) {

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
			err = l2cap_stream_rx(chan, control, skb);

			l2cap_send_disconn_req(chan, ECONNRESET);
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,

			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))

		/* Map the 2-bit supervisory function to an RX event */
		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
/* Return receive credits to the LE peer so it can keep sending.  The
 * target credit level is enough PDUs to carry one full IMTU-sized SDU
 * (imtu/mps + 1); only the shortfall below that level is sent.
 */
static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_credits pkt;

	return_credits = ((chan->imtu / chan->mps) + 1) - chan->rx_credits;

	if (!return_credits)

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	chan->rx_credits += return_credits;

	pkt.cid     = cpu_to_le16(chan->scid);
	pkt.credits = cpu_to_le16(return_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* Deliver a fully reassembled LE SDU to the channel owner, then top up
 * the peer's credits.  Credits are replenished only after ->recv()
 * confirms the upper layer accepted the data.
 */
static int l2cap_le_recv(struct l2cap_chan *chan, struct sk_buff *skb)

	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);

	/* Wait recv to confirm reception before updating the credits */
	err = chan->ops->recv(chan, skb);

	/* Update credits whenever an SDU is received */
	l2cap_chan_le_send_credits(chan);
/* Receive one LE credit-based flow control PDU: charge a credit, and
 * either deliver/begin/extend SDU reassembly depending on whether a
 * partial SDU is pending.  Protocol violations (no credits, oversized
 * PDU/SDU) disconnect; reassembly failures free the partial SDU but
 * return success since the skb was consumed internally.
 */
static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)

	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");

	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

		/* First PDU of an SDU carries the 16-bit SDU length */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");

		if (skb->len == sdu_len)
			return l2cap_le_recv(chan, skb);

		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");

	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);

	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_le_recv(chan, chan->sdu);
		chan->sdu_last_frag = NULL;

	kfree_skb(chan->sdu);
	chan->sdu_last_frag = NULL;

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
/* Route an incoming data frame to the channel identified by @cid and
 * process it according to the channel's mode (LE flow control, basic,
 * ERTM, or streaming).  Unknown CIDs are dropped; the A2MP CID may
 * create its channel on demand.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);

			l2cap_chan_lock(chan);
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procdure is done simply assume that the channel is supported
	 * and mark it as ready.
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		if (l2cap_le_data_rcv(chan, skb) < 0)

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");

		if (!chan->ops->recv(chan, skb))

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);

		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);

	l2cap_chan_unlock(chan);
/* Deliver a connectionless (CID 0x0002) datagram to the global channel
 * listening on @psm.  Sender address and PSM are stashed in the skb CB
 * so the socket layer can populate msg_name for recvmsg().
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	/* Connectionless data is BR/EDR only */
	if (hcon->type != ACL_LINK)

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)

	if (chan->imtu < skb->len)

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);

	l2cap_chan_put(chan);
/* Main L2CAP RX demultiplexer: parse the basic header and dispatch the
 * frame by CID to the signaling, connectionless, LE signaling, or data
 * channel handlers.  Frames arriving before the HCI link is fully up
 * are queued on conn->pending_rx for later processing.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload */
	if (len != skb->len) {

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
				   bdaddr_dst_type(hcon))) {

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);

		l2cap_data_channel(conn, cid, skb);
/* Work item: drain frames that arrived before the HCI link reached
 * BT_CONNECTED and run them through the normal RX path.
 */
static void process_pending_rx(struct work_struct *work)
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&conn->pending_rx)))
		l2cap_recv_frame(conn, skb);
/* Create and attach an l2cap_conn to an HCI connection: allocate the
 * HCI channel and conn object, pick the MTU from the link type, set up
 * the fixed-channel feature mask, and initialize locks, lists, timers
 * and work items.  Returns the existing conn if one is already bound.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	hchan = hci_chan_create(hcon);

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
		hci_chan_del(hchan);

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	switch (hcon->type) {
		/* LE links use the controller's LE MTU when advertised */
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;

		conn->mtu = hcon->hdev->acl_mtu;

	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	if (hcon->type == ACL_LINK &&
	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
		conn->local_fixed_chan |= L2CAP_FC_A2MP;

	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
7159 static bool is_valid_psm(u16 psm, u8 dst_type) {
7163 if (bdaddr_type_is_le(dst_type))
7164 return (psm <= 0x00ff);
7166 /* PSM must be odd and lsb of upper byte must be 0 */
7167 return ((psm & 0x0101) == 0x0001);
/* Initiate an outgoing L2CAP connection on @chan to @dst/@dst_type,
 * addressed by @psm (connection-oriented) or @cid (fixed channel).
 *
 * Validates PSM/CID and channel mode, creates or reuses the underlying
 * HCI connection (LE vs BR/EDR), binds the channel to the l2cap_conn,
 * and either starts the L2CAP connect procedure immediately (link
 * already up) or leaves it pending on link establishment.
 * Returns 0 on success or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
		return -EHOSTUNREACH;

	/* RAW channels and fixed-CID channels may skip PSM validation */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:

	switch (chan->state) {
		/* Already connecting */
		/* Already connected */

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising we cannot initiate scanning, so
		 * connect directly rather than via the scan path.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type,
					      HCI_LE_CONN_TIMEOUT,
					      HCI_ROLE_SLAVE, NULL);
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   HCI_LE_CONN_TIMEOUT);
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);

		err = PTR_ERR(hcon);

	conn = l2cap_conn_add(hcon);
		hci_conn_drop(hcon);

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* A fixed CID must not already be in use on this connection */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	write_lock(&chan_list_lock);
	write_unlock(&chan_list_lock);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
			l2cap_do_start(chan);

	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);

	hci_dev_unlock(hdev);
7332 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* Incoming ACL connection indication from HCI: walk the global channel
 * list for listening sockets bound to this controller's address (or to
 * BDADDR_ANY) and accumulate link-mode flags (accept / role switch).
 * Returns the link-mode mask for an exact address match if one exists,
 * otherwise the mask accumulated from wildcard listeners.
 *
 * NOTE(review): the extraction dropped interior lines (the `continue`,
 * `exact++` and closing braces of the original).
 */
7334 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7336 int exact = 0, lm1 = 0, lm2 = 0;
7337 struct l2cap_chan *c;
7339 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7341 /* Find listening sockets and check their link_mode */
7342 read_lock(&chan_list_lock);
7343 list_for_each_entry(c, &chan_list, global_l) {
7344 if (c->state != BT_LISTEN)
/* lm1: listeners bound exactly to this controller's own address */
7347 if (!bacmp(&c->src, &hdev->bdaddr)) {
7348 lm1 |= HCI_LM_ACCEPT;
7349 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7350 lm1 |= HCI_LM_MASTER;
/* lm2: wildcard listeners bound to BDADDR_ANY */
7352 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7353 lm2 |= HCI_LM_ACCEPT;
7354 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7355 lm2 |= HCI_LM_MASTER;
7358 read_unlock(&chan_list_lock);
/* Exact-address listeners take precedence over wildcard ones */
7360 return exact ? lm1 : lm2;
7363 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7364 * from an existing channel in the list or from the beginning of the
7365 * global list (by passing NULL as first parameter).
/* Matching is on channel type (fixed), listen state, source address
 * (exact or BDADDR_ANY wildcard) and source address type.  The list
 * lock is only held for the lookup itself so callers may sleep between
 * successive calls.
 *
 * NOTE(review): the extraction dropped the `if (!c)` branch selector,
 * the reference-hold and the return statements of the original.
 */
7367 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
7368 struct hci_conn *hcon)
7370 u8 src_type = bdaddr_src_type(hcon);
7372 read_lock(&chan_list_lock);
/* Resume after @c when given, else start from the list head */
7375 c = list_next_entry(c, global_l);
7377 c = list_entry(chan_list.next, typeof(*c), global_l);
7379 list_for_each_entry_from(c, &chan_list, global_l) {
7380 if (c->chan_type != L2CAP_CHAN_FIXED)
7382 if (c->state != BT_LISTEN)
/* Accept channels bound to the link's source address or to any */
7384 if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
7386 if (src_type != c->src_type)
7390 read_unlock(&chan_list_lock);
7394 read_unlock(&chan_list_lock);
/* HCI connect-complete callback: tear the connection down on failure,
 * otherwise create the l2cap_conn, spawn per-connection instances of all
 * listening fixed channels (unless a client channel already claimed the
 * CID), and finally signal the connection ready.
 *
 * NOTE(review): the extraction dropped early-return lines and closing
 * braces; comments describe only the visible flow.
 */
7399 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7401 struct hci_dev *hdev = hcon->hdev;
7402 struct l2cap_conn *conn;
7403 struct l2cap_chan *pchan;
/* Only ACL and LE links carry L2CAP */
7406 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7409 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
/* Non-zero HCI status: drop any L2CAP state for this link */
7412 l2cap_conn_del(hcon, bt_to_errno(status));
7416 conn = l2cap_conn_add(hcon);
7420 dst_type = bdaddr_dst_type(hcon);
7422 /* If device is blocked, do not create channels for it */
7423 if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
7426 /* Find fixed channels and notify them of the new connection. We
7427 * use multiple individual lookups, continuing each time where
7428 * we left off, because the list lock would prevent calling the
7429 * potentially sleeping l2cap_chan_lock() function.
7431 pchan = l2cap_global_fixed_chan(NULL, hcon);
7433 struct l2cap_chan *chan, *next;
7435 /* Client fixed channels should override server ones */
7436 if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7439 l2cap_chan_lock(pchan);
/* Clone the listening channel for this specific connection */
7440 chan = pchan->ops->new_connection(pchan);
7442 bacpy(&chan->src, &hcon->src);
7443 bacpy(&chan->dst, &hcon->dst);
7444 chan->src_type = bdaddr_src_type(hcon);
7445 chan->dst_type = dst_type;
7447 __l2cap_chan_add(conn, chan);
7450 l2cap_chan_unlock(pchan);
/* Fetch the next listener before releasing our ref on the current one */
7452 next = l2cap_global_fixed_chan(pchan, hcon);
7453 l2cap_chan_put(pchan);
7457 l2cap_conn_ready(conn);
/* HCI disconnect indication: report the reason code the remote should
 * receive.  Falls back to "remote user terminated" (visible here without
 * its guarding condition, which the extraction dropped); otherwise the
 * reason recorded on the connection is returned.
 */
7460 int l2cap_disconn_ind(struct hci_conn *hcon)
7462 struct l2cap_conn *conn = hcon->l2cap_data;
7464 BT_DBG("hcon %p", hcon);
7467 return HCI_ERROR_REMOTE_USER_TERM;
7468 return conn->disc_reason;
/* HCI disconnect-complete callback: tear down all L2CAP state for the
 * link, translating the HCI reason code to an errno.  Ignores link types
 * other than ACL and LE.
 */
7471 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7473 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7476 BT_DBG("hcon %p reason %d", hcon, reason);
7478 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to a change of link encryption for a connection-oriented channel:
 * when encryption drops, a MEDIUM-security channel gets a grace timer
 * while HIGH/FIPS channels are closed immediately; when encryption is on,
 * the MEDIUM-security grace timer is cleared.
 *
 * NOTE(review): the else-branch braces were dropped by the extraction —
 * the final two lines belong to the encrypted (else) path.
 */
7481 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7483 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7486 if (encrypt == 0x00) {
7487 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7488 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7489 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7490 chan->sec_level == BT_SECURITY_FIPS)
7491 l2cap_chan_close(chan, ECONNREFUSED);
7493 if (chan->sec_level == BT_SECURITY_MEDIUM)
7494 __clear_chan_timer(chan);
/* HCI security (authentication/encryption) event callback.  Walks every
 * channel on the connection under conn->chan_lock and advances each one
 * according to its state: resume already-connected channels, start
 * connection for channels waiting in BT_CONNECT, and answer deferred
 * incoming connects (BT_CONNECT2) with success/pending/security-block.
 *
 * NOTE(review): the extraction dropped `continue`s, braces and some
 * declarations (res/stat/buf) — comments describe the visible flow only.
 */
7498 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7500 struct l2cap_conn *conn = hcon->l2cap_data;
7501 struct l2cap_chan *chan;
7506 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7508 mutex_lock(&conn->chan_lock);
7510 list_for_each_entry(chan, &conn->chan_l, list) {
7511 l2cap_chan_lock(chan);
7513 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7514 state_to_string(chan->state));
/* A2MP channels manage their own security; skip them */
7516 if (chan->scid == L2CAP_CID_A2MP) {
7517 l2cap_chan_unlock(chan);
/* Successful encryption upgrades the channel's effective level */
7521 if (!status && encrypt)
7522 chan->sec_level = hcon->sec_level;
7524 if (!__l2cap_no_conn_pending(chan)) {
7525 l2cap_chan_unlock(chan);
/* Established channels: resume data flow, re-check encryption policy */
7529 if (!status && (chan->state == BT_CONNECTED ||
7530 chan->state == BT_CONFIG)) {
7531 chan->ops->resume(chan);
7532 l2cap_check_encryption(chan, encrypt);
7533 l2cap_chan_unlock(chan);
/* Outgoing connect waiting on security: proceed or disconnect */
7537 if (chan->state == BT_CONNECT) {
7538 if (!status && l2cap_check_enc_key_size(hcon))
7539 l2cap_start_connection(chan);
7541 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Deferred incoming connect (non-LE): build the connect response */
7542 } else if (chan->state == BT_CONNECT2 &&
7543 chan->mode != L2CAP_MODE_LE_FLOWCTL) {
7544 struct l2cap_conn_rsp rsp;
7547 if (!status && l2cap_check_enc_key_size(hcon)) {
7548 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7549 res = L2CAP_CR_PEND;
7550 stat = L2CAP_CS_AUTHOR_PEND;
7551 chan->ops->defer(chan);
7553 l2cap_state_change(chan, BT_CONFIG);
7554 res = L2CAP_CR_SUCCESS;
7555 stat = L2CAP_CS_NO_INFO;
/* Security failed or key too short: refuse and schedule disconnect */
7558 l2cap_state_change(chan, BT_DISCONN);
7559 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7560 res = L2CAP_CR_SEC_BLOCK;
7561 stat = L2CAP_CS_NO_INFO;
7564 rsp.scid = cpu_to_le16(chan->dcid);
7565 rsp.dcid = cpu_to_le16(chan->scid);
7566 rsp.result = cpu_to_le16(res);
7567 rsp.status = cpu_to_le16(stat);
7568 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* On success, kick off configuration if we have not already */
7571 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7572 res == L2CAP_CR_SUCCESS) {
7574 set_bit(CONF_REQ_SENT, &chan->conf_state);
7575 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7577 l2cap_build_conf_req(chan, buf, sizeof(buf)),
7579 chan->num_conf_req++;
7583 l2cap_chan_unlock(chan);
7586 mutex_unlock(&conn->chan_lock);
/* Entry point for ACL data from HCI.  Reassembles L2CAP frames that span
 * multiple ACL fragments: a start fragment carries the basic L2CAP header
 * with the total length; continuation fragments are appended to
 * conn->rx_skb until conn->rx_len reaches zero, then the complete frame
 * is handed to l2cap_recv_frame().  Malformed sequences (short header,
 * oversized frame/fragment, unexpected start/continuation) drop the
 * reassembly buffer and mark the connection unreliable.
 *
 * NOTE(review): the extraction dropped the switch header, `goto drop`
 * lines and braces — comments describe only the visible statements.
 */
7589 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7591 struct l2cap_conn *conn = hcon->l2cap_data;
7592 struct l2cap_hdr *hdr;
7595 /* For AMP controller do not create l2cap conn */
7596 if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
7600 conn = l2cap_conn_add(hcon);
7605 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7609 case ACL_START_NO_FLUSH:
/* A start fragment while a reassembly is in progress is a protocol
 * error: discard the partial frame.
 */
7612 BT_ERR("Unexpected start frame (len %d)", skb->len);
7613 kfree_skb(conn->rx_skb);
7614 conn->rx_skb = NULL;
7616 l2cap_conn_unreliable(conn, ECOMM);
7619 /* Start fragment always begin with Basic L2CAP header */
7620 if (skb->len < L2CAP_HDR_SIZE) {
7621 BT_ERR("Frame is too short (len %d)", skb->len);
7622 l2cap_conn_unreliable(conn, ECOMM);
7626 hdr = (struct l2cap_hdr *) skb->data;
7627 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7629 if (len == skb->len) {
7630 /* Complete frame received */
7631 l2cap_recv_frame(conn, skb);
7635 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7637 if (skb->len > len) {
7638 BT_ERR("Frame is too long (len %d, expected len %d)",
7640 l2cap_conn_unreliable(conn, ECOMM);
7644 /* Allocate skb for the complete frame (with header) */
7645 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7649 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* Remember how many bytes are still outstanding */
7651 conn->rx_len = len - skb->len;
7655 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation with no reassembly in progress is a protocol error */
7657 if (!conn->rx_len) {
7658 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7659 l2cap_conn_unreliable(conn, ECOMM);
7663 if (skb->len > conn->rx_len) {
7664 BT_ERR("Fragment is too long (len %d, expected %d)",
7665 skb->len, conn->rx_len);
7666 kfree_skb(conn->rx_skb);
7667 conn->rx_skb = NULL;
7669 l2cap_conn_unreliable(conn, ECOMM);
7673 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7675 conn->rx_len -= skb->len;
7677 if (!conn->rx_len) {
7678 /* Complete frame received. l2cap_recv_frame
7679 * takes ownership of the skb so set the global
7680 * rx_skb pointer to NULL first.
7682 struct sk_buff *rx_skb = conn->rx_skb;
7683 conn->rx_skb = NULL;
7684 l2cap_recv_frame(conn, rx_skb);
/* HCI callback table registering L2CAP's interest in link connect,
 * disconnect and security events (registered in l2cap_init()).
 */
7693 static struct hci_cb l2cap_cb = {
7695 .connect_cfm = l2cap_connect_cfm,
7696 .disconn_cfm = l2cap_disconn_cfm,
7697 .security_cfm = l2cap_security_cfm,
/* debugfs seq_file show handler: dump one line per global L2CAP channel
 * (addresses, state, PSM, CIDs, MTUs, security level, mode) while holding
 * the global channel list read lock.
 */
7700 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7702 struct l2cap_chan *c;
7704 read_lock(&chan_list_lock);
7706 list_for_each_entry(c, &chan_list, global_l) {
7707 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7708 &c->src, c->src_type, &c->dst, c->dst_type,
7709 c->state, __le16_to_cpu(c->psm),
7710 c->scid, c->dcid, c->imtu, c->omtu,
7711 c->sec_level, c->mode);
7714 read_unlock(&chan_list_lock);
/* Generates l2cap_debugfs_fops from the show function above */
7719 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
/* Handle of the "l2cap" debugfs file, removed in l2cap_exit() */
7721 static struct dentry *l2cap_debugfs;
/* Module init: set up the L2CAP socket layer, register the HCI callback
 * table, and create the debugfs "l2cap" file when bt_debugfs exists.
 * NOTE(review): the error-check/return lines were dropped by extraction.
 */
7723 int __init l2cap_init(void)
7727 err = l2cap_init_sockets();
7731 hci_register_cb(&l2cap_cb);
7733 if (IS_ERR_OR_NULL(bt_debugfs))
7736 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7737 NULL, &l2cap_debugfs_fops);
/* Module exit: undo l2cap_init() in reverse order — remove the debugfs
 * file, unregister the HCI callbacks, then tear down the socket layer.
 */
7742 void l2cap_exit(void)
7744 debugfs_remove(l2cap_debugfs);
7745 hci_unregister_cb(&l2cap_cb);
7746 l2cap_cleanup_sockets();
/* Runtime-tunable module parameter to disable ERTM (0644: root-writable
 * via /sys/module/.../parameters/disable_ertm).
 */
7749 module_param(disable_ertm, bool, 0644);
7750 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");