2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
45 #define LE_FLOWCTL_MAX_CREDITS 65535
49 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
51 static LIST_HEAD(chan_list);
52 static DEFINE_RWLOCK(chan_list_lock);
54 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
55 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
57 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
58 u8 code, u8 ident, u16 dlen, void *data);
59 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
61 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
62 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
64 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
65 struct sk_buff_head *skbs, u8 event);
67 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
69 if (link_type == LE_LINK) {
70 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
71 return BDADDR_LE_PUBLIC;
73 return BDADDR_LE_RANDOM;
79 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
81 return bdaddr_type(hcon->type, hcon->src_type);
84 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
86 return bdaddr_type(hcon->type, hcon->dst_type);
89 /* ---- L2CAP channels ---- */
91 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
96 list_for_each_entry(c, &conn->chan_l, list) {
103 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
106 struct l2cap_chan *c;
108 list_for_each_entry(c, &conn->chan_l, list) {
115 /* Find channel with given SCID.
116 * Returns locked channel. */
117 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
120 struct l2cap_chan *c;
122 mutex_lock(&conn->chan_lock);
123 c = __l2cap_get_chan_by_scid(conn, cid);
126 mutex_unlock(&conn->chan_lock);
131 /* Find channel with given DCID.
132 * Returns locked channel.
134 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
137 struct l2cap_chan *c;
139 mutex_lock(&conn->chan_lock);
140 c = __l2cap_get_chan_by_dcid(conn, cid);
143 mutex_unlock(&conn->chan_lock);
148 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
151 struct l2cap_chan *c;
153 list_for_each_entry(c, &conn->chan_l, list) {
154 if (c->ident == ident)
160 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
163 struct l2cap_chan *c;
165 mutex_lock(&conn->chan_lock);
166 c = __l2cap_get_chan_by_ident(conn, ident);
169 mutex_unlock(&conn->chan_lock);
174 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
176 struct l2cap_chan *c;
178 list_for_each_entry(c, &chan_list, global_l) {
179 if (c->sport == psm && !bacmp(&c->src, src))
185 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
189 write_lock(&chan_list_lock);
191 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
201 u16 p, start, end, incr;
203 if (chan->src_type == BDADDR_BREDR) {
204 start = L2CAP_PSM_DYN_START;
205 end = L2CAP_PSM_AUTO_END;
208 start = L2CAP_PSM_LE_DYN_START;
209 end = L2CAP_PSM_LE_DYN_END;
214 for (p = start; p <= end; p += incr)
215 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
216 chan->psm = cpu_to_le16(p);
217 chan->sport = cpu_to_le16(p);
224 write_unlock(&chan_list_lock);
227 EXPORT_SYMBOL_GPL(l2cap_add_psm);
229 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
231 write_lock(&chan_list_lock);
233 /* Override the defaults (which are for conn-oriented) */
234 chan->omtu = L2CAP_DEFAULT_MTU;
235 chan->chan_type = L2CAP_CHAN_FIXED;
239 write_unlock(&chan_list_lock);
244 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
248 if (conn->hcon->type == LE_LINK)
249 dyn_end = L2CAP_CID_LE_DYN_END;
251 dyn_end = L2CAP_CID_DYN_END;
253 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
254 if (!__l2cap_get_chan_by_scid(conn, cid))
261 static void l2cap_state_change(struct l2cap_chan *chan, int state)
263 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
264 state_to_string(state));
267 chan->ops->state_change(chan, state, 0);
270 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
274 chan->ops->state_change(chan, chan->state, err);
277 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
279 chan->ops->state_change(chan, chan->state, err);
282 static void __set_retrans_timer(struct l2cap_chan *chan)
284 if (!delayed_work_pending(&chan->monitor_timer) &&
285 chan->retrans_timeout) {
286 l2cap_set_timer(chan, &chan->retrans_timer,
287 msecs_to_jiffies(chan->retrans_timeout));
291 static void __set_monitor_timer(struct l2cap_chan *chan)
293 __clear_retrans_timer(chan);
294 if (chan->monitor_timeout) {
295 l2cap_set_timer(chan, &chan->monitor_timer,
296 msecs_to_jiffies(chan->monitor_timeout));
300 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
305 skb_queue_walk(head, skb) {
306 if (bt_cb(skb)->l2cap.txseq == seq)
313 /* ---- L2CAP sequence number lists ---- */
315 /* For ERTM, ordered lists of sequence numbers must be tracked for
316 * SREJ requests that are received and for frames that are to be
317 * retransmitted. These seq_list functions implement a singly-linked
318 * list in an array, where membership in the list can also be checked
319 * in constant time. Items can also be added to the tail of the list
320 * and removed from the head in constant time, without further memory
324 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
326 size_t alloc_size, i;
328 /* Allocated size is a power of 2 to map sequence numbers
329 * (which may be up to 14 bits) in to a smaller array that is
330 * sized for the negotiated ERTM transmit windows.
332 alloc_size = roundup_pow_of_two(size);
334 seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
338 seq_list->mask = alloc_size - 1;
339 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
341 for (i = 0; i < alloc_size; i++)
342 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
347 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
349 kfree(seq_list->list);
352 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
355 /* Constant-time check for list membership */
356 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
359 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
361 u16 seq = seq_list->head;
362 u16 mask = seq_list->mask;
364 seq_list->head = seq_list->list[seq & mask];
365 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
367 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
368 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
369 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
375 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
379 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
382 for (i = 0; i <= seq_list->mask; i++)
383 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
385 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
386 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
389 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
391 u16 mask = seq_list->mask;
393 /* All appends happen in constant time */
395 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
398 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
399 seq_list->head = seq;
401 seq_list->list[seq_list->tail & mask] = seq;
403 seq_list->tail = seq;
404 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
407 static void l2cap_chan_timeout(struct work_struct *work)
409 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
411 struct l2cap_conn *conn = chan->conn;
414 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
416 mutex_lock(&conn->chan_lock);
417 /* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
418 * this work. No need to call l2cap_chan_hold(chan) here again.
420 l2cap_chan_lock(chan);
422 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
423 reason = ECONNREFUSED;
424 else if (chan->state == BT_CONNECT &&
425 chan->sec_level != BT_SECURITY_SDP)
426 reason = ECONNREFUSED;
430 l2cap_chan_close(chan, reason);
432 chan->ops->close(chan);
434 l2cap_chan_unlock(chan);
435 l2cap_chan_put(chan);
437 mutex_unlock(&conn->chan_lock);
440 struct l2cap_chan *l2cap_chan_create(void)
442 struct l2cap_chan *chan;
444 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
448 skb_queue_head_init(&chan->tx_q);
449 skb_queue_head_init(&chan->srej_q);
450 mutex_init(&chan->lock);
452 /* Set default lock nesting level */
453 atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
455 write_lock(&chan_list_lock);
456 list_add(&chan->global_l, &chan_list);
457 write_unlock(&chan_list_lock);
459 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
461 chan->state = BT_OPEN;
463 kref_init(&chan->kref);
465 /* This flag is cleared in l2cap_chan_ready() */
466 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
468 BT_DBG("chan %p", chan);
472 EXPORT_SYMBOL_GPL(l2cap_chan_create);
474 static void l2cap_chan_destroy(struct kref *kref)
476 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
478 BT_DBG("chan %p", chan);
480 write_lock(&chan_list_lock);
481 list_del(&chan->global_l);
482 write_unlock(&chan_list_lock);
487 void l2cap_chan_hold(struct l2cap_chan *c)
489 BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
494 void l2cap_chan_put(struct l2cap_chan *c)
496 BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
498 kref_put(&c->kref, l2cap_chan_destroy);
500 EXPORT_SYMBOL_GPL(l2cap_chan_put);
502 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
504 chan->fcs = L2CAP_FCS_CRC16;
505 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
506 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
507 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
508 chan->remote_max_tx = chan->max_tx;
509 chan->remote_tx_win = chan->tx_win;
510 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
511 chan->sec_level = BT_SECURITY_LOW;
512 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
513 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
514 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
516 chan->conf_state = 0;
517 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
519 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
521 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
523 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
526 chan->sdu_last_frag = NULL;
528 chan->tx_credits = 0;
529 chan->rx_credits = le_max_credits;
530 chan->mps = min_t(u16, chan->imtu, le_default_mps);
532 skb_queue_head_init(&chan->tx_q);
535 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
537 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
538 __le16_to_cpu(chan->psm), chan->dcid);
540 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
544 switch (chan->chan_type) {
545 case L2CAP_CHAN_CONN_ORIENTED:
546 /* Alloc CID for connection-oriented socket */
547 chan->scid = l2cap_alloc_cid(conn);
548 if (conn->hcon->type == ACL_LINK)
549 chan->omtu = L2CAP_DEFAULT_MTU;
552 case L2CAP_CHAN_CONN_LESS:
553 /* Connectionless socket */
554 chan->scid = L2CAP_CID_CONN_LESS;
555 chan->dcid = L2CAP_CID_CONN_LESS;
556 chan->omtu = L2CAP_DEFAULT_MTU;
559 case L2CAP_CHAN_FIXED:
560 /* Caller will set CID and CID specific MTU values */
564 /* Raw socket can send/recv signalling messages only */
565 chan->scid = L2CAP_CID_SIGNALING;
566 chan->dcid = L2CAP_CID_SIGNALING;
567 chan->omtu = L2CAP_DEFAULT_MTU;
570 chan->local_id = L2CAP_BESTEFFORT_ID;
571 chan->local_stype = L2CAP_SERV_BESTEFFORT;
572 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
573 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
574 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
575 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
577 l2cap_chan_hold(chan);
579 /* Only keep a reference for fixed channels if they requested it */
580 if (chan->chan_type != L2CAP_CHAN_FIXED ||
581 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
582 hci_conn_hold(conn->hcon);
584 list_add(&chan->list, &conn->chan_l);
587 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
589 mutex_lock(&conn->chan_lock);
590 __l2cap_chan_add(conn, chan);
591 mutex_unlock(&conn->chan_lock);
594 void l2cap_chan_del(struct l2cap_chan *chan, int err)
596 struct l2cap_conn *conn = chan->conn;
598 __clear_chan_timer(chan);
600 BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
601 state_to_string(chan->state));
603 chan->ops->teardown(chan, err);
606 struct amp_mgr *mgr = conn->hcon->amp_mgr;
607 /* Delete from channel list */
608 list_del(&chan->list);
610 l2cap_chan_put(chan);
614 /* Reference was only held for non-fixed channels or
615 * fixed channels that explicitly requested it using the
616 * FLAG_HOLD_HCI_CONN flag.
618 if (chan->chan_type != L2CAP_CHAN_FIXED ||
619 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
620 hci_conn_drop(conn->hcon);
622 if (mgr && mgr->bredr_chan == chan)
623 mgr->bredr_chan = NULL;
626 if (chan->hs_hchan) {
627 struct hci_chan *hs_hchan = chan->hs_hchan;
629 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
630 amp_disconnect_logical_link(hs_hchan);
633 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
637 case L2CAP_MODE_BASIC:
640 case L2CAP_MODE_LE_FLOWCTL:
641 skb_queue_purge(&chan->tx_q);
644 case L2CAP_MODE_ERTM:
645 __clear_retrans_timer(chan);
646 __clear_monitor_timer(chan);
647 __clear_ack_timer(chan);
649 skb_queue_purge(&chan->srej_q);
651 l2cap_seq_list_free(&chan->srej_list);
652 l2cap_seq_list_free(&chan->retrans_list);
656 case L2CAP_MODE_STREAMING:
657 skb_queue_purge(&chan->tx_q);
663 EXPORT_SYMBOL_GPL(l2cap_chan_del);
665 static void l2cap_conn_update_id_addr(struct work_struct *work)
667 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
668 id_addr_update_work);
669 struct hci_conn *hcon = conn->hcon;
670 struct l2cap_chan *chan;
672 mutex_lock(&conn->chan_lock);
674 list_for_each_entry(chan, &conn->chan_l, list) {
675 l2cap_chan_lock(chan);
676 bacpy(&chan->dst, &hcon->dst);
677 chan->dst_type = bdaddr_dst_type(hcon);
678 l2cap_chan_unlock(chan);
681 mutex_unlock(&conn->chan_lock);
684 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
686 struct l2cap_conn *conn = chan->conn;
687 struct l2cap_le_conn_rsp rsp;
690 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
691 result = L2CAP_CR_AUTHORIZATION;
693 result = L2CAP_CR_BAD_PSM;
695 l2cap_state_change(chan, BT_DISCONN);
697 rsp.dcid = cpu_to_le16(chan->scid);
698 rsp.mtu = cpu_to_le16(chan->imtu);
699 rsp.mps = cpu_to_le16(chan->mps);
700 rsp.credits = cpu_to_le16(chan->rx_credits);
701 rsp.result = cpu_to_le16(result);
703 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
707 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
709 struct l2cap_conn *conn = chan->conn;
710 struct l2cap_conn_rsp rsp;
713 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
714 result = L2CAP_CR_SEC_BLOCK;
716 result = L2CAP_CR_BAD_PSM;
718 l2cap_state_change(chan, BT_DISCONN);
720 rsp.scid = cpu_to_le16(chan->dcid);
721 rsp.dcid = cpu_to_le16(chan->scid);
722 rsp.result = cpu_to_le16(result);
723 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
725 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
728 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
730 struct l2cap_conn *conn = chan->conn;
732 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
734 switch (chan->state) {
736 chan->ops->teardown(chan, 0);
741 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
742 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
743 l2cap_send_disconn_req(chan, reason);
745 l2cap_chan_del(chan, reason);
749 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
750 if (conn->hcon->type == ACL_LINK)
751 l2cap_chan_connect_reject(chan);
752 else if (conn->hcon->type == LE_LINK)
753 l2cap_chan_le_connect_reject(chan);
756 l2cap_chan_del(chan, reason);
761 l2cap_chan_del(chan, reason);
765 chan->ops->teardown(chan, 0);
769 EXPORT_SYMBOL(l2cap_chan_close);
771 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
773 switch (chan->chan_type) {
775 switch (chan->sec_level) {
776 case BT_SECURITY_HIGH:
777 case BT_SECURITY_FIPS:
778 return HCI_AT_DEDICATED_BONDING_MITM;
779 case BT_SECURITY_MEDIUM:
780 return HCI_AT_DEDICATED_BONDING;
782 return HCI_AT_NO_BONDING;
785 case L2CAP_CHAN_CONN_LESS:
786 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
787 if (chan->sec_level == BT_SECURITY_LOW)
788 chan->sec_level = BT_SECURITY_SDP;
790 if (chan->sec_level == BT_SECURITY_HIGH ||
791 chan->sec_level == BT_SECURITY_FIPS)
792 return HCI_AT_NO_BONDING_MITM;
794 return HCI_AT_NO_BONDING;
796 case L2CAP_CHAN_CONN_ORIENTED:
797 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
798 if (chan->sec_level == BT_SECURITY_LOW)
799 chan->sec_level = BT_SECURITY_SDP;
801 if (chan->sec_level == BT_SECURITY_HIGH ||
802 chan->sec_level == BT_SECURITY_FIPS)
803 return HCI_AT_NO_BONDING_MITM;
805 return HCI_AT_NO_BONDING;
809 switch (chan->sec_level) {
810 case BT_SECURITY_HIGH:
811 case BT_SECURITY_FIPS:
812 return HCI_AT_GENERAL_BONDING_MITM;
813 case BT_SECURITY_MEDIUM:
814 return HCI_AT_GENERAL_BONDING;
816 return HCI_AT_NO_BONDING;
822 /* Service level security */
823 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
825 struct l2cap_conn *conn = chan->conn;
828 if (conn->hcon->type == LE_LINK)
829 return smp_conn_security(conn->hcon, chan->sec_level);
831 auth_type = l2cap_get_auth_type(chan);
833 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
837 static u8 l2cap_get_ident(struct l2cap_conn *conn)
841 /* Get next available identificator.
842 * 1 - 128 are used by kernel.
843 * 129 - 199 are reserved.
844 * 200 - 254 are used by utilities like l2ping, etc.
847 mutex_lock(&conn->ident_lock);
849 if (++conn->tx_ident > 128)
854 mutex_unlock(&conn->ident_lock);
859 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
862 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
865 BT_DBG("code 0x%2.2x", code);
870 /* Use NO_FLUSH if supported or we have an LE link (which does
871 * not support auto-flushing packets) */
872 if (lmp_no_flush_capable(conn->hcon->hdev) ||
873 conn->hcon->type == LE_LINK)
874 flags = ACL_START_NO_FLUSH;
878 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
879 skb->priority = HCI_PRIO_MAX;
881 hci_send_acl(conn->hchan, skb, flags);
884 static bool __chan_is_moving(struct l2cap_chan *chan)
886 return chan->move_state != L2CAP_MOVE_STABLE &&
887 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
890 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
892 struct hci_conn *hcon = chan->conn->hcon;
895 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
898 if (chan->hs_hcon && !__chan_is_moving(chan)) {
900 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
907 /* Use NO_FLUSH for LE links (where this is the only option) or
908 * if the BR/EDR link supports it and flushing has not been
909 * explicitly requested (through FLAG_FLUSHABLE).
911 if (hcon->type == LE_LINK ||
912 (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
913 lmp_no_flush_capable(hcon->hdev)))
914 flags = ACL_START_NO_FLUSH;
918 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
919 hci_send_acl(chan->conn->hchan, skb, flags);
922 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
924 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
925 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
927 if (enh & L2CAP_CTRL_FRAME_TYPE) {
930 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
931 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
938 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
939 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
946 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
948 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
949 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
951 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
954 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
955 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
962 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
963 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
970 static inline void __unpack_control(struct l2cap_chan *chan,
973 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
974 __unpack_extended_control(get_unaligned_le32(skb->data),
976 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
978 __unpack_enhanced_control(get_unaligned_le16(skb->data),
980 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
984 static u32 __pack_extended_control(struct l2cap_ctrl *control)
988 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
989 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
991 if (control->sframe) {
992 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
993 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
994 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
996 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
997 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1003 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1007 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1008 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1010 if (control->sframe) {
1011 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1012 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1013 packed |= L2CAP_CTRL_FRAME_TYPE;
1015 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1016 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1022 static inline void __pack_control(struct l2cap_chan *chan,
1023 struct l2cap_ctrl *control,
1024 struct sk_buff *skb)
1026 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1027 put_unaligned_le32(__pack_extended_control(control),
1028 skb->data + L2CAP_HDR_SIZE);
1030 put_unaligned_le16(__pack_enhanced_control(control),
1031 skb->data + L2CAP_HDR_SIZE);
1035 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1037 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1038 return L2CAP_EXT_HDR_SIZE;
1040 return L2CAP_ENH_HDR_SIZE;
1043 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1046 struct sk_buff *skb;
1047 struct l2cap_hdr *lh;
1048 int hlen = __ertm_hdr_size(chan);
1050 if (chan->fcs == L2CAP_FCS_CRC16)
1051 hlen += L2CAP_FCS_SIZE;
1053 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1056 return ERR_PTR(-ENOMEM);
1058 lh = skb_put(skb, L2CAP_HDR_SIZE);
1059 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1060 lh->cid = cpu_to_le16(chan->dcid);
1062 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1063 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1065 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1067 if (chan->fcs == L2CAP_FCS_CRC16) {
1068 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1069 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1072 skb->priority = HCI_PRIO_MAX;
1076 static void l2cap_send_sframe(struct l2cap_chan *chan,
1077 struct l2cap_ctrl *control)
1079 struct sk_buff *skb;
1082 BT_DBG("chan %p, control %p", chan, control);
1084 if (!control->sframe)
1087 if (__chan_is_moving(chan))
1090 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1094 if (control->super == L2CAP_SUPER_RR)
1095 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1096 else if (control->super == L2CAP_SUPER_RNR)
1097 set_bit(CONN_RNR_SENT, &chan->conn_state);
1099 if (control->super != L2CAP_SUPER_SREJ) {
1100 chan->last_acked_seq = control->reqseq;
1101 __clear_ack_timer(chan);
1104 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1105 control->final, control->poll, control->super);
1107 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1108 control_field = __pack_extended_control(control);
1110 control_field = __pack_enhanced_control(control);
1112 skb = l2cap_create_sframe_pdu(chan, control_field);
1114 l2cap_do_send(chan, skb);
1117 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1119 struct l2cap_ctrl control;
1121 BT_DBG("chan %p, poll %d", chan, poll);
1123 memset(&control, 0, sizeof(control));
1125 control.poll = poll;
1127 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1128 control.super = L2CAP_SUPER_RNR;
1130 control.super = L2CAP_SUPER_RR;
1132 control.reqseq = chan->buffer_seq;
1133 l2cap_send_sframe(chan, &control);
1136 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1138 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1141 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1144 static bool __amp_capable(struct l2cap_chan *chan)
1146 struct l2cap_conn *conn = chan->conn;
1147 struct hci_dev *hdev;
1148 bool amp_available = false;
1150 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1153 if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1156 read_lock(&hci_dev_list_lock);
1157 list_for_each_entry(hdev, &hci_dev_list, list) {
1158 if (hdev->amp_type != AMP_TYPE_BREDR &&
1159 test_bit(HCI_UP, &hdev->flags)) {
1160 amp_available = true;
1164 read_unlock(&hci_dev_list_lock);
1166 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1167 return amp_available;
1172 static bool l2cap_check_efs(struct l2cap_chan *chan)
1174 /* Check EFS parameters */
1178 void l2cap_send_conn_req(struct l2cap_chan *chan)
1180 struct l2cap_conn *conn = chan->conn;
1181 struct l2cap_conn_req req;
1183 req.scid = cpu_to_le16(chan->scid);
1184 req.psm = chan->psm;
1186 chan->ident = l2cap_get_ident(conn);
1188 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1190 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1193 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1195 struct l2cap_create_chan_req req;
1196 req.scid = cpu_to_le16(chan->scid);
1197 req.psm = chan->psm;
1198 req.amp_id = amp_id;
1200 chan->ident = l2cap_get_ident(chan->conn);
1202 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1206 static void l2cap_move_setup(struct l2cap_chan *chan)
1208 struct sk_buff *skb;
1210 BT_DBG("chan %p", chan);
1212 if (chan->mode != L2CAP_MODE_ERTM)
1215 __clear_retrans_timer(chan);
1216 __clear_monitor_timer(chan);
1217 __clear_ack_timer(chan);
1219 chan->retry_count = 0;
1220 skb_queue_walk(&chan->tx_q, skb) {
1221 if (bt_cb(skb)->l2cap.retries)
1222 bt_cb(skb)->l2cap.retries = 1;
1227 chan->expected_tx_seq = chan->buffer_seq;
1229 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1230 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1231 l2cap_seq_list_clear(&chan->retrans_list);
1232 l2cap_seq_list_clear(&chan->srej_list);
1233 skb_queue_purge(&chan->srej_q);
1235 chan->tx_state = L2CAP_TX_STATE_XMIT;
1236 chan->rx_state = L2CAP_RX_STATE_MOVE;
1238 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1241 static void l2cap_move_done(struct l2cap_chan *chan)
1243 u8 move_role = chan->move_role;
1244 BT_DBG("chan %p", chan);
1246 chan->move_state = L2CAP_MOVE_STABLE;
1247 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1249 if (chan->mode != L2CAP_MODE_ERTM)
1252 switch (move_role) {
1253 case L2CAP_MOVE_ROLE_INITIATOR:
1254 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1255 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1257 case L2CAP_MOVE_ROLE_RESPONDER:
1258 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1263 static void l2cap_chan_ready(struct l2cap_chan *chan)
1265 /* The channel may have already been flagged as connected in
1266 * case of receiving data before the L2CAP info req/rsp
1267 * procedure is complete.
1269 if (chan->state == BT_CONNECTED)
1272 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1273 chan->conf_state = 0;
1274 __clear_chan_timer(chan);
1276 if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1277 chan->ops->suspend(chan);
1279 chan->state = BT_CONNECTED;
1281 chan->ops->ready(chan);
1284 static void l2cap_le_connect(struct l2cap_chan *chan)
1286 struct l2cap_conn *conn = chan->conn;
1287 struct l2cap_le_conn_req req;
1289 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1292 req.psm = chan->psm;
1293 req.scid = cpu_to_le16(chan->scid);
1294 req.mtu = cpu_to_le16(chan->imtu);
1295 req.mps = cpu_to_le16(chan->mps);
1296 req.credits = cpu_to_le16(chan->rx_credits);
1298 chan->ident = l2cap_get_ident(conn);
1300 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1304 static void l2cap_le_start(struct l2cap_chan *chan)
1306 struct l2cap_conn *conn = chan->conn;
1308 if (!smp_conn_security(conn->hcon, chan->sec_level))
1312 l2cap_chan_ready(chan);
1316 if (chan->state == BT_CONNECT)
1317 l2cap_le_connect(chan);
1320 static void l2cap_start_connection(struct l2cap_chan *chan)
1322 if (__amp_capable(chan)) {
1323 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1324 a2mp_discover_amp(chan);
1325 } else if (chan->conn->hcon->type == LE_LINK) {
1326 l2cap_le_start(chan);
1328 l2cap_send_conn_req(chan);
1332 static void l2cap_request_info(struct l2cap_conn *conn)
1334 struct l2cap_info_req req;
1336 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1339 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1341 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1342 conn->info_ident = l2cap_get_ident(conn);
1344 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1346 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1350 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1352 /* The minimum encryption key size needs to be enforced by the
1353 * host stack before establishing any L2CAP connections. The
1354 * specification in theory allows a minimum of 1, but to align
1355 * BR/EDR and LE transports, a minimum of 7 is chosen.
1357 * This check might also be called for unencrypted connections
1358 * that have no key size requirements. Ensure that the link is
1359 * actually encrypted before enforcing a key size.
1361 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1362 hcon->enc_key_size >= HCI_MIN_ENC_KEY_SIZE);
1365 static void l2cap_do_start(struct l2cap_chan *chan)
1367 struct l2cap_conn *conn = chan->conn;
1369 if (conn->hcon->type == LE_LINK) {
1370 l2cap_le_start(chan);
1374 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1375 l2cap_request_info(conn);
1379 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1382 if (!l2cap_chan_check_security(chan, true) ||
1383 !__l2cap_no_conn_pending(chan))
1386 if (l2cap_check_enc_key_size(conn->hcon))
1387 l2cap_start_connection(chan);
1389 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
1392 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1394 u32 local_feat_mask = l2cap_feat_mask;
1396 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1399 case L2CAP_MODE_ERTM:
1400 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1401 case L2CAP_MODE_STREAMING:
1402 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1408 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1410 struct l2cap_conn *conn = chan->conn;
1411 struct l2cap_disconn_req req;
1416 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1417 __clear_retrans_timer(chan);
1418 __clear_monitor_timer(chan);
1419 __clear_ack_timer(chan);
1422 if (chan->scid == L2CAP_CID_A2MP) {
1423 l2cap_state_change(chan, BT_DISCONN);
1427 req.dcid = cpu_to_le16(chan->dcid);
1428 req.scid = cpu_to_le16(chan->scid);
1429 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1432 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1435 /* ---- L2CAP connections ---- */
1436 static void l2cap_conn_start(struct l2cap_conn *conn)
1438 struct l2cap_chan *chan, *tmp;
1440 BT_DBG("conn %p", conn);
1442 mutex_lock(&conn->chan_lock);
1444 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1445 l2cap_chan_lock(chan);
1447 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1448 l2cap_chan_ready(chan);
1449 l2cap_chan_unlock(chan);
1453 if (chan->state == BT_CONNECT) {
1454 if (!l2cap_chan_check_security(chan, true) ||
1455 !__l2cap_no_conn_pending(chan)) {
1456 l2cap_chan_unlock(chan);
1460 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1461 && test_bit(CONF_STATE2_DEVICE,
1462 &chan->conf_state)) {
1463 l2cap_chan_close(chan, ECONNRESET);
1464 l2cap_chan_unlock(chan);
1468 if (l2cap_check_enc_key_size(conn->hcon))
1469 l2cap_start_connection(chan);
1471 l2cap_chan_close(chan, ECONNREFUSED);
1473 } else if (chan->state == BT_CONNECT2) {
1474 struct l2cap_conn_rsp rsp;
1476 rsp.scid = cpu_to_le16(chan->dcid);
1477 rsp.dcid = cpu_to_le16(chan->scid);
1479 if (l2cap_chan_check_security(chan, false)) {
1480 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1481 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1482 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1483 chan->ops->defer(chan);
1486 l2cap_state_change(chan, BT_CONFIG);
1487 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1488 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1491 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1492 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1495 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1498 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1499 rsp.result != L2CAP_CR_SUCCESS) {
1500 l2cap_chan_unlock(chan);
1504 set_bit(CONF_REQ_SENT, &chan->conf_state);
1505 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1506 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1507 chan->num_conf_req++;
1510 l2cap_chan_unlock(chan);
1513 mutex_unlock(&conn->chan_lock);
1516 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1518 struct hci_conn *hcon = conn->hcon;
1519 struct hci_dev *hdev = hcon->hdev;
1521 BT_DBG("%s conn %p", hdev->name, conn);
1523 /* For outgoing pairing which doesn't necessarily have an
1524 * associated socket (e.g. mgmt_pair_device).
1527 smp_conn_security(hcon, hcon->pending_sec_level);
1529 /* For LE slave connections, make sure the connection interval
1530 * is in the range of the minium and maximum interval that has
1531 * been configured for this connection. If not, then trigger
1532 * the connection update procedure.
1534 if (hcon->role == HCI_ROLE_SLAVE &&
1535 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1536 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1537 struct l2cap_conn_param_update_req req;
1539 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1540 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1541 req.latency = cpu_to_le16(hcon->le_conn_latency);
1542 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1544 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1545 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1549 static void l2cap_conn_ready(struct l2cap_conn *conn)
1551 struct l2cap_chan *chan;
1552 struct hci_conn *hcon = conn->hcon;
1554 BT_DBG("conn %p", conn);
1556 if (hcon->type == ACL_LINK)
1557 l2cap_request_info(conn);
1559 mutex_lock(&conn->chan_lock);
1561 list_for_each_entry(chan, &conn->chan_l, list) {
1563 l2cap_chan_lock(chan);
1565 if (chan->scid == L2CAP_CID_A2MP) {
1566 l2cap_chan_unlock(chan);
1570 if (hcon->type == LE_LINK) {
1571 l2cap_le_start(chan);
1572 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1573 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1574 l2cap_chan_ready(chan);
1575 } else if (chan->state == BT_CONNECT) {
1576 l2cap_do_start(chan);
1579 l2cap_chan_unlock(chan);
1582 mutex_unlock(&conn->chan_lock);
1584 if (hcon->type == LE_LINK)
1585 l2cap_le_conn_ready(conn);
1587 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1590 /* Notify sockets that we cannot guaranty reliability anymore */
1591 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1593 struct l2cap_chan *chan;
1595 BT_DBG("conn %p", conn);
1597 mutex_lock(&conn->chan_lock);
1599 list_for_each_entry(chan, &conn->chan_l, list) {
1600 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1601 l2cap_chan_set_err(chan, err);
1604 mutex_unlock(&conn->chan_lock);
1607 static void l2cap_info_timeout(struct work_struct *work)
1609 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1612 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1613 conn->info_ident = 0;
1615 l2cap_conn_start(conn);
1620 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1621 * callback is called during registration. The ->remove callback is called
1622 * during unregistration.
1623 * An l2cap_user object can either be explicitly unregistered or when the
1624 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1625 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1626 * External modules must own a reference to the l2cap_conn object if they intend
1627 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1628 * any time if they don't.
1631 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1633 struct hci_dev *hdev = conn->hcon->hdev;
1636 /* We need to check whether l2cap_conn is registered. If it is not, we
1637 * must not register the l2cap_user. l2cap_conn_del() is unregisters
1638 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1639 * relies on the parent hci_conn object to be locked. This itself relies
1640 * on the hci_dev object to be locked. So we must lock the hci device
1645 if (!list_empty(&user->list)) {
1650 /* conn->hchan is NULL after l2cap_conn_del() was called */
1656 ret = user->probe(conn, user);
1660 list_add(&user->list, &conn->users);
1664 hci_dev_unlock(hdev);
1667 EXPORT_SYMBOL(l2cap_register_user);
1669 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1671 struct hci_dev *hdev = conn->hcon->hdev;
1675 if (list_empty(&user->list))
1678 list_del_init(&user->list);
1679 user->remove(conn, user);
1682 hci_dev_unlock(hdev);
1684 EXPORT_SYMBOL(l2cap_unregister_user);
1686 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1688 struct l2cap_user *user;
1690 while (!list_empty(&conn->users)) {
1691 user = list_first_entry(&conn->users, struct l2cap_user, list);
1692 list_del_init(&user->list);
1693 user->remove(conn, user);
1697 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1699 struct l2cap_conn *conn = hcon->l2cap_data;
1700 struct l2cap_chan *chan, *l;
1705 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1707 kfree_skb(conn->rx_skb);
1709 skb_queue_purge(&conn->pending_rx);
1711 /* We can not call flush_work(&conn->pending_rx_work) here since we
1712 * might block if we are running on a worker from the same workqueue
1713 * pending_rx_work is waiting on.
1715 if (work_pending(&conn->pending_rx_work))
1716 cancel_work_sync(&conn->pending_rx_work);
1718 if (work_pending(&conn->id_addr_update_work))
1719 cancel_work_sync(&conn->id_addr_update_work);
1721 l2cap_unregister_all_users(conn);
1723 /* Force the connection to be immediately dropped */
1724 hcon->disc_timeout = 0;
1726 mutex_lock(&conn->chan_lock);
1729 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1730 l2cap_chan_hold(chan);
1731 l2cap_chan_lock(chan);
1733 l2cap_chan_del(chan, err);
1735 chan->ops->close(chan);
1737 l2cap_chan_unlock(chan);
1738 l2cap_chan_put(chan);
1741 mutex_unlock(&conn->chan_lock);
1743 hci_chan_del(conn->hchan);
1745 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1746 cancel_delayed_work_sync(&conn->info_timer);
1748 hcon->l2cap_data = NULL;
1750 l2cap_conn_put(conn);
1753 static void l2cap_conn_free(struct kref *ref)
1755 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1757 hci_conn_put(conn->hcon);
1761 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1763 kref_get(&conn->ref);
1766 EXPORT_SYMBOL(l2cap_conn_get);
1768 void l2cap_conn_put(struct l2cap_conn *conn)
1770 kref_put(&conn->ref, l2cap_conn_free);
1772 EXPORT_SYMBOL(l2cap_conn_put);
1774 /* ---- Socket interface ---- */
1776 /* Find socket with psm and source / destination bdaddr.
1777 * Returns closest match.
1779 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1784 struct l2cap_chan *c, *c1 = NULL;
1786 read_lock(&chan_list_lock);
1788 list_for_each_entry(c, &chan_list, global_l) {
1789 if (state && c->state != state)
1792 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1795 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1798 if (c->psm == psm) {
1799 int src_match, dst_match;
1800 int src_any, dst_any;
1803 src_match = !bacmp(&c->src, src);
1804 dst_match = !bacmp(&c->dst, dst);
1805 if (src_match && dst_match) {
1807 read_unlock(&chan_list_lock);
1812 src_any = !bacmp(&c->src, BDADDR_ANY);
1813 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1814 if ((src_match && dst_any) || (src_any && dst_match) ||
1815 (src_any && dst_any))
1821 l2cap_chan_hold(c1);
1823 read_unlock(&chan_list_lock);
1828 static void l2cap_monitor_timeout(struct work_struct *work)
1830 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1831 monitor_timer.work);
1833 BT_DBG("chan %p", chan);
1835 l2cap_chan_lock(chan);
1838 l2cap_chan_unlock(chan);
1839 l2cap_chan_put(chan);
1843 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1845 l2cap_chan_unlock(chan);
1846 l2cap_chan_put(chan);
1849 static void l2cap_retrans_timeout(struct work_struct *work)
1851 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1852 retrans_timer.work);
1854 BT_DBG("chan %p", chan);
1856 l2cap_chan_lock(chan);
1859 l2cap_chan_unlock(chan);
1860 l2cap_chan_put(chan);
1864 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1865 l2cap_chan_unlock(chan);
1866 l2cap_chan_put(chan);
1869 static void l2cap_streaming_send(struct l2cap_chan *chan,
1870 struct sk_buff_head *skbs)
1872 struct sk_buff *skb;
1873 struct l2cap_ctrl *control;
1875 BT_DBG("chan %p, skbs %p", chan, skbs);
1877 if (__chan_is_moving(chan))
1880 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1882 while (!skb_queue_empty(&chan->tx_q)) {
1884 skb = skb_dequeue(&chan->tx_q);
1886 bt_cb(skb)->l2cap.retries = 1;
1887 control = &bt_cb(skb)->l2cap;
1889 control->reqseq = 0;
1890 control->txseq = chan->next_tx_seq;
1892 __pack_control(chan, control, skb);
1894 if (chan->fcs == L2CAP_FCS_CRC16) {
1895 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1896 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1899 l2cap_do_send(chan, skb);
1901 BT_DBG("Sent txseq %u", control->txseq);
1903 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1904 chan->frames_sent++;
1908 static int l2cap_ertm_send(struct l2cap_chan *chan)
1910 struct sk_buff *skb, *tx_skb;
1911 struct l2cap_ctrl *control;
1914 BT_DBG("chan %p", chan);
1916 if (chan->state != BT_CONNECTED)
1919 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1922 if (__chan_is_moving(chan))
1925 while (chan->tx_send_head &&
1926 chan->unacked_frames < chan->remote_tx_win &&
1927 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1929 skb = chan->tx_send_head;
1931 bt_cb(skb)->l2cap.retries = 1;
1932 control = &bt_cb(skb)->l2cap;
1934 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1937 control->reqseq = chan->buffer_seq;
1938 chan->last_acked_seq = chan->buffer_seq;
1939 control->txseq = chan->next_tx_seq;
1941 __pack_control(chan, control, skb);
1943 if (chan->fcs == L2CAP_FCS_CRC16) {
1944 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1945 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1948 /* Clone after data has been modified. Data is assumed to be
1949 read-only (for locking purposes) on cloned sk_buffs.
1951 tx_skb = skb_clone(skb, GFP_KERNEL);
1956 __set_retrans_timer(chan);
1958 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1959 chan->unacked_frames++;
1960 chan->frames_sent++;
1963 if (skb_queue_is_last(&chan->tx_q, skb))
1964 chan->tx_send_head = NULL;
1966 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1968 l2cap_do_send(chan, tx_skb);
1969 BT_DBG("Sent txseq %u", control->txseq);
1972 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1973 chan->unacked_frames, skb_queue_len(&chan->tx_q));
1978 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1980 struct l2cap_ctrl control;
1981 struct sk_buff *skb;
1982 struct sk_buff *tx_skb;
1985 BT_DBG("chan %p", chan);
1987 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1990 if (__chan_is_moving(chan))
1993 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1994 seq = l2cap_seq_list_pop(&chan->retrans_list);
1996 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1998 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2003 bt_cb(skb)->l2cap.retries++;
2004 control = bt_cb(skb)->l2cap;
2006 if (chan->max_tx != 0 &&
2007 bt_cb(skb)->l2cap.retries > chan->max_tx) {
2008 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2009 l2cap_send_disconn_req(chan, ECONNRESET);
2010 l2cap_seq_list_clear(&chan->retrans_list);
2014 control.reqseq = chan->buffer_seq;
2015 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2020 if (skb_cloned(skb)) {
2021 /* Cloned sk_buffs are read-only, so we need a
2024 tx_skb = skb_copy(skb, GFP_KERNEL);
2026 tx_skb = skb_clone(skb, GFP_KERNEL);
2030 l2cap_seq_list_clear(&chan->retrans_list);
2034 /* Update skb contents */
2035 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2036 put_unaligned_le32(__pack_extended_control(&control),
2037 tx_skb->data + L2CAP_HDR_SIZE);
2039 put_unaligned_le16(__pack_enhanced_control(&control),
2040 tx_skb->data + L2CAP_HDR_SIZE);
2044 if (chan->fcs == L2CAP_FCS_CRC16) {
2045 u16 fcs = crc16(0, (u8 *) tx_skb->data,
2046 tx_skb->len - L2CAP_FCS_SIZE);
2047 put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2051 l2cap_do_send(chan, tx_skb);
2053 BT_DBG("Resent txseq %d", control.txseq);
2055 chan->last_acked_seq = chan->buffer_seq;
2059 static void l2cap_retransmit(struct l2cap_chan *chan,
2060 struct l2cap_ctrl *control)
2062 BT_DBG("chan %p, control %p", chan, control);
2064 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2065 l2cap_ertm_resend(chan);
2068 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2069 struct l2cap_ctrl *control)
2071 struct sk_buff *skb;
2073 BT_DBG("chan %p, control %p", chan, control);
2076 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2078 l2cap_seq_list_clear(&chan->retrans_list);
2080 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2083 if (chan->unacked_frames) {
2084 skb_queue_walk(&chan->tx_q, skb) {
2085 if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2086 skb == chan->tx_send_head)
2090 skb_queue_walk_from(&chan->tx_q, skb) {
2091 if (skb == chan->tx_send_head)
2094 l2cap_seq_list_append(&chan->retrans_list,
2095 bt_cb(skb)->l2cap.txseq);
2098 l2cap_ertm_resend(chan);
2102 static void l2cap_send_ack(struct l2cap_chan *chan)
2104 struct l2cap_ctrl control;
2105 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2106 chan->last_acked_seq);
2109 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2110 chan, chan->last_acked_seq, chan->buffer_seq);
2112 memset(&control, 0, sizeof(control));
2115 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2116 chan->rx_state == L2CAP_RX_STATE_RECV) {
2117 __clear_ack_timer(chan);
2118 control.super = L2CAP_SUPER_RNR;
2119 control.reqseq = chan->buffer_seq;
2120 l2cap_send_sframe(chan, &control);
2122 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2123 l2cap_ertm_send(chan);
2124 /* If any i-frames were sent, they included an ack */
2125 if (chan->buffer_seq == chan->last_acked_seq)
2129 /* Ack now if the window is 3/4ths full.
2130 * Calculate without mul or div
2132 threshold = chan->ack_win;
2133 threshold += threshold << 1;
2136 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2139 if (frames_to_ack >= threshold) {
2140 __clear_ack_timer(chan);
2141 control.super = L2CAP_SUPER_RR;
2142 control.reqseq = chan->buffer_seq;
2143 l2cap_send_sframe(chan, &control);
2148 __set_ack_timer(chan);
2152 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2153 struct msghdr *msg, int len,
2154 int count, struct sk_buff *skb)
2156 struct l2cap_conn *conn = chan->conn;
2157 struct sk_buff **frag;
2160 if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2166 /* Continuation fragments (no L2CAP header) */
2167 frag = &skb_shinfo(skb)->frag_list;
2169 struct sk_buff *tmp;
2171 count = min_t(unsigned int, conn->mtu, len);
2173 tmp = chan->ops->alloc_skb(chan, 0, count,
2174 msg->msg_flags & MSG_DONTWAIT);
2176 return PTR_ERR(tmp);
2180 if (!copy_from_iter_full(skb_put(*frag, count), count,
2187 skb->len += (*frag)->len;
2188 skb->data_len += (*frag)->len;
2190 frag = &(*frag)->next;
2196 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2197 struct msghdr *msg, size_t len)
2199 struct l2cap_conn *conn = chan->conn;
2200 struct sk_buff *skb;
2201 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2202 struct l2cap_hdr *lh;
2204 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2205 __le16_to_cpu(chan->psm), len);
2207 count = min_t(unsigned int, (conn->mtu - hlen), len);
2209 skb = chan->ops->alloc_skb(chan, hlen, count,
2210 msg->msg_flags & MSG_DONTWAIT);
2214 /* Create L2CAP header */
2215 lh = skb_put(skb, L2CAP_HDR_SIZE);
2216 lh->cid = cpu_to_le16(chan->dcid);
2217 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2218 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2220 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2221 if (unlikely(err < 0)) {
2223 return ERR_PTR(err);
2228 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2229 struct msghdr *msg, size_t len)
2231 struct l2cap_conn *conn = chan->conn;
2232 struct sk_buff *skb;
2234 struct l2cap_hdr *lh;
2236 BT_DBG("chan %p len %zu", chan, len);
2238 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2240 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2241 msg->msg_flags & MSG_DONTWAIT);
2245 /* Create L2CAP header */
2246 lh = skb_put(skb, L2CAP_HDR_SIZE);
2247 lh->cid = cpu_to_le16(chan->dcid);
2248 lh->len = cpu_to_le16(len);
2250 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2251 if (unlikely(err < 0)) {
2253 return ERR_PTR(err);
2258 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2259 struct msghdr *msg, size_t len,
2262 struct l2cap_conn *conn = chan->conn;
2263 struct sk_buff *skb;
2264 int err, count, hlen;
2265 struct l2cap_hdr *lh;
2267 BT_DBG("chan %p len %zu", chan, len);
2270 return ERR_PTR(-ENOTCONN);
2272 hlen = __ertm_hdr_size(chan);
2275 hlen += L2CAP_SDULEN_SIZE;
2277 if (chan->fcs == L2CAP_FCS_CRC16)
2278 hlen += L2CAP_FCS_SIZE;
2280 count = min_t(unsigned int, (conn->mtu - hlen), len);
2282 skb = chan->ops->alloc_skb(chan, hlen, count,
2283 msg->msg_flags & MSG_DONTWAIT);
2287 /* Create L2CAP header */
2288 lh = skb_put(skb, L2CAP_HDR_SIZE);
2289 lh->cid = cpu_to_le16(chan->dcid);
2290 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2292 /* Control header is populated later */
2293 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2294 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2296 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2299 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2301 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2302 if (unlikely(err < 0)) {
2304 return ERR_PTR(err);
2307 bt_cb(skb)->l2cap.fcs = chan->fcs;
2308 bt_cb(skb)->l2cap.retries = 0;
2312 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2313 struct sk_buff_head *seg_queue,
2314 struct msghdr *msg, size_t len)
2316 struct sk_buff *skb;
2321 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2323 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2324 * so fragmented skbs are not used. The HCI layer's handling
2325 * of fragmented skbs is not compatible with ERTM's queueing.
2328 /* PDU size is derived from the HCI MTU */
2329 pdu_len = chan->conn->mtu;
2331 /* Constrain PDU size for BR/EDR connections */
2333 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2335 /* Adjust for largest possible L2CAP overhead. */
2337 pdu_len -= L2CAP_FCS_SIZE;
2339 pdu_len -= __ertm_hdr_size(chan);
2341 /* Remote device may have requested smaller PDUs */
2342 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2344 if (len <= pdu_len) {
2345 sar = L2CAP_SAR_UNSEGMENTED;
2349 sar = L2CAP_SAR_START;
2354 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2357 __skb_queue_purge(seg_queue);
2358 return PTR_ERR(skb);
2361 bt_cb(skb)->l2cap.sar = sar;
2362 __skb_queue_tail(seg_queue, skb);
2368 if (len <= pdu_len) {
2369 sar = L2CAP_SAR_END;
2372 sar = L2CAP_SAR_CONTINUE;
2379 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2381 size_t len, u16 sdulen)
2383 struct l2cap_conn *conn = chan->conn;
2384 struct sk_buff *skb;
2385 int err, count, hlen;
2386 struct l2cap_hdr *lh;
2388 BT_DBG("chan %p len %zu", chan, len);
2391 return ERR_PTR(-ENOTCONN);
2393 hlen = L2CAP_HDR_SIZE;
2396 hlen += L2CAP_SDULEN_SIZE;
2398 count = min_t(unsigned int, (conn->mtu - hlen), len);
2400 skb = chan->ops->alloc_skb(chan, hlen, count,
2401 msg->msg_flags & MSG_DONTWAIT);
2405 /* Create L2CAP header */
2406 lh = skb_put(skb, L2CAP_HDR_SIZE);
2407 lh->cid = cpu_to_le16(chan->dcid);
2408 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2411 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2413 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2414 if (unlikely(err < 0)) {
2416 return ERR_PTR(err);
2422 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2423 struct sk_buff_head *seg_queue,
2424 struct msghdr *msg, size_t len)
2426 struct sk_buff *skb;
2430 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2433 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2439 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2441 __skb_queue_purge(seg_queue);
2442 return PTR_ERR(skb);
2445 __skb_queue_tail(seg_queue, skb);
2451 pdu_len += L2CAP_SDULEN_SIZE;
2458 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2462 BT_DBG("chan %p", chan);
2464 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2465 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2470 BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2471 skb_queue_len(&chan->tx_q));
2474 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2476 struct sk_buff *skb;
2478 struct sk_buff_head seg_queue;
2483 /* Connectionless channel */
2484 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2485 skb = l2cap_create_connless_pdu(chan, msg, len);
2487 return PTR_ERR(skb);
2489 /* Channel lock is released before requesting new skb and then
2490 * reacquired thus we need to recheck channel state.
2492 if (chan->state != BT_CONNECTED) {
2497 l2cap_do_send(chan, skb);
2501 switch (chan->mode) {
2502 case L2CAP_MODE_LE_FLOWCTL:
2503 /* Check outgoing MTU */
2504 if (len > chan->omtu)
2507 __skb_queue_head_init(&seg_queue);
2509 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2511 if (chan->state != BT_CONNECTED) {
2512 __skb_queue_purge(&seg_queue);
2519 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2521 l2cap_le_flowctl_send(chan);
2523 if (!chan->tx_credits)
2524 chan->ops->suspend(chan);
2530 case L2CAP_MODE_BASIC:
2531 /* Check outgoing MTU */
2532 if (len > chan->omtu)
2535 /* Create a basic PDU */
2536 skb = l2cap_create_basic_pdu(chan, msg, len);
2538 return PTR_ERR(skb);
2540 /* Channel lock is released before requesting new skb and then
2541 * reacquired thus we need to recheck channel state.
2543 if (chan->state != BT_CONNECTED) {
2548 l2cap_do_send(chan, skb);
2552 case L2CAP_MODE_ERTM:
2553 case L2CAP_MODE_STREAMING:
2554 /* Check outgoing MTU */
2555 if (len > chan->omtu) {
2560 __skb_queue_head_init(&seg_queue);
2562 /* Do segmentation before calling in to the state machine,
2563 * since it's possible to block while waiting for memory
2566 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2568 /* The channel could have been closed while segmenting,
2569 * check that it is still connected.
2571 if (chan->state != BT_CONNECTED) {
2572 __skb_queue_purge(&seg_queue);
2579 if (chan->mode == L2CAP_MODE_ERTM)
2580 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2582 l2cap_streaming_send(chan, &seg_queue);
2586 /* If the skbs were not queued for sending, they'll still be in
2587 * seg_queue and need to be purged.
2589 __skb_queue_purge(&seg_queue);
2593 BT_DBG("bad state %1.1x", chan->mode);
2599 EXPORT_SYMBOL_GPL(l2cap_chan_send);
2601 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2603 struct l2cap_ctrl control;
2606 BT_DBG("chan %p, txseq %u", chan, txseq);
2608 memset(&control, 0, sizeof(control));
2610 control.super = L2CAP_SUPER_SREJ;
2612 for (seq = chan->expected_tx_seq; seq != txseq;
2613 seq = __next_seq(chan, seq)) {
2614 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2615 control.reqseq = seq;
2616 l2cap_send_sframe(chan, &control);
2617 l2cap_seq_list_append(&chan->srej_list, seq);
2621 chan->expected_tx_seq = __next_seq(chan, txseq);
2624 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2626 struct l2cap_ctrl control;
2628 BT_DBG("chan %p", chan);
2630 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2633 memset(&control, 0, sizeof(control));
2635 control.super = L2CAP_SUPER_SREJ;
2636 control.reqseq = chan->srej_list.tail;
2637 l2cap_send_sframe(chan, &control);
2640 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2642 struct l2cap_ctrl control;
2646 BT_DBG("chan %p, txseq %u", chan, txseq);
2648 memset(&control, 0, sizeof(control));
2650 control.super = L2CAP_SUPER_SREJ;
2652 /* Capture initial list head to allow only one pass through the list. */
2653 initial_head = chan->srej_list.head;
2656 seq = l2cap_seq_list_pop(&chan->srej_list);
2657 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2660 control.reqseq = seq;
2661 l2cap_send_sframe(chan, &control);
2662 l2cap_seq_list_append(&chan->srej_list, seq);
2663 } while (chan->srej_list.head != initial_head);
2666 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2668 struct sk_buff *acked_skb;
2671 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2673 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2676 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2677 chan->expected_ack_seq, chan->unacked_frames);
2679 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2680 ackseq = __next_seq(chan, ackseq)) {
2682 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2684 skb_unlink(acked_skb, &chan->tx_q);
2685 kfree_skb(acked_skb);
2686 chan->unacked_frames--;
2690 chan->expected_ack_seq = reqseq;
2692 if (chan->unacked_frames == 0)
2693 __clear_retrans_timer(chan);
2695 BT_DBG("unacked_frames %u", chan->unacked_frames);
2698 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2700 BT_DBG("chan %p", chan);
2702 chan->expected_tx_seq = chan->buffer_seq;
2703 l2cap_seq_list_clear(&chan->srej_list);
2704 skb_queue_purge(&chan->srej_q);
2705 chan->rx_state = L2CAP_RX_STATE_RECV;
2708 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2709 struct l2cap_ctrl *control,
2710 struct sk_buff_head *skbs, u8 event)
2712 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2716 case L2CAP_EV_DATA_REQUEST:
2717 if (chan->tx_send_head == NULL)
2718 chan->tx_send_head = skb_peek(skbs);
2720 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2721 l2cap_ertm_send(chan);
2723 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2724 BT_DBG("Enter LOCAL_BUSY");
2725 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2727 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2728 /* The SREJ_SENT state must be aborted if we are to
2729 * enter the LOCAL_BUSY state.
2731 l2cap_abort_rx_srej_sent(chan);
2734 l2cap_send_ack(chan);
2737 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2738 BT_DBG("Exit LOCAL_BUSY");
2739 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2741 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2742 struct l2cap_ctrl local_control;
2744 memset(&local_control, 0, sizeof(local_control));
2745 local_control.sframe = 1;
2746 local_control.super = L2CAP_SUPER_RR;
2747 local_control.poll = 1;
2748 local_control.reqseq = chan->buffer_seq;
2749 l2cap_send_sframe(chan, &local_control);
2751 chan->retry_count = 1;
2752 __set_monitor_timer(chan);
2753 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2756 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2757 l2cap_process_reqseq(chan, control->reqseq);
2759 case L2CAP_EV_EXPLICIT_POLL:
2760 l2cap_send_rr_or_rnr(chan, 1);
2761 chan->retry_count = 1;
2762 __set_monitor_timer(chan);
2763 __clear_ack_timer(chan);
2764 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2766 case L2CAP_EV_RETRANS_TO:
2767 l2cap_send_rr_or_rnr(chan, 1);
2768 chan->retry_count = 1;
2769 __set_monitor_timer(chan);
2770 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2772 case L2CAP_EV_RECV_FBIT:
2773 /* Nothing to process */
2780 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2781 struct l2cap_ctrl *control,
2782 struct sk_buff_head *skbs, u8 event)
2784 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2788 case L2CAP_EV_DATA_REQUEST:
2789 if (chan->tx_send_head == NULL)
2790 chan->tx_send_head = skb_peek(skbs);
2791 /* Queue data, but don't send. */
2792 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2794 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2795 BT_DBG("Enter LOCAL_BUSY");
2796 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2798 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2799 /* The SREJ_SENT state must be aborted if we are to
2800 * enter the LOCAL_BUSY state.
2802 l2cap_abort_rx_srej_sent(chan);
2805 l2cap_send_ack(chan);
2808 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2809 BT_DBG("Exit LOCAL_BUSY");
2810 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2812 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2813 struct l2cap_ctrl local_control;
2814 memset(&local_control, 0, sizeof(local_control));
2815 local_control.sframe = 1;
2816 local_control.super = L2CAP_SUPER_RR;
2817 local_control.poll = 1;
2818 local_control.reqseq = chan->buffer_seq;
2819 l2cap_send_sframe(chan, &local_control);
2821 chan->retry_count = 1;
2822 __set_monitor_timer(chan);
2823 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2826 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2827 l2cap_process_reqseq(chan, control->reqseq);
2831 case L2CAP_EV_RECV_FBIT:
2832 if (control && control->final) {
2833 __clear_monitor_timer(chan);
2834 if (chan->unacked_frames > 0)
2835 __set_retrans_timer(chan);
2836 chan->retry_count = 0;
2837 chan->tx_state = L2CAP_TX_STATE_XMIT;
2838 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2841 case L2CAP_EV_EXPLICIT_POLL:
2844 case L2CAP_EV_MONITOR_TO:
2845 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2846 l2cap_send_rr_or_rnr(chan, 1);
2847 __set_monitor_timer(chan);
2848 chan->retry_count++;
2850 l2cap_send_disconn_req(chan, ECONNABORTED);
2858 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2859 struct sk_buff_head *skbs, u8 event)
2861 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2862 chan, control, skbs, event, chan->tx_state);
2864 switch (chan->tx_state) {
2865 case L2CAP_TX_STATE_XMIT:
2866 l2cap_tx_state_xmit(chan, control, skbs, event);
2868 case L2CAP_TX_STATE_WAIT_F:
2869 l2cap_tx_state_wait_f(chan, control, skbs, event);
2877 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2878 struct l2cap_ctrl *control)
2880 BT_DBG("chan %p, control %p", chan, control);
2881 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2884 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2885 struct l2cap_ctrl *control)
2887 BT_DBG("chan %p, control %p", chan, control);
2888 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2891 /* Copy frame to all raw sockets on that connection */
2892 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2894 struct sk_buff *nskb;
2895 struct l2cap_chan *chan;
2897 BT_DBG("conn %p", conn);
2899 mutex_lock(&conn->chan_lock);
2901 list_for_each_entry(chan, &conn->chan_l, list) {
2902 if (chan->chan_type != L2CAP_CHAN_RAW)
2905 /* Don't send frame to the channel it came from */
2906 if (bt_cb(skb)->l2cap.chan == chan)
2909 nskb = skb_clone(skb, GFP_KERNEL);
2912 if (chan->ops->recv(chan, nskb))
2916 mutex_unlock(&conn->chan_lock);
2919 /* ---- L2CAP signalling commands ---- */
2920 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2921 u8 ident, u16 dlen, void *data)
2923 struct sk_buff *skb, **frag;
2924 struct l2cap_cmd_hdr *cmd;
2925 struct l2cap_hdr *lh;
2928 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2929 conn, code, ident, dlen);
2931 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2934 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2935 count = min_t(unsigned int, conn->mtu, len);
2937 skb = bt_skb_alloc(count, GFP_KERNEL);
2941 lh = skb_put(skb, L2CAP_HDR_SIZE);
2942 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2944 if (conn->hcon->type == LE_LINK)
2945 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2947 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2949 cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
2952 cmd->len = cpu_to_le16(dlen);
2955 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2956 skb_put_data(skb, data, count);
2962 /* Continuation fragments (no L2CAP header) */
2963 frag = &skb_shinfo(skb)->frag_list;
2965 count = min_t(unsigned int, conn->mtu, len);
2967 *frag = bt_skb_alloc(count, GFP_KERNEL);
2971 skb_put_data(*frag, data, count);
2976 frag = &(*frag)->next;
2986 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2989 struct l2cap_conf_opt *opt = *ptr;
2992 len = L2CAP_CONF_OPT_SIZE + opt->len;
3000 *val = *((u8 *) opt->val);
3004 *val = get_unaligned_le16(opt->val);
3008 *val = get_unaligned_le32(opt->val);
3012 *val = (unsigned long) opt->val;
3016 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
3020 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3022 struct l2cap_conf_opt *opt = *ptr;
3024 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3026 if (size < L2CAP_CONF_OPT_SIZE + len)
3034 *((u8 *) opt->val) = val;
3038 put_unaligned_le16(val, opt->val);
3042 put_unaligned_le32(val, opt->val);
3046 memcpy(opt->val, (void *) val, len);
3050 *ptr += L2CAP_CONF_OPT_SIZE + len;
3053 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3055 struct l2cap_conf_efs efs;
3057 switch (chan->mode) {
3058 case L2CAP_MODE_ERTM:
3059 efs.id = chan->local_id;
3060 efs.stype = chan->local_stype;
3061 efs.msdu = cpu_to_le16(chan->local_msdu);
3062 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3063 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3064 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3067 case L2CAP_MODE_STREAMING:
3069 efs.stype = L2CAP_SERV_BESTEFFORT;
3070 efs.msdu = cpu_to_le16(chan->local_msdu);
3071 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3080 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3081 (unsigned long) &efs, size);
3084 static void l2cap_ack_timeout(struct work_struct *work)
3086 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3090 BT_DBG("chan %p", chan);
3092 l2cap_chan_lock(chan);
3094 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3095 chan->last_acked_seq);
3098 l2cap_send_rr_or_rnr(chan, 0);
3100 l2cap_chan_unlock(chan);
3101 l2cap_chan_put(chan);
3104 int l2cap_ertm_init(struct l2cap_chan *chan)
3108 chan->next_tx_seq = 0;
3109 chan->expected_tx_seq = 0;
3110 chan->expected_ack_seq = 0;
3111 chan->unacked_frames = 0;
3112 chan->buffer_seq = 0;
3113 chan->frames_sent = 0;
3114 chan->last_acked_seq = 0;
3116 chan->sdu_last_frag = NULL;
3119 skb_queue_head_init(&chan->tx_q);
3121 chan->local_amp_id = AMP_ID_BREDR;
3122 chan->move_id = AMP_ID_BREDR;
3123 chan->move_state = L2CAP_MOVE_STABLE;
3124 chan->move_role = L2CAP_MOVE_ROLE_NONE;
3126 if (chan->mode != L2CAP_MODE_ERTM)
3129 chan->rx_state = L2CAP_RX_STATE_RECV;
3130 chan->tx_state = L2CAP_TX_STATE_XMIT;
3132 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3133 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3134 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3136 skb_queue_head_init(&chan->srej_q);
3138 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3142 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3144 l2cap_seq_list_free(&chan->srej_list);
3149 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3152 case L2CAP_MODE_STREAMING:
3153 case L2CAP_MODE_ERTM:
3154 if (l2cap_mode_supported(mode, remote_feat_mask))
3158 return L2CAP_MODE_BASIC;
3162 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3164 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3165 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3168 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3170 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3171 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3174 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3175 struct l2cap_conf_rfc *rfc)
3177 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3178 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3180 /* Class 1 devices have must have ERTM timeouts
3181 * exceeding the Link Supervision Timeout. The
3182 * default Link Supervision Timeout for AMP
3183 * controllers is 10 seconds.
3185 * Class 1 devices use 0xffffffff for their
3186 * best-effort flush timeout, so the clamping logic
3187 * will result in a timeout that meets the above
3188 * requirement. ERTM timeouts are 16-bit values, so
3189 * the maximum timeout is 65.535 seconds.
3192 /* Convert timeout to milliseconds and round */
3193 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3195 /* This is the recommended formula for class 2 devices
3196 * that start ERTM timers when packets are sent to the
3199 ertm_to = 3 * ertm_to + 500;
3201 if (ertm_to > 0xffff)
3204 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3205 rfc->monitor_timeout = rfc->retrans_timeout;
3207 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3208 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3212 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3214 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3215 __l2cap_ews_supported(chan->conn)) {
3216 /* use extended control field */
3217 set_bit(FLAG_EXT_CTRL, &chan->flags);
3218 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3220 chan->tx_win = min_t(u16, chan->tx_win,
3221 L2CAP_DEFAULT_TX_WINDOW);
3222 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3224 chan->ack_win = chan->tx_win;
3227 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3229 struct l2cap_conf_req *req = data;
3230 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3231 void *ptr = req->data;
3232 void *endptr = data + data_size;
3235 BT_DBG("chan %p", chan);
3237 if (chan->num_conf_req || chan->num_conf_rsp)
3240 switch (chan->mode) {
3241 case L2CAP_MODE_STREAMING:
3242 case L2CAP_MODE_ERTM:
3243 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3246 if (__l2cap_efs_supported(chan->conn))
3247 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3251 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3256 if (chan->imtu != L2CAP_DEFAULT_MTU)
3257 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
3259 switch (chan->mode) {
3260 case L2CAP_MODE_BASIC:
3264 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3265 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3268 rfc.mode = L2CAP_MODE_BASIC;
3270 rfc.max_transmit = 0;
3271 rfc.retrans_timeout = 0;
3272 rfc.monitor_timeout = 0;
3273 rfc.max_pdu_size = 0;
3275 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3276 (unsigned long) &rfc, endptr - ptr);
3279 case L2CAP_MODE_ERTM:
3280 rfc.mode = L2CAP_MODE_ERTM;
3281 rfc.max_transmit = chan->max_tx;
3283 __l2cap_set_ertm_timeouts(chan, &rfc);
3285 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3286 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3288 rfc.max_pdu_size = cpu_to_le16(size);
3290 l2cap_txwin_setup(chan);
3292 rfc.txwin_size = min_t(u16, chan->tx_win,
3293 L2CAP_DEFAULT_TX_WINDOW);
3295 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3296 (unsigned long) &rfc, endptr - ptr);
3298 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3299 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3301 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3302 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3303 chan->tx_win, endptr - ptr);
3305 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3306 if (chan->fcs == L2CAP_FCS_NONE ||
3307 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3308 chan->fcs = L2CAP_FCS_NONE;
3309 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3310 chan->fcs, endptr - ptr);
3314 case L2CAP_MODE_STREAMING:
3315 l2cap_txwin_setup(chan);
3316 rfc.mode = L2CAP_MODE_STREAMING;
3318 rfc.max_transmit = 0;
3319 rfc.retrans_timeout = 0;
3320 rfc.monitor_timeout = 0;
3322 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3323 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3325 rfc.max_pdu_size = cpu_to_le16(size);
3327 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3328 (unsigned long) &rfc, endptr - ptr);
3330 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3331 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3333 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3334 if (chan->fcs == L2CAP_FCS_NONE ||
3335 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3336 chan->fcs = L2CAP_FCS_NONE;
3337 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3338 chan->fcs, endptr - ptr);
3343 req->dcid = cpu_to_le16(chan->dcid);
3344 req->flags = cpu_to_le16(0);
3349 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3351 struct l2cap_conf_rsp *rsp = data;
3352 void *ptr = rsp->data;
3353 void *endptr = data + data_size;
3354 void *req = chan->conf_req;
3355 int len = chan->conf_len;
3356 int type, hint, olen;
3358 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3359 struct l2cap_conf_efs efs;
3361 u16 mtu = L2CAP_DEFAULT_MTU;
3362 u16 result = L2CAP_CONF_SUCCESS;
3365 BT_DBG("chan %p", chan);
3367 while (len >= L2CAP_CONF_OPT_SIZE) {
3368 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3372 hint = type & L2CAP_CONF_HINT;
3373 type &= L2CAP_CONF_MASK;
3376 case L2CAP_CONF_MTU:
3382 case L2CAP_CONF_FLUSH_TO:
3385 chan->flush_to = val;
3388 case L2CAP_CONF_QOS:
3391 case L2CAP_CONF_RFC:
3392 if (olen != sizeof(rfc))
3394 memcpy(&rfc, (void *) val, olen);
3397 case L2CAP_CONF_FCS:
3400 if (val == L2CAP_FCS_NONE)
3401 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3404 case L2CAP_CONF_EFS:
3405 if (olen != sizeof(efs))
3408 memcpy(&efs, (void *) val, olen);
3411 case L2CAP_CONF_EWS:
3414 if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3415 return -ECONNREFUSED;
3416 set_bit(FLAG_EXT_CTRL, &chan->flags);
3417 set_bit(CONF_EWS_RECV, &chan->conf_state);
3418 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3419 chan->remote_tx_win = val;
3425 result = L2CAP_CONF_UNKNOWN;
3426 *((u8 *) ptr++) = type;
3431 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3434 switch (chan->mode) {
3435 case L2CAP_MODE_STREAMING:
3436 case L2CAP_MODE_ERTM:
3437 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3438 chan->mode = l2cap_select_mode(rfc.mode,
3439 chan->conn->feat_mask);
3444 if (__l2cap_efs_supported(chan->conn))
3445 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3447 return -ECONNREFUSED;
3450 if (chan->mode != rfc.mode)
3451 return -ECONNREFUSED;
3457 if (chan->mode != rfc.mode) {
3458 result = L2CAP_CONF_UNACCEPT;
3459 rfc.mode = chan->mode;
3461 if (chan->num_conf_rsp == 1)
3462 return -ECONNREFUSED;
3464 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3465 (unsigned long) &rfc, endptr - ptr);
3468 if (result == L2CAP_CONF_SUCCESS) {
3469 /* Configure output options and let the other side know
3470 * which ones we don't like. */
3472 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3473 result = L2CAP_CONF_UNACCEPT;
3476 set_bit(CONF_MTU_DONE, &chan->conf_state);
3478 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
3481 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3482 efs.stype != L2CAP_SERV_NOTRAFIC &&
3483 efs.stype != chan->local_stype) {
3485 result = L2CAP_CONF_UNACCEPT;
3487 if (chan->num_conf_req >= 1)
3488 return -ECONNREFUSED;
3490 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3492 (unsigned long) &efs, endptr - ptr);
3494 /* Send PENDING Conf Rsp */
3495 result = L2CAP_CONF_PENDING;
3496 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3501 case L2CAP_MODE_BASIC:
3502 chan->fcs = L2CAP_FCS_NONE;
3503 set_bit(CONF_MODE_DONE, &chan->conf_state);
3506 case L2CAP_MODE_ERTM:
3507 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3508 chan->remote_tx_win = rfc.txwin_size;
3510 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3512 chan->remote_max_tx = rfc.max_transmit;
3514 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3515 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3516 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3517 rfc.max_pdu_size = cpu_to_le16(size);
3518 chan->remote_mps = size;
3520 __l2cap_set_ertm_timeouts(chan, &rfc);
3522 set_bit(CONF_MODE_DONE, &chan->conf_state);
3524 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3525 sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3527 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3528 chan->remote_id = efs.id;
3529 chan->remote_stype = efs.stype;
3530 chan->remote_msdu = le16_to_cpu(efs.msdu);
3531 chan->remote_flush_to =
3532 le32_to_cpu(efs.flush_to);
3533 chan->remote_acc_lat =
3534 le32_to_cpu(efs.acc_lat);
3535 chan->remote_sdu_itime =
3536 le32_to_cpu(efs.sdu_itime);
3537 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3539 (unsigned long) &efs, endptr - ptr);
3543 case L2CAP_MODE_STREAMING:
3544 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3545 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3546 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3547 rfc.max_pdu_size = cpu_to_le16(size);
3548 chan->remote_mps = size;
3550 set_bit(CONF_MODE_DONE, &chan->conf_state);
3552 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3553 (unsigned long) &rfc, endptr - ptr);
3558 result = L2CAP_CONF_UNACCEPT;
3560 memset(&rfc, 0, sizeof(rfc));
3561 rfc.mode = chan->mode;
3564 if (result == L2CAP_CONF_SUCCESS)
3565 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3567 rsp->scid = cpu_to_le16(chan->dcid);
3568 rsp->result = cpu_to_le16(result);
3569 rsp->flags = cpu_to_le16(0);
3574 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3575 void *data, size_t size, u16 *result)
3577 struct l2cap_conf_req *req = data;
3578 void *ptr = req->data;
3579 void *endptr = data + size;
3582 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3583 struct l2cap_conf_efs efs;
3585 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3587 while (len >= L2CAP_CONF_OPT_SIZE) {
3588 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3593 case L2CAP_CONF_MTU:
3596 if (val < L2CAP_DEFAULT_MIN_MTU) {
3597 *result = L2CAP_CONF_UNACCEPT;
3598 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3601 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3605 case L2CAP_CONF_FLUSH_TO:
3608 chan->flush_to = val;
3609 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3610 chan->flush_to, endptr - ptr);
3613 case L2CAP_CONF_RFC:
3614 if (olen != sizeof(rfc))
3616 memcpy(&rfc, (void *)val, olen);
3617 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3618 rfc.mode != chan->mode)
3619 return -ECONNREFUSED;
3621 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3622 (unsigned long) &rfc, endptr - ptr);
3625 case L2CAP_CONF_EWS:
3628 chan->ack_win = min_t(u16, val, chan->ack_win);
3629 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3630 chan->tx_win, endptr - ptr);
3633 case L2CAP_CONF_EFS:
3634 if (olen != sizeof(efs))
3636 memcpy(&efs, (void *)val, olen);
3637 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3638 efs.stype != L2CAP_SERV_NOTRAFIC &&
3639 efs.stype != chan->local_stype)
3640 return -ECONNREFUSED;
3641 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3642 (unsigned long) &efs, endptr - ptr);
3645 case L2CAP_CONF_FCS:
3648 if (*result == L2CAP_CONF_PENDING)
3649 if (val == L2CAP_FCS_NONE)
3650 set_bit(CONF_RECV_NO_FCS,
3656 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3657 return -ECONNREFUSED;
3659 chan->mode = rfc.mode;
3661 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3663 case L2CAP_MODE_ERTM:
3664 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3665 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3666 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3667 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3668 chan->ack_win = min_t(u16, chan->ack_win,
3671 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3672 chan->local_msdu = le16_to_cpu(efs.msdu);
3673 chan->local_sdu_itime =
3674 le32_to_cpu(efs.sdu_itime);
3675 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3676 chan->local_flush_to =
3677 le32_to_cpu(efs.flush_to);
3681 case L2CAP_MODE_STREAMING:
3682 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3686 req->dcid = cpu_to_le16(chan->dcid);
3687 req->flags = cpu_to_le16(0);
3692 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3693 u16 result, u16 flags)
3695 struct l2cap_conf_rsp *rsp = data;
3696 void *ptr = rsp->data;
3698 BT_DBG("chan %p", chan);
3700 rsp->scid = cpu_to_le16(chan->dcid);
3701 rsp->result = cpu_to_le16(result);
3702 rsp->flags = cpu_to_le16(flags);
3707 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3709 struct l2cap_le_conn_rsp rsp;
3710 struct l2cap_conn *conn = chan->conn;
3712 BT_DBG("chan %p", chan);
3714 rsp.dcid = cpu_to_le16(chan->scid);
3715 rsp.mtu = cpu_to_le16(chan->imtu);
3716 rsp.mps = cpu_to_le16(chan->mps);
3717 rsp.credits = cpu_to_le16(chan->rx_credits);
3718 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3720 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3724 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3726 struct l2cap_conn_rsp rsp;
3727 struct l2cap_conn *conn = chan->conn;
3731 rsp.scid = cpu_to_le16(chan->dcid);
3732 rsp.dcid = cpu_to_le16(chan->scid);
3733 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3734 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3737 rsp_code = L2CAP_CREATE_CHAN_RSP;
3739 rsp_code = L2CAP_CONN_RSP;
3741 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3743 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3745 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3748 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3749 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3750 chan->num_conf_req++;
3753 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3757 /* Use sane default values in case a misbehaving remote device
3758 * did not send an RFC or extended window size option.
3760 u16 txwin_ext = chan->ack_win;
3761 struct l2cap_conf_rfc rfc = {
3763 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3764 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3765 .max_pdu_size = cpu_to_le16(chan->imtu),
3766 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3769 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3771 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3774 while (len >= L2CAP_CONF_OPT_SIZE) {
3775 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3780 case L2CAP_CONF_RFC:
3781 if (olen != sizeof(rfc))
3783 memcpy(&rfc, (void *)val, olen);
3785 case L2CAP_CONF_EWS:
3794 case L2CAP_MODE_ERTM:
3795 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3796 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3797 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3798 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3799 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3801 chan->ack_win = min_t(u16, chan->ack_win,
3804 case L2CAP_MODE_STREAMING:
3805 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3809 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3810 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3813 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3815 if (cmd_len < sizeof(*rej))
3818 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3821 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3822 cmd->ident == conn->info_ident) {
3823 cancel_delayed_work(&conn->info_timer);
3825 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3826 conn->info_ident = 0;
3828 l2cap_conn_start(conn);
3834 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3835 struct l2cap_cmd_hdr *cmd,
3836 u8 *data, u8 rsp_code, u8 amp_id)
3838 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3839 struct l2cap_conn_rsp rsp;
3840 struct l2cap_chan *chan = NULL, *pchan;
3841 int result, status = L2CAP_CS_NO_INFO;
3843 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3844 __le16 psm = req->psm;
3846 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3848 /* Check if we have socket listening on psm */
3849 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3850 &conn->hcon->dst, ACL_LINK);
3852 result = L2CAP_CR_BAD_PSM;
3856 mutex_lock(&conn->chan_lock);
3857 l2cap_chan_lock(pchan);
3859 /* Check if the ACL is secure enough (if not SDP) */
3860 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3861 !hci_conn_check_link_mode(conn->hcon)) {
3862 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3863 result = L2CAP_CR_SEC_BLOCK;
3867 result = L2CAP_CR_NO_MEM;
3869 /* Check if we already have channel with that dcid */
3870 if (__l2cap_get_chan_by_dcid(conn, scid))
3873 chan = pchan->ops->new_connection(pchan);
3877 /* For certain devices (ex: HID mouse), support for authentication,
3878 * pairing and bonding is optional. For such devices, inorder to avoid
3879 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3880 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3882 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3884 bacpy(&chan->src, &conn->hcon->src);
3885 bacpy(&chan->dst, &conn->hcon->dst);
3886 chan->src_type = bdaddr_src_type(conn->hcon);
3887 chan->dst_type = bdaddr_dst_type(conn->hcon);
3890 chan->local_amp_id = amp_id;
3892 __l2cap_chan_add(conn, chan);
3896 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3898 chan->ident = cmd->ident;
3900 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3901 if (l2cap_chan_check_security(chan, false)) {
3902 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3903 l2cap_state_change(chan, BT_CONNECT2);
3904 result = L2CAP_CR_PEND;
3905 status = L2CAP_CS_AUTHOR_PEND;
3906 chan->ops->defer(chan);
3908 /* Force pending result for AMP controllers.
3909 * The connection will succeed after the
3910 * physical link is up.
3912 if (amp_id == AMP_ID_BREDR) {
3913 l2cap_state_change(chan, BT_CONFIG);
3914 result = L2CAP_CR_SUCCESS;
3916 l2cap_state_change(chan, BT_CONNECT2);
3917 result = L2CAP_CR_PEND;
3919 status = L2CAP_CS_NO_INFO;
3922 l2cap_state_change(chan, BT_CONNECT2);
3923 result = L2CAP_CR_PEND;
3924 status = L2CAP_CS_AUTHEN_PEND;
3927 l2cap_state_change(chan, BT_CONNECT2);
3928 result = L2CAP_CR_PEND;
3929 status = L2CAP_CS_NO_INFO;
3933 l2cap_chan_unlock(pchan);
3934 mutex_unlock(&conn->chan_lock);
3935 l2cap_chan_put(pchan);
3938 rsp.scid = cpu_to_le16(scid);
3939 rsp.dcid = cpu_to_le16(dcid);
3940 rsp.result = cpu_to_le16(result);
3941 rsp.status = cpu_to_le16(status);
3942 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3944 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3945 struct l2cap_info_req info;
3946 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3948 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3949 conn->info_ident = l2cap_get_ident(conn);
3951 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3953 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3954 sizeof(info), &info);
3957 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3958 result == L2CAP_CR_SUCCESS) {
3960 set_bit(CONF_REQ_SENT, &chan->conf_state);
3961 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3962 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3963 chan->num_conf_req++;
3969 static int l2cap_connect_req(struct l2cap_conn *conn,
3970 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3972 struct hci_dev *hdev = conn->hcon->hdev;
3973 struct hci_conn *hcon = conn->hcon;
3975 if (cmd_len < sizeof(struct l2cap_conn_req))
3979 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3980 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3981 mgmt_device_connected(hdev, hcon, 0, NULL, 0);
3982 hci_dev_unlock(hdev);
3984 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3988 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3989 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3992 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3993 u16 scid, dcid, result, status;
3994 struct l2cap_chan *chan;
3998 if (cmd_len < sizeof(*rsp))
4001 scid = __le16_to_cpu(rsp->scid);
4002 dcid = __le16_to_cpu(rsp->dcid);
4003 result = __le16_to_cpu(rsp->result);
4004 status = __le16_to_cpu(rsp->status);
4006 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4007 dcid, scid, result, status);
4009 mutex_lock(&conn->chan_lock);
4012 chan = __l2cap_get_chan_by_scid(conn, scid);
4018 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4027 l2cap_chan_lock(chan);
4030 case L2CAP_CR_SUCCESS:
4031 l2cap_state_change(chan, BT_CONFIG);
4034 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4036 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4039 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4040 l2cap_build_conf_req(chan, req, sizeof(req)), req);
4041 chan->num_conf_req++;
4045 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4049 l2cap_chan_del(chan, ECONNREFUSED);
4053 l2cap_chan_unlock(chan);
4056 mutex_unlock(&conn->chan_lock);
4061 static inline void set_default_fcs(struct l2cap_chan *chan)
4063 /* FCS is enabled only in ERTM or streaming mode, if one or both
4066 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4067 chan->fcs = L2CAP_FCS_NONE;
4068 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4069 chan->fcs = L2CAP_FCS_CRC16;
4072 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4073 u8 ident, u16 flags)
4075 struct l2cap_conn *conn = chan->conn;
4077 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4080 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4081 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4083 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4084 l2cap_build_conf_rsp(chan, data,
4085 L2CAP_CONF_SUCCESS, flags), data);
4088 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4091 struct l2cap_cmd_rej_cid rej;
4093 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4094 rej.scid = __cpu_to_le16(scid);
4095 rej.dcid = __cpu_to_le16(dcid);
4097 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4100 static inline int l2cap_config_req(struct l2cap_conn *conn,
4101 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4104 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4107 struct l2cap_chan *chan;
4110 if (cmd_len < sizeof(*req))
4113 dcid = __le16_to_cpu(req->dcid);
4114 flags = __le16_to_cpu(req->flags);
4116 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4118 chan = l2cap_get_chan_by_scid(conn, dcid);
4120 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4124 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
4125 chan->state != BT_CONNECTED) {
4126 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4131 /* Reject if config buffer is too small. */
4132 len = cmd_len - sizeof(*req);
4133 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4134 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4135 l2cap_build_conf_rsp(chan, rsp,
4136 L2CAP_CONF_REJECT, flags), rsp);
4141 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4142 chan->conf_len += len;
4144 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4145 /* Incomplete config. Send empty response. */
4146 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4147 l2cap_build_conf_rsp(chan, rsp,
4148 L2CAP_CONF_SUCCESS, flags), rsp);
4152 /* Complete config. */
4153 len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
4155 l2cap_send_disconn_req(chan, ECONNRESET);
4159 chan->ident = cmd->ident;
4160 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4161 chan->num_conf_rsp++;
4163 /* Reset config buffer. */
4166 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4169 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4170 set_default_fcs(chan);
4172 if (chan->mode == L2CAP_MODE_ERTM ||
4173 chan->mode == L2CAP_MODE_STREAMING)
4174 err = l2cap_ertm_init(chan);
4177 l2cap_send_disconn_req(chan, -err);
4179 l2cap_chan_ready(chan);
4184 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4186 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4187 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4188 chan->num_conf_req++;
4191 /* Got Conf Rsp PENDING from remote side and assume we sent
4192 Conf Rsp PENDING in the code above */
4193 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4194 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4196 /* check compatibility */
4198 /* Send rsp for BR/EDR channel */
4200 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4202 chan->ident = cmd->ident;
4206 l2cap_chan_unlock(chan);
4210 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4211 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4214 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4215 u16 scid, flags, result;
4216 struct l2cap_chan *chan;
4217 int len = cmd_len - sizeof(*rsp);
4220 if (cmd_len < sizeof(*rsp))
4223 scid = __le16_to_cpu(rsp->scid);
4224 flags = __le16_to_cpu(rsp->flags);
4225 result = __le16_to_cpu(rsp->result);
4227 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4230 chan = l2cap_get_chan_by_scid(conn, scid);
4235 case L2CAP_CONF_SUCCESS:
4236 l2cap_conf_rfc_get(chan, rsp->data, len);
4237 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4240 case L2CAP_CONF_PENDING:
4241 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4243 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4246 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4247 buf, sizeof(buf), &result);
4249 l2cap_send_disconn_req(chan, ECONNRESET);
4253 if (!chan->hs_hcon) {
4254 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4257 if (l2cap_check_efs(chan)) {
4258 amp_create_logical_link(chan);
4259 chan->ident = cmd->ident;
4265 case L2CAP_CONF_UNACCEPT:
4266 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4269 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4270 l2cap_send_disconn_req(chan, ECONNRESET);
4274 /* throw out any old stored conf requests */
4275 result = L2CAP_CONF_SUCCESS;
4276 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4277 req, sizeof(req), &result);
4279 l2cap_send_disconn_req(chan, ECONNRESET);
4283 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4284 L2CAP_CONF_REQ, len, req);
4285 chan->num_conf_req++;
4286 if (result != L2CAP_CONF_SUCCESS)
4292 l2cap_chan_set_err(chan, ECONNRESET);
4294 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4295 l2cap_send_disconn_req(chan, ECONNRESET);
4299 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4302 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4304 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4305 set_default_fcs(chan);
4307 if (chan->mode == L2CAP_MODE_ERTM ||
4308 chan->mode == L2CAP_MODE_STREAMING)
4309 err = l2cap_ertm_init(chan);
4312 l2cap_send_disconn_req(chan, -err);
4314 l2cap_chan_ready(chan);
4318 l2cap_chan_unlock(chan);
4322 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4323 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4326 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4327 struct l2cap_disconn_rsp rsp;
4329 struct l2cap_chan *chan;
4331 if (cmd_len != sizeof(*req))
4334 scid = __le16_to_cpu(req->scid);
4335 dcid = __le16_to_cpu(req->dcid);
4337 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4339 mutex_lock(&conn->chan_lock);
4341 chan = __l2cap_get_chan_by_scid(conn, dcid);
4343 mutex_unlock(&conn->chan_lock);
4344 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4348 l2cap_chan_hold(chan);
4349 l2cap_chan_lock(chan);
4351 rsp.dcid = cpu_to_le16(chan->scid);
4352 rsp.scid = cpu_to_le16(chan->dcid);
4353 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4355 chan->ops->set_shutdown(chan);
4357 l2cap_chan_del(chan, ECONNRESET);
4359 chan->ops->close(chan);
4361 l2cap_chan_unlock(chan);
4362 l2cap_chan_put(chan);
4364 mutex_unlock(&conn->chan_lock);
4369 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4370 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4373 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4375 struct l2cap_chan *chan;
4377 if (cmd_len != sizeof(*rsp))
4380 scid = __le16_to_cpu(rsp->scid);
4381 dcid = __le16_to_cpu(rsp->dcid);
4383 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4385 mutex_lock(&conn->chan_lock);
4387 chan = __l2cap_get_chan_by_scid(conn, scid);
4389 mutex_unlock(&conn->chan_lock);
4393 l2cap_chan_hold(chan);
4394 l2cap_chan_lock(chan);
4396 if (chan->state != BT_DISCONN) {
4397 l2cap_chan_unlock(chan);
4398 l2cap_chan_put(chan);
4399 mutex_unlock(&conn->chan_lock);
4403 l2cap_chan_del(chan, 0);
4405 chan->ops->close(chan);
4407 l2cap_chan_unlock(chan);
4408 l2cap_chan_put(chan);
4410 mutex_unlock(&conn->chan_lock);
4415 static inline int l2cap_information_req(struct l2cap_conn *conn,
4416 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4419 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4422 if (cmd_len != sizeof(*req))
4425 type = __le16_to_cpu(req->type);
4427 BT_DBG("type 0x%4.4x", type);
4429 if (type == L2CAP_IT_FEAT_MASK) {
4431 u32 feat_mask = l2cap_feat_mask;
4432 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4433 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4434 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4436 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4438 if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4439 feat_mask |= L2CAP_FEAT_EXT_FLOW
4440 | L2CAP_FEAT_EXT_WINDOW;
4442 put_unaligned_le32(feat_mask, rsp->data);
4443 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4445 } else if (type == L2CAP_IT_FIXED_CHAN) {
4447 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4449 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4450 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4451 rsp->data[0] = conn->local_fixed_chan;
4452 memset(rsp->data + 1, 0, 7);
4453 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4456 struct l2cap_info_rsp rsp;
4457 rsp.type = cpu_to_le16(type);
4458 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4459 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4466 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4467 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4470 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4473 if (cmd_len < sizeof(*rsp))
4476 type = __le16_to_cpu(rsp->type);
4477 result = __le16_to_cpu(rsp->result);
4479 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4481 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4482 if (cmd->ident != conn->info_ident ||
4483 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4486 cancel_delayed_work(&conn->info_timer);
4488 if (result != L2CAP_IR_SUCCESS) {
4489 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4490 conn->info_ident = 0;
4492 l2cap_conn_start(conn);
4498 case L2CAP_IT_FEAT_MASK:
4499 conn->feat_mask = get_unaligned_le32(rsp->data);
4501 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4502 struct l2cap_info_req req;
4503 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4505 conn->info_ident = l2cap_get_ident(conn);
4507 l2cap_send_cmd(conn, conn->info_ident,
4508 L2CAP_INFO_REQ, sizeof(req), &req);
4510 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4511 conn->info_ident = 0;
4513 l2cap_conn_start(conn);
4517 case L2CAP_IT_FIXED_CHAN:
4518 conn->remote_fixed_chan = rsp->data[0];
4519 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4520 conn->info_ident = 0;
4522 l2cap_conn_start(conn);
4529 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4530 struct l2cap_cmd_hdr *cmd,
4531 u16 cmd_len, void *data)
4533 struct l2cap_create_chan_req *req = data;
4534 struct l2cap_create_chan_rsp rsp;
4535 struct l2cap_chan *chan;
4536 struct hci_dev *hdev;
4539 if (cmd_len != sizeof(*req))
4542 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4545 psm = le16_to_cpu(req->psm);
4546 scid = le16_to_cpu(req->scid);
4548 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4550 /* For controller id 0 make BR/EDR connection */
4551 if (req->amp_id == AMP_ID_BREDR) {
4552 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4557 /* Validate AMP controller id */
4558 hdev = hci_dev_get(req->amp_id);
4562 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4567 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4570 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4571 struct hci_conn *hs_hcon;
4573 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4577 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4582 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4584 mgr->bredr_chan = chan;
4585 chan->hs_hcon = hs_hcon;
4586 chan->fcs = L2CAP_FCS_NONE;
4587 conn->mtu = hdev->block_mtu;
4596 rsp.scid = cpu_to_le16(scid);
4597 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4598 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4600 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4606 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4608 struct l2cap_move_chan_req req;
4611 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4613 ident = l2cap_get_ident(chan->conn);
4614 chan->ident = ident;
4616 req.icid = cpu_to_le16(chan->scid);
4617 req.dest_amp_id = dest_amp_id;
4619 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4622 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4625 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4627 struct l2cap_move_chan_rsp rsp;
4629 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4631 rsp.icid = cpu_to_le16(chan->dcid);
4632 rsp.result = cpu_to_le16(result);
4634 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4638 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4640 struct l2cap_move_chan_cfm cfm;
4642 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4644 chan->ident = l2cap_get_ident(chan->conn);
4646 cfm.icid = cpu_to_le16(chan->scid);
4647 cfm.result = cpu_to_le16(result);
4649 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4652 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4655 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4657 struct l2cap_move_chan_cfm cfm;
4659 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4661 cfm.icid = cpu_to_le16(icid);
4662 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4664 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4668 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4671 struct l2cap_move_chan_cfm_rsp rsp;
4673 BT_DBG("icid 0x%4.4x", icid);
4675 rsp.icid = cpu_to_le16(icid);
4676 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4679 static void __release_logical_link(struct l2cap_chan *chan)
4681 chan->hs_hchan = NULL;
4682 chan->hs_hcon = NULL;
4684 /* Placeholder - release the logical link */
4687 static void l2cap_logical_fail(struct l2cap_chan *chan)
4689 /* Logical link setup failed */
4690 if (chan->state != BT_CONNECTED) {
4691 /* Create channel failure, disconnect */
4692 l2cap_send_disconn_req(chan, ECONNRESET);
4696 switch (chan->move_role) {
4697 case L2CAP_MOVE_ROLE_RESPONDER:
4698 l2cap_move_done(chan);
4699 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4701 case L2CAP_MOVE_ROLE_INITIATOR:
4702 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4703 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4704 /* Remote has only sent pending or
4705 * success responses, clean up
4707 l2cap_move_done(chan);
4710 /* Other amp move states imply that the move
4711 * has already aborted
4713 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4718 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4719 struct hci_chan *hchan)
4721 struct l2cap_conf_rsp rsp;
4723 chan->hs_hchan = hchan;
4724 chan->hs_hcon->l2cap_data = chan->conn;
4726 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4728 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4731 set_default_fcs(chan);
4733 err = l2cap_ertm_init(chan);
4735 l2cap_send_disconn_req(chan, -err);
4737 l2cap_chan_ready(chan);
4741 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4742 struct hci_chan *hchan)
4744 chan->hs_hcon = hchan->conn;
4745 chan->hs_hcon->l2cap_data = chan->conn;
4747 BT_DBG("move_state %d", chan->move_state);
4749 switch (chan->move_state) {
4750 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4751 /* Move confirm will be sent after a success
4752 * response is received
4754 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4756 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4757 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4758 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4759 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4760 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4761 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4762 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4763 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4764 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4768 /* Move was not in expected state, free the channel */
4769 __release_logical_link(chan);
4771 chan->move_state = L2CAP_MOVE_STABLE;
4775 /* Call with chan locked */
4776 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4779 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4782 l2cap_logical_fail(chan);
4783 __release_logical_link(chan);
4787 if (chan->state != BT_CONNECTED) {
4788 /* Ignore logical link if channel is on BR/EDR */
4789 if (chan->local_amp_id != AMP_ID_BREDR)
4790 l2cap_logical_finish_create(chan, hchan);
4792 l2cap_logical_finish_move(chan, hchan);
4796 void l2cap_move_start(struct l2cap_chan *chan)
4798 BT_DBG("chan %p", chan);
4800 if (chan->local_amp_id == AMP_ID_BREDR) {
4801 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4803 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4804 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4805 /* Placeholder - start physical link setup */
4807 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4808 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4810 l2cap_move_setup(chan);
4811 l2cap_send_move_chan_req(chan, 0);
4815 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4816 u8 local_amp_id, u8 remote_amp_id)
4818 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4819 local_amp_id, remote_amp_id);
4821 chan->fcs = L2CAP_FCS_NONE;
4823 /* Outgoing channel on AMP */
4824 if (chan->state == BT_CONNECT) {
4825 if (result == L2CAP_CR_SUCCESS) {
4826 chan->local_amp_id = local_amp_id;
4827 l2cap_send_create_chan_req(chan, remote_amp_id);
4829 /* Revert to BR/EDR connect */
4830 l2cap_send_conn_req(chan);
4836 /* Incoming channel on AMP */
4837 if (__l2cap_no_conn_pending(chan)) {
4838 struct l2cap_conn_rsp rsp;
4840 rsp.scid = cpu_to_le16(chan->dcid);
4841 rsp.dcid = cpu_to_le16(chan->scid);
4843 if (result == L2CAP_CR_SUCCESS) {
4844 /* Send successful response */
4845 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4846 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4848 /* Send negative response */
4849 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4850 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4853 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4856 if (result == L2CAP_CR_SUCCESS) {
4857 l2cap_state_change(chan, BT_CONFIG);
4858 set_bit(CONF_REQ_SENT, &chan->conf_state);
4859 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4861 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4862 chan->num_conf_req++;
4867 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4870 l2cap_move_setup(chan);
4871 chan->move_id = local_amp_id;
4872 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4874 l2cap_send_move_chan_req(chan, remote_amp_id);
4877 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4879 struct hci_chan *hchan = NULL;
4881 /* Placeholder - get hci_chan for logical link */
4884 if (hchan->state == BT_CONNECTED) {
4885 /* Logical link is ready to go */
4886 chan->hs_hcon = hchan->conn;
4887 chan->hs_hcon->l2cap_data = chan->conn;
4888 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4889 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4891 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4893 /* Wait for logical link to be ready */
4894 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4897 /* Logical link not available */
4898 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4902 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4904 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4906 if (result == -EINVAL)
4907 rsp_result = L2CAP_MR_BAD_ID;
4909 rsp_result = L2CAP_MR_NOT_ALLOWED;
4911 l2cap_send_move_chan_rsp(chan, rsp_result);
4914 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4915 chan->move_state = L2CAP_MOVE_STABLE;
4917 /* Restart data transmission */
4918 l2cap_ertm_send(chan);
4921 /* Invoke with locked chan */
4922 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4924 u8 local_amp_id = chan->local_amp_id;
4925 u8 remote_amp_id = chan->remote_amp_id;
4927 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4928 chan, result, local_amp_id, remote_amp_id);
4930 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
4933 if (chan->state != BT_CONNECTED) {
4934 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4935 } else if (result != L2CAP_MR_SUCCESS) {
4936 l2cap_do_move_cancel(chan, result);
4938 switch (chan->move_role) {
4939 case L2CAP_MOVE_ROLE_INITIATOR:
4940 l2cap_do_move_initiate(chan, local_amp_id,
4943 case L2CAP_MOVE_ROLE_RESPONDER:
4944 l2cap_do_move_respond(chan, result);
4947 l2cap_do_move_cancel(chan, result);
4953 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4954 struct l2cap_cmd_hdr *cmd,
4955 u16 cmd_len, void *data)
4957 struct l2cap_move_chan_req *req = data;
4958 struct l2cap_move_chan_rsp rsp;
4959 struct l2cap_chan *chan;
4961 u16 result = L2CAP_MR_NOT_ALLOWED;
4963 if (cmd_len != sizeof(*req))
4966 icid = le16_to_cpu(req->icid);
4968 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4970 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4973 chan = l2cap_get_chan_by_dcid(conn, icid);
4975 rsp.icid = cpu_to_le16(icid);
4976 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4977 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4982 chan->ident = cmd->ident;
4984 if (chan->scid < L2CAP_CID_DYN_START ||
4985 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4986 (chan->mode != L2CAP_MODE_ERTM &&
4987 chan->mode != L2CAP_MODE_STREAMING)) {
4988 result = L2CAP_MR_NOT_ALLOWED;
4989 goto send_move_response;
4992 if (chan->local_amp_id == req->dest_amp_id) {
4993 result = L2CAP_MR_SAME_ID;
4994 goto send_move_response;
4997 if (req->dest_amp_id != AMP_ID_BREDR) {
4998 struct hci_dev *hdev;
4999 hdev = hci_dev_get(req->dest_amp_id);
5000 if (!hdev || hdev->dev_type != HCI_AMP ||
5001 !test_bit(HCI_UP, &hdev->flags)) {
5005 result = L2CAP_MR_BAD_ID;
5006 goto send_move_response;
5011 /* Detect a move collision. Only send a collision response
5012 * if this side has "lost", otherwise proceed with the move.
5013 * The winner has the larger bd_addr.
5015 if ((__chan_is_moving(chan) ||
5016 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5017 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5018 result = L2CAP_MR_COLLISION;
5019 goto send_move_response;
5022 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5023 l2cap_move_setup(chan);
5024 chan->move_id = req->dest_amp_id;
5027 if (req->dest_amp_id == AMP_ID_BREDR) {
5028 /* Moving to BR/EDR */
5029 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5030 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5031 result = L2CAP_MR_PEND;
5033 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5034 result = L2CAP_MR_SUCCESS;
5037 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5038 /* Placeholder - uncomment when amp functions are available */
5039 /*amp_accept_physical(chan, req->dest_amp_id);*/
5040 result = L2CAP_MR_PEND;
5044 l2cap_send_move_chan_rsp(chan, result);
5046 l2cap_chan_unlock(chan);
5051 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5053 struct l2cap_chan *chan;
5054 struct hci_chan *hchan = NULL;
5056 chan = l2cap_get_chan_by_scid(conn, icid);
5058 l2cap_send_move_chan_cfm_icid(conn, icid);
5062 __clear_chan_timer(chan);
5063 if (result == L2CAP_MR_PEND)
5064 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5066 switch (chan->move_state) {
5067 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5068 /* Move confirm will be sent when logical link
5071 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5073 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5074 if (result == L2CAP_MR_PEND) {
5076 } else if (test_bit(CONN_LOCAL_BUSY,
5077 &chan->conn_state)) {
5078 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5080 /* Logical link is up or moving to BR/EDR,
5083 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5084 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5087 case L2CAP_MOVE_WAIT_RSP:
5089 if (result == L2CAP_MR_SUCCESS) {
5090 /* Remote is ready, send confirm immediately
5091 * after logical link is ready
5093 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5095 /* Both logical link and move success
5096 * are required to confirm
5098 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5101 /* Placeholder - get hci_chan for logical link */
5103 /* Logical link not available */
5104 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5108 /* If the logical link is not yet connected, do not
5109 * send confirmation.
5111 if (hchan->state != BT_CONNECTED)
5114 /* Logical link is already ready to go */
5116 chan->hs_hcon = hchan->conn;
5117 chan->hs_hcon->l2cap_data = chan->conn;
5119 if (result == L2CAP_MR_SUCCESS) {
5120 /* Can confirm now */
5121 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5123 /* Now only need move success
5126 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5129 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5132 /* Any other amp move state means the move failed. */
5133 chan->move_id = chan->local_amp_id;
5134 l2cap_move_done(chan);
5135 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5138 l2cap_chan_unlock(chan);
5141 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5144 struct l2cap_chan *chan;
5146 chan = l2cap_get_chan_by_ident(conn, ident);
5148 /* Could not locate channel, icid is best guess */
5149 l2cap_send_move_chan_cfm_icid(conn, icid);
5153 __clear_chan_timer(chan);
5155 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5156 if (result == L2CAP_MR_COLLISION) {
5157 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5159 /* Cleanup - cancel move */
5160 chan->move_id = chan->local_amp_id;
5161 l2cap_move_done(chan);
5165 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5167 l2cap_chan_unlock(chan);
5170 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5171 struct l2cap_cmd_hdr *cmd,
5172 u16 cmd_len, void *data)
5174 struct l2cap_move_chan_rsp *rsp = data;
5177 if (cmd_len != sizeof(*rsp))
5180 icid = le16_to_cpu(rsp->icid);
5181 result = le16_to_cpu(rsp->result);
5183 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5185 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5186 l2cap_move_continue(conn, icid, result);
5188 l2cap_move_fail(conn, cmd->ident, icid, result);
5193 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5194 struct l2cap_cmd_hdr *cmd,
5195 u16 cmd_len, void *data)
5197 struct l2cap_move_chan_cfm *cfm = data;
5198 struct l2cap_chan *chan;
5201 if (cmd_len != sizeof(*cfm))
5204 icid = le16_to_cpu(cfm->icid);
5205 result = le16_to_cpu(cfm->result);
5207 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5209 chan = l2cap_get_chan_by_dcid(conn, icid);
5211 /* Spec requires a response even if the icid was not found */
5212 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5216 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5217 if (result == L2CAP_MC_CONFIRMED) {
5218 chan->local_amp_id = chan->move_id;
5219 if (chan->local_amp_id == AMP_ID_BREDR)
5220 __release_logical_link(chan);
5222 chan->move_id = chan->local_amp_id;
5225 l2cap_move_done(chan);
5228 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5230 l2cap_chan_unlock(chan);
5235 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5236 struct l2cap_cmd_hdr *cmd,
5237 u16 cmd_len, void *data)
5239 struct l2cap_move_chan_cfm_rsp *rsp = data;
5240 struct l2cap_chan *chan;
5243 if (cmd_len != sizeof(*rsp))
5246 icid = le16_to_cpu(rsp->icid);
5248 BT_DBG("icid 0x%4.4x", icid);
5250 chan = l2cap_get_chan_by_scid(conn, icid);
5254 __clear_chan_timer(chan);
5256 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5257 chan->local_amp_id = chan->move_id;
5259 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5260 __release_logical_link(chan);
5262 l2cap_move_done(chan);
5265 l2cap_chan_unlock(chan);
5270 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5271 struct l2cap_cmd_hdr *cmd,
5272 u16 cmd_len, u8 *data)
5274 struct hci_conn *hcon = conn->hcon;
5275 struct l2cap_conn_param_update_req *req;
5276 struct l2cap_conn_param_update_rsp rsp;
5277 u16 min, max, latency, to_multiplier;
5280 if (hcon->role != HCI_ROLE_MASTER)
5283 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5286 req = (struct l2cap_conn_param_update_req *) data;
5287 min = __le16_to_cpu(req->min);
5288 max = __le16_to_cpu(req->max);
5289 latency = __le16_to_cpu(req->latency);
5290 to_multiplier = __le16_to_cpu(req->to_multiplier);
5292 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5293 min, max, latency, to_multiplier);
5295 memset(&rsp, 0, sizeof(rsp));
5297 err = hci_check_conn_params(min, max, latency, to_multiplier);
5299 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5301 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5303 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5309 store_hint = hci_le_conn_update(hcon, min, max, latency,
5311 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5312 store_hint, min, max, latency,
5320 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5321 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5324 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5325 struct hci_conn *hcon = conn->hcon;
5326 u16 dcid, mtu, mps, credits, result;
5327 struct l2cap_chan *chan;
5330 if (cmd_len < sizeof(*rsp))
5333 dcid = __le16_to_cpu(rsp->dcid);
5334 mtu = __le16_to_cpu(rsp->mtu);
5335 mps = __le16_to_cpu(rsp->mps);
5336 credits = __le16_to_cpu(rsp->credits);
5337 result = __le16_to_cpu(rsp->result);
5339 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23 ||
5340 dcid < L2CAP_CID_DYN_START ||
5341 dcid > L2CAP_CID_LE_DYN_END))
5344 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5345 dcid, mtu, mps, credits, result);
5347 mutex_lock(&conn->chan_lock);
5349 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5357 l2cap_chan_lock(chan);
5360 case L2CAP_CR_SUCCESS:
5361 if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5369 chan->remote_mps = mps;
5370 chan->tx_credits = credits;
5371 l2cap_chan_ready(chan);
5374 case L2CAP_CR_AUTHENTICATION:
5375 case L2CAP_CR_ENCRYPTION:
5376 /* If we already have MITM protection we can't do
5379 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5380 l2cap_chan_del(chan, ECONNREFUSED);
5384 sec_level = hcon->sec_level + 1;
5385 if (chan->sec_level < sec_level)
5386 chan->sec_level = sec_level;
5388 /* We'll need to send a new Connect Request */
5389 clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5391 smp_conn_security(hcon, chan->sec_level);
5395 l2cap_chan_del(chan, ECONNREFUSED);
5399 l2cap_chan_unlock(chan);
5402 mutex_unlock(&conn->chan_lock);
5407 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5408 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5413 switch (cmd->code) {
5414 case L2CAP_COMMAND_REJ:
5415 l2cap_command_rej(conn, cmd, cmd_len, data);
5418 case L2CAP_CONN_REQ:
5419 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5422 case L2CAP_CONN_RSP:
5423 case L2CAP_CREATE_CHAN_RSP:
5424 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5427 case L2CAP_CONF_REQ:
5428 err = l2cap_config_req(conn, cmd, cmd_len, data);
5431 case L2CAP_CONF_RSP:
5432 l2cap_config_rsp(conn, cmd, cmd_len, data);
5435 case L2CAP_DISCONN_REQ:
5436 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5439 case L2CAP_DISCONN_RSP:
5440 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5443 case L2CAP_ECHO_REQ:
5444 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5447 case L2CAP_ECHO_RSP:
5450 case L2CAP_INFO_REQ:
5451 err = l2cap_information_req(conn, cmd, cmd_len, data);
5454 case L2CAP_INFO_RSP:
5455 l2cap_information_rsp(conn, cmd, cmd_len, data);
5458 case L2CAP_CREATE_CHAN_REQ:
5459 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5462 case L2CAP_MOVE_CHAN_REQ:
5463 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5466 case L2CAP_MOVE_CHAN_RSP:
5467 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5470 case L2CAP_MOVE_CHAN_CFM:
5471 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5474 case L2CAP_MOVE_CHAN_CFM_RSP:
5475 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5479 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5487 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5488 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5491 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5492 struct l2cap_le_conn_rsp rsp;
5493 struct l2cap_chan *chan, *pchan;
5494 u16 dcid, scid, credits, mtu, mps;
5498 if (cmd_len != sizeof(*req))
5501 scid = __le16_to_cpu(req->scid);
5502 mtu = __le16_to_cpu(req->mtu);
5503 mps = __le16_to_cpu(req->mps);
5508 if (mtu < 23 || mps < 23)
5511 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5514 /* Check if we have socket listening on psm */
5515 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5516 &conn->hcon->dst, LE_LINK);
5518 result = L2CAP_CR_BAD_PSM;
5523 mutex_lock(&conn->chan_lock);
5524 l2cap_chan_lock(pchan);
5526 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5528 result = L2CAP_CR_AUTHENTICATION;
5530 goto response_unlock;
5533 /* Check for valid dynamic CID range */
5534 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5535 result = L2CAP_CR_INVALID_SCID;
5537 goto response_unlock;
5540 /* Check if we already have channel with that dcid */
5541 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5542 result = L2CAP_CR_SCID_IN_USE;
5544 goto response_unlock;
5547 chan = pchan->ops->new_connection(pchan);
5549 result = L2CAP_CR_NO_MEM;
5550 goto response_unlock;
5553 l2cap_le_flowctl_init(chan);
5555 bacpy(&chan->src, &conn->hcon->src);
5556 bacpy(&chan->dst, &conn->hcon->dst);
5557 chan->src_type = bdaddr_src_type(conn->hcon);
5558 chan->dst_type = bdaddr_dst_type(conn->hcon);
5562 chan->remote_mps = mps;
5563 chan->tx_credits = __le16_to_cpu(req->credits);
5565 __l2cap_chan_add(conn, chan);
5567 credits = chan->rx_credits;
5569 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5571 chan->ident = cmd->ident;
5573 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5574 l2cap_state_change(chan, BT_CONNECT2);
5575 /* The following result value is actually not defined
5576 * for LE CoC but we use it to let the function know
5577 * that it should bail out after doing its cleanup
5578 * instead of sending a response.
5580 result = L2CAP_CR_PEND;
5581 chan->ops->defer(chan);
5583 l2cap_chan_ready(chan);
5584 result = L2CAP_CR_SUCCESS;
5588 l2cap_chan_unlock(pchan);
5589 mutex_unlock(&conn->chan_lock);
5590 l2cap_chan_put(pchan);
5592 if (result == L2CAP_CR_PEND)
5597 rsp.mtu = cpu_to_le16(chan->imtu);
5598 rsp.mps = cpu_to_le16(chan->mps);
5604 rsp.dcid = cpu_to_le16(dcid);
5605 rsp.credits = cpu_to_le16(credits);
5606 rsp.result = cpu_to_le16(result);
5608 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5613 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5614 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5617 struct l2cap_le_credits *pkt;
5618 struct l2cap_chan *chan;
5619 u16 cid, credits, max_credits;
5621 if (cmd_len != sizeof(*pkt))
5624 pkt = (struct l2cap_le_credits *) data;
5625 cid = __le16_to_cpu(pkt->cid);
5626 credits = __le16_to_cpu(pkt->credits);
5628 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5630 chan = l2cap_get_chan_by_dcid(conn, cid);
5634 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5635 if (credits > max_credits) {
5636 BT_ERR("LE credits overflow");
5637 l2cap_send_disconn_req(chan, ECONNRESET);
5638 l2cap_chan_unlock(chan);
5640 /* Return 0 so that we don't trigger an unnecessary
5641 * command reject packet.
5646 chan->tx_credits += credits;
5648 /* Resume sending */
5649 l2cap_le_flowctl_send(chan);
5651 if (chan->tx_credits)
5652 chan->ops->resume(chan);
5654 l2cap_chan_unlock(chan);
5659 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5660 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5663 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5664 struct l2cap_chan *chan;
5666 if (cmd_len < sizeof(*rej))
5669 mutex_lock(&conn->chan_lock);
5671 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5675 l2cap_chan_lock(chan);
5676 l2cap_chan_del(chan, ECONNREFUSED);
5677 l2cap_chan_unlock(chan);
5680 mutex_unlock(&conn->chan_lock);
5684 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5685 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5690 switch (cmd->code) {
5691 case L2CAP_COMMAND_REJ:
5692 l2cap_le_command_rej(conn, cmd, cmd_len, data);
5695 case L2CAP_CONN_PARAM_UPDATE_REQ:
5696 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5699 case L2CAP_CONN_PARAM_UPDATE_RSP:
5702 case L2CAP_LE_CONN_RSP:
5703 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5706 case L2CAP_LE_CONN_REQ:
5707 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5710 case L2CAP_LE_CREDITS:
5711 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5714 case L2CAP_DISCONN_REQ:
5715 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5718 case L2CAP_DISCONN_RSP:
5719 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5723 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5731 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5732 struct sk_buff *skb)
5734 struct hci_conn *hcon = conn->hcon;
5735 struct l2cap_cmd_hdr *cmd;
5739 if (hcon->type != LE_LINK)
5742 if (skb->len < L2CAP_CMD_HDR_SIZE)
5745 cmd = (void *) skb->data;
5746 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5748 len = le16_to_cpu(cmd->len);
5750 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
5752 if (len != skb->len || !cmd->ident) {
5753 BT_DBG("corrupted command");
5757 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5759 struct l2cap_cmd_rej_unk rej;
5761 BT_ERR("Wrong link type (%d)", err);
5763 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5764 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5772 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5773 struct sk_buff *skb)
5775 struct hci_conn *hcon = conn->hcon;
5776 u8 *data = skb->data;
5778 struct l2cap_cmd_hdr cmd;
5781 l2cap_raw_recv(conn, skb);
5783 if (hcon->type != ACL_LINK)
5786 while (len >= L2CAP_CMD_HDR_SIZE) {
5788 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5789 data += L2CAP_CMD_HDR_SIZE;
5790 len -= L2CAP_CMD_HDR_SIZE;
5792 cmd_len = le16_to_cpu(cmd.len);
5794 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5797 if (cmd_len > len || !cmd.ident) {
5798 BT_DBG("corrupted command");
5802 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5804 struct l2cap_cmd_rej_unk rej;
5806 BT_ERR("Wrong link type (%d)", err);
5808 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5809 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5821 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5823 u16 our_fcs, rcv_fcs;
5826 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5827 hdr_size = L2CAP_EXT_HDR_SIZE;
5829 hdr_size = L2CAP_ENH_HDR_SIZE;
5831 if (chan->fcs == L2CAP_FCS_CRC16) {
5832 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5833 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5834 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5836 if (our_fcs != rcv_fcs)
5842 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5844 struct l2cap_ctrl control;
5846 BT_DBG("chan %p", chan);
5848 memset(&control, 0, sizeof(control));
5851 control.reqseq = chan->buffer_seq;
5852 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5854 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5855 control.super = L2CAP_SUPER_RNR;
5856 l2cap_send_sframe(chan, &control);
5859 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5860 chan->unacked_frames > 0)
5861 __set_retrans_timer(chan);
5863 /* Send pending iframes */
5864 l2cap_ertm_send(chan);
5866 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5867 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5868 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5871 control.super = L2CAP_SUPER_RR;
5872 l2cap_send_sframe(chan, &control);
5876 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5877 struct sk_buff **last_frag)
5879 /* skb->len reflects data in skb as well as all fragments
5880 * skb->data_len reflects only data in fragments
5882 if (!skb_has_frag_list(skb))
5883 skb_shinfo(skb)->frag_list = new_frag;
5885 new_frag->next = NULL;
5887 (*last_frag)->next = new_frag;
5888 *last_frag = new_frag;
5890 skb->len += new_frag->len;
5891 skb->data_len += new_frag->len;
5892 skb->truesize += new_frag->truesize;
5895 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5896 struct l2cap_ctrl *control)
5900 switch (control->sar) {
5901 case L2CAP_SAR_UNSEGMENTED:
5905 err = chan->ops->recv(chan, skb);
5908 case L2CAP_SAR_START:
5912 if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
5915 chan->sdu_len = get_unaligned_le16(skb->data);
5916 skb_pull(skb, L2CAP_SDULEN_SIZE);
5918 if (chan->sdu_len > chan->imtu) {
5923 if (skb->len >= chan->sdu_len)
5927 chan->sdu_last_frag = skb;
5933 case L2CAP_SAR_CONTINUE:
5937 append_skb_frag(chan->sdu, skb,
5938 &chan->sdu_last_frag);
5941 if (chan->sdu->len >= chan->sdu_len)
5951 append_skb_frag(chan->sdu, skb,
5952 &chan->sdu_last_frag);
5955 if (chan->sdu->len != chan->sdu_len)
5958 err = chan->ops->recv(chan, chan->sdu);
5961 /* Reassembly complete */
5963 chan->sdu_last_frag = NULL;
5971 kfree_skb(chan->sdu);
5973 chan->sdu_last_frag = NULL;
5980 static int l2cap_resegment(struct l2cap_chan *chan)
5986 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5990 if (chan->mode != L2CAP_MODE_ERTM)
5993 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5994 l2cap_tx(chan, NULL, NULL, event);
5997 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6000 /* Pass sequential frames to l2cap_reassemble_sdu()
6001 * until a gap is encountered.
6004 BT_DBG("chan %p", chan);
6006 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6007 struct sk_buff *skb;
6008 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6009 chan->buffer_seq, skb_queue_len(&chan->srej_q));
6011 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6016 skb_unlink(skb, &chan->srej_q);
6017 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6018 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
6023 if (skb_queue_empty(&chan->srej_q)) {
6024 chan->rx_state = L2CAP_RX_STATE_RECV;
6025 l2cap_send_ack(chan);
6031 static void l2cap_handle_srej(struct l2cap_chan *chan,
6032 struct l2cap_ctrl *control)
6034 struct sk_buff *skb;
6036 BT_DBG("chan %p, control %p", chan, control);
6038 if (control->reqseq == chan->next_tx_seq) {
6039 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6040 l2cap_send_disconn_req(chan, ECONNRESET);
6044 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6047 BT_DBG("Seq %d not available for retransmission",
6052 if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6053 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6054 l2cap_send_disconn_req(chan, ECONNRESET);
6058 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6060 if (control->poll) {
6061 l2cap_pass_to_tx(chan, control);
6063 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6064 l2cap_retransmit(chan, control);
6065 l2cap_ertm_send(chan);
6067 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6068 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6069 chan->srej_save_reqseq = control->reqseq;
6072 l2cap_pass_to_tx_fbit(chan, control);
6074 if (control->final) {
6075 if (chan->srej_save_reqseq != control->reqseq ||
6076 !test_and_clear_bit(CONN_SREJ_ACT,
6078 l2cap_retransmit(chan, control);
6080 l2cap_retransmit(chan, control);
6081 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6082 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6083 chan->srej_save_reqseq = control->reqseq;
6089 static void l2cap_handle_rej(struct l2cap_chan *chan,
6090 struct l2cap_ctrl *control)
6092 struct sk_buff *skb;
6094 BT_DBG("chan %p, control %p", chan, control);
6096 if (control->reqseq == chan->next_tx_seq) {
6097 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6098 l2cap_send_disconn_req(chan, ECONNRESET);
6102 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6104 if (chan->max_tx && skb &&
6105 bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6106 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6107 l2cap_send_disconn_req(chan, ECONNRESET);
6111 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6113 l2cap_pass_to_tx(chan, control);
6115 if (control->final) {
6116 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6117 l2cap_retransmit_all(chan, control);
6119 l2cap_retransmit_all(chan, control);
6120 l2cap_ertm_send(chan);
6121 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6122 set_bit(CONN_REJ_ACT, &chan->conn_state);
6126 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6128 BT_DBG("chan %p, txseq %d", chan, txseq);
6130 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6131 chan->expected_tx_seq);
6133 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6134 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6136 /* See notes below regarding "double poll" and
6139 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6140 BT_DBG("Invalid/Ignore - after SREJ");
6141 return L2CAP_TXSEQ_INVALID_IGNORE;
6143 BT_DBG("Invalid - in window after SREJ sent");
6144 return L2CAP_TXSEQ_INVALID;
6148 if (chan->srej_list.head == txseq) {
6149 BT_DBG("Expected SREJ");
6150 return L2CAP_TXSEQ_EXPECTED_SREJ;
6153 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6154 BT_DBG("Duplicate SREJ - txseq already stored");
6155 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6158 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6159 BT_DBG("Unexpected SREJ - not requested");
6160 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6164 if (chan->expected_tx_seq == txseq) {
6165 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6167 BT_DBG("Invalid - txseq outside tx window");
6168 return L2CAP_TXSEQ_INVALID;
6171 return L2CAP_TXSEQ_EXPECTED;
6175 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6176 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6177 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6178 return L2CAP_TXSEQ_DUPLICATE;
6181 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6182 /* A source of invalid packets is a "double poll" condition,
6183 * where delays cause us to send multiple poll packets. If
6184 * the remote stack receives and processes both polls,
6185 * sequence numbers can wrap around in such a way that a
6186 * resent frame has a sequence number that looks like new data
6187 * with a sequence gap. This would trigger an erroneous SREJ
6190 * Fortunately, this is impossible with a tx window that's
6191 * less than half of the maximum sequence number, which allows
6192 * invalid frames to be safely ignored.
6194 * With tx window sizes greater than half of the tx window
6195 * maximum, the frame is invalid and cannot be ignored. This
6196 * causes a disconnect.
6199 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6200 BT_DBG("Invalid/Ignore - txseq outside tx window");
6201 return L2CAP_TXSEQ_INVALID_IGNORE;
6203 BT_DBG("Invalid - txseq outside tx window");
6204 return L2CAP_TXSEQ_INVALID;
6207 BT_DBG("Unexpected - txseq indicates missing frames");
6208 return L2CAP_TXSEQ_UNEXPECTED;
6212 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6213 struct l2cap_ctrl *control,
6214 struct sk_buff *skb, u8 event)
6217 bool skb_in_use = false;
6219 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6223 case L2CAP_EV_RECV_IFRAME:
6224 switch (l2cap_classify_txseq(chan, control->txseq)) {
6225 case L2CAP_TXSEQ_EXPECTED:
6226 l2cap_pass_to_tx(chan, control);
6228 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6229 BT_DBG("Busy, discarding expected seq %d",
6234 chan->expected_tx_seq = __next_seq(chan,
6237 chan->buffer_seq = chan->expected_tx_seq;
6240 err = l2cap_reassemble_sdu(chan, skb, control);
6244 if (control->final) {
6245 if (!test_and_clear_bit(CONN_REJ_ACT,
6246 &chan->conn_state)) {
6248 l2cap_retransmit_all(chan, control);
6249 l2cap_ertm_send(chan);
6253 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6254 l2cap_send_ack(chan);
6256 case L2CAP_TXSEQ_UNEXPECTED:
6257 l2cap_pass_to_tx(chan, control);
6259 /* Can't issue SREJ frames in the local busy state.
6260 * Drop this frame, it will be seen as missing
6261 * when local busy is exited.
6263 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6264 BT_DBG("Busy, discarding unexpected seq %d",
6269 /* There was a gap in the sequence, so an SREJ
6270 * must be sent for each missing frame. The
6271 * current frame is stored for later use.
6273 skb_queue_tail(&chan->srej_q, skb);
6275 BT_DBG("Queued %p (queue len %d)", skb,
6276 skb_queue_len(&chan->srej_q));
6278 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6279 l2cap_seq_list_clear(&chan->srej_list);
6280 l2cap_send_srej(chan, control->txseq);
6282 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6284 case L2CAP_TXSEQ_DUPLICATE:
6285 l2cap_pass_to_tx(chan, control);
6287 case L2CAP_TXSEQ_INVALID_IGNORE:
6289 case L2CAP_TXSEQ_INVALID:
6291 l2cap_send_disconn_req(chan, ECONNRESET);
6295 case L2CAP_EV_RECV_RR:
6296 l2cap_pass_to_tx(chan, control);
6297 if (control->final) {
6298 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6300 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6301 !__chan_is_moving(chan)) {
6303 l2cap_retransmit_all(chan, control);
6306 l2cap_ertm_send(chan);
6307 } else if (control->poll) {
6308 l2cap_send_i_or_rr_or_rnr(chan);
6310 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6311 &chan->conn_state) &&
6312 chan->unacked_frames)
6313 __set_retrans_timer(chan);
6315 l2cap_ertm_send(chan);
6318 case L2CAP_EV_RECV_RNR:
6319 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6320 l2cap_pass_to_tx(chan, control);
6321 if (control && control->poll) {
6322 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6323 l2cap_send_rr_or_rnr(chan, 0);
6325 __clear_retrans_timer(chan);
6326 l2cap_seq_list_clear(&chan->retrans_list);
6328 case L2CAP_EV_RECV_REJ:
6329 l2cap_handle_rej(chan, control);
6331 case L2CAP_EV_RECV_SREJ:
6332 l2cap_handle_srej(chan, control);
6338 if (skb && !skb_in_use) {
6339 BT_DBG("Freeing %p", skb);
6346 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6347 struct l2cap_ctrl *control,
6348 struct sk_buff *skb, u8 event)
6351 u16 txseq = control->txseq;
6352 bool skb_in_use = false;
6354 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6358 case L2CAP_EV_RECV_IFRAME:
6359 switch (l2cap_classify_txseq(chan, txseq)) {
6360 case L2CAP_TXSEQ_EXPECTED:
6361 /* Keep frame for reassembly later */
6362 l2cap_pass_to_tx(chan, control);
6363 skb_queue_tail(&chan->srej_q, skb);
6365 BT_DBG("Queued %p (queue len %d)", skb,
6366 skb_queue_len(&chan->srej_q));
6368 chan->expected_tx_seq = __next_seq(chan, txseq);
6370 case L2CAP_TXSEQ_EXPECTED_SREJ:
6371 l2cap_seq_list_pop(&chan->srej_list);
6373 l2cap_pass_to_tx(chan, control);
6374 skb_queue_tail(&chan->srej_q, skb);
6376 BT_DBG("Queued %p (queue len %d)", skb,
6377 skb_queue_len(&chan->srej_q));
6379 err = l2cap_rx_queued_iframes(chan);
6384 case L2CAP_TXSEQ_UNEXPECTED:
6385 /* Got a frame that can't be reassembled yet.
6386 * Save it for later, and send SREJs to cover
6387 * the missing frames.
6389 skb_queue_tail(&chan->srej_q, skb);
6391 BT_DBG("Queued %p (queue len %d)", skb,
6392 skb_queue_len(&chan->srej_q));
6394 l2cap_pass_to_tx(chan, control);
6395 l2cap_send_srej(chan, control->txseq);
6397 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6398 /* This frame was requested with an SREJ, but
6399 * some expected retransmitted frames are
6400 * missing. Request retransmission of missing
6403 skb_queue_tail(&chan->srej_q, skb);
6405 BT_DBG("Queued %p (queue len %d)", skb,
6406 skb_queue_len(&chan->srej_q));
6408 l2cap_pass_to_tx(chan, control);
6409 l2cap_send_srej_list(chan, control->txseq);
6411 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6412 /* We've already queued this frame. Drop this copy. */
6413 l2cap_pass_to_tx(chan, control);
6415 case L2CAP_TXSEQ_DUPLICATE:
6416 /* Expecting a later sequence number, so this frame
6417 * was already received. Ignore it completely.
6420 case L2CAP_TXSEQ_INVALID_IGNORE:
6422 case L2CAP_TXSEQ_INVALID:
6424 l2cap_send_disconn_req(chan, ECONNRESET);
6428 case L2CAP_EV_RECV_RR:
6429 l2cap_pass_to_tx(chan, control);
6430 if (control->final) {
6431 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6433 if (!test_and_clear_bit(CONN_REJ_ACT,
6434 &chan->conn_state)) {
6436 l2cap_retransmit_all(chan, control);
6439 l2cap_ertm_send(chan);
6440 } else if (control->poll) {
6441 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6442 &chan->conn_state) &&
6443 chan->unacked_frames) {
6444 __set_retrans_timer(chan);
6447 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6448 l2cap_send_srej_tail(chan);
6450 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6451 &chan->conn_state) &&
6452 chan->unacked_frames)
6453 __set_retrans_timer(chan);
6455 l2cap_send_ack(chan);
6458 case L2CAP_EV_RECV_RNR:
6459 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6460 l2cap_pass_to_tx(chan, control);
6461 if (control->poll) {
6462 l2cap_send_srej_tail(chan);
6464 struct l2cap_ctrl rr_control;
6465 memset(&rr_control, 0, sizeof(rr_control));
6466 rr_control.sframe = 1;
6467 rr_control.super = L2CAP_SUPER_RR;
6468 rr_control.reqseq = chan->buffer_seq;
6469 l2cap_send_sframe(chan, &rr_control);
6473 case L2CAP_EV_RECV_REJ:
6474 l2cap_handle_rej(chan, control);
6476 case L2CAP_EV_RECV_SREJ:
6477 l2cap_handle_srej(chan, control);
6481 if (skb && !skb_in_use) {
6482 BT_DBG("Freeing %p", skb);
6489 static int l2cap_finish_move(struct l2cap_chan *chan)
6491 BT_DBG("chan %p", chan);
6493 chan->rx_state = L2CAP_RX_STATE_RECV;
6496 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6498 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6500 return l2cap_resegment(chan);
6503 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6504 struct l2cap_ctrl *control,
6505 struct sk_buff *skb, u8 event)
6509 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6515 l2cap_process_reqseq(chan, control->reqseq);
6517 if (!skb_queue_empty(&chan->tx_q))
6518 chan->tx_send_head = skb_peek(&chan->tx_q);
6520 chan->tx_send_head = NULL;
6522 /* Rewind next_tx_seq to the point expected
6525 chan->next_tx_seq = control->reqseq;
6526 chan->unacked_frames = 0;
6528 err = l2cap_finish_move(chan);
6532 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6533 l2cap_send_i_or_rr_or_rnr(chan);
6535 if (event == L2CAP_EV_RECV_IFRAME)
6538 return l2cap_rx_state_recv(chan, control, NULL, event);
6541 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6542 struct l2cap_ctrl *control,
6543 struct sk_buff *skb, u8 event)
6547 if (!control->final)
6550 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6552 chan->rx_state = L2CAP_RX_STATE_RECV;
6553 l2cap_process_reqseq(chan, control->reqseq);
6555 if (!skb_queue_empty(&chan->tx_q))
6556 chan->tx_send_head = skb_peek(&chan->tx_q);
6558 chan->tx_send_head = NULL;
6560 /* Rewind next_tx_seq to the point expected
6563 chan->next_tx_seq = control->reqseq;
6564 chan->unacked_frames = 0;
6567 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6569 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6571 err = l2cap_resegment(chan);
6574 err = l2cap_rx_state_recv(chan, control, skb, event);
6579 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6581 /* Make sure reqseq is for a packet that has been sent but not acked */
6584 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6585 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6588 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6589 struct sk_buff *skb, u8 event)
6593 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6594 control, skb, event, chan->rx_state);
6596 if (__valid_reqseq(chan, control->reqseq)) {
6597 switch (chan->rx_state) {
6598 case L2CAP_RX_STATE_RECV:
6599 err = l2cap_rx_state_recv(chan, control, skb, event);
6601 case L2CAP_RX_STATE_SREJ_SENT:
6602 err = l2cap_rx_state_srej_sent(chan, control, skb,
6605 case L2CAP_RX_STATE_WAIT_P:
6606 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6608 case L2CAP_RX_STATE_WAIT_F:
6609 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6616 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6617 control->reqseq, chan->next_tx_seq,
6618 chan->expected_ack_seq);
6619 l2cap_send_disconn_req(chan, ECONNRESET);
6625 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6626 struct sk_buff *skb)
6628 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6631 if (l2cap_classify_txseq(chan, control->txseq) ==
6632 L2CAP_TXSEQ_EXPECTED) {
6633 l2cap_pass_to_tx(chan, control);
6635 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6636 __next_seq(chan, chan->buffer_seq));
6638 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6640 l2cap_reassemble_sdu(chan, skb, control);
6643 kfree_skb(chan->sdu);
6646 chan->sdu_last_frag = NULL;
6650 BT_DBG("Freeing %p", skb);
6655 chan->last_acked_seq = control->txseq;
6656 chan->expected_tx_seq = __next_seq(chan, control->txseq);
6661 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6663 struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
6667 __unpack_control(chan, skb);
6672 * We can just drop the corrupted I-frame here.
6673 * Receiver will miss it and start proper recovery
6674 * procedures and ask for retransmission.
6676 if (l2cap_check_fcs(chan, skb))
6679 if (!control->sframe && control->sar == L2CAP_SAR_START)
6680 len -= L2CAP_SDULEN_SIZE;
6682 if (chan->fcs == L2CAP_FCS_CRC16)
6683 len -= L2CAP_FCS_SIZE;
6685 if (len > chan->mps) {
6686 l2cap_send_disconn_req(chan, ECONNRESET);
6690 if (chan->ops->filter) {
6691 if (chan->ops->filter(chan, skb))
6695 if (!control->sframe) {
6698 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6699 control->sar, control->reqseq, control->final,
6702 /* Validate F-bit - F=0 always valid, F=1 only
6703 * valid in TX WAIT_F
6705 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6708 if (chan->mode != L2CAP_MODE_STREAMING) {
6709 event = L2CAP_EV_RECV_IFRAME;
6710 err = l2cap_rx(chan, control, skb, event);
6712 err = l2cap_stream_rx(chan, control, skb);
6716 l2cap_send_disconn_req(chan, ECONNRESET);
6718 const u8 rx_func_to_event[4] = {
6719 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6720 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6723 /* Only I-frames are expected in streaming mode */
6724 if (chan->mode == L2CAP_MODE_STREAMING)
6727 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6728 control->reqseq, control->final, control->poll,
6732 BT_ERR("Trailing bytes: %d in sframe", len);
6733 l2cap_send_disconn_req(chan, ECONNRESET);
6737 /* Validate F and P bits */
6738 if (control->final && (control->poll ||
6739 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6742 event = rx_func_to_event[control->super];
6743 if (l2cap_rx(chan, control, skb, event))
6744 l2cap_send_disconn_req(chan, ECONNRESET);
6754 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6756 struct l2cap_conn *conn = chan->conn;
6757 struct l2cap_le_credits pkt;
6760 /* We return more credits to the sender only after the amount of
6761 * credits falls below half of the initial amount.
6763 if (chan->rx_credits >= (le_max_credits + 1) / 2)
6766 return_credits = le_max_credits - chan->rx_credits;
6768 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6770 chan->rx_credits += return_credits;
6772 pkt.cid = cpu_to_le16(chan->scid);
6773 pkt.credits = cpu_to_le16(return_credits);
6775 chan->ident = l2cap_get_ident(conn);
6777 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6780 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6784 if (!chan->rx_credits) {
6785 BT_ERR("No credits to receive LE L2CAP data");
6786 l2cap_send_disconn_req(chan, ECONNRESET);
6790 if (chan->imtu < skb->len) {
6791 BT_ERR("Too big LE L2CAP PDU");
6796 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6798 l2cap_chan_le_send_credits(chan);
6805 sdu_len = get_unaligned_le16(skb->data);
6806 skb_pull(skb, L2CAP_SDULEN_SIZE);
6808 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6809 sdu_len, skb->len, chan->imtu);
6811 if (sdu_len > chan->imtu) {
6812 BT_ERR("Too big LE L2CAP SDU length received");
6817 if (skb->len > sdu_len) {
6818 BT_ERR("Too much LE L2CAP data received");
6823 if (skb->len == sdu_len)
6824 return chan->ops->recv(chan, skb);
6827 chan->sdu_len = sdu_len;
6828 chan->sdu_last_frag = skb;
6830 /* Detect if remote is not able to use the selected MPS */
6831 if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
6832 u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
6834 /* Adjust the number of credits */
6835 BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
6836 chan->mps = mps_len;
6837 l2cap_chan_le_send_credits(chan);
6843 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6844 chan->sdu->len, skb->len, chan->sdu_len);
6846 if (chan->sdu->len + skb->len > chan->sdu_len) {
6847 BT_ERR("Too much LE L2CAP data received");
6852 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6855 if (chan->sdu->len == chan->sdu_len) {
6856 err = chan->ops->recv(chan, chan->sdu);
6859 chan->sdu_last_frag = NULL;
6867 kfree_skb(chan->sdu);
6869 chan->sdu_last_frag = NULL;
6873 /* We can't return an error here since we took care of the skb
6874 * freeing internally. An error return would cause the caller to
6875 * do a double-free of the skb.
6880 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6881 struct sk_buff *skb)
6883 struct l2cap_chan *chan;
6885 chan = l2cap_get_chan_by_scid(conn, cid);
6887 if (cid == L2CAP_CID_A2MP) {
6888 chan = a2mp_channel_create(conn, skb);
6894 l2cap_chan_lock(chan);
6896 BT_DBG("unknown cid 0x%4.4x", cid);
6897 /* Drop packet and return */
6903 BT_DBG("chan %p, len %d", chan, skb->len);
6905 /* If we receive data on a fixed channel before the info req/rsp
6906 * procdure is done simply assume that the channel is supported
6907 * and mark it as ready.
6909 if (chan->chan_type == L2CAP_CHAN_FIXED)
6910 l2cap_chan_ready(chan);
6912 if (chan->state != BT_CONNECTED)
6915 switch (chan->mode) {
6916 case L2CAP_MODE_LE_FLOWCTL:
6917 if (l2cap_le_data_rcv(chan, skb) < 0)
6922 case L2CAP_MODE_BASIC:
6923 /* If socket recv buffers overflows we drop data here
6924 * which is *bad* because L2CAP has to be reliable.
6925 * But we don't have any other choice. L2CAP doesn't
6926 * provide flow control mechanism. */
6928 if (chan->imtu < skb->len) {
6929 BT_ERR("Dropping L2CAP data: receive buffer overflow");
6933 if (!chan->ops->recv(chan, skb))
6937 case L2CAP_MODE_ERTM:
6938 case L2CAP_MODE_STREAMING:
6939 l2cap_data_rcv(chan, skb);
6943 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6951 l2cap_chan_unlock(chan);
6954 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6955 struct sk_buff *skb)
6957 struct hci_conn *hcon = conn->hcon;
6958 struct l2cap_chan *chan;
6960 if (hcon->type != ACL_LINK)
6963 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6968 BT_DBG("chan %p, len %d", chan, skb->len);
6970 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6973 if (chan->imtu < skb->len)
6976 /* Store remote BD_ADDR and PSM for msg_name */
6977 bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
6978 bt_cb(skb)->l2cap.psm = psm;
6980 if (!chan->ops->recv(chan, skb)) {
6981 l2cap_chan_put(chan);
6986 l2cap_chan_put(chan);
6991 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6993 struct l2cap_hdr *lh = (void *) skb->data;
6994 struct hci_conn *hcon = conn->hcon;
6998 if (hcon->state != BT_CONNECTED) {
6999 BT_DBG("queueing pending rx skb");
7000 skb_queue_tail(&conn->pending_rx, skb);
7004 skb_pull(skb, L2CAP_HDR_SIZE);
7005 cid = __le16_to_cpu(lh->cid);
7006 len = __le16_to_cpu(lh->len);
7008 if (len != skb->len) {
7013 /* Since we can't actively block incoming LE connections we must
7014 * at least ensure that we ignore incoming data from them.
7016 if (hcon->type == LE_LINK &&
7017 hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
7018 bdaddr_dst_type(hcon))) {
7023 BT_DBG("len %d, cid 0x%4.4x", len, cid);
7026 case L2CAP_CID_SIGNALING:
7027 l2cap_sig_channel(conn, skb);
7030 case L2CAP_CID_CONN_LESS:
7031 psm = get_unaligned((__le16 *) skb->data);
7032 skb_pull(skb, L2CAP_PSMLEN_SIZE);
7033 l2cap_conless_channel(conn, psm, skb);
7036 case L2CAP_CID_LE_SIGNALING:
7037 l2cap_le_sig_channel(conn, skb);
7041 l2cap_data_channel(conn, cid, skb);
7046 static void process_pending_rx(struct work_struct *work)
7048 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7050 struct sk_buff *skb;
7054 while ((skb = skb_dequeue(&conn->pending_rx)))
7055 l2cap_recv_frame(conn, skb);
7058 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7060 struct l2cap_conn *conn = hcon->l2cap_data;
7061 struct hci_chan *hchan;
7066 hchan = hci_chan_create(hcon);
7070 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
7072 hci_chan_del(hchan);
7076 kref_init(&conn->ref);
7077 hcon->l2cap_data = conn;
7078 conn->hcon = hci_conn_get(hcon);
7079 conn->hchan = hchan;
7081 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
7083 switch (hcon->type) {
7085 if (hcon->hdev->le_mtu) {
7086 conn->mtu = hcon->hdev->le_mtu;
7091 conn->mtu = hcon->hdev->acl_mtu;
7095 conn->feat_mask = 0;
7097 conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7099 if (hcon->type == ACL_LINK &&
7100 hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7101 conn->local_fixed_chan |= L2CAP_FC_A2MP;
7103 if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7104 (bredr_sc_enabled(hcon->hdev) ||
7105 hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7106 conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7108 mutex_init(&conn->ident_lock);
7109 mutex_init(&conn->chan_lock);
7111 INIT_LIST_HEAD(&conn->chan_l);
7112 INIT_LIST_HEAD(&conn->users);
7114 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7116 skb_queue_head_init(&conn->pending_rx);
7117 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7118 INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7120 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
7125 static bool is_valid_psm(u16 psm, u8 dst_type) {
7129 if (bdaddr_type_is_le(dst_type))
7130 return (psm <= 0x00ff);
7132 /* PSM must be odd and lsb of upper byte must be 0 */
7133 return ((psm & 0x0101) == 0x0001);
7136 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7137 bdaddr_t *dst, u8 dst_type)
7139 struct l2cap_conn *conn;
7140 struct hci_conn *hcon;
7141 struct hci_dev *hdev;
7144 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
7145 dst_type, __le16_to_cpu(psm));
7147 hdev = hci_get_route(dst, &chan->src, chan->src_type);
7149 return -EHOSTUNREACH;
7153 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7154 chan->chan_type != L2CAP_CHAN_RAW) {
7159 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7164 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7169 switch (chan->mode) {
7170 case L2CAP_MODE_BASIC:
7172 case L2CAP_MODE_LE_FLOWCTL:
7173 l2cap_le_flowctl_init(chan);
7175 case L2CAP_MODE_ERTM:
7176 case L2CAP_MODE_STREAMING:
7185 switch (chan->state) {
7189 /* Already connecting */
7194 /* Already connected */
7208 /* Set destination address and psm */
7209 bacpy(&chan->dst, dst);
7210 chan->dst_type = dst_type;
7215 if (bdaddr_type_is_le(dst_type)) {
7216 /* Convert from L2CAP channel address type to HCI address type
7218 if (dst_type == BDADDR_LE_PUBLIC)
7219 dst_type = ADDR_LE_DEV_PUBLIC;
7221 dst_type = ADDR_LE_DEV_RANDOM;
7223 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7224 hcon = hci_connect_le(hdev, dst, dst_type,
7226 HCI_LE_CONN_TIMEOUT,
7227 HCI_ROLE_SLAVE, NULL);
7229 hcon = hci_connect_le_scan(hdev, dst, dst_type,
7231 HCI_LE_CONN_TIMEOUT);
7234 u8 auth_type = l2cap_get_auth_type(chan);
7235 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
7239 err = PTR_ERR(hcon);
7243 conn = l2cap_conn_add(hcon);
7245 hci_conn_drop(hcon);
7250 mutex_lock(&conn->chan_lock);
7251 l2cap_chan_lock(chan);
7253 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7254 hci_conn_drop(hcon);
7259 /* Update source addr of the socket */
7260 bacpy(&chan->src, &hcon->src);
7261 chan->src_type = bdaddr_src_type(hcon);
7263 __l2cap_chan_add(conn, chan);
7265 /* l2cap_chan_add takes its own ref so we can drop this one */
7266 hci_conn_drop(hcon);
7268 l2cap_state_change(chan, BT_CONNECT);
7269 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7271 /* Release chan->sport so that it can be reused by other
7272 * sockets (as it's only used for listening sockets).
7274 write_lock(&chan_list_lock);
7276 write_unlock(&chan_list_lock);
7278 if (hcon->state == BT_CONNECTED) {
7279 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7280 __clear_chan_timer(chan);
7281 if (l2cap_chan_check_security(chan, true))
7282 l2cap_state_change(chan, BT_CONNECTED);
7284 l2cap_do_start(chan);
7290 l2cap_chan_unlock(chan);
7291 mutex_unlock(&conn->chan_lock);
7293 hci_dev_unlock(hdev);
7297 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7299 /* ---- L2CAP interface with lower layer (HCI) ---- */
7301 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7303 int exact = 0, lm1 = 0, lm2 = 0;
7304 struct l2cap_chan *c;
7306 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7308 /* Find listening sockets and check their link_mode */
7309 read_lock(&chan_list_lock);
7310 list_for_each_entry(c, &chan_list, global_l) {
7311 if (c->state != BT_LISTEN)
7314 if (!bacmp(&c->src, &hdev->bdaddr)) {
7315 lm1 |= HCI_LM_ACCEPT;
7316 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7317 lm1 |= HCI_LM_MASTER;
7319 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7320 lm2 |= HCI_LM_ACCEPT;
7321 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7322 lm2 |= HCI_LM_MASTER;
7325 read_unlock(&chan_list_lock);
7327 return exact ? lm1 : lm2;
7330 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7331 * from an existing channel in the list or from the beginning of the
7332 * global list (by passing NULL as first parameter).
7334 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
7335 struct hci_conn *hcon)
7337 u8 src_type = bdaddr_src_type(hcon);
7339 read_lock(&chan_list_lock);
7342 c = list_next_entry(c, global_l);
7344 c = list_entry(chan_list.next, typeof(*c), global_l);
7346 list_for_each_entry_from(c, &chan_list, global_l) {
7347 if (c->chan_type != L2CAP_CHAN_FIXED)
7349 if (c->state != BT_LISTEN)
7351 if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
7353 if (src_type != c->src_type)
7357 read_unlock(&chan_list_lock);
7361 read_unlock(&chan_list_lock);
7366 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7368 struct hci_dev *hdev = hcon->hdev;
7369 struct l2cap_conn *conn;
7370 struct l2cap_chan *pchan;
7373 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7376 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7379 l2cap_conn_del(hcon, bt_to_errno(status));
7383 conn = l2cap_conn_add(hcon);
7387 dst_type = bdaddr_dst_type(hcon);
7389 /* If device is blocked, do not create channels for it */
7390 if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
7393 /* Find fixed channels and notify them of the new connection. We
7394 * use multiple individual lookups, continuing each time where
7395 * we left off, because the list lock would prevent calling the
7396 * potentially sleeping l2cap_chan_lock() function.
7398 pchan = l2cap_global_fixed_chan(NULL, hcon);
7400 struct l2cap_chan *chan, *next;
7402 /* Client fixed channels should override server ones */
7403 if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7406 l2cap_chan_lock(pchan);
7407 chan = pchan->ops->new_connection(pchan);
7409 bacpy(&chan->src, &hcon->src);
7410 bacpy(&chan->dst, &hcon->dst);
7411 chan->src_type = bdaddr_src_type(hcon);
7412 chan->dst_type = dst_type;
7414 __l2cap_chan_add(conn, chan);
7417 l2cap_chan_unlock(pchan);
7419 next = l2cap_global_fixed_chan(pchan, hcon);
7420 l2cap_chan_put(pchan);
7424 l2cap_conn_ready(conn);
7427 int l2cap_disconn_ind(struct hci_conn *hcon)
7429 struct l2cap_conn *conn = hcon->l2cap_data;
7431 BT_DBG("hcon %p", hcon);
7434 return HCI_ERROR_REMOTE_USER_TERM;
7435 return conn->disc_reason;
7438 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7440 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7443 BT_DBG("hcon %p reason %d", hcon, reason);
7445 l2cap_conn_del(hcon, bt_to_errno(reason));
7448 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7450 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7453 if (encrypt == 0x00) {
7454 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7455 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7456 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7457 chan->sec_level == BT_SECURITY_FIPS)
7458 l2cap_chan_close(chan, ECONNREFUSED);
7460 if (chan->sec_level == BT_SECURITY_MEDIUM)
7461 __clear_chan_timer(chan);
7465 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7467 struct l2cap_conn *conn = hcon->l2cap_data;
7468 struct l2cap_chan *chan;
7473 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7475 mutex_lock(&conn->chan_lock);
7477 list_for_each_entry(chan, &conn->chan_l, list) {
7478 l2cap_chan_lock(chan);
7480 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7481 state_to_string(chan->state));
7483 if (chan->scid == L2CAP_CID_A2MP) {
7484 l2cap_chan_unlock(chan);
7488 if (!status && encrypt)
7489 chan->sec_level = hcon->sec_level;
7491 if (!__l2cap_no_conn_pending(chan)) {
7492 l2cap_chan_unlock(chan);
7496 if (!status && (chan->state == BT_CONNECTED ||
7497 chan->state == BT_CONFIG)) {
7498 chan->ops->resume(chan);
7499 l2cap_check_encryption(chan, encrypt);
7500 l2cap_chan_unlock(chan);
7504 if (chan->state == BT_CONNECT) {
7505 if (!status && l2cap_check_enc_key_size(hcon))
7506 l2cap_start_connection(chan);
7508 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7509 } else if (chan->state == BT_CONNECT2 &&
7510 chan->mode != L2CAP_MODE_LE_FLOWCTL) {
7511 struct l2cap_conn_rsp rsp;
7514 if (!status && l2cap_check_enc_key_size(hcon)) {
7515 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7516 res = L2CAP_CR_PEND;
7517 stat = L2CAP_CS_AUTHOR_PEND;
7518 chan->ops->defer(chan);
7520 l2cap_state_change(chan, BT_CONFIG);
7521 res = L2CAP_CR_SUCCESS;
7522 stat = L2CAP_CS_NO_INFO;
7525 l2cap_state_change(chan, BT_DISCONN);
7526 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7527 res = L2CAP_CR_SEC_BLOCK;
7528 stat = L2CAP_CS_NO_INFO;
7531 rsp.scid = cpu_to_le16(chan->dcid);
7532 rsp.dcid = cpu_to_le16(chan->scid);
7533 rsp.result = cpu_to_le16(res);
7534 rsp.status = cpu_to_le16(stat);
7535 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
7538 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7539 res == L2CAP_CR_SUCCESS) {
7541 set_bit(CONF_REQ_SENT, &chan->conf_state);
7542 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7544 l2cap_build_conf_req(chan, buf, sizeof(buf)),
7546 chan->num_conf_req++;
7550 l2cap_chan_unlock(chan);
7553 mutex_unlock(&conn->chan_lock);
7556 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7558 struct l2cap_conn *conn = hcon->l2cap_data;
7559 struct l2cap_hdr *hdr;
7562 /* For AMP controller do not create l2cap conn */
7563 if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
7567 conn = l2cap_conn_add(hcon);
7572 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7576 case ACL_START_NO_FLUSH:
7579 BT_ERR("Unexpected start frame (len %d)", skb->len);
7580 kfree_skb(conn->rx_skb);
7581 conn->rx_skb = NULL;
7583 l2cap_conn_unreliable(conn, ECOMM);
7586 /* Start fragment always begin with Basic L2CAP header */
7587 if (skb->len < L2CAP_HDR_SIZE) {
7588 BT_ERR("Frame is too short (len %d)", skb->len);
7589 l2cap_conn_unreliable(conn, ECOMM);
7593 hdr = (struct l2cap_hdr *) skb->data;
7594 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7596 if (len == skb->len) {
7597 /* Complete frame received */
7598 l2cap_recv_frame(conn, skb);
7602 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7604 if (skb->len > len) {
7605 BT_ERR("Frame is too long (len %d, expected len %d)",
7607 l2cap_conn_unreliable(conn, ECOMM);
7611 /* Allocate skb for the complete frame (with header) */
7612 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7616 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7618 conn->rx_len = len - skb->len;
7622 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7624 if (!conn->rx_len) {
7625 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7626 l2cap_conn_unreliable(conn, ECOMM);
7630 if (skb->len > conn->rx_len) {
7631 BT_ERR("Fragment is too long (len %d, expected %d)",
7632 skb->len, conn->rx_len);
7633 kfree_skb(conn->rx_skb);
7634 conn->rx_skb = NULL;
7636 l2cap_conn_unreliable(conn, ECOMM);
7640 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7642 conn->rx_len -= skb->len;
7644 if (!conn->rx_len) {
7645 /* Complete frame received. l2cap_recv_frame
7646 * takes ownership of the skb so set the global
7647 * rx_skb pointer to NULL first.
7649 struct sk_buff *rx_skb = conn->rx_skb;
7650 conn->rx_skb = NULL;
7651 l2cap_recv_frame(conn, rx_skb);
7660 static struct hci_cb l2cap_cb = {
7662 .connect_cfm = l2cap_connect_cfm,
7663 .disconn_cfm = l2cap_disconn_cfm,
7664 .security_cfm = l2cap_security_cfm,
7667 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7669 struct l2cap_chan *c;
7671 read_lock(&chan_list_lock);
7673 list_for_each_entry(c, &chan_list, global_l) {
7674 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7675 &c->src, c->src_type, &c->dst, c->dst_type,
7676 c->state, __le16_to_cpu(c->psm),
7677 c->scid, c->dcid, c->imtu, c->omtu,
7678 c->sec_level, c->mode);
7681 read_unlock(&chan_list_lock);
7686 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7688 return single_open(file, l2cap_debugfs_show, inode->i_private);
7691 static const struct file_operations l2cap_debugfs_fops = {
7692 .open = l2cap_debugfs_open,
7694 .llseek = seq_lseek,
7695 .release = single_release,
7698 static struct dentry *l2cap_debugfs;
7700 int __init l2cap_init(void)
7704 err = l2cap_init_sockets();
7708 hci_register_cb(&l2cap_cb);
7710 if (IS_ERR_OR_NULL(bt_debugfs))
7713 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7714 NULL, &l2cap_debugfs_fops);
7716 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7718 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
7724 void l2cap_exit(void)
7726 debugfs_remove(l2cap_debugfs);
7727 hci_unregister_cb(&l2cap_cb);
7728 l2cap_cleanup_sockets();
7731 module_param(disable_ertm, bool, 0644);
7732 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");