/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.
   Copyright (C) 2011 ProFUSION Embedded Systems
   Copyright (c) 2012 Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
45 #define LE_FLOWCTL_MAX_CREDITS 65535
49 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
51 static LIST_HEAD(chan_list);
52 static DEFINE_RWLOCK(chan_list_lock);
54 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
55 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
57 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
58 u8 code, u8 ident, u16 dlen, void *data);
59 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
61 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
62 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
64 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
65 struct sk_buff_head *skbs, u8 event);
67 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
69 if (link_type == LE_LINK) {
70 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
71 return BDADDR_LE_PUBLIC;
73 return BDADDR_LE_RANDOM;
79 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
81 return bdaddr_type(hcon->type, hcon->src_type);
84 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
86 return bdaddr_type(hcon->type, hcon->dst_type);
89 /* ---- L2CAP channels ---- */
91 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
96 list_for_each_entry(c, &conn->chan_l, list) {
103 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
106 struct l2cap_chan *c;
108 list_for_each_entry(c, &conn->chan_l, list) {
115 /* Find channel with given SCID.
116 * Returns locked channel. */
117 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
120 struct l2cap_chan *c;
122 mutex_lock(&conn->chan_lock);
123 c = __l2cap_get_chan_by_scid(conn, cid);
126 mutex_unlock(&conn->chan_lock);
131 /* Find channel with given DCID.
132 * Returns locked channel.
134 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
137 struct l2cap_chan *c;
139 mutex_lock(&conn->chan_lock);
140 c = __l2cap_get_chan_by_dcid(conn, cid);
143 mutex_unlock(&conn->chan_lock);
148 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
151 struct l2cap_chan *c;
153 list_for_each_entry(c, &conn->chan_l, list) {
154 if (c->ident == ident)
160 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
163 struct l2cap_chan *c;
165 mutex_lock(&conn->chan_lock);
166 c = __l2cap_get_chan_by_ident(conn, ident);
169 mutex_unlock(&conn->chan_lock);
174 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
176 struct l2cap_chan *c;
178 list_for_each_entry(c, &chan_list, global_l) {
179 if (c->sport == psm && !bacmp(&c->src, src))
185 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
189 write_lock(&chan_list_lock);
191 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
201 u16 p, start, end, incr;
203 if (chan->src_type == BDADDR_BREDR) {
204 start = L2CAP_PSM_DYN_START;
205 end = L2CAP_PSM_AUTO_END;
208 start = L2CAP_PSM_LE_DYN_START;
209 end = L2CAP_PSM_LE_DYN_END;
214 for (p = start; p <= end; p += incr)
215 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
216 chan->psm = cpu_to_le16(p);
217 chan->sport = cpu_to_le16(p);
224 write_unlock(&chan_list_lock);
227 EXPORT_SYMBOL_GPL(l2cap_add_psm);
229 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
231 write_lock(&chan_list_lock);
233 /* Override the defaults (which are for conn-oriented) */
234 chan->omtu = L2CAP_DEFAULT_MTU;
235 chan->chan_type = L2CAP_CHAN_FIXED;
239 write_unlock(&chan_list_lock);
244 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
248 if (conn->hcon->type == LE_LINK)
249 dyn_end = L2CAP_CID_LE_DYN_END;
251 dyn_end = L2CAP_CID_DYN_END;
253 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
254 if (!__l2cap_get_chan_by_scid(conn, cid))
261 static void l2cap_state_change(struct l2cap_chan *chan, int state)
263 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
264 state_to_string(state));
267 chan->ops->state_change(chan, state, 0);
270 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
274 chan->ops->state_change(chan, chan->state, err);
277 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
279 chan->ops->state_change(chan, chan->state, err);
282 static void __set_retrans_timer(struct l2cap_chan *chan)
284 if (!delayed_work_pending(&chan->monitor_timer) &&
285 chan->retrans_timeout) {
286 l2cap_set_timer(chan, &chan->retrans_timer,
287 msecs_to_jiffies(chan->retrans_timeout));
291 static void __set_monitor_timer(struct l2cap_chan *chan)
293 __clear_retrans_timer(chan);
294 if (chan->monitor_timeout) {
295 l2cap_set_timer(chan, &chan->monitor_timer,
296 msecs_to_jiffies(chan->monitor_timeout));
300 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
305 skb_queue_walk(head, skb) {
306 if (bt_cb(skb)->l2cap.txseq == seq)
/* ---- L2CAP sequence number lists ---- */

/* For ERTM, ordered lists of sequence numbers must be tracked for
 * SREJ requests that are received and for frames that are to be
 * retransmitted. These seq_list functions implement a singly-linked
 * list in an array, where membership in the list can also be checked
 * in constant time. Items can also be added to the tail of the list
 * and removed from the head in constant time, without further memory
 * allocation.
 */
324 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
326 size_t alloc_size, i;
328 /* Allocated size is a power of 2 to map sequence numbers
329 * (which may be up to 14 bits) in to a smaller array that is
330 * sized for the negotiated ERTM transmit windows.
332 alloc_size = roundup_pow_of_two(size);
334 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
338 seq_list->mask = alloc_size - 1;
339 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
341 for (i = 0; i < alloc_size; i++)
342 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
347 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
349 kfree(seq_list->list);
352 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
355 /* Constant-time check for list membership */
356 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
359 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
361 u16 seq = seq_list->head;
362 u16 mask = seq_list->mask;
364 seq_list->head = seq_list->list[seq & mask];
365 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
367 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
368 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
369 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
375 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
379 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
382 for (i = 0; i <= seq_list->mask; i++)
383 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
385 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
386 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
389 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
391 u16 mask = seq_list->mask;
393 /* All appends happen in constant time */
395 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
398 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
399 seq_list->head = seq;
401 seq_list->list[seq_list->tail & mask] = seq;
403 seq_list->tail = seq;
404 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: close the channel with an
 * errno chosen from its current state, then drop the reference taken
 * when the timer was armed.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);
}
440 struct l2cap_chan *l2cap_chan_create(void)
442 struct l2cap_chan *chan;
444 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
448 skb_queue_head_init(&chan->tx_q);
449 skb_queue_head_init(&chan->srej_q);
450 mutex_init(&chan->lock);
452 /* Set default lock nesting level */
453 atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
455 write_lock(&chan_list_lock);
456 list_add(&chan->global_l, &chan_list);
457 write_unlock(&chan_list_lock);
459 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
461 chan->state = BT_OPEN;
463 kref_init(&chan->kref);
465 /* This flag is cleared in l2cap_chan_ready() */
466 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
468 BT_DBG("chan %p", chan);
472 EXPORT_SYMBOL_GPL(l2cap_chan_create);
474 static void l2cap_chan_destroy(struct kref *kref)
476 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
478 BT_DBG("chan %p", chan);
480 write_lock(&chan_list_lock);
481 list_del(&chan->global_l);
482 write_unlock(&chan_list_lock);
487 void l2cap_chan_hold(struct l2cap_chan *c)
489 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
494 void l2cap_chan_put(struct l2cap_chan *c)
496 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
498 kref_put(&c->kref, l2cap_chan_destroy);
500 EXPORT_SYMBOL_GPL(l2cap_chan_put);
502 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
504 chan->fcs = L2CAP_FCS_CRC16;
505 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
506 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
507 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
508 chan->remote_max_tx = chan->max_tx;
509 chan->remote_tx_win = chan->tx_win;
510 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
511 chan->sec_level = BT_SECURITY_LOW;
512 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
513 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
514 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
516 chan->conf_state = 0;
517 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
519 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
521 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
523 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
526 chan->sdu_last_frag = NULL;
528 chan->tx_credits = 0;
529 chan->rx_credits = le_max_credits;
530 chan->mps = min_t(u16, chan->imtu, le_default_mps);
532 skb_queue_head_init(&chan->tx_q);
535 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
537 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
538 __le16_to_cpu(chan->psm), chan->dcid);
540 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
544 switch (chan->chan_type) {
545 case L2CAP_CHAN_CONN_ORIENTED:
546 /* Alloc CID for connection-oriented socket */
547 chan->scid = l2cap_alloc_cid(conn);
548 if (conn->hcon->type == ACL_LINK)
549 chan->omtu = L2CAP_DEFAULT_MTU;
552 case L2CAP_CHAN_CONN_LESS:
553 /* Connectionless socket */
554 chan->scid = L2CAP_CID_CONN_LESS;
555 chan->dcid = L2CAP_CID_CONN_LESS;
556 chan->omtu = L2CAP_DEFAULT_MTU;
559 case L2CAP_CHAN_FIXED:
560 /* Caller will set CID and CID specific MTU values */
564 /* Raw socket can send/recv signalling messages only */
565 chan->scid = L2CAP_CID_SIGNALING;
566 chan->dcid = L2CAP_CID_SIGNALING;
567 chan->omtu = L2CAP_DEFAULT_MTU;
570 chan->local_id = L2CAP_BESTEFFORT_ID;
571 chan->local_stype = L2CAP_SERV_BESTEFFORT;
572 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
573 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
574 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
575 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
577 l2cap_chan_hold(chan);
579 /* Only keep a reference for fixed channels if they requested it */
580 if (chan->chan_type != L2CAP_CHAN_FIXED ||
581 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
582 hci_conn_hold(conn->hcon);
584 list_add(&chan->list, &conn->chan_l);
587 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
589 mutex_lock(&conn->chan_lock);
590 __l2cap_chan_add(conn, chan);
591 mutex_unlock(&conn->chan_lock);
594 void l2cap_chan_del(struct l2cap_chan *chan, int err)
596 struct l2cap_conn *conn = chan->conn;
598 __clear_chan_timer(chan);
600 BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
601 state_to_string(chan->state));
603 chan->ops->teardown(chan, err);
606 struct amp_mgr *mgr = conn->hcon->amp_mgr;
607 /* Delete from channel list */
608 list_del(&chan->list);
610 l2cap_chan_put(chan);
614 /* Reference was only held for non-fixed channels or
615 * fixed channels that explicitly requested it using the
616 * FLAG_HOLD_HCI_CONN flag.
618 if (chan->chan_type != L2CAP_CHAN_FIXED ||
619 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
620 hci_conn_drop(conn->hcon);
622 if (mgr && mgr->bredr_chan == chan)
623 mgr->bredr_chan = NULL;
626 if (chan->hs_hchan) {
627 struct hci_chan *hs_hchan = chan->hs_hchan;
629 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
630 amp_disconnect_logical_link(hs_hchan);
633 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
637 case L2CAP_MODE_BASIC:
640 case L2CAP_MODE_LE_FLOWCTL:
641 skb_queue_purge(&chan->tx_q);
644 case L2CAP_MODE_ERTM:
645 __clear_retrans_timer(chan);
646 __clear_monitor_timer(chan);
647 __clear_ack_timer(chan);
649 skb_queue_purge(&chan->srej_q);
651 l2cap_seq_list_free(&chan->srej_list);
652 l2cap_seq_list_free(&chan->retrans_list);
656 case L2CAP_MODE_STREAMING:
657 skb_queue_purge(&chan->tx_q);
663 EXPORT_SYMBOL_GPL(l2cap_chan_del);
665 static void l2cap_conn_update_id_addr(struct work_struct *work)
667 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
668 id_addr_update_work);
669 struct hci_conn *hcon = conn->hcon;
670 struct l2cap_chan *chan;
672 mutex_lock(&conn->chan_lock);
674 list_for_each_entry(chan, &conn->chan_l, list) {
675 l2cap_chan_lock(chan);
676 bacpy(&chan->dst, &hcon->dst);
677 chan->dst_type = bdaddr_dst_type(hcon);
678 l2cap_chan_unlock(chan);
681 mutex_unlock(&conn->chan_lock);
684 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
686 struct l2cap_conn *conn = chan->conn;
687 struct l2cap_le_conn_rsp rsp;
690 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
691 result = L2CAP_CR_AUTHORIZATION;
693 result = L2CAP_CR_BAD_PSM;
695 l2cap_state_change(chan, BT_DISCONN);
697 rsp.dcid = cpu_to_le16(chan->scid);
698 rsp.mtu = cpu_to_le16(chan->imtu);
699 rsp.mps = cpu_to_le16(chan->mps);
700 rsp.credits = cpu_to_le16(chan->rx_credits);
701 rsp.result = cpu_to_le16(result);
703 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
707 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
709 struct l2cap_conn *conn = chan->conn;
710 struct l2cap_conn_rsp rsp;
713 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
714 result = L2CAP_CR_SEC_BLOCK;
716 result = L2CAP_CR_BAD_PSM;
718 l2cap_state_change(chan, BT_DISCONN);
720 rsp.scid = cpu_to_le16(chan->dcid);
721 rsp.dcid = cpu_to_le16(chan->scid);
722 rsp.result = cpu_to_le16(result);
723 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
725 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
728 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
730 struct l2cap_conn *conn = chan->conn;
732 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
734 switch (chan->state) {
736 chan->ops->teardown(chan, 0);
741 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
742 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
743 l2cap_send_disconn_req(chan, reason);
745 l2cap_chan_del(chan, reason);
749 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
750 if (conn->hcon->type == ACL_LINK)
751 l2cap_chan_connect_reject(chan);
752 else if (conn->hcon->type == LE_LINK)
753 l2cap_chan_le_connect_reject(chan);
756 l2cap_chan_del(chan, reason);
761 l2cap_chan_del(chan, reason);
765 chan->ops->teardown(chan, 0);
769 EXPORT_SYMBOL(l2cap_chan_close);
771 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
773 switch (chan->chan_type) {
775 switch (chan->sec_level) {
776 case BT_SECURITY_HIGH:
777 case BT_SECURITY_FIPS:
778 return HCI_AT_DEDICATED_BONDING_MITM;
779 case BT_SECURITY_MEDIUM:
780 return HCI_AT_DEDICATED_BONDING;
782 return HCI_AT_NO_BONDING;
785 case L2CAP_CHAN_CONN_LESS:
786 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
787 if (chan->sec_level == BT_SECURITY_LOW)
788 chan->sec_level = BT_SECURITY_SDP;
790 if (chan->sec_level == BT_SECURITY_HIGH ||
791 chan->sec_level == BT_SECURITY_FIPS)
792 return HCI_AT_NO_BONDING_MITM;
794 return HCI_AT_NO_BONDING;
796 case L2CAP_CHAN_CONN_ORIENTED:
797 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
798 if (chan->sec_level == BT_SECURITY_LOW)
799 chan->sec_level = BT_SECURITY_SDP;
801 if (chan->sec_level == BT_SECURITY_HIGH ||
802 chan->sec_level == BT_SECURITY_FIPS)
803 return HCI_AT_NO_BONDING_MITM;
805 return HCI_AT_NO_BONDING;
809 switch (chan->sec_level) {
810 case BT_SECURITY_HIGH:
811 case BT_SECURITY_FIPS:
812 return HCI_AT_GENERAL_BONDING_MITM;
813 case BT_SECURITY_MEDIUM:
814 return HCI_AT_GENERAL_BONDING;
816 return HCI_AT_NO_BONDING;
822 /* Service level security */
823 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
825 struct l2cap_conn *conn = chan->conn;
828 if (conn->hcon->type == LE_LINK)
829 return smp_conn_security(conn->hcon, chan->sec_level);
831 auth_type = l2cap_get_auth_type(chan);
833 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
837 static u8 l2cap_get_ident(struct l2cap_conn *conn)
841 /* Get next available identificator.
842 * 1 - 128 are used by kernel.
843 * 129 - 199 are reserved.
844 * 200 - 254 are used by utilities like l2ping, etc.
847 mutex_lock(&conn->ident_lock);
849 if (++conn->tx_ident > 128)
854 mutex_unlock(&conn->ident_lock);
859 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
862 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
865 BT_DBG("code 0x%2.2x", code);
870 /* Use NO_FLUSH if supported or we have an LE link (which does
871 * not support auto-flushing packets) */
872 if (lmp_no_flush_capable(conn->hcon->hdev) ||
873 conn->hcon->type == LE_LINK)
874 flags = ACL_START_NO_FLUSH;
878 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
879 skb->priority = HCI_PRIO_MAX;
881 hci_send_acl(conn->hchan, skb, flags);
884 static bool __chan_is_moving(struct l2cap_chan *chan)
886 return chan->move_state != L2CAP_MOVE_STABLE &&
887 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
890 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
892 struct hci_conn *hcon = chan->conn->hcon;
895 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
898 if (chan->hs_hcon && !__chan_is_moving(chan)) {
900 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
907 /* Use NO_FLUSH for LE links (where this is the only option) or
908 * if the BR/EDR link supports it and flushing has not been
909 * explicitly requested (through FLAG_FLUSHABLE).
911 if (hcon->type == LE_LINK ||
912 (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
913 lmp_no_flush_capable(hcon->hdev)))
914 flags = ACL_START_NO_FLUSH;
918 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
919 hci_send_acl(chan->conn->hchan, skb, flags);
922 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
924 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
925 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
927 if (enh & L2CAP_CTRL_FRAME_TYPE) {
930 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
931 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
938 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
939 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
946 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
948 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
949 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
951 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
954 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
955 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
962 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
963 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
970 static inline void __unpack_control(struct l2cap_chan *chan,
973 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
974 __unpack_extended_control(get_unaligned_le32(skb->data),
976 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
978 __unpack_enhanced_control(get_unaligned_le16(skb->data),
980 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
984 static u32 __pack_extended_control(struct l2cap_ctrl *control)
988 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
989 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
991 if (control->sframe) {
992 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
993 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
994 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
996 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
997 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1003 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1007 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1008 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1010 if (control->sframe) {
1011 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1012 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1013 packed |= L2CAP_CTRL_FRAME_TYPE;
1015 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1016 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1022 static inline void __pack_control(struct l2cap_chan *chan,
1023 struct l2cap_ctrl *control,
1024 struct sk_buff *skb)
1026 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1027 put_unaligned_le32(__pack_extended_control(control),
1028 skb->data + L2CAP_HDR_SIZE);
1030 put_unaligned_le16(__pack_enhanced_control(control),
1031 skb->data + L2CAP_HDR_SIZE);
1035 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1037 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1038 return L2CAP_EXT_HDR_SIZE;
1040 return L2CAP_ENH_HDR_SIZE;
1043 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1046 struct sk_buff *skb;
1047 struct l2cap_hdr *lh;
1048 int hlen = __ertm_hdr_size(chan);
1050 if (chan->fcs == L2CAP_FCS_CRC16)
1051 hlen += L2CAP_FCS_SIZE;
1053 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1056 return ERR_PTR(-ENOMEM);
1058 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1059 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1060 lh->cid = cpu_to_le16(chan->dcid);
1062 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1063 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1065 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1067 if (chan->fcs == L2CAP_FCS_CRC16) {
1068 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1069 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1072 skb->priority = HCI_PRIO_MAX;
1076 static void l2cap_send_sframe(struct l2cap_chan *chan,
1077 struct l2cap_ctrl *control)
1079 struct sk_buff *skb;
1082 BT_DBG("chan %p, control %p", chan, control);
1084 if (!control->sframe)
1087 if (__chan_is_moving(chan))
1090 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1094 if (control->super == L2CAP_SUPER_RR)
1095 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1096 else if (control->super == L2CAP_SUPER_RNR)
1097 set_bit(CONN_RNR_SENT, &chan->conn_state);
1099 if (control->super != L2CAP_SUPER_SREJ) {
1100 chan->last_acked_seq = control->reqseq;
1101 __clear_ack_timer(chan);
1104 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1105 control->final, control->poll, control->super);
1107 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1108 control_field = __pack_extended_control(control);
1110 control_field = __pack_enhanced_control(control);
1112 skb = l2cap_create_sframe_pdu(chan, control_field);
1114 l2cap_do_send(chan, skb);
1117 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1119 struct l2cap_ctrl control;
1121 BT_DBG("chan %p, poll %d", chan, poll);
1123 memset(&control, 0, sizeof(control));
1125 control.poll = poll;
1127 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1128 control.super = L2CAP_SUPER_RNR;
1130 control.super = L2CAP_SUPER_RR;
1132 control.reqseq = chan->buffer_seq;
1133 l2cap_send_sframe(chan, &control);
1136 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1138 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1141 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1144 static bool __amp_capable(struct l2cap_chan *chan)
1146 struct l2cap_conn *conn = chan->conn;
1147 struct hci_dev *hdev;
1148 bool amp_available = false;
1150 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1153 if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1156 read_lock(&hci_dev_list_lock);
1157 list_for_each_entry(hdev, &hci_dev_list, list) {
1158 if (hdev->amp_type != AMP_TYPE_BREDR &&
1159 test_bit(HCI_UP, &hdev->flags)) {
1160 amp_available = true;
1164 read_unlock(&hci_dev_list_lock);
1166 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1167 return amp_available;
1172 static bool l2cap_check_efs(struct l2cap_chan *chan)
1174 /* Check EFS parameters */
1178 void l2cap_send_conn_req(struct l2cap_chan *chan)
1180 struct l2cap_conn *conn = chan->conn;
1181 struct l2cap_conn_req req;
1183 req.scid = cpu_to_le16(chan->scid);
1184 req.psm = chan->psm;
1186 chan->ident = l2cap_get_ident(conn);
1188 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1190 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1193 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1195 struct l2cap_create_chan_req req;
1196 req.scid = cpu_to_le16(chan->scid);
1197 req.psm = chan->psm;
1198 req.amp_id = amp_id;
1200 chan->ident = l2cap_get_ident(chan->conn);
1202 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1206 static void l2cap_move_setup(struct l2cap_chan *chan)
1208 struct sk_buff *skb;
1210 BT_DBG("chan %p", chan);
1212 if (chan->mode != L2CAP_MODE_ERTM)
1215 __clear_retrans_timer(chan);
1216 __clear_monitor_timer(chan);
1217 __clear_ack_timer(chan);
1219 chan->retry_count = 0;
1220 skb_queue_walk(&chan->tx_q, skb) {
1221 if (bt_cb(skb)->l2cap.retries)
1222 bt_cb(skb)->l2cap.retries = 1;
1227 chan->expected_tx_seq = chan->buffer_seq;
1229 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1230 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1231 l2cap_seq_list_clear(&chan->retrans_list);
1232 l2cap_seq_list_clear(&chan->srej_list);
1233 skb_queue_purge(&chan->srej_q);
1235 chan->tx_state = L2CAP_TX_STATE_XMIT;
1236 chan->rx_state = L2CAP_RX_STATE_MOVE;
1238 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1241 static void l2cap_move_done(struct l2cap_chan *chan)
1243 u8 move_role = chan->move_role;
1244 BT_DBG("chan %p", chan);
1246 chan->move_state = L2CAP_MOVE_STABLE;
1247 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1249 if (chan->mode != L2CAP_MODE_ERTM)
1252 switch (move_role) {
1253 case L2CAP_MOVE_ROLE_INITIATOR:
1254 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1255 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1257 case L2CAP_MOVE_ROLE_RESPONDER:
1258 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1263 static void l2cap_chan_ready(struct l2cap_chan *chan)
1265 /* The channel may have already been flagged as connected in
1266 * case of receiving data before the L2CAP info req/rsp
1267 * procedure is complete.
1269 if (chan->state == BT_CONNECTED)
1272 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1273 chan->conf_state = 0;
1274 __clear_chan_timer(chan);
1276 if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1277 chan->ops->suspend(chan);
1279 chan->state = BT_CONNECTED;
1281 chan->ops->ready(chan);
1284 static void l2cap_le_connect(struct l2cap_chan *chan)
1286 struct l2cap_conn *conn = chan->conn;
1287 struct l2cap_le_conn_req req;
1289 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1292 req.psm = chan->psm;
1293 req.scid = cpu_to_le16(chan->scid);
1294 req.mtu = cpu_to_le16(chan->imtu);
1295 req.mps = cpu_to_le16(chan->mps);
1296 req.credits = cpu_to_le16(chan->rx_credits);
1298 chan->ident = l2cap_get_ident(conn);
1300 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1304 static void l2cap_le_start(struct l2cap_chan *chan)
1306 struct l2cap_conn *conn = chan->conn;
1308 if (!smp_conn_security(conn->hcon, chan->sec_level))
1312 l2cap_chan_ready(chan);
1316 if (chan->state == BT_CONNECT)
1317 l2cap_le_connect(chan);
1320 static void l2cap_start_connection(struct l2cap_chan *chan)
1322 if (__amp_capable(chan)) {
1323 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1324 a2mp_discover_amp(chan);
1325 } else if (chan->conn->hcon->type == LE_LINK) {
1326 l2cap_le_start(chan);
1328 l2cap_send_conn_req(chan);
1332 static void l2cap_request_info(struct l2cap_conn *conn)
1334 struct l2cap_info_req req;
1336 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1339 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1341 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1342 conn->info_ident = l2cap_get_ident(conn);
1344 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1346 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1350 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1352 /* The minimum encryption key size needs to be enforced by the
1353 * host stack before establishing any L2CAP connections. The
1354 * specification in theory allows a minimum of 1, but to align
1355 * BR/EDR and LE transports, a minimum of 7 is chosen.
1357 * This check might also be called for unencrypted connections
1358 * that have no key size requirements. Ensure that the link is
1359 * actually encrypted before enforcing a key size.
1361 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1362 hcon->enc_key_size >= HCI_MIN_ENC_KEY_SIZE);
/* Drive an outgoing channel toward connected state. LE links use the
 * dedicated LE start path. BR/EDR links first complete the feature
 * mask information exchange, then require passing security and the
 * encryption key size check before the Connect Request goes out; on
 * key size failure the channel is put on the disconnect timer instead.
 */
1365 static void l2cap_do_start(struct l2cap_chan *chan)
1367 struct l2cap_conn *conn = chan->conn;
1369 if (conn->hcon->type == LE_LINK) {
1370 l2cap_le_start(chan);
1374 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1375 l2cap_request_info(conn);
1379 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1382 if (!l2cap_chan_check_security(chan, true) ||
1383 !__l2cap_no_conn_pending(chan))
/* Enforce minimum encryption key size before connecting */
1386 if (l2cap_check_enc_key_size(conn->hcon))
1387 l2cap_start_connection(chan);
1389 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Non-zero when the given retransmission mode (ERTM or streaming) is
 * advertised by both the local and the remote feature masks.
 */
1392 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1394 u32 local_feat_mask = l2cap_feat_mask;
1396 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1399 case L2CAP_MODE_ERTM:
1400 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1401 case L2CAP_MODE_STREAMING:
1402 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Tear a channel down with a Disconnect Request: stop ERTM timers on a
 * connected ERTM channel first, then move to BT_DISCONN with @err. An
 * A2MP control channel has no L2CAP signalling of its own, so it only
 * gets the state change.
 */
1408 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1410 struct l2cap_conn *conn = chan->conn;
1411 struct l2cap_disconn_req req;
1416 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1417 __clear_retrans_timer(chan);
1418 __clear_monitor_timer(chan);
1419 __clear_ack_timer(chan);
1422 if (chan->scid == L2CAP_CID_A2MP) {
1423 l2cap_state_change(chan, BT_DISCONN);
1427 req.dcid = cpu_to_le16(chan->dcid);
1428 req.scid = cpu_to_le16(chan->scid);
1429 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1432 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1435 /* ---- L2CAP connections ---- */
/* Walk every channel on a BR/EDR connection once the feature exchange
 * is done: mark non-connection-oriented channels ready, (re)start
 * outgoing BT_CONNECT channels after security and key size checks, and
 * answer pending incoming requests (BT_CONNECT2) with a Connect
 * Response, sending the first Configure Request when appropriate.
 * Runs under conn->chan_lock; each channel is locked individually.
 */
1436 static void l2cap_conn_start(struct l2cap_conn *conn)
1438 struct l2cap_chan *chan, *tmp;
1440 BT_DBG("conn %p", conn);
1442 mutex_lock(&conn->chan_lock);
1444 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1445 l2cap_chan_lock(chan);
1447 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1448 l2cap_chan_ready(chan);
1449 l2cap_chan_unlock(chan);
1453 if (chan->state == BT_CONNECT) {
1454 if (!l2cap_chan_check_security(chan, true) ||
1455 !__l2cap_no_conn_pending(chan)) {
1456 l2cap_chan_unlock(chan);
/* Close when a state-2 device insists on a mode the remote lacks */
1460 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1461 && test_bit(CONF_STATE2_DEVICE,
1462 &chan->conf_state)) {
1463 l2cap_chan_close(chan, ECONNRESET);
1464 l2cap_chan_unlock(chan);
/* Enforce minimum encryption key size before connecting */
1468 if (l2cap_check_enc_key_size(conn->hcon))
1469 l2cap_start_connection(chan);
1471 l2cap_chan_close(chan, ECONNREFUSED);
1473 } else if (chan->state == BT_CONNECT2) {
1474 struct l2cap_conn_rsp rsp;
1476 rsp.scid = cpu_to_le16(chan->dcid);
1477 rsp.dcid = cpu_to_le16(chan->scid);
1479 if (l2cap_chan_check_security(chan, false)) {
1480 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1481 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1482 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1483 chan->ops->defer(chan);
1486 l2cap_state_change(chan, BT_CONFIG);
1487 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1488 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1491 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1492 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1495 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Config already in flight or response not success: wait */
1498 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1499 rsp.result != L2CAP_CR_SUCCESS) {
1500 l2cap_chan_unlock(chan);
1504 set_bit(CONF_REQ_SENT, &chan->conf_state);
1505 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1506 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1507 chan->num_conf_req++;
1510 l2cap_chan_unlock(chan);
1513 mutex_unlock(&conn->chan_lock);
/* LE connection became ready: trigger pending SMP security (covers
 * outgoing pairing without a socket) and, for the slave role, request
 * a connection parameter update when the current interval falls
 * outside the configured min/max range.
 */
1516 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1518 struct hci_conn *hcon = conn->hcon;
1519 struct hci_dev *hdev = hcon->hdev;
1521 BT_DBG("%s conn %p", hdev->name, conn);
1523 /* For outgoing pairing which doesn't necessarily have an
1524 * associated socket (e.g. mgmt_pair_device).
1527 smp_conn_security(hcon, hcon->pending_sec_level);
1529 /* For LE slave connections, make sure the connection interval
1530 * is in the range of the minimum and maximum interval that has
1531 * been configured for this connection. If not, then trigger
1532 * the connection update procedure.
1534 if (hcon->role == HCI_ROLE_SLAVE &&
1535 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1536 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1537 struct l2cap_conn_param_update_req req;
1539 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1540 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1541 req.latency = cpu_to_le16(hcon->le_conn_latency);
1542 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1544 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1545 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
/* Link is fully up: request the feature mask on ACL links, then walk
 * all channels and advance each according to its type/state. A2MP
 * channels are skipped here. Finally run the LE ready work and kick
 * the deferred pending_rx processing.
 */
1549 static void l2cap_conn_ready(struct l2cap_conn *conn)
1551 struct l2cap_chan *chan;
1552 struct hci_conn *hcon = conn->hcon;
1554 BT_DBG("conn %p", conn);
1556 if (hcon->type == ACL_LINK)
1557 l2cap_request_info(conn);
1559 mutex_lock(&conn->chan_lock);
1561 list_for_each_entry(chan, &conn->chan_l, list) {
1563 l2cap_chan_lock(chan);
1565 if (chan->scid == L2CAP_CID_A2MP) {
1566 l2cap_chan_unlock(chan);
1570 if (hcon->type == LE_LINK) {
1571 l2cap_le_start(chan);
1572 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
/* Connectionless channels are ready once features are known */
1573 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1574 l2cap_chan_ready(chan);
1575 } else if (chan->state == BT_CONNECT) {
1576 l2cap_do_start(chan);
1579 l2cap_chan_unlock(chan);
1582 mutex_unlock(&conn->chan_lock);
1584 if (hcon->type == LE_LINK)
1585 l2cap_le_conn_ready(conn);
1587 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1590 /* Notify sockets that we cannot guarantee reliability anymore */
/* Flag @err on every channel that demanded reliable delivery
 * (FLAG_FORCE_RELIABLE); other channels are left untouched.
 */
1591 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1593 struct l2cap_chan *chan;
1595 BT_DBG("conn %p", conn);
1597 mutex_lock(&conn->chan_lock);
1599 list_for_each_entry(chan, &conn->chan_l, list) {
1600 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1601 l2cap_chan_set_err(chan, err);
1604 mutex_unlock(&conn->chan_lock);
/* Info request timed out: give up on the feature exchange, mark it
 * done, and let pending channels proceed with defaults.
 */
1607 static void l2cap_info_timeout(struct work_struct *work)
1609 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1612 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1613 conn->info_ident = 0;
1615 l2cap_conn_start(conn);
1620 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1621 * callback is called during registration. The ->remove callback is called
1622 * during unregistration.
1623 * An l2cap_user object can either be explicitly unregistered or when the
1624 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1625 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1626 * External modules must own a reference to the l2cap_conn object if they intend
1627 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1628 * any time if they don't.
/* Register an external l2cap_user on @conn under the hci_dev lock,
 * invoking user->probe(). Fails if the user is already registered or
 * the connection was already deleted (conn->hchan == NULL).
 */
1631 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1633 struct hci_dev *hdev = conn->hcon->hdev;
1636 /* We need to check whether l2cap_conn is registered. If it is not, we
1637 * must not register the l2cap_user. l2cap_conn_del() unregisters
1638 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1639 * relies on the parent hci_conn object to be locked. This itself relies
1640 * on the hci_dev object to be locked. So we must lock the hci device
1645 if (!list_empty(&user->list)) {
1650 /* conn->hchan is NULL after l2cap_conn_del() was called */
1656 ret = user->probe(conn, user);
1660 list_add(&user->list, &conn->users);
1664 hci_dev_unlock(hdev);
1667 EXPORT_SYMBOL(l2cap_register_user);
/* Unregister @user from @conn under the hci_dev lock and call its
 * ->remove() callback; a no-op if the user was never registered.
 */
1669 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1671 struct hci_dev *hdev = conn->hcon->hdev;
1675 if (list_empty(&user->list))
1678 list_del_init(&user->list);
1679 user->remove(conn, user);
1682 hci_dev_unlock(hdev);
1684 EXPORT_SYMBOL(l2cap_unregister_user);
/* Drain conn->users, calling each user's ->remove() callback; used
 * when the connection itself is going away.
 */
1686 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1688 struct l2cap_user *user;
1690 while (!list_empty(&conn->users)) {
1691 user = list_first_entry(&conn->users, struct l2cap_user, list);
1692 list_del_init(&user->list);
1693 user->remove(conn, user);
/* Tear down the L2CAP connection attached to @hcon: flush pending RX,
 * cancel deferred work, unregister users, delete and close every
 * channel with @err, release the HCI channel, and drop the conn ref.
 * Channels are held across del/close so they survive list removal.
 */
1697 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1699 struct l2cap_conn *conn = hcon->l2cap_data;
1700 struct l2cap_chan *chan, *l;
1705 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1707 kfree_skb(conn->rx_skb);
1709 skb_queue_purge(&conn->pending_rx);
1711 /* We can not call flush_work(&conn->pending_rx_work) here since we
1712 * might block if we are running on a worker from the same workqueue
1713 * pending_rx_work is waiting on.
1715 if (work_pending(&conn->pending_rx_work))
1716 cancel_work_sync(&conn->pending_rx_work);
1718 if (work_pending(&conn->id_addr_update_work))
1719 cancel_work_sync(&conn->id_addr_update_work);
1721 l2cap_unregister_all_users(conn);
1723 /* Force the connection to be immediately dropped */
1724 hcon->disc_timeout = 0;
1726 mutex_lock(&conn->chan_lock);
1729 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1730 l2cap_chan_hold(chan);
1731 l2cap_chan_lock(chan);
1733 l2cap_chan_del(chan, err);
1735 chan->ops->close(chan);
1737 l2cap_chan_unlock(chan);
1738 l2cap_chan_put(chan);
1741 mutex_unlock(&conn->chan_lock);
1743 hci_chan_del(conn->hchan);
1745 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1746 cancel_delayed_work_sync(&conn->info_timer);
1748 hcon->l2cap_data = NULL;
1750 l2cap_conn_put(conn);
/* kref release callback: drop the hci_conn reference held by the
 * l2cap_conn and free it.
 */
1753 static void l2cap_conn_free(struct kref *ref)
1755 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1757 hci_conn_put(conn->hcon);
/* Take a reference on @conn; paired with l2cap_conn_put(). */
1761 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1763 kref_get(&conn->ref);
1766 EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference on @conn; frees it via l2cap_conn_free() on zero. */
1768 void l2cap_conn_put(struct l2cap_conn *conn)
1770 kref_put(&conn->ref, l2cap_conn_free);
1772 EXPORT_SYMBOL(l2cap_conn_put);
1774 /* ---- Socket interface ---- */
1776 /* Find socket with psm and source / destination bdaddr.
1777 * Returns closest match.
/* Exact src+dst match wins immediately; otherwise the best wildcard
 * (BDADDR_ANY) match found is returned. The returned channel has its
 * refcount raised via l2cap_chan_hold(); the caller must put it.
 */
1779 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1784 struct l2cap_chan *c, *c1 = NULL;
1786 read_lock(&chan_list_lock);
1788 list_for_each_entry(c, &chan_list, global_l) {
1789 if (state && c->state != state)
/* Filter out channels whose source address type cannot serve
 * the requested link type.
 */
1792 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1795 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1798 if (c->psm == psm) {
1799 int src_match, dst_match;
1800 int src_any, dst_any;
1803 src_match = !bacmp(&c->src, src);
1804 dst_match = !bacmp(&c->dst, dst);
1805 if (src_match && dst_match) {
1807 read_unlock(&chan_list_lock);
1812 src_any = !bacmp(&c->src, BDADDR_ANY);
1813 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1814 if ((src_match && dst_any) || (src_any && dst_match) ||
1815 (src_any && dst_any))
1821 l2cap_chan_hold(c1);
1823 read_unlock(&chan_list_lock);
/* ERTM monitor timer expired: feed L2CAP_EV_MONITOR_TO into the tx
 * state machine. The l2cap_chan_put() balances a hold — presumably
 * taken when the timer was armed (TODO confirm against __set_monitor_timer).
 */
1828 static void l2cap_monitor_timeout(struct work_struct *work)
1830 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1831 monitor_timer.work);
1833 BT_DBG("chan %p", chan);
1835 l2cap_chan_lock(chan);
1838 l2cap_chan_unlock(chan);
1839 l2cap_chan_put(chan);
1843 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1845 l2cap_chan_unlock(chan);
1846 l2cap_chan_put(chan);
/* ERTM retransmission timer expired: feed L2CAP_EV_RETRANS_TO into the
 * tx state machine. Reference handling mirrors l2cap_monitor_timeout().
 */
1849 static void l2cap_retrans_timeout(struct work_struct *work)
1851 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1852 retrans_timer.work);
1854 BT_DBG("chan %p", chan);
1856 l2cap_chan_lock(chan);
1859 l2cap_chan_unlock(chan);
1860 l2cap_chan_put(chan);
1864 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1865 l2cap_chan_unlock(chan);
1866 l2cap_chan_put(chan);
/* Streaming mode transmit: append @skbs to tx_q and send everything
 * immediately — no acknowledgements or retransmission. Each frame gets
 * its control field packed and, when enabled, a trailing CRC16 FCS.
 * Skipped entirely while the channel is being moved between AMPs.
 */
1869 static void l2cap_streaming_send(struct l2cap_chan *chan,
1870 struct sk_buff_head *skbs)
1872 struct sk_buff *skb;
1873 struct l2cap_ctrl *control;
1875 BT_DBG("chan %p, skbs %p", chan, skbs);
1877 if (__chan_is_moving(chan))
1880 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1882 while (!skb_queue_empty(&chan->tx_q)) {
1884 skb = skb_dequeue(&chan->tx_q);
1886 bt_cb(skb)->l2cap.retries = 1;
1887 control = &bt_cb(skb)->l2cap;
1889 control->reqseq = 0;
1890 control->txseq = chan->next_tx_seq;
1892 __pack_control(chan, control, skb);
1894 if (chan->fcs == L2CAP_FCS_CRC16) {
1895 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1896 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1899 l2cap_do_send(chan, skb);
1901 BT_DBG("Sent txseq %u", control->txseq);
1903 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1904 chan->frames_sent++;
/* ERTM transmit pump: send queued I-frames from tx_send_head while the
 * channel is connected, the remote is not busy, the tx window has
 * room, and the state machine is in XMIT. Frames are cloned before
 * sending so the original stays queued for retransmission; the
 * retransmission timer is (re)armed per frame sent.
 */
1908 static int l2cap_ertm_send(struct l2cap_chan *chan)
1910 struct sk_buff *skb, *tx_skb;
1911 struct l2cap_ctrl *control;
1914 BT_DBG("chan %p", chan);
1916 if (chan->state != BT_CONNECTED)
1919 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1922 if (__chan_is_moving(chan))
1925 while (chan->tx_send_head &&
1926 chan->unacked_frames < chan->remote_tx_win &&
1927 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1929 skb = chan->tx_send_head;
1931 bt_cb(skb)->l2cap.retries = 1;
1932 control = &bt_cb(skb)->l2cap;
1934 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Each I-frame piggybacks an ack of everything received so far */
1937 control->reqseq = chan->buffer_seq;
1938 chan->last_acked_seq = chan->buffer_seq;
1939 control->txseq = chan->next_tx_seq;
1941 __pack_control(chan, control, skb);
1943 if (chan->fcs == L2CAP_FCS_CRC16) {
1944 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1945 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1948 /* Clone after data has been modified. Data is assumed to be
1949 read-only (for locking purposes) on cloned sk_buffs.
1951 tx_skb = skb_clone(skb, GFP_KERNEL);
1956 __set_retrans_timer(chan);
1958 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1959 chan->unacked_frames++;
1960 chan->frames_sent++;
1963 if (skb_queue_is_last(&chan->tx_q, skb))
1964 chan->tx_send_head = NULL;
1966 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1968 l2cap_do_send(chan, tx_skb);
1969 BT_DBG("Sent txseq %u", control->txseq);
1972 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1973 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on retrans_list. Honors the
 * per-channel max_tx retry limit (0 = unlimited) — exceeding it tears
 * the channel down. Cloned skbs must be deep-copied before their
 * control field can be rewritten; the FCS is recomputed in place.
 */
1978 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1980 struct l2cap_ctrl control;
1981 struct sk_buff *skb;
1982 struct sk_buff *tx_skb;
1985 BT_DBG("chan %p", chan);
1987 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1990 if (__chan_is_moving(chan))
1993 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1994 seq = l2cap_seq_list_pop(&chan->retrans_list);
1996 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1998 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2003 bt_cb(skb)->l2cap.retries++;
2004 control = bt_cb(skb)->l2cap;
2006 if (chan->max_tx != 0 &&
2007 bt_cb(skb)->l2cap.retries > chan->max_tx) {
2008 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2009 l2cap_send_disconn_req(chan, ECONNRESET);
2010 l2cap_seq_list_clear(&chan->retrans_list);
2014 control.reqseq = chan->buffer_seq;
2015 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2020 if (skb_cloned(skb)) {
2021 /* Cloned sk_buffs are read-only, so we need a
2024 tx_skb = skb_copy(skb, GFP_KERNEL);
2026 tx_skb = skb_clone(skb, GFP_KERNEL);
2030 l2cap_seq_list_clear(&chan->retrans_list);
2034 /* Update skb contents */
2035 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2036 put_unaligned_le32(__pack_extended_control(&control),
2037 tx_skb->data + L2CAP_HDR_SIZE);
2039 put_unaligned_le16(__pack_enhanced_control(&control),
2040 tx_skb->data + L2CAP_HDR_SIZE);
/* Control field changed, so the CRC must be recomputed */
2044 if (chan->fcs == L2CAP_FCS_CRC16) {
2045 u16 fcs = crc16(0, (u8 *) tx_skb->data,
2046 tx_skb->len - L2CAP_FCS_SIZE);
2047 put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2051 l2cap_do_send(chan, tx_skb);
2053 BT_DBG("Resent txseq %d", control.txseq);
2055 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame identified by control->reqseq. */
2059 static void l2cap_retransmit(struct l2cap_chan *chan,
2060 struct l2cap_ctrl *control)
2062 BT_DBG("chan %p, control %p", chan, control);
2064 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2065 l2cap_ertm_resend(chan);
/* Queue every unacked frame from control->reqseq up to (but not
 * including) tx_send_head for retransmission, then resend. The first
 * walk locates the starting frame; the second collects the range.
 */
2068 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2069 struct l2cap_ctrl *control)
2071 struct sk_buff *skb;
2073 BT_DBG("chan %p, control %p", chan, control);
2076 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2078 l2cap_seq_list_clear(&chan->retrans_list);
2080 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2083 if (chan->unacked_frames) {
2084 skb_queue_walk(&chan->tx_q, skb) {
2085 if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2086 skb == chan->tx_send_head)
2090 skb_queue_walk_from(&chan->tx_q, skb) {
2091 if (skb == chan->tx_send_head)
2094 l2cap_seq_list_append(&chan->retrans_list,
2095 bt_cb(skb)->l2cap.txseq);
2098 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames. Sends RNR when locally busy;
 * otherwise tries to piggyback the ack on pending I-frames, sends an
 * explicit RR once enough of the ack window is outstanding, or arms
 * the delayed-ack timer.
 */
2102 static void l2cap_send_ack(struct l2cap_chan *chan)
2104 struct l2cap_ctrl control;
2105 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2106 chan->last_acked_seq);
2109 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2110 chan, chan->last_acked_seq, chan->buffer_seq);
2112 memset(&control, 0, sizeof(control));
2115 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2116 chan->rx_state == L2CAP_RX_STATE_RECV) {
2117 __clear_ack_timer(chan);
2118 control.super = L2CAP_SUPER_RNR;
2119 control.reqseq = chan->buffer_seq;
2120 l2cap_send_sframe(chan, &control);
2122 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2123 l2cap_ertm_send(chan);
2124 /* If any i-frames were sent, they included an ack */
2125 if (chan->buffer_seq == chan->last_acked_seq)
2129 /* Ack now if the window is 3/4ths full.
2130 * Calculate without mul or div
2132 threshold = chan->ack_win;
2133 threshold += threshold << 1; /* threshold *= 3 */
2136 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2139 if (frames_to_ack >= threshold) {
2140 __clear_ack_timer(chan);
2141 control.super = L2CAP_SUPER_RR;
2142 control.reqseq = chan->buffer_seq;
2143 l2cap_send_sframe(chan, &control);
2148 __set_ack_timer(chan);
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into the skb's linear area, the remainder into a chain of
 * continuation fragments each bounded by the connection MTU.
 * Returns 0 on success or a negative errno.
 */
2152 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2153 struct msghdr *msg, int len,
2154 int count, struct sk_buff *skb)
2156 struct l2cap_conn *conn = chan->conn;
2157 struct sk_buff **frag;
2160 if (copy_from_iter(skb_put(skb, count), count, &msg->msg_iter) != count)
2166 /* Continuation fragments (no L2CAP header) */
2167 frag = &skb_shinfo(skb)->frag_list;
2169 struct sk_buff *tmp;
2171 count = min_t(unsigned int, conn->mtu, len);
2173 tmp = chan->ops->alloc_skb(chan, 0, count,
2174 msg->msg_flags & MSG_DONTWAIT);
2176 return PTR_ERR(tmp);
2180 if (copy_from_iter(skb_put(*frag, count), count,
2181 &msg->msg_iter) != count)
/* Account the fragment against the parent skb's totals */
2187 skb->len += (*frag)->len;
2188 skb->data_len += (*frag)->len;
2190 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus the
 * 2-byte PSM, followed by the user payload from @msg. Returns the skb
 * or an ERR_PTR on allocation/copy failure.
 */
2196 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2197 struct msghdr *msg, size_t len)
2199 struct l2cap_conn *conn = chan->conn;
2200 struct sk_buff *skb;
2201 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2202 struct l2cap_hdr *lh;
2204 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2205 __le16_to_cpu(chan->psm), len);
2207 count = min_t(unsigned int, (conn->mtu - hlen), len);
2209 skb = chan->ops->alloc_skb(chan, hlen, count,
2210 msg->msg_flags & MSG_DONTWAIT);
2214 /* Create L2CAP header */
2215 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2216 lh->cid = cpu_to_le16(chan->dcid);
2217 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2218 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2220 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2221 if (unlikely(err < 0)) {
2223 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * user payload. Returns the skb or an ERR_PTR on failure.
 */
2228 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2229 struct msghdr *msg, size_t len)
2231 struct l2cap_conn *conn = chan->conn;
2232 struct sk_buff *skb;
2234 struct l2cap_hdr *lh;
2236 BT_DBG("chan %p len %zu", chan, len);
2238 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2240 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2241 msg->msg_flags & MSG_DONTWAIT);
2245 /* Create L2CAP header */
2246 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2247 lh->cid = cpu_to_le16(chan->dcid);
2248 lh->len = cpu_to_le16(len);
2250 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2251 if (unlikely(err < 0)) {
2253 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, a zeroed control
 * field (filled in at transmit time), an optional SDU-length field for
 * the first segment, the payload, and room reserved for the FCS.
 * Returns the skb or an ERR_PTR on failure.
 */
2258 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2259 struct msghdr *msg, size_t len,
2262 struct l2cap_conn *conn = chan->conn;
2263 struct sk_buff *skb;
2264 int err, count, hlen;
2265 struct l2cap_hdr *lh;
2267 BT_DBG("chan %p len %zu", chan, len);
2270 return ERR_PTR(-ENOTCONN);
2272 hlen = __ertm_hdr_size(chan);
2275 hlen += L2CAP_SDULEN_SIZE;
2277 if (chan->fcs == L2CAP_FCS_CRC16)
2278 hlen += L2CAP_FCS_SIZE;
2280 count = min_t(unsigned int, (conn->mtu - hlen), len);
2282 skb = chan->ops->alloc_skb(chan, hlen, count,
2283 msg->msg_flags & MSG_DONTWAIT);
2287 /* Create L2CAP header */
2288 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2289 lh->cid = cpu_to_le16(chan->dcid);
2290 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2292 /* Control header is populated later */
2293 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2294 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE))
2296 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2299 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2301 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2302 if (unlikely(err < 0)) {
2304 return ERR_PTR(err);
2307 bt_cb(skb)->l2cap.fcs = chan->fcs;
2308 bt_cb(skb)->l2cap.retries = 0;
/* Split an SDU from @msg into ERTM/streaming I-frame PDUs appended to
 * @seg_queue, tagging each with the proper SAR value (UNSEGMENTED, or
 * START/CONTINUE/END). PDU size is bounded by the HCI MTU, the BR/EDR
 * payload limit, the per-PDU header/FCS overhead, and the remote MPS.
 */
2312 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2313 struct sk_buff_head *seg_queue,
2314 struct msghdr *msg, size_t len)
2316 struct sk_buff *skb;
2321 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2323 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2324 * so fragmented skbs are not used. The HCI layer's handling
2325 * of fragmented skbs is not compatible with ERTM's queueing.
2328 /* PDU size is derived from the HCI MTU */
2329 pdu_len = chan->conn->mtu;
2331 /* Constrain PDU size for BR/EDR connections */
2333 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2335 /* Adjust for largest possible L2CAP overhead. */
2337 pdu_len -= L2CAP_FCS_SIZE;
2339 pdu_len -= __ertm_hdr_size(chan);
2341 /* Remote device may have requested smaller PDUs */
2342 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2344 if (len <= pdu_len) {
2345 sar = L2CAP_SAR_UNSEGMENTED;
2349 sar = L2CAP_SAR_START;
2354 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2357 __skb_queue_purge(seg_queue);
2358 return PTR_ERR(skb);
2361 bt_cb(skb)->l2cap.sar = sar;
2362 __skb_queue_tail(seg_queue, skb);
2368 if (len <= pdu_len) {
2369 sar = L2CAP_SAR_END;
2372 sar = L2CAP_SAR_CONTINUE;
/* Build one LE credit-based flow control PDU: L2CAP header, optional
 * SDU-length field (first segment only, when @sdulen is set), then the
 * payload. Returns the skb or an ERR_PTR on failure.
 */
2379 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2381 size_t len, u16 sdulen)
2383 struct l2cap_conn *conn = chan->conn;
2384 struct sk_buff *skb;
2385 int err, count, hlen;
2386 struct l2cap_hdr *lh;
2388 BT_DBG("chan %p len %zu", chan, len);
2391 return ERR_PTR(-ENOTCONN);
2393 hlen = L2CAP_HDR_SIZE;
2396 hlen += L2CAP_SDULEN_SIZE;
2398 count = min_t(unsigned int, (conn->mtu - hlen), len);
2400 skb = chan->ops->alloc_skb(chan, hlen, count,
2401 msg->msg_flags & MSG_DONTWAIT);
2405 /* Create L2CAP header */
2406 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2407 lh->cid = cpu_to_le16(chan->dcid);
2408 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2411 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2413 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2414 if (unlikely(err < 0)) {
2416 return ERR_PTR(err);
/* Split an SDU into LE flow-control PDUs on @seg_queue. The first PDU
 * carries the SDU length and therefore has L2CAP_SDULEN_SIZE less
 * payload room than the following ones.
 */
2422 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2423 struct sk_buff_head *seg_queue,
2424 struct msghdr *msg, size_t len)
2426 struct sk_buff *skb;
2430 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2433 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2439 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2441 __skb_queue_purge(seg_queue);
2442 return PTR_ERR(skb);
2445 __skb_queue_tail(seg_queue, skb);
/* Only the first segment carries the SDU length field */
2451 pdu_len += L2CAP_SDULEN_SIZE;
/* Main transmit entry point. Dispatches on channel type/mode:
 * connectionless channels send a single G-frame; LE flow-control
 * channels segment and send while credits remain (suspending when
 * exhausted); basic mode sends one B-frame; ERTM/streaming segment the
 * SDU first and then hand the segments to the state machine or the
 * streaming sender. Allocation may drop and retake the channel lock,
 * so the BT_CONNECTED state is rechecked after each skb is built.
 * Returns @len on success or a negative errno.
 */
2458 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2460 struct sk_buff *skb;
2462 struct sk_buff_head seg_queue;
2467 /* Connectionless channel */
2468 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2469 skb = l2cap_create_connless_pdu(chan, msg, len);
2471 return PTR_ERR(skb);
2473 /* Channel lock is released before requesting new skb and then
2474 * reacquired thus we need to recheck channel state.
2476 if (chan->state != BT_CONNECTED) {
2481 l2cap_do_send(chan, skb);
2485 switch (chan->mode) {
2486 case L2CAP_MODE_LE_FLOWCTL:
2487 /* Check outgoing MTU */
2488 if (len > chan->omtu)
2491 if (!chan->tx_credits)
2494 __skb_queue_head_init(&seg_queue);
2496 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2498 if (chan->state != BT_CONNECTED) {
2499 __skb_queue_purge(&seg_queue);
2506 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2508 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2509 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
/* Out of credits: tell the owner to stop feeding us data */
2513 if (!chan->tx_credits)
2514 chan->ops->suspend(chan);
2520 case L2CAP_MODE_BASIC:
2521 /* Check outgoing MTU */
2522 if (len > chan->omtu)
2525 /* Create a basic PDU */
2526 skb = l2cap_create_basic_pdu(chan, msg, len);
2528 return PTR_ERR(skb);
2530 /* Channel lock is released before requesting new skb and then
2531 * reacquired thus we need to recheck channel state.
2533 if (chan->state != BT_CONNECTED) {
2538 l2cap_do_send(chan, skb);
2542 case L2CAP_MODE_ERTM:
2543 case L2CAP_MODE_STREAMING:
2544 /* Check outgoing MTU */
2545 if (len > chan->omtu) {
2550 __skb_queue_head_init(&seg_queue);
2552 /* Do segmentation before calling in to the state machine,
2553 * since it's possible to block while waiting for memory
2556 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2558 /* The channel could have been closed while segmenting,
2559 * check that it is still connected.
2561 if (chan->state != BT_CONNECTED) {
2562 __skb_queue_purge(&seg_queue);
2569 if (chan->mode == L2CAP_MODE_ERTM)
2570 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2572 l2cap_streaming_send(chan, &seg_queue);
2576 /* If the skbs were not queued for sending, they'll still be in
2577 * seg_queue and need to be purged.
2579 __skb_queue_purge(&seg_queue);
2583 BT_DBG("bad state %1.1x", chan->mode);
/* Send an SREJ S-frame for every missing sequence number between
 * expected_tx_seq and @txseq that is not already buffered in srej_q,
 * recording each on srej_list; then advance expected_tx_seq past
 * @txseq.
 */
2591 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2593 struct l2cap_ctrl control;
2596 BT_DBG("chan %p, txseq %u", chan, txseq);
2598 memset(&control, 0, sizeof(control));
2600 control.super = L2CAP_SUPER_SREJ;
2602 for (seq = chan->expected_tx_seq; seq != txseq;
2603 seq = __next_seq(chan, seq)) {
2604 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2605 control.reqseq = seq;
2606 l2cap_send_sframe(chan, &control);
2607 l2cap_seq_list_append(&chan->srej_list, seq);
2611 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the last (most recent) entry on srej_list, if
 * the list is non-empty.
 */
2614 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2616 struct l2cap_ctrl control;
2618 BT_DBG("chan %p", chan);
2620 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2623 memset(&control, 0, sizeof(control));
2625 control.super = L2CAP_SUPER_SREJ;
2626 control.reqseq = chan->srej_list.tail;
2627 l2cap_send_sframe(chan, &control);
/* Resend SREJ frames for every outstanding entry on srej_list up to
 * @txseq, rotating the list (pop then re-append) so each entry stays
 * tracked; the initial head bounds the loop to a single pass.
 */
2630 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2632 struct l2cap_ctrl control;
2636 BT_DBG("chan %p, txseq %u", chan, txseq);
2638 memset(&control, 0, sizeof(control));
2640 control.super = L2CAP_SUPER_SREJ;
2642 /* Capture initial list head to allow only one pass through the list. */
2643 initial_head = chan->srej_list.head;
2646 seq = l2cap_seq_list_pop(&chan->srej_list);
2647 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2650 control.reqseq = seq;
2651 l2cap_send_sframe(chan, &control);
2652 l2cap_seq_list_append(&chan->srej_list, seq);
2653 } while (chan->srej_list.head != initial_head);
/* Process an incoming acknowledgement: free every transmitted frame
 * from expected_ack_seq up to (but not including) @reqseq, update the
 * unacked count, and stop the retransmission timer once everything
 * outstanding has been acked.
 */
2656 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2658 struct sk_buff *acked_skb;
2661 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2663 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2666 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2667 chan->expected_ack_seq, chan->unacked_frames);
2669 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2670 ackseq = __next_seq(chan, ackseq)) {
2672 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2674 skb_unlink(acked_skb, &chan->tx_q);
2675 kfree_skb(acked_skb);
2676 chan->unacked_frames--;
2680 chan->expected_ack_seq = reqseq;
2682 if (chan->unacked_frames == 0)
2683 __clear_retrans_timer(chan);
2685 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abandon selective-reject recovery: discard buffered out-of-order
 * frames and pending SREJ state, reset the expected sequence to the
 * last in-order point, and fall back to the plain RECV rx state.
 */
2688 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2690 BT_DBG("chan %p", chan);
2692 chan->expected_tx_seq = chan->buffer_seq;
2693 l2cap_seq_list_clear(&chan->srej_list);
2694 skb_queue_purge(&chan->srej_q);
2695 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM tx state machine, XMIT state: queue/send new data, enter or
 * leave local-busy (RNR/RR with poll), process incoming acks, and
 * switch to WAIT_F with the monitor timer armed after sending a poll
 * (explicit poll or retransmission timeout).
 */
2698 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2699 struct l2cap_ctrl *control,
2700 struct sk_buff_head *skbs, u8 event)
2702 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2706 case L2CAP_EV_DATA_REQUEST:
2707 if (chan->tx_send_head == NULL)
2708 chan->tx_send_head = skb_peek(skbs);
2710 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2711 l2cap_ertm_send(chan);
2713 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2714 BT_DBG("Enter LOCAL_BUSY");
2715 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2717 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2718 /* The SREJ_SENT state must be aborted if we are to
2719 * enter the LOCAL_BUSY state.
2721 l2cap_abort_rx_srej_sent(chan);
2724 l2cap_send_ack(chan);
2727 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2728 BT_DBG("Exit LOCAL_BUSY");
2729 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2731 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2732 struct l2cap_ctrl local_control;
/* We previously told the peer we were busy; poll it with RR
 * to resume and wait for the F-bit response in WAIT_F.
 */
2734 memset(&local_control, 0, sizeof(local_control));
2735 local_control.sframe = 1;
2736 local_control.super = L2CAP_SUPER_RR;
2737 local_control.poll = 1;
2738 local_control.reqseq = chan->buffer_seq;
2739 l2cap_send_sframe(chan, &local_control);
2741 chan->retry_count = 1;
2742 __set_monitor_timer(chan);
2743 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2746 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2747 l2cap_process_reqseq(chan, control->reqseq);
2749 case L2CAP_EV_EXPLICIT_POLL:
2750 l2cap_send_rr_or_rnr(chan, 1);
2751 chan->retry_count = 1;
2752 __set_monitor_timer(chan);
2753 __clear_ack_timer(chan);
2754 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2756 case L2CAP_EV_RETRANS_TO:
2757 l2cap_send_rr_or_rnr(chan, 1);
2758 chan->retry_count = 1;
2759 __set_monitor_timer(chan);
2760 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2762 case L2CAP_EV_RECV_FBIT:
2763 /* Nothing to process */
/* ERTM tx state machine, WAIT_F state: a poll is outstanding, so new
 * data is queued but not sent. Receiving the F-bit returns us to XMIT;
 * each monitor timeout re-polls until max_tx retries are exhausted,
 * after which the channel is disconnected.
 */
2770 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2771 struct l2cap_ctrl *control,
2772 struct sk_buff_head *skbs, u8 event)
2774 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2778 case L2CAP_EV_DATA_REQUEST:
2779 if (chan->tx_send_head == NULL)
2780 chan->tx_send_head = skb_peek(skbs);
2781 /* Queue data, but don't send. */
2782 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2784 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2785 BT_DBG("Enter LOCAL_BUSY");
2786 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2788 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2789 /* The SREJ_SENT state must be aborted if we are to
2790 * enter the LOCAL_BUSY state.
2792 l2cap_abort_rx_srej_sent(chan);
2795 l2cap_send_ack(chan);
2798 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2799 BT_DBG("Exit LOCAL_BUSY");
2800 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2802 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2803 struct l2cap_ctrl local_control;
2804 memset(&local_control, 0, sizeof(local_control));
2805 local_control.sframe = 1;
2806 local_control.super = L2CAP_SUPER_RR;
2807 local_control.poll = 1;
2808 local_control.reqseq = chan->buffer_seq;
2809 l2cap_send_sframe(chan, &local_control);
2811 chan->retry_count = 1;
2812 __set_monitor_timer(chan);
2813 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2816 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2817 l2cap_process_reqseq(chan, control->reqseq);
2821 case L2CAP_EV_RECV_FBIT:
2822 if (control && control->final) {
2823 __clear_monitor_timer(chan);
2824 if (chan->unacked_frames > 0)
2825 __set_retrans_timer(chan);
2826 chan->retry_count = 0;
2827 chan->tx_state = L2CAP_TX_STATE_XMIT;
/* NOTE(review): format string "0x2.2%x" looks like a typo for
 * "0x%2.2x" — the state value is never printed as written.
 */
2828 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2831 case L2CAP_EV_EXPLICIT_POLL:
2834 case L2CAP_EV_MONITOR_TO:
2835 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2836 l2cap_send_rr_or_rnr(chan, 1);
2837 __set_monitor_timer(chan);
2838 chan->retry_count++;
2840 l2cap_send_disconn_req(chan, ECONNABORTED);
/* Top-level ERTM transmit event dispatcher: forward the event to the
 * handler for the channel's current tx_state (XMIT or WAIT_F).
 */
2848 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2849 struct sk_buff_head *skbs, u8 event)
2851 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2852 chan, control, skbs, event, chan->tx_state);
2854 switch (chan->tx_state) {
2855 case L2CAP_TX_STATE_XMIT:
2856 l2cap_tx_state_xmit(chan, control, skbs, event);
2858 case L2CAP_TX_STATE_WAIT_F:
2859 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Feed a received reqseq/F-bit (from an incoming frame's control field)
 * into the transmit state machine; no data skbs are involved.
 */
2867 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2868 struct l2cap_ctrl *control)
2870 BT_DBG("chan %p, control %p", chan, control);
2871 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Feed only the F-bit of a received control field into the transmit
 * state machine (used when reqseq processing is not wanted).
 */
2874 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2875 struct l2cap_ctrl *control)
2877 BT_DBG("chan %p, control %p", chan, control);
2878 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2881 /* Copy frame to all raw sockets on that connection */
/* Clone the skb once per matching raw channel and hand the clone to the
 * channel's recv op; the connection's channel list is walked under
 * chan_lock.  Non-raw channels and the originating channel are skipped.
 */
2882 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2884 struct sk_buff *nskb;
2885 struct l2cap_chan *chan;
2887 BT_DBG("conn %p", conn);
2889 mutex_lock(&conn->chan_lock);
2891 list_for_each_entry(chan, &conn->chan_l, list) {
2892 if (chan->chan_type != L2CAP_CHAN_RAW)
2895 /* Don't send frame to the channel it came from */
2896 if (bt_cb(skb)->l2cap.chan == chan)
/* Each listener gets its own clone; recv() takes ownership on success. */
2899 nskb = skb_clone(skb, GFP_KERNEL);
2902 if (chan->ops->recv(chan, nskb))
2906 mutex_unlock(&conn->chan_lock);
2909 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command PDU: L2CAP header + command header + payload.
 * If the whole PDU does not fit in one skb of conn->mtu bytes, the payload
 * is continued in fragment skbs chained on frag_list (fragments carry no
 * L2CAP header).  The signalling CID depends on the link type (LE vs ACL).
 * Returns the allocated skb (NULL paths are in lines elided from this
 * excerpt).
 */
2910 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2911 u8 ident, u16 dlen, void *data)
2913 struct sk_buff *skb, **frag;
2914 struct l2cap_cmd_hdr *cmd;
2915 struct l2cap_hdr *lh;
2918 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2919 conn, code, ident, dlen);
/* MTU must at least hold the two headers or no command can be sent. */
2921 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2924 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2925 count = min_t(unsigned int, conn->mtu, len);
2927 skb = bt_skb_alloc(count, GFP_KERNEL);
2931 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2932 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2934 if (conn->hcon->type == LE_LINK)
2935 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2937 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2939 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2942 cmd->len = cpu_to_le16(dlen);
/* First skb carries whatever payload fits after the headers. */
2945 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2946 memcpy(skb_put(skb, count), data, count);
2952 /* Continuation fragments (no L2CAP header) */
2953 frag = &skb_shinfo(skb)->frag_list;
2955 count = min_t(unsigned int, conn->mtu, len);
2957 *frag = bt_skb_alloc(count, GFP_KERNEL);
2961 memcpy(skb_put(*frag, count), data, count);
2966 frag = &(*frag)->next;
/* Parse one configuration option from *ptr, returning its total size
 * (header + value) so the caller can advance through the option list.
 * 1/2/4-byte values are read as little-endian integers; any other length
 * is returned by pointer (as an unsigned long) for the caller to copy.
 */
2976 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2979 struct l2cap_conf_opt *opt = *ptr;
2982 len = L2CAP_CONF_OPT_SIZE + opt->len;
2990 *val = *((u8 *) opt->val);
2994 *val = get_unaligned_le16(opt->val);
2998 *val = get_unaligned_le32(opt->val);
/* Variable-length option: hand back a pointer instead of a value. */
3002 *val = (unsigned long) opt->val;
3006 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option at *ptr and advance the pointer.
 * 'size' is the space remaining in the output buffer; the option is
 * silently skipped if it would not fit (guards against overflow,
 * CVE-2017-1000251-era hardening).  1/2/4-byte values are stored
 * little-endian; other lengths treat 'val' as a source pointer.
 */
3010 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3012 struct l2cap_conf_opt *opt = *ptr;
3014 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
/* Bounds check: never write past the response buffer. */
3016 if (size < L2CAP_CONF_OPT_SIZE + len)
3024 *((u8 *) opt->val) = val;
3028 put_unaligned_le16(val, opt->val);
3032 put_unaligned_le32(val, opt->val);
3036 memcpy(opt->val, (void *) val, len);
3040 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build an Extended Flow Specification (EFS) option from the channel's
 * local QoS parameters and append it to the config request at *ptr.
 * ERTM uses the negotiated local service type; streaming mode always
 * advertises best-effort.
 */
3043 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3045 struct l2cap_conf_efs efs;
3047 switch (chan->mode) {
3048 case L2CAP_MODE_ERTM:
3049 efs.id = chan->local_id;
3050 efs.stype = chan->local_stype;
3051 efs.msdu = cpu_to_le16(chan->local_msdu);
3052 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3053 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3054 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3057 case L2CAP_MODE_STREAMING:
3059 efs.stype = L2CAP_SERV_BESTEFFORT;
3060 efs.msdu = cpu_to_le16(chan->local_msdu);
3061 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3070 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3071 (unsigned long) &efs, size);
/* Deferred-ack work handler: when the ack timer fires, send an RR/RNR
 * if any received frames are still unacknowledged.  Runs with the
 * channel lock held; drops the reference taken when the timer was armed.
 */
3074 static void l2cap_ack_timeout(struct work_struct *work)
3076 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3080 BT_DBG("chan %p", chan);
3082 l2cap_chan_lock(chan);
/* Number of frames received since the last ack we sent. */
3084 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3085 chan->last_acked_seq);
3088 l2cap_send_rr_or_rnr(chan, 0);
3090 l2cap_chan_unlock(chan);
3091 l2cap_chan_put(chan);
/* Reset all ERTM/streaming sequence state for a channel and, for ERTM
 * proper, initialise the timers, queues and the SREJ/retransmit sequence
 * lists.  Returns 0 on success or a negative errno if a sequence list
 * allocation fails (the first list is freed if the second fails).
 */
3094 int l2cap_ertm_init(struct l2cap_chan *chan)
3098 chan->next_tx_seq = 0;
3099 chan->expected_tx_seq = 0;
3100 chan->expected_ack_seq = 0;
3101 chan->unacked_frames = 0;
3102 chan->buffer_seq = 0;
3103 chan->frames_sent = 0;
3104 chan->last_acked_seq = 0;
3106 chan->sdu_last_frag = NULL;
3109 skb_queue_head_init(&chan->tx_q);
/* AMP channel-move bookkeeping starts out on the BR/EDR controller. */
3111 chan->local_amp_id = AMP_ID_BREDR;
3112 chan->move_id = AMP_ID_BREDR;
3113 chan->move_state = L2CAP_MOVE_STABLE;
3114 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* Streaming mode needs none of the ERTM timers/lists below. */
3116 if (chan->mode != L2CAP_MODE_ERTM)
3119 chan->rx_state = L2CAP_RX_STATE_RECV;
3120 chan->tx_state = L2CAP_TX_STATE_XMIT;
3122 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3123 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3124 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3126 skb_queue_head_init(&chan->srej_q);
3128 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3132 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* Unwind the first allocation if the second one failed. */
3134 l2cap_seq_list_free(&chan->srej_list);
/* Keep the requested ERTM/streaming mode if the remote's feature mask
 * supports it; otherwise fall back to basic mode.
 */
3139 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3142 case L2CAP_MODE_STREAMING:
3143 case L2CAP_MODE_ERTM:
3144 if (l2cap_mode_supported(mode, remote_feat_mask))
3148 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only when we expose the A2MP fixed
 * channel locally and the remote advertises the EXT_WINDOW feature.
 */
3152 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3154 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3155 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
/* Extended Flow Spec is usable only when we expose the A2MP fixed
 * channel locally and the remote advertises the EXT_FLOW feature.
 */
3158 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3160 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3161 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
/* Fill in the RFC option's retransmission and monitor timeouts.
 * On an AMP link they are derived from the controller's best-effort
 * flush timeout (scaled and clamped to 16 bits); on BR/EDR the spec
 * defaults are used.
 */
3164 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3165 struct l2cap_conf_rfc *rfc)
3167 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3168 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3170 /* Class 1 devices have must have ERTM timeouts
3171 * exceeding the Link Supervision Timeout. The
3172 * default Link Supervision Timeout for AMP
3173 * controllers is 10 seconds.
3175 * Class 1 devices use 0xffffffff for their
3176 * best-effort flush timeout, so the clamping logic
3177 * will result in a timeout that meets the above
3178 * requirement. ERTM timeouts are 16-bit values, so
3179 * the maximum timeout is 65.535 seconds.
3182 /* Convert timeout to milliseconds and round */
3183 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3185 /* This is the recommended formula for class 2 devices
3186 * that start ERTM timers when packets are sent to the
3189 ertm_to = 3 * ertm_to + 500;
/* Clamp to the 16-bit field (65535 ms max). */
3191 if (ertm_to > 0xffff)
3194 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3195 rfc->monitor_timeout = rfc->retrans_timeout;
3197 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3198 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Pick the TX window: use the extended control field (window up to the
 * extended maximum) when the requested window exceeds the default and
 * the link supports EWS; otherwise cap the window at the standard
 * default.  ack_win starts out equal to tx_win.
 */
3202 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3204 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3205 __l2cap_ews_supported(chan->conn)) {
3206 /* use extended control field */
3207 set_bit(FLAG_EXT_CTRL, &chan->flags);
3208 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3210 chan->tx_win = min_t(u16, chan->tx_win,
3211 L2CAP_DEFAULT_TX_WINDOW);
3212 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3214 chan->ack_win = chan->tx_win;
/* Build an outgoing Configuration Request for 'chan' into 'data'
 * (capacity 'data_size').  Adds MTU, RFC, and — mode permitting — FCS,
 * EFS and EWS options.  'endptr - ptr' is passed to every option append
 * so the buffer can never overflow.  Returns the number of bytes built
 * (return statements fall in lines elided from this excerpt).
 */
3217 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3219 struct l2cap_conf_req *req = data;
3220 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3221 void *ptr = req->data;
3222 void *endptr = data + data_size;
3225 BT_DBG("chan %p", chan);
/* Only (re)negotiate the mode on the very first config exchange. */
3227 if (chan->num_conf_req || chan->num_conf_rsp)
3230 switch (chan->mode) {
3231 case L2CAP_MODE_STREAMING:
3232 case L2CAP_MODE_ERTM:
3233 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3236 if (__l2cap_efs_supported(chan->conn))
3237 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3241 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* MTU option only needs sending when it differs from the default. */
3246 if (chan->imtu != L2CAP_DEFAULT_MTU)
3247 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
3249 switch (chan->mode) {
3250 case L2CAP_MODE_BASIC:
/* If the remote supports neither ERTM nor streaming there is no
 * point sending an RFC option at all.
 */
3254 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3255 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3258 rfc.mode = L2CAP_MODE_BASIC;
3260 rfc.max_transmit = 0;
3261 rfc.retrans_timeout = 0;
3262 rfc.monitor_timeout = 0;
3263 rfc.max_pdu_size = 0;
3265 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3266 (unsigned long) &rfc, endptr - ptr);
3269 case L2CAP_MODE_ERTM:
3270 rfc.mode = L2CAP_MODE_ERTM;
3271 rfc.max_transmit = chan->max_tx;
3273 __l2cap_set_ertm_timeouts(chan, &rfc);
/* MPS must leave room for the extended header, SDU length and FCS. */
3275 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3276 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3278 rfc.max_pdu_size = cpu_to_le16(size);
3280 l2cap_txwin_setup(chan);
3282 rfc.txwin_size = min_t(u16, chan->tx_win,
3283 L2CAP_DEFAULT_TX_WINDOW);
3285 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3286 (unsigned long) &rfc, endptr - ptr);
3288 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3289 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
/* Full (untruncated) window goes in the EWS option. */
3291 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3292 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3293 chan->tx_win, endptr - ptr);
3295 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3296 if (chan->fcs == L2CAP_FCS_NONE ||
3297 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3298 chan->fcs = L2CAP_FCS_NONE;
3299 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3300 chan->fcs, endptr - ptr);
3304 case L2CAP_MODE_STREAMING:
3305 l2cap_txwin_setup(chan);
3306 rfc.mode = L2CAP_MODE_STREAMING;
3308 rfc.max_transmit = 0;
3309 rfc.retrans_timeout = 0;
3310 rfc.monitor_timeout = 0;
3312 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3313 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3315 rfc.max_pdu_size = cpu_to_le16(size);
3317 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3318 (unsigned long) &rfc, endptr - ptr);
3320 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3321 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3323 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3324 if (chan->fcs == L2CAP_FCS_NONE ||
3325 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3326 chan->fcs = L2CAP_FCS_NONE;
3327 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3328 chan->fcs, endptr - ptr);
3333 req->dcid = cpu_to_le16(chan->dcid);
3334 req->flags = cpu_to_le16(0);
/* Parse the accumulated remote Configuration Request (chan->conf_req,
 * chan->conf_len) and build our Configuration Response into 'data'
 * (capacity 'data_size').  Walks the option list, records the remote's
 * MTU/RFC/FCS/EFS/EWS choices, then emits accept/unaccept options per
 * the negotiated mode.  Returns the response length, or -ECONNREFUSED
 * when negotiation cannot proceed.
 */
3339 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3341 struct l2cap_conf_rsp *rsp = data;
3342 void *ptr = rsp->data;
3343 void *endptr = data + data_size;
3344 void *req = chan->conf_req;
3345 int len = chan->conf_len;
3346 int type, hint, olen;
3348 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3349 struct l2cap_conf_efs efs;
3351 u16 mtu = L2CAP_DEFAULT_MTU;
3352 u16 result = L2CAP_CONF_SUCCESS;
3355 BT_DBG("chan %p", chan);
/* First pass: decode every option the remote sent. */
3357 while (len >= L2CAP_CONF_OPT_SIZE) {
3358 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; non-hints we don't know must be
 * reported back as unknown.
 */
3362 hint = type & L2CAP_CONF_HINT;
3363 type &= L2CAP_CONF_MASK;
3366 case L2CAP_CONF_MTU:
3372 case L2CAP_CONF_FLUSH_TO:
3375 chan->flush_to = val;
3378 case L2CAP_CONF_QOS:
3381 case L2CAP_CONF_RFC:
3382 if (olen != sizeof(rfc))
3384 memcpy(&rfc, (void *) val, olen);
3387 case L2CAP_CONF_FCS:
3390 if (val == L2CAP_FCS_NONE)
3391 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3394 case L2CAP_CONF_EFS:
3395 if (olen != sizeof(efs))
3398 memcpy(&efs, (void *) val, olen);
3401 case L2CAP_CONF_EWS:
/* EWS requires high-speed (A2MP) support on our side. */
3404 if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3405 return -ECONNREFUSED;
3406 set_bit(FLAG_EXT_CTRL, &chan->flags);
3407 set_bit(CONF_EWS_RECV, &chan->conf_state);
3408 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3409 chan->remote_tx_win = val;
3415 result = L2CAP_CONF_UNKNOWN;
3416 *((u8 *) ptr++) = type;
/* Mode may still be renegotiated on the first exchange only. */
3421 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3424 switch (chan->mode) {
3425 case L2CAP_MODE_STREAMING:
3426 case L2CAP_MODE_ERTM:
3427 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3428 chan->mode = l2cap_select_mode(rfc.mode,
3429 chan->conn->feat_mask);
3434 if (__l2cap_efs_supported(chan->conn))
3435 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3437 return -ECONNREFUSED;
3440 if (chan->mode != rfc.mode)
3441 return -ECONNREFUSED;
/* Remote asked for a different mode: reject once with our mode,
 * refuse outright on the second disagreement.
 */
3447 if (chan->mode != rfc.mode) {
3448 result = L2CAP_CONF_UNACCEPT;
3449 rfc.mode = chan->mode;
3451 if (chan->num_conf_rsp == 1)
3452 return -ECONNREFUSED;
3454 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3455 (unsigned long) &rfc, endptr - ptr);
3458 if (result == L2CAP_CONF_SUCCESS) {
3459 /* Configure output options and let the other side know
3460 * which ones we don't like. */
/* MTU below the spec minimum is rejected. */
3462 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3463 result = L2CAP_CONF_UNACCEPT;
3466 set_bit(CONF_MTU_DONE, &chan->conf_state);
3468 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
/* EFS service-type mismatch (both sides carrying traffic of
 * different types) is unacceptable.
 */
3471 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3472 efs.stype != L2CAP_SERV_NOTRAFIC &&
3473 efs.stype != chan->local_stype) {
3475 result = L2CAP_CONF_UNACCEPT;
3477 if (chan->num_conf_req >= 1)
3478 return -ECONNREFUSED;
3480 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3482 (unsigned long) &efs, endptr - ptr);
3484 /* Send PENDING Conf Rsp */
3485 result = L2CAP_CONF_PENDING;
3486 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3491 case L2CAP_MODE_BASIC:
3492 chan->fcs = L2CAP_FCS_NONE;
3493 set_bit(CONF_MODE_DONE, &chan->conf_state);
3496 case L2CAP_MODE_ERTM:
3497 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3498 chan->remote_tx_win = rfc.txwin_size;
3500 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3502 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the remote MPS to what fits in our ACL MTU. */
3504 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3505 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3506 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3507 rfc.max_pdu_size = cpu_to_le16(size);
3508 chan->remote_mps = size;
3510 __l2cap_set_ertm_timeouts(chan, &rfc);
3512 set_bit(CONF_MODE_DONE, &chan->conf_state);
3514 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3515 sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3517 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3518 chan->remote_id = efs.id;
3519 chan->remote_stype = efs.stype;
3520 chan->remote_msdu = le16_to_cpu(efs.msdu);
3521 chan->remote_flush_to =
3522 le32_to_cpu(efs.flush_to);
3523 chan->remote_acc_lat =
3524 le32_to_cpu(efs.acc_lat);
3525 chan->remote_sdu_itime =
3526 le32_to_cpu(efs.sdu_itime);
3527 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3529 (unsigned long) &efs, endptr - ptr);
3533 case L2CAP_MODE_STREAMING:
3534 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3535 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3536 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3537 rfc.max_pdu_size = cpu_to_le16(size);
3538 chan->remote_mps = size;
3540 set_bit(CONF_MODE_DONE, &chan->conf_state);
3542 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3543 (unsigned long) &rfc, endptr - ptr);
3548 result = L2CAP_CONF_UNACCEPT;
3550 memset(&rfc, 0, sizeof(rfc));
3551 rfc.mode = chan->mode;
3554 if (result == L2CAP_CONF_SUCCESS)
3555 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3557 rsp->scid = cpu_to_le16(chan->dcid);
3558 rsp->result = cpu_to_le16(result);
3559 rsp->flags = cpu_to_le16(0);
/* Parse the remote's Configuration Response ('rsp', 'len' bytes) and
 * build a follow-up Configuration Request into 'data' (capacity 'size').
 * Options the remote adjusted are echoed back with our accepted values;
 * *result is updated (e.g. to UNACCEPT for a too-small MTU).  Applies
 * the negotiated ERTM/streaming parameters to the channel on success or
 * pending.  Returns the request length or -ECONNREFUSED.
 */
3564 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3565 void *data, size_t size, u16 *result)
3567 struct l2cap_conf_req *req = data;
3568 void *ptr = req->data;
3569 void *endptr = data + size;
3572 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3573 struct l2cap_conf_efs efs;
3575 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3577 while (len >= L2CAP_CONF_OPT_SIZE) {
3578 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3583 case L2CAP_CONF_MTU:
/* Remote proposed an MTU below the minimum: counter with the
 * spec minimum and flag the response as unacceptable.
 */
3586 if (val < L2CAP_DEFAULT_MIN_MTU) {
3587 *result = L2CAP_CONF_UNACCEPT;
3588 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3591 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3595 case L2CAP_CONF_FLUSH_TO:
3598 chan->flush_to = val;
3599 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3600 chan->flush_to, endptr - ptr);
3603 case L2CAP_CONF_RFC:
3604 if (olen != sizeof(rfc))
3606 memcpy(&rfc, (void *)val, olen);
/* State-2 devices must not have their mode changed by the peer. */
3607 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3608 rfc.mode != chan->mode)
3609 return -ECONNREFUSED;
3611 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3612 (unsigned long) &rfc, endptr - ptr);
3615 case L2CAP_CONF_EWS:
3618 chan->ack_win = min_t(u16, val, chan->ack_win);
3619 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3620 chan->tx_win, endptr - ptr);
3623 case L2CAP_CONF_EFS:
3624 if (olen != sizeof(efs))
3626 memcpy(&efs, (void *)val, olen);
3627 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3628 efs.stype != L2CAP_SERV_NOTRAFIC &&
3629 efs.stype != chan->local_stype)
3630 return -ECONNREFUSED;
3631 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3632 (unsigned long) &efs, endptr - ptr);
3635 case L2CAP_CONF_FCS:
3638 if (*result == L2CAP_CONF_PENDING)
3639 if (val == L2CAP_FCS_NONE)
3640 set_bit(CONF_RECV_NO_FCS,
/* A basic-mode channel cannot be switched to another mode here. */
3646 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3647 return -ECONNREFUSED;
3649 chan->mode = rfc.mode;
/* Commit negotiated parameters for success or pending results. */
3651 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3653 case L2CAP_MODE_ERTM:
3654 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3655 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3656 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3657 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3658 chan->ack_win = min_t(u16, chan->ack_win,
3661 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3662 chan->local_msdu = le16_to_cpu(efs.msdu);
3663 chan->local_sdu_itime =
3664 le32_to_cpu(efs.sdu_itime);
3665 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3666 chan->local_flush_to =
3667 le32_to_cpu(efs.flush_to);
3671 case L2CAP_MODE_STREAMING:
3672 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3676 req->dcid = cpu_to_le16(chan->dcid);
3677 req->flags = cpu_to_le16(0);
/* Fill in a minimal Configuration Response header (scid/result/flags)
 * with no options; returns the response length (the return is in a line
 * elided from this excerpt).
 */
3682 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3683 u16 result, u16 flags)
3685 struct l2cap_conf_rsp *rsp = data;
3686 void *ptr = rsp->data;
3688 BT_DBG("chan %p", chan);
3690 rsp->scid = cpu_to_le16(chan->dcid);
3691 rsp->result = cpu_to_le16(result);
3692 rsp->flags = cpu_to_le16(flags);
/* Send the deferred LE Credit Based Connection Response for a channel
 * whose acceptance was postponed (e.g. by a security or userspace
 * decision), advertising our MTU, MPS and initial credits.
 */
3697 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3699 struct l2cap_le_conn_rsp rsp;
3700 struct l2cap_conn *conn = chan->conn;
3702 BT_DBG("chan %p", chan);
3704 rsp.dcid = cpu_to_le16(chan->scid);
3705 rsp.mtu = cpu_to_le16(chan->imtu);
3706 rsp.mps = cpu_to_le16(chan->mps);
3707 rsp.credits = cpu_to_le16(chan->rx_credits);
3708 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
/* chan->ident was saved from the original connect request. */
3710 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Send the deferred BR/EDR Connection (or AMP Create Channel) Response
 * for a previously pended channel, then kick off configuration by
 * sending our Configuration Request exactly once.
 */
3714 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3716 struct l2cap_conn_rsp rsp;
3717 struct l2cap_conn *conn = chan->conn;
3721 rsp.scid = cpu_to_le16(chan->dcid);
3722 rsp.dcid = cpu_to_le16(chan->scid);
3723 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3724 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* AMP channels answer with CREATE_CHAN_RSP instead of CONN_RSP. */
3727 rsp_code = L2CAP_CREATE_CHAN_RSP;
3729 rsp_code = L2CAP_CONN_RSP;
3731 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3733 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* Only send the config request once per channel. */
3735 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3738 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3739 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3740 chan->num_conf_req++;
/* Extract RFC (and EWS) parameters from a successful Configuration
 * Response and apply them to the channel.  Sane defaults cover a remote
 * that omitted the options entirely.  Only meaningful for ERTM and
 * streaming channels.
 */
3743 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3747 /* Use sane default values in case a misbehaving remote device
3748 * did not send an RFC or extended window size option.
3750 u16 txwin_ext = chan->ack_win;
3751 struct l2cap_conf_rfc rfc = {
3753 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3754 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3755 .max_pdu_size = cpu_to_le16(chan->imtu),
3756 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3759 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3761 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3764 while (len >= L2CAP_CONF_OPT_SIZE) {
3765 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3770 case L2CAP_CONF_RFC:
3771 if (olen != sizeof(rfc))
3773 memcpy(&rfc, (void *)val, olen);
3775 case L2CAP_CONF_EWS:
3784 case L2CAP_MODE_ERTM:
3785 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3786 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3787 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* ack_win source depends on whether extended control is in use. */
3788 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3789 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3791 chan->ack_win = min_t(u16, chan->ack_win,
3794 case L2CAP_MODE_STREAMING:
3795 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it rejects our pending
 * Information Request (matching ident), treat the feature-mask exchange
 * as done and proceed with connection startup anyway.
 */
3799 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3800 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3803 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
/* Reject malformed (truncated) reject PDUs. */
3805 if (cmd_len < sizeof(*rej))
3808 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3811 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3812 cmd->ident == conn->info_ident) {
3813 cancel_delayed_work(&conn->info_timer);
3815 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3816 conn->info_ident = 0;
3818 l2cap_conn_start(conn);
/* Handle an incoming Connection Request (or AMP Create Channel Request,
 * per rsp_code/amp_id).  Looks up a listening channel for the PSM,
 * enforces link security (except for SDP), creates the new child
 * channel, and replies with success/pending/refused.  May also trigger
 * the initial feature-mask Information Request and, on immediate
 * success, the first Configuration Request.  Returns the new channel
 * (or NULL on the refusal paths elided from this excerpt).
 */
3824 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3825 struct l2cap_cmd_hdr *cmd,
3826 u8 *data, u8 rsp_code, u8 amp_id)
3828 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3829 struct l2cap_conn_rsp rsp;
3830 struct l2cap_chan *chan = NULL, *pchan;
3831 int result, status = L2CAP_CS_NO_INFO;
3833 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3834 __le16 psm = req->psm;
3836 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3838 /* Check if we have socket listening on psm */
3839 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3840 &conn->hcon->dst, ACL_LINK);
3842 result = L2CAP_CR_BAD_PSM;
3846 mutex_lock(&conn->chan_lock);
3847 l2cap_chan_lock(pchan);
3849 /* Check if the ACL is secure enough (if not SDP) */
3850 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3851 !hci_conn_check_link_mode(conn->hcon)) {
3852 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3853 result = L2CAP_CR_SEC_BLOCK;
3857 result = L2CAP_CR_NO_MEM;
3859 /* Check if we already have channel with that dcid */
3860 if (__l2cap_get_chan_by_dcid(conn, scid))
3863 chan = pchan->ops->new_connection(pchan);
3867 /* For certain devices (ex: HID mouse), support for authentication,
3868 * pairing and bonding is optional. For such devices, inorder to avoid
3869 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3870 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3872 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3874 bacpy(&chan->src, &conn->hcon->src);
3875 bacpy(&chan->dst, &conn->hcon->dst);
3876 chan->src_type = bdaddr_src_type(conn->hcon);
3877 chan->dst_type = bdaddr_dst_type(conn->hcon);
3880 chan->local_amp_id = amp_id;
3882 __l2cap_chan_add(conn, chan);
3886 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
/* Remember the request ident so a deferred response can reuse it. */
3888 chan->ident = cmd->ident;
3890 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3891 if (l2cap_chan_check_security(chan, false)) {
3892 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3893 l2cap_state_change(chan, BT_CONNECT2);
3894 result = L2CAP_CR_PEND;
3895 status = L2CAP_CS_AUTHOR_PEND;
3896 chan->ops->defer(chan);
3898 /* Force pending result for AMP controllers.
3899 * The connection will succeed after the
3900 * physical link is up.
3902 if (amp_id == AMP_ID_BREDR) {
3903 l2cap_state_change(chan, BT_CONFIG);
3904 result = L2CAP_CR_SUCCESS;
3906 l2cap_state_change(chan, BT_CONNECT2);
3907 result = L2CAP_CR_PEND;
3909 status = L2CAP_CS_NO_INFO;
3912 l2cap_state_change(chan, BT_CONNECT2);
3913 result = L2CAP_CR_PEND;
3914 status = L2CAP_CS_AUTHEN_PEND;
/* Feature exchange not finished yet: answer "pending". */
3917 l2cap_state_change(chan, BT_CONNECT2);
3918 result = L2CAP_CR_PEND;
3919 status = L2CAP_CS_NO_INFO;
3923 l2cap_chan_unlock(pchan);
3924 mutex_unlock(&conn->chan_lock);
3925 l2cap_chan_put(pchan);
3928 rsp.scid = cpu_to_le16(scid);
3929 rsp.dcid = cpu_to_le16(dcid);
3930 rsp.result = cpu_to_le16(result);
3931 rsp.status = cpu_to_le16(status);
3932 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* Kick off the feature-mask info exchange if we haven't already. */
3934 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3935 struct l2cap_info_req info;
3936 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3938 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3939 conn->info_ident = l2cap_get_ident(conn);
3941 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3943 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3944 sizeof(info), &info);
3947 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3948 result == L2CAP_CR_SUCCESS) {
3950 set_bit(CONF_REQ_SENT, &chan->conf_state);
3951 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3952 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3953 chan->num_conf_req++;
/* Signalling handler for L2CAP_CONN_REQ: validate the PDU length,
 * notify the management layer of the device connection (first time
 * only), and delegate channel creation to l2cap_connect().
 */
3959 static int l2cap_connect_req(struct l2cap_conn *conn,
3960 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3962 struct hci_dev *hdev = conn->hcon->hdev;
3963 struct hci_conn *hcon = conn->hcon;
3965 if (cmd_len < sizeof(struct l2cap_conn_req))
/* Report the connection to mgmt only once per ACL. */
3969 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3970 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3971 mgmt_device_connected(hdev, hcon, 0, NULL, 0);
3972 hci_dev_unlock(hdev);
3974 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection Response (or AMP Create Channel Response): find
 * the channel by scid, or by the request ident while the dcid is not yet
 * assigned.  SUCCESS moves the channel to BT_CONFIG and sends our first
 * config request; PEND just records the pending state; anything else
 * tears the channel down with ECONNREFUSED.
 */
3978 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3979 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3982 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3983 u16 scid, dcid, result, status;
3984 struct l2cap_chan *chan;
3988 if (cmd_len < sizeof(*rsp))
3991 scid = __le16_to_cpu(rsp->scid);
3992 dcid = __le16_to_cpu(rsp->dcid);
3993 result = __le16_to_cpu(rsp->result);
3994 status = __le16_to_cpu(rsp->status);
3996 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3997 dcid, scid, result, status);
3999 mutex_lock(&conn->chan_lock);
4002 chan = __l2cap_get_chan_by_scid(conn, scid);
/* No scid in the response yet: match on the command ident instead. */
4008 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4017 l2cap_chan_lock(chan);
4020 case L2CAP_CR_SUCCESS:
4021 l2cap_state_change(chan, BT_CONFIG);
4024 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4026 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4029 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4030 l2cap_build_conf_req(chan, req, sizeof(req)), req);
4031 chan->num_conf_req++;
4035 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4039 l2cap_chan_del(chan, ECONNREFUSED);
4043 l2cap_chan_unlock(chan);
4046 mutex_unlock(&conn->chan_lock);
/* Apply the default FCS policy after configuration: FCS only exists for
 * ERTM/streaming, and defaults to CRC16 unless both sides agreed to
 * disable it during negotiation.
 */
4051 static inline void set_default_fcs(struct l2cap_chan *chan)
4053 /* FCS is enabled only in ERTM or streaming mode, if one or both
4056 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4057 chan->fcs = L2CAP_FCS_NONE;
4058 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4059 chan->fcs = L2CAP_FCS_CRC16;
/* Send a successful Configuration Response after a pending EFS
 * negotiation completes, clearing the local-pending flag and marking
 * our output configuration done.
 */
4062 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4063 u8 ident, u16 flags)
4065 struct l2cap_conn *conn = chan->conn;
4067 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4070 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4071 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4073 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4074 l2cap_build_conf_rsp(chan, data,
4075 L2CAP_CONF_SUCCESS, flags), data);
/* Send a Command Reject with reason "invalid CID", echoing the offending
 * source/destination CIDs back to the peer.
 */
4078 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4081 struct l2cap_cmd_rej_cid rej;
4083 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4084 rej.scid = __cpu_to_le16(scid);
4085 rej.dcid = __cpu_to_le16(dcid);
4087 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle an incoming Configuration Request.  Accumulates continuation
 * fragments into chan->conf_req, then on the final fragment parses the
 * full request, sends our response, and — once both directions are
 * configured — initialises ERTM/streaming state and marks the channel
 * ready.  Invalid CIDs and states are answered with a Command Reject.
 */
4090 static inline int l2cap_config_req(struct l2cap_conn *conn,
4091 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4094 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4097 struct l2cap_chan *chan;
4100 if (cmd_len < sizeof(*req))
4103 dcid = __le16_to_cpu(req->dcid);
4104 flags = __le16_to_cpu(req->flags);
4106 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4108 chan = l2cap_get_chan_by_scid(conn, dcid);
4110 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
/* Config is only legal while connecting/configuring/connected. */
4114 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
4115 chan->state != BT_CONNECTED) {
4116 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4121 /* Reject if config buffer is too small. */
4122 len = cmd_len - sizeof(*req);
4123 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4124 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4125 l2cap_build_conf_rsp(chan, rsp,
4126 L2CAP_CONF_REJECT, flags), rsp);
/* Buffer this fragment; more may follow (continuation flag). */
4131 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4132 chan->conf_len += len;
4134 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4135 /* Incomplete config. Send empty response. */
4136 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4137 l2cap_build_conf_rsp(chan, rsp,
4138 L2CAP_CONF_SUCCESS, flags), rsp);
4142 /* Complete config. */
4143 len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
4145 l2cap_send_disconn_req(chan, ECONNRESET);
4149 chan->ident = cmd->ident;
4150 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4151 chan->num_conf_rsp++;
4153 /* Reset config buffer. */
4156 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: finish channel setup. */
4159 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4160 set_default_fcs(chan);
4162 if (chan->mode == L2CAP_MODE_ERTM ||
4163 chan->mode == L2CAP_MODE_STREAMING)
4164 err = l2cap_ertm_init(chan);
4167 l2cap_send_disconn_req(chan, -err);
4169 l2cap_chan_ready(chan);
4174 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4176 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4177 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4178 chan->num_conf_req++;
4181 /* Got Conf Rsp PENDING from remote side and assume we sent
4182 Conf Rsp PENDING in the code above */
4183 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4184 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4186 /* check compatibility */
4188 /* Send rsp for BR/EDR channel */
4190 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4192 chan->ident = cmd->ident;
4196 l2cap_chan_unlock(chan);
/* Handle an incoming Configuration Response.  SUCCESS applies the
 * negotiated RFC parameters; PENDING may trigger an EFS follow-up (or an
 * AMP logical link); UNACCEPT retries with adjusted options up to the
 * allowed number of attempts; anything else disconnects.  Once both
 * directions are configured the channel is made ready.
 */
4200 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4201 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4204 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4205 u16 scid, flags, result;
4206 struct l2cap_chan *chan;
4207 int len = cmd_len - sizeof(*rsp);
4210 if (cmd_len < sizeof(*rsp))
4213 scid = __le16_to_cpu(rsp->scid);
4214 flags = __le16_to_cpu(rsp->flags);
4215 result = __le16_to_cpu(rsp->result);
4217 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4220 chan = l2cap_get_chan_by_scid(conn, scid);
4225 case L2CAP_CONF_SUCCESS:
4226 l2cap_conf_rfc_get(chan, rsp->data, len);
4227 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4230 case L2CAP_CONF_PENDING:
4231 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4233 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4236 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4237 buf, sizeof(buf), &result);
4239 l2cap_send_disconn_req(chan, ECONNRESET);
/* BR/EDR replies immediately; an AMP channel first needs its
 * logical link brought up.
 */
4243 if (!chan->hs_hcon) {
4244 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4247 if (l2cap_check_efs(chan)) {
4248 amp_create_logical_link(chan);
4249 chan->ident = cmd->ident;
4255 case L2CAP_CONF_UNACCEPT:
4256 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* The re-built request must fit in the local buffer. */
4259 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4260 l2cap_send_disconn_req(chan, ECONNRESET);
4264 /* throw out any old stored conf requests */
4265 result = L2CAP_CONF_SUCCESS;
4266 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4267 req, sizeof(req), &result);
4269 l2cap_send_disconn_req(chan, ECONNRESET);
4273 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4274 L2CAP_CONF_REQ, len, req);
4275 chan->num_conf_req++;
4276 if (result != L2CAP_CONF_SUCCESS)
/* Any other result: give up and disconnect the channel. */
4282 l2cap_chan_set_err(chan, ECONNRESET);
4284 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4285 l2cap_send_disconn_req(chan, ECONNRESET);
4289 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4292 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4294 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4295 set_default_fcs(chan);
4297 if (chan->mode == L2CAP_MODE_ERTM ||
4298 chan->mode == L2CAP_MODE_STREAMING)
4299 err = l2cap_ertm_init(chan);
4302 l2cap_send_disconn_req(chan, -err);
4304 l2cap_chan_ready(chan);
4308 l2cap_chan_unlock(chan);
/* Handle an incoming L2CAP Disconnection Request: validate the length,
 * find the channel by our dcid, acknowledge with a Disconnection
 * Response, then shut down and delete the channel.  An unknown CID is
 * answered with a Command Reject (invalid CID).
 */
4312 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4313 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4316 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4317 struct l2cap_disconn_rsp rsp;
4319 struct l2cap_chan *chan;
4321 if (cmd_len != sizeof(*req))
4324 scid = __le16_to_cpu(req->scid);
4325 dcid = __le16_to_cpu(req->dcid);
4327 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4329 mutex_lock(&conn->chan_lock);
/* The remote's dcid is our scid, so look up by dcid */
4331 chan = __l2cap_get_chan_by_scid(conn, dcid);
4333 mutex_unlock(&conn->chan_lock);
4334 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
/* Hold a ref across the delete so the final put below is safe */
4338 l2cap_chan_hold(chan);
4339 l2cap_chan_lock(chan);
4341 rsp.dcid = cpu_to_le16(chan->scid);
4342 rsp.scid = cpu_to_le16(chan->dcid);
4343 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4345 chan->ops->set_shutdown(chan);
4347 l2cap_chan_del(chan, ECONNRESET);
4349 chan->ops->close(chan);
4351 l2cap_chan_unlock(chan);
4352 l2cap_chan_put(chan);
4354 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP Disconnection Response: complete a
 * disconnect we initiated.  Only channels already in BT_DISCONN are
 * deleted; a response for a channel in any other state is ignored.
 */
4359 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4360 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4363 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4365 struct l2cap_chan *chan;
4367 if (cmd_len != sizeof(*rsp))
4370 scid = __le16_to_cpu(rsp->scid);
4371 dcid = __le16_to_cpu(rsp->dcid);
4373 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4375 mutex_lock(&conn->chan_lock);
4377 chan = __l2cap_get_chan_by_scid(conn, scid);
4379 mutex_unlock(&conn->chan_lock);
/* Hold a ref so the channel survives until the final put */
4383 l2cap_chan_hold(chan);
4384 l2cap_chan_lock(chan);
/* Unsolicited/late response: not disconnecting, do nothing */
4386 if (chan->state != BT_DISCONN) {
4387 l2cap_chan_unlock(chan);
4388 l2cap_chan_put(chan);
4389 mutex_unlock(&conn->chan_lock);
/* err == 0: clean, expected disconnect */
4393 l2cap_chan_del(chan, 0);
4395 chan->ops->close(chan);
4397 l2cap_chan_unlock(chan);
4398 l2cap_chan_put(chan);
4400 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP Information Request.  Supported info types
 * are the feature mask and the fixed-channels bitmap; anything else is
 * answered with result "not supported".
 */
4405 static inline int l2cap_information_req(struct l2cap_conn *conn,
4406 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4409 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4412 if (cmd_len != sizeof(*req))
4415 type = __le16_to_cpu(req->type);
4417 BT_DBG("type 0x%4.4x", type);
4419 if (type == L2CAP_IT_FEAT_MASK) {
/* Start from the static mask and add run-time capabilities */
4421 u32 feat_mask = l2cap_feat_mask;
4422 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4423 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4424 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4426 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Extended flow spec / window only when A2MP is enabled locally */
4428 if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4429 feat_mask |= L2CAP_FEAT_EXT_FLOW
4430 | L2CAP_FEAT_EXT_WINDOW;
4432 put_unaligned_le32(feat_mask, rsp->data);
4433 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4435 } else if (type == L2CAP_IT_FIXED_CHAN) {
4437 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4439 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4440 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* First octet carries our fixed-channel bitmap; the remaining
 * seven octets of the 8-byte field are zero.
 */
4441 rsp->data[0] = conn->local_fixed_chan;
4442 memset(rsp->data + 1, 0, 7);
4443 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4446 struct l2cap_info_rsp rsp;
4447 rsp.type = cpu_to_le16(type);
4448 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4449 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an incoming L2CAP Information Response.  On a feature-mask
 * answer, chain a fixed-channels request if the peer advertises that
 * capability; otherwise mark the info exchange done and start any
 * connections that were waiting on it.
 */
4456 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4457 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4460 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4463 if (cmd_len < sizeof(*rsp))
4466 type = __le16_to_cpu(rsp->type);
4467 result = __le16_to_cpu(rsp->result);
4469 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4471 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4472 if (cmd->ident != conn->info_ident ||
4473 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4476 cancel_delayed_work(&conn->info_timer);
/* Peer could not answer: finish the exchange anyway */
4478 if (result != L2CAP_IR_SUCCESS) {
4479 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4480 conn->info_ident = 0;
4482 l2cap_conn_start(conn);
4488 case L2CAP_IT_FEAT_MASK:
4489 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: ask which ones */
4491 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4492 struct l2cap_info_req req;
4493 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4495 conn->info_ident = l2cap_get_ident(conn);
4497 l2cap_send_cmd(conn, conn->info_ident,
4498 L2CAP_INFO_REQ, sizeof(req), &req);
4500 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4501 conn->info_ident = 0;
4503 l2cap_conn_start(conn);
4507 case L2CAP_IT_FIXED_CHAN:
/* Info exchange complete; release pending connections */
4508 conn->remote_fixed_chan = rsp->data[0];
4509 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4510 conn->info_ident = 0;
4512 l2cap_conn_start(conn);
/* Handle an incoming AMP Create Channel Request.  amp_id 0 means the
 * channel goes over BR/EDR and is handled like a normal Connect
 * Request; otherwise the AMP controller id is validated and the
 * bredr_chan/hs_hcon bookkeeping is set up.  Invalid controllers are
 * answered with result "bad AMP".
 */
4519 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4520 struct l2cap_cmd_hdr *cmd,
4521 u16 cmd_len, void *data)
4523 struct l2cap_create_chan_req *req = data;
4524 struct l2cap_create_chan_rsp rsp;
4525 struct l2cap_chan *chan;
4526 struct hci_dev *hdev;
4529 if (cmd_len != sizeof(*req))
/* A2MP must be enabled locally to accept this command */
4532 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4535 psm = le16_to_cpu(req->psm);
4536 scid = le16_to_cpu(req->scid);
4538 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4540 /* For controller id 0 make BR/EDR connection */
4541 if (req->amp_id == AMP_ID_BREDR) {
4542 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4547 /* Validate AMP controller id */
4548 hdev = hci_dev_get(req->amp_id);
/* Must be an AMP controller and currently powered up */
4552 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4557 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4560 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4561 struct hci_conn *hs_hcon;
4563 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
/* No AMP link to that peer: reject with invalid CID */
4567 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4572 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4574 mgr->bredr_chan = chan;
4575 chan->hs_hcon = hs_hcon;
/* AMP links carry their own CRC; no L2CAP FCS needed */
4576 chan->fcs = L2CAP_FCS_NONE;
4577 conn->mtu = hdev->block_mtu;
/* Error path: tell the peer the AMP controller id was bad */
4586 rsp.scid = cpu_to_le16(scid);
4587 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4588 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4590 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Send a Move Channel Request for @chan to controller @dest_amp_id,
 * remembering the ident for matching the response and arming the move
 * timeout.
 */
4596 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4598 struct l2cap_move_chan_req req;
4601 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4603 ident = l2cap_get_ident(chan->conn);
4604 chan->ident = ident;
4606 req.icid = cpu_to_le16(chan->scid);
4607 req.dest_amp_id = dest_amp_id;
4609 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4612 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send a Move Channel Response for @chan carrying @result, reusing the
 * ident saved from the peer's request.
 */
4615 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4617 struct l2cap_move_chan_rsp rsp;
4619 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4621 rsp.icid = cpu_to_le16(chan->dcid);
4622 rsp.result = cpu_to_le16(result);
4624 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/* Send a Move Channel Confirm for @chan with @result under a fresh
 * ident, and arm the move timeout while waiting for the confirm rsp.
 */
4628 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4630 struct l2cap_move_chan_cfm cfm;
4632 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4634 chan->ident = l2cap_get_ident(chan->conn);
4636 cfm.icid = cpu_to_le16(chan->scid);
4637 cfm.result = cpu_to_le16(result);
4639 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4642 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an "unconfirmed" Move Channel Confirm for a bare icid — used
 * when no channel object could be located for the move.
 */
4645 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4647 struct l2cap_move_chan_cfm cfm;
4649 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4651 cfm.icid = cpu_to_le16(icid);
4652 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4654 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/* Acknowledge a Move Channel Confirm by echoing its icid back with the
 * given ident.
 */
4658 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4661 struct l2cap_move_chan_cfm_rsp rsp;
4663 BT_DBG("icid 0x%4.4x", icid);
4665 rsp.icid = cpu_to_le16(icid);
4666 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Drop the channel's references to the high-speed logical link.
 * The actual link release is still a placeholder (see comment below).
 */
4669 static void __release_logical_link(struct l2cap_chan *chan)
4671 chan->hs_hchan = NULL;
4672 chan->hs_hcon = NULL;
4674 /* Placeholder - release the logical link */
/* React to a failed logical-link setup.  For a channel still being
 * created the channel is disconnected outright; for a move in progress
 * the abort path depends on whether we were initiator or responder.
 */
4677 static void l2cap_logical_fail(struct l2cap_chan *chan)
4679 /* Logical link setup failed */
4680 if (chan->state != BT_CONNECTED) {
4681 /* Create channel failure, disconnect */
4682 l2cap_send_disconn_req(chan, ECONNRESET);
4686 switch (chan->move_role) {
4687 case L2CAP_MOVE_ROLE_RESPONDER:
4688 l2cap_move_done(chan);
/* Tell the initiator we cannot host the channel */
4689 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4691 case L2CAP_MOVE_ROLE_INITIATOR:
4692 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4693 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4694 /* Remote has only sent pending or
4695 * success responses, clean up
4697 l2cap_move_done(chan);
4700 /* Other amp move states imply that the move
4701 * has already aborted
4703 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Finish channel creation once the AMP logical link is up: attach the
 * hci_chan, send the deferred EFS Configure Response, and if both
 * config directions are done bring the channel to ready (initializing
 * ERTM first).
 */
4708 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4709 struct hci_chan *hchan)
4711 struct l2cap_conf_rsp rsp;
4713 chan->hs_hchan = hchan;
4714 chan->hs_hcon->l2cap_data = chan->conn;
/* chan->ident was stashed when the Configure Response was deferred */
4716 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4718 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4721 set_default_fcs(chan);
4723 err = l2cap_ertm_init(chan);
4725 l2cap_send_disconn_req(chan, -err);
4727 l2cap_chan_ready(chan);
/* Advance the channel-move state machine after the logical link on the
 * new controller came up.  Depending on the current move state this
 * either waits for the peer's response, sends a confirm/response now,
 * or — if the state is unexpected — releases the link and settles the
 * move.
 */
4731 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4732 struct hci_chan *hchan)
4734 chan->hs_hcon = hchan->conn;
4735 chan->hs_hcon->l2cap_data = chan->conn;
4737 BT_DBG("move_state %d", chan->move_state);
4739 switch (chan->move_state) {
4740 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4741 /* Move confirm will be sent after a success
4742 * response is received
4744 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4746 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
/* Hold off while we are locally busy (e.g. rx backlog) */
4747 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4748 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4749 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4750 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4751 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4752 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4753 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4754 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4758 /* Move was not in expected state, free the channel */
4759 __release_logical_link(chan);
4761 chan->move_state = L2CAP_MOVE_STABLE;
4765 /* Call with chan locked */
/* Completion callback for logical-link setup.  On failure, run the
 * failure path and release the link; on success, dispatch to the
 * create-finish path (channel not yet connected) or the move-finish
 * path (channel already connected).
 */
4766 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4769 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4772 l2cap_logical_fail(chan);
4773 __release_logical_link(chan);
4777 if (chan->state != BT_CONNECTED) {
4778 /* Ignore logical link if channel is on BR/EDR */
4779 if (chan->local_amp_id != AMP_ID_BREDR)
4780 l2cap_logical_finish_create(chan, hchan);
4782 l2cap_logical_finish_move(chan, hchan);
/* Start moving @chan to the other controller.  From BR/EDR we only
 * move if policy prefers AMP, and physical-link setup is still a
 * placeholder; from an AMP we immediately send a Move Channel Request
 * back to BR/EDR (dest id 0).
 */
4786 void l2cap_move_start(struct l2cap_chan *chan)
4788 BT_DBG("chan %p", chan);
4790 if (chan->local_amp_id == AMP_ID_BREDR) {
/* Respect channel policy: only move off BR/EDR if AMP preferred */
4791 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4793 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4794 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4795 /* Placeholder - start physical link setup */
4797 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4798 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4800 l2cap_move_setup(chan);
/* dest 0 == AMP_ID_BREDR: moving back to BR/EDR */
4801 l2cap_send_move_chan_req(chan, 0);
/* Continue channel creation after physical-link setup.  Outgoing
 * channels either proceed with a Create Channel Request on the AMP or
 * fall back to a plain BR/EDR Connect Request; incoming channels get a
 * Create Channel Response and, on success, start configuration.
 */
4805 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4806 u8 local_amp_id, u8 remote_amp_id)
4808 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4809 local_amp_id, remote_amp_id);
/* AMP links provide CRC themselves; disable L2CAP FCS */
4811 chan->fcs = L2CAP_FCS_NONE;
4813 /* Outgoing channel on AMP */
4814 if (chan->state == BT_CONNECT) {
4815 if (result == L2CAP_CR_SUCCESS) {
4816 chan->local_amp_id = local_amp_id;
4817 l2cap_send_create_chan_req(chan, remote_amp_id);
4819 /* Revert to BR/EDR connect */
4820 l2cap_send_conn_req(chan);
4826 /* Incoming channel on AMP */
4827 if (__l2cap_no_conn_pending(chan)) {
4828 struct l2cap_conn_rsp rsp;
4830 rsp.scid = cpu_to_le16(chan->dcid);
4831 rsp.dcid = cpu_to_le16(chan->scid);
4833 if (result == L2CAP_CR_SUCCESS) {
4834 /* Send successful response */
4835 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4836 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4838 /* Send negative response */
4839 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4840 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4843 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
/* Accepted: move to CONFIG and fire the first Configure Request */
4846 if (result == L2CAP_CR_SUCCESS) {
4847 l2cap_state_change(chan, BT_CONFIG);
4848 set_bit(CONF_REQ_SENT, &chan->conf_state);
4849 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4851 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4852 chan->num_conf_req++;
/* Initiator side: prepare the channel for moving and send the Move
 * Channel Request toward @remote_amp_id, then wait for the response.
 */
4857 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4860 l2cap_move_setup(chan);
4861 chan->move_id = local_amp_id;
4862 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4864 l2cap_send_move_chan_req(chan, remote_amp_id);
/* Responder side: answer a move based on logical-link availability.
 * If the link is already connected, attach it and reply success; if it
 * is still coming up, wait for the logical-link confirm; with no link,
 * reply "not allowed".
 * NOTE(review): hchan is initialized to NULL and the code that obtains
 * it is still a placeholder — the BT_CONNECTED dereference below relies
 * on that placeholder being filled in.
 */
4867 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4869 struct hci_chan *hchan = NULL;
4871 /* Placeholder - get hci_chan for logical link */
4874 if (hchan->state == BT_CONNECTED) {
4875 /* Logical link is ready to go */
4876 chan->hs_hcon = hchan->conn;
4877 chan->hs_hcon->l2cap_data = chan->conn;
4878 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4879 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4881 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4883 /* Wait for logical link to be ready */
4884 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4887 /* Logical link not available */
4888 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort a channel move: as responder, send an error response (BAD_ID
 * for -EINVAL, otherwise NOT_ALLOWED), then reset the move state and
 * resume ERTM transmission on the current link.
 */
4892 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4894 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4896 if (result == -EINVAL)
4897 rsp_result = L2CAP_MR_BAD_ID;
4899 rsp_result = L2CAP_MR_NOT_ALLOWED;
4901 l2cap_send_move_chan_rsp(chan, rsp_result);
4904 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4905 chan->move_state = L2CAP_MOVE_STABLE;
4907 /* Restart data transmission */
4908 l2cap_ertm_send(chan);
4911 /* Invoke with locked chan */
/* Completion callback for physical-link setup.  Ignores channels that
 * are closing; otherwise routes to create continuation (not yet
 * connected), move cancel (failure), or the role-specific move path.
 */
4912 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4914 u8 local_amp_id = chan->local_amp_id;
4915 u8 remote_amp_id = chan->remote_amp_id;
4917 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4918 chan, result, local_amp_id, remote_amp_id);
/* Channel already going away: nothing to do */
4920 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
4923 if (chan->state != BT_CONNECTED) {
4924 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4925 } else if (result != L2CAP_MR_SUCCESS) {
4926 l2cap_do_move_cancel(chan, result);
4928 switch (chan->move_role) {
4929 case L2CAP_MOVE_ROLE_INITIATOR:
4930 l2cap_do_move_initiate(chan, local_amp_id,
4933 case L2CAP_MOVE_ROLE_RESPONDER:
4934 l2cap_do_move_respond(chan, result);
/* Unknown role: treat as a failed move */
4937 l2cap_do_move_cancel(chan, result);
/* Handle an incoming Move Channel Request.  Validates that the channel
 * is movable (dynamic CID, ERTM/streaming mode, policy allows it),
 * that the destination controller exists and differs from the current
 * one, and resolves move collisions by bd_addr comparison before
 * accepting the responder role and answering.
 */
4943 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4944 struct l2cap_cmd_hdr *cmd,
4945 u16 cmd_len, void *data)
4947 struct l2cap_move_chan_req *req = data;
4948 struct l2cap_move_chan_rsp rsp;
4949 struct l2cap_chan *chan;
4951 u16 result = L2CAP_MR_NOT_ALLOWED;
4953 if (cmd_len != sizeof(*req))
4956 icid = le16_to_cpu(req->icid);
4958 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
/* Moves require A2MP support on our side */
4960 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4963 chan = l2cap_get_chan_by_dcid(conn, icid);
/* Unknown icid: refuse without a channel object */
4965 rsp.icid = cpu_to_le16(icid);
4966 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4967 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4972 chan->ident = cmd->ident;
/* Only dynamic ERTM/streaming channels may move, and only when
 * policy does not pin the channel to BR/EDR.
 */
4974 if (chan->scid < L2CAP_CID_DYN_START ||
4975 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4976 (chan->mode != L2CAP_MODE_ERTM &&
4977 chan->mode != L2CAP_MODE_STREAMING)) {
4978 result = L2CAP_MR_NOT_ALLOWED;
4979 goto send_move_response;
4982 if (chan->local_amp_id == req->dest_amp_id) {
4983 result = L2CAP_MR_SAME_ID;
4984 goto send_move_response;
4987 if (req->dest_amp_id != AMP_ID_BREDR) {
4988 struct hci_dev *hdev;
4989 hdev = hci_dev_get(req->dest_amp_id);
/* Destination must be a powered-up AMP controller */
4990 if (!hdev || hdev->dev_type != HCI_AMP ||
4991 !test_bit(HCI_UP, &hdev->flags)) {
4995 result = L2CAP_MR_BAD_ID;
4996 goto send_move_response;
5001 /* Detect a move collision. Only send a collision response
5002 * if this side has "lost", otherwise proceed with the move.
5003 * The winner has the larger bd_addr.
5005 if ((__chan_is_moving(chan) ||
5006 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5007 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5008 result = L2CAP_MR_COLLISION;
5009 goto send_move_response;
5012 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5013 l2cap_move_setup(chan);
5014 chan->move_id = req->dest_amp_id;
5017 if (req->dest_amp_id == AMP_ID_BREDR) {
5018 /* Moving to BR/EDR */
5019 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5020 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5021 result = L2CAP_MR_PEND;
5023 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5024 result = L2CAP_MR_SUCCESS;
5027 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5028 /* Placeholder - uncomment when amp functions are available */
5029 /*amp_accept_physical(chan, req->dest_amp_id);*/
5030 result = L2CAP_MR_PEND;
5034 l2cap_send_move_chan_rsp(chan, result);
5036 l2cap_chan_unlock(chan);
/* Continue an in-progress move after receiving a success/pending Move
 * Channel Response.  The next step depends on the channel's move
 * state: wait for the logical link, send the confirm immediately, or —
 * for unexpected states — abort the move unconfirmed.
 */
5041 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5043 struct l2cap_chan *chan;
5044 struct hci_chan *hchan = NULL;
5046 chan = l2cap_get_chan_by_scid(conn, icid);
/* No such channel: send an unconfirmed confirm for the bare icid */
5048 l2cap_send_move_chan_cfm_icid(conn, icid);
5052 __clear_chan_timer(chan);
/* A pending result extends the wait with the ERTX timeout */
5053 if (result == L2CAP_MR_PEND)
5054 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5056 switch (chan->move_state) {
5057 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5058 /* Move confirm will be sent when logical link
5061 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5063 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5064 if (result == L2CAP_MR_PEND) {
5066 } else if (test_bit(CONN_LOCAL_BUSY,
5067 &chan->conn_state)) {
5068 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5070 /* Logical link is up or moving to BR/EDR,
5073 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5074 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5077 case L2CAP_MOVE_WAIT_RSP:
5079 if (result == L2CAP_MR_SUCCESS) {
5080 /* Remote is ready, send confirm immediately
5081 * after logical link is ready
5083 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5085 /* Both logical link and move success
5086 * are required to confirm
5088 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5091 /* Placeholder - get hci_chan for logical link */
5093 /* Logical link not available */
5094 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5098 /* If the logical link is not yet connected, do not
5099 * send confirmation.
5101 if (hchan->state != BT_CONNECTED)
5104 /* Logical link is already ready to go */
5106 chan->hs_hcon = hchan->conn;
5107 chan->hs_hcon->l2cap_data = chan->conn;
5109 if (result == L2CAP_MR_SUCCESS) {
5110 /* Can confirm now */
5111 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5113 /* Now only need move success
5116 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5119 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5122 /* Any other amp move state means the move failed. */
5123 chan->move_id = chan->local_amp_id;
5124 l2cap_move_done(chan);
5125 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5128 l2cap_chan_unlock(chan);
/* Handle a failed Move Channel Response.  On a collision the initiator
 * demotes itself to responder; otherwise the move is cancelled and an
 * unconfirmed confirm is sent.  If no channel matches the ident, the
 * icid from the packet is used as a best-effort address.
 */
5131 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5134 struct l2cap_chan *chan;
5136 chan = l2cap_get_chan_by_ident(conn, ident);
5138 /* Could not locate channel, icid is best guess */
5139 l2cap_send_move_chan_cfm_icid(conn, icid);
5143 __clear_chan_timer(chan);
5145 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5146 if (result == L2CAP_MR_COLLISION) {
/* We lost the collision: let the peer drive the move */
5147 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5149 /* Cleanup - cancel move */
5150 chan->move_id = chan->local_amp_id;
5151 l2cap_move_done(chan);
5155 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5157 l2cap_chan_unlock(chan);
/* Handle an incoming Move Channel Response: success/pending continues
 * the move state machine, any other result takes the failure path.
 */
5160 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5161 struct l2cap_cmd_hdr *cmd,
5162 u16 cmd_len, void *data)
5164 struct l2cap_move_chan_rsp *rsp = data;
5167 if (cmd_len != sizeof(*rsp))
5170 icid = le16_to_cpu(rsp->icid);
5171 result = le16_to_cpu(rsp->result);
5173 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5175 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5176 l2cap_move_continue(conn, icid, result);
5178 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle an incoming Move Channel Confirm.  If the channel was waiting
 * for confirmation, commit (or roll back) the controller switch and
 * finish the move; in all cases the spec requires a confirm response,
 * even when the icid is unknown.
 */
5183 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5184 struct l2cap_cmd_hdr *cmd,
5185 u16 cmd_len, void *data)
5187 struct l2cap_move_chan_cfm *cfm = data;
5188 struct l2cap_chan *chan;
5191 if (cmd_len != sizeof(*cfm))
5194 icid = le16_to_cpu(cfm->icid);
5195 result = le16_to_cpu(cfm->result);
5197 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5199 chan = l2cap_get_chan_by_dcid(conn, icid);
5201 /* Spec requires a response even if the icid was not found */
5202 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5206 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5207 if (result == L2CAP_MC_CONFIRMED) {
/* Commit: adopt the new controller; drop the logical
 * link if we ended up back on BR/EDR.
 */
5208 chan->local_amp_id = chan->move_id;
5209 if (chan->local_amp_id == AMP_ID_BREDR)
5210 __release_logical_link(chan);
/* Unconfirmed: stay on the current controller */
5212 chan->move_id = chan->local_amp_id;
5215 l2cap_move_done(chan);
5218 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5220 l2cap_chan_unlock(chan);
/* Handle an incoming Move Channel Confirm Response: the peer has
 * acknowledged our confirm, so commit the controller switch and finish
 * the move.
 */
5225 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5226 struct l2cap_cmd_hdr *cmd,
5227 u16 cmd_len, void *data)
5229 struct l2cap_move_chan_cfm_rsp *rsp = data;
5230 struct l2cap_chan *chan;
5233 if (cmd_len != sizeof(*rsp))
5236 icid = le16_to_cpu(rsp->icid);
5238 BT_DBG("icid 0x%4.4x", icid);
5240 chan = l2cap_get_chan_by_scid(conn, icid);
5244 __clear_chan_timer(chan);
5246 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5247 chan->local_amp_id = chan->move_id;
/* Back on BR/EDR: the high-speed logical link is no longer used */
5249 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5250 __release_logical_link(chan);
5252 l2cap_move_done(chan);
5255 l2cap_chan_unlock(chan);
/* Handle an LE Connection Parameter Update Request.  Only valid when
 * we are the LE central (master): validate the proposed parameters,
 * send accept/reject, and on accept apply them to the link and notify
 * the management interface.
 */
5260 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5261 struct l2cap_cmd_hdr *cmd,
5262 u16 cmd_len, u8 *data)
5264 struct hci_conn *hcon = conn->hcon;
5265 struct l2cap_conn_param_update_req *req;
5266 struct l2cap_conn_param_update_rsp rsp;
5267 u16 min, max, latency, to_multiplier;
/* Peripheral must not receive this request */
5270 if (hcon->role != HCI_ROLE_MASTER)
5273 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5276 req = (struct l2cap_conn_param_update_req *) data;
5277 min = __le16_to_cpu(req->min);
5278 max = __le16_to_cpu(req->max);
5279 latency = __le16_to_cpu(req->latency);
5280 to_multiplier = __le16_to_cpu(req->to_multiplier);
5282 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5283 min, max, latency, to_multiplier);
5285 memset(&rsp, 0, sizeof(rsp));
/* Range/consistency check of the proposed parameters */
5287 err = hci_check_conn_params(min, max, latency, to_multiplier);
5289 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5291 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5293 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: push the update to the controller and record it */
5299 store_hint = hci_le_conn_update(hcon, min, max, latency,
5301 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5302 store_hint, min, max, latency,
/* Handle an LE Credit Based Connection Response.  Validates the dcid
 * range and minimum MTU/MPS (23, the LE minimum), matches the channel
 * by ident, and on success records the remote parameters.  For
 * authentication/encryption failures, a security upgrade is attempted
 * before retrying the connect.
 */
5310 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5311 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5314 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5315 struct hci_conn *hcon = conn->hcon;
5316 u16 dcid, mtu, mps, credits, result;
5317 struct l2cap_chan *chan;
5320 if (cmd_len < sizeof(*rsp))
5323 dcid = __le16_to_cpu(rsp->dcid);
5324 mtu = __le16_to_cpu(rsp->mtu);
5325 mps = __le16_to_cpu(rsp->mps);
5326 credits = __le16_to_cpu(rsp->credits);
5327 result = __le16_to_cpu(rsp->result);
/* Reject a "successful" response with illegal parameters:
 * MTU/MPS below the LE minimum of 23, or dcid outside the LE
 * dynamic CID range.
 */
5329 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23 ||
5330 dcid < L2CAP_CID_DYN_START ||
5331 dcid > L2CAP_CID_LE_DYN_END))
5334 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5335 dcid, mtu, mps, credits, result);
5337 mutex_lock(&conn->chan_lock);
/* The pending channel is identified by the request's ident */
5339 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5347 l2cap_chan_lock(chan);
5350 case L2CAP_CR_SUCCESS:
/* dcid already taken by another channel: protocol error */
5351 if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5359 chan->remote_mps = mps;
5360 chan->tx_credits = credits;
5361 l2cap_chan_ready(chan);
5364 case L2CAP_CR_AUTHENTICATION:
5365 case L2CAP_CR_ENCRYPTION:
5366 /* If we already have MITM protection we can't do
5369 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5370 l2cap_chan_del(chan, ECONNREFUSED);
/* Bump security one level and retry the connection */
5374 sec_level = hcon->sec_level + 1;
5375 if (chan->sec_level < sec_level)
5376 chan->sec_level = sec_level;
5378 /* We'll need to send a new Connect Request */
5379 clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5381 smp_conn_security(hcon, chan->sec_level);
/* Any other result: connection refused */
5385 l2cap_chan_del(chan, ECONNREFUSED);
5389 l2cap_chan_unlock(chan);
5392 mutex_unlock(&conn->chan_lock);
/* Dispatch one BR/EDR signaling command to its handler based on the
 * command code.  Unknown codes are logged and (per the error paths of
 * the handlers) lead to a Command Reject from the caller.
 */
5397 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5398 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5403 switch (cmd->code) {
5404 case L2CAP_COMMAND_REJ:
5405 l2cap_command_rej(conn, cmd, cmd_len, data);
5408 case L2CAP_CONN_REQ:
5409 err = l2cap_connect_req(conn, cmd, cmd_len, data);
/* Create Channel Response shares the Connect Response handler */
5412 case L2CAP_CONN_RSP:
5413 case L2CAP_CREATE_CHAN_RSP:
5414 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5417 case L2CAP_CONF_REQ:
5418 err = l2cap_config_req(conn, cmd, cmd_len, data);
5421 case L2CAP_CONF_RSP:
5422 l2cap_config_rsp(conn, cmd, cmd_len, data);
5425 case L2CAP_DISCONN_REQ:
5426 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5429 case L2CAP_DISCONN_RSP:
5430 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
/* Echo request is answered inline by mirroring the payload */
5433 case L2CAP_ECHO_REQ:
5434 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5437 case L2CAP_ECHO_RSP:
5440 case L2CAP_INFO_REQ:
5441 err = l2cap_information_req(conn, cmd, cmd_len, data);
5444 case L2CAP_INFO_RSP:
5445 l2cap_information_rsp(conn, cmd, cmd_len, data);
5448 case L2CAP_CREATE_CHAN_REQ:
5449 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5452 case L2CAP_MOVE_CHAN_REQ:
5453 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5456 case L2CAP_MOVE_CHAN_RSP:
5457 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5460 case L2CAP_MOVE_CHAN_CFM:
5461 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5464 case L2CAP_MOVE_CHAN_CFM_RSP:
5465 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5469 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Handle an LE Credit Based Connection Request.  Finds a listening
 * channel for the PSM, checks security, scid range and uniqueness,
 * then either creates the new channel (possibly deferring the answer
 * to userspace via DEFER_SETUP) or answers with an error result.
 */
5477 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5478 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5481 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5482 struct l2cap_le_conn_rsp rsp;
5483 struct l2cap_chan *chan, *pchan;
5484 u16 dcid, scid, credits, mtu, mps;
5488 if (cmd_len != sizeof(*req))
5491 scid = __le16_to_cpu(req->scid);
5492 mtu = __le16_to_cpu(req->mtu);
5493 mps = __le16_to_cpu(req->mps);
/* 23 is the minimum legal LE MTU/MPS */
5498 if (mtu < 23 || mps < 23)
5501 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5504 /* Check if we have socket listening on psm */
5505 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5506 &conn->hcon->dst, LE_LINK);
5508 result = L2CAP_CR_BAD_PSM;
5513 mutex_lock(&conn->chan_lock);
5514 l2cap_chan_lock(pchan);
/* Link security must meet the listener's requirement */
5516 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5518 result = L2CAP_CR_AUTHENTICATION;
5520 goto response_unlock;
5523 /* Check for valid dynamic CID range */
5524 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5525 result = L2CAP_CR_INVALID_SCID;
5527 goto response_unlock;
5530 /* Check if we already have channel with that dcid */
5531 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5532 result = L2CAP_CR_SCID_IN_USE;
5534 goto response_unlock;
5537 chan = pchan->ops->new_connection(pchan);
5539 result = L2CAP_CR_NO_MEM;
5540 goto response_unlock;
5543 l2cap_le_flowctl_init(chan);
5545 bacpy(&chan->src, &conn->hcon->src);
5546 bacpy(&chan->dst, &conn->hcon->dst);
5547 chan->src_type = bdaddr_src_type(conn->hcon);
5548 chan->dst_type = bdaddr_dst_type(conn->hcon);
5552 chan->remote_mps = mps;
/* Initial TX credit allowance granted by the peer */
5553 chan->tx_credits = __le16_to_cpu(req->credits);
5555 __l2cap_chan_add(conn, chan);
5557 credits = chan->rx_credits;
5559 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5561 chan->ident = cmd->ident;
5563 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5564 l2cap_state_change(chan, BT_CONNECT2);
5565 /* The following result value is actually not defined
5566 * for LE CoC but we use it to let the function know
5567 * that it should bail out after doing its cleanup
5568 * instead of sending a response.
5570 result = L2CAP_CR_PEND;
5571 chan->ops->defer(chan);
5573 l2cap_chan_ready(chan);
5574 result = L2CAP_CR_SUCCESS;
5578 l2cap_chan_unlock(pchan);
5579 mutex_unlock(&conn->chan_lock);
5580 l2cap_chan_put(pchan);
/* Deferred: userspace will answer later, do not respond now */
5582 if (result == L2CAP_CR_PEND)
5587 rsp.mtu = cpu_to_le16(chan->imtu);
5588 rsp.mps = cpu_to_le16(chan->mps);
5594 rsp.dcid = cpu_to_le16(dcid);
5595 rsp.credits = cpu_to_le16(credits);
5596 rsp.result = cpu_to_le16(result);
5598 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/* Handle an LE Flow Control Credit packet: add the granted credits to
 * the channel, guarding against overflow past LE_FLOWCTL_MAX_CREDITS
 * (a spec violation that disconnects the channel), then flush queued
 * TX frames and resume the channel if credits remain.
 */
5603 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5604 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5607 struct l2cap_le_credits *pkt;
5608 struct l2cap_chan *chan;
5609 u16 cid, credits, max_credits;
5611 if (cmd_len != sizeof(*pkt))
5614 pkt = (struct l2cap_le_credits *) data;
5615 cid = __le16_to_cpu(pkt->cid);
5616 credits = __le16_to_cpu(pkt->credits);
5618 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5620 chan = l2cap_get_chan_by_dcid(conn, cid);
/* Credits may never push the total past the protocol maximum */
5624 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5625 if (credits > max_credits) {
5626 BT_ERR("LE credits overflow");
5627 l2cap_send_disconn_req(chan, ECONNRESET);
5628 l2cap_chan_unlock(chan);
5630 /* Return 0 so that we don't trigger an unnecessary
5631 * command reject packet.
5636 chan->tx_credits += credits;
/* Drain the TX queue while credits last */
5638 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5639 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5643 if (chan->tx_credits)
5644 chan->ops->resume(chan);
5646 l2cap_chan_unlock(chan);
/* Handle an LE Command Reject: the peer did not understand one of our
 * requests, so tear down the channel that sent it (matched by ident)
 * with ECONNREFUSED.
 */
5651 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5652 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5655 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5656 struct l2cap_chan *chan;
5658 if (cmd_len < sizeof(*rej))
5661 mutex_lock(&conn->chan_lock);
5663 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5667 l2cap_chan_lock(chan);
5668 l2cap_chan_del(chan, ECONNREFUSED);
5669 l2cap_chan_unlock(chan);
5672 mutex_unlock(&conn->chan_lock);
/* Dispatch one LE signaling command to its handler based on the
 * command code; unknown codes are logged as errors.
 */
5676 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5677 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5682 switch (cmd->code) {
5683 case L2CAP_COMMAND_REJ:
5684 l2cap_le_command_rej(conn, cmd, cmd_len, data);
5687 case L2CAP_CONN_PARAM_UPDATE_REQ:
5688 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
/* Update responses carry no state we act on here */
5691 case L2CAP_CONN_PARAM_UPDATE_RSP:
5694 case L2CAP_LE_CONN_RSP:
5695 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5698 case L2CAP_LE_CONN_REQ:
5699 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5702 case L2CAP_LE_CREDITS:
5703 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5706 case L2CAP_DISCONN_REQ:
5707 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5710 case L2CAP_DISCONN_RSP:
5711 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5715 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process an skb received on the LE signaling channel: validate link
 * type and header, parse the single command it carries, dispatch it,
 * and answer with a Command Reject if the handler returned an error.
 */
5723 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5724 struct sk_buff *skb)
5726 struct hci_conn *hcon = conn->hcon;
5727 struct l2cap_cmd_hdr *cmd;
/* LE signaling is only valid on an LE link. */
5731 if (hcon->type != LE_LINK)
5734 if (skb->len < L2CAP_CMD_HDR_SIZE)
5737 cmd = (void *) skb->data;
5738 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5740 len = le16_to_cpu(cmd->len);
5742 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* LE signaling PDUs carry exactly one command; ident 0 is illegal. */
5744 if (len != skb->len || !cmd->ident) {
5745 BT_DBG("corrupted command");
5749 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5751 struct l2cap_cmd_rej_unk rej;
5753 BT_ERR("Wrong link type (%d)", err);
5755 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5756 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Process an skb received on the BR/EDR signaling channel. Unlike the
 * LE variant, one PDU may contain several commands, so loop over the
 * buffer, dispatching each and rejecting any that fail.
 */
5764 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5765 struct sk_buff *skb)
5767 struct hci_conn *hcon = conn->hcon;
5768 u8 *data = skb->data;
5770 struct l2cap_cmd_hdr cmd;
/* Give raw (sniffer-style) sockets a copy of the signaling traffic. */
5773 l2cap_raw_recv(conn, skb);
5775 if (hcon->type != ACL_LINK)
/* Walk every complete command header remaining in the buffer. */
5778 while (len >= L2CAP_CMD_HDR_SIZE) {
5780 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5781 data += L2CAP_CMD_HDR_SIZE;
5782 len -= L2CAP_CMD_HDR_SIZE;
5784 cmd_len = le16_to_cpu(cmd.len);
5786 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
/* Payload must fit in what is left; ident 0 is illegal. */
5789 if (cmd_len > len || !cmd.ident) {
5790 BT_DBG("corrupted command");
5794 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5796 struct l2cap_cmd_rej_unk rej;
5798 BT_ERR("Wrong link type (%d)", err);
5800 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5801 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify the CRC16 Frame Check Sequence trailing a received ERTM /
 * streaming-mode frame. The CRC is computed over the L2CAP header
 * (which the caller already pulled, hence "skb->data - hdr_size")
 * plus the payload, and compared with the 2-byte trailer.
 */
5813 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5815 u16 our_fcs, rcv_fcs;
/* Extended control fields make the header larger. */
5818 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5819 hdr_size = L2CAP_EXT_HDR_SIZE;
5821 hdr_size = L2CAP_ENH_HDR_SIZE;
5823 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Strip the FCS from the payload, then read it from past the end. */
5824 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5825 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5826 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5828 if (our_fcs != rcv_fcs)
/* Answer a poll (P=1) from the peer by sending something carrying the
 * F-bit: an RNR if we are locally busy, otherwise pending I-frames,
 * or a plain RR if no I-frame went out with the F-bit set.
 */
5834 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5836 struct l2cap_ctrl control;
5838 BT_DBG("chan %p", chan);
5840 memset(&control, 0, sizeof(control));
5843 control.reqseq = chan->buffer_seq;
/* Mark that the next outgoing frame must carry the F-bit. */
5844 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5846 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5847 control.super = L2CAP_SUPER_RNR;
5848 l2cap_send_sframe(chan, &control);
/* Remote just cleared busy: restart retransmission timing if
 * frames are still outstanding. */
5851 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5852 chan->unacked_frames > 0)
5853 __set_retrans_timer(chan);
5855 /* Send pending iframes */
5856 l2cap_ertm_send(chan);
5858 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5859 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5860 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5863 control.super = L2CAP_SUPER_RR;
5864 l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's frag_list, tracking the list tail in
 * *last_frag so appends stay O(1), and update the parent skb's
 * aggregate length/size accounting.
 */
5868 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5869 struct sk_buff **last_frag)
5871 /* skb->len reflects data in skb as well as all fragments
5872 * skb->data_len reflects only data in fragments
/* First fragment starts the frag_list; later ones chain off the
 * remembered tail. */
5874 if (!skb_has_frag_list(skb))
5875 skb_shinfo(skb)->frag_list = new_frag;
5877 new_frag->next = NULL;
5879 (*last_frag)->next = new_frag;
5880 *last_frag = new_frag;
5882 skb->len += new_frag->len;
5883 skb->data_len += new_frag->len;
5884 skb->truesize += new_frag->truesize;
/* Reassemble ERTM/streaming SDUs from I-frames according to the SAR
 * (segmentation and reassembly) bits: unsegmented frames go straight
 * up, START frames open a new SDU, CONTINUE/END frames are appended
 * until the announced sdu_len is reached, then the complete SDU is
 * delivered via chan->ops->recv(). Errors drop the partial SDU.
 * NOTE(review): error-path lines are elided in this excerpt.
 */
5887 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5888 struct l2cap_ctrl *control)
5892 switch (control->sar) {
5893 case L2CAP_SAR_UNSEGMENTED:
5897 err = chan->ops->recv(chan, skb);
5900 case L2CAP_SAR_START:
/* First fragment starts with a 2-byte SDU length field. */
5904 if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
5907 chan->sdu_len = get_unaligned_le16(skb->data);
5908 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* The whole SDU must fit within the configured incoming MTU. */
5910 if (chan->sdu_len > chan->imtu) {
5915 if (skb->len >= chan->sdu_len)
5919 chan->sdu_last_frag = skb;
5925 case L2CAP_SAR_CONTINUE:
5929 append_skb_frag(chan->sdu, skb,
5930 &chan->sdu_last_frag);
/* A CONTINUE fragment must not complete (or overflow) the SDU. */
5933 if (chan->sdu->len >= chan->sdu_len)
5943 append_skb_frag(chan->sdu, skb,
5944 &chan->sdu_last_frag);
/* The END fragment must bring the SDU to exactly sdu_len. */
5947 if (chan->sdu->len != chan->sdu_len)
5950 err = chan->ops->recv(chan, chan->sdu);
5953 /* Reassembly complete */
5955 chan->sdu_last_frag = NULL;
/* Error path: discard any partially assembled SDU. */
5963 kfree_skb(chan->sdu);
5965 chan->sdu_last_frag = NULL;
/* Placeholder used after a channel move; body elided in this excerpt. */
5972 static int l2cap_resegment(struct l2cap_chan *chan)
/* Feed a local-busy transition (detected or cleared) into the ERTM
 * TX state machine. Only meaningful for ERTM-mode channels.
 */
5978 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5982 if (chan->mode != L2CAP_MODE_ERTM)
5985 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5986 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue of I-frames that have become in-order: deliver
 * consecutive frames to the reassembler until a sequence gap (or local
 * busy) stops us. When the queue empties, leave SREJ_SENT state and
 * acknowledge what was consumed.
 */
5989 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5992 /* Pass sequential frames to l2cap_reassemble_sdu()
5993 * until a gap is encountered.
5996 BT_DBG("chan %p", chan);
5998 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5999 struct sk_buff *skb;
6000 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6001 chan->buffer_seq, skb_queue_len(&chan->srej_q));
6003 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6008 skb_unlink(skb, &chan->srej_q);
6009 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6010 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
/* All out-of-order frames recovered: back to normal RECV state. */
6015 if (skb_queue_empty(&chan->srej_q)) {
6016 chan->rx_state = L2CAP_RX_STATE_RECV;
6017 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: validate the requested sequence
 * number, enforce the retry limit, then retransmit the single
 * requested frame, with P/F-bit bookkeeping to avoid retransmitting
 * twice for the same poll/final exchange (CONN_SREJ_ACT).
 */
6023 static void l2cap_handle_srej(struct l2cap_chan *chan,
6024 struct l2cap_ctrl *control)
6026 struct sk_buff *skb;
6028 BT_DBG("chan %p, control %p", chan, control);
/* reqseq equal to next_tx_seq requests a frame never sent. */
6030 if (control->reqseq == chan->next_tx_seq) {
6031 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6032 l2cap_send_disconn_req(chan, ECONNRESET);
6036 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6039 BT_DBG("Seq %d not available for retransmission",
/* Too many retransmissions of this frame: give up on the link. */
6044 if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6045 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6046 l2cap_send_disconn_req(chan, ECONNRESET);
6050 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6052 if (control->poll) {
6053 l2cap_pass_to_tx(chan, control);
/* Poll demands an F-bit in our response frame. */
6055 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6056 l2cap_retransmit(chan, control);
6057 l2cap_ertm_send(chan);
6059 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6060 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6061 chan->srej_save_reqseq = control->reqseq;
6064 l2cap_pass_to_tx_fbit(chan, control);
6066 if (control->final) {
/* Skip the retransmit only if this SREJ matches one we already
 * acted on during the poll exchange. */
6067 if (chan->srej_save_reqseq != control->reqseq ||
6068 !test_and_clear_bit(CONN_SREJ_ACT,
6070 l2cap_retransmit(chan, control);
6072 l2cap_retransmit(chan, control);
6073 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6074 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6075 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: validate reqseq, enforce the retry
 * limit, then retransmit all unacked frames from reqseq onward, using
 * CONN_REJ_ACT to avoid a duplicate retransmit burst for the same
 * poll/final exchange.
 */
6081 static void l2cap_handle_rej(struct l2cap_chan *chan,
6082 struct l2cap_ctrl *control)
6084 struct sk_buff *skb;
6086 BT_DBG("chan %p, control %p", chan, control);
/* reqseq equal to next_tx_seq rejects a frame never sent. */
6088 if (control->reqseq == chan->next_tx_seq) {
6089 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6090 l2cap_send_disconn_req(chan, ECONNRESET);
6094 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6096 if (chan->max_tx && skb &&
6097 bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6098 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6099 l2cap_send_disconn_req(chan, ECONNRESET);
6103 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6105 l2cap_pass_to_tx(chan, control);
6107 if (control->final) {
/* Only retransmit if we didn't already act on this REJ. */
6108 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6109 l2cap_retransmit_all(chan, control);
6111 l2cap_retransmit_all(chan, control);
6112 l2cap_ertm_send(chan);
6113 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6114 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify the txseq of a received I-frame relative to the receive
 * window and SREJ bookkeeping: EXPECTED, EXPECTED_SREJ,
 * UNEXPECTED(_SREJ), DUPLICATE(_SREJ), INVALID, or INVALID_IGNORE.
 * The caller's RX state machine acts on the returned class.
 */
6118 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6120 BT_DBG("chan %p, txseq %d", chan, txseq);
6122 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6123 chan->expected_tx_seq);
/* While SREJs are outstanding, extra checks apply before the
 * generic window classification below. */
6125 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6126 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6128 /* See notes below regarding "double poll" and
6131 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6132 BT_DBG("Invalid/Ignore - after SREJ");
6133 return L2CAP_TXSEQ_INVALID_IGNORE;
6135 BT_DBG("Invalid - in window after SREJ sent");
6136 return L2CAP_TXSEQ_INVALID;
/* The next retransmission we asked for. */
6140 if (chan->srej_list.head == txseq) {
6141 BT_DBG("Expected SREJ");
6142 return L2CAP_TXSEQ_EXPECTED_SREJ;
6145 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6146 BT_DBG("Duplicate SREJ - txseq already stored");
6147 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6150 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6151 BT_DBG("Unexpected SREJ - not requested");
6152 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6156 if (chan->expected_tx_seq == txseq) {
6157 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6159 BT_DBG("Invalid - txseq outside tx window");
6160 return L2CAP_TXSEQ_INVALID;
6163 return L2CAP_TXSEQ_EXPECTED;
/* Anything earlier than expected_tx_seq was already received. */
6167 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6168 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6169 BT_DBG("Duplicate - expected_tx_seq later than txseq")%3B
6170 return L2CAP_TXSEQ_DUPLICATE;
6173 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6174 /* A source of invalid packets is a "double poll" condition,
6175 * where delays cause us to send multiple poll packets. If
6176 * the remote stack receives and processes both polls,
6177 * sequence numbers can wrap around in such a way that a
6178 * resent frame has a sequence number that looks like new data
6179 * with a sequence gap. This would trigger an erroneous SREJ
6182 * Fortunately, this is impossible with a tx window that's
6183 * less than half of the maximum sequence number, which allows
6184 * invalid frames to be safely ignored.
6186 * With tx window sizes greater than half of the tx window
6187 * maximum, the frame is invalid and cannot be ignored. This
6188 * causes a disconnect.
6191 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6192 BT_DBG("Invalid/Ignore - txseq outside tx window");
6193 return L2CAP_TXSEQ_INVALID_IGNORE;
6195 BT_DBG("Invalid - txseq outside tx window");
6196 return L2CAP_TXSEQ_INVALID;
6199 BT_DBG("Unexpected - txseq indicates missing frames");
6200 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM RX state machine handler for the normal RECV state: dispatch
 * on the event (I-frame, RR, RNR, REJ, SREJ) and on the txseq class
 * for I-frames. An in-order I-frame is reassembled and consumed
 * (skb_in_use); an out-of-order one is stashed on srej_q and SREJ
 * recovery is started. Frames not consumed are freed at the end.
 */
6204 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6205 struct l2cap_ctrl *control,
6206 struct sk_buff *skb, u8 event)
6209 bool skb_in_use = false;
6211 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6215 case L2CAP_EV_RECV_IFRAME:
6216 switch (l2cap_classify_txseq(chan, control->txseq)) {
6217 case L2CAP_TXSEQ_EXPECTED:
6218 l2cap_pass_to_tx(chan, control);
/* Locally busy: drop; SREJ recovery will fetch it later. */
6220 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6221 BT_DBG("Busy, discarding expected seq %d",
6226 chan->expected_tx_seq = __next_seq(chan,
6229 chan->buffer_seq = chan->expected_tx_seq;
6232 err = l2cap_reassemble_sdu(chan, skb, control);
6236 if (control->final) {
6237 if (!test_and_clear_bit(CONN_REJ_ACT,
6238 &chan->conn_state)) {
6240 l2cap_retransmit_all(chan, control);
6241 l2cap_ertm_send(chan);
6245 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6246 l2cap_send_ack(chan);
6248 case L2CAP_TXSEQ_UNEXPECTED:
6249 l2cap_pass_to_tx(chan, control);
6251 /* Can't issue SREJ frames in the local busy state.
6252 * Drop this frame, it will be seen as missing
6253 * when local busy is exited.
6255 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6256 BT_DBG("Busy, discarding unexpected seq %d",
6261 /* There was a gap in the sequence, so an SREJ
6262 * must be sent for each missing frame. The
6263 * current frame is stored for later use.
6265 skb_queue_tail(&chan->srej_q, skb);
6267 BT_DBG("Queued %p (queue len %d)", skb,
6268 skb_queue_len(&chan->srej_q));
6270 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6271 l2cap_seq_list_clear(&chan->srej_list);
6272 l2cap_send_srej(chan, control->txseq);
6274 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6276 case L2CAP_TXSEQ_DUPLICATE:
6277 l2cap_pass_to_tx(chan, control);
6279 case L2CAP_TXSEQ_INVALID_IGNORE:
/* Unignorable invalid frame: protocol error, disconnect. */
6281 case L2CAP_TXSEQ_INVALID:
6283 l2cap_send_disconn_req(chan, ECONNRESET);
6287 case L2CAP_EV_RECV_RR:
6288 l2cap_pass_to_tx(chan, control);
6289 if (control->final) {
6290 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6292 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6293 !__chan_is_moving(chan)) {
6295 l2cap_retransmit_all(chan, control);
6298 l2cap_ertm_send(chan);
6299 } else if (control->poll) {
6300 l2cap_send_i_or_rr_or_rnr(chan);
6302 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6303 &chan->conn_state) &&
6304 chan->unacked_frames)
6305 __set_retrans_timer(chan);
6307 l2cap_ertm_send(chan);
6310 case L2CAP_EV_RECV_RNR:
/* Peer announced receiver-not-ready: hold retransmissions. */
6311 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6312 l2cap_pass_to_tx(chan, control);
6313 if (control && control->poll) {
6314 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6315 l2cap_send_rr_or_rnr(chan, 0);
6317 __clear_retrans_timer(chan);
6318 l2cap_seq_list_clear(&chan->retrans_list);
6320 case L2CAP_EV_RECV_REJ:
6321 l2cap_handle_rej(chan, control);
6323 case L2CAP_EV_RECV_SREJ:
6324 l2cap_handle_srej(chan, control);
/* Any skb not queued or consumed above is released here. */
6330 if (skb && !skb_in_use) {
6331 BT_DBG("Freeing %p", skb);
/* ERTM RX state machine handler for SREJ_SENT state (selective-reject
 * recovery in progress): queue arriving frames on srej_q, pop entries
 * off the srej_list as requested retransmissions arrive, issue more
 * SREJs for newly discovered gaps, and deliver in-order frames via
 * l2cap_rx_queued_iframes(). S-frames are handled inline.
 */
6338 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6339 struct l2cap_ctrl *control,
6340 struct sk_buff *skb, u8 event)
6343 u16 txseq = control->txseq;
6344 bool skb_in_use = false;
6346 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6350 case L2CAP_EV_RECV_IFRAME:
6351 switch (l2cap_classify_txseq(chan, txseq)) {
6352 case L2CAP_TXSEQ_EXPECTED:
6353 /* Keep frame for reassembly later */
6354 l2cap_pass_to_tx(chan, control);
6355 skb_queue_tail(&chan->srej_q, skb);
6357 BT_DBG("Queued %p (queue len %d)", skb,
6358 skb_queue_len(&chan->srej_q));
6360 chan->expected_tx_seq = __next_seq(chan, txseq);
6362 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* A retransmission we asked for arrived; drop it from the
 * outstanding-SREJ list and try to drain the queue. */
6363 l2cap_seq_list_pop(&chan->srej_list);
6365 l2cap_pass_to_tx(chan, control);
6366 skb_queue_tail(&chan->srej_q, skb);
6368 BT_DBG("Queued %p (queue len %d)", skb,
6369 skb_queue_len(&chan->srej_q));
6371 err = l2cap_rx_queued_iframes(chan);
6376 case L2CAP_TXSEQ_UNEXPECTED:
6377 /* Got a frame that can't be reassembled yet.
6378 * Save it for later, and send SREJs to cover
6379 * the missing frames.
6381 skb_queue_tail(&chan->srej_q, skb);
6383 BT_DBG("Queued %p (queue len %d)", skb,
6384 skb_queue_len(&chan->srej_q));
6386 l2cap_pass_to_tx(chan, control);
6387 l2cap_send_srej(chan, control->txseq);
6389 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6390 /* This frame was requested with an SREJ, but
6391 * some expected retransmitted frames are
6392 * missing. Request retransmission of missing
6395 skb_queue_tail(&chan->srej_q, skb);
6397 BT_DBG("Queued %p (queue len %d)", skb,
6398 skb_queue_len(&chan->srej_q));
6400 l2cap_pass_to_tx(chan, control);
6401 l2cap_send_srej_list(chan, control->txseq);
6403 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6404 /* We've already queued this frame. Drop this copy. */
6405 l2cap_pass_to_tx(chan, control);
6407 case L2CAP_TXSEQ_DUPLICATE:
6408 /* Expecting a later sequence number, so this frame
6409 * was already received. Ignore it completely.
6412 case L2CAP_TXSEQ_INVALID_IGNORE:
6414 case L2CAP_TXSEQ_INVALID:
6416 l2cap_send_disconn_req(chan, ECONNRESET);
6420 case L2CAP_EV_RECV_RR:
6421 l2cap_pass_to_tx(chan, control);
6422 if (control->final) {
6423 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6425 if (!test_and_clear_bit(CONN_REJ_ACT,
6426 &chan->conn_state)) {
6428 l2cap_retransmit_all(chan, control);
6431 l2cap_ertm_send(chan);
6432 } else if (control->poll) {
6433 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6434 &chan->conn_state) &&
6435 chan->unacked_frames) {
6436 __set_retrans_timer(chan);
/* Answer the poll by re-sending the tail of our SREJ list
 * with the F-bit set. */
6439 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6440 l2cap_send_srej_tail(chan);
6442 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6443 &chan->conn_state) &&
6444 chan->unacked_frames)
6445 __set_retrans_timer(chan);
6447 l2cap_send_ack(chan);
6450 case L2CAP_EV_RECV_RNR:
6451 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6452 l2cap_pass_to_tx(chan, control);
6453 if (control->poll) {
6454 l2cap_send_srej_tail(chan);
6456 struct l2cap_ctrl rr_control;
6457 memset(&rr_control, 0, sizeof(rr_control));
6458 rr_control.sframe = 1;
6459 rr_control.super = L2CAP_SUPER_RR;
6460 rr_control.reqseq = chan->buffer_seq;
6461 l2cap_send_sframe(chan, &rr_control);
6465 case L2CAP_EV_RECV_REJ:
6466 l2cap_handle_rej(chan, control);
6468 case L2CAP_EV_RECV_SREJ:
6469 l2cap_handle_srej(chan, control);
/* Any skb not queued above is released here. */
6473 if (skb && !skb_in_use) {
6474 BT_DBG("Freeing %p", skb);
/* Finish an AMP channel move: return to normal RECV state, pick the
 * connection MTU from the new transport (high-speed controller's
 * block MTU if present, otherwise the ACL MTU) and resegment any
 * pending outgoing data for it.
 */
6481 static int l2cap_finish_move(struct l2cap_chan *chan)
6483 BT_DBG("chan %p", chan);
6485 chan->rx_state = L2CAP_RX_STATE_RECV;
6488 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6490 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6492 return l2cap_resegment(chan);
/* RX handler for WAIT_P state (waiting for the peer's poll after a
 * channel move): once the poll arrives, rewind the TX side to the
 * peer's reqseq, finish the move, answer the poll with the F-bit,
 * then reprocess the triggering event through the RECV handler.
 */
6495 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6496 struct l2cap_ctrl *control,
6497 struct sk_buff *skb, u8 event)
6501 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6507 l2cap_process_reqseq(chan, control->reqseq);
6509 if (!skb_queue_empty(&chan->tx_q))
6510 chan->tx_send_head = skb_peek(&chan->tx_q);
6512 chan->tx_send_head = NULL;
6514 /* Rewind next_tx_seq to the point expected
6517 chan->next_tx_seq = control->reqseq;
6518 chan->unacked_frames = 0;
6520 err = l2cap_finish_move(chan);
6524 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6525 l2cap_send_i_or_rr_or_rnr(chan);
/* I-frame data was consumed by the poll handling above; S-frame
 * events are replayed through the normal RECV handler. */
6527 if (event == L2CAP_EV_RECV_IFRAME)
6530 return l2cap_rx_state_recv(chan, control, NULL, event);
/* RX handler for WAIT_F state (waiting for a final-bit response after
 * we polled, post channel-move): when the F-bit arrives, rewind the TX
 * side to the peer's reqseq, adopt the new transport's MTU, resegment,
 * then let the RECV handler process the frame normally.
 */
6533 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6534 struct l2cap_ctrl *control,
6535 struct sk_buff *skb, u8 event)
/* Ignore everything until the frame carrying the F-bit shows up. */
6539 if (!control->final)
6542 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6544 chan->rx_state = L2CAP_RX_STATE_RECV;
6545 l2cap_process_reqseq(chan, control->reqseq);
6547 if (!skb_queue_empty(&chan->tx_q))
6548 chan->tx_send_head = skb_peek(&chan->tx_q);
6550 chan->tx_send_head = NULL;
6552 /* Rewind next_tx_seq to the point expected
6555 chan->next_tx_seq = control->reqseq;
6556 chan->unacked_frames = 0;
/* High-speed transport uses the block MTU, else the ACL MTU. */
6559 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6561 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6563 err = l2cap_resegment(chan);
6566 err = l2cap_rx_state_recv(chan, control, skb, event);
/* Return true when reqseq acknowledges a frame that is actually
 * outstanding, i.e. lies within [expected_ack_seq, next_tx_seq].
 */
6571 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6573 /* Make sure reqseq is for a packet that has been sent but not acked */
6576 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6577 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM RX dispatcher: validate the frame's reqseq, then hand
 * the event to the handler for the channel's current RX state. An
 * invalid reqseq is a protocol violation and disconnects the channel.
 */
6580 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6581 struct sk_buff *skb, u8 event)
6585 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6586 control, skb, event, chan->rx_state);
6588 if (__valid_reqseq(chan, control->reqseq)) {
6589 switch (chan->rx_state) {
6590 case L2CAP_RX_STATE_RECV:
6591 err = l2cap_rx_state_recv(chan, control, skb, event);
6593 case L2CAP_RX_STATE_SREJ_SENT:
6594 err = l2cap_rx_state_srej_sent(chan, control, skb,
6597 case L2CAP_RX_STATE_WAIT_P:
6598 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6600 case L2CAP_RX_STATE_WAIT_F:
6601 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6608 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6609 control->reqseq, chan->next_tx_seq,
6610 chan->expected_ack_seq);
6611 l2cap_send_disconn_req(chan, ECONNRESET);
/* Receive path for streaming mode: deliver only in-order (EXPECTED)
 * frames; everything else is silently dropped — streaming mode has no
 * retransmission. Sequence state is advanced past the received txseq
 * regardless, so a gap just means lost data.
 */
6617 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6618 struct sk_buff *skb)
6620 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6623 if (l2cap_classify_txseq(chan, control->txseq) ==
6624 L2CAP_TXSEQ_EXPECTED) {
6625 l2cap_pass_to_tx(chan, control);
6627 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6628 __next_seq(chan, chan->buffer_seq));
6630 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6632 l2cap_reassemble_sdu(chan, skb, control);
/* Out-of-sequence frame: drop it and any partial SDU. */
6635 kfree_skb(chan->sdu);
6638 chan->sdu_last_frag = NULL;
6642 BT_DBG("Freeing %p", skb);
6647 chan->last_acked_seq = control->txseq;
6648 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Entry point for data frames on ERTM/streaming channels: unpack the
 * control field, verify the FCS, validate the payload size against the
 * negotiated MPS, then route I-frames and S-frames into the proper
 * state machine (ERTM) or the streaming receiver.
 */
6653 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6655 struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
6659 __unpack_control(chan, skb);
6664 * We can just drop the corrupted I-frame here.
6665 * Receiver will miss it and start proper recovery
6666 * procedures and ask for retransmission.
6668 if (l2cap_check_fcs(chan, skb))
/* Account for the SDU-length header on START segments and the FCS
 * trailer when computing the true payload length. */
6671 if (!control->sframe && control->sar == L2CAP_SAR_START)
6672 len -= L2CAP_SDULEN_SIZE;
6674 if (chan->fcs == L2CAP_FCS_CRC16)
6675 len -= L2CAP_FCS_SIZE;
/* Payload exceeding the negotiated MPS is a protocol violation. */
6677 if (len > chan->mps) {
6678 l2cap_send_disconn_req(chan, ECONNRESET);
/* Optional per-channel filter hook may consume/refuse the frame. */
6682 if (chan->ops->filter) {
6683 if (chan->ops->filter(chan, skb))
6687 if (!control->sframe) {
6690 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6691 control->sar, control->reqseq, control->final,
6694 /* Validate F-bit - F=0 always valid, F=1 only
6695 * valid in TX WAIT_F
6697 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6700 if (chan->mode != L2CAP_MODE_STREAMING) {
6701 event = L2CAP_EV_RECV_IFRAME;
6702 err = l2cap_rx(chan, control, skb, event);
6704 err = l2cap_stream_rx(chan, control, skb);
6708 l2cap_send_disconn_req(chan, ECONNRESET);
/* Map the 2-bit supervisory function to an RX event. */
6710 const u8 rx_func_to_event[4] = {
6711 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6712 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6715 /* Only I-frames are expected in streaming mode */
6716 if (chan->mode == L2CAP_MODE_STREAMING)
6719 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6720 control->reqseq, control->final, control->poll,
/* S-frames carry no payload; trailing bytes are an error. */
6724 BT_ERR("Trailing bytes: %d in sframe", len);
6725 l2cap_send_disconn_req(chan, ECONNRESET);
6729 /* Validate F and P bits */
6730 if (control->final && (control->poll ||
6731 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6734 event = rx_func_to_event[control->super];
6735 if (l2cap_rx(chan, control, skb, event))
6736 l2cap_send_disconn_req(chan, ECONNRESET);
6746 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6748 struct l2cap_conn *conn = chan->conn;
6749 struct l2cap_le_credits pkt;
6752 /* We return more credits to the sender only after the amount of
6753 * credits falls below half of the initial amount.
6755 if (chan->rx_credits >= (le_max_credits + 1) / 2)
6758 return_credits = le_max_credits - chan->rx_credits;
6760 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6762 chan->rx_credits += return_credits;
6764 pkt.cid = cpu_to_le16(chan->scid);
6765 pkt.credits = cpu_to_le16(return_credits);
6767 chan->ident = l2cap_get_ident(conn);
6769 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* Receive a PDU on an LE credit-based flow-control channel: enforce
 * the credit count and MTU, replenish the peer's credits, then either
 * deliver a complete SDU or accumulate fragments until the announced
 * SDU length is reached. Always returns "handled" on internal drops
 * so the caller never double-frees the skb (see trailing comment).
 */
6772 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
/* Data without credits is a flow-control violation. */
6776 if (!chan->rx_credits) {
6777 BT_ERR("No credits to receive LE L2CAP data");
6778 l2cap_send_disconn_req(chan, ECONNRESET);
6782 if (chan->imtu < skb->len) {
6783 BT_ERR("Too big LE L2CAP PDU");
6788 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6790 l2cap_chan_le_send_credits(chan);
/* First PDU of an SDU starts with a 2-byte SDU length field. */
6797 sdu_len = get_unaligned_le16(skb->data);
6798 skb_pull(skb, L2CAP_SDULEN_SIZE);
6800 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6801 sdu_len, skb->len, chan->imtu);
6803 if (sdu_len > chan->imtu) {
6804 BT_ERR("Too big LE L2CAP SDU length received");
6809 if (skb->len > sdu_len) {
6810 BT_ERR("Too much LE L2CAP data received");
/* Whole SDU in one PDU: deliver immediately. */
6815 if (skb->len == sdu_len)
6816 return chan->ops->recv(chan, skb);
6819 chan->sdu_len = sdu_len;
6820 chan->sdu_last_frag = skb;
6822 /* Detect if remote is not able to use the selected MPS */
6823 if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
6824 u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
6826 /* Adjust the number of credits */
6827 BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
6828 chan->mps = mps_len;
6829 l2cap_chan_le_send_credits(chan);
6835 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6836 chan->sdu->len, skb->len, chan->sdu_len);
6838 if (chan->sdu->len + skb->len > chan->sdu_len) {
6839 BT_ERR("Too much LE L2CAP data received");
6844 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
/* SDU complete: hand it up and reset the reassembly state. */
6847 if (chan->sdu->len == chan->sdu_len) {
6848 err = chan->ops->recv(chan, chan->sdu);
6851 chan->sdu_last_frag = NULL;
6859 kfree_skb(chan->sdu);
6861 chan->sdu_last_frag = NULL;
6865 /* We can't return an error here since we took care of the skb
6866 * freeing internally. An error return would cause the caller to
6867 * do a double-free of the skb.
/* Route a data frame to the channel identified by source CID and
 * process it according to the channel's mode (LE flow control, basic,
 * ERTM/streaming). Unknown CIDs (except the A2MP create path) and
 * non-connected channels cause the packet to be dropped.
 */
6872 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6873 struct sk_buff *skb)
6875 struct l2cap_chan *chan;
6877 chan = l2cap_get_chan_by_scid(conn, cid);
/* Data on the A2MP fixed CID may implicitly create the channel. */
6879 if (cid == L2CAP_CID_A2MP) {
6880 chan = a2mp_channel_create(conn, skb);
6886 l2cap_chan_lock(chan);
6888 BT_DBG("unknown cid 0x%4.4x", cid);
6889 /* Drop packet and return */
6895 BT_DBG("chan %p, len %d", chan, skb->len);
6897 /* If we receive data on a fixed channel before the info req/rsp
6898 * procdure is done simply assume that the channel is supported
6899 * and mark it as ready.
6901 if (chan->chan_type == L2CAP_CHAN_FIXED)
6902 l2cap_chan_ready(chan);
6904 if (chan->state != BT_CONNECTED)
6907 switch (chan->mode) {
6908 case L2CAP_MODE_LE_FLOWCTL:
6909 if (l2cap_le_data_rcv(chan, skb) < 0)
6914 case L2CAP_MODE_BASIC:
6915 /* If socket recv buffers overflows we drop data here
6916 * which is *bad* because L2CAP has to be reliable.
6917 * But we don't have any other choice. L2CAP doesn't
6918 * provide flow control mechanism. */
6920 if (chan->imtu < skb->len) {
6921 BT_ERR("Dropping L2CAP data: receive buffer overflow");
6925 if (!chan->ops->recv(chan, skb))
6929 case L2CAP_MODE_ERTM:
6930 case L2CAP_MODE_STREAMING:
6931 l2cap_data_rcv(chan, skb);
6935 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6943 l2cap_chan_unlock(chan);
/* Deliver a connectionless (CID 0x0002) datagram to a channel bound to
 * the given PSM, stamping the skb control block with the sender's
 * address and PSM for recvmsg's msg_name. ACL links only.
 */
6946 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6947 struct sk_buff *skb)
6949 struct hci_conn *hcon = conn->hcon;
6950 struct l2cap_chan *chan;
6952 if (hcon->type != ACL_LINK)
6955 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6960 BT_DBG("chan %p, len %d", chan, skb->len);
6962 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6965 if (chan->imtu < skb->len)
6968 /* Store remote BD_ADDR and PSM for msg_name */
6969 bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
6970 bt_cb(skb)->l2cap.psm = psm;
/* recv() consuming the skb also drops our channel reference. */
6972 if (!chan->ops->recv(chan, skb)) {
6973 l2cap_chan_put(chan);
6978 l2cap_chan_put(chan);
/* Demultiplex one L2CAP frame from HCI: queue it if the link is not
 * yet fully connected, validate the basic header, filter blacklisted
 * LE peers, then route by CID to the signaling, connectionless, LE
 * signaling, or per-channel data handlers.
 */
6983 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6985 struct l2cap_hdr *lh = (void *) skb->data;
6986 struct hci_conn *hcon = conn->hcon;
/* Frames arriving before BT_CONNECTED are replayed later by
 * process_pending_rx(). */
6990 if (hcon->state != BT_CONNECTED) {
6991 BT_DBG("queueing pending rx skb");
6992 skb_queue_tail(&conn->pending_rx, skb);
6996 skb_pull(skb, L2CAP_HDR_SIZE);
6997 cid = __le16_to_cpu(lh->cid);
6998 len = __le16_to_cpu(lh->len);
7000 if (len != skb->len) {
7005 /* Since we can't actively block incoming LE connections we must
7006 * at least ensure that we ignore incoming data from them.
7008 if (hcon->type == LE_LINK &&
7009 hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
7010 bdaddr_dst_type(hcon))) {
7015 BT_DBG("len %d, cid 0x%4.4x", len, cid);
7018 case L2CAP_CID_SIGNALING:
7019 l2cap_sig_channel(conn, skb);
7022 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM in front of the payload. */
7023 psm = get_unaligned((__le16 *) skb->data);
7024 skb_pull(skb, L2CAP_PSMLEN_SIZE);
7025 l2cap_conless_channel(conn, psm, skb);
7028 case L2CAP_CID_LE_SIGNALING:
7029 l2cap_le_sig_channel(conn, skb);
7033 l2cap_data_channel(conn, cid, skb);
/* Workqueue callback: replay frames that arrived before the HCI link
 * reached BT_CONNECTED through the normal receive path.
 */
7038 static void process_pending_rx(struct work_struct *work)
7040 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7042 struct sk_buff *skb;
7046 while ((skb = skb_dequeue(&conn->pending_rx)))
7047 l2cap_recv_frame(conn, skb);
/* Create (or return the existing) L2CAP connection object for an HCI
 * connection: allocate the hci_chan and l2cap_conn, pick the MTU from
 * the controller (LE vs ACL), advertise locally supported fixed
 * channels, and initialize locks, lists, timers and work items.
 */
7050 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7052 struct l2cap_conn *conn = hcon->l2cap_data;
7053 struct hci_chan *hchan;
7058 hchan = hci_chan_create(hcon);
7062 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
/* Allocation failed: release the hci_chan created above. */
7064 hci_chan_del(hchan);
7068 kref_init(&conn->ref);
7069 hcon->l2cap_data = conn;
7070 conn->hcon = hci_conn_get(hcon);
7071 conn->hchan = hchan;
7073 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
/* MTU comes from the controller; LE links prefer le_mtu when set. */
7075 switch (hcon->type) {
7077 if (hcon->hdev->le_mtu) {
7078 conn->mtu = hcon->hdev->le_mtu;
7083 conn->mtu = hcon->hdev->acl_mtu;
7087 conn->feat_mask = 0;
7089 conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7091 if (hcon->type == ACL_LINK &&
7092 hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7093 conn->local_fixed_chan |= L2CAP_FC_A2MP;
/* Advertise BR/EDR SMP only when Secure Connections (or the debug
 * force flag) makes it usable. */
7095 if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7096 (bredr_sc_enabled(hcon->hdev) ||
7097 hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7098 conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7100 mutex_init(&conn->ident_lock);
7101 mutex_init(&conn->chan_lock);
7103 INIT_LIST_HEAD(&conn->chan_l);
7104 INIT_LIST_HEAD(&conn->users);
7106 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7108 skb_queue_head_init(&conn->pending_rx);
7109 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7110 INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7112 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* Validate a PSM for the destination address type: LE uses the
 * 0x0001-0x00ff range, while BR/EDR PSMs must be odd with the least
 * significant bit of the upper byte clear.
 */
7117 static bool is_valid_psm(u16 psm, u8 dst_type) {
7121 if (bdaddr_type_is_le(dst_type))
7122 return (psm <= 0x00ff);
7124 /* PSM must be odd and lsb of upper byte must be 0 */
7125 return ((psm & 0x0101) == 0x0001);
/* Initiate an outgoing L2CAP connection on a channel: validate PSM/CID
 * for the channel type and mode, resolve a route to the destination,
 * create or reuse the underlying HCI connection (LE vs BR/EDR paths),
 * attach the channel to the L2CAP connection, and start the channel
 * state machine (or finish immediately if the link is already up).
 */
7128 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7129 bdaddr_t *dst, u8 dst_type)
7131 struct l2cap_conn *conn;
7132 struct hci_conn *hcon;
7133 struct hci_dev *hdev;
7136 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
7137 dst_type, __le16_to_cpu(psm));
7139 hdev = hci_get_route(dst, &chan->src, chan->src_type);
7141 return -EHOSTUNREACH;
/* A connection needs either a valid PSM or a fixed CID (raw
 * channels are exempt). */
7145 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7146 chan->chan_type != L2CAP_CHAN_RAW) {
7151 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7156 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7161 switch (chan->mode) {
7162 case L2CAP_MODE_BASIC:
7164 case L2CAP_MODE_LE_FLOWCTL:
7165 l2cap_le_flowctl_init(chan);
7167 case L2CAP_MODE_ERTM:
7168 case L2CAP_MODE_STREAMING:
7177 switch (chan->state) {
7181 /* Already connecting */
7186 /* Already connected */
7200 /* Set destination address and psm */
7201 bacpy(&chan->dst, dst);
7202 chan->dst_type = dst_type;
7207 if (bdaddr_type_is_le(dst_type)) {
7208 /* Convert from L2CAP channel address type to HCI address type
7210 if (dst_type == BDADDR_LE_PUBLIC)
7211 dst_type = ADDR_LE_DEV_PUBLIC;
7213 dst_type = ADDR_LE_DEV_RANDOM;
/* While advertising we connect directly; otherwise go through
 * the passive connect-by-scan path. */
7215 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7216 hcon = hci_connect_le(hdev, dst, dst_type,
7218 HCI_LE_CONN_TIMEOUT,
7219 HCI_ROLE_SLAVE, NULL);
7221 hcon = hci_connect_le_scan(hdev, dst, dst_type,
7223 HCI_LE_CONN_TIMEOUT);
7226 u8 auth_type = l2cap_get_auth_type(chan);
7227 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
7231 err = PTR_ERR(hcon);
7235 conn = l2cap_conn_add(hcon);
7237 hci_conn_drop(hcon);
7242 mutex_lock(&conn->chan_lock);
7243 l2cap_chan_lock(chan);
/* A fixed CID must not already be in use on this connection. */
7245 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7246 hci_conn_drop(hcon);
7251 /* Update source addr of the socket */
7252 bacpy(&chan->src, &hcon->src);
7253 chan->src_type = bdaddr_src_type(hcon);
7255 __l2cap_chan_add(conn, chan);
7257 /* l2cap_chan_add takes its own ref so we can drop this one */
7258 hci_conn_drop(hcon);
7260 l2cap_state_change(chan, BT_CONNECT);
7261 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7263 /* Release chan->sport so that it can be reused by other
7264 * sockets (as it's only used for listening sockets).
7266 write_lock(&chan_list_lock);
7268 write_unlock(&chan_list_lock);
/* Link already up: skip straight ahead in the state machine. */
7270 if (hcon->state == BT_CONNECTED) {
7271 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7272 __clear_chan_timer(chan);
7273 if (l2cap_chan_check_security(chan, true))
7274 l2cap_state_change(chan, BT_CONNECTED);
7276 l2cap_do_start(chan);
7282 l2cap_chan_unlock(chan);
7283 mutex_unlock(&conn->chan_lock);
7285 hci_dev_unlock(hdev);
7289 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7291 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* Incoming ACL connection indication from the HCI layer.
 *
 * Scans the global channel list for listening channels bound either to
 * this adapter's own address (exact match, lm1) or to BDADDR_ANY
 * (wildcard, lm2) and accumulates link-mode flags (accept / role switch).
 * Returns the exact-match flags when one was found, the wildcard flags
 * otherwise.
 *
 * NOTE(review): the statement that sets `exact` on an address match is in
 * a gap of this extract — TODO confirm against the full source.
 */
7293 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7295 int exact = 0, lm1 = 0, lm2 = 0;
7296 struct l2cap_chan *c;
7298 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7300 /* Find listening sockets and check their link_mode */
7301 read_lock(&chan_list_lock);
7302 list_for_each_entry(c, &chan_list, global_l) {
7303 if (c->state != BT_LISTEN)
/* Channel bound specifically to this adapter's address. */
7306 if (!bacmp(&c->src, &hdev->bdaddr)) {
7307 lm1 |= HCI_LM_ACCEPT;
7308 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7309 lm1 |= HCI_LM_MASTER;
/* Channel listening on any address. */
7311 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7312 lm2 |= HCI_LM_ACCEPT;
7313 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7314 lm2 |= HCI_LM_MASTER;
7317 read_unlock(&chan_list_lock);
7319 return exact ? lm1 : lm2;
7322 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7323 * from an existing channel in the list or from the beginning of the
7324 * global list (by passing NULL as first parameter).
/* Matching criteria: fixed channel type, listening state, source address
 * equal to the link's local address or BDADDR_ANY, and matching source
 * address type.  The success path (taking a reference on the match before
 * dropping the lock) falls in a gap of this extract — TODO confirm.
 */
7326 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
7327 struct hci_conn *hcon)
7329 u8 src_type = bdaddr_src_type(hcon);
7331 read_lock(&chan_list_lock);
/* Resume after @c, or start from the head when @c is NULL. */
7334 c = list_next_entry(c, global_l);
7336 c = list_entry(chan_list.next, typeof(*c), global_l);
7338 list_for_each_entry_from(c, &chan_list, global_l) {
7339 if (c->chan_type != L2CAP_CHAN_FIXED)
7341 if (c->state != BT_LISTEN)
7343 if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
7345 if (src_type != c->src_type)
/* Match found: unlock and return (return statement not visible here). */
7349 read_unlock(&chan_list_lock);
/* No match: unlock; presumably returns NULL (gap in extract). */
7353 read_unlock(&chan_list_lock);
/* HCI callback: an ACL or LE link finished connecting (status 0) or failed.
 *
 * On failure the whole l2cap_conn is torn down.  On success, creates the
 * l2cap_conn and walks all listening fixed channels, spawning a child
 * channel on this connection for each one (unless a client channel already
 * claimed the same CID), then kicks l2cap_conn_ready().
 */
7358 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7360 struct hci_dev *hdev = hcon->hdev;
7361 struct l2cap_conn *conn;
7362 struct l2cap_chan *pchan;
/* Only ACL and LE links carry L2CAP. */
7365 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7368 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
/* Non-zero HCI status: destroy any L2CAP state for this link. */
7371 l2cap_conn_del(hcon, bt_to_errno(status));
7375 conn = l2cap_conn_add(hcon);
7379 dst_type = bdaddr_dst_type(hcon);
7381 /* If device is blocked, do not create channels for it */
7382 if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
7385 /* Find fixed channels and notify them of the new connection. We
7386 * use multiple individual lookups, continuing each time where
7387 * we left off, because the list lock would prevent calling the
7388 * potentially sleeping l2cap_chan_lock() function.
7390 pchan = l2cap_global_fixed_chan(NULL, hcon);
7392 struct l2cap_chan *chan, *next;
7394 /* Client fixed channels should override server ones */
7395 if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7398 l2cap_chan_lock(pchan);
/* Ask the owner (socket layer etc.) to instantiate a child channel. */
7399 chan = pchan->ops->new_connection(pchan);
7401 bacpy(&chan->src, &hcon->src);
7402 bacpy(&chan->dst, &hcon->dst);
7403 chan->src_type = bdaddr_src_type(hcon);
7404 chan->dst_type = dst_type;
7406 __l2cap_chan_add(conn, chan);
7409 l2cap_chan_unlock(pchan);
/* Advance the iteration before dropping our reference to pchan. */
7411 next = l2cap_global_fixed_chan(pchan, hcon);
7412 l2cap_chan_put(pchan);
7416 l2cap_conn_ready(conn);
/* HCI asks which reason to use when disconnecting this link.
 * Returns the L2CAP-recorded disconnect reason, or the generic
 * "remote user terminated" code when no l2cap_conn exists (the NULL
 * check sits in a gap of this extract — TODO confirm).
 */
7419 int l2cap_disconn_ind(struct hci_conn *hcon)
7421 struct l2cap_conn *conn = hcon->l2cap_data;
7423 BT_DBG("hcon %p", hcon);
7426 return HCI_ERROR_REMOTE_USER_TERM;
7427 return conn->disc_reason;
/* HCI callback: the link is gone — tear down all L2CAP state for it,
 * translating the HCI reason code into an errno for the channels.
 */
7430 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7432 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7435 BT_DBG("hcon %p reason %d", hcon, reason);
7437 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel.
 *
 * Encryption dropped: MEDIUM security starts a grace timer; HIGH/FIPS
 * channels are closed immediately.  Encryption (re)established: clear the
 * grace timer for MEDIUM channels.
 */
7440 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7442 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7445 if (encrypt == 0x00) {
7446 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7447 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7448 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7449 chan->sec_level == BT_SECURITY_FIPS)
7450 l2cap_chan_close(chan, ECONNREFUSED);
7452 if (chan->sec_level == BT_SECURITY_MEDIUM)
7453 __clear_chan_timer(chan);
/* HCI callback: an authentication/encryption procedure completed.
 *
 * Walks every channel on the connection and advances its state machine:
 * resume connected/configuring channels, start pending connects (only if
 * the encryption key size passes l2cap_check_enc_key_size), and answer
 * deferred incoming connects (BT_CONNECT2) with success, pending, or
 * security-block depending on @status and the DEFER_SETUP flag.
 */
7457 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7459 struct l2cap_conn *conn = hcon->l2cap_data;
7460 struct l2cap_chan *chan;
7465 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7467 mutex_lock(&conn->chan_lock);
7469 list_for_each_entry(chan, &conn->chan_l, list) {
7470 l2cap_chan_lock(chan);
7472 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7473 state_to_string(chan->state));
/* A2MP channels manage their own security; skip them. */
7475 if (chan->scid == L2CAP_CID_A2MP) {
7476 l2cap_chan_unlock(chan);
/* On success, adopt the link's (possibly upgraded) security level. */
7480 if (!status && encrypt)
7481 chan->sec_level = hcon->sec_level;
/* Skip channels with no security procedure pending. */
7483 if (!__l2cap_no_conn_pending(chan)) {
7484 l2cap_chan_unlock(chan);
7488 if (!status && (chan->state == BT_CONNECTED ||
7489 chan->state == BT_CONFIG)) {
7490 chan->ops->resume(chan);
7491 l2cap_check_encryption(chan, encrypt);
7492 l2cap_chan_unlock(chan);
/* Outgoing connect waiting on security: proceed or give up. */
7496 if (chan->state == BT_CONNECT) {
7497 if (!status && l2cap_check_enc_key_size(hcon))
7498 l2cap_start_connection(chan);
7500 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Deferred incoming connect (BR/EDR only): send the final
 * Connection Response now that security has been decided.
 */
7501 } else if (chan->state == BT_CONNECT2 &&
7502 chan->mode != L2CAP_MODE_LE_FLOWCTL) {
7503 struct l2cap_conn_rsp rsp;
7506 if (!status && l2cap_check_enc_key_size(hcon)) {
7507 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7508 res = L2CAP_CR_PEND;
7509 stat = L2CAP_CS_AUTHOR_PEND;
7510 chan->ops->defer(chan);
7512 l2cap_state_change(chan, BT_CONFIG);
7513 res = L2CAP_CR_SUCCESS;
7514 stat = L2CAP_CS_NO_INFO;
/* Security failed or key too short: refuse and disconnect. */
7517 l2cap_state_change(chan, BT_DISCONN);
7518 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7519 res = L2CAP_CR_SEC_BLOCK;
7520 stat = L2CAP_CS_NO_INFO;
7523 rsp.scid = cpu_to_le16(chan->dcid);
7524 rsp.dcid = cpu_to_le16(chan->scid);
7525 rsp.result = cpu_to_le16(res);
7526 rsp.status = cpu_to_le16(stat);
7527 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Accepted: immediately follow up with our Configure Request. */
7530 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7531 res == L2CAP_CR_SUCCESS) {
7533 set_bit(CONF_REQ_SENT, &chan->conf_state);
7534 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7536 l2cap_build_conf_req(chan, buf, sizeof(buf)),
7538 chan->num_conf_req++;
7542 l2cap_chan_unlock(chan);
7545 mutex_unlock(&conn->chan_lock);
/* Entry point for inbound ACL data from HCI.
 *
 * Reassembles L2CAP PDUs from ACL fragments: an ACL_START(_NO_FLUSH)
 * fragment carries the Basic L2CAP header whose length field tells us the
 * total PDU size; continuation fragments are appended into conn->rx_skb
 * until rx_len reaches zero, then the complete frame is handed to
 * l2cap_recv_frame().  Malformed sequences mark the connection unreliable
 * via l2cap_conn_unreliable(ECOMM).
 */
7548 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7550 struct l2cap_conn *conn = hcon->l2cap_data;
7551 struct l2cap_hdr *hdr;
7554 /* For AMP controller do not create l2cap conn */
7555 if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
7559 conn = l2cap_conn_add(hcon);
7564 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
/* Dispatch on the ACL packet-boundary flags (switch head is in a gap
 * of this extract).
 */
7568 case ACL_START_NO_FLUSH:
/* A new start while reassembly is in progress: drop the partial PDU. */
7571 BT_ERR("Unexpected start frame (len %d)", skb->len);
7572 kfree_skb(conn->rx_skb);
7573 conn->rx_skb = NULL;
7575 l2cap_conn_unreliable(conn, ECOMM);
7578 /* Start fragment always begin with Basic L2CAP header */
7579 if (skb->len < L2CAP_HDR_SIZE) {
7580 BT_ERR("Frame is too short (len %d)", skb->len);
7581 l2cap_conn_unreliable(conn, ECOMM);
7585 hdr = (struct l2cap_hdr *) skb->data;
7586 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7588 if (len == skb->len) {
7589 /* Complete frame received */
7590 l2cap_recv_frame(conn, skb);
7594 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7596 if (skb->len > len) {
7597 BT_ERR("Frame is too long (len %d, expected len %d)",
7599 l2cap_conn_unreliable(conn, ECOMM);
7603 /* Allocate skb for the complete frame (with header) */
7604 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7608 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len tracks how many bytes are still expected. */
7610 conn->rx_len = len - skb->len;
7614 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation without a pending reassembly is a protocol error. */
7616 if (!conn->rx_len) {
7617 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7618 l2cap_conn_unreliable(conn, ECOMM);
7622 if (skb->len > conn->rx_len) {
7623 BT_ERR("Fragment is too long (len %d, expected %d)",
7624 skb->len, conn->rx_len);
7625 kfree_skb(conn->rx_skb);
7626 conn->rx_skb = NULL;
7628 l2cap_conn_unreliable(conn, ECOMM);
7632 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7634 conn->rx_len -= skb->len;
7636 if (!conn->rx_len) {
7637 /* Complete frame received. l2cap_recv_frame
7638 * takes ownership of the skb so set the global
7639 * rx_skb pointer to NULL first.
7641 struct sk_buff *rx_skb = conn->rx_skb;
7642 conn->rx_skb = NULL;
7643 l2cap_recv_frame(conn, rx_skb);
/* L2CAP's callback registration with the HCI core (hci_register_cb).
 * The .name initializer sits in a gap of this extract.
 */
7652 static struct hci_cb l2cap_cb = {
7654 .connect_cfm = l2cap_connect_cfm,
7655 .disconn_cfm = l2cap_disconn_cfm,
7656 .security_cfm = l2cap_security_cfm,
/* seq_file show callback for /sys/kernel/debug/bluetooth/l2cap: dumps one
 * line per global channel (addresses, types, state, PSM, CIDs, MTUs,
 * security level and mode) under the channel-list read lock.
 */
7659 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7661 struct l2cap_chan *c;
7663 read_lock(&chan_list_lock);
7665 list_for_each_entry(c, &chan_list, global_l) {
7666 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7667 &c->src, c->src_type, &c->dst, c->dst_type,
7668 c->state, __le16_to_cpu(c->psm),
7669 c->scid, c->dcid, c->imtu, c->omtu,
7670 c->sec_level, c->mode);
7673 read_unlock(&chan_list_lock);
/* debugfs open hook: standard single_open() wrapper around the show fn. */
7678 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7680 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry (seq_file based; the .read
 * initializer sits in a gap of this extract).
 */
7683 static const struct file_operations l2cap_debugfs_fops = {
7684 .open = l2cap_debugfs_open,
7686 .llseek = seq_lseek,
7687 .release = single_release,
/* Dentry of the created debugfs file, removed again in l2cap_exit(). */
7690 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket family, hook into HCI, and
 * (when bt_debugfs exists) create the debugfs channel dump plus tunables
 * for LE credits and MPS.
 */
7692 int __init l2cap_init(void)
7696 err = l2cap_init_sockets();
7700 hci_register_cb(&l2cap_cb);
/* Skip debugfs entries when the bluetooth debugfs root is unavailable. */
7702 if (IS_ERR_OR_NULL(bt_debugfs))
7705 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7706 NULL, &l2cap_debugfs_fops);
7708 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7710 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
/* Module teardown: reverse of l2cap_init() in reverse order — remove the
 * debugfs file, unhook from HCI, then tear down the socket layer.
 */
7716 void l2cap_exit(void)
7718 debugfs_remove(l2cap_debugfs)
7719 hci_unregister_cb(&l2cap_cb);
7720 l2cap_cleanup_sockets();
/* Module parameter: allows disabling ERTM (declared earlier in the file). */
7723 module_param(disable_ertm, bool, 0644);
7724 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");